
[Xen-changelog] [xen master] x86/shadow: Alter sh_remove_{all_}shadows{,_and_parents}() to take a domain



commit e5a0366ff7c930bceaeecc77d21fdd3894b9c050
Author:     Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
AuthorDate: Mon Aug 11 02:38:53 2014 +0100
Commit:     Tim Deegan <tim@xxxxxxx>
CommitDate: Fri Feb 20 14:48:01 2015 +0000

    x86/shadow: Alter sh_remove_{all_}shadows{,_and_parents}() to take a domain
    
    This allows the removal of 3 improper uses of d->vcpu[0] from toolstack context
    
    Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
    Reviewed-by: Tim Deegan <tim@xxxxxxx>
---
 xen/arch/x86/hvm/hvm.c          |    2 +-
 xen/arch/x86/mm.c               |    4 ++--
 xen/arch/x86/mm/shadow/common.c |   18 ++++++++----------
 xen/arch/x86/mm/shadow/multi.c  |   15 ++++++++-------
 xen/include/asm-x86/shadow.h    |    8 ++++----
 5 files changed, 23 insertions(+), 24 deletions(-)

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 72e383f..4734d71 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -6074,7 +6074,7 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg)
                 paging_mark_dirty(d, page_to_mfn(page));
                 /* These are most probably not page tables any more */
                 /* don't take a long time and don't die either */
-                sh_remove_shadows(d->vcpu[0], _mfn(page_to_mfn(page)), 1, 0);
+                sh_remove_shadows(d, _mfn(page_to_mfn(page)), 1, 0);
                 put_page(page);
             }
 
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index 9ab7a26..ca5369a 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -2118,7 +2118,7 @@ int free_page_type(struct page_info *page, unsigned long type,
         ASSERT(VALID_M2P(gmfn));
         /* Page sharing not supported for shadowed domains */
         if(!SHARED_M2P(gmfn))
-            shadow_remove_all_shadows(owner->vcpu[0], _mfn(gmfn));
+            shadow_remove_all_shadows(owner, _mfn(gmfn));
     }
 
     if ( !(type & PGT_partial) )
@@ -2283,7 +2283,7 @@ static int __get_page_type(struct page_info *page, unsigned long type,
                  && (page->count_info & PGC_page_table)
                  && !((page->shadow_flags & (1u<<29))
                       && type == PGT_writable_page) )
-               shadow_remove_all_shadows(d->vcpu[0], _mfn(page_to_mfn(page)));
+               shadow_remove_all_shadows(d, _mfn(page_to_mfn(page)));
 
             ASSERT(!(x & PGT_pae_xen_l2));
             if ( (x & PGT_type_mask) != type )
diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
index e10b578..30580ee 100644
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -687,7 +687,7 @@ static int oos_remove_write_access(struct vcpu *v, mfn_t gmfn,
          * the page.  If that doesn't work either, the guest is granting
          * his pagetables and must be killed after all.
          * This will flush the tlb, so we can return with no worries. */
-        sh_remove_shadows(v, gmfn, 0 /* Be thorough */, 1 /* Must succeed */);
+        sh_remove_shadows(d, gmfn, 0 /* Be thorough */, 1 /* Must succeed */);
         return 1;
     }
 
@@ -1130,7 +1130,7 @@ sh_validate_guest_pt_write(struct vcpu *v, mfn_t gmfn,
          * Since the validate call above will have made a "safe" (i.e. zero)
          * shadow entry, we can let the domain live even if we can't fully
          * unshadow the page. */
-        sh_remove_shadows(v, gmfn, 0, 0);
+        sh_remove_shadows(d, gmfn, 0, 0);
     }
 }
 
@@ -2570,7 +2570,7 @@ static int sh_remove_shadow_via_pointer(struct domain *d, mfn_t smfn)
     return rc;
 }
 
-void sh_remove_shadows(struct vcpu *v, mfn_t gmfn, int fast, int all)
+void sh_remove_shadows(struct domain *d, mfn_t gmfn, int fast, int all)
 /* Remove the shadows of this guest page.
  * If fast != 0, just try the quick heuristic, which will remove
  * at most one reference to each shadow of the page.  Otherwise, walk
@@ -2579,7 +2579,6 @@ void sh_remove_shadows(struct vcpu *v, mfn_t gmfn, int fast, int all)
  * (all != 0 implies fast == 0)
  */
 {
-    struct domain *d = v->domain;
     struct page_info *pg = mfn_to_page(gmfn);
     mfn_t smfn;
     unsigned char t;
@@ -2633,8 +2632,7 @@ void sh_remove_shadows(struct vcpu *v, mfn_t gmfn, int fast, int all)
      * can be called via put_page_type when we clear a shadow l1e).*/
     paging_lock_recursive(d);
 
-    SHADOW_PRINTK("d=%d, v=%d, gmfn=%05lx\n",
-                   d->domain_id, v->vcpu_id, mfn_x(gmfn));
+    SHADOW_PRINTK("d=%d: gmfn=%lx\n", d->domain_id, mfn_x(gmfn));
 
     /* Bail out now if the page is not shadowed */
     if ( (pg->count_info & PGC_page_table) == 0 )
@@ -2703,11 +2701,11 @@ void sh_remove_shadows(struct vcpu *v, mfn_t gmfn, int fast, int all)
 }
 
 static void
-sh_remove_all_shadows_and_parents(struct vcpu *v, mfn_t gmfn)
+sh_remove_all_shadows_and_parents(struct domain *d, mfn_t gmfn)
 /* Even harsher: this is a HVM page that we thing is no longer a pagetable.
  * Unshadow it, and recursively unshadow pages that reference it. */
 {
-    sh_remove_shadows(v, gmfn, 0, 1);
+    sh_remove_shadows(d, gmfn, 0, 1);
     /* XXX TODO:
      * Rework this hashtable walker to return a linked-list of all
      * the shadows it modified, then do breadth-first recursion
@@ -3384,7 +3382,7 @@ static void sh_unshadow_for_p2m_change(struct domain *d, unsigned long gfn,
         p2m_type_t p2mt = p2m_flags_to_type(l1e_get_flags(*p));
         if ( (p2m_is_valid(p2mt) || p2m_is_grant(p2mt)) && mfn_valid(mfn) )
         {
-            sh_remove_all_shadows_and_parents(v, mfn);
+            sh_remove_all_shadows_and_parents(d, mfn);
             if ( sh_remove_all_mappings(v, mfn) )
                 flush_tlb_mask(d->domain_dirty_cpumask);
         }
@@ -3419,7 +3417,7 @@ static void sh_unshadow_for_p2m_change(struct domain *d, unsigned long gfn,
                      || l1e_get_pfn(npte[i]) != mfn_x(omfn) )
                 {
                     /* This GFN->MFN mapping has gone away */
-                    sh_remove_all_shadows_and_parents(v, omfn);
+                    sh_remove_all_shadows_and_parents(d, omfn);
                     if ( sh_remove_all_mappings(v, omfn) )
                         cpumask_or(&flushmask, &flushmask,
                                    d->domain_dirty_cpumask);
diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c
index 0d1021b..7705674 100644
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -2564,7 +2564,7 @@ static inline void check_for_early_unshadow(struct vcpu *v, mfn_t gmfn)
                & (SHF_L2_32|SHF_L2_PAE|SHF_L2H_PAE|SHF_L4_64))) )
     {
         perfc_incr(shadow_early_unshadow);
-        sh_remove_shadows(v, gmfn, 1, 0 /* Fast, can fail to unshadow */ );
+        sh_remove_shadows(d, gmfn, 1, 0 /* Fast, can fail to unshadow */ );
         TRACE_SHADOW_PATH_FLAG(TRCE_SFLAG_EARLY_UNSHADOW);
     }
     v->arch.paging.shadow.last_emulated_mfn_for_unshadow = mfn_x(gmfn);
@@ -3251,7 +3251,7 @@ static int sh_page_fault(struct vcpu *v,
         SHADOW_PRINTK("user-mode fault to PT, unshadowing mfn %#lx\n",
                       mfn_x(gmfn));
         perfc_incr(shadow_fault_emulate_failed);
-        sh_remove_shadows(v, gmfn, 0 /* thorough */, 1 /* must succeed */);
+        sh_remove_shadows(d, gmfn, 0 /* thorough */, 1 /* must succeed */);
         trace_shadow_emulate_other(TRC_SHADOW_EMULATE_UNSHADOW_USER,
                                       va, gfn);
         goto done;
@@ -3293,7 +3293,7 @@ static int sh_page_fault(struct vcpu *v,
         }
 
         if ( !used )
-            sh_remove_shadows(v, gmfn, 1 /* fast */, 0 /* can fail */);
+            sh_remove_shadows(d, gmfn, 1 /* fast */, 0 /* can fail */);
     }
 
     /*
@@ -3331,7 +3331,7 @@ static int sh_page_fault(struct vcpu *v,
             gdprintk(XENLOG_DEBUG, "write to pagetable during event "
                      "injection: cr2=%#lx, mfn=%#lx\n",
                      va, mfn_x(gmfn));
-            sh_remove_shadows(v, gmfn, 0 /* thorough */, 1 /* must succeed */);
+            sh_remove_shadows(d, gmfn, 0 /* thorough */, 1 /* must succeed */);
             trace_shadow_emulate_other(TRC_SHADOW_EMULATE_UNSHADOW_EVTINJ,
                                        va, gfn);
             return EXCRET_fault_fixed;
@@ -3365,7 +3365,7 @@ static int sh_page_fault(struct vcpu *v,
         /* If this is actually a page table, then we have a bug, and need
          * to support more operations in the emulator.  More likely,
          * though, this is a hint that this page should not be shadowed. */
-        shadow_remove_all_shadows(v, gmfn);
+        shadow_remove_all_shadows(d, gmfn);
 
         trace_shadow_emulate_other(TRC_SHADOW_EMULATE_UNSHADOW_UNHANDLED,
                                    va, gfn);
@@ -4626,6 +4626,7 @@ static void *emulate_map_dest(struct vcpu *v,
                               u32 bytes,
                               struct sh_emulate_ctxt *sh_ctxt)
 {
+    struct domain *d = v->domain;
     void *map = NULL;
 
     sh_ctxt->mfn1 = emulate_gva_to_mfn(v, vaddr, sh_ctxt);
@@ -4647,7 +4648,7 @@ static void *emulate_map_dest(struct vcpu *v,
 
     /* Unaligned writes mean probably this isn't a pagetable */
     if ( vaddr & (bytes - 1) )
-        sh_remove_shadows(v, sh_ctxt->mfn1, 0, 0 /* Slow, can fail */ );
+        sh_remove_shadows(d, sh_ctxt->mfn1, 0, 0 /* Slow, can fail */ );
 
     if ( likely(((vaddr + bytes - 1) & PAGE_MASK) == (vaddr & PAGE_MASK)) )
     {
@@ -4674,7 +4675,7 @@ static void *emulate_map_dest(struct vcpu *v,
                     MAPPING_SILENT_FAIL : MAPPING_UNHANDLEABLE);
 
         /* Cross-page writes mean probably not a pagetable */
-        sh_remove_shadows(v, sh_ctxt->mfn2, 0, 0 /* Slow, can fail */ );
+        sh_remove_shadows(d, sh_ctxt->mfn2, 0, 0 /* Slow, can fail */ );
 
         mfns[0] = mfn_x(sh_ctxt->mfn1);
         mfns[1] = mfn_x(sh_ctxt->mfn2);
diff --git a/xen/include/asm-x86/shadow.h b/xen/include/asm-x86/shadow.h
index a4c8776..5b9772d 100644
--- a/xen/include/asm-x86/shadow.h
+++ b/xen/include/asm-x86/shadow.h
@@ -79,7 +79,7 @@ void shadow_teardown(struct domain *d);
 /* Call once all of the references to the domain have gone away */
 void shadow_final_teardown(struct domain *d);
 
-void sh_remove_shadows(struct vcpu *v, mfn_t gmfn, int fast, int all);
+void sh_remove_shadows(struct domain *d, mfn_t gmfn, int fast, int all);
 
 /* Discard _all_ mappings from the domain's shadows. */
 void shadow_blow_tables_per_domain(struct domain *d);
@@ -93,7 +93,7 @@ void shadow_blow_tables_per_domain(struct domain *d);
 #define shadow_track_dirty_vram(d, begin_pfn, nr, bitmap) \
     ({ ASSERT_UNREACHABLE(); -EOPNOTSUPP; })
 
-static inline void sh_remove_shadows(struct vcpu *v, mfn_t gmfn,
+static inline void sh_remove_shadows(struct domain *d, mfn_t gmfn,
                                      bool_t fast, bool_t all) {}
 
 static inline void shadow_blow_tables_per_domain(struct domain *d) {}
@@ -107,10 +107,10 @@ static inline int shadow_domctl(struct domain *d, xen_domctl_shadow_op_t *sc,
 #endif /* CONFIG_SHADOW_PAGING */
 
 /* Remove all shadows of the guest mfn. */
-static inline void shadow_remove_all_shadows(struct vcpu *v, mfn_t gmfn)
+static inline void shadow_remove_all_shadows(struct domain *d, mfn_t gmfn)
 {
     /* See the comment about locking in sh_remove_shadows */
-    sh_remove_shadows(v, gmfn, 0 /* Be thorough */, 1 /* Must succeed */);
+    sh_remove_shadows(d, gmfn, 0 /* Be thorough */, 1 /* Must succeed */);
 }
 
 #endif /* _XEN_SHADOW_H */
--
generated by git-patchbot for /home/xen/git/xen.git#master
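
For readers who want the interface change in isolation: the patch switches
sh_remove_shadows() and its wrappers from taking a struct vcpu * (and deriving
the domain from it, which forced toolstack-context callers to pass d->vcpu[0])
to taking the struct domain * directly. Below is a minimal, self-contained C
sketch of that before/after calling convention. It uses simplified stand-in
types and function names, not the real Xen structures or prototypes.

/* Illustrative sketch only: simplified stand-in types, not the real Xen
 * definitions.  It models the calling-convention change in this commit:
 * the callee takes a struct domain * directly instead of deriving it from
 * a struct vcpu *, so callers without a meaningful current vcpu (e.g.
 * toolstack context) no longer have to reach for d->vcpu[0]. */
#include <stdio.h>

struct domain { int domain_id; };
struct vcpu   { int vcpu_id; struct domain *domain; };

/* Old style: the domain is recovered from the vcpu inside the callee. */
static void old_remove_shadows(struct vcpu *v, unsigned long gmfn)
{
    struct domain *d = v->domain;
    printf("d%d: unshadow gmfn %#lx (reached via vcpu%d)\n",
           d->domain_id, gmfn, v->vcpu_id);
}

/* New style: the domain is passed explicitly; no vcpu is needed at all. */
static void new_remove_shadows(struct domain *d, unsigned long gmfn)
{
    printf("d%d: unshadow gmfn %#lx\n", d->domain_id, gmfn);
}

int main(void)
{
    struct domain d  = { .domain_id = 1 };
    struct vcpu   v0 = { .vcpu_id = 0, .domain = &d };

    old_remove_shadows(&v0, 0x1234UL); /* pre-patch callers passed d->vcpu[0] */
    new_remove_shadows(&d, 0x1234UL);  /* post-patch callers pass the domain  */
    return 0;
}

The call sites in the diff above follow the same pattern, e.g.
sh_remove_shadows(d, _mfn(page_to_mfn(page)), 1, 0) replacing
sh_remove_shadows(d->vcpu[0], _mfn(page_to_mfn(page)), 1, 0).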
