
[Xen-changelog] [xen-unstable] x86/mm: Reorder locks used by shadow code in anticipation of synchronized p2m lookups



# HG changeset patch
# User Andres Lagar-Cavilla <andres@xxxxxxxxxxxxxxxx>
# Date 1328185354 0
# Node ID 030db911083f44dce966709991eae0d3dd405bf5
# Parent  b2bfef17db293905d1d813765904ddc03ba87334
x86/mm: Reorder locks used by shadow code in anticipation of synchronized p2m lookups

Currently, mm-locks.h enforces a strict ordering between locks in the mm
layer, lest an inversion in the order in which locks are taken create a
risk of deadlock.
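
As a rough illustration of how such an ordering can be enforced (a minimal
sketch only; the real checks in mm-locks.h are macro-generated and track
more state), each lock is assigned a level, and a per-CPU record of the
highest level currently held is checked on every acquisition:

    static DEFINE_PER_CPU(int, mm_lock_level);

    /* Hypothetical helper: acquire 'lock', which sits at 'level' in the
     * global order.  Taking a lock at or below the current level would
     * allow an inversion against a CPU taking the same locks the other
     * way round, so treat it as a bug. */
    static void mm_lock_ordered(int level, spinlock_t *lock)
    {
        BUG_ON(level <= this_cpu(mm_lock_level));
        spin_lock(lock);
        this_cpu(mm_lock_level) = level;
    }

    static void mm_unlock_ordered(int prev_level, spinlock_t *lock)
    {
        spin_unlock(lock);
        /* Restore the level that was in force before the matching lock. */
        this_cpu(mm_lock_level) = prev_level;
    }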

Once p2m lookups become synchronized, get_gfn* calls take the p2m lock, and a
new set of inversions arises.  Reorder some of the locks in the shadow code so
that even in this case no deadlocks happen.
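
To make the inversion concrete, here is a hedged sketch (simplified; 'd',
'gfn' and 'p2mt' stand in for the actual arguments, and the real call
sites are the ones patched below).  With synchronized lookups, the old
shadow code would effectively do

    paging_lock(d);                       /* paging (shadow) lock first */
    gmfn = get_gfn_query(d, gfn, &p2mt);  /* ... then the p2m lock */

while other paths take the p2m lock before the paging lock, so two CPUs
can each hold one lock while waiting forever for the other.  The
reordering takes the p2m lock first, or uses an unlocked query where that
is safe:

    p2m_lock(p2m_get_hostp2m(d));         /* p2m lock first ... */
    paging_lock(d);                       /* ... then the paging lock */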

After this, synchronized p2m lookups are in principle ready to be enabled in
shadow mode.

Signed-off-by: Andres Lagar-Cavilla <andres@xxxxxxxxxxxxxxxx>
Acked-by: Tim Deegan <tim@xxxxxxx>
Committed-by: Tim Deegan <tim@xxxxxxx>
---


diff -r b2bfef17db29 -r 030db911083f xen/arch/x86/mm/shadow/common.c
--- a/xen/arch/x86/mm/shadow/common.c   Thu Feb 02 12:22:34 2012 +0000
+++ b/xen/arch/x86/mm/shadow/common.c   Thu Feb 02 12:22:34 2012 +0000
@@ -3609,6 +3609,8 @@
             || end_pfn >= p2m->max_mapped_pfn)
         return -EINVAL;
 
+    /* We perform p2m lookups, so lock the p2m upfront to avoid deadlock */
+    p2m_lock(p2m_get_hostp2m(d));
     paging_lock(d);
 
     if ( dirty_vram && (!nr ||
@@ -3782,6 +3784,7 @@
 
 out:
     paging_unlock(d);
+    p2m_unlock(p2m_get_hostp2m(d));
     return rc;
 }
 
diff -r b2bfef17db29 -r 030db911083f xen/arch/x86/mm/shadow/multi.c
--- a/xen/arch/x86/mm/shadow/multi.c    Thu Feb 02 12:22:34 2012 +0000
+++ b/xen/arch/x86/mm/shadow/multi.c    Thu Feb 02 12:22:34 2012 +0000
@@ -2444,7 +2444,7 @@
     perfc_incr(shadow_validate_gl1e_calls);
 
     gfn = guest_l1e_get_gfn(new_gl1e);
-    gmfn = get_gfn_query(v->domain, gfn, &p2mt);
+    gmfn = get_gfn_query_unlocked(v->domain, gfn_x(gfn), &p2mt);
 
     l1e_propagate_from_guest(v, new_gl1e, gmfn, &new_sl1e, ft_prefetch, p2mt);
     result |= shadow_set_l1e(v, sl1p, new_sl1e, p2mt, sl1mfn);
@@ -2466,7 +2466,6 @@
     }
 #endif /* OOS */
 
-    put_gfn(v->domain, gfn_x(gfn));
     return result;
 }
 
@@ -4715,8 +4714,6 @@
     unsigned long l3gfn;
     mfn_t l3mfn;
 
-    paging_lock(v->domain);
-
     gcr3 = (v->arch.hvm_vcpu.guest_cr[3]);
     /* fast path: the pagetable belongs to the current context */
     if ( gcr3 == gpa )
@@ -4728,8 +4725,11 @@
     {
         printk(XENLOG_DEBUG "sh_pagetable_dying: gpa not valid %"PRIpaddr"\n",
                gpa);
-        goto out;
+        goto out_put_gfn;
     }
+
+    paging_lock(v->domain);
+
     if ( !fast_path )
     {
         gl3pa = sh_map_domain_page(l3mfn);
@@ -4770,11 +4770,11 @@
 
     v->arch.paging.shadow.pagetable_dying = 1;
 
-out:
     if ( !fast_path )
         unmap_domain_page(gl3pa);
+    paging_unlock(v->domain);
+out_put_gfn:
     put_gfn(v->domain, l3gfn);
-    paging_unlock(v->domain);
 }
 #else
 static void sh_pagetable_dying(struct vcpu *v, paddr_t gpa)
@@ -4782,15 +4782,14 @@
     mfn_t smfn, gmfn;
     p2m_type_t p2mt;
 
+    gmfn = get_gfn_query(v->domain, _gfn(gpa >> PAGE_SHIFT), &p2mt);
     paging_lock(v->domain);
 
-    gmfn = get_gfn_query(v->domain, _gfn(gpa >> PAGE_SHIFT), &p2mt);
 #if GUEST_PAGING_LEVELS == 2
     smfn = shadow_hash_lookup(v, mfn_x(gmfn), SH_type_l2_32_shadow);
 #else
     smfn = shadow_hash_lookup(v, mfn_x(gmfn), SH_type_l4_64_shadow);
 #endif
-    put_gfn(v->domain, gpa >> PAGE_SHIFT);
     
     if ( mfn_valid(smfn) )
     {
@@ -4808,6 +4807,7 @@
     v->arch.paging.shadow.pagetable_dying = 1;
 
     paging_unlock(v->domain);
+    put_gfn(v->domain, gpa >> PAGE_SHIFT);
 }
 #endif
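
For reference, the resulting shape of the 2-/4-level variant of
sh_pagetable_dying() after this patch (reconstructed from the hunks
above, with the shadow bookkeeping elided):

    static void sh_pagetable_dying(struct vcpu *v, paddr_t gpa)
    {
        p2m_type_t p2mt;
        mfn_t gmfn;

        /* The p2m lookup (and hence the p2m lock) now comes before the
         * paging lock... */
        gmfn = get_gfn_query(v->domain, _gfn(gpa >> PAGE_SHIFT), &p2mt);
        paging_lock(v->domain);

        /* ... shadow hash lookup and dying-pagetable bookkeeping
         * elided ... */

        paging_unlock(v->domain);
        /* ... and the gfn reference is dropped only after the paging
         * lock is released, so the p2m lock is never taken inside it. */
        put_gfn(v->domain, gpa >> PAGE_SHIFT);
    }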
 

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog