
[Xen-changelog] [xen-unstable] x86/mm/p2m: Fix locking discipline around p2m lookups.



# HG changeset patch
# User Tim Deegan <Tim.Deegan@xxxxxxxxxx>
# Date 1307017012 -3600
# Node ID 462280f8fae2fcb137d98f6f1dab105afc6745c3
# Parent  32f80f08d7246cac9c75d4820698e754550a425a
x86/mm/p2m: Fix locking discipline around p2m lookups.

All gfn_to_mfn* functions except _query() might take the p2m lock,
so they can't be called with the p2m, shadow, hap or log_dirty lock held.
The remaining offender is the memory sharing code, which calls
_unshare() from inside the pagetable walker!  Fixing that is too big
for a cleanup patch like this one.
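
To make the rule concrete, here is a minimal sketch (editorial, not part
of the changeset) of a hypothetical caller inside the shadow code; the
lookup, lock and predicate names are real for this era of the tree, but
the function itself is invented:

/* Hypothetical illustration only.  Assumes it lives in
 * xen/arch/x86/mm/shadow/ with private.h in scope, so
 * shadow_lock()/shadow_unlock() are visible. */
static void walk_gfn_under_shadow_lock(struct domain *d, unsigned long gfn)
{
    p2m_type_t t;
    mfn_t mfn;

    shadow_lock(d);
    /* OK: gfn_to_mfn_query() is guaranteed never to take the p2m lock. */
    mfn = gfn_to_mfn_query(d, gfn, &t);
    /* WRONG here: gfn_to_mfn() may take the p2m lock under the shadow
     * lock and invert the locking order:
     *     mfn = gfn_to_mfn(d, gfn, &t);
     */
    if ( p2m_is_ram(t) )
    {
        /* ... safe to use mfn while the shadow lock is held ... */
    }
    shadow_unlock(d);
}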

Signed-off-by: Tim Deegan <Tim.Deegan@xxxxxxxxxx>
---


diff -r 32f80f08d724 -r 462280f8fae2 xen/arch/x86/hvm/mtrr.c
--- a/xen/arch/x86/hvm/mtrr.c   Thu Jun 02 13:16:52 2011 +0100
+++ b/xen/arch/x86/hvm/mtrr.c   Thu Jun 02 13:16:52 2011 +0100
@@ -390,7 +390,7 @@
     {
         struct domain *d = v->domain;
         p2m_type_t p2mt;
-        gfn_to_mfn(d, paddr_to_pfn(gpaddr), &p2mt);
+        gfn_to_mfn_query(d, paddr_to_pfn(gpaddr), &p2mt);
         if (p2m_is_ram(p2mt))
             gdprintk(XENLOG_WARNING,
                     "Conflict occurs for a given guest l1e flags:%x "
diff -r 32f80f08d724 -r 462280f8fae2 xen/arch/x86/mm/p2m.c
--- a/xen/arch/x86/mm/p2m.c     Thu Jun 02 13:16:52 2011 +0100
+++ b/xen/arch/x86/mm/p2m.c     Thu Jun 02 13:16:52 2011 +0100
@@ -566,7 +566,7 @@
     if ( 0 == rc )
         gdprintk(XENLOG_ERR,
             "set_mmio_p2m_entry: set_p2m_entry failed! mfn=%08lx\n",
-            mfn_x(gfn_to_mfn(d, gfn, &ot)));
+            mfn_x(gfn_to_mfn_query(d, gfn, &ot)));
     return rc;
 }
 
@@ -623,8 +623,8 @@
     p2m_unlock(p2m);
     if ( 0 == rc )
         gdprintk(XENLOG_ERR,
-            "set_mmio_p2m_entry: set_p2m_entry failed! mfn=%08lx\n",
-            gmfn_to_mfn(p2m->domain, gfn));
+            "set_shared_p2m_entry: set_p2m_entry failed! mfn=%08lx\n",
+            mfn_x(gfn_to_mfn_query(d, gfn, &ot)));
     return rc;
 }
 
diff -r 32f80f08d724 -r 462280f8fae2 xen/arch/x86/mm/shadow/common.c
--- a/xen/arch/x86/mm/shadow/common.c   Thu Jun 02 13:16:52 2011 +0100
+++ b/xen/arch/x86/mm/shadow/common.c   Thu Jun 02 13:16:52 2011 +0100
@@ -3712,7 +3712,7 @@
 
         /* Iterate over VRAM to track dirty bits. */
         for ( i = 0; i < nr; i++ ) {
-            mfn_t mfn = gfn_to_mfn(d, begin_pfn + i, &t);
+            mfn_t mfn = gfn_to_mfn_query(d, begin_pfn + i, &t);
             struct page_info *page;
             int dirty = 0;
             paddr_t sl1ma = dirty_vram->sl1ma[i];
@@ -3797,7 +3797,7 @@
                 /* was clean for more than two seconds, try to disable guest
                  * write access */
                 for ( i = begin_pfn; i < end_pfn; i++ ) {
-                    mfn_t mfn = gfn_to_mfn(d, i, &t);
+                    mfn_t mfn = gfn_to_mfn_query(d, i, &t);
                     if (mfn_x(mfn) != INVALID_MFN)
                     flush_tlb |= sh_remove_write_access(d->vcpu[0], mfn, 1, 0);
                 }
diff -r 32f80f08d724 -r 462280f8fae2 xen/include/asm-x86/p2m.h
--- a/xen/include/asm-x86/p2m.h Thu Jun 02 13:16:52 2011 +0100
+++ b/xen/include/asm-x86/p2m.h Thu Jun 02 13:16:52 2011 +0100
@@ -411,6 +411,10 @@
     return gfn_to_mfn_type_p2m(p2m, gfn, t, &a, q);
 }
 
+/* Syntactic sugar: most callers will use one of these. 
+ * N.B. gfn_to_mfn_query() is the _only_ one guaranteed not to take the
+ * p2m lock; none of the others can be called with the p2m, hap or
+ * shadow lock held. */
 #define gfn_to_mfn(d, g, t)         gfn_to_mfn_type((d), (g), (t), p2m_alloc)
 #define gfn_to_mfn_query(d, g, t)   gfn_to_mfn_type((d), (g), (t), p2m_query)
 #define gfn_to_mfn_guest(d, g, t)   gfn_to_mfn_type((d), (g), (t), p2m_guest)

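As a usage note on the three macros above (again a sketch, not from the
patch; d and gfn come from some hypothetical caller):

p2m_type_t t;

/* No paging locks held: may take the p2m lock and, for
 * populate-on-demand entries, allocate the backing page. */
mfn_t m_alloc = gfn_to_mfn(d, gfn, &t);

/* p2m, hap or shadow lock held: the only safe choice, a pure lookup. */
mfn_t m_query = gfn_to_mfn_query(d, gfn, &t);

/* Lookup on behalf of an actual guest access (assumption: e.g. from the
 * HVM emulation paths); like gfn_to_mfn(), it may take the p2m lock. */
mfn_t m_guest = gfn_to_mfn_guest(d, gfn, &t);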