[Xen-devel] [PATCH 04 of 13] x86/mm: Enforce lock ordering for sharing page locks



 xen/arch/x86/mm/mem_sharing.c |  16 ++++++++++++++++
 xen/arch/x86/mm/mm-locks.h    |  18 +++++++++++++++++-
 xen/include/asm-x86/mm.h      |   3 ++-
 3 files changed, 35 insertions(+), 2 deletions(-)


Use the ordering constructs in mm-locks.h to enforce an ordering
between the page locks and the p2m lock in the sharing code. This applies
to either the global sharing lock (in audit mode) or the per-page locks.

Signed-off-by: Andres Lagar-Cavilla <andres@xxxxxxxxxxxxxxxx>
Signed-off-by: Adin Scannell <adin@xxxxxxxxxxx>
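
For illustration only (not part of the patch), here is a minimal standalone
sketch of the ordering idea that the mm-locks.h constructs implement. The
level values, names and the per-thread variable below are assumptions for the
sketch, not the actual Xen implementation: each lock class gets a level, a
per-CPU value tracks the deepest level currently held, and taking a lock of a
lower level than one already held trips an assertion at run time.

/* Sketch of run-time lock-order enforcement (illustrative names/levels). */
#include <assert.h>

#define LEVEL_PER_PAGE_SHARING  16   /* taken first (lower level)      */
#define LEVEL_P2M               32   /* taken inside the page lock     */

static __thread int cur_level;       /* stands in for a per-CPU variable */

static void order_pre_lock(int level)
{
    /* A lock may only be taken at or above the current level. */
    assert(level >= cur_level);
}

static void order_post_lock(int level, int *unlock_level)
{
    *unlock_level = cur_level;       /* saved so unlock can restore it */
    cur_level = level;
}

static void order_unlock(int unlock_level)
{
    cur_level = unlock_level;
}

With this scheme, taking the per-page sharing lock and then the p2m lock
passes the check, while the reverse order asserts, which is exactly the
constraint the patch wires into mem_sharing_page_lock/unlock below.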

diff -r f5e3c94b2066 -r e66fe1c0b893 xen/arch/x86/mm/mem_sharing.c
--- a/xen/arch/x86/mm/mem_sharing.c
+++ b/xen/arch/x86/mm/mem_sharing.c
@@ -37,6 +37,13 @@
 
 static shr_handle_t next_handle = 1;
 
+typedef struct pg_lock_data {
+    int mm_unlock_level;
+    unsigned short recurse_count;
+} pg_lock_data_t;
+
+DEFINE_PER_CPU(pg_lock_data_t, __pld);
+
 #if MEM_SHARING_AUDIT
 
 static mm_lock_t shr_lock;
@@ -85,16 +92,25 @@ static inline int mem_sharing_page_lock(
 static inline int mem_sharing_page_lock(struct page_info *pg)
 {
     int rc;
+    pg_lock_data_t *pld = &(this_cpu(__pld));
+
+    page_sharing_mm_pre_lock();
     rc = page_lock(pg);
     if ( rc )
     {
         preempt_disable();
+        page_sharing_mm_post_lock(&pld->mm_unlock_level, 
+                                  &pld->recurse_count);
     }
     return rc;
 }
 
 static inline void mem_sharing_page_unlock(struct page_info *pg)
 {
+    pg_lock_data_t *pld = &(this_cpu(__pld));
+
+    page_sharing_mm_unlock(pld->mm_unlock_level, 
+                           &pld->recurse_count);
     preempt_enable();
     page_unlock(pg);
 }
diff -r f5e3c94b2066 -r e66fe1c0b893 xen/arch/x86/mm/mm-locks.h
--- a/xen/arch/x86/mm/mm-locks.h
+++ b/xen/arch/x86/mm/mm-locks.h
@@ -156,7 +156,23 @@ declare_mm_lock(shr)
 
 #else
 
-/* We use an efficient per-page lock when AUDIT is not enabled. */
+/* Sharing per page lock
+ *
+ * This is an external lock, not represented by an mm_lock_t. The memory
+ * sharing lock uses it to protect addition and removal of (gfn,domain)
+ * tuples to a shared page. We enforce order here against the p2m lock,
+ * which is taken after the page_lock to change the gfn's p2m entry.
+ *
+ * Note that in sharing audit mode, we use the global page lock above, 
+ * instead.
+ *
+ * The lock is recursive because during share we lock two pages. */
+
+declare_mm_order_constraint(per_page_sharing)
+#define page_sharing_mm_pre_lock()   mm_enforce_order_lock_pre_per_page_sharing()
+#define page_sharing_mm_post_lock(l, r) \
+        mm_enforce_order_lock_post_per_page_sharing((l), (r))
+#define page_sharing_mm_unlock(l, r) mm_enforce_order_unlock((l), (r))
 
 #endif /* MEM_SHARING_AUDIT */
 
diff -r f5e3c94b2066 -r e66fe1c0b893 xen/include/asm-x86/mm.h
--- a/xen/include/asm-x86/mm.h
+++ b/xen/include/asm-x86/mm.h
@@ -351,7 +351,8 @@ void clear_superpage_mark(struct page_in
 * backing. Nesting may happen when sharing (and locking) two pages -- deadlock
  * is avoided by locking pages in increasing order.
  * Memory sharing may take the p2m_lock within a page_lock/unlock
- * critical section. 
+ * critical section. We enforce ordering between page_lock and p2m_lock using an
+ * mm-locks.h construct. 
  *
  * These two users (pte serialization and memory sharing) do not collide, since
 * sharing is only supported for hvm guests, which do not perform pv pte updates.
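
For readers unfamiliar with the nesting described above, the following
illustrative caller pattern (not code from this series; do_share_update_p2m()
is a hypothetical placeholder and the error handling is simplified) shows the
order the constraint enforces: both page locks are taken first, in increasing
page order to avoid deadlock, and the p2m lock is only taken inside that
critical section.

static int share_pages_sketch(struct page_info *pa, struct page_info *pb)
{
    struct page_info *lo = (pa <= pb) ? pa : pb;
    struct page_info *hi = (pa <= pb) ? pb : pa;
    int rc;

    if ( !mem_sharing_page_lock(lo) )
        return -EBUSY;
    if ( (hi != lo) && !mem_sharing_page_lock(hi) )
    {
        mem_sharing_page_unlock(lo);
        return -EBUSY;
    }

    /* The p2m lock is taken (and dropped) inside this call, i.e. after
     * the page locks, matching the enforced page_lock -> p2m_lock order. */
    rc = do_share_update_p2m(pa, pb);

    if ( hi != lo )
        mem_sharing_page_unlock(hi);
    mem_sharing_page_unlock(lo);

    return rc;
}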

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel