[Xen-changelog] [xen-unstable] x86/mm: Enforce lock ordering for sharing page locks



# HG changeset patch
# User Andres Lagar-Cavilla <andres@xxxxxxxxxxxxxxxx>
# Date 1327581986 0
# Node ID 823829bde70517ae047c10a71b808bc1419d7dff
# Parent  7e9b38097888ce31743815460e57791896b420ac
x86/mm: Enforce lock ordering for sharing page locks

Use the ordering constructs in mm-locks.h to enforce an order
for the p2m and page locks in the sharing code. Applies to either
the global sharing lock (in audit mode) or the per page locks.
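For illustration only, a minimal sketch of the ordering this enforces when two
pages are locked during a share and the p2m is then updated (helper names as
introduced by this patch; the sharing work and error handling are omitted, and
the increasing-order comparison is the deadlock-avoidance rule described in
asm-x86/mm.h):

    static void sharing_lock_order_sketch(struct page_info *a,
                                          struct page_info *b)
    {
        /* Lock pages in increasing order so two sharers cannot deadlock. */
        if ( a > b )
        {
            struct page_info *t = a;
            a = b;
            b = t;
        }

        mem_sharing_page_lock(a);  /* records the page lock in the mm order */
        mem_sharing_page_lock(b);  /* recursion count allows the second lock */

        /* The p2m lock may be taken here, after the page locks. Taking a
         * page lock while the p2m lock is already held would trip the
         * ordering check added in mm-locks.h. */

        mem_sharing_page_unlock(b);
        mem_sharing_page_unlock(a);
    }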

Signed-off-by: Andres Lagar-Cavilla <andres@xxxxxxxxxxxxxxxx>
Signed-off-by: Adin Scannell <adin@xxxxxxxxxxx>
Acked-by: Tim Deegan <tim@xxxxxxx>
Committed-by: Tim Deegan <tim@xxxxxxx>
---


diff -r 7e9b38097888 -r 823829bde705 xen/arch/x86/mm/mem_sharing.c
--- a/xen/arch/x86/mm/mem_sharing.c     Thu Jan 26 12:46:26 2012 +0000
+++ b/xen/arch/x86/mm/mem_sharing.c     Thu Jan 26 12:46:26 2012 +0000
@@ -37,6 +37,13 @@
 
 static shr_handle_t next_handle = 1;
 
+typedef struct pg_lock_data {
+    int mm_unlock_level;
+    unsigned short recurse_count;
+} pg_lock_data_t;
+
+DEFINE_PER_CPU(pg_lock_data_t, __pld);
+
 #if MEM_SHARING_AUDIT
 
 static mm_lock_t shr_lock;
@@ -85,16 +92,25 @@
 static inline int mem_sharing_page_lock(struct page_info *pg)
 {
     int rc;
+    pg_lock_data_t *pld = &(this_cpu(__pld));
+
+    page_sharing_mm_pre_lock();
     rc = page_lock(pg);
     if ( rc )
     {
         preempt_disable();
+        page_sharing_mm_post_lock(&pld->mm_unlock_level, 
+                                  &pld->recurse_count);
     }
     return rc;
 }
 
 static inline void mem_sharing_page_unlock(struct page_info *pg)
 {
+    pg_lock_data_t *pld = &(this_cpu(__pld));
+
+    page_sharing_mm_unlock(pld->mm_unlock_level, 
+                           &pld->recurse_count);
     preempt_enable();
     page_unlock(pg);
 }
diff -r 7e9b38097888 -r 823829bde705 xen/arch/x86/mm/mm-locks.h
--- a/xen/arch/x86/mm/mm-locks.h        Thu Jan 26 12:46:26 2012 +0000
+++ b/xen/arch/x86/mm/mm-locks.h        Thu Jan 26 12:46:26 2012 +0000
@@ -156,7 +156,23 @@
 
 #else
 
-/* We use an efficient per-page lock when AUDIT is not enabled. */
+/* Sharing per page lock
+ *
+ * This is an external lock, not represented by an mm_lock_t. The memory
+ * sharing lock uses it to protect addition and removal of (gfn,domain)
+ * tuples to a shared page. We enforce order here against the p2m lock,
+ * which is taken after the page_lock to change the gfn's p2m entry.
+ *
+ * Note that in sharing audit mode, we use the global page lock above, 
+ * instead.
+ *
+ * The lock is recursive because during share we lock two pages. */
+
+declare_mm_order_constraint(per_page_sharing)
+#define page_sharing_mm_pre_lock()   mm_enforce_order_lock_pre_per_page_sharing()
+#define page_sharing_mm_post_lock(l, r) \
+        mm_enforce_order_lock_post_per_page_sharing((l), (r))
+#define page_sharing_mm_unlock(l, r) mm_enforce_order_unlock((l), (r))
 
 #endif /* MEM_SHARING_AUDIT */
 
diff -r 7e9b38097888 -r 823829bde705 xen/include/asm-x86/mm.h
--- a/xen/include/asm-x86/mm.h  Thu Jan 26 12:46:26 2012 +0000
+++ b/xen/include/asm-x86/mm.h  Thu Jan 26 12:46:26 2012 +0000
@@ -351,7 +351,8 @@
  * backing. Nesting may happen when sharing (and locking) two pages -- deadlock 
  * is avoided by locking pages in increasing order.
  * Memory sharing may take the p2m_lock within a page_lock/unlock
- * critical section. 
+ * critical section. We enforce ordering between page_lock and p2m_lock using an
+ * mm-locks.h construct. 
  *
  * These two users (pte serialization and memory sharing) do not collide, since
  * sharing is only supported for hvm guests, which do not perform pv pte updates.

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog


 

