[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [PATCH v3 2/4] x86/mem_sharing: introduce and use page_lock_memshr instead of page_lock



Patch cf4b30dca0a "Add debug code to detect illegal page_lock and put_page_type
ordering" added extra sanity checking to page_lock/page_unlock for debug builds
with the assumption that no hypervisor path ever locks two pages at once.

This assumption doesn't hold during memory sharing, so we introduce separate
functions, page_lock_memshr and page_unlock_memshr, to be used exclusively
in the memory sharing subsystem.

Also place these functions behind their appropriate Kconfig gates.

Signed-off-by: Tamas K Lengyel <tamas@xxxxxxxxxxxxx>
Cc: Jan Beulich <jbeulich@xxxxxxxx>
Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Cc: Wei Liu <wei.liu2@xxxxxxxxxx>
Cc: Roger Pau Monne <roger.pau@xxxxxxxxxx>
Cc: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
---
v3: this patch was "x86/mm: conditionally check page_lock/page_unlock ownership"
---
 xen/arch/x86/mm.c             | 46 ++++++++++++++++++++++++++++-------
 xen/arch/x86/mm/mem_sharing.c |  4 +--
 xen/include/asm-x86/mm.h      |  6 ++++-
 3 files changed, 44 insertions(+), 12 deletions(-)

diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index 45fadbab61..c2c92a96ac 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -2030,12 +2030,11 @@ static inline bool current_locked_page_ne_check(struct page_info *page) {
 #define current_locked_page_ne_check(x) true
 #endif
 
-int page_lock(struct page_info *page)
+#if defined(CONFIG_PV) || defined(CONFIG_HAS_MEM_SHARING)
+static int _page_lock(struct page_info *page)
 {
     unsigned long x, nx;
 
-    ASSERT(current_locked_page_check(NULL));
-
     do {
         while ( (x = page->u.inuse.type_info) & PGT_locked )
             cpu_relax();
@@ -2046,17 +2045,13 @@ int page_lock(struct page_info *page)
             return 0;
     } while ( cmpxchg(&page->u.inuse.type_info, x, nx) != x );
 
-    current_locked_page_set(page);
-
     return 1;
 }
 
-void page_unlock(struct page_info *page)
+static void _page_unlock(struct page_info *page)
 {
     unsigned long x, nx, y = page->u.inuse.type_info;
 
-    ASSERT(current_locked_page_check(page));
-
     do {
         x = y;
         ASSERT((x & PGT_count_mask) && (x & PGT_locked));
@@ -2065,11 +2060,44 @@ void page_unlock(struct page_info *page)
         /* We must not drop the last reference here. */
         ASSERT(nx & PGT_count_mask);
     } while ( (y = cmpxchg(&page->u.inuse.type_info, x, nx)) != x );
+}
+#endif
 
-    current_locked_page_set(NULL);
+#ifdef CONFIG_HAS_MEM_SHARING
+int page_lock_memshr(struct page_info *page)
+{
+    return _page_lock(page);
 }
 
+void page_unlock_memshr(struct page_info *page)
+{
+    _page_unlock(page);
+}
+#endif
+
 #ifdef CONFIG_PV
+int page_lock(struct page_info *page)
+{
+    int rc;
+
+    ASSERT(current_locked_page_check(NULL));
+
+    rc = _page_lock(page);
+
+    current_locked_page_set(page);
+
+    return rc;
+}
+
+void page_unlock(struct page_info *page)
+{
+    ASSERT(current_locked_page_check(page));
+
+    _page_unlock(page);
+
+    current_locked_page_set(NULL);
+}
+
 /*
  * PTE flags that a guest may change without re-validating the PTE.
  * All other bits affect translation, caching, or Xen's safety.
diff --git a/xen/arch/x86/mm/mem_sharing.c b/xen/arch/x86/mm/mem_sharing.c
index e2f74ac770..4b60bab28b 100644
--- a/xen/arch/x86/mm/mem_sharing.c
+++ b/xen/arch/x86/mm/mem_sharing.c
@@ -118,7 +118,7 @@ static inline int mem_sharing_page_lock(struct page_info *pg)
     pg_lock_data_t *pld = &(this_cpu(__pld));
 
     page_sharing_mm_pre_lock();
-    rc = page_lock(pg);
+    rc = page_lock_memshr(pg);
     if ( rc )
     {
         preempt_disable();
@@ -135,7 +135,7 @@ static inline void mem_sharing_page_unlock(struct page_info *pg)
     page_sharing_mm_unlock(pld->mm_unlock_level, 
                            &pld->recurse_count);
     preempt_enable();
-    page_unlock(pg);
+    page_unlock_memshr(pg);
 }
 
 static inline shr_handle_t get_next_handle(void)
diff --git a/xen/include/asm-x86/mm.h b/xen/include/asm-x86/mm.h
index 6faa563167..ba49eee24d 100644
--- a/xen/include/asm-x86/mm.h
+++ b/xen/include/asm-x86/mm.h
@@ -356,7 +356,8 @@ struct platform_bad_page {
 const struct platform_bad_page *get_platform_badpages(unsigned int *array_size);
 
 /* Per page locks:
- * page_lock() is used for two purposes: pte serialization, and memory sharing.
+ * page_lock() is used for pte serialization.
+ * page_lock_memshr() is used for memory sharing.
  *
  * All users of page lock for pte serialization live in mm.c, use it
  * to lock a page table page during pte updates, do not take other locks within
@@ -378,6 +379,9 @@ const struct platform_bad_page *get_platform_badpages(unsigned int *array_size);
 int page_lock(struct page_info *page);
 void page_unlock(struct page_info *page);
 
+int page_lock_memshr(struct page_info *page);
+void page_unlock_memshr(struct page_info *page);
+
 void put_page_type(struct page_info *page);
 int  get_page_type(struct page_info *page, unsigned long type);
 int  put_page_type_preemptible(struct page_info *page);
-- 
2.20.1


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel

 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.