
[Xen-devel] [PATCH v3 1/4] x86/mem_sharing: reorder when pages are unlocked and released



Calling _put_page_type() while also holding the page_lock for that
page can cause a deadlock. Reorder the operations so the final type
reference is dropped only after the page has been unlocked; the
additional references that are already taken keep the page alive in
between.
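
To illustrate the reordering (a minimal sketch of the intended call
ordering only; the "..." stands for the surrounding logic and this is
not the exact code being patched):

    /* before: the final typecount is dropped under the page lock */
    mem_sharing_page_lock(page);
    ...
    put_page_and_type(page);
    mem_sharing_page_unlock(page);

    /* after: unlock first, then drop the reference */
    mem_sharing_page_lock(page);
    ...
    mem_sharing_page_unlock(page);
    put_page_and_type(page);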

Signed-off-by: Tamas K Lengyel <tamas@xxxxxxxxxxxxx>
Cc: Jan Beulich <jbeulich@xxxxxxxx>
Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Cc: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
Cc: Wei Liu <wei.liu2@xxxxxxxxxx>
Cc: Roger Pau Monne <roger.pau@xxxxxxxxxx>
---
v3: simplified the patch by keeping the additional page references that
    are already in place
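
share_pages() follows the same rule: rather than dropping the client
page's type reference for each rmap entry while both pages are still
locked, the loop only counts the references to drop and releases them
after the unlocks. Roughly (a condensed, hypothetical sketch of the
hunks below, omitting declarations and the p2m updates):

    unsigned long put_count = 0;

    /* with firstpg/secondpg still locked */
    while ( (gfn = rmap_iterate(cpage, &ri)) != NULL )
        put_count++;            /* defer put_page_and_type(cpage) */

    mem_sharing_page_unlock(secondpg);
    mem_sharing_page_unlock(firstpg);

    BUG_ON(!put_count);
    while ( put_count-- )
        put_page_and_type(cpage);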
---
 xen/arch/x86/mm/mem_sharing.c | 19 ++++++++++---------
 1 file changed, 10 insertions(+), 9 deletions(-)

diff --git a/xen/arch/x86/mm/mem_sharing.c b/xen/arch/x86/mm/mem_sharing.c
index dfc279d371..e2f74ac770 100644
--- a/xen/arch/x86/mm/mem_sharing.c
+++ b/xen/arch/x86/mm/mem_sharing.c
@@ -648,10 +648,6 @@ static int page_make_private(struct domain *d, struct page_info *page)
         return -EBUSY;
     }
 
-    /* We can only change the type if count is one */
-    /* Because we are locking pages individually, we need to drop
-     * the lock here, while the page is typed. We cannot risk the 
-     * race of page_unlock and then put_page_type. */
     expected_type = (PGT_shared_page | PGT_validated | PGT_locked | 2);
     if ( page->u.inuse.type_info != expected_type )
     {
@@ -660,12 +656,12 @@ static int page_make_private(struct domain *d, struct page_info *page)
         return -EEXIST;
     }
 
-    /* Drop the final typecount */
-    put_page_and_type(page);
-
     /* Now that we've dropped the type, we can unlock */
     mem_sharing_page_unlock(page);
 
+    /* Drop the final typecount */
+    put_page_and_type(page);
+
     /* Change the owner */
     ASSERT(page_get_owner(page) == dom_cow);
     page_set_owner(page, d);
@@ -900,6 +896,7 @@ static int share_pages(struct domain *sd, gfn_t sgfn, shr_handle_t sh,
     p2m_type_t smfn_type, cmfn_type;
     struct two_gfns tg;
     struct rmap_iterator ri;
+    unsigned long put_count = 0;
 
     get_two_gfns(sd, sgfn, &smfn_type, NULL, &smfn,
                  cd, cgfn, &cmfn_type, NULL, &cmfn, 0, &tg);
@@ -984,7 +981,7 @@ static int share_pages(struct domain *sd, gfn_t sgfn, shr_handle_t sh,
          * Don't change the type of rmap for the client page. */
         rmap_del(gfn, cpage, 0);
         rmap_add(gfn, spage);
-        put_page_and_type(cpage);
+        put_count++;
         d = get_domain_by_id(gfn->domain);
         BUG_ON(!d);
         BUG_ON(set_shared_p2m_entry(d, gfn->gfn, smfn));
@@ -999,6 +996,10 @@ static int share_pages(struct domain *sd, gfn_t sgfn, shr_handle_t sh,
     mem_sharing_page_unlock(secondpg);
     mem_sharing_page_unlock(firstpg);
 
+    BUG_ON(!put_count);
+    while ( put_count-- )
+        put_page_and_type(cpage);
+
     /* Free the client page */
     if(test_and_clear_bit(_PGC_allocated, &cpage->count_info))
         put_page(cpage);
@@ -1167,8 +1168,8 @@ int __mem_sharing_unshare_page(struct domain *d,
     {
         if ( !last_gfn )
             mem_sharing_gfn_destroy(page, d, gfn_info);
-        put_page_and_type(page);
         mem_sharing_page_unlock(page);
+        put_page_and_type(page);
         if ( last_gfn )
         {
             if ( !get_page(page, dom_cow) )
-- 
2.20.1

