
[Xen-changelog] [xen master] x86/mem_sharing: reorder when pages are unlocked and released



commit c2c6f8b68c082caa14c49c2a02ed527e8cf0f327
Author:     Tamas K Lengyel <tamas@xxxxxxxxxxxxx>
AuthorDate: Fri Jul 19 13:47:17 2019 +0200
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Fri Jul 19 13:47:17 2019 +0200

    x86/mem_sharing: reorder when pages are unlocked and released
    
    Calling _put_page_type while also holding the page_lock for that page
    can cause a deadlock. There may still be code paths where this is an
    issue, but for normal sharing purposes the reordering has been tested
    and works.
    
    The extra page reference grabbed at certain points is no longer needed:
    with this reordering a reference is already held until the point where
    it is required, so the extra one is redundant and is dropped.
    
    The comment being dropped is out-of-date and no longer correct.
    
    Signed-off-by: Tamas K Lengyel <tamas@xxxxxxxxxxxxx>
    Acked-by: Jan Beulich <jbeulich@xxxxxxxx>
---
 xen/arch/x86/mm/mem_sharing.c | 40 ++++++++++++----------------------------
 1 file changed, 12 insertions(+), 28 deletions(-)

diff --git a/xen/arch/x86/mm/mem_sharing.c b/xen/arch/x86/mm/mem_sharing.c
index 58d9157fa8..6c033d1488 100644
--- a/xen/arch/x86/mm/mem_sharing.c
+++ b/xen/arch/x86/mm/mem_sharing.c
@@ -648,10 +648,6 @@ static int page_make_private(struct domain *d, struct page_info *page)
         return -EBUSY;
     }
 
-    /* We can only change the type if count is one */
-    /* Because we are locking pages individually, we need to drop
-     * the lock here, while the page is typed. We cannot risk the 
-     * race of page_unlock and then put_page_type. */
     expected_type = (PGT_shared_page | PGT_validated | PGT_locked | 2);
     if ( page->u.inuse.type_info != expected_type )
     {
@@ -660,12 +656,11 @@ static int page_make_private(struct domain *d, struct page_info *page)
         return -EEXIST;
     }
 
+    mem_sharing_page_unlock(page);
+
     /* Drop the final typecount */
     put_page_and_type(page);
 
-    /* Now that we've dropped the type, we can unlock */
-    mem_sharing_page_unlock(page);
-
     /* Change the owner */
     ASSERT(page_get_owner(page) == dom_cow);
     page_set_owner(page, d);
@@ -900,6 +895,7 @@ static int share_pages(struct domain *sd, gfn_t sgfn, shr_handle_t sh,
     p2m_type_t smfn_type, cmfn_type;
     struct two_gfns tg;
     struct rmap_iterator ri;
+    unsigned long put_count = 0;
 
     get_two_gfns(sd, sgfn, &smfn_type, NULL, &smfn,
                  cd, cgfn, &cmfn_type, NULL, &cmfn, 0, &tg);
@@ -964,15 +960,6 @@ static int share_pages(struct domain *sd, gfn_t sgfn, shr_handle_t sh,
         goto err_out;
     }
 
-    /* Acquire an extra reference, for the freeing below to be safe. */
-    if ( !get_page(cpage, dom_cow) )
-    {
-        ret = -EOVERFLOW;
-        mem_sharing_page_unlock(secondpg);
-        mem_sharing_page_unlock(firstpg);
-        goto err_out;
-    }
-
     /* Merge the lists together */
     rmap_seed_iterator(cpage, &ri);
     while ( (gfn = rmap_iterate(cpage, &ri)) != NULL)
@@ -984,13 +971,14 @@ static int share_pages(struct domain *sd, gfn_t sgfn, shr_handle_t sh,
          * Don't change the type of rmap for the client page. */
         rmap_del(gfn, cpage, 0);
         rmap_add(gfn, spage);
-        put_page_and_type(cpage);
+        put_count++;
         d = get_domain_by_id(gfn->domain);
         BUG_ON(!d);
         BUG_ON(set_shared_p2m_entry(d, gfn->gfn, smfn));
         put_domain(d);
     }
     ASSERT(list_empty(&cpage->sharing->gfns));
+    BUG_ON(!put_count);
 
     /* Clear the rest of the shared state */
     page_sharing_dispose(cpage);
@@ -1001,7 +989,9 @@ static int share_pages(struct domain *sd, gfn_t sgfn, shr_handle_t sh,
 
     /* Free the client page */
     put_page_alloc_ref(cpage);
-    put_page(cpage);
+
+    while ( put_count-- )
+        put_page_and_type(cpage);
 
     /* We managed to free a domain page. */
     atomic_dec(&nr_shared_mfns);
@@ -1165,19 +1155,13 @@ int __mem_sharing_unshare_page(struct domain *d,
     {
         if ( !last_gfn )
             mem_sharing_gfn_destroy(page, d, gfn_info);
-        put_page_and_type(page);
+
         mem_sharing_page_unlock(page);
+
         if ( last_gfn )
-        {
-            if ( !get_page(page, dom_cow) )
-            {
-                put_gfn(d, gfn);
-                domain_crash(d);
-                return -EOVERFLOW;
-            }
             put_page_alloc_ref(page);
-            put_page(page);
-        }
+
+        put_page_and_type(page);
         put_gfn(d, gfn);
 
         return 0;
--
generated by git-patchbot for /home/xen/git/xen.git#master

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/xen-changelog

 

