[Xen-changelog] [xen master] use return value of domain_adjust_tot_pages() where feasible



commit 69a13bafe6bc9f76a06ff423d7fd52aa6016001f
Author:     Jan Beulich <jbeulich@xxxxxxxx>
AuthorDate: Tue Dec 3 12:41:54 2013 +0100
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Tue Dec 3 12:41:54 2013 +0100

    use return value of domain_adjust_tot_pages() where feasible
    
    This is generally cheaper than re-reading ->tot_pages.
    
    While doing so I also noticed an improper use (lacking error handling)
    of get_domain() as well as missing ->is_dying checks in the memory
    sharing code, which the patch fixes at once. In the course of doing
    this I further noticed other error paths there pointlessly calling
    put_page() et al with ->page_alloc_lock still held, which is also
    being reversed.
    
    Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
    Reviewed-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
    Reviewed-by: Tim Deegan <tim@xxxxxxx>
    Acked-by: Keir Fraser <keir@xxxxxxx>
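
    The pattern in isolation, as a toy, self-contained C model (struct
    toy_domain, adjust_tot_pages() and release_one_page() below are
    simplified stand-ins for illustration only, not the real Xen types
    or helpers):

        #include <stdio.h>
        #include <stdbool.h>

        struct toy_domain {
            unsigned long tot_pages;
            bool is_dying;
        };

        /* Like domain_adjust_tot_pages(): apply the delta, return the new total. */
        static unsigned long adjust_tot_pages(struct toy_domain *d, long delta)
        {
            d->tot_pages += delta;
            return d->tot_pages;
        }

        static int release_one_page(struct toy_domain *d)
        {
            bool drop_dom_ref;

            /* Mirrors the added ->is_dying checks: refuse work on a dying domain. */
            if ( d->is_dying )
                return -1;

            /*
             * Old:  adjust_tot_pages(d, -1); drop_dom_ref = (d->tot_pages == 0);
             * New:  use the return value directly, avoiding the second read.
             */
            drop_dom_ref = !adjust_tot_pages(d, -1);

            if ( drop_dom_ref )
                printf("last page gone, drop the domain reference here\n");

            return 0;
        }

        int main(void)
        {
            struct toy_domain d = { .tot_pages = 1, .is_dying = false };

            return release_one_page(&d);
        }

    The lock-ordering fix (calling put_page() et al only after dropping
    ->page_alloc_lock) and the get_knownalive_domain() substitution can
    be seen directly in the hunks below.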
---
 xen/arch/x86/mm/mem_sharing.c |   27 ++++++++++++++++++++-------
 xen/common/memory.c           |    4 ++--
 xen/common/page_alloc.c       |    6 +++---
 3 files changed, 25 insertions(+), 12 deletions(-)

diff --git a/xen/arch/x86/mm/mem_sharing.c b/xen/arch/x86/mm/mem_sharing.c
index 1e89f6c..c94856b 100644
--- a/xen/arch/x86/mm/mem_sharing.c
+++ b/xen/arch/x86/mm/mem_sharing.c
@@ -611,9 +611,16 @@ static int page_make_sharable(struct domain *d,
                        struct page_info *page, 
                        int expected_refcnt)
 {
-    int drop_dom_ref;
+    bool_t drop_dom_ref;
+
     spin_lock(&d->page_alloc_lock);
 
+    if ( d->is_dying )
+    {
+        spin_unlock(&d->page_alloc_lock);
+        return -EBUSY;
+    }
+
     /* Change page type and count atomically */
     if ( !get_page_and_type(page, d, PGT_shared_page) )
     {
@@ -624,8 +631,8 @@ static int page_make_sharable(struct domain *d,
     /* Check it wasn't already sharable and undo if it was */
     if ( (page->u.inuse.type_info & PGT_count_mask) != 1 )
     {
-        put_page_and_type(page);
         spin_unlock(&d->page_alloc_lock);
+        put_page_and_type(page);
         return -EEXIST;
     }
 
@@ -633,15 +640,14 @@ static int page_make_sharable(struct domain *d,
      * the second from get_page_and_type at the top of this function */
     if ( page->count_info != (PGC_allocated | (2 + expected_refcnt)) )
     {
+        spin_unlock(&d->page_alloc_lock);
         /* Return type count back to zero */
         put_page_and_type(page);
-        spin_unlock(&d->page_alloc_lock);
         return -E2BIG;
     }
 
     page_set_owner(page, dom_cow);
-    domain_adjust_tot_pages(d, -1);
-    drop_dom_ref = (d->tot_pages == 0);
+    drop_dom_ref = !domain_adjust_tot_pages(d, -1);
     page_list_del(page, &d->page_list);
     spin_unlock(&d->page_alloc_lock);
 
@@ -659,6 +665,13 @@ static int page_make_private(struct domain *d, struct page_info *page)
     
     spin_lock(&d->page_alloc_lock);
 
+    if ( d->is_dying )
+    {
+        spin_unlock(&d->page_alloc_lock);
+        put_page(page);
+        return -EBUSY;
+    }
+
     /* We can only change the type if count is one */
     /* Because we are locking pages individually, we need to drop
      * the lock here, while the page is typed. We cannot risk the 
@@ -666,8 +679,8 @@ static int page_make_private(struct domain *d, struct page_info *page)
     expected_type = (PGT_shared_page | PGT_validated | PGT_locked | 2);
     if ( page->u.inuse.type_info != expected_type )
     {
-        put_page(page);
         spin_unlock(&d->page_alloc_lock);
+        put_page(page);
         return -EEXIST;
     }
 
@@ -682,7 +695,7 @@ static int page_make_private(struct domain *d, struct page_info *page)
     page_set_owner(page, d);
 
     if ( domain_adjust_tot_pages(d, 1) == 1 )
-        get_domain(d);
+        get_knownalive_domain(d);
     page_list_add_tail(page, &d->page_list);
     spin_unlock(&d->page_alloc_lock);
 
diff --git a/xen/common/memory.c b/xen/common/memory.c
index 50b740f..eb7b72b 100644
--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -475,8 +475,8 @@ static long memory_exchange(XEN_GUEST_HANDLE_PARAM(xen_memory_exchange_t) arg)
                              (j * (1UL << exch.out.extent_order)));
 
                 spin_lock(&d->page_alloc_lock);
-                domain_adjust_tot_pages(d, -dec_count);
-                drop_dom_ref = (dec_count && !d->tot_pages);
+                drop_dom_ref = (dec_count &&
+                                !domain_adjust_tot_pages(d, -dec_count));
                 spin_unlock(&d->page_alloc_lock);
 
                 if ( drop_dom_ref )
diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index 9497623..8002bd2 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -1519,8 +1519,9 @@ struct page_info *alloc_domheap_pages(
 
 void free_domheap_pages(struct page_info *pg, unsigned int order)
 {
-    int            i, drop_dom_ref;
     struct domain *d = page_get_owner(pg);
+    unsigned int i;
+    bool_t drop_dom_ref;
 
     ASSERT(!in_irq());
 
@@ -1548,8 +1549,7 @@ void free_domheap_pages(struct page_info *pg, unsigned int order)
             page_list_del2(&pg[i], &d->page_list, &d->arch.relmem_list);
         }
 
-        domain_adjust_tot_pages(d, -(1 << order));
-        drop_dom_ref = (d->tot_pages == 0);
+        drop_dom_ref = !domain_adjust_tot_pages(d, -(1 << order));
 
         spin_unlock_recursive(&d->page_alloc_lock);
 
--
generated by git-patchbot for /home/xen/git/xen.git#master

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
