[Xen-changelog] [xen-unstable] xen: centralize accounting for domain tot_pages
# HG changeset patch
# User Dan Magenheimer <dan.magenheimer@xxxxxxxxxx>
# Date 1355138153 0
# Node ID 21a5b181f8adef6c5f840ee8453de96b340021e3
# Parent 1b72138bddda1982f064f6411d3e4ff9400bd1bd
xen: centralize accounting for domain tot_pages

Provide and use a common function for all adjustments to a domain's
tot_pages counter in anticipation of future and/or out-of-tree patches
that must adjust related counters atomically.

Signed-off-by: Dan Magenheimer <dan.magenheimer@xxxxxxxxxx>
Committed-by: Keir Fraser <keir@xxxxxxx>
---

diff -r 1b72138bddda -r 21a5b181f8ad xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c	Mon Dec 10 11:18:25 2012 +0100
+++ b/xen/arch/x86/mm.c	Mon Dec 10 11:15:53 2012 +0000
@@ -3843,7 +3843,7 @@ int donate_page(
     {
         if ( d->tot_pages >= d->max_pages )
             goto fail;
-        d->tot_pages++;
+        domain_adjust_tot_pages(d, 1);
     }
 
     page->count_info = PGC_allocated | 1;
@@ -3893,7 +3893,7 @@ int steal_page(
     } while ( (y = cmpxchg(&page->count_info, x, x | 1)) != x );
 
     /* Unlink from original owner. */
-    if ( !(memflags & MEMF_no_refcount) && !--d->tot_pages )
+    if ( !(memflags & MEMF_no_refcount) && !domain_adjust_tot_pages(d, -1) )
        drop_dom_ref = 1;
     page_list_del(page, &d->page_list);
 
diff -r 1b72138bddda -r 21a5b181f8ad xen/arch/x86/mm/mem_sharing.c
--- a/xen/arch/x86/mm/mem_sharing.c	Mon Dec 10 11:18:25 2012 +0100
+++ b/xen/arch/x86/mm/mem_sharing.c	Mon Dec 10 11:15:53 2012 +0000
@@ -639,7 +639,7 @@ static int page_make_sharable(struct dom
     }
 
     page_set_owner(page, dom_cow);
-    d->tot_pages--;
+    domain_adjust_tot_pages(d, -1);
     drop_dom_ref = (d->tot_pages == 0);
     page_list_del(page, &d->page_list);
     spin_unlock(&d->page_alloc_lock);
@@ -680,7 +680,7 @@ static int page_make_private(struct doma
     ASSERT(page_get_owner(page) == dom_cow);
     page_set_owner(page, d);
 
-    if ( d->tot_pages++ == 0 )
+    if ( domain_adjust_tot_pages(d, 1) == 1 )
         get_domain(d);
     page_list_add_tail(page, &d->page_list);
     spin_unlock(&d->page_alloc_lock);
diff -r 1b72138bddda -r 21a5b181f8ad xen/common/grant_table.c
--- a/xen/common/grant_table.c	Mon Dec 10 11:18:25 2012 +0100
+++ b/xen/common/grant_table.c	Mon Dec 10 11:15:53 2012 +0000
@@ -1667,7 +1667,7 @@ gnttab_transfer(
         }
 
         /* Okay, add the page to 'e'. */
-        if ( unlikely(e->tot_pages++ == 0) )
+        if ( unlikely(domain_adjust_tot_pages(e, 1) == 1) )
             get_knownalive_domain(e);
         page_list_add_tail(page, &e->page_list);
         page_set_owner(page, e);
diff -r 1b72138bddda -r 21a5b181f8ad xen/common/memory.c
--- a/xen/common/memory.c	Mon Dec 10 11:18:25 2012 +0100
+++ b/xen/common/memory.c	Mon Dec 10 11:15:53 2012 +0000
@@ -465,7 +465,7 @@ static long memory_exchange(XEN_GUEST_HA
                      (j * (1UL << exch.out.extent_order)));
 
             spin_lock(&d->page_alloc_lock);
-            d->tot_pages -= dec_count;
+            domain_adjust_tot_pages(d, -dec_count);
             drop_dom_ref = (dec_count && !d->tot_pages);
             spin_unlock(&d->page_alloc_lock);
 
diff -r 1b72138bddda -r 21a5b181f8ad xen/common/page_alloc.c
--- a/xen/common/page_alloc.c	Mon Dec 10 11:18:25 2012 +0100
+++ b/xen/common/page_alloc.c	Mon Dec 10 11:15:53 2012 +0000
@@ -239,6 +239,12 @@ static long midsize_alloc_zone_pages;
 
 static DEFINE_SPINLOCK(heap_lock);
 
+unsigned long domain_adjust_tot_pages(struct domain *d, long pages)
+{
+    ASSERT(spin_is_locked(&d->page_alloc_lock));
+    return d->tot_pages += pages;
+}
+
 static unsigned long init_node_heap(int node, unsigned long mfn,
                                     unsigned long nr, bool_t *use_tail)
 {
@@ -1291,7 +1297,7 @@ int assign_pages(
         if ( unlikely(d->tot_pages == 0) )
             get_knownalive_domain(d);
 
-        d->tot_pages += 1 << order;
+        domain_adjust_tot_pages(d, 1 << order);
     }
 
     for ( i = 0; i < (1 << order); i++ )
@@ -1375,7 +1381,7 @@ void free_domheap_pages(struct page_info
                 page_list_del2(&pg[i], &d->page_list, &d->arch.relmem_list);
         }
 
-        d->tot_pages -= 1 << order;
+        domain_adjust_tot_pages(d, -(1 << order));
         drop_dom_ref = (d->tot_pages == 0);
 
         spin_unlock_recursive(&d->page_alloc_lock);
diff -r 1b72138bddda -r 21a5b181f8ad xen/include/xen/mm.h
--- a/xen/include/xen/mm.h	Mon Dec 10 11:18:25 2012 +0100
+++ b/xen/include/xen/mm.h	Mon Dec 10 11:15:53 2012 +0000
@@ -48,6 +48,8 @@ void free_xenheap_pages(void *v, unsigne
 #define alloc_xenheap_page() (alloc_xenheap_pages(0,0))
 #define free_xenheap_page(v) (free_xenheap_pages(v,0))
 
+unsigned long domain_adjust_tot_pages(struct domain *d, long pages);
+
 /* Domain suballocator. These functions are *not* interrupt-safe.*/
 void init_domheap_pages(paddr_t ps, paddr_t pe);
 struct page_info *alloc_domheap_pages(
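The new helper is trivial, but routing every tot_pages adjustment through it means any counter that must stay in lockstep with tot_pages can later be updated in this one place, under the already-held page_alloc_lock, rather than at every call site. As a rough sketch of what such a follow-up extension might look like (the reserved_pages field below is invented purely for illustration and is not part of this changeset or of struct domain):

unsigned long domain_adjust_tot_pages(struct domain *d, long pages)
{
    ASSERT(spin_is_locked(&d->page_alloc_lock));

    /*
     * Related bookkeeping now happens in exactly one place rather than
     * at every call site that used to touch tot_pages directly.  Here a
     * hypothetical per-domain reservation shrinks as pages are actually
     * allocated to the domain.
     */
    if ( pages > 0 && d->reserved_pages >= (unsigned long)pages )
        d->reserved_pages -= pages;

    return d->tot_pages += pages;
}

None of the call sites converted above would need to change again for an extension along these lines, which is the point of centralizing the accounting.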