[Xen-changelog] [xen-4.0-testing] xen: make the shadow allocation hypercalls include the p2m memory
# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1278579805 -3600
# Node ID 0839af420ade49eb4e455066f9b13a3008e00434
# Parent  1afe3a562711e12ea36c45f3d4b8699e65aaf17b
xen: make the shadow allocation hypercalls include the p2m memory
in the total shadow allocation.

This makes the effect of allocation changes consistent regardless of
p2m activity on boot.

Signed-off-by: Tim Deegan <Tim.Deegan@xxxxxxxxxx>
xen-unstable changeset: 21750:92ac9536ac5a
xen-unstable date: Thu Jul 08 09:52:51 2010 +0100
---
 xen/arch/x86/mm/hap/hap.c       |   13 ++++++++++++-
 xen/arch/x86/mm/shadow/common.c |   29 ++++++++++++++++++++---------
 2 files changed, 32 insertions(+), 10 deletions(-)

diff -r 1afe3a562711 -r 0839af420ade xen/arch/x86/mm/hap/hap.c
--- a/xen/arch/x86/mm/hap/hap.c	Thu Jul 08 10:03:03 2010 +0100
+++ b/xen/arch/x86/mm/hap/hap.c	Thu Jul 08 10:03:25 2010 +0100
@@ -334,7 +334,8 @@ static unsigned int
 static unsigned int
 hap_get_allocation(struct domain *d)
 {
-    unsigned int pg = d->arch.paging.hap.total_pages;
+    unsigned int pg = d->arch.paging.hap.total_pages
+        + d->arch.paging.hap.p2m_pages;
 
     return ((pg >> (20 - PAGE_SHIFT))
             + ((pg & ((1 << (20 - PAGE_SHIFT)) - 1)) ? 1 : 0));
@@ -348,6 +349,11 @@ hap_set_allocation(struct domain *d, uns
     struct page_info *pg;
 
     ASSERT(hap_locked_by_me(d));
+
+    if ( pages < d->arch.paging.hap.p2m_pages )
+        pages = 0;
+    else
+        pages -= d->arch.paging.hap.p2m_pages;
 
     while ( d->arch.paging.hap.total_pages != pages )
     {
@@ -367,6 +373,11 @@ hap_set_allocation(struct domain *d, uns
         else if ( d->arch.paging.hap.total_pages > pages )
         {
             /* Need to return memory to domheap */
+            if ( page_list_empty(&d->arch.paging.hap.freelist) )
+            {
+                HAP_PRINTK("failed to free enough hap pages.\n");
+                return -ENOMEM;
+            }
             pg = page_list_remove_head(&d->arch.paging.hap.freelist);
             ASSERT(pg);
             d->arch.paging.hap.free_pages--;
diff -r 1afe3a562711 -r 0839af420ade xen/arch/x86/mm/shadow/common.c
--- a/xen/arch/x86/mm/shadow/common.c	Thu Jul 08 10:03:03 2010 +0100
+++ b/xen/arch/x86/mm/shadow/common.c	Thu Jul 08 10:03:25 2010 +0100
@@ -1817,14 +1817,24 @@ static unsigned int sh_set_allocation(st
     unsigned int j, order = shadow_max_order(d);
 
     ASSERT(shadow_locked_by_me(d));
-
-    /* Don't allocate less than the minimum acceptable, plus one page per
-     * megabyte of RAM (for the p2m table) */
-    lower_bound = shadow_min_acceptable_pages(d) + (d->tot_pages / 256);
-    if ( pages > 0 && pages < lower_bound )
-        pages = lower_bound;
-    /* Round up to largest block size */
-    pages = (pages + ((1<<SHADOW_MAX_ORDER)-1)) & ~((1<<SHADOW_MAX_ORDER)-1);
+
+    if ( pages > 0 )
+    {
+        /* Check for minimum value. */
+        if ( pages < d->arch.paging.shadow.p2m_pages )
+            pages = 0;
+        else
+            pages -= d->arch.paging.shadow.p2m_pages;
+
+        /* Don't allocate less than the minimum acceptable, plus one page per
+         * megabyte of RAM (for the p2m table) */
+        lower_bound = shadow_min_acceptable_pages(d) + (d->tot_pages / 256);
+        if ( pages < lower_bound )
+            pages = lower_bound;
+
+        /* Round up to largest block size */
+        pages = (pages + ((1<<SHADOW_MAX_ORDER)-1)) & ~((1<<SHADOW_MAX_ORDER)-1);
+    }
 
     SHADOW_PRINTK("current %i target %i\n",
                   d->arch.paging.shadow.total_pages, pages);
@@ -1884,7 +1894,8 @@ static unsigned int sh_set_allocation(st
 /* Return the size of the shadow pool, rounded up to the nearest MB */
 static unsigned int shadow_get_allocation(struct domain *d)
 {
-    unsigned int pg = d->arch.paging.shadow.total_pages;
+    unsigned int pg = d->arch.paging.shadow.total_pages
+        + d->arch.paging.shadow.p2m_pages;
     return ((pg >> (20 - PAGE_SHIFT))
             + ((pg & ((1 << (20 - PAGE_SHIFT)) - 1)) ? 1 : 0));
 }

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
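
For readers working through the arithmetic, the following standalone sketch (an illustration only, not part of the changeset) shows the two pieces the patch ties together: the round-up to whole megabytes used by hap_get_allocation()/shadow_get_allocation(), and the new clamp-and-subtract of the p2m page count in the set paths. It assumes x86's PAGE_SHIFT of 12 (4 KiB pages); the *_demo names are hypothetical stand-ins for the hypervisor's per-domain fields, and the shadow code's lower-bound and block-size rounding are omitted for brevity.

/*
 * Minimal sketch of the allocation accounting described above.
 * Assumes PAGE_SHIFT == 12; "_demo" names are hypothetical.
 */
#include <stdio.h>

#define PAGE_SHIFT   12
#define PAGES_PER_MB (1u << (20 - PAGE_SHIFT))   /* 256 pages per MB */

/* Round a page count up to whole megabytes, as *_get_allocation() does. */
static unsigned int pages_to_mb(unsigned int pg)
{
    return (pg >> (20 - PAGE_SHIFT))
         + ((pg & (PAGES_PER_MB - 1)) ? 1 : 0);
}

/* After the patch, the requested total is first reduced by the pages the
 * p2m already holds (clamped at zero), so "total" now means pool + p2m. */
static unsigned int adjust_target_demo(unsigned int pages, unsigned int p2m_pages)
{
    return (pages < p2m_pages) ? 0 : pages - p2m_pages;
}

int main(void)
{
    unsigned int p2m_pages = 300;                 /* example p2m footprint */
    unsigned int requested = 16 * PAGES_PER_MB;   /* caller asks for 16 MB total */

    unsigned int pool_pages  = adjust_target_demo(requested, p2m_pages);
    unsigned int reported_mb = pages_to_mb(pool_pages + p2m_pages);

    printf("pool gets %u pages, reported allocation: %u MB\n",
           pool_pages, reported_mb);
    return 0;
}

With both adjustments in place, a toolstack that sets the allocation to 16 MB also reads back 16 MB, regardless of how many pages the p2m consumed while the domain was built, which is the consistency the commit message describes.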