[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-changelog] [xen-unstable] xen, pod: Only sweep in an emergency, and only for 4k pages
# HG changeset patch # User George Dunlap <george.dunlap@xxxxxxxxxxxxx> # Date 1340893085 -3600 # Node ID 3ea92ea1490fbe4a174f0d4d8fc3c6acfecd1b97 # Parent 437b7cc52db7fb2d0c4adacc9bf9bad2979cf110 xen,pod: Only sweep in an emergency, and only for 4k pages Testing has shown that doing sweeps for superpages slows down boot significantly, but does not result in a significantly higher number of superpages after boot. Early sweeping for 4k pages causes superpages to be broken up unnecessarily. Only sweep if we're really out of memory. v2: - Move unrelated code-motion hunk to another patch v3: - Remove now-unused reclaim_super from pod struct Signed-off-by: George Dunlap <george.dunlap@xxxxxxxxxxxxx> Acked-by: Tim Deegan <tim@xxxxxxx> Committed-by: Tim Deegan <tim@xxxxxxx> --- diff -r 437b7cc52db7 -r 3ea92ea1490f xen/arch/x86/mm/p2m-pod.c --- a/xen/arch/x86/mm/p2m-pod.c Thu Jun 28 15:18:03 2012 +0100 +++ b/xen/arch/x86/mm/p2m-pod.c Thu Jun 28 15:18:05 2012 +0100 @@ -897,34 +897,6 @@ p2m_pod_zero_check(struct p2m_domain *p2 } #define POD_SWEEP_LIMIT 1024 -static void -p2m_pod_emergency_sweep_super(struct p2m_domain *p2m) -{ - unsigned long i, start, limit; - - if ( p2m->pod.reclaim_super == 0 ) - { - p2m->pod.reclaim_super = (p2m->pod.max_guest>>PAGE_ORDER_2M)<<PAGE_ORDER_2M; - p2m->pod.reclaim_super -= SUPERPAGE_PAGES; - } - - start = p2m->pod.reclaim_super; - limit = (start > POD_SWEEP_LIMIT) ? (start - POD_SWEEP_LIMIT) : 0; - - for ( i=p2m->pod.reclaim_super ; i > 0 ; i -= SUPERPAGE_PAGES ) - { - p2m_pod_zero_check_superpage(p2m, i); - /* Stop if we're past our limit and we have found *something*. - * - * NB that this is a zero-sum game; we're increasing our cache size - * by increasing our 'debt'. Since we hold the p2m lock, - * (entry_count - count) must remain the same. */ - if ( !page_list_empty(&p2m->pod.super) && i < limit ) - break; - } - - p2m->pod.reclaim_super = i ? 
i - SUPERPAGE_PAGES : 0; -} /* When populating a new superpage, look at recently populated superpages * hoping that they've been zeroed. This will snap up zeroed pages as soon as @@ -1039,27 +1011,12 @@ p2m_pod_demand_populate(struct p2m_domai return 0; } - /* Once we've ballooned down enough that we can fill the remaining - * PoD entries from the cache, don't sweep even if the particular - * list we want to use is empty: that can lead to thrashing zero pages - * through the cache for no good reason. */ - if ( p2m->pod.entry_count > p2m->pod.count ) - { + /* Only sweep if we're actually out of memory. Doing anything else + * causes unnecessary time and fragmentation of superpages in the p2m. */ + if ( p2m->pod.count == 0 ) + p2m_pod_emergency_sweep(p2m); - /* If we're low, start a sweep */ - if ( order == PAGE_ORDER_2M && page_list_empty(&p2m->pod.super) ) - /* Note that sweeps scan other ranges in the p2m. In an scenario - * in which p2m locks are fine-grained, this may result in deadlock. - * Using trylock on the gfn's as we sweep would avoid it. */ - p2m_pod_emergency_sweep_super(p2m); - - if ( page_list_empty(&p2m->pod.single) && - ( ( order == PAGE_ORDER_4K ) - || (order == PAGE_ORDER_2M && page_list_empty(&p2m->pod.super) ) ) ) - /* Same comment regarding deadlock applies */ - p2m_pod_emergency_sweep(p2m); - } - + /* If the sweep failed, give up. 
*/ if ( p2m->pod.count == 0 ) goto out_of_memory; diff -r 437b7cc52db7 -r 3ea92ea1490f xen/include/asm-x86/p2m.h --- a/xen/include/asm-x86/p2m.h Thu Jun 28 15:18:03 2012 +0100 +++ b/xen/include/asm-x86/p2m.h Thu Jun 28 15:18:05 2012 +0100 @@ -284,7 +284,6 @@ struct p2m_domain { single; /* Non-super lists */ int count, /* # of pages in cache lists */ entry_count; /* # of pages in p2m marked pod */ - unsigned reclaim_super; /* Last gpfn of a scan */ unsigned reclaim_single; /* Last gpfn of a scan */ unsigned max_guest; /* gpfn of max guest demand-populate */ #define POD_HISTORY_MAX 128 _______________________________________________ Xen-changelog mailing list Xen-changelog@xxxxxxxxxxxxx http://lists.xensource.com/xen-changelog
|
Lists.xenproject.org is hosted with RackSpace, monitoring our |
servers 24x7x365 and backed by RackSpace's Fanatical Support®.