[Xen-changelog] [xen-unstable] [XEN] Boot allocator searches from earliest registered
# HG changeset patch
# User kfraser@xxxxxxxxxxxxxxxxxxxxx
# Node ID c3ff06093028962ffb64af4ef62693d6dc3525a2
# Parent  21609e7f5a49a30c2d8ec978148a2317ad70bd0e
[XEN] Boot allocator searches from earliest registered memory region
instead of 0.

Based on a patch from Jes Sorensen <jes@xxxxxxx> which massively
improves boot time on SGI Altix systems.

Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
---
 xen/common/page_alloc.c |   46 +++++++++++++++++++++++++---------------------
 1 files changed, 25 insertions(+), 21 deletions(-)

diff -r 21609e7f5a49 -r c3ff06093028 xen/common/page_alloc.c
--- a/xen/common/page_alloc.c   Thu Dec 14 11:39:40 2006 +0000
+++ b/xen/common/page_alloc.c   Thu Dec 14 12:02:30 2006 +0000
@@ -136,7 +136,6 @@ static void map_alloc(unsigned long firs
     }
 }
 
-
 static void map_free(unsigned long first_page, unsigned long nr_pages)
 {
     unsigned long start_off, end_off, curr_idx, end_idx;
@@ -170,6 +169,8 @@ static void map_free(unsigned long first
 /*************************
  * BOOT-TIME ALLOCATOR
  */
+
+static unsigned long first_valid_mfn = ~0UL;
 
 /* Initialise allocator to handle up to @max_page pages. */
 paddr_t init_boot_allocator(paddr_t bitmap_start)
@@ -203,6 +204,8 @@ void init_boot_pages(paddr_t ps, paddr_t
     if ( pe <= ps )
         return;
 
+    first_valid_mfn = min_t(unsigned long, ps >> PAGE_SHIFT, first_valid_mfn);
+
     map_free(ps >> PAGE_SHIFT, (pe - ps) >> PAGE_SHIFT);
 
     /* Check new pages against the bad-page list. */
@@ -256,16 +259,17 @@ unsigned long alloc_boot_pages_at(unsign
 unsigned long alloc_boot_pages(
     unsigned long nr_pfns, unsigned long pfn_align)
 {
-    unsigned long pg, i = 0;
-
-    for ( pg = 0; (pg + nr_pfns) < max_page; pg += pfn_align )
-    {
-        i = alloc_boot_pages_at(nr_pfns, pg);
-        if (i != 0)
+    unsigned long pg;
+
+    pg = first_valid_mfn & ~(pfn_align-1);
+    while ( (pg + nr_pfns) < max_page )
+    {
+        if ( alloc_boot_pages_at(nr_pfns, pg) != 0 )
             break;
-    }
-
-    return i;
+        pg += pfn_align;
+    }
+
+    return pg;
 }
 
@@ -301,7 +305,7 @@ void end_boot_allocator(void)
                 INIT_LIST_HEAD(&heap[i][j][k]);
 
     /* Pages that are free now go to the domain sub-allocator. */
-    for ( i = 0; i < max_page; i++ )
+    for ( i = first_valid_mfn; i < max_page; i++ )
     {
         curr_free = next_free;
         next_free = !allocated_in_map(i+1);
@@ -324,7 +328,7 @@ void init_heap_pages(
 void init_heap_pages(
     unsigned int zone, struct page_info *pg, unsigned long nr_pages)
 {
-    unsigned int nid_curr,nid_prev;
+    unsigned int nid_curr, nid_prev;
     unsigned long i;
 
     ASSERT(zone < NR_ZONES);
@@ -478,37 +482,37 @@ void scrub_heap_pages(void)
 void scrub_heap_pages(void)
 {
     void *p;
-    unsigned long pfn;
+    unsigned long mfn;
 
     printk("Scrubbing Free RAM: ");
 
-    for ( pfn = 0; pfn < max_page; pfn++ )
+    for ( mfn = first_valid_mfn; mfn < max_page; mfn++ )
     {
         /* Every 100MB, print a progress dot. */
-        if ( (pfn % ((100*1024*1024)/PAGE_SIZE)) == 0 )
+        if ( (mfn % ((100*1024*1024)/PAGE_SIZE)) == 0 )
             printk(".");
 
         process_pending_timers();
 
         /* Quick lock-free check. */
-        if ( allocated_in_map(pfn) )
+        if ( allocated_in_map(mfn) )
             continue;
 
         spin_lock_irq(&heap_lock);
 
         /* Re-check page status with lock held. */
-        if ( !allocated_in_map(pfn) )
-        {
-            if ( IS_XEN_HEAP_FRAME(mfn_to_page(pfn)) )
+        if ( !allocated_in_map(mfn) )
+        {
+            if ( IS_XEN_HEAP_FRAME(mfn_to_page(mfn)) )
             {
-                p = page_to_virt(mfn_to_page(pfn));
+                p = page_to_virt(mfn_to_page(mfn));
                 memguard_unguard_range(p, PAGE_SIZE);
                 clear_page(p);
                 memguard_guard_range(p, PAGE_SIZE);
             }
             else
             {
-                p = map_domain_page(pfn);
+                p = map_domain_page(mfn);
                 clear_page(p);
                 unmap_domain_page(p);
             }
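For readers who want the gist without wading through the hunks: the patch
keeps a running minimum of the first machine frame number (MFN) seen as
memory regions are registered, then starts the boot allocator's linear
probe from that frame rather than from frame 0. Below is a minimal,
self-contained sketch of that idea; the names register_region() and
search_start() are hypothetical stand-ins, not the Xen API.

    #include <stdio.h>

    #define PAGE_SHIFT 12

    static unsigned long first_valid_mfn = ~0UL;   /* "no RAM seen yet" */

    /* Track the lowest frame number across every registered region [ps, pe). */
    static void register_region(unsigned long ps, unsigned long pe)
    {
        unsigned long mfn = ps >> PAGE_SHIFT;
        (void)pe;                /* only the region start matters for the minimum */
        if ( mfn < first_valid_mfn )
            first_valid_mfn = mfn;
    }

    /* First frame a linear allocation probe would test, for a power-of-two
     * alignment in frames -- the same round-down mask the patch uses. */
    static unsigned long search_start(unsigned long pfn_align)
    {
        return first_valid_mfn & ~(pfn_align - 1);
    }

    int main(void)
    {
        /* An Altix-like layout: RAM begins at 16GB, nothing below it. */
        register_region(16UL << 30, 17UL << 30);
        register_region(32UL << 30, 33UL << 30);

        printf("first_valid_mfn = %#lx\n", first_valid_mfn);
        printf("probe starts at %#lx instead of frame 0\n",
               search_start(1UL << 8));
        return 0;
    }

Note that the round-down mask can place the starting frame slightly below
first_valid_mfn when it is not already aligned; in the patch that is
harmless, because alloc_boot_pages_at() simply fails on a candidate range
with no free pages and the loop advances by pfn_align.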
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog