[Xen-changelog] [xen-unstable] xen memory allocator: hide generic allocator routines
# HG changeset patch
# User kfraser@xxxxxxxxxxxxxxxxxxxxx
# Date 1172249756 0
# Node ID c64aa7fb7712cd6768c52621ddc00177cf1e123e
# Parent  6253b8d32eb99b32b22c0d8075759569dc45d5da
xen memory allocator: hide generic allocator routines

This patch doesn't introduce functional changes, but simply moves code
around to make the unused (outside of the page allocator) heap alloc
functions taking an explicit zone parameter static without having to
forward-prototype them in their source file.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxxxx>
---
 xen/common/page_alloc.c |  319 +++++++++++++++++++++++-------------------------
 xen/include/xen/mm.h    |   12 -
 2 files changed, 161 insertions(+), 170 deletions(-)

diff -r 6253b8d32eb9 -r c64aa7fb7712 xen/common/page_alloc.c
--- a/xen/common/page_alloc.c	Fri Feb 23 16:36:55 2007 +0000
+++ b/xen/common/page_alloc.c	Fri Feb 23 16:55:56 2007 +0000
@@ -322,35 +322,122 @@ static unsigned long avail[NR_ZONES][MAX
 
 static DEFINE_SPINLOCK(heap_lock);
 
-void end_boot_allocator(void)
-{
-    unsigned long i, j, k;
-    int curr_free, next_free;
-
-    memset(avail, 0, sizeof(avail));
-
-    for ( i = 0; i < NR_ZONES; i++ )
-        for ( j = 0; j < MAX_NUMNODES; j++ )
-            for ( k = 0; k <= MAX_ORDER; k++ )
-                INIT_LIST_HEAD(&heap[i][j][k]);
-
-    /* Pages that are free now go to the domain sub-allocator. */
-    if ( (curr_free = next_free = !allocated_in_map(first_valid_mfn)) )
-        map_alloc(first_valid_mfn, 1);
-    for ( i = first_valid_mfn; i < max_page; i++ )
-    {
-        curr_free = next_free;
-        next_free = !allocated_in_map(i+1);
-        if ( next_free )
-            map_alloc(i+1, 1); /* prevent merging in free_heap_pages() */
-        if ( curr_free )
-            init_heap_pages(pfn_dom_zone_type(i), mfn_to_page(i), 1);
-    }
-
-    printk("Domain heap initialised: DMA width %u bits\n", dma_bitsize);
-}
-
-/*
+/* Allocate 2^@order contiguous pages. */
+static struct page_info *alloc_heap_pages(
+    unsigned int zone, unsigned int cpu, unsigned int order)
+{
+    unsigned int i, j, node = cpu_to_node(cpu), num_nodes = num_online_nodes();
+    unsigned int request = (1UL << order);
+    struct page_info *pg;
+
+    ASSERT(node >= 0);
+    ASSERT(node < num_nodes);
+    ASSERT(zone < NR_ZONES);
+
+    if ( unlikely(order > MAX_ORDER) )
+        return NULL;
+
+    spin_lock(&heap_lock);
+
+    /* start with requested node, but exhaust all node memory
+     * in requested zone before failing, only calc new node
+     * value if we fail to find memory in target node, this avoids
+     * needless computation on fast-path */
+    for ( i = 0; i < num_nodes; i++ )
+    {
+        /* check if target node can support the allocation */
+        if ( avail[zone][node] >= request )
+        {
+            /* Find smallest order which can satisfy the request. */
+            for ( j = order; j <= MAX_ORDER; j++ )
+            {
+                if ( !list_empty(&heap[zone][node][j]) )
+                    goto found;
+            }
+        }
+        /* pick next node, wrapping around if needed */
+        if ( ++node == num_nodes )
+            node = 0;
+    }
+
+    /* No suitable memory blocks. Fail the request. */
+    spin_unlock(&heap_lock);
+    return NULL;
+
+ found: 
+    pg = list_entry(heap[zone][node][j].next, struct page_info, list);
+    list_del(&pg->list);
+
+    /* We may have to halve the chunk a number of times. */
+    while ( j != order )
+    {
+        PFN_ORDER(pg) = --j;
+        list_add_tail(&pg->list, &heap[zone][node][j]);
+        pg += 1 << j;
+    }
+
+    map_alloc(page_to_mfn(pg), request);
+    ASSERT(avail[zone][node] >= request);
+    avail[zone][node] -= request;
+
+    spin_unlock(&heap_lock);
+
+    return pg;
+}
+
+/* Free 2^@order set of pages. */
+static void free_heap_pages(
+    unsigned int zone, struct page_info *pg, unsigned int order)
+{
+    unsigned long mask;
+    unsigned int node = phys_to_nid(page_to_maddr(pg));
+
+    ASSERT(zone < NR_ZONES);
+    ASSERT(order <= MAX_ORDER);
+    ASSERT(node >= 0);
+    ASSERT(node < num_online_nodes());
+
+    spin_lock(&heap_lock);
+
+    map_free(page_to_mfn(pg), 1 << order);
+    avail[zone][node] += 1 << order;
+
+    /* Merge chunks as far as possible. */
+    while ( order < MAX_ORDER )
+    {
+        mask = 1 << order;
+
+        if ( (page_to_mfn(pg) & mask) )
+        {
+            /* Merge with predecessor block? */
+            if ( allocated_in_map(page_to_mfn(pg)-mask) ||
+                 (PFN_ORDER(pg-mask) != order) )
+                break;
+            list_del(&(pg-mask)->list);
+            pg -= mask;
+        }
+        else
+        {
+            /* Merge with successor block? */
+            if ( allocated_in_map(page_to_mfn(pg)+mask) ||
+                 (PFN_ORDER(pg+mask) != order) )
+                break;
+            list_del(&(pg+mask)->list);
+        }
+
+        order++;
+
+        /* after merging, pg should be in the same node */
+        ASSERT(phys_to_nid(page_to_maddr(pg)) == node );
+    }
+
+    PFN_ORDER(pg) = order;
+    list_add_tail(&pg->list, &heap[zone][node][order]);
+
+    spin_unlock(&heap_lock);
+}
+
+/*
  * Hand the specified arbitrary page range to the specified heap zone
  * checking the node_id of the previous page. If they differ and the
  * latter is not on a MAX_ORDER boundary, then we reserve the page by
@@ -389,122 +476,48 @@ void init_heap_pages(
     }
 }
 
-/* Allocate 2^@order contiguous pages. */
-struct page_info *alloc_heap_pages(unsigned int zone, unsigned int cpu,
-                                   unsigned int order)
-{
-    unsigned int i,j, node = cpu_to_node(cpu), num_nodes = num_online_nodes();
-    unsigned int request = (1UL << order);
-    struct page_info *pg;
-
-    ASSERT(node >= 0);
-    ASSERT(node < num_nodes);
-    ASSERT(zone < NR_ZONES);
-
-    if ( unlikely(order > MAX_ORDER) )
-        return NULL;
-
-    spin_lock(&heap_lock);
-
-    /* start with requested node, but exhaust all node memory
-     * in requested zone before failing, only calc new node
-     * value if we fail to find memory in target node, this avoids
-     * needless computation on fast-path */
-    for ( i = 0; i < num_nodes; i++ )
-    {
-        /* check if target node can support the allocation */
-        if ( avail[zone][node] >= request )
-        {
-            /* Find smallest order which can satisfy the request. */
-            for ( j = order; j <= MAX_ORDER; j++ )
-            {
-                if ( !list_empty(&heap[zone][node][j]) )
-                    goto found;
-            }
-        }
-        /* pick next node, wrapping around if needed */
-        if ( ++node == num_nodes )
-            node = 0;
-    }
-
-    /* No suitable memory blocks. Fail the request. */
-    spin_unlock(&heap_lock);
-    return NULL;
-
- found: 
-    pg = list_entry(heap[zone][node][j].next, struct page_info, list);
-    list_del(&pg->list);
-
-    /* We may have to halve the chunk a number of times. */
-    while ( j != order )
-    {
-        PFN_ORDER(pg) = --j;
-        list_add_tail(&pg->list, &heap[zone][node][j]);
-        pg += 1 << j;
-    }
-
-    map_alloc(page_to_mfn(pg), request);
-    ASSERT(avail[zone][node] >= request);
-    avail[zone][node] -= request;
-
-    spin_unlock(&heap_lock);
-
-    return pg;
-}
-
-
-/* Free 2^@order set of pages. */
-void free_heap_pages(
-    unsigned int zone, struct page_info *pg, unsigned int order)
-{
-    unsigned long mask;
-    int node = phys_to_nid(page_to_maddr(pg));
-
-    ASSERT(zone < NR_ZONES);
-    ASSERT(order <= MAX_ORDER);
-    ASSERT(node >= 0);
-    ASSERT(node < num_online_nodes());
-
-    spin_lock(&heap_lock);
-
-    map_free(page_to_mfn(pg), 1 << order);
-    avail[zone][node] += 1 << order;
-
-    /* Merge chunks as far as possible. */
-    while ( order < MAX_ORDER )
-    {
-        mask = 1 << order;
-
-        if ( (page_to_mfn(pg) & mask) )
-        {
-            /* Merge with predecessor block? */
-            if ( allocated_in_map(page_to_mfn(pg)-mask) ||
-                 (PFN_ORDER(pg-mask) != order) )
-                break;
-            list_del(&(pg-mask)->list);
-            pg -= mask;
-        }
-        else
-        {
-            /* Merge with successor block? */
-            if ( allocated_in_map(page_to_mfn(pg)+mask) ||
-                 (PFN_ORDER(pg+mask) != order) )
-                break;
-            list_del(&(pg+mask)->list);
-        }
-
-        order++;
-
-        /* after merging, pg should be in the same node */
-        ASSERT(phys_to_nid(page_to_maddr(pg)) == node );
-    }
-
-    PFN_ORDER(pg) = order;
-    list_add_tail(&pg->list, &heap[zone][node][order]);
-
-    spin_unlock(&heap_lock);
-}
-
+static unsigned long avail_heap_pages(
+    int zone, int node)
+{
+    unsigned int i, j, num_nodes = num_online_nodes();
+    unsigned long free_pages = 0;
+
+    for (i=0; i<NR_ZONES; i++)
+        if ( (zone == -1) || (zone == i) )
+            for (j=0; j < num_nodes; j++)
+                if ( (node == -1) || (node == j) )
+                    free_pages += avail[i][j];
+
+    return free_pages;
+}
+
+void end_boot_allocator(void)
+{
+    unsigned long i, j, k;
+    int curr_free, next_free;
+
+    memset(avail, 0, sizeof(avail));
+
+    for ( i = 0; i < NR_ZONES; i++ )
+        for ( j = 0; j < MAX_NUMNODES; j++ )
+            for ( k = 0; k <= MAX_ORDER; k++ )
+                INIT_LIST_HEAD(&heap[i][j][k]);
+
+    /* Pages that are free now go to the domain sub-allocator. */
+    if ( (curr_free = next_free = !allocated_in_map(first_valid_mfn)) )
+        map_alloc(first_valid_mfn, 1);
+    for ( i = first_valid_mfn; i < max_page; i++ )
+    {
+        curr_free = next_free;
+        next_free = !allocated_in_map(i+1);
+        if ( next_free )
+            map_alloc(i+1, 1); /* prevent merging in free_heap_pages() */
+        if ( curr_free )
+            init_heap_pages(pfn_dom_zone_type(i), mfn_to_page(i), 1);
+    }
+
+    printk("Domain heap initialised: DMA width %u bits\n", dma_bitsize);
+}
 
 /*
  * Scrub all unallocated pages in all heap zones. This function is more
@@ -769,7 +782,7 @@ struct page_info *__alloc_domheap_pages(
     return pg;
 }
 
-inline struct page_info *alloc_domheap_pages(
+struct page_info *alloc_domheap_pages(
    struct domain *d, unsigned int order, unsigned int flags)
 {
     return __alloc_domheap_pages(d, smp_processor_id(), order, flags);
@@ -848,20 +861,6 @@ void free_domheap_pages(struct page_info
 }
 
 
-unsigned long avail_heap_pages(int zone, int node)
-{
-    int i,j, num_nodes = num_online_nodes();
-    unsigned long free_pages = 0;
-
-    for (i=0; i<NR_ZONES; i++)
-        if ( (zone == -1) || (zone == i) )
-            for (j=0; j < num_nodes; j++)
-                if ( (node == -1) || (node == j) )
-                    free_pages += avail[i][j];
-
-    return free_pages;
-}
-
 unsigned long avail_domheap_pages(void)
 {
     unsigned long avail_nrm, avail_dma;
diff -r 6253b8d32eb9 -r c64aa7fb7712 xen/include/xen/mm.h
--- a/xen/include/xen/mm.h	Fri Feb 23 16:36:55 2007 +0000
+++ b/xen/include/xen/mm.h	Fri Feb 23 16:55:56 2007 +0000
@@ -46,15 +46,6 @@ int reserve_boot_pages(unsigned long fir
 int reserve_boot_pages(unsigned long first_pfn, unsigned long nr_pfns);
 void end_boot_allocator(void);
 
-/* Generic allocator. These functions are *not* interrupt-safe. */
-void init_heap_pages(
-    unsigned int zone, struct page_info *pg, unsigned long nr_pages);
-struct page_info *alloc_heap_pages(
-    unsigned int zone, unsigned int cpu, unsigned int order);
-void free_heap_pages(
-    unsigned int zone, struct page_info *pg, unsigned int order);
-void scrub_heap_pages(void);
-
 /* Xen suballocator. These functions are interrupt-safe. */
 void init_xenheap_pages(paddr_t ps, paddr_t pe);
 void *alloc_xenheap_pages(unsigned int order);
@@ -71,9 +62,10 @@ struct page_info *__alloc_domheap_pages(
     unsigned int memflags);
 void free_domheap_pages(struct page_info *pg, unsigned int order);
 unsigned long avail_domheap_pages(void);
-unsigned long avail_heap_pages(int zone, int node);
 #define alloc_domheap_page(d) (alloc_domheap_pages(d,0,0))
 #define free_domheap_page(p)  (free_domheap_pages(p,0))
+
+void scrub_heap_pages(void);
 
 int assign_pages(
     struct domain *d,

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
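The patch relies on a basic C property: a function defined above its only callers in the same translation unit needs no forward prototype, and marking it static keeps its symbol out of the public interface. The following standalone sketch (hypothetical names, not Xen code) illustrates the shape the patch gives alloc_heap_pages()/free_heap_pages()/avail_heap_pages():

/* refactor_sketch.c - hypothetical illustration of the pattern, not Xen code. */
#include <stdio.h>

/*
 * The helper is defined before its caller and marked static, so no
 * declaration is needed in any header and the symbol stays private to
 * this file -- the same effect the patch achieves by moving the heap
 * routines above their callers in page_alloc.c.
 */
static unsigned long helper_pages_of_order(unsigned int order)
{
    return 1UL << order;   /* stand-in for the real allocator work */
}

/* Public entry point, analogous to alloc_domheap_pages() staying exported. */
unsigned long public_alloc(unsigned int order)
{
    return helper_pages_of_order(order);
}

int main(void)
{
    printf("%lu\n", public_alloc(3)); /* prints 8 */
    return 0;
}

Before the change, the helpers sat below their callers, so xen/include/xen/mm.h had to carry their prototypes even though no other file used them; moving the definitions up lets those header entries be dropped, as the mm.h hunks show.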