[Xen-changelog] [xen-unstable] x86_64: Remove statically-partitioned Xen heap.
# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1232118732 0
# Node ID 845aa241e1633b3dadfd4589bf6c7010ca5fd35b
# Parent  ba7ed03a3f79a8f9d60f4da5be34ee00859302e7
x86_64: Remove statically-partitioned Xen heap.

Signed-off-by: Keir Fraser <keir.fraser@xxxxxxxxxx>
---
 xen/arch/ia64/xen/machine_kexec.c   |    1 
 xen/arch/x86/machine_kexec.c        |    3 +
 xen/arch/x86/mm.c                   |   10 +++-
 xen/arch/x86/setup.c                |   75 +++++++++++++-----------------------
 xen/arch/x86/x86_32/machine_kexec.c |    3 -
 xen/arch/x86/x86_64/machine_kexec.c |    9 +---
 xen/common/kexec.c                  |    1 
 xen/common/page_alloc.c             |   51 ++++++++++++++++++++++++
 xen/include/asm-x86/config.h        |    8 +--
 xen/include/asm-x86/mm.h            |    9 ++++
 10 files changed, 108 insertions(+), 62 deletions(-)

diff -r ba7ed03a3f79 -r 845aa241e163 xen/arch/ia64/xen/machine_kexec.c
--- a/xen/arch/ia64/xen/machine_kexec.c	Fri Jan 16 13:17:53 2009 +0000
+++ b/xen/arch/ia64/xen/machine_kexec.c	Fri Jan 16 15:12:12 2009 +0000
@@ -195,6 +195,7 @@ int machine_kexec_get(xen_kexec_range_t
 
 void arch_crash_save_vmcoreinfo(void)
 {
+    VMCOREINFO_SYMBOL(xenheap_phys_end);
     VMCOREINFO_SYMBOL(dom_xen);
     VMCOREINFO_SYMBOL(dom_io);
     VMCOREINFO_SYMBOL(xen_pstart);
diff -r ba7ed03a3f79 -r 845aa241e163 xen/arch/x86/machine_kexec.c
--- a/xen/arch/x86/machine_kexec.c	Fri Jan 16 13:17:53 2009 +0000
+++ b/xen/arch/x86/machine_kexec.c	Fri Jan 16 15:12:12 2009 +0000
@@ -150,6 +150,9 @@ void arch_crash_save_vmcoreinfo(void)
     VMCOREINFO_SYMBOL(dom_xen);
     VMCOREINFO_SYMBOL(dom_io);
 
+#ifdef CONFIG_X86_32
+    VMCOREINFO_SYMBOL(xenheap_phys_end);
+#endif
 #ifdef CONFIG_X86_PAE
     VMCOREINFO_SYMBOL_ALIAS(pgd_l3, idle_pg_table);
 #endif
diff -r ba7ed03a3f79 -r 845aa241e163 xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c	Fri Jan 16 13:17:53 2009 +0000
+++ b/xen/arch/x86/mm.c	Fri Jan 16 15:12:12 2009 +0000
@@ -329,7 +329,7 @@ void share_xen_page_with_guest(
 
     page_set_owner(page, d);
     wmb(); /* install valid domain ptr before updating refcnt. */
-    ASSERT(page->count_info == 0);
+    ASSERT((page->count_info & (PGC_allocated|PGC_count_mask)) == 0);
 
     /* Only add to the allocation list if the domain isn't dying. */
     if ( !d->is_dying )
@@ -4722,12 +4722,18 @@ void memguard_init(void)
 void memguard_init(void)
 {
     unsigned long start = max_t(unsigned long, xen_phys_start, 1UL << 20);
+#ifdef __i386__
     map_pages_to_xen(
         (unsigned long)__va(start),
         start >> PAGE_SHIFT,
         (xenheap_phys_end - start) >> PAGE_SHIFT,
         __PAGE_HYPERVISOR|MAP_SMALL_PAGES);
-#ifdef __x86_64__
+#else
+    map_pages_to_xen(
+        (unsigned long)__va(start),
+        start >> PAGE_SHIFT,
+        (__pa(&_end) + PAGE_SIZE - 1 - start) >> PAGE_SHIFT,
+        __PAGE_HYPERVISOR|MAP_SMALL_PAGES);
     BUG_ON(start != xen_phys_start);
     map_pages_to_xen(
         XEN_VIRT_START,
diff -r ba7ed03a3f79 -r 845aa241e163 xen/arch/x86/setup.c
--- a/xen/arch/x86/setup.c	Fri Jan 16 13:17:53 2009 +0000
+++ b/xen/arch/x86/setup.c	Fri Jan 16 15:12:12 2009 +0000
@@ -54,15 +54,6 @@ extern u8 boot_edid_info[128];
 extern u8 boot_edid_info[128];
 extern struct boot_video_info boot_vid_info;
 
-/*
- * opt_xenheap_megabytes: Size of Xen heap in megabytes, excluding the
- * page_info table and allocation bitmap.
- */
-static unsigned int opt_xenheap_megabytes = XENHEAP_DEFAULT_MB;
-#if defined(CONFIG_X86_64)
-integer_param("xenheap_megabytes", opt_xenheap_megabytes);
-#endif
-
 /* opt_nosmp: If true, secondary processors are ignored. */
 static int opt_nosmp = 0;
 boolean_param("nosmp", opt_nosmp);
@@ -105,8 +96,10 @@ cpumask_t cpu_present_map;
 
 unsigned long xen_phys_start;
 
+#ifdef CONFIG_X86_32
 /* Limits of Xen heap, used to initialise the allocator. */
 unsigned long xenheap_phys_start, xenheap_phys_end;
+#endif
 
 extern void arch_init_memory(void);
 extern void init_IRQ(void);
@@ -421,6 +414,7 @@ void __init __start_xen(unsigned long mb
     multiboot_info_t *mbi = __va(mbi_p);
     module_t *mod = (module_t *)__va(mbi->mods_addr);
     unsigned long nr_pages, modules_length;
+    unsigned long allocator_bitmap_end;
     int i, e820_warn = 0, bytes = 0;
     struct ns16550_defaults ns16550 = {
         .data_bits = 8,
@@ -599,23 +593,6 @@ void __init __start_xen(unsigned long mb
 
     /* Sanitise the raw E820 map to produce a final clean version. */
     max_page = init_e820(memmap_type, e820_raw, &e820_raw_nr);
 
-#ifdef CONFIG_X86_64
-    /*
-     * On x86/64 we are able to account for the allocation bitmap
-     * (allocated in common/page_alloc.c:init_boot_allocator()) stealing
-     * from the Xen heap. Here we make the Xen heap appropriately larger.
-     */
-    opt_xenheap_megabytes += (max_page / 8) >> 20;
-#endif
-
-    /*
-     * Since there are some stubs getting built on the stacks which use
-     * direct calls/jumps, the heap must be confined to the lower 2G so
-     * that those branches can reach their targets.
-     */
-    if ( opt_xenheap_megabytes > 2048 )
-        opt_xenheap_megabytes = 2048;
-
     /* Create a temporary copy of the E820 map. */
     memcpy(&boot_e820, &e820, sizeof(e820));
@@ -654,8 +631,9 @@ void __init __start_xen(unsigned long mb
             s >> PAGE_SHIFT, (e-s) >> PAGE_SHIFT, PAGE_HYPERVISOR);
 
 #if defined(CONFIG_X86_64)
+#define reloc_size ((__pa(&_end) + mask) & ~mask)
         /* Is the region suitable for relocating Xen? */
-        if ( !xen_phys_start && (((e-s) >> 20) >= opt_xenheap_megabytes) )
+        if ( !xen_phys_start && ((e-s) >= reloc_size) )
         {
             extern l2_pgentry_t l2_xenmap[];
             l4_pgentry_t *pl4e;
@@ -664,7 +642,7 @@ void __init __start_xen(unsigned long mb
             int i, j, k;
 
             /* Select relocation address. */
-            e = (e - (opt_xenheap_megabytes << 20)) & ~mask;
+            e -= reloc_size;
             xen_phys_start = e;
             bootsym(trampoline_xen_phys_start) = e;
 
@@ -760,15 +738,15 @@ void __init __start_xen(unsigned long mb
         EARLY_FAIL("Not enough memory to relocate the dom0 kernel image.\n");
     reserve_e820_ram(&boot_e820, initial_images_start, initial_images_end);
 
-    /* Initialise Xen heap and boot heap. */
-    xenheap_phys_start = init_boot_allocator(__pa(&_end));
-    xenheap_phys_end = opt_xenheap_megabytes << 20;
-#if defined(CONFIG_X86_64)
+    /* Initialise boot heap. */
+    allocator_bitmap_end = init_boot_allocator(__pa(&_end));
+#if defined(CONFIG_X86_32)
+    xenheap_phys_start = allocator_bitmap_end;
+    xenheap_phys_end = DIRECTMAP_MBYTES << 20;
+#else
     if ( !xen_phys_start )
         EARLY_FAIL("Not enough memory to relocate Xen.\n");
-    xenheap_phys_end += xen_phys_start;
-    reserve_e820_ram(&boot_e820, xen_phys_start,
-                     xen_phys_start + (opt_xenheap_megabytes<<20));
+    reserve_e820_ram(&boot_e820, __pa(&_start), allocator_bitmap_end);
 #endif
 
     /* Late kexec reservation (dynamic start address). */
@@ -861,22 +839,22 @@ void __init __start_xen(unsigned long mb
 
     numa_initmem_init(0, max_page);
 
-    /* Initialise the Xen heap, skipping RAM holes. */
+#if defined(CONFIG_X86_32)
+    /* Initialise the Xen heap. */
     init_xenheap_pages(xenheap_phys_start, xenheap_phys_end);
     nr_pages = (xenheap_phys_end - xenheap_phys_start) >> PAGE_SHIFT;
-#ifdef __x86_64__
-    init_xenheap_pages(xen_phys_start, __pa(&_start));
-    nr_pages += (__pa(&_start) - xen_phys_start) >> PAGE_SHIFT;
-    vesa_init();
-#endif
     xenheap_phys_start = xen_phys_start;
     printk("Xen heap: %luMB (%lukB)\n",
            nr_pages >> (20 - PAGE_SHIFT),
           nr_pages << (PAGE_SHIFT - 10));
+#endif
 
     end_boot_allocator();
-    early_boot = 0;
+
+#if defined(CONFIG_X86_64)
+    vesa_init();
+#endif
 
     softirq_init();
@@ -1115,10 +1093,15 @@ void arch_get_xen_caps(xen_capabilities_
 
 int xen_in_range(paddr_t start, paddr_t end)
 {
-    start = max_t(paddr_t, start, xenheap_phys_start);
-    end = min_t(paddr_t, end, xenheap_phys_end);
-
-    return start < end;
+#if defined(CONFIG_X86_32)
+    paddr_t xs = xenheap_phys_start;
+    paddr_t xe = xenheap_phys_end;
+#else
+    paddr_t xs = __pa(&_start);
+    paddr_t xe = __pa(&_end);
+#endif
+
+    return (start < xe) && (end > xs);
 }
 
 /*
diff -r ba7ed03a3f79 -r 845aa241e163 xen/arch/x86/x86_32/machine_kexec.c
--- a/xen/arch/x86/x86_32/machine_kexec.c	Fri Jan 16 13:17:53 2009 +0000
+++ b/xen/arch/x86/x86_32/machine_kexec.c	Fri Jan 16 15:12:12 2009 +0000
@@ -5,8 +5,6 @@
  * - Simon 'Horms' Horman <horms@xxxxxxxxxxxx>
  * - Magnus Damm <magnus@xxxxxxxxxxxxx>
  */
-
-#ifndef CONFIG_COMPAT
 
 #include <xen/types.h>
 #include <xen/kernel.h>
@@ -20,7 +18,6 @@ int machine_kexec_get_xen(xen_kexec_rang
                   (unsigned long)range->start;
     return 0;
 }
-#endif
 
 /*
  * Local variables:
diff -r ba7ed03a3f79 -r 845aa241e163 xen/arch/x86/x86_64/machine_kexec.c
--- a/xen/arch/x86/x86_64/machine_kexec.c	Fri Jan 16 13:17:53 2009 +0000
+++ b/xen/arch/x86/x86_64/machine_kexec.c	Fri Jan 16 15:12:12 2009 +0000
@@ -6,20 +6,17 @@
  * - Magnus Damm <magnus@xxxxxxxxxxxxx>
  */
 
-#ifndef CONFIG_COMPAT
-
 #include <xen/types.h>
+#include <xen/kernel.h>
 #include <asm/page.h>
 #include <public/kexec.h>
 
 int machine_kexec_get_xen(xen_kexec_range_t *range)
 {
-    range->start = xenheap_phys_start;
-    range->size = (unsigned long)xenheap_phys_end -
-                  (unsigned long)range->start;
+    range->start = virt_to_maddr(_start);
+    range->size = virt_to_maddr(_end) - (unsigned long)range->start;
     return 0;
 }
-#endif
 
 /*
  * Local variables:
diff -r ba7ed03a3f79 -r 845aa241e163 xen/common/kexec.c
--- a/xen/common/kexec.c	Fri Jan 16 13:17:53 2009 +0000
+++ b/xen/common/kexec.c	Fri Jan 16 15:12:12 2009 +0000
@@ -336,7 +336,6 @@ static void crash_save_vmcoreinfo(void)
     VMCOREINFO_SYMBOL(frame_table);
     VMCOREINFO_SYMBOL(alloc_bitmap);
     VMCOREINFO_SYMBOL(max_page);
-    VMCOREINFO_SYMBOL(xenheap_phys_end);
 
     VMCOREINFO_STRUCT_SIZE(page_info);
     VMCOREINFO_STRUCT_SIZE(domain);
diff -r ba7ed03a3f79 -r 845aa241e163 xen/common/page_alloc.c
--- a/xen/common/page_alloc.c	Fri Jan 16 13:17:53 2009 +0000
+++ b/xen/common/page_alloc.c	Fri Jan 16 15:12:12 2009 +0000
@@ -634,6 +634,8 @@ void __init scrub_heap_pages(void)
  * XEN-HEAP SUB-ALLOCATOR
  */
 
+#ifndef __x86_64__
+
 void init_xenheap_pages(paddr_t ps, paddr_t pe)
 {
     ps = round_pgup(ps);
@@ -688,6 +690,55 @@ void free_xenheap_pages(void *v, unsigne
 
     free_heap_pages(MEMZONE_XEN, virt_to_page(v), order);
 }
+
+#else
+
+void init_xenheap_pages(paddr_t ps, paddr_t pe)
+{
+    init_domheap_pages(ps, pe);
+}
+
+void *alloc_xenheap_pages(unsigned int order)
+{
+    struct page_info *pg;
+    unsigned int i;
+
+    ASSERT(!in_irq());
+
+    pg = alloc_heap_pages(
+        MEMZONE_XEN+1, 31, cpu_to_node(smp_processor_id()), order);
+    if ( unlikely(pg == NULL) )
+        goto no_memory;
+
+    for ( i = 0; i < (1u << order); i++ )
+        pg[i].count_info |= PGC_xen_heap;
+
+    return page_to_virt(pg);
+
+ no_memory:
+    printk("Cannot handle page request order %d!\n", order);
+    return NULL;
+}
+
+void free_xenheap_pages(void *v, unsigned int order)
+{
+    struct page_info *pg;
+    unsigned int i;
+
+    ASSERT(!in_irq());
+
+    if ( v == NULL )
+        return;
+
+    pg = virt_to_page(v);
+
+    for ( i = 0; i < (1u << order); i++ )
+        pg[i].count_info &= ~PGC_xen_heap;
+
+    free_heap_pages(pfn_dom_zone_type(page_to_mfn(pg)), pg, order);
+}
+
+#endif
diff -r ba7ed03a3f79 -r 845aa241e163 xen/include/asm-x86/config.h
--- a/xen/include/asm-x86/config.h	Fri Jan 16 13:17:53 2009 +0000
+++ b/xen/include/asm-x86/config.h	Fri Jan 16 15:12:12 2009 +0000
@@ -116,8 +116,6 @@ extern unsigned int video_mode, video_fl
 #define CONFIG_COMPAT 1
 
 #define asmlinkage
-
-#define XENHEAP_DEFAULT_MB (16)
 
 #define PML4_ENTRY_BITS 39
 #ifndef __ASSEMBLY__
@@ -310,7 +308,6 @@ extern unsigned int video_mode, video_fl
 
 #define RO_MPT_VIRT_END     FRAMETABLE_VIRT_START
 #define RO_MPT_VIRT_START   (RO_MPT_VIRT_END - (MACHPHYS_MBYTES<<20))
-#define XENHEAP_DEFAULT_MB  (DIRECTMAP_MBYTES)
 #define DIRECTMAP_PHYS_END  (DIRECTMAP_MBYTES<<20)
 
 /* Maximum linear address accessible via guest memory segments. */
@@ -340,7 +337,10 @@ extern unsigned int video_mode, video_fl
 #endif /* __i386__ */
 
 #ifndef __ASSEMBLY__
-extern unsigned long xen_phys_start, xenheap_phys_start, xenheap_phys_end;
+extern unsigned long xen_phys_start;
+#if defined(__i386__)
+extern unsigned long xenheap_phys_start, xenheap_phys_end;
+#endif
 #endif
 
 /* GDT/LDT shadow mapping area. The first per-domain-mapping sub-area. */
diff -r ba7ed03a3f79 -r 845aa241e163 xen/include/asm-x86/mm.h
--- a/xen/include/asm-x86/mm.h	Fri Jan 16 13:17:53 2009 +0000
+++ b/xen/include/asm-x86/mm.h	Fri Jan 16 15:12:12 2009 +0000
@@ -135,6 +135,10 @@ struct page_info
  /* Page is locked? */
 # define _PGC_locked   30
 # define PGC_locked    (1U<<_PGC_out_of_sync)
+#else
+ /* Page is Xen heap? */
+# define _PGC_xen_heap 30
+# define PGC_xen_heap  (1U<<_PGC_xen_heap)
 #endif
  /* Set when is using a page as a page table */
 #define _PGC_page_table 29
@@ -145,12 +149,17 @@ struct page_info
  /* 26-bit count of references to this frame. */
 #define PGC_count_mask ((1U<<26)-1)
 
+#if defined(__i386__)
 #define is_xen_heap_page(page) is_xen_heap_mfn(page_to_mfn(page))
 #define is_xen_heap_mfn(mfn) ({                          \
     unsigned long _mfn = (mfn);                          \
     ((_mfn >= paddr_to_pfn(xenheap_phys_start)) &&       \
      (_mfn < paddr_to_pfn(xenheap_phys_end)));           \
 })
+#else
+#define is_xen_heap_page(page) ((page)->count_info & PGC_xen_heap)
+#define is_xen_heap_mfn(mfn) is_xen_heap_page(&frame_table[mfn])
+#endif
 
 #if defined(__i386__)
 #define pickle_domptr(_d) ((u32)(unsigned long)(_d))

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog