[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-changelog] [xen-unstable] [IA64] allocate percpu area in the xen va area.
# HG changeset patch # User Isaku Yamahata <yamahata@xxxxxxxxxxxxx> # Date 1218077254 -32400 # Node ID 7affdebb7a1ed4f80fb46c679f25c642b1a67034 # Parent e9706492e960ba7d8c79476c80183f5a86ae1dbc [IA64] allocate percpu area in the xen va area. To guarantee that the percpu is pinned down, move its virtual address from the xen identity mapped area to the xen va area which is pinned by DTR[IA64_TR_KERNEL]. Then unnecessary tlb miss fault will be avoided. Sometimes per cpu area is accessed from very critical point where tlb miss isn't allowed. Signed-off-by: Isaku Yamahata <yamahata@xxxxxxxxxxxxx> --- xen/arch/ia64/linux-xen/mm_contig.c | 41 +++++++++++++++++++++++----- xen/arch/ia64/xen/xensetup.c | 7 ++++ xen/include/asm-ia64/linux-xen/asm/percpu.h | 10 ++++++ 3 files changed, 52 insertions(+), 6 deletions(-)
diff -r e9706492e960 -r 7affdebb7a1e xen/arch/ia64/linux-xen/mm_contig.c --- a/xen/arch/ia64/linux-xen/mm_contig.c Thu Jul 31 12:25:50 2008 +0900 +++ b/xen/arch/ia64/linux-xen/mm_contig.c Thu Aug 07 11:47:34 2008 +0900 @@ -175,6 +175,39 @@ find_memory (void) #endif #ifdef CONFIG_SMP +#ifdef XEN +#include <asm/elf.h> + +void *percpu_area __initdata = NULL; + +void* __init +per_cpu_allocate(void *xen_heap_start, unsigned long end_in_pa) +{ + int order = get_order(NR_CPUS * PERCPU_PAGE_SIZE); + unsigned long size = 1UL << (order + PAGE_SHIFT); + unsigned long start = ALIGN_UP((unsigned long)xen_heap_start, + PERCPU_PAGE_SIZE); + unsigned long end = start + size; + + if (__pa(end) < end_in_pa) { + init_xenheap_pages(__pa(xen_heap_start), __pa(start)); + xen_heap_start = (void*)end; + percpu_area = (void*)virt_to_xenva(start); + printk("allocate percpu area 0x%lx@0x%lx 0x%p\n", + size, start, percpu_area); + } else { + panic("can't allocate percpu area. size 0x%lx\n", size); + } + return xen_heap_start; +} + +static void* __init +get_per_cpu_area(void) +{ + return percpu_area; +} +#endif + /** * per_cpu_init - setup per-cpu variables * @@ -193,13 +226,9 @@ per_cpu_init (void) */ if (smp_processor_id() == 0) { #ifdef XEN - struct page_info *page; - page = alloc_domheap_pages(NULL, - get_order(NR_CPUS * - PERCPU_PAGE_SIZE), 0); - if (page == NULL) + cpu_data = get_per_cpu_area(); + if (cpu_data == NULL) panic("can't allocate per cpu area.\n"); - cpu_data = page_to_virt(page); #else cpu_data = __alloc_bootmem(PERCPU_PAGE_SIZE * NR_CPUS, PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
diff -r e9706492e960 -r 7affdebb7a1e xen/arch/ia64/xen/xensetup.c --- a/xen/arch/ia64/xen/xensetup.c Thu Jul 31 12:25:50 2008 +0900 +++ b/xen/arch/ia64/xen/xensetup.c Thu Aug 07 11:47:34 2008 +0900 @@ -566,6 +566,13 @@ skip_move: if (vmx_enabled) xen_heap_start = vmx_init_env(xen_heap_start, xenheap_phys_end); + /* allocate memory for percpu area * per_cpu_init() called from late_set_arch() is called after * end_boot_allocate(). It's too late to allocate memory in * xenva. */ + xen_heap_start = per_cpu_allocate(xen_heap_start, xenheap_phys_end); + heap_desc.xen_heap_start = xen_heap_start; heap_desc.xenheap_phys_end = xenheap_phys_end; heap_desc.kern_md = kern_md;
diff -r e9706492e960 -r 7affdebb7a1e xen/include/asm-ia64/linux-xen/asm/percpu.h --- a/xen/include/asm-ia64/linux-xen/asm/percpu.h Thu Jul 31 12:25:50 2008 +0900 +++ b/xen/include/asm-ia64/linux-xen/asm/percpu.h Thu Aug 07 11:47:34 2008 +0900 @@ -50,12 +50,22 @@ extern void percpu_modcopy(void *pcpudst extern void percpu_modcopy(void *pcpudst, const void *src, unsigned long size); extern void setup_per_cpu_areas (void); extern void *per_cpu_init(void); +#ifdef XEN +extern void *per_cpu_allocate(void *xen_heap_start, unsigned long end_in_pa); +#endif #else /* ! SMP */ #define per_cpu(var, cpu) (*((void)(cpu), &per_cpu__##var)) #define __get_cpu_var(var) per_cpu__##var #define per_cpu_init() (__phys_per_cpu_start) +#ifdef XEN +static inline void *per_cpu_allocate(void *xen_heap_start, + unsigned long end_in_pa) +{ + return xen_heap_start; +} +#endif #endif /* SMP */
_______________________________________________ Xen-changelog mailing list Xen-changelog@xxxxxxxxxxxxxxxxxxx http://lists.xensource.com/xen-changelog
|
Lists.xenproject.org is hosted with RackSpace, monitoring our |