[Xen-changelog] Pull the Linux percpu interface into Xen. Implemented for
# HG changeset patch
# User kaf24@xxxxxxxxxxxxxxxxxxxx
# Node ID 9b1c9d4133f891cc2168186eab4ab4bcebd70438
# Parent  42a398e1daf1fcc823156f33f85bfe1d785662c4
Pull the Linux percpu interface into Xen. Implemented for x86 and used it
to eliminate the percpu_ctxt struct from arch/x86/domain.c.

Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>

diff -r 42a398e1daf1 -r 9b1c9d4133f8 xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c     Fri Apr 21 17:19:31 2006 +0100
+++ b/xen/arch/x86/domain.c     Fri Apr 21 17:35:15 2006 +0100
@@ -21,6 +21,12 @@
 #include <xen/softirq.h>
 #include <xen/grant_table.h>
 #include <xen/iocap.h>
+#include <xen/kernel.h>
+#include <xen/multicall.h>
+#include <xen/irq.h>
+#include <xen/event.h>
+#include <xen/console.h>
+#include <xen/percpu.h>
 #include <asm/regs.h>
 #include <asm/mc146818rtc.h>
 #include <asm/system.h>
@@ -30,22 +36,12 @@
 #include <asm/i387.h>
 #include <asm/mpspec.h>
 #include <asm/ldt.h>
-#include <xen/irq.h>
-#include <xen/event.h>
 #include <asm/shadow.h>
-#include <xen/console.h>
-#include <xen/elf.h>
 #include <asm/hvm/hvm.h>
 #include <asm/hvm/support.h>
 #include <asm/msr.h>
-#include <xen/kernel.h>
-#include <xen/multicall.h>
-
-struct percpu_ctxt {
-    struct vcpu *curr_vcpu;
-    unsigned int dirty_segment_mask;
-} __cacheline_aligned;
-static struct percpu_ctxt percpu_ctxt[NR_CPUS];
+
+DEFINE_PER_CPU(struct vcpu *, curr_vcpu);
 
 static void paravirt_ctxt_switch_from(struct vcpu *v);
 static void paravirt_ctxt_switch_to(struct vcpu *v);
@@ -121,11 +117,6 @@ void dump_pageframe_info(struct domain *
             _p(page_to_maddr(page)), _p(page_to_mfn(page)),
             page->count_info, page->u.inuse.type_info);
     }
-}
-
-void set_current_execstate(struct vcpu *v)
-{
-    percpu_ctxt[smp_processor_id()].curr_vcpu = v;
 }
 
 struct vcpu *alloc_vcpu_struct(struct domain *d, unsigned int vcpu_id)
@@ -459,6 +450,7 @@ void new_thread(struct vcpu *d,
  * allowing load_segments() to avoid some expensive segment loads and
  * MSR writes.
  */
+static DEFINE_PER_CPU(unsigned int, dirty_segment_mask);
 #define DIRTY_DS 0x01
 #define DIRTY_ES 0x02
 #define DIRTY_FS 0x04
@@ -473,8 +465,8 @@ static void load_segments(struct vcpu *n
     unsigned int dirty_segment_mask, cpu = smp_processor_id();
 
     /* Load and clear the dirty segment mask. */
-    dirty_segment_mask = percpu_ctxt[cpu].dirty_segment_mask;
-    percpu_ctxt[cpu].dirty_segment_mask = 0;
+    dirty_segment_mask = per_cpu(dirty_segment_mask, cpu);
+    per_cpu(dirty_segment_mask, cpu) = 0;
 
     /* Either selector != 0 ==> reload. */
     if ( unlikely((dirty_segment_mask & DIRTY_DS) | nctxt->user_regs.ds) )
@@ -601,7 +593,7 @@ static void save_segments(struct vcpu *v
             dirty_segment_mask |= DIRTY_GS_BASE_USER;
     }
 
-    percpu_ctxt[smp_processor_id()].dirty_segment_mask = dirty_segment_mask;
+    this_cpu(dirty_segment_mask) = dirty_segment_mask;
 }
 
 #define switch_kernel_stack(v) ((void)0)
@@ -638,7 +630,7 @@ static void __context_switch(void)
 {
     struct cpu_user_regs *stack_regs = guest_cpu_user_regs();
     unsigned int          cpu = smp_processor_id();
-    struct vcpu          *p = percpu_ctxt[cpu].curr_vcpu;
+    struct vcpu          *p = per_cpu(curr_vcpu, cpu);
     struct vcpu          *n = current;
 
     ASSERT(p != n);
@@ -692,7 +684,7 @@ static void __context_switch(void)
     cpu_clear(cpu, p->domain->domain_dirty_cpumask);
     cpu_clear(cpu, p->vcpu_dirty_cpumask);
 
-    percpu_ctxt[cpu].curr_vcpu = n;
+    per_cpu(curr_vcpu, cpu) = n;
 }
 
 
@@ -716,7 +708,7 @@ void context_switch(struct vcpu *prev, s
 
     set_current(next);
 
-    if ( (percpu_ctxt[cpu].curr_vcpu == next) || is_idle_vcpu(next) )
+    if ( (per_cpu(curr_vcpu, cpu) == next) || is_idle_vcpu(next) )
     {
         local_irq_enable();
     }
@@ -758,7 +750,7 @@ int __sync_lazy_execstate(void)
 
     local_irq_save(flags);
 
-    switch_required = (percpu_ctxt[smp_processor_id()].curr_vcpu != current);
+    switch_required = (this_cpu(curr_vcpu) != current);
 
     if ( switch_required )
         __context_switch();
diff -r 42a398e1daf1 -r 9b1c9d4133f8 xen/arch/x86/setup.c
--- a/xen/arch/x86/setup.c      Fri Apr 21 17:19:31 2006 +0100
+++ b/xen/arch/x86/setup.c      Fri Apr 21 17:35:15 2006 +0100
@@ -14,6 +14,7 @@
 #include <xen/domain_page.h>
 #include <xen/compile.h>
 #include <xen/gdbstub.h>
+#include <xen/percpu.h>
 #include <public/version.h>
 #include <asm/bitops.h>
 #include <asm/smp.h>
@@ -157,6 +158,38 @@ void discard_initial_images(void)
 void discard_initial_images(void)
 {
     init_domheap_pages(initial_images_start, initial_images_end);
+}
+
+extern char __per_cpu_start[], __per_cpu_data_end[], __per_cpu_end[];
+
+static void percpu_init_areas(void)
+{
+    unsigned int i, data_size = __per_cpu_data_end - __per_cpu_start;
+
+    BUG_ON(data_size > PERCPU_SIZE);
+
+    for ( i = 1; i < NR_CPUS; i++ )
+        memcpy(__per_cpu_start + (i << PERCPU_SHIFT),
+               __per_cpu_start,
+               data_size);
+}
+
+static void percpu_free_unused_areas(void)
+{
+    unsigned int i, first_unused;
+
+    /* Find first unused CPU number. */
+    for ( i = 0; i < NR_CPUS; i++ )
+        if ( !cpu_online(i) )
+            break;
+    first_unused = i;
+
+    /* Check that there are no holes in cpu_online_map. */
+    for ( ; i < NR_CPUS; i++ )
+        BUG_ON(cpu_online(i));
+
+    init_xenheap_pages(__pa(__per_cpu_start) + (first_unused << PERCPU_SHIFT),
+                       __pa(__per_cpu_end));
 }
 
 void __init __start_xen(multiboot_info_t *mbi)
@@ -208,6 +241,8 @@ void __init __start_xen(multiboot_info_t
         printk("FATAL ERROR: Misaligned CPU0 stack.\n");
         EARLY_FAIL();
     }
+
+    percpu_init_areas();
 
     xenheap_phys_end = opt_xenheap_megabytes << 20;
 
@@ -405,7 +440,7 @@ void __init __start_xen(multiboot_info_t
     BUG_ON(idle_domain == NULL);
 
     set_current(idle_domain->vcpu[0]);
-    set_current_execstate(idle_domain->vcpu[0]);
+    this_cpu(curr_vcpu) = idle_domain->vcpu[0];
     idle_vcpu[0] = current;
 
     paging_init();
@@ -481,6 +516,8 @@ void __init __start_xen(multiboot_info_t
     printk("Brought up %ld CPUs\n", (long)num_online_cpus());
     smp_cpus_done(max_cpus);
+
+    percpu_free_unused_areas();
 
     initialise_gdb(); /* could be moved earlier */
diff -r 42a398e1daf1 -r 9b1c9d4133f8 xen/arch/x86/smpboot.c
--- a/xen/arch/x86/smpboot.c    Fri Apr 21 17:19:31 2006 +0100
+++ b/xen/arch/x86/smpboot.c    Fri Apr 21 17:35:15 2006 +0100
@@ -531,7 +531,7 @@ void __devinit start_secondary(void *unu
 
        set_processor_id(cpu);
        set_current(idle_vcpu[cpu]);
-       set_current_execstate(idle_vcpu[cpu]);
+       this_cpu(curr_vcpu) = idle_vcpu[cpu];
 
        percpu_traps_init();
diff -r 42a398e1daf1 -r 9b1c9d4133f8 xen/arch/x86/x86_32/xen.lds.S
--- a/xen/arch/x86/x86_32/xen.lds.S     Fri Apr 21 17:19:31 2006 +0100
+++ b/xen/arch/x86/x86_32/xen.lds.S     Fri Apr 21 17:35:15 2006 +0100
@@ -5,6 +5,7 @@
 
 #include <xen/config.h>
 #include <asm/page.h>
+#include <asm/percpu.h>
 #undef ENTRY
 #undef ALIGN
 
@@ -56,8 +57,15 @@ SECTIONS
   __initcall_start = .;
   .initcall.init : { *(.initcall.init) } :text
   __initcall_end = .;
+  . = ALIGN(PAGE_SIZE);
+  __init_end = .;
+
+  __per_cpu_start = .;
+  .data.percpu : { *(.data.percpu) } :text
+  __per_cpu_data_end = .;
+  . = __per_cpu_start + (NR_CPUS << PERCPU_SHIFT);
   . = ALIGN(STACK_SIZE);
-  __init_end = .;
+  __per_cpu_end = .;
 
   __bss_start = .;             /* BSS */
   .bss : {
diff -r 42a398e1daf1 -r 9b1c9d4133f8 xen/arch/x86/x86_64/xen.lds.S
--- a/xen/arch/x86/x86_64/xen.lds.S     Fri Apr 21 17:19:31 2006 +0100
+++ b/xen/arch/x86/x86_64/xen.lds.S     Fri Apr 21 17:35:15 2006 +0100
@@ -3,6 +3,7 @@
 
 #include <xen/config.h>
 #include <asm/page.h>
+#include <asm/percpu.h>
 #undef ENTRY
 #undef ALIGN
 
@@ -54,8 +55,15 @@ SECTIONS
   __initcall_start = .;
   .initcall.init : { *(.initcall.init) } :text
   __initcall_end = .;
+  . = ALIGN(PAGE_SIZE);
+  __init_end = .;
+
+  __per_cpu_start = .;
+  .data.percpu : { *(.data.percpu) } :text
+  __per_cpu_data_end = .;
+  . = __per_cpu_start + (NR_CPUS << PERCPU_SHIFT);
   . = ALIGN(STACK_SIZE);
-  __init_end = .;
+  __per_cpu_end = .;
 
   __bss_start = .;             /* BSS */
   .bss : {
diff -r 42a398e1daf1 -r 9b1c9d4133f8 xen/include/asm-x86/current.h
--- a/xen/include/asm-x86/current.h     Fri Apr 21 17:19:31 2006 +0100
+++ b/xen/include/asm-x86/current.h     Fri Apr 21 17:35:15 2006 +0100
@@ -16,7 +16,7 @@ struct cpu_info {
 struct cpu_info {
     struct cpu_user_regs guest_cpu_user_regs;
     unsigned int         processor_id;
-    struct vcpu         *current_ed;
+    struct vcpu         *current_vcpu;
 };
 
 static inline struct cpu_info *get_cpu_info(void)
@@ -29,12 +29,12 @@ static inline struct cpu_info *get_cpu_i
     return cpu_info;
 }
 
-#define get_current()         (get_cpu_info()->current_ed)
-#define set_current(_ed)      (get_cpu_info()->current_ed = (_ed))
+#define get_current()         (get_cpu_info()->current_vcpu)
+#define set_current(vcpu)     (get_cpu_info()->current_vcpu = (vcpu))
 #define current               (get_current())
 
 #define get_processor_id()    (get_cpu_info()->processor_id)
-#define set_processor_id(_id) (get_cpu_info()->processor_id = (_id))
+#define set_processor_id(id)  (get_cpu_info()->processor_id = (id))
 
 #define guest_cpu_user_regs() (&get_cpu_info()->guest_cpu_user_regs)
 
@@ -51,8 +51,14 @@ static inline struct cpu_info *get_cpu_i
             "mov %0,%%"__OP"sp; jmp "STR(__fn)      \
             : : "r" (guest_cpu_user_regs()) : "memory" )
 
-#define schedule_tail(_ed) (((_ed)->arch.schedule_tail)(_ed))
+#define schedule_tail(vcpu) (((vcpu)->arch.schedule_tail)(vcpu))
 
-extern void set_current_execstate(struct vcpu *v);
+#include <xen/percpu.h>
+/*
+ * Which VCPU's state is currently running on each CPU?
+ * This is not necesasrily the same as 'current' as a CPU may be
+ * executing a lazy state switch.
+ */
+DECLARE_PER_CPU(struct vcpu *, curr_vcpu);
 
 #endif /* __X86_CURRENT_H__ */
diff -r 42a398e1daf1 -r 9b1c9d4133f8 xen/include/xen/compiler.h
--- a/xen/include/xen/compiler.h        Fri Apr 21 17:19:31 2006 +0100
+++ b/xen/include/xen/compiler.h        Fri Apr 21 17:35:15 2006 +0100
@@ -25,4 +25,17 @@
 #define __must_check
 #endif
 
+/* This macro obfuscates arithmetic on a variable address so that gcc
+   shouldn't recognize the original var, and make assumptions about it */
+/*
+ * Versions of the ppc64 compiler before 4.1 had a bug where use of
+ * RELOC_HIDE could trash r30. The bug can be worked around by changing
+ * the inline assembly constraint from =g to =r, in this particular
+ * case either is valid.
+ */
+#define RELOC_HIDE(ptr, off)                    \
+  ({ unsigned long __ptr;                       \
+     __asm__ ("" : "=r"(__ptr) : "0"(ptr));     \
+     (typeof(ptr)) (__ptr + (off)); })
+
 #endif /* __LINUX_COMPILER_H */
diff -r 42a398e1daf1 -r 9b1c9d4133f8 xen/include/asm-x86/percpu.h
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-x86/percpu.h      Fri Apr 21 17:35:15 2006 +0100
@@ -0,0 +1,20 @@
+#ifndef __X86_PERCPU_H__
+#define __X86_PERCPU_H__
+
+#define PERCPU_SHIFT 12
+#define PERCPU_SIZE  (1UL << PERCPU_SHIFT)
+
+/* Separate out the type, so (int[3], foo) works. */
+#define DEFINE_PER_CPU(type, name)                      \
+    __attribute__((__section__(".data.percpu")))        \
+    __typeof__(type) per_cpu__##name
+
+/* var is in discarded region: offset to particular copy we want */
+#define per_cpu(var, cpu)  \
+    (*RELOC_HIDE(&per_cpu__##var, ((unsigned int)(cpu))<<PERCPU_SHIFT))
+#define __get_cpu_var(var) \
+    (per_cpu(var, smp_processor_id()))
+
+#define DECLARE_PER_CPU(type, name) extern __typeof__(type) per_cpu__##name
+
+#endif /* __X86_PERCPU_H__ */
diff -r 42a398e1daf1 -r 9b1c9d4133f8 xen/include/xen/percpu.h
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/xen/percpu.h  Fri Apr 21 17:35:15 2006 +0100
@@ -0,0 +1,15 @@
+#ifndef __XEN_PERCPU_H__
+#define __XEN_PERCPU_H__
+
+#include <xen/config.h>
+#include <xen/smp.h>
+#include <asm/percpu.h>
+
+/* Preferred on Xen. Also see arch-defined per_cpu(). */
+#define this_cpu(var)    __get_cpu_var(var)
+
+/* Linux compatibility. */
+#define get_cpu_var(var) this_cpu(var)
+#define put_cpu_var(var)
+
+#endif /* __XEN_PERCPU_H__ */

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
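
For readers new to the interface this changeset introduces: per-CPU variables are
declared with DECLARE_PER_CPU()/DEFINE_PER_CPU() and accessed with per_cpu(var, cpu)
or this_cpu(var). Below is a rough, self-contained user-space sketch of the underlying
idea only (one image of the data replicated at PERCPU_SHIFT-byte strides, with accessors
that offset from CPU 0's copy). The percpu_area object, the fixed NR_CPUS value and the
smp_processor_id() stub are stand-ins invented for the illustration, not part of the
patch; in Xen the copies live after __per_cpu_start in the linker script, are populated
by percpu_init_areas(), and the offset arithmetic is hidden from gcc by RELOC_HIDE().

/*
 * Illustrative user-space model only, not Xen code.  One copy of the
 * "per-CPU data" is laid out per CPU at PERCPU_SHIFT-byte strides, and
 * per_cpu()/this_cpu() step from CPU 0's copy to the requested CPU's
 * copy, mirroring what the real macros do over the .data.percpu copies.
 */
#include <stdio.h>

#define NR_CPUS      4      /* stand-in; Xen's NR_CPUS comes from its config */
#define PERCPU_SHIFT 12
#define PERCPU_SIZE  (1UL << PERCPU_SHIFT)

/* Stand-in for the .data.percpu area plus its NR_CPUS copies. */
static union { long align; char pad[NR_CPUS << PERCPU_SHIFT]; } percpu_area;

/* Toy DEFINE_PER_CPU: CPU 0's copy sits at the start of the area. */
#define DEFINE_PER_CPU(type, name) \
    static type *per_cpu__##name = (type *)&percpu_area

/* Resolve a per-CPU variable to the copy owned by 'cpu'. */
#define per_cpu(var, cpu)                                                  \
    (*(__typeof__(per_cpu__##var))                                         \
        ((char *)per_cpu__##var + (((unsigned long)(cpu)) << PERCPU_SHIFT)))

static unsigned int smp_processor_id(void) { return 0; } /* pretend: always CPU 0 */
#define this_cpu(var) per_cpu(var, smp_processor_id())

DEFINE_PER_CPU(unsigned int, dirty_segment_mask);

int main(void)
{
    unsigned int cpu;

    /* Each CPU owns a private slot; updates do not interfere. */
    for (cpu = 0; cpu < NR_CPUS; cpu++)
        per_cpu(dirty_segment_mask, cpu) = cpu << 4;

    this_cpu(dirty_segment_mask) |= 0x01;   /* only CPU 0's copy changes */

    for (cpu = 0; cpu < NR_CPUS; cpu++)
        printf("cpu%u: dirty_segment_mask = %#x\n",
               cpu, per_cpu(dirty_segment_mask, cpu));
    return 0;
}

In the patch itself the same three jobs are split between the linker scripts (which
reserve NR_CPUS << PERCPU_SHIFT bytes starting at __per_cpu_start), percpu_init_areas()
(which memcpy()s CPU 0's initial image into every other CPU's slot at boot), and
RELOC_HIDE() (which keeps gcc from reasoning about the original variable's address
while the offset is applied).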