[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-changelog] [xen-unstable] Rendezvous selected cpus in softirq (stop_machine).
# HG changeset patch # User Keir Fraser <keir.fraser@xxxxxxxxxx> # Date 1202745589 0 # Node ID 2a3111016f88c22cbf1f24ca31b7f9ecf7a71e15 # Parent 7b0c0ab0566bbf241620db3fc94e791528a4503c Rendezvous selected cpus in softirq (stop_machine). This is similar to stop_machine_run stub from Linux, to pull selected cpus in rendezvous point and the do some batch work under a safe environment. Current one usage is from S3 path, where individual cpu is pulled down with related online footprints being cleared. It's dangerous to have other cpus checking clobbered data structure in the middle, such as cpu_online_map, cpu_sibling_map, etc. Signed-off-by: Kevin Tian <kevin.tian@xxxxxxxxx> Signed-off-by: Keir Fraser <keir.fraser@xxxxxxxxxx> --- xen/arch/x86/cpu/mtrr/main.c | 2 xen/arch/x86/domain.c | 3 xen/arch/x86/smpboot.c | 25 ++++-- xen/common/Makefile | 1 xen/common/stop_machine.c | 168 +++++++++++++++++++++++++++++++++++++++++ xen/include/asm-x86/smp.h | 3 xen/include/xen/smp.h | 17 ++++ xen/include/xen/softirq.h | 3 xen/include/xen/stop_machine.h | 30 +++++++ 9 files changed, 239 insertions(+), 13 deletions(-) diff -r 7b0c0ab0566b -r 2a3111016f88 xen/arch/x86/cpu/mtrr/main.c --- a/xen/arch/x86/cpu/mtrr/main.c Mon Feb 11 14:55:33 2008 +0000 +++ b/xen/arch/x86/cpu/mtrr/main.c Mon Feb 11 15:59:49 2008 +0000 @@ -46,8 +46,6 @@ #define DEFINE_MUTEX(_m) DEFINE_SPINLOCK(_m) #define mutex_lock(_m) spin_lock(_m) #define mutex_unlock(_m) spin_unlock(_m) -#define lock_cpu_hotplug() ((void)0) -#define unlock_cpu_hotplug() ((void)0) #define dump_stack() ((void)0) #define get_cpu() smp_processor_id() #define put_cpu() do {} while(0) diff -r 7b0c0ab0566b -r 2a3111016f88 xen/arch/x86/domain.c --- a/xen/arch/x86/domain.c Mon Feb 11 14:55:33 2008 +0000 +++ b/xen/arch/x86/domain.c Mon Feb 11 15:59:49 2008 +0000 @@ -82,7 +82,6 @@ static void default_idle(void) static void play_dead(void) { - __cpu_disable(); /* This must be done before dead CPU ack */ cpu_exit_clear(); hvm_cpu_down(); @@ -101,7 
+100,7 @@ void idle_loop(void) { for ( ; ; ) { - if (cpu_is_offline(smp_processor_id())) + if ( cpu_is_offline(smp_processor_id()) ) play_dead(); page_scrub_schedule_work(); default_idle(); diff -r 7b0c0ab0566b -r 2a3111016f88 xen/arch/x86/smpboot.c --- a/xen/arch/x86/smpboot.c Mon Feb 11 14:55:33 2008 +0000 +++ b/xen/arch/x86/smpboot.c Mon Feb 11 15:59:49 2008 +0000 @@ -54,6 +54,7 @@ #include <mach_apic.h> #include <mach_wakecpu.h> #include <smpboot_hooks.h> +#include <xen/stop_machine.h> #define set_kernel_exec(x, y) (0) #define setup_trampoline() (bootsym_phys(trampoline_realmode_entry)) @@ -1208,6 +1209,15 @@ int __cpu_disable(void) if (cpu == 0) return -EBUSY; + /* + * Only S3 is using this path, and thus idle vcpus are running on all + * APs when we are called. To support full cpu hotplug, other + * notification mechanisms should be introduced (e.g., migrate vcpus + * off this physical cpu before rendezvous point). + */ + if (!is_idle_vcpu(current)) + return -EINVAL; + local_irq_disable(); clear_local_APIC(); /* Allow any queued timer interrupts to get serviced */ @@ -1242,6 +1252,11 @@ void __cpu_die(unsigned int cpu) process_pending_timers(); } printk(KERN_ERR "CPU %u didn't die...\n", cpu); +} + +static int take_cpu_down(void *unused) +{ + return __cpu_disable(); } /* @@ -1269,7 +1284,6 @@ int cpu_down(unsigned int cpu) int cpu_down(unsigned int cpu) { int err = 0; - cpumask_t mask; spin_lock(&cpu_add_remove_lock); if (num_online_cpus() == 1) { @@ -1283,11 +1297,10 @@ int cpu_down(unsigned int cpu) } printk("Prepare to bring CPU%d down...\n", cpu); - /* Send notification to remote idle vcpu */ - cpus_clear(mask); - cpu_set(cpu, mask); - per_cpu(cpu_state, cpu) = CPU_DYING; - smp_send_event_check_mask(mask); + + err = __stop_machine_run(take_cpu_down, NULL, cpu); + if ( err < 0 ) + goto out; __cpu_die(cpu); diff -r 7b0c0ab0566b -r 2a3111016f88 xen/common/Makefile --- a/xen/common/Makefile Mon Feb 11 14:55:33 2008 +0000 +++ b/xen/common/Makefile Mon Feb 11 
15:59:49 2008 +0000 @@ -16,6 +16,7 @@ obj-y += schedule.o obj-y += schedule.o obj-y += shutdown.o obj-y += softirq.o +obj-y += stop_machine.o obj-y += string.o obj-y += symbols.o obj-y += sysctl.o diff -r 7b0c0ab0566b -r 2a3111016f88 xen/common/stop_machine.c --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/xen/common/stop_machine.c Mon Feb 11 15:59:49 2008 +0000 @@ -0,0 +1,168 @@ +/****************************************************************************** + * common/stop_machine.c + * + * Facilities to put whole machine in a safe 'stop' state + * + * Copyright 2005 Rusty Russell rusty@xxxxxxxxxxxxxxx IBM Corporation + * Copyright 2008 Kevin Tian <kevin.tian@xxxxxxxxx>, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 Temple + * Place - Suite 330, Boston, MA 02111-1307 USA. 
+ */ + +#include <xen/config.h> +#include <xen/init.h> +#include <xen/spinlock.h> +#include <asm/smp.h> +#include <asm/current.h> +#include <xen/softirq.h> +#include <asm/processor.h> +#include <xen/errno.h> + +enum stopmachine_state { + STOPMACHINE_START, + STOPMACHINE_PREPARE, + STOPMACHINE_DISABLE_IRQ, + STOPMACHINE_INVOKE, + STOPMACHINE_EXIT +}; + +struct stopmachine_data { + unsigned int nr_cpus; + + enum stopmachine_state state; + atomic_t done; + + unsigned int fn_cpu; + int fn_result; + int (*fn)(void *); + void *fn_data; +}; + +static struct stopmachine_data stopmachine_data; +static DEFINE_SPINLOCK(stopmachine_lock); + +static void stopmachine_set_state(enum stopmachine_state state) +{ + atomic_set(&stopmachine_data.done, 0); + smp_wmb(); + stopmachine_data.state = state; + while ( atomic_read(&stopmachine_data.done) != stopmachine_data.nr_cpus ) + cpu_relax(); +} + +int __stop_machine_run(int (*fn)(void *), void *data, unsigned int cpu) +{ + cpumask_t allbutself; + unsigned int i, nr_cpus; + int ret; + + BUG_ON(!local_irq_is_enabled()); + + allbutself = cpu_online_map; + cpu_clear(smp_processor_id(), allbutself); + nr_cpus = cpus_weight(allbutself); + + if ( nr_cpus == 0 ) + { + BUG_ON(cpu != smp_processor_id()); + return (*fn)(data); + } + + /* Note: We shouldn't spin on lock when it's held by others since others + * is expecting this cpus to enter softirq context. Or else deadlock + * is caused. 
+ */ + if ( !spin_trylock(&stopmachine_lock) ) + return -EBUSY; + + stopmachine_data.fn = fn; + stopmachine_data.fn_data = data; + stopmachine_data.nr_cpus = nr_cpus; + stopmachine_data.fn_cpu = cpu; + atomic_set(&stopmachine_data.done, 0); + stopmachine_data.state = STOPMACHINE_START; + + smp_wmb(); + + for_each_cpu_mask ( i, allbutself ) + cpu_raise_softirq(i, STOPMACHINE_SOFTIRQ); + + stopmachine_set_state(STOPMACHINE_PREPARE); + + local_irq_disable(); + stopmachine_set_state(STOPMACHINE_DISABLE_IRQ); + + if ( cpu == smp_processor_id() ) + stopmachine_data.fn_result = (*fn)(data); + stopmachine_set_state(STOPMACHINE_INVOKE); + ret = stopmachine_data.fn_result; + + stopmachine_set_state(STOPMACHINE_EXIT); + local_irq_enable(); + + spin_unlock(&stopmachine_lock); + + return ret; +} + +int stop_machine_run(int (*fn)(void *), void *data, unsigned int cpu) +{ + int ret; + + lock_cpu_hotplug(); + ret = __stop_machine_run(fn, data, cpu); + unlock_cpu_hotplug(); + + return ret; +} + +static void stopmachine_softirq(void) +{ + enum stopmachine_state state = STOPMACHINE_START; + + smp_mb(); + + while ( state != STOPMACHINE_EXIT ) + { + while ( stopmachine_data.state == state ) + cpu_relax(); + + state = stopmachine_data.state; + switch ( state ) + { + case STOPMACHINE_DISABLE_IRQ: + local_irq_disable(); + break; + case STOPMACHINE_INVOKE: + if ( stopmachine_data.fn_cpu == smp_processor_id() ) + stopmachine_data.fn_result = + stopmachine_data.fn(stopmachine_data.fn_data); + break; + default: + break; + } + + smp_mb(); + atomic_inc(&stopmachine_data.done); + } + + local_irq_enable(); +} + +static int __init cpu_stopmachine_init(void) +{ + open_softirq(STOPMACHINE_SOFTIRQ, stopmachine_softirq); + return 0; +} +__initcall(cpu_stopmachine_init); diff -r 7b0c0ab0566b -r 2a3111016f88 xen/include/asm-x86/smp.h --- a/xen/include/asm-x86/smp.h Mon Feb 11 14:55:33 2008 +0000 +++ b/xen/include/asm-x86/smp.h Mon Feb 11 15:59:49 2008 +0000 @@ -51,12 +51,11 @@ extern u8 
x86_cpu_to_apicid[]; /* State of each CPU. */ #define CPU_ONLINE 0x0002 /* CPU is up */ -#define CPU_DYING 0x0003 /* CPU is requested to die */ #define CPU_DEAD 0x0004 /* CPU is dead */ DECLARE_PER_CPU(int, cpu_state); #ifdef CONFIG_HOTPLUG_CPU -#define cpu_is_offline(cpu) unlikely(per_cpu(cpu_state,cpu) == CPU_DYING) +#define cpu_is_offline(cpu) unlikely(!cpu_online(cpu)) extern int cpu_down(unsigned int cpu); extern int cpu_up(unsigned int cpu); extern void cpu_exit_clear(void); diff -r 7b0c0ab0566b -r 2a3111016f88 xen/include/xen/smp.h --- a/xen/include/xen/smp.h Mon Feb 11 14:55:33 2008 +0000 +++ b/xen/include/xen/smp.h Mon Feb 11 15:59:49 2008 +0000 @@ -112,4 +112,21 @@ static inline int on_each_cpu( #define smp_processor_id() raw_smp_processor_id() +#ifdef CONFIG_HOTPLUG_CPU +extern spinlock_t cpu_add_remove_lock; +/* + * FIXME: need a better lock mechanism when real cpu hotplug is later + * supported, since spinlock may cause dead lock: + * cpu0: in stop_machine with lock held. Wait for cpu1 to respond + * to stop request + * cpu1: spin loop on lock upon cpu hotplug request from guest, + * without chance to handle softirq + * ... 
+ */ +#define lock_cpu_hotplug() spin_lock(&cpu_add_remove_lock); +#define unlock_cpu_hotplug() spin_unlock(&cpu_add_remove_lock); +#else +#define lock_cpu_hotplug() do { } while ( 0 ) +#define unlock_cpu_hotplug() do { } while ( 0 ) #endif +#endif diff -r 7b0c0ab0566b -r 2a3111016f88 xen/include/xen/softirq.h --- a/xen/include/xen/softirq.h Mon Feb 11 14:55:33 2008 +0000 +++ b/xen/include/xen/softirq.h Mon Feb 11 15:59:49 2008 +0000 @@ -10,8 +10,9 @@ #define PAGE_SCRUB_SOFTIRQ 5 #define TRACE_SOFTIRQ 6 #define RCU_SOFTIRQ 7 +#define STOPMACHINE_SOFTIRQ 8 -#define NR_COMMON_SOFTIRQS 8 +#define NR_COMMON_SOFTIRQS 9 #include <asm/softirq.h> diff -r 7b0c0ab0566b -r 2a3111016f88 xen/include/xen/stop_machine.h --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/xen/include/xen/stop_machine.h Mon Feb 11 15:59:49 2008 +0000 @@ -0,0 +1,30 @@ +#ifndef __XEN_STOP_MACHINE_H__ +#define __XEN_STOP_MACHINE_H__ + +/** + * stop_machine_run: freeze the machine on all CPUs and run this function + * @fn: the function to run + * @data: the data ptr for the @fn() + * @cpu: the cpu to run @fn() on (or any, if @cpu == NR_CPUS). + * + * Description: This causes every other cpu to enter a safe point, with + * each of which disables interrupts, and finally interrupts are disabled + * on the current CPU. The result is that none is holding a spinlock + * or inside any other preempt-disabled region when @fn() runs. + * + * This can be thought of as a very heavy write lock, equivalent to + * grabbing every spinlock in the kernel. */ +int stop_machine_run(int (*fn)(void *), void *data, unsigned int cpu); + +/** + * __stop_machine_run: freeze the machine on all CPUs and run this function + * @fn: the function to run + * @data: the data ptr for the @fn + * @cpu: the cpu to run @fn on (or any, if @cpu == NR_CPUS. + * + * Description: This is a special version of the above, without explicit + * lock acquisition. Used by hotplug cpu. 
+ */ +int __stop_machine_run(int (*fn)(void *), void *data, unsigned int cpu); + +#endif /* __XEN_STOP_MACHINE_H__ */ _______________________________________________ Xen-changelog mailing list Xen-changelog@xxxxxxxxxxxxxxxxxxx http://lists.xensource.com/xen-changelog
|
Lists.xenproject.org is hosted with RackSpace, monitoring our servers 24x7x365 and backed by RackSpace's Fanatical Support®.