[Xen-changelog] [xen-unstable] Reduce 'd' debug key's global impact
# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1273161608 -3600
# Node ID e85d18cbf00837bc8815b4539a0673c7f23cce16
# Parent  f20b608e225af1c7ee5b0603a0d2d85ca9ef04c4
Reduce 'd' debug key's global impact

On large systems, dumping state may cause time management to get stalled
for so long a period that it wouldn't recover. Therefore alter the state
dumping logic to alternatively block each CPU as it prints rather than one
CPU for a very long time (using the alternative key handling toggle
introduced with an earlier patch).

Further, instead of using on_selected_cpus(), which is unsafe when the
dumping happens from a hardware interrupt, introduce and use a dedicated
IPI sending function (which each architecture can implement to its liking).

Finally, don't print useless data (e.g. the hypervisor context of the
interrupt that is used for triggering the printing, but isn't part of the
context that's actually interesting).

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxxxx>
Signed-off-by: Keir Fraser <keir.fraser@xxxxxxxxxx>
---
 xen/arch/ia64/linux-xen/smp.c               |   11 ++++
 xen/arch/x86/apic.c                         |   36 ++++++++++----
 xen/common/keyhandler.c                     |   69 +++++++++++++++++++---------
 xen/include/asm-ia64/linux-xen/asm/ptrace.h |    2
 xen/include/xen/lib.h                       |    3 +
 xen/include/xen/smp.h                       |    2
 6 files changed, 93 insertions(+), 30 deletions(-)

diff -r f20b608e225a -r e85d18cbf008 xen/arch/ia64/linux-xen/smp.c
--- a/xen/arch/ia64/linux-xen/smp.c     Thu May 06 16:01:49 2010 +0100
+++ b/xen/arch/ia64/linux-xen/smp.c     Thu May 06 17:00:08 2010 +0100
@@ -94,6 +94,7 @@ static volatile struct call_data_struct
 
 #define IPI_CALL_FUNC           0
 #define IPI_CPU_STOP            1
+#define IPI_STATE_DUMP          2
 
 /* This needs to be cacheline aligned because it is written to by *other* CPUs.  */
 static DEFINE_PER_CPU(u64, ipi_operation) ____cacheline_aligned;
@@ -200,6 +201,10 @@ handle_IPI (int irq, void *dev_id, struc
 
	      case IPI_CPU_STOP:
		stop_this_cpu();
+		break;
+
+	      case IPI_STATE_DUMP:
+		dump_execstate(regs);
		break;
 
	      default:
@@ -479,6 +484,12 @@ smp_send_stop (void)
	send_IPI_allbutself(IPI_CPU_STOP);
 }
 
+void
+smp_send_state_dump (unsigned int cpu)
+{
+	send_IPI_single(cpu, IPI_STATE_DUMP);
+}
+
 int __init
 setup_profiling_timer (unsigned int multiplier)
 {
diff -r f20b608e225a -r e85d18cbf008 xen/arch/x86/apic.c
--- a/xen/arch/x86/apic.c       Thu May 06 16:01:49 2010 +0100
+++ b/xen/arch/x86/apic.c       Thu May 06 17:00:08 2010 +0100
@@ -1233,8 +1233,17 @@ fastcall void smp_apic_timer_interrupt(s
     set_irq_regs(old_regs);
 }
 
-/*
- * This interrupt should _never_ happen with our APIC/SMP architecture
+static DEFINE_PER_CPU(bool_t, state_dump_pending);
+
+void smp_send_state_dump(unsigned int cpu)
+{
+    /* We overload the spurious interrupt handler to handle the dump. */
+    per_cpu(state_dump_pending, cpu) = 1;
+    send_IPI_mask(cpumask_of(cpu), SPURIOUS_APIC_VECTOR);
+}
+
+/*
+ * Spurious interrupts should _never_ happen with our APIC/SMP architecture.
  */
 fastcall void smp_spurious_interrupt(struct cpu_user_regs *regs)
 {
@@ -1242,18 +1251,27 @@ fastcall void smp_spurious_interrupt(str
     struct cpu_user_regs *old_regs = set_irq_regs(regs);
 
     irq_enter();
-    /*
-     * Check if this really is a spurious interrupt and ACK it
-     * if it is a vectored one.  Just in case...
-     * Spurious interrupts should not be ACKed.
+
+    /*
+     * Check if this is a vectored interrupt (most likely, as this is probably
+     * a request to dump local CPU state). Vectored interrupts are ACKed;
+     * spurious interrupts are not.
      */
     v = apic_read(APIC_ISR + ((SPURIOUS_APIC_VECTOR & ~0x1f) >> 1));
-    if (v & (1 << (SPURIOUS_APIC_VECTOR & 0x1f)))
+    if (v & (1 << (SPURIOUS_APIC_VECTOR & 0x1f))) {
         ack_APIC_irq();
+        if (this_cpu(state_dump_pending)) {
+            this_cpu(state_dump_pending) = 0;
+            dump_execstate(regs);
+            goto out;
+        }
+    }
 
     /* see sw-dev-man vol 3, chapter 7.4.13.5 */
-    printk(KERN_INFO "spurious APIC interrupt on CPU#%d, should never happen.\n",
-           smp_processor_id());
+    printk(KERN_INFO "spurious APIC interrupt on CPU#%d, should "
+           "never happen.\n", smp_processor_id());
+
+ out:
     irq_exit();
     set_irq_regs(old_regs);
 }
diff -r f20b608e225a -r e85d18cbf008 xen/common/keyhandler.c
--- a/xen/common/keyhandler.c   Thu May 06 16:01:49 2010 +0100
+++ b/xen/common/keyhandler.c   Thu May 06 17:00:08 2010 +0100
@@ -71,14 +71,40 @@ static struct keyhandler show_handlers_k
     .desc = "show this message"
 };
 
-static void __dump_execstate(void *unused)
-{
-    dump_execution_state();
-    printk("*** Dumping CPU%d guest state: ***\n", smp_processor_id());
-    if ( is_idle_vcpu(current) )
-        printk("No guest context (CPU is idle).\n");
-    else
+static cpumask_t dump_execstate_mask;
+
+void dump_execstate(struct cpu_user_regs *regs)
+{
+    unsigned int cpu = smp_processor_id();
+
+    if ( !guest_mode(regs) )
+    {
+        printk("*** Dumping CPU%u host state: ***\n", cpu);
+        show_execution_state(regs);
+    }
+
+    if ( !is_idle_vcpu(current) )
+    {
+        printk("*** Dumping CPU%u guest state (d%d:v%d): ***\n",
+               smp_processor_id(), current->domain->domain_id,
+               current->vcpu_id);
         show_execution_state(guest_cpu_user_regs());
+        printk("\n");
+    }
+
+    cpu_clear(cpu, dump_execstate_mask);
+    if ( !alt_key_handling )
+        return;
+
+    cpu = cycle_cpu(cpu, dump_execstate_mask);
+    if ( cpu < NR_CPUS )
+    {
+        smp_send_state_dump(cpu);
+        return;
+    }
+
+    console_end_sync();
+    watchdog_enable();
 }
 
 static void dump_registers(unsigned char key, struct cpu_user_regs *regs)
@@ -89,21 +115,24 @@ static void dump_registers(unsigned char
     watchdog_disable();
     console_start_sync();
 
-    printk("'%c' pressed -> dumping registers\n", key);
+    printk("'%c' pressed -> dumping registers\n\n", key);
+
+    dump_execstate_mask = cpu_online_map;
 
     /* Get local execution state out immediately, in case we get stuck. */
-    printk("\n*** Dumping CPU%d host state: ***\n", smp_processor_id());
-    __dump_execstate(NULL);
-
-    for_each_online_cpu ( cpu )
-    {
-        if ( cpu == smp_processor_id() )
-            continue;
-        printk("\n*** Dumping CPU%d host state: ***\n", cpu);
-        on_selected_cpus(cpumask_of(cpu), __dump_execstate, NULL, 1);
-    }
-
-    printk("\n");
+    dump_execstate(regs);
+
+    /* Alt. handling: remaining CPUs are dumped asynchronously one-by-one. */
+    if ( alt_key_handling )
+        return;
+
+    /* Normal handling: synchronously dump the remaining CPUs' states. */
+    for_each_cpu_mask ( cpu, dump_execstate_mask )
+    {
+        smp_send_state_dump(cpu);
+        while ( cpu_isset(cpu, dump_execstate_mask) )
+            cpu_relax();
+    }
 
     console_end_sync();
     watchdog_enable();
diff -r f20b608e225a -r e85d18cbf008 xen/include/asm-ia64/linux-xen/asm/ptrace.h
--- a/xen/include/asm-ia64/linux-xen/asm/ptrace.h       Thu May 06 16:01:49 2010 +0100
+++ b/xen/include/asm-ia64/linux-xen/asm/ptrace.h       Thu May 06 17:00:08 2010 +0100
@@ -280,7 +280,7 @@ struct switch_stack {
 # define ia64_task_regs(t)             (((struct pt_regs *) ((char *) (t) + IA64_STK_OFFSET)) - 1)
 # define ia64_psr(regs)                ((struct ia64_psr *) &(regs)->cr_ipsr)
 #ifdef XEN
-# define guest_mode(regs)              (ia64_psr(regs)->cpl != 0)
+# define guest_mode(regs)              (ia64_psr(regs)->cpl && !ia64_psr(regs)->vm)
 # define guest_kernel_mode(regs)       (ia64_psr(regs)->cpl == CONFIG_CPL0_EMUL)
 # define vmx_guest_kernel_mode(regs)   (ia64_psr(regs)->cpl == 0)
 # define regs_increment_iip(regs)                                      \
diff -r f20b608e225a -r e85d18cbf008 xen/include/xen/lib.h
--- a/xen/include/xen/lib.h     Thu May 06 16:01:49 2010 +0100
+++ b/xen/include/xen/lib.h     Thu May 06 17:00:08 2010 +0100
@@ -111,4 +111,7 @@ extern char *print_tainted(char *str);
 extern char *print_tainted(char *str);
 extern void add_taint(unsigned);
 
+struct cpu_user_regs;
+void dump_execstate(struct cpu_user_regs *);
+
 #endif /* __LIB_H__ */
diff -r f20b608e225a -r e85d18cbf008 xen/include/xen/smp.h
--- a/xen/include/xen/smp.h     Thu May 06 16:01:49 2010 +0100
+++ b/xen/include/xen/smp.h     Thu May 06 17:00:08 2010 +0100
@@ -12,6 +12,8 @@ extern void smp_send_event_check_mask(co
 extern void smp_send_event_check_mask(const cpumask_t *mask);
 #define smp_send_event_check_cpu(cpu)           \
     smp_send_event_check_mask(cpumask_of(cpu))
+
+extern void smp_send_state_dump(unsigned int cpu);
 
 /*
  * Prepare machine for booting other CPUs.

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
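
Not part of the changeset itself: the following is a minimal, self-contained
user-space C sketch of the chained dumping that the alternative key handling
introduces above, where each CPU dumps its own state, clears itself from the
pending mask, and kicks the next CPU. The bitmask, the cycle_cpu() helper and
the direct recursive call below are simplified stand-ins for the patch's
dump_execstate_mask, Xen's cpumask cycle_cpu() and the smp_send_state_dump()
IPI; the "dump" is just a printf.

/* chained_dump.c - toy model of the alternative 'd' key handling:
 * each "CPU" dumps itself, clears its bit, then forwards to the next. */
#include <stdio.h>

#define NR_CPUS 8

static unsigned int dump_mask;            /* stands in for dump_execstate_mask */

/* Next set bit after 'cpu', wrapping around; returns NR_CPUS if none remain. */
static unsigned int cycle_cpu(unsigned int cpu, unsigned int mask)
{
    unsigned int i;

    for (i = 1; i <= NR_CPUS; i++) {
        unsigned int next = (cpu + i) % NR_CPUS;
        if (mask & (1u << next))
            return next;
    }
    return NR_CPUS;
}

/* Models dump_execstate(): print own state, then pass the baton on. */
static void dump_execstate(unsigned int cpu)
{
    unsigned int next;

    printf("*** Dumping CPU%u state ***\n", cpu);

    dump_mask &= ~(1u << cpu);            /* cpu_clear(cpu, dump_mask) */

    next = cycle_cpu(cpu, dump_mask);
    if (next < NR_CPUS)
        dump_execstate(next);             /* models smp_send_state_dump(next) */
    else
        printf("All CPUs dumped; console sync/watchdog would be restored here.\n");
}

int main(void)
{
    dump_mask = (1u << NR_CPUS) - 1;      /* all "CPUs" pending, as in the key handler */
    dump_execstate(0);                    /* the key handler starts with the local CPU */
    return 0;
}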