
RE: [Xen-devel] Re: One question to IST stack for PV guest



Can SysRQ-L (triggered with Alt-SysRq-l, or "echo l > /proc/sysrq-trigger") be
used to check a dead-locked CPU's state? If we have no NMI support, we lose
that capability.
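
For reference, SysRQ-L reaches the NMI path through its sysrq handler; a
simplified sketch of the 2.6.32-era drivers/char/sysrq.c (from memory, not
verbatim):

    static void sysrq_handle_showallcpus(int key, struct tty_struct *tty)
    {
            /* Sends an NMI IPI to every CPU so that each one dumps
             * its stack; this is the path that needs vNMI (or a
             * substitute) under Xen. */
            arch_trigger_all_cpu_backtrace();
    }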

--jyh

>-----Original Message-----
>From: Ian Campbell [mailto:Ian.Campbell@xxxxxxxxxx]
>Sent: Saturday, December 19, 2009 5:25 PM
>To: Jeremy Fitzhardinge
>Cc: Jiang, Yunhong; Kleen, Andi; xen-devel@xxxxxxxxxxxxxxxxxxx; Keir Fraser; Jan Beulich
>Subject: Re: [Xen-devel] Re: One question to IST stack for PV guest
>
>On Fri, 2009-12-18 at 21:21 +0000, Jeremy Fitzhardinge wrote:
>>
>> > BTW, Jeremy, seems vNMI support is not included in pvops dom0, will
>> it be supported in future?
>> >
>>
>> There's been no call for it so far, so I hadn't worried about it much.
>> I was thinking it might be useful as a debug tool, but I don't know
>> what it gets used for normally.
>
>SysRQ-L (show all cpus) uses it via arch_trigger_all_cpu_backtrace(),
>which is a problem even in a domU: it goes through
>apic->send_IPI_all(NMI_VECTOR) and ends up with "BUG: unable to handle
>kernel paging request" in default_send_IPI_mask_logical.
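>
>The fault presumably comes from the ICR write: the native path pokes
>the local APIC MMIO registers, which a PV guest has no mapping for.
>Roughly, paraphrasing the 2.6.32-era arch/x86/kernel/apic/ipi.c (not
>verbatim):
>
>       /* sketch: the MMIO-backed APIC writes are what fault under PV */
>       native_apic_mem_write(APIC_ICR2, __prepare_ICR2(mask));
>       native_apic_mem_write(APIC_ICR, __prepare_ICR(0, vector, apic->dest_logical));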
>
>I started adding a new smp_op yesterday to allow this function to be
>overridden (WIP appended), but having some sort of NMI support would be
>useful to reduce the differences from native on the receiving end,
>instead of using smp_call_function (see the receiving-end sketch below).
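>
>For context, the receiving end relies on the backtrace_mask check in
>nmi_watchdog_tick(), which is also why the Xen fallback below calls it
>directly. Paraphrased from the 2.6.32-era arch/x86/kernel/apic/nmi.c
>(not verbatim):
>
>       if (cpumask_test_cpu(cpu, &backtrace_mask)) {
>               static DEFINE_SPINLOCK(lock);   /* serialise the printks */
>
>               spin_lock(&lock);
>               printk(KERN_WARNING "NMI backtrace for cpu %d\n", cpu);
>               dump_stack();
>               spin_unlock(&lock);
>               cpumask_clear_cpu(cpu, &backtrace_mask);
>       }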
>
>Ian.
>
>
>diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
>index 1e79678..00ef5f7 100644
>--- a/arch/x86/include/asm/smp.h
>+++ b/arch/x86/include/asm/smp.h
>@@ -60,6 +60,8 @@ struct smp_ops {
>
>       void (*send_call_func_ipi)(const struct cpumask *mask);
>       void (*send_call_func_single_ipi)(int cpu);
>+
>+      void (*send_nmi_ipi)(void);
> };
>
> /* Globals due to paravirt */
>@@ -126,6 +128,11 @@ static inline void arch_send_call_function_ipi_mask(const struct cpumask *mask)
>       smp_ops.send_call_func_ipi(mask);
> }
>
>+static inline void smp_send_nmi_ipi(void)
>+{
>+      smp_ops.send_nmi_ipi();
>+}
>+
> void cpu_disable_common(void);
> void native_smp_prepare_boot_cpu(void);
> void native_smp_prepare_cpus(unsigned int max_cpus);
>@@ -139,6 +146,8 @@ void play_dead_common(void);
> void native_send_call_func_ipi(const struct cpumask *mask);
> void native_send_call_func_single_ipi(int cpu);
>
>+void native_send_nmi_ipi(void);
>+
> void smp_store_cpu_info(int id);
> #define cpu_physical_id(cpu)  per_cpu(x86_cpu_to_apicid, cpu)
>
>diff --git a/arch/x86/kernel/apic/nmi.c b/arch/x86/kernel/apic/nmi.c
>index 7ff61d6..40c1414 100644
>--- a/arch/x86/kernel/apic/nmi.c
>+++ b/arch/x86/kernel/apic/nmi.c
>@@ -561,7 +561,7 @@ void arch_trigger_all_cpu_backtrace(void)
>       cpumask_copy(&backtrace_mask, cpu_online_mask);
>
>       printk(KERN_INFO "sending NMI to all CPUs:\n");
>-      apic->send_IPI_all(NMI_VECTOR);
>+      smp_send_nmi_ipi();
>
>       /* Wait for up to 10 seconds for all CPUs to do the backtrace */
>       for (i = 0; i < 10 * 1000; i++) {
>diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
>index ec1de97..f53437f 100644
>--- a/arch/x86/kernel/smp.c
>+++ b/arch/x86/kernel/smp.c
>@@ -146,6 +146,11 @@ void native_send_call_func_ipi(const struct cpumask *mask)
>       free_cpumask_var(allbutself);
> }
>
>+void native_send_nmi_ipi(void)
>+{
>+      apic->send_IPI_all(NMI_VECTOR);
>+}
>+
> /*
>  * this function calls the 'stop' function on all other CPUs in the system.
>  */
>@@ -236,5 +241,7 @@ struct smp_ops smp_ops = {
>
>       .send_call_func_ipi     = native_send_call_func_ipi,
>       .send_call_func_single_ipi = native_send_call_func_single_ipi,
>+
>+      .send_nmi_ipi           = native_send_nmi_ipi,
> };
> EXPORT_SYMBOL_GPL(smp_ops);
>diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
>index 360f8d8..986f372 100644
>--- a/arch/x86/xen/smp.c
>+++ b/arch/x86/xen/smp.c
>@@ -20,6 +20,7 @@
> #include <asm/desc.h>
> #include <asm/pgtable.h>
> #include <asm/cpu.h>
>+#include <asm/nmi.h>
>
> #include <xen/interface/xen.h>
> #include <xen/interface/vcpu.h>
>@@ -456,6 +457,16 @@ static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id)
>       return IRQ_HANDLED;
> }
>
>+static void xen_nmi_ipi_func(void *info)
>+{
>+      nmi_watchdog_tick(task_pt_regs(current), 0/*reason*/);
>+}
>+
>+static void xen_send_nmi_ipi(void)
>+{
>+      smp_call_function(xen_nmi_ipi_func, NULL, 0);
>+}
>+
> static const struct smp_ops xen_smp_ops __initdata = {
>       .smp_prepare_boot_cpu = xen_smp_prepare_boot_cpu,
>       .smp_prepare_cpus = xen_smp_prepare_cpus,
>@@ -471,6 +482,8 @@ static const struct smp_ops xen_smp_ops __initdata = {
>
>       .send_call_func_ipi = xen_smp_send_call_function_ipi,
>       .send_call_func_single_ipi = xen_smp_send_call_function_single_ipi,
>+
>+      .send_nmi_ipi = xen_send_nmi_ipi,
> };
>
> void __init xen_smp_init(void)
>

