[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [Xen-devel] Dmesg protocol for C2D E8400&ASUS P5K Premium (stack trace came up twice)



Boris Derzhavets wrote:

[    6.011212] ------------[ cut here ]------------
[ 6.011257] WARNING: at kernel/smp.c:329 smp_call_function_many+0x1e9/0x250()
[    6.011303] Hardware name: P5K Premium
[ 6.011345] Modules linked in: usbhid(+) hid sd_mod crc_t10dif sr_mod cdrom r8169 mii ohci1394 ieee1394 sg sky2 pata_acpi ata_generic ehci_hcd uhci_hcd usbcore dm_mirror dm_region_hash dm_log dm_snapshot dm_mod thermal processor fan fuse
[    6.012219] Pid: 2303, comm: modprobe Not tainted 2.6.29-rc5-tip #1
[    6.012263] Call Trace:
[    6.012309]  [<ffffffff8024c050>] warn_slowpath+0xd0/0x130
[    6.012356]  [<ffffffff8022d9ae>] ? pvclock_clocksource_read+0x4e/0x90
[    6.012403]  [<ffffffff8020eeed>] ? xen_force_evtchn_callback+0xd/0x10
[    6.012450]  [<ffffffff8020f732>] ? check_events+0x12/0x20
[    6.012495]  [<ffffffff8020f531>] ? xen_clocksource_read+0x21/0x30
[    6.012542]  [<ffffffff80210617>] ? xen_spin_lock+0xa7/0x110
[    6.012587]  [<ffffffff8020f71f>] ? xen_restore_fl_direct_end+0x0/0x1
[    6.012634]  [<ffffffff8052e373>] ? _spin_lock+0x13/0x30
[ 6.012681] [<ffffffff802a6690>] ? perf_counter_task_sched_in+0x60/0x190
[    6.012728]  [<ffffffff8022d9ae>] ? pvclock_clocksource_read+0x4e/0x90
[ 6.012776] [<ffffffff8020bdf9>] ? __raw_callee_save_xen_pmd_val+0x11/0x1e
[    6.012823]  [<ffffffff802360b0>] ? do_flush_tlb_all+0x0/0x50
[    6.012869]  [<ffffffff802742a9>] smp_call_function_many+0x1e9/0x250
[    6.012915]  [<ffffffff802360b0>] ? do_flush_tlb_all+0x0/0x50
[    6.012961]  [<ffffffff802360b0>] ? do_flush_tlb_all+0x0/0x50
[    6.013007]  [<ffffffff80274330>] smp_call_function+0x20/0x30
[    6.013053]  [<ffffffff80251fcf>] on_each_cpu+0x1f/0x50
[    6.013098]  [<ffffffff80235ea7>] flush_tlb_all+0x17/0x20


Could you try this patch:

Subject: [PATCH] x86: implement flush_tlb_all in terms of flush_tlb_others

Modify flush_tlb_others to take a NULL mm, meaning "flush user
and kernel tlbs", and implement flush_tlb_all in terms of that.

The principal motivation for this is to make sure it goes via
paravirt_ops, which allows for more efficient cross-cpu tlb
flushes than a plain IPI.

Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@xxxxxxxxxx>

diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 14c5af4..2d112f9 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -148,7 +148,13 @@ void smp_invalidate_interrupt(struct pt_regs *regs)
                 * BUG();
                 */

-       if (f->flush_mm == percpu_read(cpu_tlbstate.active_mm)) {
+       if (f->flush_mm == NULL) {
+               /* No mm - flush all kernel and user tlbs */
+               __flush_tlb_all();
+               if (percpu_read(cpu_tlbstate.state) == TLBSTATE_LAZY)
+                       leave_mm(cpu);
+       } else if (f->flush_mm == percpu_read(cpu_tlbstate.active_mm)) {
+               /* Flushing a specific user mm */
                if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK) {
                        if (f->flush_va == TLB_FLUSH_ALL)
                                local_flush_tlb();
@@ -281,16 +287,15 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
        preempt_enable();
}

-static void do_flush_tlb_all(void *info)
+void flush_tlb_all(void)
{
-       unsigned long cpu = smp_processor_id();
+       preempt_disable();
+
+       flush_tlb_others(cpu_online_mask, NULL, TLB_FLUSH_ALL);

        __flush_tlb_all();
        if (percpu_read(cpu_tlbstate.state) == TLBSTATE_LAZY)
-               leave_mm(cpu);
-}
+               leave_mm(smp_processor_id());

-void flush_tlb_all(void)
-{
-       on_each_cpu(do_flush_tlb_all, NULL, 1);
+       preempt_enable();
}
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 7cadab1..dc83b47 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1350,7 +1350,6 @@ static void xen_flush_tlb_others(const struct cpumask *cpus,
        struct multicall_space mcs;

        BUG_ON(cpumask_empty(cpus));
-       BUG_ON(!mm);

        mcs = xen_mc_entry(sizeof(*args));
        args = mcs.args;



_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.