
[Xen-changelog] [xen staging] x86/smp: use a dedicated CPU mask in send_IPI_mask



commit 4d901aa67a2005c61f6057f5f6b59c17269d9456
Author:     Roger Pau Monné <roger.pau@xxxxxxxxxx>
AuthorDate: Fri Feb 28 16:24:26 2020 +0100
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Fri Feb 28 16:24:26 2020 +0100

    x86/smp: use a dedicated CPU mask in send_IPI_mask
    
    Some callers of send_IPI_mask pass the scratch cpumask as the mask
    parameter, so send_IPI_mask itself cannot safely use the scratch
    cpumask. The following trace, obtained with a debug patch, shows one
    such caller:
    
    (XEN) scratch CPU mask already in use by arch/x86/mm.c#_get_page_type+0x1f9/0x1abf
    (XEN) Xen BUG at smp.c:45
    [...]
    (XEN) Xen call trace:
    (XEN)    [<ffff82d0802abb53>] R scratch_cpumask+0xd3/0xf9
    (XEN)    [<ffff82d0802abc21>] F send_IPI_mask+0x72/0x1ca
    (XEN)    [<ffff82d0802ac13e>] F flush_area_mask+0x10c/0x16c
    (XEN)    [<ffff82d080296c56>] F arch/x86/mm.c#_get_page_type+0x3ff/0x1abf
    (XEN)    [<ffff82d080298324>] F get_page_type+0xe/0x2c
    (XEN)    [<ffff82d08038624f>] F pv_set_gdt+0xa1/0x2aa
    (XEN)    [<ffff82d08027dfd6>] F arch_set_info_guest+0x1196/0x16ba
    (XEN)    [<ffff82d080207a55>] F default_initialise_vcpu+0xc7/0xd4
    (XEN)    [<ffff82d08027e55b>] F arch_initialise_vcpu+0x61/0xcd
    (XEN)    [<ffff82d080207e78>] F do_vcpu_op+0x219/0x690
    (XEN)    [<ffff82d08038be16>] F pv_hypercall+0x2f6/0x593
    (XEN)    [<ffff82d080396432>] F lstar_enter+0x112/0x120
    
    _get_page_type will use the scratch cpumask to call flush_tlb_mask,
    which in turn calls send_IPI_mask.
    
    Fix this by using a dedicated per-CPU cpumask in send_IPI_mask.
    
    Fixes: 5500d265a2a8 ('x86/smp: use APIC ALLBUT destination shorthand when possible')
    Signed-off-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>
    Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx>
---
 xen/arch/x86/smp.c        | 2 +-
 xen/arch/x86/smpboot.c    | 9 ++++++++-
 xen/include/asm-x86/smp.h | 1 +
 3 files changed, 10 insertions(+), 2 deletions(-)
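
To make the aliasing hazard concrete, here is a minimal, self-contained C
sketch (not Xen code: the cpumask is modelled as a plain 64-bit bitmap and
every name below is made up for illustration). A callee that borrows the
same per-CPU scratch buffer its caller already passed in silently corrupts
the caller's target set, which is exactly what the trace above shows for
the _get_page_type -> flush_tlb_mask -> send_IPI_mask path.

#include <stdint.h>
#include <stdio.h>

/* Toy stand-in for a per-CPU scratch cpumask: one bit per CPU. */
static uint64_t scratch_cpumask;

/* Models the pre-fix send_IPI_mask(): it borrows the scratch mask too. */
static void send_ipi_mask(const uint64_t *mask)
{
    uint64_t *scratch = &scratch_cpumask;

    /* If *mask aliases the scratch mask, this clobbers the caller's data. */
    *scratch = ~*mask;                     /* e.g. "CPUs not targeted" */
    printf("targets seen after callee: %#llx\n", (unsigned long long)*mask);
}

/* Models a caller such as _get_page_type(): it builds its target set in
 * the per-CPU scratch mask and hands that very mask down the flush path. */
static void caller(void)
{
    uint64_t *mask = &scratch_cpumask;

    *mask = 0x0e;                          /* wants to reach CPUs 1-3 */
    printf("targets before the call:   %#llx\n", (unsigned long long)*mask);
    send_ipi_mask(mask);                   /* mask and scratch now alias */
}

int main(void)
{
    caller();                              /* prints 0xe, then the corrupted value */
    return 0;
}

The patch below removes the aliasing by giving send_IPI_mask its own
per-CPU mask, send_ipi_cpumask, instead of sharing scratch_cpumask with
its callers.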

diff --git a/xen/arch/x86/smp.c b/xen/arch/x86/smp.c
index a90c782d53..bcead5d01b 100644
--- a/xen/arch/x86/smp.c
+++ b/xen/arch/x86/smp.c
@@ -66,7 +66,7 @@ static void send_IPI_shortcut(unsigned int shortcut, int vector,
 void send_IPI_mask(const cpumask_t *mask, int vector)
 {
     bool cpus_locked = false;
-    cpumask_t *scratch = this_cpu(scratch_cpumask);
+    cpumask_t *scratch = this_cpu(send_ipi_cpumask);
 
     if ( in_irq() || in_mce_handler() || in_nmi_handler() )
     {
diff --git a/xen/arch/x86/smpboot.c b/xen/arch/x86/smpboot.c
index ad49f2dcd7..6c548b0b53 100644
--- a/xen/arch/x86/smpboot.c
+++ b/xen/arch/x86/smpboot.c
@@ -57,6 +57,9 @@ DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_mask);
 DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, scratch_cpumask);
 static cpumask_t scratch_cpu0mask;
 
+DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, send_ipi_cpumask);
+static cpumask_t send_ipi_cpu0mask;
+
 cpumask_t cpu_online_map __read_mostly;
 EXPORT_SYMBOL(cpu_online_map);
 
@@ -930,6 +933,8 @@ static void cpu_smpboot_free(unsigned int cpu, bool remove)
         FREE_CPUMASK_VAR(per_cpu(cpu_core_mask, cpu));
         if ( per_cpu(scratch_cpumask, cpu) != &scratch_cpu0mask )
             FREE_CPUMASK_VAR(per_cpu(scratch_cpumask, cpu));
+        if ( per_cpu(send_ipi_cpumask, cpu) != &send_ipi_cpu0mask )
+            FREE_CPUMASK_VAR(per_cpu(send_ipi_cpumask, cpu));
     }
 
     cleanup_cpu_root_pgt(cpu);
@@ -1034,7 +1039,8 @@ static int cpu_smpboot_alloc(unsigned int cpu)
 
     if ( !(cond_zalloc_cpumask_var(&per_cpu(cpu_sibling_mask, cpu)) &&
            cond_zalloc_cpumask_var(&per_cpu(cpu_core_mask, cpu)) &&
-           cond_alloc_cpumask_var(&per_cpu(scratch_cpumask, cpu))) )
+           cond_alloc_cpumask_var(&per_cpu(scratch_cpumask, cpu)) &&
+           cond_alloc_cpumask_var(&per_cpu(send_ipi_cpumask, cpu))) )
         goto out;
 
     rc = 0;
@@ -1175,6 +1181,7 @@ void __init smp_prepare_boot_cpu(void)
     cpumask_set_cpu(cpu, &cpu_present_map);
 #if NR_CPUS > 2 * BITS_PER_LONG
     per_cpu(scratch_cpumask, cpu) = &scratch_cpu0mask;
+    per_cpu(send_ipi_cpumask, cpu) = &send_ipi_cpu0mask;
 #endif
 
     get_cpu_info()->use_pv_cr3 = false;
diff --git a/xen/include/asm-x86/smp.h b/xen/include/asm-x86/smp.h
index 92d69a5ea0..6150363655 100644
--- a/xen/include/asm-x86/smp.h
+++ b/xen/include/asm-x86/smp.h
@@ -22,6 +22,7 @@
 DECLARE_PER_CPU(cpumask_var_t, cpu_sibling_mask);
 DECLARE_PER_CPU(cpumask_var_t, cpu_core_mask);
 DECLARE_PER_CPU(cpumask_var_t, scratch_cpumask);
+DECLARE_PER_CPU(cpumask_var_t, send_ipi_cpumask);
 
 /*
  * Do we, for platform reasons, need to actually keep CPUs online when we
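
A note on the allocation pattern in the smpboot.c hunks above (hedged: the
real definitions live in xen/include/xen/cpumask.h): cpumask_var_t has two
build-time shapes, roughly as sketched below with made-up example values.
In the pointer shape the boot CPU runs smp_prepare_boot_cpu() before heap
allocations are possible, which is why send_ipi_cpumask for CPU0 is pointed
at the static send_ipi_cpu0mask (mirroring scratch_cpu0mask) and why
cpu_smpboot_free() takes care never to free that static instance.

#define BITS_PER_LONG 64        /* assumption: 64-bit build             */
#define NR_CPUS       4096      /* assumption: example build-time value */

typedef struct {
    unsigned long bits[(NR_CPUS + BITS_PER_LONG - 1) / BITS_PER_LONG];
} cpumask_t;

#if NR_CPUS > 2 * BITS_PER_LONG
typedef cpumask_t *cpumask_var_t;    /* pointer shape: must be allocated    */
#else
typedef cpumask_t cpumask_var_t[1];  /* embedded shape: nothing to allocate */
#endif

If I read cond_alloc_cpumask_var() right, it only allocates when the
per-CPU pointer is still NULL, so CPU0 keeps the static mask while
secondary CPUs get heap-allocated ones in cpu_smpboot_alloc().
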
--
generated by git-patchbot for /home/xen/git/xen.git#staging
