[Xen-devel] [PATCH v4 1/2] x86/smp: use a dedicated CPU mask in send_IPI_mask
Some callers of send_IPI_mask pass the scratch cpumask as the mask
parameter, so send_IPI_mask cannot itself use the scratch cpumask. The
following trace, obtained with a debug patch, shows one such caller:
(XEN) scratch CPU mask already in use by arch/x86/mm.c#_get_page_type+0x1f9/0x1abf
(XEN) Xen BUG at smp.c:45
[...]
(XEN) Xen call trace:
(XEN) [<ffff82d0802abb53>] R scratch_cpumask+0xd3/0xf9
(XEN) [<ffff82d0802abc21>] F send_IPI_mask+0x72/0x1ca
(XEN) [<ffff82d0802ac13e>] F flush_area_mask+0x10c/0x16c
(XEN) [<ffff82d080296c56>] F arch/x86/mm.c#_get_page_type+0x3ff/0x1abf
(XEN) [<ffff82d080298324>] F get_page_type+0xe/0x2c
(XEN) [<ffff82d08038624f>] F pv_set_gdt+0xa1/0x2aa
(XEN) [<ffff82d08027dfd6>] F arch_set_info_guest+0x1196/0x16ba
(XEN) [<ffff82d080207a55>] F default_initialise_vcpu+0xc7/0xd4
(XEN) [<ffff82d08027e55b>] F arch_initialise_vcpu+0x61/0xcd
(XEN) [<ffff82d080207e78>] F do_vcpu_op+0x219/0x690
(XEN) [<ffff82d08038be16>] F pv_hypercall+0x2f6/0x593
(XEN) [<ffff82d080396432>] F lstar_enter+0x112/0x120
_get_page_type will use the scratch cpumask to call flush_tlb_mask,
which in turn calls send_IPI_mask.
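To make the hazard concrete, here is a minimal standalone C sketch (not
Xen code; scratch_mask, send_to and the CPU count are made-up names used
purely for illustration): when the callee reuses the very buffer its
caller passed in as input, the input is wiped before it is consumed and
the IPIs are silently dropped.

/*
 * Standalone sketch of the aliasing hazard; all names are hypothetical
 * and only illustrate the pattern, this is not Xen code.
 */
#include <stdio.h>
#include <string.h>

#define NBITS 8

/* Stand-in for the single shared per-CPU scratch cpumask. */
static unsigned char scratch_mask[NBITS];

/*
 * Stand-in for send_IPI_mask(): builds "targets minus self" in scratch
 * space.  If it reuses scratch_mask while @mask *is* scratch_mask, the
 * memset below destroys the input before it is read.
 */
static void send_to(const unsigned char *mask, int self)
{
    unsigned char *scratch = scratch_mask;  /* the problematic reuse */

    memset(scratch, 0, NBITS);              /* clobbers @mask if aliased */
    for (int i = 0; i < NBITS; i++)
        scratch[i] = (i != self) ? mask[i] : 0;

    for (int i = 0; i < NBITS; i++)
        if (scratch[i])
            printf("IPI -> CPU%d\n", i);
}

int main(void)
{
    /*
     * Stand-in for a caller like _get_page_type(): it builds its target
     * set in the scratch mask and hands that same buffer down the
     * flush/IPI path.
     */
    memset(scratch_mask, 1, NBITS);
    send_to(scratch_mask, 0);   /* prints nothing: the input was wiped */
    return 0;
}

With a single shared scratch buffer there is no way for send_IPI_mask to
tell whether @mask aliases it.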
Fix this by using a dedicated per-CPU cpumask in send_IPI_mask.
Fixes: 5500d265a2a8 ('x86/smp: use APIC ALLBUT destination shorthand when possible')
Signed-off-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>
---
xen/arch/x86/smp.c | 4 +++-
xen/arch/x86/smpboot.c | 9 ++++++++-
2 files changed, 11 insertions(+), 2 deletions(-)
diff --git a/xen/arch/x86/smp.c b/xen/arch/x86/smp.c
index 0461812cf6..072638f0f6 100644
--- a/xen/arch/x86/smp.c
+++ b/xen/arch/x86/smp.c
@@ -59,6 +59,8 @@ static void send_IPI_shortcut(unsigned int shortcut, int vector,
apic_write(APIC_ICR, cfg);
}
+DECLARE_PER_CPU(cpumask_var_t, send_ipi_cpumask);
+
/*
* send_IPI_mask(cpumask, vector): sends @vector IPI to CPUs in @cpumask,
* excluding the local CPU. @cpumask may be empty.
@@ -67,7 +69,7 @@ static void send_IPI_shortcut(unsigned int shortcut, int vector,
void send_IPI_mask(const cpumask_t *mask, int vector)
{
bool cpus_locked = false;
- cpumask_t *scratch = this_cpu(scratch_cpumask);
+ cpumask_t *scratch = this_cpu(send_ipi_cpumask);
if ( in_irq() || in_mce_handler() || in_nmi_handler() )
{
diff --git a/xen/arch/x86/smpboot.c b/xen/arch/x86/smpboot.c
index ad49f2dcd7..6c548b0b53 100644
--- a/xen/arch/x86/smpboot.c
+++ b/xen/arch/x86/smpboot.c
@@ -57,6 +57,9 @@ DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_mask);
DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, scratch_cpumask);
static cpumask_t scratch_cpu0mask;
+DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, send_ipi_cpumask);
+static cpumask_t send_ipi_cpu0mask;
+
cpumask_t cpu_online_map __read_mostly;
EXPORT_SYMBOL(cpu_online_map);
@@ -930,6 +933,8 @@ static void cpu_smpboot_free(unsigned int cpu, bool remove)
FREE_CPUMASK_VAR(per_cpu(cpu_core_mask, cpu));
if ( per_cpu(scratch_cpumask, cpu) != &scratch_cpu0mask )
FREE_CPUMASK_VAR(per_cpu(scratch_cpumask, cpu));
+ if ( per_cpu(send_ipi_cpumask, cpu) != &send_ipi_cpu0mask )
+ FREE_CPUMASK_VAR(per_cpu(send_ipi_cpumask, cpu));
}
cleanup_cpu_root_pgt(cpu);
@@ -1034,7 +1039,8 @@ static int cpu_smpboot_alloc(unsigned int cpu)
if ( !(cond_zalloc_cpumask_var(&per_cpu(cpu_sibling_mask, cpu)) &&
cond_zalloc_cpumask_var(&per_cpu(cpu_core_mask, cpu)) &&
- cond_alloc_cpumask_var(&per_cpu(scratch_cpumask, cpu))) )
+ cond_alloc_cpumask_var(&per_cpu(scratch_cpumask, cpu)) &&
+ cond_alloc_cpumask_var(&per_cpu(send_ipi_cpumask, cpu))) )
goto out;
rc = 0;
@@ -1175,6 +1181,7 @@ void __init smp_prepare_boot_cpu(void)
cpumask_set_cpu(cpu, &cpu_present_map);
#if NR_CPUS > 2 * BITS_PER_LONG
per_cpu(scratch_cpumask, cpu) = &scratch_cpu0mask;
+ per_cpu(send_ipi_cpumask, cpu) = &send_ipi_cpu0mask;
#endif
get_cpu_info()->use_pv_cr3 = false;
--
2.25.0
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel