
[Xen-devel] [PATCH] x86/smp: use APIC ALLBUT destination shorthand when possible



If the IPI destination mask matches the mask of online CPUs, use the
APIC ALLBUT destination shorthand in order to send an IPI to all CPUs
on the system except the current one. The shorthand can only be used
safely when no CPU hotplug or unplug operations are taking place, when
there are either no offline CPUs or all offline CPUs have been onlined
and parked, and when all CPUs in the system have been accounted for
(ie: the number of CPUs doesn't exceed NR_CPUS and all APIC IDs are
below MAX_APICS).
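
For reference, with the shorthand the whole operation collapses into a
single ICR write, as done by __default_send_IPI_shortcut() in the
patch below:

    /* Fixed delivery IPI to every CPU but the sender, via shorthand. */
    apic_wait_icr_idle();
    apic_write(APIC_ICR,
               APIC_DM_FIXED | APIC_DEST_ALLBUT | APIC_DEST_PHYSICAL |
               vector);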

This is especially beneficial when using the PV shim, since using the
shorthand avoids performing an APIC register write (or multiple ones
if using xAPIC mode) for each destination when doing a global TLB
flush.
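
Without the shorthand every destination needs its own ICR programming,
which in xAPIC mode means two register writes per target CPU. Roughly
(a sketch of the per-destination path, reusing __prepare_ICR2() from
smp.c; the real loop lives in the genapic implementations):

    /* Sketch: one fixed delivery IPI per destination CPU (xAPIC). */
    unsigned int cpu;

    for_each_cpu ( cpu, mask )
    {
        apic_wait_icr_idle();
        apic_write(APIC_ICR2, __prepare_ICR2(cpu_physical_id(cpu)));
        apic_write(APIC_ICR, APIC_DM_FIXED | APIC_DEST_PHYSICAL | vector);
    }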

The lock time of flush_lock on a 32 vCPU guest using the shim without
the shorthand is:

Global lock flush_lock: addr=ffff82d0804b21c0, lockval=f602f602, not locked
  lock:228455938(79406065573135), block:205908580(556416605761539)

Average lock time: 347577ns

While the same guest using the shorthand:

Global lock flush_lock: addr=ffff82d0804b41c0, lockval=d9c4d9bc, cpu=12
  lock:1890775(416719148054), block:1663958(2500161282949)

Average lock time: 220395ns

That's approximately a 1/3 reduction in the average lock time.
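
The averages are simply the accumulated lock time divided by the
number of acquisitions reported in each trace:

    79406065573135 / 228455938 ~= 347577ns
      416719148054 /   1890775 ~= 220395ns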

Note that this requires locking the CPU maps (get_cpu_maps), which is
done with a trylock. This is currently safe, as all users of
cpu_add_remove_lock do a trylock, but it will need reevaluating if
non-trylock users appear.
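
A condensed sketch of the resulting logic in send_IPI_mask(), leaving
out the online/parked checks and the scratch cpumask comparison the
real code performs:

    /* Shorthand is only attempted with the CPU maps try-locked. */
    bool cpus_locked = local_irq_is_enabled() && get_cpu_maps();

    if ( cpus_locked )
    {
        /* cpu_add_remove_lock held: online map can't change under us. */
        __default_send_IPI_shortcut(APIC_DEST_ALLBUT, vector,
                                    APIC_DEST_PHYSICAL);
        put_cpu_maps();
    }
    else
        /* Trylock failed or IRQs disabled: per-destination fallback. */
        alternative_vcall(genapic.send_IPI_mask, mask, vector);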

There's also some code movement of __prepare_ICR and
__default_send_IPI_shortcut; this is a non-functional change, but I
didn't feel it warranted splitting into a separate patch.

Signed-off-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>
---
Changes since v1:
 - Move the shorthand logic to send_IPI_mask.
 - Check interrupts are enabled before trying to get the cpu maps
   lock.
 - Move __prepare_ICR and __default_send_IPI_shortcut.
---
 xen/arch/x86/acpi/boot.c  |  1 +
 xen/arch/x86/mpparse.c    |  5 +++
 xen/arch/x86/smp.c        | 86 +++++++++++++++++++++++++++------------
 xen/include/asm-x86/smp.h |  2 +
 4 files changed, 68 insertions(+), 26 deletions(-)

diff --git a/xen/arch/x86/acpi/boot.c b/xen/arch/x86/acpi/boot.c
index 15542a9bdf..88e1a89ff0 100644
--- a/xen/arch/x86/acpi/boot.c
+++ b/xen/arch/x86/acpi/boot.c
@@ -103,6 +103,7 @@ acpi_parse_x2apic(struct acpi_subtable_header *header, const unsigned long end)
                               processor->lapic_flags & ACPI_MADT_ENABLED
                               ? KERN_WARNING "WARNING: " : KERN_INFO,
                               processor->local_apic_id, processor->uid);
+               cpu_overflow = true;
                /*
                 * Must not return an error here, to prevent
                 * acpi_table_parse_entries() from terminating early.
diff --git a/xen/arch/x86/mpparse.c b/xen/arch/x86/mpparse.c
index f057d9162f..8d7739fbf4 100644
--- a/xen/arch/x86/mpparse.c
+++ b/xen/arch/x86/mpparse.c
@@ -66,6 +66,9 @@ static unsigned int __initdata disabled_cpus;
 /* Bitmask of physically existing CPUs */
 physid_mask_t phys_cpu_present_map;
 
+/* Record whether CPUs haven't been added due to overflows. */
+bool __read_mostly cpu_overflow;
+
 void __init set_nr_cpu_ids(unsigned int max_cpus)
 {
        unsigned int tot_cpus = num_processors + disabled_cpus;
@@ -160,6 +163,7 @@ static int MP_processor_info_x(struct mpc_config_processor *m,
                printk_once(XENLOG_WARNING
                            "WARNING: NR_CPUS limit of %u reached - ignoring 
further processors\n",
                            nr_cpu_ids);
+               cpu_overflow = true;
                return -ENOSPC;
        }
 
@@ -167,6 +171,7 @@ static int MP_processor_info_x(struct mpc_config_processor *m,
            && genapic.name == apic_default.name) {
                printk_once(XENLOG_WARNING
                            "WARNING: CPUs limit of 8 reached - ignoring futher 
processors\n");
+               cpu_overflow = true;
                return -ENOSPC;
        }
 
diff --git a/xen/arch/x86/smp.c b/xen/arch/x86/smp.c
index c8e5913e47..6510dd84ab 100644
--- a/xen/arch/x86/smp.c
+++ b/xen/arch/x86/smp.c
@@ -8,6 +8,7 @@
  *     later.
  */
 
+#include <xen/cpu.h>
 #include <xen/irq.h>
 #include <xen/sched.h>
 #include <xen/delay.h>
@@ -23,6 +24,31 @@
 #include <irq_vectors.h>
 #include <mach_apic.h>
 
+static inline int __prepare_ICR(unsigned int shortcut, int vector)
+{
+    return APIC_DM_FIXED | shortcut | vector;
+}
+
+static void __default_send_IPI_shortcut(unsigned int shortcut, int vector,
+                                        unsigned int dest)
+{
+    unsigned int cfg;
+
+    /*
+     * Wait for idle.
+     */
+    apic_wait_icr_idle();
+
+    /*
+     * prepare target chip field
+     */
+    cfg = __prepare_ICR(shortcut, vector) | dest;
+    /*
+     * Send the IPI. The write to APIC_ICR fires this off.
+     */
+    apic_write(APIC_ICR, cfg);
+}
+
 /*
  * send_IPI_mask(cpumask, vector): sends @vector IPI to CPUs in @cpumask,
  * excluding the local CPU. @cpumask may be empty.
@@ -30,7 +56,40 @@
 
 void send_IPI_mask(const cpumask_t *mask, int vector)
 {
-    alternative_vcall(genapic.send_IPI_mask, mask, vector);
+    bool cpus_locked = false;
+
+    /*
+     * Prevent any CPU hot{un}plug while sending the IPIs if we are to use
+     * a shorthand, also refuse to use a shorthand if not all CPUs are
+     * online or have been parked.
+     */
+    if ( system_state > SYS_STATE_smp_boot && !cpu_overflow &&
+         /* NB: get_cpu_maps lock requires enabled interrupts. */
+         local_irq_is_enabled() && (cpus_locked = get_cpu_maps()) &&
+         (park_offline_cpus ||
+          cpumask_equal(&cpu_online_map, &cpu_present_map)) )
+    {
+        cpumask_copy(this_cpu(scratch_cpumask), &cpu_online_map);
+        cpumask_clear_cpu(smp_processor_id(), this_cpu(scratch_cpumask));
+    }
+    else
+    {
+        if ( cpus_locked )
+        {
+            put_cpu_maps();
+            cpus_locked = false;
+        }
+        cpumask_clear(this_cpu(scratch_cpumask));
+    }
+
+    if ( cpumask_equal(mask, this_cpu(scratch_cpumask)) )
+        __default_send_IPI_shortcut(APIC_DEST_ALLBUT, vector,
+                                    APIC_DEST_PHYSICAL);
+    else
+        alternative_vcall(genapic.send_IPI_mask, mask, vector);
+
+    if ( cpus_locked )
+        put_cpu_maps();
 }
 
 void send_IPI_self(int vector)
@@ -80,11 +139,6 @@ void send_IPI_self(int vector)
  * The following functions deal with sending IPIs between CPUs.
  */
 
-static inline int __prepare_ICR (unsigned int shortcut, int vector)
-{
-    return APIC_DM_FIXED | shortcut | vector;
-}
-
 static inline int __prepare_ICR2 (unsigned int mask)
 {
     return SET_xAPIC_DEST_FIELD(mask);
@@ -99,26 +153,6 @@ void apic_wait_icr_idle(void)
         cpu_relax();
 }
 
-static void __default_send_IPI_shortcut(unsigned int shortcut, int vector,
-                                    unsigned int dest)
-{
-    unsigned int cfg;
-
-    /*
-     * Wait for idle.
-     */
-    apic_wait_icr_idle();
-
-    /*
-     * prepare target chip field
-     */
-    cfg = __prepare_ICR(shortcut, vector) | dest;
-    /*
-     * Send the IPI. The write to APIC_ICR fires this off.
-     */
-    apic_write(APIC_ICR, cfg);
-}
-
 void send_IPI_self_legacy(uint8_t vector)
 {
     __default_send_IPI_shortcut(APIC_DEST_SELF, vector, APIC_DEST_PHYSICAL);
diff --git a/xen/include/asm-x86/smp.h b/xen/include/asm-x86/smp.h
index dbeed2fd41..3df4185744 100644
--- a/xen/include/asm-x86/smp.h
+++ b/xen/include/asm-x86/smp.h
@@ -84,6 +84,8 @@ extern cpumask_t **socket_cpumask;
 #define get_cpu_current(cpu) \
     (get_cpu_info_from_stack((unsigned long)stack_base[cpu])->current_vcpu)
 
+extern bool cpu_overflow;
+
 #endif /* !__ASSEMBLY__ */
 
 #endif
-- 
2.24.1

