
[patch V2 12/38] x86/smpboot: Make TSC synchronization function call based



From: Thomas Gleixner <tglx@xxxxxxxxxxxxx>

Spin-waiting on the control CPU until the AP reaches the TSC
synchronization code is just a waste, especially when no synchronization
is required at all.

As the synchronization has to run with interrupts disabled, the control
CPU's part can just be done from an SMP function call. The upcoming AP
issues that call asynchronously, and only when synchronization is
actually required.
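
For reference, a minimal sketch of the calling pattern this relies on
(not part of the patch; the function names below are illustrative only):
the AP passes its CPU number through the void * argument of
smp_call_function_single() and uses wait == 0, so the call is
asynchronous and the AP does not spin until the control CPU has run the
function.

/* Illustrative sketch only -- not the code added by this patch */
#include <linux/smp.h>
#include <linux/cpumask.h>
#include <linux/printk.h>

static void remote_part(void *__cpu)
{
        /* Recover the CPU number smuggled through the void * argument */
        unsigned int cpu = (unsigned long)__cpu;

        pr_debug("running the control CPU's part for CPU#%u\n", cpu);
}

static void kick_control_cpu(unsigned int cpu)
{
        /*
         * wait == 0: fire and forget. The caller continues immediately
         * instead of spinning until the remote function has finished.
         */
        smp_call_function_single(cpumask_first(cpu_online_mask),
                                 remote_part, (void *)(unsigned long)cpu, 0);
}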

Signed-off-by: Thomas Gleixner <tglx@xxxxxxxxxxxxx>

---
 arch/x86/include/asm/tsc.h |    2 --
 arch/x86/kernel/smpboot.c  |   20 +++-----------------
 arch/x86/kernel/tsc_sync.c |   36 +++++++++++-------------------------
 3 files changed, 14 insertions(+), 44 deletions(-)
---
--- a/arch/x86/include/asm/tsc.h
+++ b/arch/x86/include/asm/tsc.h
@@ -55,12 +55,10 @@ extern bool tsc_async_resets;
 #ifdef CONFIG_X86_TSC
 extern bool tsc_store_and_check_tsc_adjust(bool bootcpu);
 extern void tsc_verify_tsc_adjust(bool resume);
-extern void check_tsc_sync_source(int cpu);
 extern void check_tsc_sync_target(void);
 #else
 static inline bool tsc_store_and_check_tsc_adjust(bool bootcpu) { return false; }
 static inline void tsc_verify_tsc_adjust(bool resume) { }
-static inline void check_tsc_sync_source(int cpu) { }
 static inline void check_tsc_sync_target(void) { }
 #endif
 
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -278,11 +278,7 @@ static void notrace start_secondary(void
        /* Otherwise gcc will move up smp_processor_id() before cpu_init() */
        barrier();
 
-       /*
-        * Check TSC synchronization with the control CPU, which will do
-        * its part of this from wait_cpu_online(), making it an implicit
-        * synchronization point.
-        */
+       /* Check TSC synchronization with the control CPU. */
        check_tsc_sync_target();
 
        /*
@@ -1144,21 +1140,11 @@ static void wait_cpu_callin(unsigned int
 }
 
 /*
- * Bringup step four: Synchronize the TSC and wait for the target AP
- * to reach set_cpu_online() in start_secondary().
+ * Bringup step four: Wait for the target AP to reach set_cpu_online() in
+ * start_secondary().
  */
 static void wait_cpu_online(unsigned int cpu)
 {
-       unsigned long flags;
-
-       /*
-        * Check TSC synchronization with the AP (keep irqs disabled
-        * while doing so):
-        */
-       local_irq_save(flags);
-       check_tsc_sync_source(cpu);
-       local_irq_restore(flags);
-
        /*
         * Wait for the AP to mark itself online, so the core caller
         * can drop sparse_irq_lock.
--- a/arch/x86/kernel/tsc_sync.c
+++ b/arch/x86/kernel/tsc_sync.c
@@ -245,7 +245,6 @@ bool tsc_store_and_check_tsc_adjust(bool
  */
 static atomic_t start_count;
 static atomic_t stop_count;
-static atomic_t skip_test;
 static atomic_t test_runs;
 
 /*
@@ -344,21 +343,14 @@ static inline unsigned int loop_timeout(
 }
 
 /*
- * Source CPU calls into this - it waits for the freshly booted
- * target CPU to arrive and then starts the measurement:
+ * The freshly booted CPU initiates this via an async SMP function call.
  */
-void check_tsc_sync_source(int cpu)
+static void check_tsc_sync_source(void *__cpu)
 {
+       unsigned int cpu = (unsigned long)__cpu;
        int cpus = 2;
 
        /*
-        * No need to check if we already know that the TSC is not
-        * synchronized or if we have no TSC.
-        */
-       if (unsynchronized_tsc())
-               return;
-
-       /*
         * Set the maximum number of test runs to
         *  1 if the CPU does not provide the TSC_ADJUST MSR
         *  3 if the MSR is available, so the target can try to adjust
@@ -368,16 +360,9 @@ void check_tsc_sync_source(int cpu)
        else
                atomic_set(&test_runs, 3);
 retry:
-       /*
-        * Wait for the target to start or to skip the test:
-        */
-       while (atomic_read(&start_count) != cpus - 1) {
-               if (atomic_read(&skip_test) > 0) {
-                       atomic_set(&skip_test, 0);
-                       return;
-               }
+       /* Wait for the target to start. */
+       while (atomic_read(&start_count) != cpus - 1)
                cpu_relax();
-       }
 
        /*
         * Trigger the target to continue into the measurement too:
@@ -397,14 +382,14 @@ void check_tsc_sync_source(int cpu)
        if (!nr_warps) {
                atomic_set(&test_runs, 0);
 
-               pr_debug("TSC synchronization [CPU#%d -> CPU#%d]: passed\n",
+               pr_debug("TSC synchronization [CPU#%d -> CPU#%u]: passed\n",
                        smp_processor_id(), cpu);
 
        } else if (atomic_dec_and_test(&test_runs) || random_warps) {
                /* Force it to 0 if random warps brought us here */
                atomic_set(&test_runs, 0);
 
-               pr_warn("TSC synchronization [CPU#%d -> CPU#%d]:\n",
+               pr_warn("TSC synchronization [CPU#%d -> CPU#%u]:\n",
                        smp_processor_id(), cpu);
                pr_warn("Measured %Ld cycles TSC warp between CPUs, "
                        "turning off TSC clock.\n", max_warp);
@@ -457,11 +442,12 @@ void check_tsc_sync_target(void)
         * SoCs the TSC is frequency synchronized, but still the TSC ADJUST
         * register might have been wreckaged by the BIOS..
         */
-       if (tsc_store_and_check_tsc_adjust(false) || tsc_clocksource_reliable) {
-               atomic_inc(&skip_test);
+       if (tsc_store_and_check_tsc_adjust(false) || tsc_clocksource_reliable)
                return;
-       }
 
+       /* Kick the control CPU into the TSC synchronization function */
+       smp_call_function_single(cpumask_first(cpu_online_mask), check_tsc_sync_source,
+                                (unsigned long *)(unsigned long)cpu, 0);
 retry:
        /*
         * Register this CPU's participation and wait for the
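
A side note, not part of the patch: even with the control CPU's part now
running from an SMP function call, source and target still meet through
the start_count/stop_count counters in tsc_sync.c. A stripped-down sketch
of that rendezvous pattern (names simplified, illustrative only):

#include <linux/atomic.h>
#include <asm/processor.h>

static atomic_t arrived;

/* Both sides call this; it returns once all participants have checked in */
static void rendezvous(int nr_cpus)
{
        atomic_inc(&arrived);

        while (atomic_read(&arrived) != nr_cpus)
                cpu_relax();
}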