[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-changelog] [xen-unstable] x86: trust new architecturally-defined TSC Invariant bit on Intel systems



# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1255077243 -3600
# Node ID 18e80cd73366095dc50925c8a44a0774b2ca323a
# Parent  1593073ad670ba057fc854021413590167a0ae06
x86: trust new architecturally-defined TSC Invariant bit on Intel systems

Trust new architecturally-defined TSC Invariant bit (on
Intel systems only for now, AMD TBD).

Signed-off-by: Dan Magenheimer <dan.magenheimer@xxxxxxxxxx>
Signed-off-by: Keir Fraser <keir.fraser@xxxxxxxxxx>
---
 xen/arch/x86/setup.c   |   12 ++++++++++++
 xen/arch/x86/smpboot.c |    8 ++++++++
 xen/arch/x86/time.c    |   31 +++++++++++++++----------------
 3 files changed, 35 insertions(+), 16 deletions(-)

diff -r 1593073ad670 -r 18e80cd73366 xen/arch/x86/setup.c
--- a/xen/arch/x86/setup.c      Fri Oct 09 09:33:29 2009 +0100
+++ b/xen/arch/x86/setup.c      Fri Oct 09 09:34:03 2009 +0100
@@ -66,6 +66,10 @@ static int opt_watchdog = 0;
 static int opt_watchdog = 0;
 boolean_param("watchdog", opt_watchdog);
 
+/* opt_tsc_unstable: Override all tests; assume TSC is unreliable. */
+static int opt_tsc_unstable;
+boolean_param("tsc_unstable", opt_tsc_unstable);
+
 /* **** Linux config option: propagated to domain0. */
 /* "acpi=off":    Disables both ACPI table parsing and interpreter. */
 /* "acpi=force":  Override the disable blacklist.                   */
@@ -460,6 +464,14 @@ void __init __start_xen(unsigned long mb
         while ( kextra[1] == ' ' ) kextra++;
     }
     cmdline_parse(cmdline);
+
+    /* If TSC is marked as unstable, clear all enhanced TSC features. */
+    if ( opt_tsc_unstable )
+    {
+        setup_clear_cpu_cap(X86_FEATURE_CONSTANT_TSC);
+        setup_clear_cpu_cap(X86_FEATURE_NONSTOP_TSC);
+        setup_clear_cpu_cap(X86_FEATURE_TSC_RELIABLE);
+    }
 
     parse_video_info();
 
diff -r 1593073ad670 -r 18e80cd73366 xen/arch/x86/smpboot.c
--- a/xen/arch/x86/smpboot.c    Fri Oct 09 09:33:29 2009 +0100
+++ b/xen/arch/x86/smpboot.c    Fri Oct 09 09:34:03 2009 +0100
@@ -187,6 +187,11 @@ static void __init synchronize_tsc_bp (v
        unsigned int one_usec;
        int buggy = 0;
 
+       if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE)) {
+               printk("TSC is reliable, synchronization unnecessary\n");
+               return;
+       }
+       
        printk("checking TSC synchronization across %u CPUs: ", num_booting_cpus());
 
        /* convert from kcyc/sec to cyc/usec */
@@ -278,6 +283,9 @@ static void __init synchronize_tsc_ap (v
 static void __init synchronize_tsc_ap (void)
 {
        int i;
+
+       if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE))
+               return;
 
        /*
         * Not every cpu is online at the time
diff -r 1593073ad670 -r 18e80cd73366 xen/arch/x86/time.c
--- a/xen/arch/x86/time.c       Fri Oct 09 09:33:29 2009 +0100
+++ b/xen/arch/x86/time.c       Fri Oct 09 09:34:03 2009 +0100
@@ -37,13 +37,6 @@ static char __initdata opt_clocksource[1
 static char __initdata opt_clocksource[10];
 string_param("clocksource", opt_clocksource);
 
-/*
- * opt_consistent_tscs: All TSCs tick at the exact same rate, allowing
- * simplified system time handling.
- */
-static int opt_consistent_tscs;
-boolean_param("consistent_tscs", opt_consistent_tscs);
-
 unsigned long cpu_khz;  /* CPU clock frequency in kHz. */
 DEFINE_SPINLOCK(rtc_lock);
 unsigned long pit0_ticks;
@@ -977,7 +970,7 @@ static void local_time_calibration(void)
     /* The overall calibration scale multiplier. */
     u32 calibration_mul_frac;
 
-    if ( opt_consistent_tscs )
+    if ( boot_cpu_has(X86_FEATURE_CONSTANT_TSC) )
     {
         /* Atomically read cpu_calibration struct and write cpu_time struct. */
         local_irq_disable();
@@ -1110,6 +1103,10 @@ struct calibration_rendezvous {
     u64 master_tsc_stamp;
 };
 
+/*
+ * Keep TSCs in sync when they run at the same rate, but may stop in
+ * deep-sleep C states.
+ */
 static void time_calibration_tsc_rendezvous(void *_r)
 {
     int i;
@@ -1161,6 +1158,7 @@ static void time_calibration_tsc_rendezv
     raise_softirq(TIME_CALIBRATE_SOFTIRQ);
 }
 
+/* Ordinary rendezvous function which does not modify TSC values. */
 static void time_calibration_std_rendezvous(void *_r)
 {
     struct cpu_calibration *c = &this_cpu(cpu_calibration);
@@ -1190,6 +1188,9 @@ static void time_calibration_std_rendezv
     raise_softirq(TIME_CALIBRATE_SOFTIRQ);
 }
 
+static void (*time_calibration_rendezvous_fn)(void *) =
+    time_calibration_std_rendezvous;
+
 static void time_calibration(void *unused)
 {
     struct calibration_rendezvous r = {
@@ -1199,9 +1200,7 @@ static void time_calibration(void *unuse
 
     /* @wait=1 because we must wait for all cpus before freeing @r. */
     on_selected_cpus(&r.cpu_calibration_map,
-                     opt_consistent_tscs
-                     ? time_calibration_tsc_rendezvous
-                     : time_calibration_std_rendezvous,
+                     time_calibration_rendezvous_fn,
                      &r, 1);
 }
 
@@ -1229,15 +1228,15 @@ void init_percpu_time(void)
 /* Late init function (after all CPUs are booted). */
 int __init init_xen_time(void)
 {
-    if ( !boot_cpu_has(X86_FEATURE_CONSTANT_TSC) )
-        opt_consistent_tscs = 0;
-
-    /* If we have constant TSCs then scale factor can be shared. */
-    if ( opt_consistent_tscs )
+    /* If we have constant-rate TSCs then scale factor can be shared. */
+    if ( boot_cpu_has(X86_FEATURE_CONSTANT_TSC) )
     {
         int cpu;
         for_each_possible_cpu ( cpu )
             per_cpu(cpu_time, cpu).tsc_scale = per_cpu(cpu_time, 0).tsc_scale;
+        /* If TSCs are not marked as 'reliable', re-sync during rendezvous. */
+        if ( !boot_cpu_has(X86_FEATURE_TSC_RELIABLE) )
+            time_calibration_rendezvous_fn = time_calibration_tsc_rendezvous;
     }
 
     open_softirq(TIME_CALIBRATE_SOFTIRQ, local_time_calibration);

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.