[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-changelog] [xen-unstable] x86: CPU synchronization while doing MTRR register update



# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1249476636 -3600
# Node ID cd46b418d1719fa8654b5355ffe1f675de99549f
# Parent  f1b10de90679307824ee7e4dcb265ce571c83816
x86: CPU synchronization while doing MTRR register update

The current Xen code does not synchronize all the cpus while
initializing MTRR registers when a cpu comes up.

As per IA32 SDM vol 3: Section: 10.11.8 MTRR Considerations in MP
Systems, all the processors should be synchronized while updating
MTRRs.

Processors starting with westmere are caching VMCS data for better VMX
performance. These processors also have Hyper-threading support. With
hyper-threading, when one thread's cache is disabled, it also disables
cache for the sibling threads. And MTRR register updating procedure
involves cache disabling. So if cpus are not synchronized, updating
MTRR registers on a thread, results in the VMCS data from sibling
threads becoming inaccessible, and it causes system failure.

With this patch while updating the MTRR registers, all the cpus are
synchronized as per the IA32 SDM. Also at the boot time and resume
time when multiple cpus are brought up, an optimization is added to
delay the MTRR initialization until all the cpus are up, to avoid
multiple times cpu synchronization.

Signed-off-by: Nitin A Kamble <nitin.a.kamble@xxxxxxxxx>
Signed-off-by: Suresh B Siddha <suresh.b.siddha@xxxxxxxxx>
Signed-off-by: Asit K Mallick <asit.k.mallick@xxxxxxxxx>
---
 xen/arch/x86/acpi/suspend.c  |    2 -
 xen/arch/x86/cpu/common.c    |    2 -
 xen/arch/x86/cpu/mtrr/main.c |   57 +++++++++++++++++++++++++++++++++----------
 xen/arch/x86/smpboot.c       |    6 ++++
 xen/include/asm-x86/mtrr.h   |    4 +++
 5 files changed, 56 insertions(+), 15 deletions(-)

diff -r f1b10de90679 -r cd46b418d171 xen/arch/x86/acpi/suspend.c
--- a/xen/arch/x86/acpi/suspend.c       Wed Aug 05 13:49:35 2009 +0100
+++ b/xen/arch/x86/acpi/suspend.c       Wed Aug 05 13:50:36 2009 +0100
@@ -78,6 +78,6 @@ void restore_rest_processor_state(void)
     if (cpu_has_pat)
         wrmsrl(MSR_IA32_CR_PAT, host_pat);
 
-    mtrr_ap_init();
+    mtrr_bp_restore();
     mcheck_init(&boot_cpu_data);
 }
diff -r f1b10de90679 -r cd46b418d171 xen/arch/x86/cpu/common.c
--- a/xen/arch/x86/cpu/common.c Wed Aug 05 13:49:35 2009 +0100
+++ b/xen/arch/x86/cpu/common.c Wed Aug 05 13:50:36 2009 +0100
@@ -452,8 +452,6 @@ void __cpuinit identify_cpu(struct cpuin
 
        if (c == &boot_cpu_data)
                mtrr_bp_init();
-       else
-               mtrr_ap_init();
 }
 
 #ifdef CONFIG_X86_HT
diff -r f1b10de90679 -r cd46b418d171 xen/arch/x86/cpu/mtrr/main.c
--- a/xen/arch/x86/cpu/mtrr/main.c      Wed Aug 05 13:49:35 2009 +0100
+++ b/xen/arch/x86/cpu/mtrr/main.c      Wed Aug 05 13:50:36 2009 +0100
@@ -134,6 +134,17 @@ struct set_mtrr_data {
        mtrr_type       smp_type;
 };
 
+/* As per the IA32 SDM vol-3: 10.11.8 MTRR Considerations in MP Systems section
+ * MTRRs updates must to be synchronized across all the processors.
+ * This flags avoids multiple cpu synchronization while booting each cpu.
+ * At the boot & resume time, this flag is turned on in mtrr_aps_sync_begin().
+ * Using this flag the mtrr initialization (and the all cpus sync up) in the 
+ * mtrr_ap_init() is avoided while booting each cpu. 
+ * After all the cpus have came up, then mtrr_aps_sync_end() synchronizes all 
+ * the cpus and updates mtrrs on all of them. Then this flag is turned off.
+ */
+int hold_mtrr_updates_on_aps;
+
 #ifdef CONFIG_SMP
 
 static void ipi_handler(void *info)
@@ -151,11 +162,13 @@ static void ipi_handler(void *info)
                cpu_relax();
 
        /*  The master has cleared me to execute  */
-       if (data->smp_reg != ~0U) 
+       if (data->smp_reg == ~0U) /* update all mtrr registers */
+               /* At the cpu hot-add time this will reinitialize mtrr 
+                * registres on the existing cpus. It is ok.  */
+               mtrr_if->set_all();
+       else /* single mtrr register update */
                mtrr_if->set(data->smp_reg, data->smp_base, 
                             data->smp_size, data->smp_type);
-       else
-               mtrr_if->set_all();
 
        atomic_dec(&data->count);
        while(atomic_read(&data->gate))
@@ -250,7 +263,11 @@ static void set_mtrr(unsigned int reg, u
         * to replicate across all the APs. 
         * If we're doing that @reg is set to something special...
         */
-       if (reg != ~0U) 
+       if (reg == ~0U)  /* update all mtrr registers */
+               /* at boot or resume time, this will reinitialize the mtrrs on 
+                * the bp. It is ok. */
+               mtrr_if->set_all();
+       else /* update the single mtrr register */
                mtrr_if->set(reg,base,size,type);
 
        /* wait for the others */
@@ -659,9 +676,7 @@ void __init mtrr_bp_init(void)
 
 void mtrr_ap_init(void)
 {
-       unsigned long flags;
-
-       if (!mtrr_if || !use_intel())
+       if (!mtrr_if || !use_intel() || hold_mtrr_updates_on_aps)
                return;
        /*
         * Ideally we should hold mtrr_mutex here to avoid mtrr entries changed,
@@ -671,11 +686,7 @@ void mtrr_ap_init(void)
         * 2.cpu hotadd time. We let mtrr_add/del_page hold cpuhotplug lock to
         * prevent mtrr entry changes
         */
-       local_irq_save(flags);
-
-       mtrr_if->set_all();
-
-       local_irq_restore(flags);
+       set_mtrr(~0U, 0, 0, 0);
 }
 
 /**
@@ -692,6 +703,28 @@ void mtrr_save_state(void)
        put_cpu();
 }
 
+void mtrr_aps_sync_begin(void)
+{
+       if (!use_intel())
+               return;
+       hold_mtrr_updates_on_aps = 1;
+}
+
+void mtrr_aps_sync_end(void)
+{
+       if (!use_intel())
+               return;
+       set_mtrr(~0U, 0, 0, 0);
+       hold_mtrr_updates_on_aps = 0;
+}
+
+void mtrr_bp_restore(void)
+{
+       if (!use_intel())
+               return;
+       mtrr_if->set_all();
+}
+
 static int __init mtrr_init_finialize(void)
 {
        if (!mtrr_if)
diff -r f1b10de90679 -r cd46b418d171 xen/arch/x86/smpboot.c
--- a/xen/arch/x86/smpboot.c    Wed Aug 05 13:49:35 2009 +0100
+++ b/xen/arch/x86/smpboot.c    Wed Aug 05 13:50:36 2009 +0100
@@ -519,6 +519,7 @@ void __devinit start_secondary(void *unu
 
        /* We can take interrupts now: we're officially "up". */
        local_irq_enable();
+       mtrr_ap_init();
 
        microcode_resume_cpu(cpu);
 
@@ -1194,6 +1195,7 @@ void __init smp_prepare_cpus(unsigned in
        cpu_callin_map = cpumask_of_cpu(0);
        mb();
        smp_boot_cpus(max_cpus);
+       mtrr_aps_sync_begin();
 }
 
 void __devinit smp_prepare_boot_cpu(void)
@@ -1386,6 +1388,7 @@ void enable_nonboot_cpus(void)
        int cpu, error;
 
        printk("Thawing cpus ...\n");
+       mtrr_aps_sync_begin();
        for_each_cpu_mask(cpu, frozen_cpus) {
                error = cpu_up(cpu);
                if (!error) {
@@ -1395,6 +1398,7 @@ void enable_nonboot_cpus(void)
                printk("Error taking cpu %d up: %d\n", cpu, error);
                panic("Not enough cpus");
        }
+       mtrr_aps_sync_end();
        cpus_clear(frozen_cpus);
 
        /*
@@ -1461,6 +1465,8 @@ void __init smp_cpus_done(unsigned int m
 #ifdef CONFIG_X86_IO_APIC
        setup_ioapic_dest();
 #endif
+       mtrr_save_state();
+       mtrr_aps_sync_end();
 #ifndef CONFIG_HOTPLUG_CPU
        /*
         * Disable executability of the SMP trampoline:
diff -r f1b10de90679 -r cd46b418d171 xen/include/asm-x86/mtrr.h
--- a/xen/include/asm-x86/mtrr.h        Wed Aug 05 13:49:35 2009 +0100
+++ b/xen/include/asm-x86/mtrr.h        Wed Aug 05 13:50:36 2009 +0100
@@ -71,5 +71,9 @@ extern void ept_change_entry_emt_with_ra
 extern void ept_change_entry_emt_with_range(
     struct domain *d, unsigned long start_gfn, unsigned long end_gfn);
 extern unsigned char pat_type_2_pte_flags(unsigned char pat_type);
+extern int hold_mtrr_updates_on_aps;
+extern void mtrr_aps_sync_begin(void);
+extern void mtrr_aps_sync_end(void);
+extern void mtrr_bp_restore(void);
 
 #endif /* __ASM_X86_MTRR_H__ */

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.