[xen master] x86/spec-ctrl: Record the last write to MSR_SPEC_CTRL



commit 00f2992b6c7a9d4090443c1a85bf83224a87eeb9
Author:     Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
AuthorDate: Fri Jan 28 11:57:19 2022 +0000
Commit:     Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
CommitDate: Tue Feb 1 13:20:44 2022 +0000

    x86/spec-ctrl: Record the last write to MSR_SPEC_CTRL
    
    In some cases, writes to MSR_SPEC_CTRL do not have interesting side effects,
    and we should implement lazy context switching like we do with other MSRs.
    
    In the short term, this will be used by the SVM infrastructure, but I expect
    to extend it to other contexts in due course.
    
    Introduce cpu_info.last_spec_ctrl for the purpose, and cache writes made from
    the boot/resume paths.  The value can't live in regular per-cpu data when it
    is eventually used for PV guests, where XPTI might be active.
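
    As a rough illustration only (not code from this patch), a later user of the
    cache could elide redundant writes along these lines; wrmsr_spec_ctrl() is a
    hypothetical helper name, and the real SVM user arrives in a follow-up change:

        /* Sketch: skip the WRMSR when the MSR already holds the wanted value. */
        static void wrmsr_spec_ctrl(unsigned int val)
        {
            struct cpu_info *info = get_cpu_info();

            if ( val != info->last_spec_ctrl )
            {
                wrmsrl(MSR_SPEC_CTRL, val);
                info->last_spec_ctrl = val;
            }
        }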
    
    Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
    Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx>
---
 xen/arch/x86/acpi/power.c                |  3 +++
 xen/arch/x86/include/asm/current.h       |  2 +-
 xen/arch/x86/include/asm/spec_ctrl_asm.h |  4 ++++
 xen/arch/x86/setup.c                     |  5 ++++-
 xen/arch/x86/smpboot.c                   |  5 +++++
 xen/arch/x86/spec_ctrl.c                 | 10 +++++++---
 6 files changed, 24 insertions(+), 5 deletions(-)

diff --git a/xen/arch/x86/acpi/power.c b/xen/arch/x86/acpi/power.c
index 0837a3ead4..bac9c16389 100644
--- a/xen/arch/x86/acpi/power.c
+++ b/xen/arch/x86/acpi/power.c
@@ -296,7 +296,10 @@ static int enter_state(u32 state)
     ci->spec_ctrl_flags |= (default_spec_ctrl_flags & SCF_ist_wrmsr);
 
     if ( boot_cpu_has(X86_FEATURE_IBRSB) )
+    {
         wrmsrl(MSR_SPEC_CTRL, default_xen_spec_ctrl);
+        ci->last_spec_ctrl = default_xen_spec_ctrl;
+    }
 
     if ( boot_cpu_has(X86_FEATURE_SRBDS_CTRL) )
         wrmsrl(MSR_MCU_OPT_CTRL, default_xen_mcu_opt_ctrl);
diff --git a/xen/arch/x86/include/asm/current.h b/xen/arch/x86/include/asm/current.h
index cfbedc3198..dc0edd9ed0 100644
--- a/xen/arch/x86/include/asm/current.h
+++ b/xen/arch/x86/include/asm/current.h
@@ -56,6 +56,7 @@ struct cpu_info {
     /* See asm/spec_ctrl_asm.h for usage. */
     unsigned int shadow_spec_ctrl;
     uint8_t      xen_spec_ctrl;
+    uint8_t      last_spec_ctrl;
     uint8_t      spec_ctrl_flags;
 
     /*
@@ -73,7 +74,6 @@ struct cpu_info {
      */
     bool         use_pv_cr3;
 
-    unsigned long __pad;
     /* get_stack_bottom() must be 16-byte aligned */
 };
 
diff --git a/xen/arch/x86/include/asm/spec_ctrl_asm.h b/xen/arch/x86/include/asm/spec_ctrl_asm.h
index bf82528a12..9c0c7622c4 100644
--- a/xen/arch/x86/include/asm/spec_ctrl_asm.h
+++ b/xen/arch/x86/include/asm/spec_ctrl_asm.h
@@ -67,6 +67,10 @@
  * steps 2 and 6 will restore the shadow value rather than leaving Xen's value
  * loaded and corrupting the value used in guest context.
  *
+ * Additionally, in some cases it is safe to skip writes to MSR_SPEC_CTRL when
+ * we don't require any of the side effects of an identical write.  Maintain a
+ * per-cpu last_spec_ctrl value for this purpose.
+ *
  * The following ASM fragments implement this algorithm.  See their local
  * comments for further details.
  *  - SPEC_CTRL_ENTRY_FROM_PV
diff --git a/xen/arch/x86/setup.c b/xen/arch/x86/setup.c
index e716005145..115f8f6517 100644
--- a/xen/arch/x86/setup.c
+++ b/xen/arch/x86/setup.c
@@ -1930,9 +1930,12 @@ void __init noreturn __start_xen(unsigned long mbi_p)
 
     if ( bsp_delay_spec_ctrl )
     {
-        get_cpu_info()->spec_ctrl_flags &= ~SCF_use_shadow;
+        struct cpu_info *info = get_cpu_info();
+
+        info->spec_ctrl_flags &= ~SCF_use_shadow;
         barrier();
         wrmsrl(MSR_SPEC_CTRL, default_xen_spec_ctrl);
+        info->last_spec_ctrl = default_xen_spec_ctrl;
     }
 
     /* Jump to the 1:1 virtual mappings of cpu0_stack. */
diff --git a/xen/arch/x86/smpboot.c b/xen/arch/x86/smpboot.c
index 08c0f2d9df..1cfdf96207 100644
--- a/xen/arch/x86/smpboot.c
+++ b/xen/arch/x86/smpboot.c
@@ -323,6 +323,8 @@ static void set_cpu_sibling_map(unsigned int cpu)
 
 void start_secondary(void *unused)
 {
+    struct cpu_info *info = get_cpu_info();
+
     /*
      * Dont put anything before smp_callin(), SMP booting is so fragile that we
      * want to limit the things done here to the most necessary things.
@@ -379,7 +381,10 @@ void start_secondary(void *unused)
      * microcode.
      */
     if ( boot_cpu_has(X86_FEATURE_IBRSB) )
+    {
         wrmsrl(MSR_SPEC_CTRL, default_xen_spec_ctrl);
+        info->last_spec_ctrl = default_xen_spec_ctrl;
+    }
     if ( boot_cpu_has(X86_FEATURE_SRBDS_CTRL) )
         wrmsrl(MSR_MCU_OPT_CTRL, default_xen_mcu_opt_ctrl);
 
diff --git a/xen/arch/x86/spec_ctrl.c b/xen/arch/x86/spec_ctrl.c
index 2072daf662..b2fd86ebe5 100644
--- a/xen/arch/x86/spec_ctrl.c
+++ b/xen/arch/x86/spec_ctrl.c
@@ -1270,6 +1270,9 @@ void __init init_speculation_mitigations(void)
      */
     if ( has_spec_ctrl )
     {
+        struct cpu_info *info = get_cpu_info();
+        unsigned int val;
+
         bsp_delay_spec_ctrl = !cpu_has_hypervisor && default_xen_spec_ctrl;
 
         /*
@@ -1278,15 +1281,16 @@ void __init init_speculation_mitigations(void)
          */
         if ( bsp_delay_spec_ctrl )
         {
-            struct cpu_info *info = get_cpu_info();
-
             info->shadow_spec_ctrl = 0;
             barrier();
             info->spec_ctrl_flags |= SCF_use_shadow;
             barrier();
         }
 
-        wrmsrl(MSR_SPEC_CTRL, bsp_delay_spec_ctrl ? 0 : default_xen_spec_ctrl);
+        val = bsp_delay_spec_ctrl ? 0 : default_xen_spec_ctrl;
+
+        wrmsrl(MSR_SPEC_CTRL, val);
+        info->last_spec_ctrl = val;
     }
 
     if ( boot_cpu_has(X86_FEATURE_SRBDS_CTRL) )
--
generated by git-patchbot for /home/xen/git/xen.git#master