
[xen master] Temporarily revert "amd/msr: implement VIRT_SPEC_CTRL for HVM guests using legacy SSBD"



commit c254f3b45c7fc8b050a6b6290d9bd7203417b994
Author:     George Dunlap <george.dunlap@xxxxxxxxxx>
AuthorDate: Fri Aug 19 20:15:22 2022 +0100
Commit:     George Dunlap <george.dunlap@xxxxxxxxxx>
CommitDate: Fri Aug 19 20:15:22 2022 +0100

    Temporarily revert "amd/msr: implement VIRT_SPEC_CTRL for HVM guests using legacy SSBD"
    
    A person tagged in commit 646589ac148a2ff6bb222a6081b4d7b13ee468c0
    claims the tag is inaccurate; revert this commit so that we can
    re-commit it with the tag corrected.
    
    Signed-off-by: George Dunlap <george.dunlap@xxxxxxxxxx>
---
 CHANGELOG.md                   |   1 -
 xen/arch/x86/cpu/amd.c         | 121 ++++++-----------------------------------
 xen/arch/x86/hvm/svm/svm.c     |   4 --
 xen/arch/x86/include/asm/amd.h |   4 --
 xen/arch/x86/spec_ctrl.c       |   4 +-
 5 files changed, 18 insertions(+), 116 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 5e4bae5f35..a87a193e15 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -13,7 +13,6 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/)
 ### Added / support upgraded
  - IOMMU superpage support on x86, affecting PV guests as well as HVM/PVH ones
    when they don't share page tables with the CPU (HAP / EPT / NPT).
- - Support VIRT_SSBD feature for HVM guests on AMD.
 
 ### Removed / support downgraded
 - dropped support for the (x86-only) "vesa-mtrr" and "vesa-remap" command line options
diff --git a/xen/arch/x86/cpu/amd.c b/xen/arch/x86/cpu/amd.c
index 98c52d0686..d5f8e5e899 100644
--- a/xen/arch/x86/cpu/amd.c
+++ b/xen/arch/x86/cpu/amd.c
@@ -48,7 +48,6 @@ boolean_param("allow_unsafe", opt_allow_unsafe);
 
 /* Signal whether the ACPI C1E quirk is required. */
 bool __read_mostly amd_acpi_c1e_quirk;
-bool __ro_after_init amd_legacy_ssbd;
 
 static inline int rdmsr_amd_safe(unsigned int msr, unsigned int *lo,
                                 unsigned int *hi)
@@ -686,10 +685,23 @@ void amd_init_lfence(struct cpuinfo_x86 *c)
  * Refer to the AMD Speculative Store Bypass whitepaper:
  * https://developer.amd.com/wp-content/resources/124441_AMD64_SpeculativeStoreBypassDisable_Whitepaper_final.pdf
  */
-static bool set_legacy_ssbd(const struct cpuinfo_x86 *c, bool enable)
+void amd_init_ssbd(const struct cpuinfo_x86 *c)
 {
        int bit = -1;
 
+       if (cpu_has_ssb_no)
+               return;
+
+       if (cpu_has_amd_ssbd) {
+               /* Handled by common MSR_SPEC_CTRL logic */
+               return;
+       }
+
+       if (cpu_has_virt_ssbd) {
+               wrmsrl(MSR_VIRT_SPEC_CTRL, opt_ssbd ? SPEC_CTRL_SSBD : 0);
+               return;
+       }
+
        switch (c->x86) {
        case 0x15: bit = 54; break;
        case 0x16: bit = 33; break;
@@ -703,119 +715,20 @@ static bool set_legacy_ssbd(const struct cpuinfo_x86 *c, bool enable)
                if (rdmsr_safe(MSR_AMD64_LS_CFG, val) ||
                    ({
                            val &= ~mask;
-                           if (enable)
+                           if (opt_ssbd)
                                    val |= mask;
                            false;
                    }) ||
                    wrmsr_safe(MSR_AMD64_LS_CFG, val) ||
                    ({
                            rdmsrl(MSR_AMD64_LS_CFG, val);
-                           (val & mask) != (enable * mask);
+                           (val & mask) != (opt_ssbd * mask);
                    }))
                        bit = -1;
        }
 
-       return bit >= 0;
-}
-
-void amd_init_ssbd(const struct cpuinfo_x86 *c)
-{
-       if (cpu_has_ssb_no)
-               return;
-
-       if (cpu_has_amd_ssbd) {
-               /* Handled by common MSR_SPEC_CTRL logic */
-               return;
-       }
-
-       if (cpu_has_virt_ssbd) {
-               wrmsrl(MSR_VIRT_SPEC_CTRL, opt_ssbd ? SPEC_CTRL_SSBD : 0);
-               return;
-       }
-
-       if (!set_legacy_ssbd(c, opt_ssbd)) {
+       if (bit < 0)
                printk_once(XENLOG_ERR "No SSBD controls available\n");
-               if (amd_legacy_ssbd)
-                       panic("CPU feature mismatch: no legacy SSBD\n");
-       } else if (c == &boot_cpu_data)
-               amd_legacy_ssbd = true;
-}
-
-static struct ssbd_ls_cfg {
-    bool locked;
-    unsigned int count;
-} __cacheline_aligned *ssbd_ls_cfg;
-static unsigned int __ro_after_init ssbd_max_cores;
-#define AMD_FAM17H_MAX_SOCKETS 2
-
-bool __init amd_setup_legacy_ssbd(void)
-{
-       unsigned int i;
-
-       if ((boot_cpu_data.x86 != 0x17 && boot_cpu_data.x86 != 0x18) ||
-           boot_cpu_data.x86_num_siblings <= 1)
-               return true;
-
-       /*
-        * One could be forgiven for thinking that c->x86_max_cores is the
-        * correct value to use here.
-        *
-        * However, that value is derived from the current configuration, and
-        * c->cpu_core_id is sparse on all but the top end CPUs.  Derive
-        * max_cpus from ApicIdCoreIdSize which will cover any sparseness.
-        */
-       if (boot_cpu_data.extended_cpuid_level >= 0x80000008) {
-               ssbd_max_cores = 1u << MASK_EXTR(cpuid_ecx(0x80000008), 0xf000);
-               ssbd_max_cores /= boot_cpu_data.x86_num_siblings;
-       }
-       if (!ssbd_max_cores)
-               return false;
-
-       ssbd_ls_cfg = xzalloc_array(struct ssbd_ls_cfg,
-                                   ssbd_max_cores * AMD_FAM17H_MAX_SOCKETS);
-       if (!ssbd_ls_cfg)
-               return false;
-
-       if (opt_ssbd)
-               for (i = 0; i < ssbd_max_cores * AMD_FAM17H_MAX_SOCKETS; i++)
-                       /* Set initial state, applies to any (hotplug) CPU. */
-                       ssbd_ls_cfg[i].count = boot_cpu_data.x86_num_siblings;
-
-       return true;
-}
-
-/*
- * Executed from GIF==0 context: avoid using BUG/ASSERT or other functionality
- * that relies on exceptions as those are not expected to run in GIF==0
- * context.
- */
-void amd_set_legacy_ssbd(bool enable)
-{
-       const struct cpuinfo_x86 *c = &current_cpu_data;
-       struct ssbd_ls_cfg *status;
-
-       if ((c->x86 != 0x17 && c->x86 != 0x18) || c->x86_num_siblings <= 1) {
-               set_legacy_ssbd(c, enable);
-               return;
-       }
-
-       status = &ssbd_ls_cfg[c->phys_proc_id * ssbd_max_cores +
-                             c->cpu_core_id];
-
-       /*
-        * Open code a very simple spinlock: this function is used with GIF==0
-        * and different IF values, so would trigger the checklock detector.
-        * Instead of trying to workaround the detector, use a very simple lock
-        * implementation: it's better to reduce the amount of code executed
-        * with GIF==0.
-        */
-       while (test_and_set_bool(status->locked))
-               cpu_relax();
-       status->count += enable ? 1 : -1;
-       if (enable ? status->count == 1 : !status->count)
-               set_legacy_ssbd(c, enable);
-       barrier();
-       write_atomic(&status->locked, false);
 }
 
 /*
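
The hunks above remove the per-core bookkeeping that the reverted amd_set_legacy_ssbd() used on Fam17h/18h parts with SMT: a refcount guarded by an open-coded test-and-set lock, so the core-wide LS_CFG chicken bit is only written when the first sibling enables SSBD or the last sibling disables it, and so that no regular Xen spinlock (with its lock-order checking) is taken in GIF==0 context. The following is a minimal standalone sketch of that pattern, not Xen code: it uses GCC atomic builtins in place of Xen's test_and_set_bool()/write_atomic(), and apply_chicken_bit() is a hypothetical stand-in for the MSR_AMD64_LS_CFG read-modify-write.

    /*
     * Sketch of the first-enable / last-disable refcount pattern used by
     * the reverted amd_set_legacy_ssbd().  Builds as ordinary user-space
     * C for illustration only.
     */
    #include <stdbool.h>
    #include <stdio.h>

    struct ssbd_core {
        volatile bool locked;   /* open-coded test-and-set lock */
        unsigned int count;     /* siblings currently requesting SSBD */
    };

    static void apply_chicken_bit(bool enable)
    {
        /* Placeholder for the per-family LS_CFG bit flip. */
        printf("LS_CFG SSBD bit %s\n", enable ? "set" : "cleared");
    }

    static void core_set_ssbd(struct ssbd_core *core, bool enable)
    {
        /* Spin until we own the flag; the real code calls cpu_relax(). */
        while (__atomic_test_and_set(&core->locked, __ATOMIC_ACQUIRE))
            ;

        core->count += enable ? 1 : -1;
        /* Touch the MSR only on the first enable or the last disable. */
        if (enable ? core->count == 1 : core->count == 0)
            apply_chicken_bit(enable);

        __atomic_clear(&core->locked, __ATOMIC_RELEASE);
    }

    int main(void)
    {
        struct ssbd_core core = { 0 };

        core_set_ssbd(&core, true);   /* first sibling: bit is set */
        core_set_ssbd(&core, true);   /* second sibling: no MSR write */
        core_set_ssbd(&core, false);  /* still one user: no MSR write */
        core_set_ssbd(&core, false);  /* last sibling: bit is cleared */
        return 0;
    }
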
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index 1aeaabcb13..53ce2edd35 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -3126,8 +3126,6 @@ void vmexit_virt_spec_ctrl(void)
 
     if ( cpu_has_virt_ssbd )
         wrmsr(MSR_VIRT_SPEC_CTRL, val, 0);
-    else
-        amd_set_legacy_ssbd(val);
 }
 
 /* Called with GIF=0. */
@@ -3140,8 +3138,6 @@ void vmentry_virt_spec_ctrl(void)
 
     if ( cpu_has_virt_ssbd )
         wrmsr(MSR_VIRT_SPEC_CTRL, val, 0);
-    else
-        amd_set_legacy_ssbd(val);
 }
 
 /*
diff --git a/xen/arch/x86/include/asm/amd.h b/xen/arch/x86/include/asm/amd.h
index 6a42f68542..a82382e6bf 100644
--- a/xen/arch/x86/include/asm/amd.h
+++ b/xen/arch/x86/include/asm/amd.h
@@ -151,8 +151,4 @@ void check_enable_amd_mmconf_dmi(void);
 extern bool amd_acpi_c1e_quirk;
 void amd_check_disable_c1e(unsigned int port, u8 value);
 
-extern bool amd_legacy_ssbd;
-bool amd_setup_legacy_ssbd(void);
-void amd_set_legacy_ssbd(bool enable);
-
 #endif /* __AMD_H__ */
diff --git a/xen/arch/x86/spec_ctrl.c b/xen/arch/x86/spec_ctrl.c
index 4e53056624..ec44205309 100644
--- a/xen/arch/x86/spec_ctrl.c
+++ b/xen/arch/x86/spec_ctrl.c
@@ -22,7 +22,6 @@
 #include <xen/param.h>
 #include <xen/warning.h>
 
-#include <asm/amd.h>
 #include <asm/hvm/svm/svm.h>
 #include <asm/microcode.h>
 #include <asm/msr.h>
@@ -1245,8 +1244,7 @@ void __init init_speculation_mitigations(void)
     }
 
     /* Support VIRT_SPEC_CTRL.SSBD if AMD_SSBD is not available. */
-    if ( opt_msr_sc_hvm && !cpu_has_amd_ssbd &&
-         (cpu_has_virt_ssbd || (amd_legacy_ssbd && amd_setup_legacy_ssbd())) )
+    if ( opt_msr_sc_hvm && !cpu_has_amd_ssbd && cpu_has_virt_ssbd )
         setup_force_cpu_cap(X86_FEATURE_VIRT_SC_MSR_HVM);
 
     /* Figure out default_xen_spec_ctrl. */
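
With the legacy fallback reverted, VIRT_SC_MSR_HVM is only advertised when the host itself has VIRT_SSBD, and the guest-visible interface is the MSR_VIRT_SPEC_CTRL write shown in the svm.c hunks above. For context, a minimal guest-side sketch of that interface follows. It is not taken from Xen or Linux; the helper names (wrmsr64, cpuid_ebx, guest_set_ssbd) are made up for illustration, while the MSR number, SSBD bit and CPUID bit follow AMD's published VIRT_SPEC_CTRL definition. It must run at CPL0, and a real implementation would first check the maximum extended CPUID leaf.

    #include <stdbool.h>
    #include <stdint.h>

    #define MSR_VIRT_SPEC_CTRL  0xC001011FU     /* AMD VIRT_SPEC_CTRL MSR */
    #define SPEC_CTRL_SSBD      (1U << 2)       /* SSBD bit, as in SPEC_CTRL */
    #define CPUID_EXT_LEAF      0x80000008U
    #define VIRT_SSBD_EBX_BIT   (1U << 25)      /* VIRT_SSBD feature flag */

    static inline void wrmsr64(uint32_t msr, uint64_t val)
    {
        asm volatile("wrmsr" :: "c"(msr), "a"((uint32_t)val),
                                "d"((uint32_t)(val >> 32)));
    }

    static inline uint32_t cpuid_ebx(uint32_t leaf)
    {
        uint32_t eax, ebx, ecx, edx;

        asm volatile("cpuid" : "=a"(eax), "=b"(ebx), "=c"(ecx), "=d"(edx)
                             : "a"(leaf), "c"(0));
        return ebx;
    }

    /* Toggle SSBD via the paravirtual MSR, if the hypervisor offers it. */
    static bool guest_set_ssbd(bool enable)
    {
        if (!(cpuid_ebx(CPUID_EXT_LEAF) & VIRT_SSBD_EBX_BIT))
            return false;               /* VIRT_SSBD not advertised */

        wrmsr64(MSR_VIRT_SPEC_CTRL, enable ? SPEC_CTRL_SSBD : 0);
        return true;
    }
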
--
generated by git-patchbot for /home/xen/git/xen.git#master