
[xen stable-4.16] x86/spec-ctrl: Mitigate Cross-Thread Return Address Predictions



commit d4e286db89d80c862b4a24bf971dd71008c8b53e
Author:     Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
AuthorDate: Thu Sep 8 21:27:58 2022 +0100
Commit:     Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
CommitDate: Wed Feb 15 16:17:04 2023 +0000

    x86/spec-ctrl: Mitigate Cross-Thread Return Address Predictions
    
    This is XSA-426 / CVE-2022-27672
    
    Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
    Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx>
    (cherry picked from commit 63305e5392ec2d17b85e7996a97462744425db80)
---
 docs/misc/xen-command-line.pandoc |  2 +-
 xen/arch/x86/spec_ctrl.c          | 31 ++++++++++++++++++++++++++++---
 xen/include/asm-x86/cpufeatures.h |  3 ++-
 xen/include/asm-x86/spec_ctrl.h   | 15 +++++++++++++++
 4 files changed, 46 insertions(+), 5 deletions(-)

diff --git a/docs/misc/xen-command-line.pandoc b/docs/misc/xen-command-line.pandoc
index bd6826d0ae..b3f60cd923 100644
--- a/docs/misc/xen-command-line.pandoc
+++ b/docs/misc/xen-command-line.pandoc
@@ -2275,7 +2275,7 @@ guests to use.
   on entry and exit.  These blocks are necessary to virtualise support for
   guests and if disabled, guests will be unable to use IBRS/STIBP/SSBD/etc.
 * `rsb=` offers control over whether to overwrite the Return Stack Buffer /
-  Return Address Stack on entry to Xen.
+  Return Address Stack on entry to Xen and on idle.
 * `md-clear=` offers control over whether to use VERW to flush
   microarchitectural buffers on idle and exit from Xen.  *Note: For
   compatibility with development versions of this fix, `mds=` is also accepted
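As a purely illustrative aside (not part of the patch): `rsb=` above is one of the boolean sub-options of Xen's `spec-ctrl=` command line option, so forcing the behaviour one way or the other would look roughly like the fragments below.  Treat these as hypothetical examples rather than authoritative syntax; consult the full xen-command-line documentation for the exact spelling.

    # Example Xen command line fragments (other options elided):
    spec-ctrl=rsb          # explicitly enable overwriting the RSB/RAS
    spec-ctrl=no-rsb       # explicitly disable it (overrides the heuristics)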
diff --git a/xen/arch/x86/spec_ctrl.c b/xen/arch/x86/spec_ctrl.c
index 90d86fe5cb..14649d92f5 100644
--- a/xen/arch/x86/spec_ctrl.c
+++ b/xen/arch/x86/spec_ctrl.c
@@ -1317,13 +1317,38 @@ void __init init_speculation_mitigations(void)
      * 3) Some CPUs have RSBs which are not full width, which allow the
      *    attacker's entries to alias Xen addresses.
      *
+     * 4) Some CPUs have RSBs which are re-partitioned based on thread
+     *    idleness, which allows an attacker to inject entries into the other
+     *    thread.  We still activate the optimisation in this case, and mitigate
+     *    in the idle path which has lower overhead.
+     *
      * It is safe to turn off RSB stuffing when Xen is using SMEP itself, and
      * 32bit PV guests are disabled, and when the RSB is full width.
      */
     BUILD_BUG_ON(RO_MPT_VIRT_START != PML4_ADDR(256));
-    if ( opt_rsb_pv == -1 && boot_cpu_has(X86_FEATURE_XEN_SMEP) &&
-         !opt_pv32 && rsb_is_full_width() )
-        opt_rsb_pv = 0;
+    if ( opt_rsb_pv == -1 )
+    {
+        opt_rsb_pv = (opt_pv32 || !boot_cpu_has(X86_FEATURE_XEN_SMEP) ||
+                      !rsb_is_full_width());
+
+        /*
+         * Cross-Thread Return Address Predictions.
+         *
+         * Vulnerable systems are Zen1/Zen2 uarch, which is AMD Fam17 / Hygon
+         * Fam18, when SMT is active.
+         *
+         * To mitigate, we must flush the RSB/RAS/RAP once between entering
+         * Xen and going idle.
+         *
+         * Most cases flush on entry to Xen anyway.  The one case where we
+         * don't is when using the SMEP optimisation for PV guests.  Flushing
+         * before going idle is less overhead than flushing on PV entry.
+         */
+        if ( !opt_rsb_pv && hw_smt_enabled &&
+             (boot_cpu_data.x86_vendor & (X86_VENDOR_AMD|X86_VENDOR_HYGON)) &&
+             (boot_cpu_data.x86 == 0x17 || boot_cpu_data.x86 == 0x18) )
+            setup_force_cpu_cap(X86_FEATURE_SC_RSB_IDLE);
+    }
 
     if ( opt_rsb_pv )
     {
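For readers following the hunk above: the new logic keeps the SMEP-based optimisation (skipping RSB stuffing on PV entry), and instead arms an idle-path flush when the system is actually susceptible, i.e. when SMT is enabled on an AMD family 0x17 / Hygon family 0x18 (Zen1/Zen2-era) part.  Below is a minimal, standalone sketch of that predicate only; the identifiers (needs_rsb_flush_on_idle, cpu_vendor, smt_active, pv_entry_rsb_stuffing) are hypothetical stand-ins for illustration, not Xen symbols such as hw_smt_enabled or opt_rsb_pv.

    /*
     * Hedged, standalone illustration of the XSA-426 applicability check.
     * All names here are invented for the example.
     */
    #include <stdbool.h>

    enum vendor { VENDOR_INTEL, VENDOR_AMD, VENDOR_HYGON, VENDOR_OTHER };

    bool needs_rsb_flush_on_idle(enum vendor cpu_vendor,
                                 unsigned int cpu_family,
                                 bool smt_active,
                                 bool pv_entry_rsb_stuffing)
    {
        /* Paths which already stuff the RSB on entry to Xen are covered. */
        if ( pv_entry_rsb_stuffing )
            return false;

        /* The leak crosses SMT threads, so only SMT-active systems matter. */
        if ( !smt_active )
            return false;

        /* Vulnerable parts are Zen1/Zen2: AMD Fam 0x17, Hygon Fam 0x18. */
        return (cpu_vendor == VENDOR_AMD || cpu_vendor == VENDOR_HYGON) &&
               (cpu_family == 0x17 || cpu_family == 0x18);
    }

The design point, as the comment in the hunk explains, is that flushing once before going idle is cheaper than re-enabling the flush on every PV entry, while still guaranteeing one flush between entering Xen and the RSB being re-partitioned to the sibling thread.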
diff --git a/xen/include/asm-x86/cpufeatures.h b/xen/include/asm-x86/cpufeatures.h
index ecc1bb0950..ccf9d7287c 100644
--- a/xen/include/asm-x86/cpufeatures.h
+++ b/xen/include/asm-x86/cpufeatures.h
@@ -35,7 +35,8 @@ XEN_CPUFEATURE(SC_RSB_HVM,        X86_SYNTH(19)) /* RSB overwrite needed for HVM
 XEN_CPUFEATURE(XEN_SELFSNOOP,     X86_SYNTH(20)) /* SELFSNOOP gets used by Xen itself */
 XEN_CPUFEATURE(SC_MSR_IDLE,       X86_SYNTH(21)) /* Clear MSR_SPEC_CTRL on idle */
 XEN_CPUFEATURE(XEN_LBR,           X86_SYNTH(22)) /* Xen uses MSR_DEBUGCTL.LBR */
-/* Bits 23,24 unused. */
+/* Bits 23 unused. */
+XEN_CPUFEATURE(SC_RSB_IDLE,       X86_SYNTH(24)) /* RSB overwrite needed for idle. */
 XEN_CPUFEATURE(SC_VERW_IDLE,      X86_SYNTH(25)) /* VERW used by Xen for idle */
 XEN_CPUFEATURE(XEN_SHSTK,         X86_SYNTH(26)) /* Xen uses CET Shadow Stacks */
 XEN_CPUFEATURE(XEN_IBT,           X86_SYNTH(27)) /* Xen uses CET Indirect Branch Tracking */
diff --git a/xen/include/asm-x86/spec_ctrl.h b/xen/include/asm-x86/spec_ctrl.h
index 6a77c39378..391973ef6a 100644
--- a/xen/include/asm-x86/spec_ctrl.h
+++ b/xen/include/asm-x86/spec_ctrl.h
@@ -159,6 +159,21 @@ static always_inline void spec_ctrl_enter_idle(struct cpu_info *info)
      */
     alternative_input("", "verw %[sel]", X86_FEATURE_SC_VERW_IDLE,
                       [sel] "m" (info->verw_sel));
+
+    /*
+     * Cross-Thread Return Address Predictions:
+     *
+     * On vulnerable systems, the return predictions (RSB/RAS) are statically
+     * partitioned between active threads.  When entering idle, our entries
+     * are re-partitioned to allow the other threads to use them.
+     *
+     * In some cases, we might still have guest entries in the RAS, so flush
+     * them before injecting them sideways to our sibling thread.
+     *
+     * (ab)use alternative_input() to specify clobbers.
+     */
+    alternative_input("", "DO_OVERWRITE_RSB", X86_FEATURE_SC_RSB_IDLE,
+                      : "rax", "rcx");
 }
 
 /* WARNING! `ret`, `call *`, `jmp *` not safe before this call. */
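DO_OVERWRITE_RSB, referenced by the new alternative above, is an existing assembly macro elsewhere in Xen and is not shown in this patch.  Purely as an illustration of the general RSB/RAS-stuffing technique it performs (a chain of CALLs whose return addresses point at speculation traps, followed by discarding the stack space those CALLs consumed), a self-contained GCC inline-assembly sketch might look like the following.  The 32-entry depth, the label names, and the function name are assumptions for the example, not taken from the Xen macro.

    /*
     * Illustrative only -- NOT Xen's DO_OVERWRITE_RSB.  Build without the
     * red zone (e.g. -mno-red-zone), since the CALLs briefly write below
     * the caller's stack pointer before %rsp is restored.
     */
    static inline void overwrite_rsb_sketch(void)
    {
        asm volatile (
            "    mov  $16, %%ecx\n\t"     /* 16 iterations x 2 CALLs = 32 entries */
            "1:  call 2f\n\t"             /* push a return address / RAS entry    */
            "11: pause; lfence\n\t"       /* speculation trap: only ever reached  */
            "    jmp  11b\n\t"            /*   speculatively via a stale RAS hit  */
            "2:  call 3f\n\t"             /* push a second entry                  */
            "22: pause; lfence\n\t"
            "    jmp  22b\n\t"
            "3:  dec  %%ecx\n\t"
            "    jnz  1b\n\t"
            "    add  $(32*8), %%rsp\n\t" /* drop the 32 stacked return addresses */
            ::: "rcx", "memory", "cc" );
    }

Each CALL overwrites one return-predictor entry with the address of a harmless pause/lfence trap, so whatever entries were loaded by the guest can no longer be consumed, either by Xen's own RETs or, on the affected Zen1/Zen2 parts, by the sibling thread after the idle re-partitioning described in the comment above.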
--
generated by git-patchbot for /home/xen/git/xen.git#stable-4.16
