
[xen stable-4.18] x86/vmx: Perform VERW flushing later in the VMExit path



commit 9f89ec65fbe49c3be32a456091097d7ef017d268
Author:     Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
AuthorDate: Fri Jun 23 11:32:00 2023 +0100
Commit:     Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
CommitDate: Tue Mar 12 15:55:06 2024 +0000

    x86/vmx: Perform VERW flushing later in the VMExit path
    
    Broken out of the following patch because this change is subtle enough on
    its own.  See it for the rationale of why we're moving VERW.
    
    As for how, extend the trick already used to hold one condition in
    flags (RESUME vs LAUNCH) through the POPing of GPRs.
    
    Move the MOV CR2 earlier.  Intel specify flags to be undefined across it.
    
    Encode the two conditions we want using SF and PF.  See the code comment for
    exactly how.
    
    Leave a comment to explain the lack of any content around
    SPEC_CTRL_EXIT_TO_VMX, but leave the block in place.  Sod's law says if we
    delete it, we'll need to reintroduce it.
    
    This is part of XSA-452 / CVE-2023-28746.
    
    Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
    Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx>
    (cherry picked from commit 475fa20b7384464210f42bad7195f87bd6f1c63f)
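
    Concretely, the SF/PF encoding can be sanity-checked from userspace.  The
    following is a minimal C sketch, not Xen code: SCF_verw's value is assumed
    here (the real constant lives in xen/arch/x86/include/asm/spec_ctrl.h),
    and x86's PF is modelled as "bottom byte has an even number of set bits".

        #include <assert.h>
        #include <stdio.h>

        #define SCF_verw (1u << 3)   /* assumed value; a single low-byte bit */

        int main(void)
        {
            for ( unsigned int launched = 0; launched <= 1; launched++ )
                for ( unsigned int verw = 0; verw <= 1; verw++ )
                {
                    /* Mirrors the movzbl/shl/movzbl/and/or sequence. */
                    unsigned int val = (launched << 31) | (verw ? SCF_verw : 0);

                    unsigned int sf = val >> 31;                     /* top bit */
                    unsigned int pf = !__builtin_parity(val & 0xff); /* low byte */

                    assert(sf == launched);   /* SF = VCPU_vmx_launched */
                    assert(pf == !verw);      /* PF = !SCF_verw         */
                    printf("launched=%u verw=%u -> SF=%u PF=%u\n",
                           launched, verw, sf, pf);
                }
            return 0;
        }

    This matches the exit path's use: `jpe` (PF=1) skips VERW exactly when
    SCF_verw was clear, and `jns` (SF=0) takes the VMLAUNCH path exactly when
    VCPU_vmx_launched was zero.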
---
 xen/arch/x86/hvm/vmx/entry.S             | 36 ++++++++++++++++++++++++++++----
 xen/arch/x86/include/asm/asm_defns.h     |  8 +++++++
 xen/arch/x86/include/asm/spec_ctrl_asm.h |  7 +++++++
 xen/arch/x86/x86_64/asm-offsets.c        |  1 +
 4 files changed, 48 insertions(+), 4 deletions(-)

diff --git a/xen/arch/x86/hvm/vmx/entry.S b/xen/arch/x86/hvm/vmx/entry.S
index e3f60d5a82..1bead826ca 100644
--- a/xen/arch/x86/hvm/vmx/entry.S
+++ b/xen/arch/x86/hvm/vmx/entry.S
@@ -87,17 +87,39 @@ UNLIKELY_END(realmode)
 
         /* WARNING! `ret`, `call *`, `jmp *` not safe beyond this point. */
        /* SPEC_CTRL_EXIT_TO_VMX   Req: %rsp=regs/cpuinfo              Clob:   */
-        DO_SPEC_CTRL_COND_VERW
+        /*
+         * All speculation safety work happens to be elsewhere.  VERW is after
+         * popping the GPRs, while restoring the guest MSR_SPEC_CTRL is left
+         * to the MSR load list.
+         */
 
         mov  VCPU_hvm_guest_cr2(%rbx),%rax
+        mov  %rax, %cr2
+
+        /*
+         * We need to perform two conditional actions (VERW, and Resume vs
+         * Launch) after popping GPRs.  With some cunning, we can encode both
+         * of these in eflags together.
+         *
+         * Parity is only calculated over the bottom byte of the answer, while
+         * Sign is simply the top bit.
+         *
+         * Therefore, the final OR instruction ends up producing:
+         *   SF = VCPU_vmx_launched
+         *   PF = !SCF_verw
+         */
+        BUILD_BUG_ON(SCF_verw & ~0xff)
+        movzbl VCPU_vmx_launched(%rbx), %ecx
+        shl  $31, %ecx
+        movzbl CPUINFO_spec_ctrl_flags(%rsp), %eax
+        and  $SCF_verw, %eax
+        or   %eax, %ecx
 
         pop  %r15
         pop  %r14
         pop  %r13
         pop  %r12
         pop  %rbp
-        mov  %rax,%cr2
-        cmpb $0,VCPU_vmx_launched(%rbx)
         pop  %rbx
         pop  %r11
         pop  %r10
@@ -108,7 +130,13 @@ UNLIKELY_END(realmode)
         pop  %rdx
         pop  %rsi
         pop  %rdi
-        je   .Lvmx_launch
+
+        jpe  .L_skip_verw
+        /* VERW clobbers ZF, but preserves all others, including SF. */
+        verw STK_REL(CPUINFO_verw_sel, CPUINFO_error_code)(%rsp)
+.L_skip_verw:
+
+        jns  .Lvmx_launch
 
 /*.Lvmx_resume:*/
         VMRESUME
diff --git a/xen/arch/x86/include/asm/asm_defns.h b/xen/arch/x86/include/asm/asm_defns.h
index baaaccb26e..56ae26e542 100644
--- a/xen/arch/x86/include/asm/asm_defns.h
+++ b/xen/arch/x86/include/asm/asm_defns.h
@@ -81,6 +81,14 @@ register unsigned long current_stack_pointer asm("rsp");
 
 #ifdef __ASSEMBLY__
 
+.macro BUILD_BUG_ON condstr, cond:vararg
+        .if \cond
+        .error "Condition \"\condstr\" not satisfied"
+        .endif
+.endm
+/* preprocessor macro to make error message more user friendly */
+#define BUILD_BUG_ON(cond) BUILD_BUG_ON #cond, cond
+
 #ifdef HAVE_AS_QUOTED_SYM
 #define SUBSECTION_LBL(tag)                        \
         .ifndef .L.tag;                            \
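
The assembler BUILD_BUG_ON added above is what lets entry.S assert at build
time that SCF_verw fits in the bottom byte PF is calculated over.  As a rough
C analogue of the same check (using C11 _Static_assert rather than Xen's
C-side BUILD_BUG_ON; the SCF_verw value is assumed for illustration):

    #define SCF_verw (1u << 3)   /* assumed value */

    /* Fails the build if SCF_verw has bits outside the bottom byte,
     * mirroring the .if/.error check in the assembler macro. */
    _Static_assert(!(SCF_verw & ~0xffu), "SCF_verw must fit in a byte");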
diff --git a/xen/arch/x86/include/asm/spec_ctrl_asm.h b/xen/arch/x86/include/asm/spec_ctrl_asm.h
index 6cb7c1b949..525745a066 100644
--- a/xen/arch/x86/include/asm/spec_ctrl_asm.h
+++ b/xen/arch/x86/include/asm/spec_ctrl_asm.h
@@ -152,6 +152,13 @@
 #endif
 .endm
 
+/*
+ * Helper to improve the readability of stack displacements with %rsp in
+ * unusual positions.  Both @field and @top_of_stk should be constants from
+ * the same object.  @top_of_stk should be where %rsp is currently pointing.
+ */
+#define STK_REL(field, top_of_stk) ((field) - (top_of_stk))
+
 .macro DO_SPEC_CTRL_COND_VERW
 /*
  * Requires %rsp=cpuinfo
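
STK_REL's arithmetic is plain offset subtraction, which can be checked from C
with offsetof over a mock layout.  The structs below are hypothetical, heavily
trimmed stand-ins for Xen's cpu_user_regs/cpu_info; only the nesting and field
order matter for the illustration:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    #define STK_REL(field, top_of_stk) ((field) - (top_of_stk))

    struct mock_regs     { uint64_t gprs[15]; uint32_t error_code, entry_vector; };
    struct mock_cpu_info { struct mock_regs guest_cpu_user_regs; uint16_t verw_sel; };

    int main(void)
    {
        ptrdiff_t verw_sel   = offsetof(struct mock_cpu_info, verw_sel);
        ptrdiff_t error_code = offsetof(struct mock_cpu_info,
                                        guest_cpu_user_regs.error_code);

        /* After the GPR pops, %rsp points at error_code, so this is the
         * displacement `verw` applies to %rsp to reach verw_sel. */
        printf("verw displacement: %td\n", STK_REL(verw_sel, error_code));
        return 0;
    }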
diff --git a/xen/arch/x86/x86_64/asm-offsets.c b/xen/arch/x86/x86_64/asm-offsets.c
index 2fc4d9130a..0d33678898 100644
--- a/xen/arch/x86/x86_64/asm-offsets.c
+++ b/xen/arch/x86/x86_64/asm-offsets.c
@@ -135,6 +135,7 @@ void __dummy__(void)
 #endif
 
     OFFSET(CPUINFO_guest_cpu_user_regs, struct cpu_info, guest_cpu_user_regs);
+    OFFSET(CPUINFO_error_code, struct cpu_info, guest_cpu_user_regs.error_code);
     OFFSET(CPUINFO_verw_sel, struct cpu_info, verw_sel);
     OFFSET(CPUINFO_current_vcpu, struct cpu_info, current_vcpu);
     OFFSET(CPUINFO_per_cpu_offset, struct cpu_info, per_cpu_offset);
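
The CPUINFO_error_code constant itself comes out of the standard asm-offsets
technique: a C file evaluates offsetof and embeds the value as text in its
generated assembly, from which the build extracts #defines for use in .S
files.  A rough sketch of the idea, with illustrative macro spellings and a
hypothetical trimmed struct rather than Xen's exact definitions:

    #include <stddef.h>
    #include <stdint.h>

    struct mock_regs     { uint64_t gprs[15]; uint32_t error_code; };
    struct mock_cpu_info { struct mock_regs guest_cpu_user_regs; uint16_t verw_sel; };

    /* Emit the constant into the compiler's .s output as a marker line; a
     * build step then greps these markers into a header of #defines. */
    #define DEFINE(sym, val) \
        asm volatile ( "\n.ascii \"==> #define " #sym " %0\"" :: "i" (val) )
    #define OFFSET(sym, type, member) DEFINE(sym, offsetof(type, member))

    void __dummy__(void)
    {
        OFFSET(CPUINFO_error_code, struct mock_cpu_info,
               guest_cpu_user_regs.error_code);
        OFFSET(CPUINFO_verw_sel, struct mock_cpu_info, verw_sel);
    }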
--
generated by git-patchbot for /home/xen/git/xen.git#stable-4.18