
[xen staging-4.16] x86/vmx: Perform VERW flushing later in the VMExit path



commit 3bf73a02cb770682587c1c7e3c254ddb18e7b261
Author:     Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
AuthorDate: Fri Jun 23 11:32:00 2023 +0100
Commit:     Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
CommitDate: Tue Mar 12 16:15:35 2024 +0000

    x86/vmx: Perform VERW flushing later in the VMExit path
    
    Broken out of the following patch because this change is subtle enough on its
    own.  See it for the rationale of why we're moving VERW.
    
    As for how, extend the trick already used to hold one condition in
    flags (RESUME vs LAUNCH) through the POPing of GPRs.
    
    Move the MOV CR earlier.  Intel specify flags to be undefined across it.
    
    Encode the two conditions we want using SF and PF.  See the code comment for
    exactly how.
    
    Leave a comment to explain the lack of any content around
    SPEC_CTRL_EXIT_TO_VMX, but leave the block in place.  Sod's law says if we
    delete it, we'll need to reintroduce it.
    
    This is part of XSA-452 / CVE-2023-28746.
    
    Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
    Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx>
    (cherry picked from commit 475fa20b7384464210f42bad7195f87bd6f1c63f)
---
 xen/arch/x86/hvm/vmx/entry.S        | 36 ++++++++++++++++++++++++++++++++----
 xen/arch/x86/x86_64/asm-offsets.c   |  1 +
 xen/include/asm-x86/asm_defns.h     |  8 ++++++++
 xen/include/asm-x86/spec_ctrl_asm.h |  7 +++++++
 4 files changed, 48 insertions(+), 4 deletions(-)

diff --git a/xen/arch/x86/hvm/vmx/entry.S b/xen/arch/x86/hvm/vmx/entry.S
index 5f5de45a13..cdde76e138 100644
--- a/xen/arch/x86/hvm/vmx/entry.S
+++ b/xen/arch/x86/hvm/vmx/entry.S
@@ -87,17 +87,39 @@ UNLIKELY_END(realmode)
 
         /* WARNING! `ret`, `call *`, `jmp *` not safe beyond this point. */
         /* SPEC_CTRL_EXIT_TO_VMX   Req: %rsp=regs/cpuinfo              Clob:    */
-        DO_SPEC_CTRL_COND_VERW
+        /*
+         * All speculation safety work happens to be elsewhere.  VERW is after
+         * popping the GPRs, while restoring the guest MSR_SPEC_CTRL is left
+         * to the MSR load list.
+         */
 
         mov  VCPU_hvm_guest_cr2(%rbx),%rax
+        mov  %rax, %cr2
+
+        /*
+         * We need to perform two conditional actions (VERW, and Resume vs
+         * Launch) after popping GPRs.  With some cunning, we can encode both
+         * of these in eflags together.
+         *
+         * Parity is only calculated over the bottom byte of the answer, while
+         * Sign is simply the top bit.
+         *
+         * Therefore, the final OR instruction ends up producing:
+         *   SF = VCPU_vmx_launched
+         *   PF = !SCF_verw
+         */
+        BUILD_BUG_ON(SCF_verw & ~0xff)
+        movzbl VCPU_vmx_launched(%rbx), %ecx
+        shl  $31, %ecx
+        movzbl CPUINFO_spec_ctrl_flags(%rsp), %eax
+        and  $SCF_verw, %eax
+        or   %eax, %ecx
 
         pop  %r15
         pop  %r14
         pop  %r13
         pop  %r12
         pop  %rbp
-        mov  %rax,%cr2
-        cmpb $0,VCPU_vmx_launched(%rbx)
         pop  %rbx
         pop  %r11
         pop  %r10
@@ -108,7 +130,13 @@ UNLIKELY_END(realmode)
         pop  %rdx
         pop  %rsi
         pop  %rdi
-        je   .Lvmx_launch
+
+        jpe  .L_skip_verw
+        /* VERW clobbers ZF, but preserves all others, including SF. */
+        verw STK_REL(CPUINFO_verw_sel, CPUINFO_error_code)(%rsp)
+.L_skip_verw:
+
+        jns  .Lvmx_launch
 
 /*.Lvmx_resume:*/
         VMRESUME
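
As a quick sanity check of the SF/PF trick above, the encoding can be
reproduced in a few lines of C.  This is an illustrative sketch, not Xen
code: SCF_verw's value is assumed here to be a single low-byte bit (which
is exactly what the BUILD_BUG_ON enforces), and __builtin_popcount is the
GCC/Clang builtin:

    #include <stdio.h>
    #include <stdint.h>

    #define SCF_verw (1u << 3)  /* assumed value; the real one lives in Xen's headers */

    int main(void)
    {
        for ( unsigned int launched = 0; launched <= 1; launched++ )
            for ( unsigned int verw = 0; verw <= 1; verw++ )
            {
                uint32_t ecx = launched << 31;       /* movzbl; shl $31, %ecx */
                uint32_t eax = verw ? SCF_verw : 0;  /* movzbl; and $SCF_verw, %eax */
                uint32_t res = ecx | eax;            /* or %eax, %ecx */

                /* SF is simply the top bit of the result. */
                int sf = res >> 31;
                /* PF is set when the bottom byte has an even number of set bits. */
                int pf = !(__builtin_popcount(res & 0xff) & 1);

                printf("launched=%u verw=%u -> SF=%d PF=%d\n",
                       launched, verw, sf, pf);
            }

        return 0;
    }

All four combinations print SF == launched and PF == !verw, which is what
the jpe above (skip VERW when PF is set) and jns (launch when SF is clear)
rely on.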
diff --git a/xen/arch/x86/x86_64/asm-offsets.c b/xen/arch/x86/x86_64/asm-offsets.c
index 31fa63b77f..a4e94d6930 100644
--- a/xen/arch/x86/x86_64/asm-offsets.c
+++ b/xen/arch/x86/x86_64/asm-offsets.c
@@ -135,6 +135,7 @@ void __dummy__(void)
 #endif
 
     OFFSET(CPUINFO_guest_cpu_user_regs, struct cpu_info, guest_cpu_user_regs);
+    OFFSET(CPUINFO_error_code, struct cpu_info, guest_cpu_user_regs.error_code);
     OFFSET(CPUINFO_verw_sel, struct cpu_info, verw_sel);
     OFFSET(CPUINFO_current_vcpu, struct cpu_info, current_vcpu);
     OFFSET(CPUINFO_per_cpu_offset, struct cpu_info, per_cpu_offset);
diff --git a/xen/include/asm-x86/asm_defns.h b/xen/include/asm-x86/asm_defns.h
index d9431180cf..abc6822b08 100644
--- a/xen/include/asm-x86/asm_defns.h
+++ b/xen/include/asm-x86/asm_defns.h
@@ -81,6 +81,14 @@ register unsigned long current_stack_pointer asm("rsp");
 
 #ifdef __ASSEMBLY__
 
+.macro BUILD_BUG_ON condstr, cond:vararg
+        .if \cond
+        .error "Condition \"\condstr\" not satisfied"
+        .endif
+.endm
+/* preprocessor macro to make error message more user friendly */
+#define BUILD_BUG_ON(cond) BUILD_BUG_ON #cond, cond
+
 #ifdef HAVE_AS_QUOTED_SYM
 #define SUBSECTION_LBL(tag)                        \
         .ifndef .L.tag;                            \
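
For reference, the entry.S use above, BUILD_BUG_ON(SCF_verw & ~0xff), goes
through the C preprocessor first and then expands under this assembler macro
to roughly:

        .if SCF_verw & ~0xff
        .error "Condition \"SCF_verw & ~0xff\" not satisfied"
        .endif

i.e. the build fails at assembly time should SCF_verw ever stop fitting in
the bottom byte, which is all that parity is calculated over.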
diff --git a/xen/include/asm-x86/spec_ctrl_asm.h b/xen/include/asm-x86/spec_ctrl_asm.h
index 0e69971f66..e807ff6d1d 100644
--- a/xen/include/asm-x86/spec_ctrl_asm.h
+++ b/xen/include/asm-x86/spec_ctrl_asm.h
@@ -169,6 +169,13 @@
 #endif
 .endm
 
+/*
+ * Helper to improve the readability of stack displacements with %rsp in
+ * unusual positions.  Both @field and @top_of_stk should be constants from
+ * the same object.  @top_of_stk should be where %rsp is currently pointing.
+ */
+#define STK_REL(field, top_of_stk) ((field) - (top_of_stk))
+
 .macro DO_SPEC_CTRL_COND_VERW
 /*
  * Requires %rsp=cpuinfo
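
To see what STK_REL is doing in the entry.S hunk above: once the GPRs have
been popped, %rsp no longer points at the base of the register block but at
its error_code slot, so the VERW memory operand has to be expressed relative
to that.  An illustrative C sketch of the arithmetic, using made-up offsets
in place of the generated asm-offsets values:

    /* Hypothetical offsets for illustration only; the real values are
     * generated into asm-offsets.h at build time. */
    #define CPUINFO_error_code 0x68
    #define CPUINFO_verw_sel   0xd8

    #define STK_REL(field, top_of_stk) ((field) - (top_of_stk))

    /*
     * With %rsp pointing at the error_code slot,
     *   verw STK_REL(CPUINFO_verw_sel, CPUINFO_error_code)(%rsp)
     * assembles as verw 0x70(%rsp) under these made-up numbers.
     */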
--
generated by git-patchbot for /home/xen/git/xen.git#staging-4.16



 

