[Xen-changelog] [xen stable-4.6] x86/spec_ctrl: Rename bits of infrastructure to avoid NATIVE and VMEXIT
commit c1be09ec470a751e925a8bf3db9cdcfa8fa751ce
Author: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
AuthorDate: Tue May 29 10:59:39 2018 +0200
Commit: Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Tue May 29 10:59:39 2018 +0200
x86/spec_ctrl: Rename bits of infrastructure to avoid NATIVE and VMEXIT
In hindsight, using NATIVE and VMEXIT as naming terminology was not clever.
A future change wants to split SPEC_CTRL_EXIT_TO_GUEST into PV and HVM
specific implementations, and using VMEXIT as a term is completely wrong.
Take the opportunity to fix some stale documentation in spec_ctrl_asm.h.  The
IST helpers were missing from the large comment block, and since
SPEC_CTRL_ENTRY_FROM_INTR_IST was introduced, we've gained a new piece of
functionality which currently depends on the fine grain control, which exists
in lieu of livepatching.  Note this in the comment.
No functional change.
Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Reviewed-by: Wei Liu <wei.liu2@xxxxxxxxxx>
Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx>
master commit: d9822b8a38114e96e4516dc998f4055249364d5d
master date: 2018-05-16 12:19:10 +0100
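To illustrate the "fine grain control" referred to above: the IST entry paths
test a per-CPU flags byte at runtime instead of relying on boot-time
ALTERNATIVE patching, since the alternatives infrastructure is not NMI/#MC
safe (see the TODO removed below) and this branch has no livepatching to fall
back on.  A minimal C sketch of the idea, not the actual implementation
(which lives in assembly in spec_ctrl_asm.h); get_cpu_info() and the field
names are assumed to match struct cpu_info on this branch:

    /*
     * Sketch only: roughly what SPEC_CTRL_ENTRY_FROM_INTR_IST does in
     * assembly.  Testing SCF_ist_wrmsr at runtime lets the S3 resume
     * path suppress the WRMSR until the microcode introducing
     * MSR_SPEC_CTRL has been reloaded.
     */
    struct cpu_info *info = get_cpu_info();

    if ( info->spec_ctrl_flags & SCF_ist_wrmsr )
        wrmsrl(MSR_SPEC_CTRL, info->xen_spec_ctrl);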
---
xen/arch/x86/cpu/common.c | 8 ++++----
xen/arch/x86/hvm/svm/entry.S | 4 ++--
xen/arch/x86/hvm/vmx/entry.S | 4 ++--
xen/arch/x86/spec_ctrl.c | 20 ++++++++++----------
xen/arch/x86/x86_64/compat/entry.S | 2 +-
xen/arch/x86/x86_64/entry.S | 2 +-
xen/include/asm-x86/cpufeature.h | 4 ++--
xen/include/asm-x86/spec_ctrl_asm.h | 36 +++++++++++++++++++++++++-----------
8 files changed, 47 insertions(+), 33 deletions(-)
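For quick reference, the renames in this patch, summarised from the diff
below:

    X86_FEATURE_RSB_NATIVE        ->  X86_FEATURE_SC_RSB_PV
    X86_FEATURE_RSB_VMEXIT        ->  X86_FEATURE_SC_RSB_HVM
    opt_rsb_native                ->  opt_rsb_pv
    opt_rsb_vmexit                ->  opt_rsb_hvm
    DO_SPEC_CTRL_ENTRY_FROM_VMEXIT -> DO_SPEC_CTRL_ENTRY_FROM_HVM
    SPEC_CTRL_ENTRY_FROM_VMEXIT   ->  SPEC_CTRL_ENTRY_FROM_HVM
    SPEC_CTRL_EXIT_TO_GUEST       ->  SPEC_CTRL_EXIT_TO_PV  (PV paths)
                                      SPEC_CTRL_EXIT_TO_HVM (HVM paths)

Note that the command line tokens "rsb_native" and "rsb_vmexit" parsed in
spec_ctrl.c are left unchanged.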
diff --git a/xen/arch/x86/cpu/common.c b/xen/arch/x86/cpu/common.c
index 3da0979277..1ba1622e72 100644
--- a/xen/arch/x86/cpu/common.c
+++ b/xen/arch/x86/cpu/common.c
@@ -362,12 +362,12 @@ void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
if (test_bit(X86_FEATURE_SC_MSR,
boot_cpu_data.x86_capability))
__set_bit(X86_FEATURE_SC_MSR, c->x86_capability);
- if (test_bit(X86_FEATURE_RSB_NATIVE,
+ if (test_bit(X86_FEATURE_SC_RSB_PV,
boot_cpu_data.x86_capability))
- __set_bit(X86_FEATURE_RSB_NATIVE, c->x86_capability);
- if (test_bit(X86_FEATURE_RSB_VMEXIT,
+ __set_bit(X86_FEATURE_SC_RSB_PV, c->x86_capability);
+ if (test_bit(X86_FEATURE_SC_RSB_HVM,
boot_cpu_data.x86_capability))
- __set_bit(X86_FEATURE_RSB_VMEXIT, c->x86_capability);
+ __set_bit(X86_FEATURE_SC_RSB_HVM, c->x86_capability);
/* AND the already accumulated flags with these */
for ( i = 0 ; i < NCAPINTS ; i++ )
diff --git a/xen/arch/x86/hvm/svm/entry.S b/xen/arch/x86/hvm/svm/entry.S
index 706bdd301a..6426452baa 100644
--- a/xen/arch/x86/hvm/svm/entry.S
+++ b/xen/arch/x86/hvm/svm/entry.S
@@ -81,7 +81,7 @@ UNLIKELY_END(svm_trace)
mov VCPU_arch_spec_ctrl(%rbx), %eax
/* WARNING! `ret`, `call *`, `jmp *` not safe beyond this point. */
- SPEC_CTRL_EXIT_TO_GUEST /* Req: a=spec_ctrl %rsp=regs/cpuinfo, Clob: cd */
+ SPEC_CTRL_EXIT_TO_HVM /* Req: a=spec_ctrl %rsp=regs/cpuinfo, Clob: cd */
pop %r15
pop %r14
@@ -106,7 +106,7 @@ UNLIKELY_END(svm_trace)
GET_CURRENT(%rbx)
- SPEC_CTRL_ENTRY_FROM_VMEXIT /* Req: b=curr %rsp=regs/cpuinfo, Clob: acd */
+ SPEC_CTRL_ENTRY_FROM_HVM /* Req: b=curr %rsp=regs/cpuinfo, Clob: acd */
/* WARNING! `ret`, `call *`, `jmp *` not safe before this point. */
mov VCPU_svm_vmcb(%rbx),%rcx
diff --git a/xen/arch/x86/hvm/vmx/entry.S b/xen/arch/x86/hvm/vmx/entry.S
index d43ae26a6b..32e0f87ae4 100644
--- a/xen/arch/x86/hvm/vmx/entry.S
+++ b/xen/arch/x86/hvm/vmx/entry.S
@@ -37,7 +37,7 @@ ENTRY(vmx_asm_vmexit_handler)
movb $1,VCPU_vmx_launched(%rbx)
mov %rax,VCPU_hvm_guest_cr2(%rbx)
- SPEC_CTRL_ENTRY_FROM_VMEXIT /* Req: b=curr %rsp=regs/cpuinfo, Clob: acd */
+ SPEC_CTRL_ENTRY_FROM_HVM /* Req: b=curr %rsp=regs/cpuinfo, Clob: acd */
/* WARNING! `ret`, `call *`, `jmp *` not safe before this point. */
mov %rsp,%rdi
@@ -72,7 +72,7 @@ UNLIKELY_END(realmode)
mov VCPU_arch_spec_ctrl(%rbx), %eax
/* WARNING! `ret`, `call *`, `jmp *` not safe beyond this point. */
- SPEC_CTRL_EXIT_TO_GUEST /* Req: a=spec_ctrl %rsp=regs/cpuinfo, Clob: cd */
+ SPEC_CTRL_EXIT_TO_HVM /* Req: a=spec_ctrl %rsp=regs/cpuinfo, Clob: cd */
mov VCPU_hvm_guest_cr2(%rbx),%rax
diff --git a/xen/arch/x86/spec_ctrl.c b/xen/arch/x86/spec_ctrl.c
index 4fcbba2143..91e18487f0 100644
--- a/xen/arch/x86/spec_ctrl.c
+++ b/xen/arch/x86/spec_ctrl.c
@@ -35,8 +35,8 @@ static enum ind_thunk {
THUNK_JMP,
} opt_thunk __initdata = THUNK_DEFAULT;
static int8_t __initdata opt_ibrs = -1;
-static bool_t __initdata opt_rsb_native = 1;
-static bool_t __initdata opt_rsb_vmexit = 1;
+static bool_t __initdata opt_rsb_pv = 1;
+static bool_t __initdata opt_rsb_hvm = 1;
bool_t __read_mostly opt_ibpb = 1;
uint8_t __read_mostly default_xen_spec_ctrl;
uint8_t __read_mostly default_spec_ctrl_flags;
@@ -69,9 +69,9 @@ static int __init parse_bti(const char *s)
else if ( (val = parse_boolean("ibpb", s, ss)) >= 0 )
opt_ibpb = val;
else if ( (val = parse_boolean("rsb_native", s, ss)) >= 0 )
- opt_rsb_native = val;
+ opt_rsb_pv = val;
else if ( (val = parse_boolean("rsb_vmexit", s, ss)) >= 0 )
- opt_rsb_vmexit = val;
+ opt_rsb_hvm = val;
else
rc = -EINVAL;
@@ -118,8 +118,8 @@ static void __init print_details(enum ind_thunk thunk, uint64_t caps)
default_xen_spec_ctrl & SPEC_CTRL_IBRS ? " IBRS+" :
" IBRS-" : "",
opt_ibpb ? " IBPB" : "",
- boot_cpu_has(X86_FEATURE_RSB_NATIVE) ? " RSB_NATIVE" : "",
- boot_cpu_has(X86_FEATURE_RSB_VMEXIT) ? " RSB_VMEXIT" : "");
+ boot_cpu_has(X86_FEATURE_SC_RSB_PV) ? " RSB_NATIVE" : "",
+ boot_cpu_has(X86_FEATURE_SC_RSB_HVM) ? " RSB_VMEXIT" : "");
}
/* Calculate whether Retpoline is known-safe on this CPU. */
@@ -304,9 +304,9 @@ void __init init_speculation_mitigations(void)
* If a processors speculates to 32bit PV guest kernel mappings, it is
* speculating in 64bit supervisor mode, and can leak data.
*/
- if ( opt_rsb_native )
+ if ( opt_rsb_pv )
{
- __set_bit(X86_FEATURE_RSB_NATIVE, boot_cpu_data.x86_capability);
+ __set_bit(X86_FEATURE_SC_RSB_PV, boot_cpu_data.x86_capability);
default_spec_ctrl_flags |= SCF_ist_rsb;
}
@@ -314,8 +314,8 @@ void __init init_speculation_mitigations(void)
* HVM guests can always poison the RSB to point at Xen supervisor
* mappings.
*/
- if ( opt_rsb_vmexit )
- __set_bit(X86_FEATURE_RSB_VMEXIT, boot_cpu_data.x86_capability);
+ if ( opt_rsb_hvm )
+ __set_bit(X86_FEATURE_SC_RSB_HVM, boot_cpu_data.x86_capability);
/* Check we have hardware IBPB support before using it... */
if ( !boot_cpu_has(X86_FEATURE_IBRSB) && !boot_cpu_has(X86_FEATURE_IBPB) )
diff --git a/xen/arch/x86/x86_64/compat/entry.S b/xen/arch/x86/x86_64/compat/entry.S
index c211e9a869..6a48fc50b7 100644
--- a/xen/arch/x86/x86_64/compat/entry.S
+++ b/xen/arch/x86/x86_64/compat/entry.S
@@ -235,7 +235,7 @@ ENTRY(compat_restore_all_guest)
mov VCPU_arch_spec_ctrl(%rbx), %eax
/* WARNING! `ret`, `call *`, `jmp *` not safe beyond this point. */
- SPEC_CTRL_EXIT_TO_GUEST /* Req: a=spec_ctrl %rsp=regs/cpuinfo, Clob: cd */
+ SPEC_CTRL_EXIT_TO_PV /* Req: a=spec_ctrl %rsp=regs/cpuinfo, Clob: cd */
RESTORE_ALL adj=8 compat=1
.Lft0: iretq
diff --git a/xen/arch/x86/x86_64/entry.S b/xen/arch/x86/x86_64/entry.S
index 7c8211ae5a..8cecfd407e 100644
--- a/xen/arch/x86/x86_64/entry.S
+++ b/xen/arch/x86/x86_64/entry.S
@@ -71,7 +71,7 @@ restore_all_guest:
mov %r15d, %eax
/* WARNING! `ret`, `call *`, `jmp *` not safe beyond this point. */
- SPEC_CTRL_EXIT_TO_GUEST /* Req: a=spec_ctrl %rsp=regs/cpuinfo, Clob: cd */
+ SPEC_CTRL_EXIT_TO_PV /* Req: a=spec_ctrl %rsp=regs/cpuinfo, Clob: cd */
RESTORE_ALL
testw $TRAP_syscall,4(%rsp)
diff --git a/xen/include/asm-x86/cpufeature.h b/xen/include/asm-x86/cpufeature.h
index ed4f18cf90..9c8bca9faa 100644
--- a/xen/include/asm-x86/cpufeature.h
+++ b/xen/include/asm-x86/cpufeature.h
@@ -66,8 +66,8 @@
#define X86_FEATURE_IND_THUNK_JMP (3*32+ 2) /* Use IND_THUNK_JMP */
#define X86_FEATURE_XEN_IBPB (3*32+ 3) /* IBRSB || IBPB */
#define X86_FEATURE_SC_MSR (3*32+ 4) /* MSR_SPEC_CTRL used by Xen */
-#define X86_FEATURE_RSB_NATIVE (3*32+ 6) /* RSB overwrite needed for native */
-#define X86_FEATURE_RSB_VMEXIT (3*32+ 7) /* RSB overwrite needed for vmexit */
+#define X86_FEATURE_SC_RSB_PV (3*32+ 6) /* RSB overwrite needed for PV */
+#define X86_FEATURE_SC_RSB_HVM (3*32+ 7) /* RSB overwrite needed for HVM */
#define X86_FEATURE_CONSTANT_TSC (3*32+ 8) /* TSC ticks at a constant rate */
#define X86_FEATURE_NONSTOP_TSC (3*32+ 9) /* TSC does not stop in C states */
#define X86_FEATURE_ARAT (3*32+ 10) /* Always running APIC timer */
diff --git a/xen/include/asm-x86/spec_ctrl_asm.h b/xen/include/asm-x86/spec_ctrl_asm.h
index ab47508b80..be5cba318e 100644
--- a/xen/include/asm-x86/spec_ctrl_asm.h
+++ b/xen/include/asm-x86/spec_ctrl_asm.h
@@ -72,11 +72,14 @@
*
* The following ASM fragments implement this algorithm. See their local
* comments for further details.
- * - SPEC_CTRL_ENTRY_FROM_VMEXIT
+ * - SPEC_CTRL_ENTRY_FROM_HVM
* - SPEC_CTRL_ENTRY_FROM_PV
* - SPEC_CTRL_ENTRY_FROM_INTR
+ * - SPEC_CTRL_ENTRY_FROM_INTR_IST
+ * - SPEC_CTRL_EXIT_TO_XEN_IST
* - SPEC_CTRL_EXIT_TO_XEN
- * - SPEC_CTRL_EXIT_TO_GUEST
+ * - SPEC_CTRL_EXIT_TO_PV
+ * - SPEC_CTRL_EXIT_TO_HVM
*/
.macro DO_OVERWRITE_RSB tmp=rax
@@ -117,7 +120,7 @@
mov %\tmp, %rsp /* Restore old %rsp */
.endm
-.macro DO_SPEC_CTRL_ENTRY_FROM_VMEXIT
+.macro DO_SPEC_CTRL_ENTRY_FROM_HVM
/*
* Requires %rbx=current, %rsp=regs/cpuinfo
* Clobbers %rax, %rcx, %rdx
@@ -216,23 +219,23 @@
.endm
/* Use after a VMEXIT from an HVM guest. */
-#define SPEC_CTRL_ENTRY_FROM_VMEXIT \
+#define SPEC_CTRL_ENTRY_FROM_HVM \
ALTERNATIVE __stringify(ASM_NOP40), \
- DO_OVERWRITE_RSB, X86_FEATURE_RSB_VMEXIT; \
+ DO_OVERWRITE_RSB, X86_FEATURE_SC_RSB_HVM; \
ALTERNATIVE __stringify(ASM_NOP33), \
- DO_SPEC_CTRL_ENTRY_FROM_VMEXIT, X86_FEATURE_SC_MSR
+ DO_SPEC_CTRL_ENTRY_FROM_HVM, X86_FEATURE_SC_MSR
/* Use after an entry from PV context (syscall/sysenter/int80/int82/etc). */
#define SPEC_CTRL_ENTRY_FROM_PV \
ALTERNATIVE __stringify(ASM_NOP40), \
- DO_OVERWRITE_RSB, X86_FEATURE_RSB_NATIVE; \
+ DO_OVERWRITE_RSB, X86_FEATURE_SC_RSB_PV; \
ALTERNATIVE __stringify(ASM_NOP25), \
__stringify(DO_SPEC_CTRL_ENTRY maybexen=0), X86_FEATURE_SC_MSR
/* Use in interrupt/exception context. May interrupt Xen or PV context. */
#define SPEC_CTRL_ENTRY_FROM_INTR \
ALTERNATIVE __stringify(ASM_NOP40), \
- DO_OVERWRITE_RSB, X86_FEATURE_RSB_NATIVE; \
+ DO_OVERWRITE_RSB, X86_FEATURE_SC_RSB_PV; \
ALTERNATIVE __stringify(ASM_NOP39), \
__stringify(DO_SPEC_CTRL_ENTRY maybexen=1), X86_FEATURE_SC_MSR
@@ -241,12 +244,22 @@
ALTERNATIVE __stringify(ASM_NOP23), \
DO_SPEC_CTRL_EXIT_TO_XEN, X86_FEATURE_SC_MSR
-/* Use when exiting to guest context. */
-#define SPEC_CTRL_EXIT_TO_GUEST \
+/* Use when exiting to PV guest context. */
+#define SPEC_CTRL_EXIT_TO_PV \
ALTERNATIVE __stringify(ASM_NOP24), \
DO_SPEC_CTRL_EXIT_TO_GUEST, X86_FEATURE_SC_MSR
-/* TODO: Drop these when the alternatives infrastructure is NMI/#MC safe. */
+/* Use when exiting to HVM guest context. */
+#define SPEC_CTRL_EXIT_TO_HVM \
+ ALTERNATIVE __stringify(ASM_NOP24), \
+ DO_SPEC_CTRL_EXIT_TO_GUEST, X86_FEATURE_SC_MSR
+
+/*
+ * Use in IST interrupt/exception context. May interrupt Xen or PV context.
+ * Fine grain control of SCF_ist_wrmsr is needed for safety in the S3 resume
+ * path to avoid using MSR_SPEC_CTRL before the microcode introducing it has
+ * been reloaded.
+ */
.macro SPEC_CTRL_ENTRY_FROM_INTR_IST
/*
* Requires %rsp=regs, %r14=stack_end
@@ -293,6 +306,7 @@ UNLIKELY_DISPATCH_LABEL(\@_serialise):
UNLIKELY_END(\@_serialise)
.endm
+/* Use when exiting to Xen in IST context. */
.macro SPEC_CTRL_EXIT_TO_XEN_IST
/*
* Requires %rbx=stack_end
--
generated by git-patchbot for /home/xen/git/xen.git#stable-4.6
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/xen-changelog