[PATCH 1/3] x86/hvm: Introduce experimental guest CET support
For now, let VMs opt into using CET by setting cet_ss/ibt in the CPUID
policy.  Also extend cr4 handling to permit CR4.CET being set, along with
logic to interlock CR4.CET and CR0.WP.

Everything else will malfunction for now, but this will help adding support
incrementally - there is a lot to do before CET will work properly.

Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
CC: Jan Beulich <JBeulich@xxxxxxxx>
CC: Roger Pau Monné <roger.pau@xxxxxxxxxx>
CC: Wei Liu <wl@xxxxxxx>
---
 xen/arch/x86/hvm/hvm.c                      | 18 ++++++++++++++++--
 xen/include/public/arch-x86/cpufeatureset.h |  4 ++--
 2 files changed, 18 insertions(+), 4 deletions(-)

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index ae37bc434a..28beacc45b 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -976,11 +976,12 @@ const char *hvm_efer_valid(const struct vcpu *v, uint64_t value,
 unsigned long hvm_cr4_guest_valid_bits(const struct domain *d)
 {
     const struct cpuid_policy *p = d->arch.cpuid;
-    bool mce, vmxe;
+    bool mce, vmxe, cet;
 
     /* Logic broken out simply to aid readability below. */
     mce  = p->basic.mce || p->basic.mca;
     vmxe = p->basic.vmx && nestedhvm_enabled(d);
+    cet  = p->feat.cet_ss || p->feat.cet_ibt;
 
     return ((p->basic.vme   ? X86_CR4_VME | X86_CR4_PVI : 0) |
             (p->basic.tsc   ? X86_CR4_TSD               : 0) |
@@ -999,7 +1000,8 @@ unsigned long hvm_cr4_guest_valid_bits(const struct domain *d)
             (p->basic.xsave ? X86_CR4_OSXSAVE           : 0) |
             (p->feat.smep   ? X86_CR4_SMEP              : 0) |
             (p->feat.smap   ? X86_CR4_SMAP              : 0) |
-            (p->feat.pku    ? X86_CR4_PKE               : 0));
+            (p->feat.pku    ? X86_CR4_PKE               : 0) |
+            (cet            ? X86_CR4_CET               : 0));
 }
 
 static int hvm_load_cpu_ctxt(struct domain *d, hvm_domain_context_t *h)
@@ -2289,6 +2291,12 @@ int hvm_set_cr0(unsigned long value, bool may_defer)
         }
     }
 
+    if ( !(value & X86_CR0_WP) && (v->arch.hvm.guest_cr[4] & X86_CR4_CET) )
+    {
+        gprintk(XENLOG_DEBUG, "Trying to clear WP with CET set\n");
+        return X86EMUL_EXCEPTION;
+    }
+
     if ( (value & X86_CR0_PG) && !(old_value & X86_CR0_PG) )
     {
         if ( v->arch.hvm.guest_efer & EFER_LME )
@@ -2444,6 +2452,12 @@ int hvm_set_cr4(unsigned long value, bool may_defer)
         }
     }
 
+    if ( (value & X86_CR4_CET) && !(v->arch.hvm.guest_cr[0] & X86_CR0_WP) )
+    {
+        gprintk(XENLOG_DEBUG, "Trying to set CET without WP\n");
+        return X86EMUL_EXCEPTION;
+    }
+
     old_cr = v->arch.hvm.guest_cr[4];
 
     if ( (value & X86_CR4_PCIDE) && !(old_cr & X86_CR4_PCIDE) &&
diff --git a/xen/include/public/arch-x86/cpufeatureset.h b/xen/include/public/arch-x86/cpufeatureset.h
index c42f56bdd4..6f94a73408 100644
--- a/xen/include/public/arch-x86/cpufeatureset.h
+++ b/xen/include/public/arch-x86/cpufeatureset.h
@@ -232,7 +232,7 @@ XEN_CPUFEATURE(UMIP,          6*32+ 2) /*S  User Mode Instruction Prevention */
 XEN_CPUFEATURE(PKU,           6*32+ 3) /*H  Protection Keys for Userspace */
 XEN_CPUFEATURE(OSPKE,         6*32+ 4) /*!  OS Protection Keys Enable */
 XEN_CPUFEATURE(AVX512_VBMI2,  6*32+ 6) /*A  Additional AVX-512 Vector Byte Manipulation Instrs */
-XEN_CPUFEATURE(CET_SS,        6*32+ 7) /*   CET - Shadow Stacks */
+XEN_CPUFEATURE(CET_SS,        6*32+ 7) /*h  CET - Shadow Stacks */
 XEN_CPUFEATURE(GFNI,          6*32+ 8) /*A  Galois Field Instrs */
 XEN_CPUFEATURE(VAES,          6*32+ 9) /*A  Vector AES Instrs */
 XEN_CPUFEATURE(VPCLMULQDQ,    6*32+10) /*A  Vector Carry-less Multiplication Instrs */
@@ -267,7 +267,7 @@ XEN_CPUFEATURE(SRBDS_CTRL,    9*32+ 9) /*   MSR_MCU_OPT_CTRL and RNGDS_MITG_DIS. */
 XEN_CPUFEATURE(MD_CLEAR,      9*32+10) /*A  VERW clears microarchitectural buffers */
 XEN_CPUFEATURE(TSX_FORCE_ABORT, 9*32+13) /* MSR_TSX_FORCE_ABORT.RTM_ABORT */
 XEN_CPUFEATURE(SERIALIZE,     9*32+14) /*a  SERIALIZE insn */
-XEN_CPUFEATURE(CET_IBT,       9*32+20) /*   CET - Indirect Branch Tracking */
+XEN_CPUFEATURE(CET_IBT,       9*32+20) /*h  CET - Indirect Branch Tracking */
 XEN_CPUFEATURE(IBRSB,         9*32+26) /*A  IBRS and IBPB support (used by Intel) */
 XEN_CPUFEATURE(STIBP,         9*32+27) /*A  STIBP */
 XEN_CPUFEATURE(L1D_FLUSH,     9*32+28) /*S  MSR_FLUSH_CMD and L1D flush. */
-- 
2.11.0
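The interlock the patch enforces amounts to: CR4.CET may only be set while
CR0.WP is set, and WP may not be cleared while CET is set. A minimal,
standalone C sketch of the same two checks follows, outside of Xen's
hvm_set_cr0()/hvm_set_cr4() context; the struct and helper names below are
illustrative only and are not part of the patch.

    #include <stdbool.h>

    #define X86_CR0_WP  (1UL << 16)    /* CR0.WP is bit 16 */
    #define X86_CR4_CET (1UL << 23)    /* CR4.CET is bit 23 */

    /* Illustrative guest register state, not Xen's actual structures. */
    struct guest_regs {
        unsigned long cr0, cr4;
    };

    /* Writing CR0: clearing WP is rejected while CR4.CET is set. */
    static bool cr0_write_ok(const struct guest_regs *r, unsigned long new_cr0)
    {
        return !( !(new_cr0 & X86_CR0_WP) && (r->cr4 & X86_CR4_CET) );
    }

    /* Writing CR4: setting CET is rejected while CR0.WP is clear. */
    static bool cr4_write_ok(const struct guest_regs *r, unsigned long new_cr4)
    {
        return !( (new_cr4 & X86_CR4_CET) && !(r->cr0 & X86_CR0_WP) );
    }

In the patch itself, a failed check results in X86EMUL_EXCEPTION (i.e. #GP
being delivered to the guest) rather than a boolean return.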