[Xen-changelog] [xen master] x86/cpuid: Calculate a guests xfeature_mask from its featureset
commit c52319642bb9069436d4aec75361049f5fac63ed
Author:     Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
AuthorDate: Thu Jun 2 12:08:42 2016 +0100
Commit:     Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
CommitDate: Fri Jun 3 15:30:38 2016 +0100

    x86/cpuid: Calculate a guests xfeature_mask from its featureset

    libxc currently performs the xstate calculation for guests, and provides
    the information to Xen to be used when satisfying CPUID traps.  (There
    is further work planned to improve this arrangement, but the worst a
    buggy toolstack can do is make junk appear in the cpuid leaves for the
    guest.)

    dom0 however has no policy constructed for it, and certain fields filter
    straight through from hardware.  Linux queries CPUID.D[0].{EAX/EDX}
    alone to choose a setting for %xcr0, which is a valid action to take,
    but features such as MPX and PKRU are not supported for PV guests.  As
    a result, Linux, using leaked hardware information, fails to set %xcr0
    on newer Skylake hardware with PKRU support, and crashes.

    As an interim solution, dynamically calculate the correct xfeature_mask
    and xstate_size to report to the guest for CPUID.D[0] queries.  This
    ensures that domains don't see leaked hardware values, even when no
    cpuid policy is provided.

    Similarly, CPUID.D[1].{ECX/EDX} represent the applicable settings for
    MSR_XSS.  As Xen doesn't yet support any XSS states in guests,
    unconditionally zero them.

    Reported-by: Luwei Kang <luwei.kang@xxxxxxxxx>
    Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
    Tested-by: Luwei Kang <luwei.kang@xxxxxxxxx>
    Release-acked-by: Wei Liu <wei.liu2@xxxxxxxxxx>
    Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx>
---
 xen/arch/x86/hvm/hvm.c       | 53 ++++++++++++++++++++++++++++++++++++++++++--
 xen/arch/x86/traps.c         | 35 ++++++++++++++++++++---------
 xen/arch/x86/xstate.c        |  2 +-
 xen/include/asm-x86/xstate.h | 32 +++++++++++++++++---------
 4 files changed, 99 insertions(+), 23 deletions(-)

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index bb98051..d224bc7 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -3362,7 +3362,7 @@ void hvm_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
 
     switch ( input )
     {
-        unsigned int _ecx, _edx;
+        unsigned int _ebx, _ecx, _edx;
 
     case 0x1:
         /* Fix up VLAPIC details. */
@@ -3443,6 +3443,51 @@ void hvm_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
         switch ( count )
         {
         case 0:
+        {
+            uint64_t xfeature_mask = XSTATE_FP_SSE;
+            uint32_t xstate_size = XSTATE_AREA_MIN_SIZE;
+
+            if ( _ecx & cpufeat_mask(X86_FEATURE_AVX) )
+            {
+                xfeature_mask |= XSTATE_YMM;
+                xstate_size = max(xstate_size,
+                                  xstate_offsets[_XSTATE_YMM] +
+                                  xstate_sizes[_XSTATE_YMM]);
+            }
+
+            _ecx = 0;
+            hvm_cpuid(7, NULL, &_ebx, &_ecx, NULL);
+
+            if ( _ebx & cpufeat_mask(X86_FEATURE_MPX) )
+            {
+                xfeature_mask |= XSTATE_BNDREGS | XSTATE_BNDCSR;
+                xstate_size = max(xstate_size,
+                                  xstate_offsets[_XSTATE_BNDCSR] +
+                                  xstate_sizes[_XSTATE_BNDCSR]);
+            }
+
+            if ( _ebx & cpufeat_mask(X86_FEATURE_PKU) )
+            {
+                xfeature_mask |= XSTATE_PKRU;
+                xstate_size = max(xstate_size,
+                                  xstate_offsets[_XSTATE_PKRU] +
+                                  xstate_sizes[_XSTATE_PKRU]);
+            }
+
+            hvm_cpuid(0x80000001, NULL, NULL, &_ecx, NULL);
+
+            if ( _ecx & cpufeat_mask(X86_FEATURE_LWP) )
+            {
+                xfeature_mask |= XSTATE_LWP;
+                xstate_size = max(xstate_size,
+                                  xstate_offsets[_XSTATE_LWP] +
+                                  xstate_sizes[_XSTATE_LWP]);
+            }
+
+            *eax = (uint32_t)xfeature_mask;
+            *edx = (uint32_t)(xfeature_mask >> 32);
+            *ecx = xstate_size;
+
             /*
              * Always read CPUID[0xD,0].EBX from hardware, rather than domain
              * policy.  It varies with enabled xstate, and the correct xcr0 is
@@ -3450,6 +3495,8 @@ void hvm_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
              */
             cpuid_count(input, count, &dummy, ebx, &dummy, &dummy);
             break;
+        }
+
         case 1:
             *eax &= hvm_featureset[FEATURESET_Da1];
@@ -3463,7 +3510,9 @@ void hvm_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
                 cpuid_count(input, count, &dummy, ebx, &dummy, &dummy);
             }
             else
-                *ebx = *ecx = *edx = 0;
+                *ebx = 0;
+
+            *ecx = *edx = 0;
             break;
         }
         break;
diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
index c4e0e6f..1b42841 100644
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -928,7 +928,7 @@ void pv_cpuid(struct cpu_user_regs *regs)
 
     switch ( leaf )
    {
-        uint32_t tmp;
+        uint32_t tmp, _ecx;
 
     case 0x00000001:
         c &= pv_featureset[FEATURESET_1c];
@@ -1087,19 +1087,33 @@ void pv_cpuid(struct cpu_user_regs *regs)
         break;
 
     case XSTATE_CPUID:
-        if ( !((!is_control_domain(currd) && !is_hardware_domain(currd)
-                ? ({
-                    uint32_t ecx;
-
-                    domain_cpuid(currd, 1, 0, &tmp, &tmp, &ecx, &tmp);
-                    ecx & pv_featureset[FEATURESET_1c];
-                  })
-                : cpuid_ecx(1)) & cpufeat_mask(X86_FEATURE_XSAVE)) ||
-             subleaf >= 63 )
+
+        if ( !is_control_domain(currd) && !is_hardware_domain(currd) )
+            domain_cpuid(currd, 1, 0, &tmp, &tmp, &_ecx, &tmp);
+        else
+            _ecx = cpuid_ecx(1);
+        _ecx &= pv_featureset[FEATURESET_1c];
+
+        if ( !(_ecx & cpufeat_mask(X86_FEATURE_XSAVE)) || subleaf >= 63 )
             goto unsupported;
 
         switch ( subleaf )
         {
         case 0:
+        {
+            uint64_t xfeature_mask = XSTATE_FP_SSE;
+            uint32_t xstate_size = XSTATE_AREA_MIN_SIZE;
+
+            if ( _ecx & cpufeat_mask(X86_FEATURE_AVX) )
+            {
+                xfeature_mask |= XSTATE_YMM;
+                xstate_size = (xstate_offsets[_XSTATE_YMM] +
+                               xstate_sizes[_XSTATE_YMM]);
+            }
+
+            a = (uint32_t)xfeature_mask;
+            d = (uint32_t)(xfeature_mask >> 32);
+            c = xstate_size;
+
             /*
              * Always read CPUID.0xD[ECX=0].EBX from hardware, rather than
              * domain policy.  It varies with enabled xstate, and the correct
@@ -1108,6 +1122,7 @@ void pv_cpuid(struct cpu_user_regs *regs)
             if ( !is_control_domain(currd) && !is_hardware_domain(currd) )
                 cpuid_count(leaf, subleaf, &tmp, &b, &tmp, &tmp);
             break;
+        }
 
         case 1:
             a &= pv_featureset[FEATURESET_Da1];
diff --git a/xen/arch/x86/xstate.c b/xen/arch/x86/xstate.c
index a0cfcc2..1fd1ce8 100644
--- a/xen/arch/x86/xstate.c
+++ b/xen/arch/x86/xstate.c
@@ -24,7 +24,7 @@ static u32 __read_mostly xsave_cntxt_size;
 
 /* A 64-bit bitmask of the XSAVE/XRSTOR features supported by processor. */
 u64 __read_mostly xfeature_mask;
-static unsigned int *__read_mostly xstate_offsets;
+unsigned int *__read_mostly xstate_offsets;
 unsigned int *__read_mostly xstate_sizes;
 u64 __read_mostly xstate_align;
 static unsigned int __read_mostly xstate_features;
diff --git a/xen/include/asm-x86/xstate.h b/xen/include/asm-x86/xstate.h
index 4535354..51a9ed4 100644
--- a/xen/include/asm-x86/xstate.h
+++ b/xen/include/asm-x86/xstate.h
@@ -26,16 +26,27 @@
 #define XSAVE_HDR_OFFSET          FXSAVE_SIZE
 #define XSTATE_AREA_MIN_SIZE      (FXSAVE_SIZE + XSAVE_HDR_SIZE)
 
-#define XSTATE_FP      (1ULL << 0)
-#define XSTATE_SSE     (1ULL << 1)
-#define XSTATE_YMM     (1ULL << 2)
-#define XSTATE_BNDREGS (1ULL << 3)
-#define XSTATE_BNDCSR  (1ULL << 4)
-#define XSTATE_OPMASK  (1ULL << 5)
-#define XSTATE_ZMM     (1ULL << 6)
-#define XSTATE_HI_ZMM  (1ULL << 7)
-#define XSTATE_PKRU    (1ULL << 9)
-#define XSTATE_LWP     (1ULL << 62) /* AMD lightweight profiling */
+#define _XSTATE_FP                0
+#define XSTATE_FP                 (1ULL << _XSTATE_FP)
+#define _XSTATE_SSE               1
+#define XSTATE_SSE                (1ULL << _XSTATE_SSE)
+#define _XSTATE_YMM               2
+#define XSTATE_YMM                (1ULL << _XSTATE_YMM)
+#define _XSTATE_BNDREGS           3
+#define XSTATE_BNDREGS            (1ULL << _XSTATE_BNDREGS)
+#define _XSTATE_BNDCSR            4
+#define XSTATE_BNDCSR             (1ULL << _XSTATE_BNDCSR)
+#define _XSTATE_OPMASK            5
+#define XSTATE_OPMASK             (1ULL << _XSTATE_OPMASK)
+#define _XSTATE_ZMM               6
+#define XSTATE_ZMM                (1ULL << _XSTATE_ZMM)
+#define _XSTATE_HI_ZMM            7
+#define XSTATE_HI_ZMM             (1ULL << _XSTATE_HI_ZMM)
+#define _XSTATE_PKRU              9
+#define XSTATE_PKRU               (1ULL << _XSTATE_PKRU)
+#define _XSTATE_LWP               62
+#define XSTATE_LWP                (1ULL << _XSTATE_LWP)
+
 #define XSTATE_FP_SSE  (XSTATE_FP | XSTATE_SSE)
 #define XCNTXT_MASK    (XSTATE_FP | XSTATE_SSE | XSTATE_YMM | XSTATE_OPMASK | \
                         XSTATE_ZMM | XSTATE_HI_ZMM | XSTATE_NONLAZY)
@@ -51,6 +62,7 @@
 extern u64 xfeature_mask;
 extern u64 xstate_align;
+extern unsigned int *xstate_offsets;
 extern unsigned int *xstate_sizes;
 
 /* extended state save area */
--
generated by git-patchbot for /home/xen/git/xen.git#master

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
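[Editorial note, not part of the commit or the email above] The crash described in the commit message comes from a kernel treating CPUID.D[0].{EAX,EDX} as the set of states it may enable in %xcr0. Below is a minimal, hypothetical C sketch of that consumer-side logic; the cpuid_count(), xsetbv() and setup_xcr0() names are illustrative rather than Linux's or Xen's actual code, and XSETBV is privileged, so this only makes sense in kernel context.

#include <stdint.h>

/* Hypothetical CPUID wrapper: leaf in EAX, subleaf in ECX. */
static void cpuid_count(uint32_t leaf, uint32_t subleaf,
                        uint32_t *eax, uint32_t *ebx,
                        uint32_t *ecx, uint32_t *edx)
{
    __asm__ volatile ("cpuid"
                      : "=a" (*eax), "=b" (*ebx), "=c" (*ecx), "=d" (*edx)
                      : "0" (leaf), "2" (subleaf));
}

/* Hypothetical XSETBV wrapper: ECX selects the XCR, EDX:EAX is the value. */
static void xsetbv(uint32_t index, uint64_t value)
{
    __asm__ volatile ("xsetbv"
                      :: "c" (index),
                         "a" ((uint32_t)value),
                         "d" ((uint32_t)(value >> 32)));
}

void setup_xcr0(void)
{
    uint32_t eax, ebx, ecx, edx;

    /* CPUID.D[0].EDX:EAX is the bitmap of XCR0-manageable state components. */
    cpuid_count(0xd, 0, &eax, &ebx, &ecx, &edx);

    /*
     * If the hypervisor leaks host-only bits here (e.g. PKRU to a PV
     * guest), the XSETBV below is rejected and the kernel crashes -- the
     * failure this commit avoids by deriving the leaf from the guest's
     * featureset instead of from hardware.
     */
    xsetbv(0, ((uint64_t)edx << 32) | eax);
}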
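[Editorial note, not part of the commit or the email above] The xstate_offsets[]/xstate_sizes[] arrays used by the patch are filled from host CPUID.D[i] sub-leaves: for each state component i >= 2, EAX reports the component's size and EBX its offset in the non-compacted XSAVE layout, so the save area needed for a given mask is the largest enabled offset + size. That is the same max() pattern the patch applies per guest feature. A standalone sketch of the calculation, with a hypothetical cpuid_count() wrapper and xstate_uncompacted_size() name:

#include <stdint.h>

/* Hypothetical CPUID wrapper: leaf in EAX, subleaf in ECX. */
static void cpuid_count(uint32_t leaf, uint32_t subleaf,
                        uint32_t *eax, uint32_t *ebx,
                        uint32_t *ecx, uint32_t *edx)
{
    __asm__ volatile ("cpuid"
                      : "=a" (*eax), "=b" (*ebx), "=c" (*ecx), "=d" (*edx)
                      : "0" (leaf), "2" (subleaf));
}

/* Legacy FXSAVE region (512 bytes) plus the XSAVE header (64 bytes). */
#define XSTATE_AREA_MIN_SIZE (512 + 64)

/* Non-compacted XSAVE area size needed for the state components in xcr0. */
static uint32_t xstate_uncompacted_size(uint64_t xcr0)
{
    uint32_t size = XSTATE_AREA_MIN_SIZE;
    unsigned int i;

    for ( i = 2; i < 63; i++ )
    {
        uint32_t comp_size, comp_offset, ecx, edx;

        if ( !(xcr0 & (1ULL << i)) )
            continue;

        /* CPUID.D[i]: EAX = size of component i, EBX = its offset. */
        cpuid_count(0xd, i, &comp_size, &comp_offset, &ecx, &edx);

        if ( comp_offset + comp_size > size )
            size = comp_offset + comp_size;
    }

    return size;
}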