Re: [Xen-devel] [PATCH for-4.7] x86/cpuid: Calculate a guests xfeature_mask from its featureset
I have finished testing this patch and it works well. Thanks Andrew and all.

-----Original Message-----
From: Andrew Cooper [mailto:andrew.cooper3@xxxxxxxxxx]
Sent: Friday, June 3, 2016 12:48 AM
To: Xen-devel <xen-devel@xxxxxxxxxxxxx>
Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>; Jan Beulich <JBeulich@xxxxxxxx>; Wei Liu <wei.liu2@xxxxxxxxxx>; Kang, Luwei <luwei.kang@xxxxxxxxx>; Han, Huaitong <huaitong.han@xxxxxxxxx>
Subject: [PATCH for-4.7] x86/cpuid: Calculate a guests xfeature_mask from its featureset

libxc currently performs the xstate calculation for guests, and provides the
information to Xen to be used when satisfying CPUID traps.  (There is further
work planned to improve this arrangement, but the worst a buggy toolstack can
do is make junk appear in the cpuid leaves for the guest.)

dom0 however has no policy constructed for it, and certain fields filter
straight through from hardware.  Linux queries CPUID.0xD[0].{EAX,EDX} alone
to choose a setting for %xcr0, which it then acts upon.  However, features
such as MPX and PKRU are not supported for PV guests.  As a result, Linux,
using leaked hardware information, fails to set %xcr0 on newer Skylake
hardware with PKRU support, and crashes.

As an interim solution, dynamically calculate the correct xfeature_mask and
xstate_size to report to the guest for CPUID.0xD[0] queries.  This ensures
that domains don't see leaked hardware values, even when no cpuid policy is
provided.

Similarly, CPUID.0xD[1].{ECX,EDX} represents the applicable settings for
MSR_XSS.  Xen doesn't support any XSS states in guests, so unconditionally
clear them for HVM guests.

Reported-by: Luwei Kang <luwei.kang@xxxxxxxxx>
Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
CC: Jan Beulich <JBeulich@xxxxxxxx>
CC: Wei Liu <wei.liu2@xxxxxxxxxx>
CC: Luwei Kang <luwei.kang@xxxxxxxxx>
CC: Huaitong Han <huaitong.han@xxxxxxxxx>
---
 xen/arch/x86/hvm/hvm.c       | 53 ++++++++++++++++++++++++++++++++++++++++++--
 xen/arch/x86/traps.c         | 50 ++++++++++++++++++++++++++++++++---------
 xen/arch/x86/xstate.c        |  2 +-
 xen/include/asm-x86/xstate.h | 32 +++++++++++++++++---------
 4 files changed, 114 insertions(+), 23 deletions(-)

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index bb98051..72bbed5 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -3362,7 +3362,7 @@ void hvm_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
 
     switch ( input )
     {
-        unsigned int _ecx, _edx;
+        unsigned int _ebx, _ecx, _edx;
 
     case 0x1:
         /* Fix up VLAPIC details. */
@@ -3443,6 +3443,51 @@ void hvm_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
         switch ( count )
         {
         case 0:
+        {
+            uint64_t xfeature_mask = XSTATE_FP_SSE;
+            uint32_t xstate_size = XSTATE_AREA_MIN_SIZE;
+
+            if ( _ecx & cpufeat_mask(X86_FEATURE_AVX) )
+            {
+                xfeature_mask |= XSTATE_YMM;
+                xstate_size = MAX(xstate_size,
+                                  xstate_offsets[_XSTATE_YMM] +
+                                  xstate_sizes[_XSTATE_YMM]);
+            }
+
+            _ecx = 0;
+            hvm_cpuid(7, NULL, &_ebx, &_ecx, NULL);
+
+            if ( _ebx & cpufeat_mask(X86_FEATURE_MPX) )
+            {
+                xfeature_mask |= XSTATE_BNDREGS | XSTATE_BNDCSR;
+                xstate_size = MAX(xstate_size,
+                                  xstate_offsets[_XSTATE_BNDCSR] +
+                                  xstate_sizes[_XSTATE_BNDCSR]);
+            }
+
+            if ( _ebx & cpufeat_mask(X86_FEATURE_PKU) )
+            {
+                xfeature_mask |= XSTATE_PKRU;
+                xstate_size = MAX(xstate_size,
+                                  xstate_offsets[_XSTATE_PKRU] +
+                                  xstate_sizes[_XSTATE_PKRU]);
+            }
+
+            hvm_cpuid(0x80000001, NULL, NULL, &_ecx, NULL);
+
+            if ( _ecx & cpufeat_mask(X86_FEATURE_LWP) )
+            {
+                xfeature_mask |= XSTATE_LWP;
+                xstate_size = MAX(xstate_size,
+                                  xstate_offsets[_XSTATE_LWP] +
+                                  xstate_sizes[_XSTATE_LWP]);
+            }
+
+            *eax = (uint32_t)xfeature_mask;
+            *edx = (uint32_t)(xfeature_mask >> 32);
+            *ecx = xstate_size;
+
             /*
              * Always read CPUID[0xD,0].EBX from hardware, rather than domain
              * policy.  It varies with enabled xstate, and the correct xcr0 is
@@ -3450,6 +3495,8 @@ void hvm_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
              */
             cpuid_count(input, count, &dummy, ebx, &dummy, &dummy);
             break;
+        }
+
         case 1:
             *eax &= hvm_featureset[FEATURESET_Da1];
@@ -3463,7 +3510,9 @@ void hvm_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
                 cpuid_count(input, count, &dummy, ebx, &dummy, &dummy);
             }
             else
-                *ebx = *ecx = *edx = 0;
+                *ebx = 0;
+
+            *ecx = *edx = 0;
             break;
         }
         break;
diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
index 5d7232d..a2688c3 100644
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -928,7 +928,7 @@ void pv_cpuid(struct cpu_user_regs *regs)
 
     switch ( leaf )
     {
-        uint32_t tmp;
+        uint32_t tmp, _ecx;
 
     case 0x00000001:
         c &= pv_featureset[FEATURESET_1c];
@@ -1087,19 +1087,48 @@ void pv_cpuid(struct cpu_user_regs *regs)
         break;
 
     case XSTATE_CPUID:
-        if ( !((!is_control_domain(currd) && !is_hardware_domain(currd)
-                ? ({
-                    uint32_t ecx;
-
-                    domain_cpuid(currd, 1, 0, &tmp, &tmp, &ecx, &tmp);
-                    ecx & pv_featureset[FEATURESET_1c];
-                  })
-                : cpuid_ecx(1)) & cpufeat_mask(X86_FEATURE_XSAVE)) ||
-             subleaf >= 63 )
+
+        if ( !is_control_domain(currd) && !is_hardware_domain(currd) )
+            domain_cpuid(currd, 1, 0, &tmp, &tmp, &_ecx, &tmp);
+        else
+            _ecx = cpuid_ecx(1);
+        _ecx &= pv_featureset[FEATURESET_1c];
+
+        if ( !(_ecx & cpufeat_mask(X86_FEATURE_XSAVE)) || subleaf >= 63 )
             goto unsupported;
 
         switch ( subleaf )
         {
         case 0:
+        {
+            uint64_t xfeature_mask = XSTATE_FP_SSE;
+            uint32_t xstate_size = XSTATE_AREA_MIN_SIZE;
+
+            if ( _ecx & cpufeat_mask(X86_FEATURE_AVX) )
+            {
+                xfeature_mask |= XSTATE_YMM;
+                xstate_size = MAX(xstate_size,
+                                  xstate_offsets[_XSTATE_YMM] +
+                                  xstate_sizes[_XSTATE_YMM]);
+            }
+
+            if ( !is_control_domain(currd) && !is_hardware_domain(currd) )
+                domain_cpuid(currd, 0x80000001, 0, &tmp, &tmp, &_ecx, &tmp);
+            else
+                _ecx = cpuid_ecx(0x80000001);
+            _ecx &= pv_featureset[FEATURESET_e1c];
+
+            if ( _ecx & cpufeat_mask(X86_FEATURE_LWP) )
+            {
+                xfeature_mask |= XSTATE_LWP;
+                xstate_size = MAX(xstate_size,
+                                  xstate_offsets[_XSTATE_LWP] +
+                                  xstate_sizes[_XSTATE_LWP]);
+            }
+
+            a = (uint32_t)xfeature_mask;
+            d = (uint32_t)(xfeature_mask >> 32);
+            c = xstate_size;
+
             /*
              * Always read CPUID.0xD[ECX=0].EBX from hardware, rather than
              * domain policy.  It varies with enabled xstate, and the correct
@@ -1108,6 +1137,7 @@ void pv_cpuid(struct cpu_user_regs *regs)
             if ( !is_control_domain(currd) && !is_hardware_domain(currd) )
                 cpuid_count(leaf, subleaf, &tmp, &b, &tmp, &tmp);
             break;
+        }
 
         case 1:
             a &= pv_featureset[FEATURESET_Da1];
diff --git a/xen/arch/x86/xstate.c b/xen/arch/x86/xstate.c
index a0cfcc2..1fd1ce8 100644
--- a/xen/arch/x86/xstate.c
+++ b/xen/arch/x86/xstate.c
@@ -24,7 +24,7 @@ static u32 __read_mostly xsave_cntxt_size;
 /* A 64-bit bitmask of the XSAVE/XRSTOR features supported by processor. */
 u64 __read_mostly xfeature_mask;
 
-static unsigned int *__read_mostly xstate_offsets;
+unsigned int *__read_mostly xstate_offsets;
 unsigned int *__read_mostly xstate_sizes;
 u64 __read_mostly xstate_align;
 static unsigned int __read_mostly xstate_features;
diff --git a/xen/include/asm-x86/xstate.h b/xen/include/asm-x86/xstate.h
index 4535354..51a9ed4 100644
--- a/xen/include/asm-x86/xstate.h
+++ b/xen/include/asm-x86/xstate.h
@@ -26,16 +26,27 @@
 #define XSAVE_HDR_OFFSET          FXSAVE_SIZE
 #define XSTATE_AREA_MIN_SIZE      (FXSAVE_SIZE + XSAVE_HDR_SIZE)
 
-#define XSTATE_FP      (1ULL << 0)
-#define XSTATE_SSE     (1ULL << 1)
-#define XSTATE_YMM     (1ULL << 2)
-#define XSTATE_BNDREGS (1ULL << 3)
-#define XSTATE_BNDCSR  (1ULL << 4)
-#define XSTATE_OPMASK  (1ULL << 5)
-#define XSTATE_ZMM     (1ULL << 6)
-#define XSTATE_HI_ZMM  (1ULL << 7)
-#define XSTATE_PKRU    (1ULL << 9)
-#define XSTATE_LWP     (1ULL << 62) /* AMD lightweight profiling */
+#define _XSTATE_FP                0
+#define XSTATE_FP                 (1ULL << _XSTATE_FP)
+#define _XSTATE_SSE               1
+#define XSTATE_SSE                (1ULL << _XSTATE_SSE)
+#define _XSTATE_YMM               2
+#define XSTATE_YMM                (1ULL << _XSTATE_YMM)
+#define _XSTATE_BNDREGS           3
+#define XSTATE_BNDREGS            (1ULL << _XSTATE_BNDREGS)
+#define _XSTATE_BNDCSR            4
+#define XSTATE_BNDCSR             (1ULL << _XSTATE_BNDCSR)
+#define _XSTATE_OPMASK            5
+#define XSTATE_OPMASK             (1ULL << _XSTATE_OPMASK)
+#define _XSTATE_ZMM               6
+#define XSTATE_ZMM                (1ULL << _XSTATE_ZMM)
+#define _XSTATE_HI_ZMM            7
+#define XSTATE_HI_ZMM             (1ULL << _XSTATE_HI_ZMM)
+#define _XSTATE_PKRU              9
+#define XSTATE_PKRU               (1ULL << _XSTATE_PKRU)
+#define _XSTATE_LWP               62
+#define XSTATE_LWP                (1ULL << _XSTATE_LWP)
+
 #define XSTATE_FP_SSE  (XSTATE_FP | XSTATE_SSE)
 #define XCNTXT_MASK    (XSTATE_FP | XSTATE_SSE | XSTATE_YMM | XSTATE_OPMASK | \
                         XSTATE_ZMM | XSTATE_HI_ZMM | XSTATE_NONLAZY)
@@ -51,6 +62,7 @@ extern u64 xfeature_mask;
 extern u64 xstate_align;
+extern unsigned int *xstate_offsets;
 extern unsigned int *xstate_sizes;
 
 /* extended state save area */
-- 
2.1.4


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel
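
For readers skimming the diff, the calculation the hvm_cpuid() and pv_cpuid() hunks introduce boils down to: start from the always-present FP/SSE states and the minimum XSAVE area size, then, for each architectural state whose controlling CPUID feature is visible to the guest, set the corresponding xcr0 bit and raise the size to that component's end offset. The standalone C sketch below only illustrates that shape and is not part of the patch; the component offsets/sizes and the feature flags are made-up placeholder values, whereas the real code reads them from the host's CPUID.0xD sub-leaves and from the guest's featureset.

#include <stdint.h>
#include <stdio.h>

#define XSTATE_FP            (1ULL << 0)
#define XSTATE_SSE           (1ULL << 1)
#define XSTATE_YMM           (1ULL << 2)
#define XSTATE_BNDREGS       (1ULL << 3)
#define XSTATE_BNDCSR        (1ULL << 4)
#define XSTATE_PKRU          (1ULL << 9)

#define XSTATE_AREA_MIN_SIZE 576  /* 512-byte FXSAVE area + 64-byte XSAVE header */
#define MAX(a, b)            ((a) > (b) ? (a) : (b))

/* Per-component save-area layout; placeholder values, not real CPUID data. */
struct xstate_comp { unsigned int offset, size; };

int main(void)
{
    struct xstate_comp comp[10] = {
        [2] = {  576, 256 },   /* YMM */
        [4] = {  960,  64 },   /* BNDCSR (ends the MPX region, after BNDREGS) */
        [9] = { 2688,   8 },   /* PKRU */
    };

    /* Pretend the guest's featureset advertises AVX but not MPX or PKU. */
    int has_avx = 1, has_mpx = 0, has_pku = 0;

    /* FP and SSE states are always present. */
    uint64_t xfeature_mask = XSTATE_FP | XSTATE_SSE;
    uint32_t xstate_size = XSTATE_AREA_MIN_SIZE;

    if ( has_avx )
    {
        xfeature_mask |= XSTATE_YMM;
        xstate_size = MAX(xstate_size, comp[2].offset + comp[2].size);
    }

    if ( has_mpx )
    {
        xfeature_mask |= XSTATE_BNDREGS | XSTATE_BNDCSR;
        xstate_size = MAX(xstate_size, comp[4].offset + comp[4].size);
    }

    if ( has_pku )
    {
        xfeature_mask |= XSTATE_PKRU;
        xstate_size = MAX(xstate_size, comp[9].offset + comp[9].size);
    }

    printf("xfeature_mask = %#llx, xstate_size = %u\n",
           (unsigned long long)xfeature_mask, xstate_size);
    return 0;
}

With the placeholder numbers above this prints xfeature_mask = 0x7 and xstate_size = 832; taking the maximum of the enabled components' end offsets is what keeps the reported size consistent with whichever states the guest is actually allowed to enable, instead of whatever the host happens to support.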