[Xen-devel] [PATCH v3 20/28] x86/domctl: Update PV domain cpumasks when setting cpuid policy
This allows PV domains with different featuresets to observe different values
from a native cpuid instruction, on supporting hardware.

It is important to leak the host view of HTT and CMP_LEGACY through to
guests, even though they could be hidden.  These flags affect how to
interpret other cpuid leaves, which are not maskable.

Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
CC: Jan Beulich <JBeulich@xxxxxxxx>

v3:
 * Only set a shadow cpumask if it is available in hardware.  This causes
   fewer branches in the context switch.
 * Fix interaction between fast-forward bits and the override MSR.
 * Fix up the cross-vendor case.
 * Fix the host view of HTT/CMP_LEGACY.
v2:
 * Use switch() rather than an if/else-if chain.
 * Clamp to the static PV featuremask.
---
 xen/arch/x86/domctl.c            | 124 +++++++++++++++++++++++++++++++++++++++
 xen/include/asm-x86/cpufeature.h |   1 +
 2 files changed, 125 insertions(+)

diff --git a/xen/arch/x86/domctl.c b/xen/arch/x86/domctl.c
index b34a295..8db8f3b 100644
--- a/xen/arch/x86/domctl.c
+++ b/xen/arch/x86/domctl.c
@@ -36,6 +36,7 @@
 #include <asm/xstate.h>
 #include <asm/debugger.h>
 #include <asm/psr.h>
+#include <asm/cpuid.h>
 
 static int gdbsx_guest_mem_io(domid_t domid, struct xen_domctl_gdbsx_memio *iop)
 {
@@ -87,6 +88,129 @@ static void update_domain_cpuid_info(struct domain *d,
         d->arch.x86_model = (ctl->eax >> 4) & 0xf;
         if ( d->arch.x86 >= 0x6 )
             d->arch.x86_model |= (ctl->eax >> 12) & 0xf0;
+
+        if ( is_pv_domain(d) && ((levelling_caps & LCAP_1cd) == LCAP_1cd) )
+        {
+            uint64_t mask = cpuidmask_defaults._1cd;
+            uint32_t ecx = ctl->ecx & pv_featureset[FEATURESET_1c];
+            uint32_t edx = ctl->edx & pv_featureset[FEATURESET_1d];
+
+            /*
+             * Must expose the host's HTT value so a guest using native
+             * CPUID can correctly interpret other leaves which cannot be
+             * masked.
+             */
+            edx &= ~cpufeat_mask(X86_FEATURE_HTT);
+            if ( cpu_has_htt )
+                edx |= cpufeat_mask(X86_FEATURE_HTT);
+
+            switch ( boot_cpu_data.x86_vendor )
+            {
+            case X86_VENDOR_INTEL:
+                mask &= ((uint64_t)edx << 32) | ecx;
+                break;
+
+            case X86_VENDOR_AMD:
+                mask &= ((uint64_t)ecx << 32) | edx;
+
+                /* Fast-forward bits - Must be set. */
+                if ( ecx & cpufeat_mask(X86_FEATURE_XSAVE) )
+                    ecx = cpufeat_mask(X86_FEATURE_OSXSAVE);
+                else
+                    ecx = 0;
+                edx = cpufeat_mask(X86_FEATURE_APIC);
+
+                mask |= ((uint64_t)ecx << 32) | edx;
+                break;
+            }
+
+            d->arch.pv_domain.cpuidmasks->_1cd = mask;
+        }
+        break;
+
+    case 6:
+        if ( is_pv_domain(d) && ((levelling_caps & LCAP_6c) == LCAP_6c) )
+        {
+            uint64_t mask = cpuidmask_defaults._6c;
+
+            if ( boot_cpu_data.x86_vendor == X86_VENDOR_AMD )
+                mask &= (~0ULL << 32) | ctl->ecx;
+
+            d->arch.pv_domain.cpuidmasks->_6c = mask;
+        }
+        break;
+
+    case 7:
+        if ( ctl->input[1] != 0 )
+            break;
+
+        if ( is_pv_domain(d) && ((levelling_caps & LCAP_7ab0) == LCAP_7ab0) )
+        {
+            uint64_t mask = cpuidmask_defaults._7ab0;
+            uint32_t eax = ctl->eax;
+            uint32_t ebx = ctl->ebx & pv_featureset[FEATURESET_7b0];
+
+            if ( boot_cpu_data.x86_vendor == X86_VENDOR_AMD )
+                mask &= ((uint64_t)eax << 32) | ebx;
+
+            d->arch.pv_domain.cpuidmasks->_7ab0 = mask;
+        }
+        break;
+
+    case 0xd:
+        if ( ctl->input[1] != 1 )
+            break;
+
+        if ( is_pv_domain(d) && ((levelling_caps & LCAP_Da1) == LCAP_Da1) )
+        {
+            uint64_t mask = cpuidmask_defaults.Da1;
+            uint32_t eax = ctl->eax & pv_featureset[FEATURESET_Da1];
+
+            if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL )
+                mask &= (~0ULL << 32) | eax;
+
+            d->arch.pv_domain.cpuidmasks->Da1 = mask;
+        }
+        break;
+
+    case 0x80000001:
+        if ( is_pv_domain(d) && ((levelling_caps & LCAP_e1cd) == LCAP_e1cd) )
+        {
+            uint64_t mask = cpuidmask_defaults.e1cd;
+            uint32_t ecx = ctl->ecx & pv_featureset[FEATURESET_e1c];
+            uint32_t edx = ctl->edx & pv_featureset[FEATURESET_e1d];
+
+            /* If emulating Intel, clear the duplicated features in e1d. */
+            if ( d->arch.x86_vendor == X86_VENDOR_INTEL )
+                edx &= ~CPUID_COMMON_1D_FEATURES;
+
+            switch ( boot_cpu_data.x86_vendor )
+            {
+            case X86_VENDOR_INTEL:
+                mask &= ((uint64_t)edx << 32) | ecx;
+                break;
+
+            case X86_VENDOR_AMD:
+                /*
+                 * Must expose the host's CMP_LEGACY value so a guest using
+                 * native CPUID can correctly interpret other leaves which
+                 * cannot be masked.
+                 */
+                ecx &= ~cpufeat_mask(X86_FEATURE_CMP_LEGACY);
+                if ( cpu_has_cmp_legacy )
+                    ecx |= cpufeat_mask(X86_FEATURE_CMP_LEGACY);
+
+                mask &= ((uint64_t)ecx << 32) | edx;
+
+                /* Fast-forward bits - Must be set. */
+                ecx = 0;
+                edx = cpufeat_mask(X86_FEATURE_APIC);
+
+                mask |= ((uint64_t)ecx << 32) | edx;
+                break;
+            }
+
+            d->arch.pv_domain.cpuidmasks->e1cd = mask;
+        }
         break;
     }
 }
diff --git a/xen/include/asm-x86/cpufeature.h b/xen/include/asm-x86/cpufeature.h
index 50414e3..79236ea 100644
--- a/xen/include/asm-x86/cpufeature.h
+++ b/xen/include/asm-x86/cpufeature.h
@@ -81,6 +81,7 @@
 #define cpu_has_xsavec          boot_cpu_has(X86_FEATURE_XSAVEC)
 #define cpu_has_xgetbv1         boot_cpu_has(X86_FEATURE_XGETBV1)
 #define cpu_has_xsaves          boot_cpu_has(X86_FEATURE_XSAVES)
+#define cpu_has_cmp_legacy      boot_cpu_has(X86_FEATURE_CMP_LEGACY)
 #define cpu_has_hypervisor      boot_cpu_has(X86_FEATURE_HYPERVISOR)
 
 enum _cache_type {
-- 
2.1.4
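
To see why the host's HTT value (and, on AMD, CMP_LEGACY) has to leak
through, consider how a guest consumes leaf 1: CPUID.1.EBX[23:16] gives the
number of addressable logical processor IDs in the package, but that field
is only defined when HTT is set, and EBX is not covered by the masking MSRs.
Below is a minimal userspace sketch of such a consumer; it is not part of
the patch and assumes GCC/Clang's <cpuid.h>:

    #include <stdio.h>
    #include <cpuid.h>   /* GCC/Clang wrapper around the cpuid instruction */

    int main(void)
    {
        unsigned int eax, ebx, ecx, edx;

        if ( !__get_cpuid(1, &eax, &ebx, &ecx, &edx) )
            return 1;

        if ( edx & (1u << 28) )          /* HTT */
            /*
             * EBX[23:16] is only defined when HTT is set.  EBX cannot be
             * masked, so HTT must reflect the host for this to parse
             * correctly.
             */
            printf("addressable logical processor IDs: %u\n",
                   (ebx >> 16) & 0xff);
        else
            printf("one logical processor per package\n");

        return 0;
    }

Masking HTT out while EBX (and the topology information in other unmaskable
leaves) still reflects the host would leave such a guest with an
inconsistent view.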
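
The fast-forward handling in the AMD paths is also worth unpacking.  On
hardware with CPUID override MSRs, the OSXSAVE and APIC bits are not sourced
from the MSR contents: they are fast-forwarded from live machine state
(CR4.OSXSAVE and the APIC enable bit), and the corresponding mask bits must
be set for that pass-through to work, hence the unconditional OR-ing in the
patch.  The following standalone sketch mirrors the patch's AMD leaf 1
computation; the helper name and bit macros are illustrative rather than
Xen's own:

    #include <stdint.h>

    /* Architectural CPUID leaf 1 bit positions. */
    #define LEAF1_ECX_XSAVE    (1u << 26)
    #define LEAF1_ECX_OSXSAVE  (1u << 27)
    #define LEAF1_EDX_APIC     (1u <<  9)

    /*
     * AND the guest's permitted features into the default mask (AMD packs
     * the 64-bit override MSR as ECX:EDX), then OR the fast-forward bits
     * back in so the dynamic hardware values show through.  OSXSAVE is
     * only forwarded when the guest can also see XSAVE, as OSXSAVE
     * without XSAVE would advertise a feature whose prerequisite is
     * hidden.
     */
    static uint64_t amd_leaf1_mask(uint64_t defaults,
                                   uint32_t guest_ecx, uint32_t guest_edx)
    {
        uint64_t mask = defaults & (((uint64_t)guest_ecx << 32) | guest_edx);
        uint32_t ff_ecx = (guest_ecx & LEAF1_ECX_XSAVE) ? LEAF1_ECX_OSXSAVE
                                                        : 0;
        uint32_t ff_edx = LEAF1_EDX_APIC;

        return mask | (((uint64_t)ff_ecx << 32) | ff_edx);
    }

Leaving these bits clear in the mask would pin the guest-visible values to 0
regardless of CR4 or the APIC state, which appears to be the interaction the
v3 changelog entry refers to.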