[Xen-devel] [PATCH 11/27] x86/hvm: Improve hvm_efer_valid() using named features
Pick the appropriate cpuid_policy object rather than using hvm_cpuid() or
boot_cpu_data.  This breaks the dependency on current.

As data is read straight out of cpuid_policy, there is no need to work around
the fact that X86_FEATURE_SYSCALL might be clear because of the dynamic
adjustment in hvm_cpuid().  This simplifies the SCE handling, as EFER.SCE can
be set in isolation in 32bit mode on Intel hardware.

Alter nestedhvm_enabled() to be const-correct, allowing hvm_efer_valid() to be
properly const-correct.

Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
CC: Jan Beulich <JBeulich@xxxxxxxx>
---
 xen/arch/x86/hvm/hvm.c              | 43 ++++++++++---------------------------
 xen/arch/x86/hvm/nestedhvm.c        |  6 ++----
 xen/include/asm-x86/hvm/hvm.h       |  3 +--
 xen/include/asm-x86/hvm/nestedhvm.h |  2 +-
 4 files changed, 15 insertions(+), 39 deletions(-)

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index bdf8ca8..d651d0b 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -914,56 +914,35 @@ static int hvm_save_cpu_ctxt(struct domain *d, hvm_domain_context_t *h)
 }
 
 /* Return a string indicating the error, or NULL for valid. */
-const char *hvm_efer_valid(const struct vcpu *v, uint64_t value,
-                           signed int cr0_pg)
+const char *hvm_efer_valid(const struct vcpu *v, uint64_t value, int cr0_pg)
 {
-    unsigned int ext1_ecx = 0, ext1_edx = 0;
+    const struct domain *d = v->domain;
+    const struct cpuid_policy *p;
 
-    if ( cr0_pg < 0 && !is_hardware_domain(v->domain) )
-    {
-        unsigned int level;
-
-        ASSERT(v->domain == current->domain);
-        hvm_cpuid(0x80000000, &level, NULL, NULL, NULL);
-        if ( (level >> 16) == 0x8000 && level > 0x80000000 )
-            hvm_cpuid(0x80000001, NULL, NULL, &ext1_ecx, &ext1_edx);
-    }
+    if ( cr0_pg < 0 && !is_hardware_domain(d) )
+        p = d->arch.cpuid;
     else
-    {
-        ext1_edx = boot_cpu_data.x86_capability[cpufeat_word(X86_FEATURE_LM)];
-        ext1_ecx = boot_cpu_data.x86_capability[cpufeat_word(X86_FEATURE_SVM)];
-    }
+        p = &host_policy;
 
-    /*
-     * Guests may want to set EFER.SCE and EFER.LME at the same time, so we
-     * can't make the check depend on only X86_FEATURE_SYSCALL (which on VMX
-     * will be clear without the guest having entered 64-bit mode).
-     */
-    if ( (value & EFER_SCE) &&
-         !(ext1_edx & cpufeat_mask(X86_FEATURE_SYSCALL)) &&
-         (cr0_pg >= 0 || !(value & EFER_LME)) )
+    if ( (value & EFER_SCE) && !p->extd.syscall )
         return "SCE without feature";
 
-    if ( (value & (EFER_LME | EFER_LMA)) &&
-         !(ext1_edx & cpufeat_mask(X86_FEATURE_LM)) )
+    if ( (value & (EFER_LME | EFER_LMA)) && !p->extd.lm )
         return "LME/LMA without feature";
 
     if ( (value & EFER_LMA) && (!(value & EFER_LME) || !cr0_pg) )
         return "LMA/LME/CR0.PG inconsistency";
 
-    if ( (value & EFER_NX) && !(ext1_edx & cpufeat_mask(X86_FEATURE_NX)) )
+    if ( (value & EFER_NX) && !p->extd.nx )
         return "NX without feature";
 
-    if ( (value & EFER_SVME) &&
-         (!(ext1_ecx & cpufeat_mask(X86_FEATURE_SVM)) ||
-          !nestedhvm_enabled(v->domain)) )
+    if ( (value & EFER_SVME) && (!p->extd.svm || !nestedhvm_enabled(d)) )
         return "SVME without nested virt";
 
     if ( (value & EFER_LMSLE) && !cpu_has_lmsl )
         return "LMSLE without support";
 
-    if ( (value & EFER_FFXSE) &&
-         !(ext1_edx & cpufeat_mask(X86_FEATURE_FFXSR)) )
+    if ( (value & EFER_FFXSE) && !p->extd.ffxsr )
        return "FFXSE without feature";
 
     return NULL;
diff --git a/xen/arch/x86/hvm/nestedhvm.c b/xen/arch/x86/hvm/nestedhvm.c
index c09c5b2..a400d55 100644
--- a/xen/arch/x86/hvm/nestedhvm.c
+++ b/xen/arch/x86/hvm/nestedhvm.c
@@ -27,11 +27,9 @@
 static unsigned long *shadow_io_bitmap[3];
 
 /* Nested HVM on/off per domain */
-bool_t
-nestedhvm_enabled(struct domain *d)
+bool nestedhvm_enabled(const struct domain *d)
 {
-    return is_hvm_domain(d) &&
-           d->arch.hvm_domain.params[HVM_PARAM_NESTEDHVM];
+    return is_hvm_domain(d) && d->arch.hvm_domain.params[HVM_PARAM_NESTEDHVM];
 }
 
 /* Nested VCPU */
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index 8c95c08..4248546 100644
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -613,8 +613,7 @@ static inline bool altp2m_vcpu_emulate_ve(struct vcpu *v)
 }
 
 /* Check CR4/EFER values */
-const char *hvm_efer_valid(const struct vcpu *v, uint64_t value,
-                           signed int cr0_pg);
+const char *hvm_efer_valid(const struct vcpu *v, uint64_t value, int cr0_pg);
 unsigned long hvm_cr4_guest_reserved_bits(const struct vcpu *v, bool_t restore);
 
 /*
diff --git a/xen/include/asm-x86/hvm/nestedhvm.h b/xen/include/asm-x86/hvm/nestedhvm.h
index bc82425..47165fc 100644
--- a/xen/include/asm-x86/hvm/nestedhvm.h
+++ b/xen/include/asm-x86/hvm/nestedhvm.h
@@ -33,7 +33,7 @@ enum nestedhvm_vmexits {
 };
 
 /* Nested HVM on/off per domain */
-bool_t nestedhvm_enabled(struct domain *d);
+bool nestedhvm_enabled(const struct domain *d);
 
 /* Nested VCPU */
 int nestedhvm_vcpu_initialise(struct vcpu *v);
-- 
2.1.4


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
https://lists.xen.org/xen-devel
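A standalone sketch (not part of the patch) of just the EFER.SCE clause, before
and after the change: reading the named feature bit straight from the guest's
cpuid_policy lets a 32bit guest on Intel hardware set EFER.SCE in isolation,
which the old dynamically-adjusted check refused.  The bit masks and helper
functions below are simplified stand-ins for illustration, not the Xen code
itself.

/* Illustrative model only: EFER bit positions per the architecture manuals. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EFER_SCE (1ULL << 0)
#define EFER_LME (1ULL << 8)

/*
 * Old check: the SYSCALL feature bit came from hvm_cpuid(), which on Intel
 * hardware hides it while the guest is outside 64bit mode, so an extra
 * clause had to excuse the SCE+LME combination.
 */
static bool old_sce_rejected(uint64_t efer, bool dyn_syscall, int cr0_pg)
{
    return (efer & EFER_SCE) && !dyn_syscall &&
           (cr0_pg >= 0 || !(efer & EFER_LME));
}

/* New check: the policy bit is static and accurate, so a single condition. */
static bool new_sce_rejected(uint64_t efer, bool policy_syscall)
{
    return (efer & EFER_SCE) && !policy_syscall;
}

int main(void)
{
    /* 32bit guest on Intel: dynamic view reports SYSCALL clear, policy set. */
    uint64_t efer = EFER_SCE;   /* SCE set in isolation, no LME */

    printf("old: %s\n", old_sce_rejected(efer, false, -1) ? "reject" : "accept");
    printf("new: %s\n", new_sce_rejected(efer, true) ? "reject" : "accept");
    return 0;
}

For the SCE-only value this prints "old: reject" and "new: accept", which is
the simplification the commit message describes; the SCE+LME combination is
accepted by both versions.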