[Xen-changelog] [xen master] Revert "x86/HVM: make hvm_efer_valid() honor guest features"
commit d6af4e1db6c91ba79789ee0e0d363f90ef5b2370
Author: Jan Beulich <jbeulich@xxxxxxxx>
AuthorDate: Wed Jan 14 11:53:28 2015 +0100
Commit: Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Wed Jan 14 11:53:28 2015 +0100
Revert "x86/HVM: make hvm_efer_valid() honor guest features"
This reverts commit 07a6aa869bae0699d2f7e1b75d188229eb70c9e4. Some HVM
guests don't survive the new checking (EFER=0x901 reported invalid).
---
xen/arch/x86/hvm/hvm.c | 66 ++++++++++++++----------------------------------
1 files changed, 19 insertions(+), 47 deletions(-)
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 82ee7e8..8b06bfd 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -1672,53 +1672,20 @@ static int hvm_save_cpu_ctxt(struct domain *d, hvm_domain_context_t *h)
     return 0;
 }
 
-static bool_t hvm_efer_valid(const struct vcpu *v, uint64_t value,
-                             signed int cr0_pg)
+static bool_t hvm_efer_valid(struct domain *d,
+                             uint64_t value, uint64_t efer_validbits)
 {
-    unsigned int ext1_ecx = 0, ext1_edx = 0;
+    if ( nestedhvm_enabled(d) && cpu_has_svm )
+        efer_validbits |= EFER_SVME;
 
-    if ( cr0_pg < 0 && !is_hardware_domain(v->domain) )
-    {
-        unsigned int level;
-
-        ASSERT(v == current);
-        hvm_cpuid(0x80000000, &level, NULL, NULL, NULL);
-        if ( level >= 0x80000001 )
-            hvm_cpuid(0x80000001, NULL, NULL, &ext1_ecx, &ext1_edx);
-    }
-    else
-    {
-        ext1_edx = boot_cpu_data.x86_capability[X86_FEATURE_LM / 32];
-        ext1_ecx = boot_cpu_data.x86_capability[X86_FEATURE_SVM / 32];
-    }
-
-    if ( (value & EFER_SCE) &&
-         !(ext1_edx & cpufeat_mask(X86_FEATURE_SYSCALL)) )
-        return 0;
-
-    if ( (value & (EFER_LME | EFER_LMA)) &&
-         !(ext1_edx & cpufeat_mask(X86_FEATURE_LM)) )
-        return 0;
-
-    if ( cr0_pg > 0 && (value & EFER_LMA) && (!(value & EFER_LME) || !cr0_pg) )
-        return 0;
-
-    if ( (value & EFER_NX) && !(ext1_edx & cpufeat_mask(X86_FEATURE_NX)) )
-        return 0;
-
-    if ( (value & EFER_SVME) &&
-         (!(ext1_ecx & cpufeat_mask(X86_FEATURE_SVM)) ||
-          !nestedhvm_enabled(v->domain)) )
-        return 0;
-
-    if ( (value & EFER_LMSLE) && !cpu_has_lmsl )
-        return 0;
-
-    if ( (value & EFER_FFXSE) &&
-         !(ext1_edx & cpufeat_mask(X86_FEATURE_FFXSR)) )
-        return 0;
-
-    return 1;
+    return !((value & ~efer_validbits) ||
+             ((sizeof(long) != 8) && (value & EFER_LME)) ||
+             (!cpu_has_svm && (value & EFER_SVME)) ||
+             (!cpu_has_nx && (value & EFER_NX)) ||
+             (!cpu_has_syscall && (value & EFER_SCE)) ||
+             (!cpu_has_lmsl && (value & EFER_LMSLE)) ||
+             (!cpu_has_ffxsr && (value & EFER_FFXSE)) ||
+             ((value & (EFER_LME|EFER_LMA)) == EFER_LMA));
 }
 
 /* These reserved bits in lower 32 remain 0 after any load of CR0 */
@@ -1796,6 +1763,7 @@ static int hvm_load_cpu_ctxt(struct domain *d, hvm_domain_context_t *h)
     struct vcpu *v;
     struct hvm_hw_cpu ctxt;
     struct segment_register seg;
+    uint64_t efer_validbits;
 
     /* Which vcpu is this? */
     vcpuid = hvm_load_instance(h);
@@ -1826,7 +1794,9 @@ static int hvm_load_cpu_ctxt(struct domain *d, hvm_domain_context_t *h)
         return -EINVAL;
     }
 
-    if ( !hvm_efer_valid(v, ctxt.msr_efer, MASK_EXTR(ctxt.cr0, X86_CR0_PG)) )
+    efer_validbits = EFER_FFXSE | EFER_LMSLE | EFER_LME | EFER_LMA
+                   | EFER_NX | EFER_SCE;
+    if ( !hvm_efer_valid(d, ctxt.msr_efer, efer_validbits) )
     {
         printk(XENLOG_G_ERR "HVM%d restore: bad EFER %#" PRIx64 "\n",
                d->domain_id, ctxt.msr_efer);
@@ -2966,10 +2936,12 @@ err:
 int hvm_set_efer(uint64_t value)
 {
     struct vcpu *v = current;
+    uint64_t efer_validbits;
 
     value &= ~EFER_LMA;
 
-    if ( !hvm_efer_valid(v, value, -1) )
+    efer_validbits = EFER_FFXSE | EFER_LMSLE | EFER_LME | EFER_NX | EFER_SCE;
+    if ( !hvm_efer_valid(v->domain, value, efer_validbits) )
     {
         gdprintk(XENLOG_WARNING, "Trying to set reserved bit in "
                  "EFER: %#"PRIx64"\n", value);
--
generated by git-patchbot for /home/xen/git/xen.git#master
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog