|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-devel] [PATCH v4 3/4] x86: stop handling MSR_IA32_XSS save/restore in implementation code
Saving and restoring the value of this MSR is currently handled by
implementation-specific code despite it being architectural. This patch
moves handling of accesses to this MSR from hvm.c into msr.c, thus
allowing the common MSR save/restore code to handle it.
This patch also adds proper checks of CPUID policy in the new get/set code.
NOTE: MSR_IA32_XSS is the last MSR to be saved and restored by
implementation-specific code. This patch therefore removes the
(VMX) definitions of the init_msr(), save_msr() and
load_msr() hvm_funcs, as they are no longer necessary. The
declarations of and calls to those hvm_funcs will be cleaned up
by a subsequent patch.
Signed-off-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx>
Reviewed-by: Kevin Tian <kevin.tian@xxxxxxxxx>
---
Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Cc: Wei Liu <wei.liu2@xxxxxxxxxx>
Cc: "Roger Pau Monné" <roger.pau@xxxxxxxxxx>
Cc: Jun Nakajima <jun.nakajima@xxxxxxxxx>
---
xen/arch/x86/hvm/hvm.c | 15 ++----------
xen/arch/x86/hvm/vmx/vmx.c | 49 --------------------------------------
xen/arch/x86/msr.c | 18 ++++++++++++++
3 files changed, 20 insertions(+), 62 deletions(-)
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index dff590e658..deb7fb2adb 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -1304,6 +1304,7 @@ static const uint32_t msrs_to_send[] = {
MSR_SPEC_CTRL,
MSR_INTEL_MISC_FEATURES_ENABLES,
MSR_IA32_BNDCFGS,
+ MSR_IA32_XSS,
MSR_AMD64_DR0_ADDRESS_MASK,
MSR_AMD64_DR1_ADDRESS_MASK,
MSR_AMD64_DR2_ADDRESS_MASK,
@@ -1442,6 +1443,7 @@ static int hvm_load_cpu_msrs(struct domain *d,
hvm_domain_context_t *h)
case MSR_SPEC_CTRL:
case MSR_INTEL_MISC_FEATURES_ENABLES:
case MSR_IA32_BNDCFGS:
+ case MSR_IA32_XSS:
case MSR_AMD64_DR0_ADDRESS_MASK:
case MSR_AMD64_DR1_ADDRESS_MASK ... MSR_AMD64_DR3_ADDRESS_MASK:
rc = guest_wrmsr(v, ctxt->msr[i].index, ctxt->msr[i].val);
@@ -3463,12 +3465,6 @@ int hvm_msr_read_intercept(unsigned int msr, uint64_t
*msr_content)
MTRRcap_VCNT))];
break;
- case MSR_IA32_XSS:
- if ( !d->arch.cpuid->xstate.xsaves )
- goto gp_fault;
- *msr_content = v->arch.msrs->xss.raw;
- break;
-
case MSR_K8_ENABLE_C1E:
case MSR_AMD64_NB_CFG:
/*
@@ -3608,13 +3604,6 @@ int hvm_msr_write_intercept(unsigned int msr, uint64_t
msr_content,
goto gp_fault;
break;
- case MSR_IA32_XSS:
- /* No XSS features currently supported for guests. */
- if ( !d->arch.cpuid->xstate.xsaves || msr_content != 0 )
- goto gp_fault;
- v->arch.msrs->xss.raw = msr_content;
- break;
-
case MSR_AMD64_NB_CFG:
/* ignore the write */
break;
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 985e5735d2..c46e05b91e 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -797,52 +797,6 @@ static int vmx_load_vmcs_ctxt(struct vcpu *v, struct
hvm_hw_cpu *ctxt)
return 0;
}
-static unsigned int __init vmx_init_msr(void)
-{
- return (cpu_has_mpx && cpu_has_vmx_mpx) +
- (cpu_has_xsaves && cpu_has_vmx_xsaves);
-}
-
-static void vmx_save_msr(struct vcpu *v, struct hvm_msr *ctxt)
-{
- if ( cpu_has_xsaves && cpu_has_vmx_xsaves )
- {
- ctxt->msr[ctxt->count].val = v->arch.msrs->xss.raw;
- if ( ctxt->msr[ctxt->count].val )
- ctxt->msr[ctxt->count++].index = MSR_IA32_XSS;
- }
-}
-
-static int vmx_load_msr(struct vcpu *v, struct hvm_msr *ctxt)
-{
- unsigned int i;
- int err = 0;
-
- vmx_vmcs_enter(v);
-
- for ( i = 0; i < ctxt->count; ++i )
- {
- switch ( ctxt->msr[i].index )
- {
- case MSR_IA32_XSS:
- if ( cpu_has_xsaves && cpu_has_vmx_xsaves )
- v->arch.msrs->xss.raw = ctxt->msr[i].val;
- else
- err = -ENXIO;
- break;
- default:
- continue;
- }
- if ( err )
- break;
- ctxt->msr[i]._rsvd = 1;
- }
-
- vmx_vmcs_exit(v);
-
- return err;
-}
-
static void vmx_fpu_enter(struct vcpu *v)
{
vcpu_restore_fpu_lazy(v);
@@ -2282,9 +2236,6 @@ static struct hvm_function_table __initdata
vmx_function_table = {
.vcpu_destroy = vmx_vcpu_destroy,
.save_cpu_ctxt = vmx_save_vmcs_ctxt,
.load_cpu_ctxt = vmx_load_vmcs_ctxt,
- .init_msr = vmx_init_msr,
- .save_msr = vmx_save_msr,
- .load_msr = vmx_load_msr,
.get_interrupt_shadow = vmx_get_interrupt_shadow,
.set_interrupt_shadow = vmx_set_interrupt_shadow,
.guest_x86_mode = vmx_guest_x86_mode,
diff --git a/xen/arch/x86/msr.c b/xen/arch/x86/msr.c
index 0e901d2397..4b5e000224 100644
--- a/xen/arch/x86/msr.c
+++ b/xen/arch/x86/msr.c
@@ -170,6 +170,13 @@ int guest_rdmsr(const struct vcpu *v, uint32_t msr,
uint64_t *val)
break;
+ case MSR_IA32_XSS:
+ if ( !cp->xstate.xsaves )
+ goto gp_fault;
+
+ *val = msrs->xss.raw;
+ break;
+
case 0x40000000 ... 0x400001ff:
if ( is_viridian_domain(d) )
{
@@ -343,6 +350,17 @@ int guest_wrmsr(struct vcpu *v, uint32_t msr, uint64_t val)
break;
+ case MSR_IA32_XSS:
+ if ( !cp->xstate.xsaves )
+ goto gp_fault;
+
+ /* No XSS features currently supported for guests */
+ if ( val != 0 )
+ goto gp_fault;
+
+ msrs->xss.raw = val;
+ break;
+
case 0x40000000 ... 0x400001ff:
if ( is_viridian_domain(d) )
{
--
2.20.1
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel
|
![]() |
Lists.xenproject.org is hosted with RackSpace, monitoring our |