[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-devel] [PATCH 4/6] x86: stop handling MSR_IA32_XSS save/restore in implementation code
Saving and restoring the value of this MSR is currently handled by implementation-specific code despite it being architectural. This patch moves handling of accesses to this MSR from hvm.c into msr.c, thus allowing the common MSR save/restore code to handle it. This patch also adds proper checks of CPUID policy in the new get/set code. NOTE: MSR_IA32_XSS is the last MSR to be saved and restored by implementation-specific code. This patch therefore removes the (VMX) definitions of the init_msr(), save_msr() and load_msr() hvm_funcs, as they are no longer necessary. The declarations of and calls to those hvm_funcs will be cleaned up by a subsequent patch. Signed-off-by: Paul Durrant <paul.durrant@xxxxxxxxxx> --- Cc: Jan Beulich <jbeulich@xxxxxxxx> Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx> Cc: Wei Liu <wei.liu2@xxxxxxxxxx> Cc: "Roger Pau Monné" <roger.pau@xxxxxxxxxx> Cc: Jun Nakajima <jun.nakajima@xxxxxxxxx> Cc: Kevin Tian <kevin.tian@xxxxxxxxx> --- xen/arch/x86/hvm/hvm.c | 15 ++---------- xen/arch/x86/hvm/vmx/vmx.c | 49 -------------------------------------- xen/arch/x86/msr.c | 18 ++++++++++++++ 3 files changed, 20 insertions(+), 62 deletions(-) diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c index b55bb8b081..856dcf696b 100644 --- a/xen/arch/x86/hvm/hvm.c +++ b/xen/arch/x86/hvm/hvm.c @@ -1345,6 +1345,7 @@ static const uint32_t msrs_to_send[] = { MSR_SPEC_CTRL, MSR_INTEL_MISC_FEATURES_ENABLES, MSR_IA32_BNDCFGS, + MSR_IA32_XSS, MSR_AMD64_DR0_ADDRESS_MASK, MSR_AMD64_DR1_ADDRESS_MASK, MSR_AMD64_DR2_ADDRESS_MASK, @@ -1483,6 +1484,7 @@ static int hvm_load_cpu_msrs(struct domain *d, hvm_domain_context_t *h) case MSR_SPEC_CTRL: case MSR_INTEL_MISC_FEATURES_ENABLES: case MSR_IA32_BNDCFGS: + case MSR_IA32_XSS: case MSR_AMD64_DR0_ADDRESS_MASK: case MSR_AMD64_DR1_ADDRESS_MASK ... 
MSR_AMD64_DR3_ADDRESS_MASK: rc = guest_wrmsr(v, ctxt->msr[i].index, ctxt->msr[i].val); @@ -3500,12 +3502,6 @@ int hvm_msr_read_intercept(unsigned int msr, uint64_t *msr_content) *msr_content = var_range_base[index]; break; - case MSR_IA32_XSS: - if ( !d->arch.cpuid->xstate.xsaves ) - goto gp_fault; - *msr_content = v->arch.msrs->xss.raw; - break; - case MSR_K8_ENABLE_C1E: case MSR_AMD64_NB_CFG: /* @@ -3645,13 +3641,6 @@ int hvm_msr_write_intercept(unsigned int msr, uint64_t msr_content, goto gp_fault; break; - case MSR_IA32_XSS: - /* No XSS features currently supported for guests. */ - if ( !d->arch.cpuid->xstate.xsaves || msr_content != 0 ) - goto gp_fault; - v->arch.msrs->xss.raw = msr_content; - break; - case MSR_AMD64_NB_CFG: /* ignore the write */ break; diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c index ec87607ec7..f175b79b4b 100644 --- a/xen/arch/x86/hvm/vmx/vmx.c +++ b/xen/arch/x86/hvm/vmx/vmx.c @@ -800,52 +800,6 @@ static int vmx_load_vmcs_ctxt(struct vcpu *v, struct hvm_hw_cpu *ctxt) return 0; } -static unsigned int __init vmx_init_msr(void) -{ - return (cpu_has_mpx && cpu_has_vmx_mpx) + - (cpu_has_xsaves && cpu_has_vmx_xsaves); -} - -static void vmx_save_msr(struct vcpu *v, struct hvm_msr *ctxt) -{ - if ( cpu_has_xsaves && cpu_has_vmx_xsaves ) - { - ctxt->msr[ctxt->count].val = v->arch.msrs->xss.raw; - if ( ctxt->msr[ctxt->count].val ) - ctxt->msr[ctxt->count++].index = MSR_IA32_XSS; - } -} - -static int vmx_load_msr(struct vcpu *v, struct hvm_msr *ctxt) -{ - unsigned int i; - int err = 0; - - vmx_vmcs_enter(v); - - for ( i = 0; i < ctxt->count; ++i ) - { - switch ( ctxt->msr[i].index ) - { - case MSR_IA32_XSS: - if ( cpu_has_xsaves && cpu_has_vmx_xsaves ) - v->arch.msrs->xss.raw = ctxt->msr[i].val; - else - err = -ENXIO; - break; - default: - continue; - } - if ( err ) - break; - ctxt->msr[i]._rsvd = 1; - } - - vmx_vmcs_exit(v); - - return err; -} - static void vmx_fpu_enter(struct vcpu *v) { vcpu_restore_fpu_lazy(v); @@ -2283,9 
+2237,6 @@ static struct hvm_function_table __initdata vmx_function_table = { .vcpu_destroy = vmx_vcpu_destroy, .save_cpu_ctxt = vmx_save_vmcs_ctxt, .load_cpu_ctxt = vmx_load_vmcs_ctxt, - .init_msr = vmx_init_msr, - .save_msr = vmx_save_msr, - .load_msr = vmx_load_msr, .get_interrupt_shadow = vmx_get_interrupt_shadow, .set_interrupt_shadow = vmx_set_interrupt_shadow, .guest_x86_mode = vmx_guest_x86_mode, diff --git a/xen/arch/x86/msr.c b/xen/arch/x86/msr.c index a3406c29a8..3aa79031cf 100644 --- a/xen/arch/x86/msr.c +++ b/xen/arch/x86/msr.c @@ -164,6 +164,13 @@ int guest_rdmsr(struct vcpu *v, uint32_t msr, uint64_t *val) break; + case MSR_IA32_XSS: + if ( !is_hvm_domain(d) || !cp->xstate.xsaves ) + goto gp_fault; + + *val = msrs->xss.raw; + break; + case 0x40000000 ... 0x400001ff: if ( is_viridian_domain(d) ) { @@ -331,6 +338,17 @@ int guest_wrmsr(struct vcpu *v, uint32_t msr, uint64_t val) break; + case MSR_IA32_XSS: + if ( !is_hvm_domain(d) || !cp->xstate.xsaves ) + goto gp_fault; + + /* No XSS features currently supported for guests */ + if ( val != 0 ) + goto gp_fault; + + msrs->xss.raw = val; + break; + case 0x40000000 ... 0x400001ff: if ( is_viridian_domain(d) ) { -- 2.20.1 _______________________________________________ Xen-devel mailing list Xen-devel@xxxxxxxxxxxxxxxxxxxx https://lists.xenproject.org/mailman/listinfo/xen-devel
|
Lists.xenproject.org is hosted with RackSpace, monitoring our |