[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-devel] [PATCH v5 5/5] x86/msr: handle VMX MSRs with guest_rd/wrmsr()
Now that each domain has a correct view of VMX MSRs in its per-domain MSR policy, it's possible to handle guest's RD/WRMSR with the new handlers. Do it and remove the old nvmx_msr_read_intercept() and associated bits. There is no functional change to what a guest sees in its VMX MSRs. Signed-off-by: Sergey Dyasli <sergey.dyasli@xxxxxxxxxx> --- v4 --> v5: - New msr availability helpers are used --- xen/arch/x86/hvm/vmx/vmx.c | 6 -- xen/arch/x86/hvm/vmx/vvmx.c | 178 ------------------------------------- xen/arch/x86/msr.c | 35 ++++++++ xen/include/asm-x86/hvm/vmx/vvmx.h | 2 - 4 files changed, 35 insertions(+), 186 deletions(-) diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c index 4856ad7c24..e850ef913f 100644 --- a/xen/arch/x86/hvm/vmx/vmx.c +++ b/xen/arch/x86/hvm/vmx/vmx.c @@ -2934,10 +2934,6 @@ static int vmx_msr_read_intercept(unsigned int msr, uint64_t *msr_content) if ( nestedhvm_enabled(curr->domain) ) *msr_content |= IA32_FEATURE_CONTROL_ENABLE_VMXON_OUTSIDE_SMX; break; - case MSR_IA32_VMX_BASIC...MSR_IA32_VMX_VMFUNC: - if ( !nvmx_msr_read_intercept(msr, msr_content) ) - goto gp_fault; - break; case MSR_IA32_MISC_ENABLE: rdmsrl(MSR_IA32_MISC_ENABLE, *msr_content); /* Debug Trace Store is not supported. */ @@ -3160,8 +3156,6 @@ static int vmx_msr_write_intercept(unsigned int msr, uint64_t msr_content) break; } case MSR_IA32_FEATURE_CONTROL: - case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC: - /* None of these MSRs are writeable. 
*/ goto gp_fault; case MSR_P6_PERFCTR(0)...MSR_P6_PERFCTR(7): diff --git a/xen/arch/x86/hvm/vmx/vvmx.c b/xen/arch/x86/hvm/vmx/vvmx.c index c8c168b7d0..8f4a68cf9a 100644 --- a/xen/arch/x86/hvm/vmx/vvmx.c +++ b/xen/arch/x86/hvm/vmx/vvmx.c @@ -1975,184 +1975,6 @@ int nvmx_handle_invvpid(struct cpu_user_regs *regs) return X86EMUL_OKAY; } -#define __emul_value(enable1, default1) \ - ((enable1 | default1) << 32 | (default1)) - -#define gen_vmx_msr(enable1, default1, host_value) \ - (((__emul_value(enable1, default1) & host_value) & (~0ul << 32)) | \ - ((uint32_t)(__emul_value(enable1, default1) | host_value))) - -/* - * Capability reporting - */ -int nvmx_msr_read_intercept(unsigned int msr, u64 *msr_content) -{ - struct vcpu *v = current; - struct domain *d = v->domain; - u64 data = 0, host_data = 0; - int r = 1; - - /* VMX capablity MSRs are available only when guest supports VMX. */ - if ( !nestedhvm_enabled(d) || !d->arch.cpuid->basic.vmx ) - return 0; - - /* - * These MSRs are only available when flags in other MSRs are set. - * These prerequisites are listed in the Intel 64 and IA-32 - * Architectures Software Developer’s Manual, Vol 3, Appendix A. 
- */ - switch ( msr ) - { - case MSR_IA32_VMX_PROCBASED_CTLS2: - if ( !cpu_has_vmx_secondary_exec_control ) - return 0; - break; - - case MSR_IA32_VMX_EPT_VPID_CAP: - if ( !(cpu_has_vmx_ept || cpu_has_vmx_vpid) ) - return 0; - break; - - case MSR_IA32_VMX_TRUE_PINBASED_CTLS: - case MSR_IA32_VMX_TRUE_PROCBASED_CTLS: - case MSR_IA32_VMX_TRUE_EXIT_CTLS: - case MSR_IA32_VMX_TRUE_ENTRY_CTLS: - if ( !(vmx_basic_msr & VMX_BASIC_DEFAULT1_ZERO) ) - return 0; - break; - - case MSR_IA32_VMX_VMFUNC: - if ( !cpu_has_vmx_vmfunc ) - return 0; - break; - } - - rdmsrl(msr, host_data); - - /* - * Remove unsupport features from n1 guest capability MSR - */ - switch (msr) { - case MSR_IA32_VMX_BASIC: - { - const struct vmcs_struct *vmcs = - map_domain_page(_mfn(PFN_DOWN(v->arch.hvm_vmx.vmcs_pa))); - - data = (host_data & (~0ul << 32)) | - (vmcs->vmcs_revision_id & 0x7fffffff); - unmap_domain_page(vmcs); - break; - } - case MSR_IA32_VMX_PINBASED_CTLS: - case MSR_IA32_VMX_TRUE_PINBASED_CTLS: - /* 1-settings */ - data = PIN_BASED_EXT_INTR_MASK | - PIN_BASED_NMI_EXITING | - PIN_BASED_PREEMPT_TIMER; - data = gen_vmx_msr(data, VMX_PINBASED_CTLS_DEFAULT1, host_data); - break; - case MSR_IA32_VMX_PROCBASED_CTLS: - case MSR_IA32_VMX_TRUE_PROCBASED_CTLS: - { - u32 default1_bits = VMX_PROCBASED_CTLS_DEFAULT1; - /* 1-settings */ - data = CPU_BASED_HLT_EXITING | - CPU_BASED_VIRTUAL_INTR_PENDING | - CPU_BASED_CR8_LOAD_EXITING | - CPU_BASED_CR8_STORE_EXITING | - CPU_BASED_INVLPG_EXITING | - CPU_BASED_CR3_LOAD_EXITING | - CPU_BASED_CR3_STORE_EXITING | - CPU_BASED_MONITOR_EXITING | - CPU_BASED_MWAIT_EXITING | - CPU_BASED_MOV_DR_EXITING | - CPU_BASED_ACTIVATE_IO_BITMAP | - CPU_BASED_USE_TSC_OFFSETING | - CPU_BASED_UNCOND_IO_EXITING | - CPU_BASED_RDTSC_EXITING | - CPU_BASED_MONITOR_TRAP_FLAG | - CPU_BASED_VIRTUAL_NMI_PENDING | - CPU_BASED_ACTIVATE_MSR_BITMAP | - CPU_BASED_PAUSE_EXITING | - CPU_BASED_RDPMC_EXITING | - CPU_BASED_TPR_SHADOW | - CPU_BASED_ACTIVATE_SECONDARY_CONTROLS; - - if ( msr == 
MSR_IA32_VMX_TRUE_PROCBASED_CTLS ) - default1_bits &= ~(CPU_BASED_CR3_LOAD_EXITING | - CPU_BASED_CR3_STORE_EXITING | - CPU_BASED_INVLPG_EXITING); - - data = gen_vmx_msr(data, default1_bits, host_data); - break; - } - case MSR_IA32_VMX_PROCBASED_CTLS2: - /* 1-settings */ - data = SECONDARY_EXEC_DESCRIPTOR_TABLE_EXITING | - SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | - SECONDARY_EXEC_ENABLE_VPID | - SECONDARY_EXEC_UNRESTRICTED_GUEST | - SECONDARY_EXEC_ENABLE_EPT; - data = gen_vmx_msr(data, 0, host_data); - break; - case MSR_IA32_VMX_EXIT_CTLS: - case MSR_IA32_VMX_TRUE_EXIT_CTLS: - /* 1-settings */ - data = VM_EXIT_ACK_INTR_ON_EXIT | - VM_EXIT_IA32E_MODE | - VM_EXIT_SAVE_PREEMPT_TIMER | - VM_EXIT_SAVE_GUEST_PAT | - VM_EXIT_LOAD_HOST_PAT | - VM_EXIT_SAVE_GUEST_EFER | - VM_EXIT_LOAD_HOST_EFER | - VM_EXIT_LOAD_PERF_GLOBAL_CTRL; - data = gen_vmx_msr(data, VMX_EXIT_CTLS_DEFAULT1, host_data); - break; - case MSR_IA32_VMX_ENTRY_CTLS: - case MSR_IA32_VMX_TRUE_ENTRY_CTLS: - /* 1-settings */ - data = VM_ENTRY_LOAD_GUEST_PAT | - VM_ENTRY_LOAD_GUEST_EFER | - VM_ENTRY_LOAD_PERF_GLOBAL_CTRL | - VM_ENTRY_IA32E_MODE; - data = gen_vmx_msr(data, VMX_ENTRY_CTLS_DEFAULT1, host_data); - break; - - case MSR_IA32_VMX_VMCS_ENUM: - /* The max index of VVMCS encoding is 0x1f. 
*/ - data = 0x1f << 1; - break; - case MSR_IA32_VMX_CR0_FIXED0: - /* PG, PE bits must be 1 in VMX operation */ - data = X86_CR0_PE | X86_CR0_PG; - break; - case MSR_IA32_VMX_CR0_FIXED1: - /* allow 0-settings for all bits */ - data = 0xffffffff; - break; - case MSR_IA32_VMX_CR4_FIXED0: - /* VMXE bit must be 1 in VMX operation */ - data = X86_CR4_VMXE; - break; - case MSR_IA32_VMX_CR4_FIXED1: - data = hvm_cr4_guest_valid_bits(d, false); - break; - case MSR_IA32_VMX_MISC: - /* Do not support CR3-target feature now */ - data = host_data & ~VMX_MISC_CR3_TARGET; - break; - case MSR_IA32_VMX_EPT_VPID_CAP: - data = nept_get_ept_vpid_cap(); - break; - default: - r = 0; - break; - } - - *msr_content = data; - return r; -} - /* This function uses L2_gpa to walk the P2M page table in L1. If the * walk is successful, the translated value is returned in * L1_gpa. The result value tells what to do next. diff --git a/xen/arch/x86/msr.c b/xen/arch/x86/msr.c index 9114b8f53b..ee2ff94fcd 100644 --- a/xen/arch/x86/msr.c +++ b/xen/arch/x86/msr.c @@ -365,6 +365,7 @@ int init_vcpu_msr_policy(struct vcpu *v) int guest_rdmsr(const struct vcpu *v, uint32_t msr, uint64_t *val) { + const struct domain *d = v->domain; const struct cpuid_policy *cp = v->domain->arch.cpuid; const struct msr_domain_policy *dp = v->domain->arch.msr; const struct msr_vcpu_policy *vp = v->arch.msr; @@ -399,6 +400,36 @@ int guest_rdmsr(const struct vcpu *v, uint32_t msr, uint64_t *val) _MSR_MISC_FEATURES_CPUID_FAULTING; break; + case MSR_IA32_VMX_BASIC ... 
MSR_IA32_VMX_VMCS_ENUM: + if ( !nestedhvm_enabled(d) || !cp->basic.vmx ) + goto gp_fault; + *val = dp->vmx.raw[msr - MSR_IA32_VMX_BASIC]; + break; + + case MSR_IA32_VMX_PROCBASED_CTLS2: + if ( !vmx_procbased_ctls2_available(dp) ) + goto gp_fault; + *val = dp->vmx_procbased_ctls2.raw; + break; + + case MSR_IA32_VMX_EPT_VPID_CAP: + if ( !vmx_ept_vpid_cap_available(dp) ) + goto gp_fault; + *val = dp->vmx_ept_vpid_cap.raw; + break; + + case MSR_IA32_VMX_TRUE_PINBASED_CTLS ... MSR_IA32_VMX_TRUE_ENTRY_CTLS: + if ( !vmx_true_ctls_available(dp) ) + goto gp_fault; + *val = dp->vmx_true_ctls.raw[msr - MSR_IA32_VMX_TRUE_PINBASED_CTLS]; + break; + + case MSR_IA32_VMX_VMFUNC: + if ( !vmx_vmfunc_available(dp) ) + goto gp_fault; + *val = dp->vmx_vmfunc.raw; + break; + default: return X86EMUL_UNHANDLEABLE; } @@ -474,6 +505,10 @@ int guest_wrmsr(struct vcpu *v, uint32_t msr, uint64_t val) break; } + case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC: + /* None of these MSRs are writeable. */ + goto gp_fault; + default: return X86EMUL_UNHANDLEABLE; } diff --git a/xen/include/asm-x86/hvm/vmx/vvmx.h b/xen/include/asm-x86/hvm/vmx/vvmx.h index 9ea35eb795..d8ae93e61a 100644 --- a/xen/include/asm-x86/hvm/vmx/vvmx.h +++ b/xen/include/asm-x86/hvm/vmx/vvmx.h @@ -200,8 +200,6 @@ int nvmx_handle_vmresume(struct cpu_user_regs *regs); int nvmx_handle_vmlaunch(struct cpu_user_regs *regs); int nvmx_handle_invept(struct cpu_user_regs *regs); int nvmx_handle_invvpid(struct cpu_user_regs *regs); -int nvmx_msr_read_intercept(unsigned int msr, - u64 *msr_content); void nvmx_update_exec_control(struct vcpu *v, u32 value); void nvmx_update_secondary_exec_control(struct vcpu *v, -- 2.14.1 _______________________________________________ Xen-devel mailing list Xen-devel@xxxxxxxxxxxxxxxxxxxx https://lists.xenproject.org/mailman/listinfo/xen-devel
|
Lists.xenproject.org is hosted with RackSpace, monitoring our |