[Xen-devel] [PATCH v9 13/20] x86/VPMU: When handling MSR accesses, leave fault injection to callers
This is done in preparation for subsequent PV patches.

Signed-off-by: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>
---
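The semantic change: vpmu_do_rdmsr() and vpmu_do_wrmsr() used to return 1 for
"handled" and inject #GP internally; they now return 0 on success and non-zero
when the access should fault, and the MSR intercept handlers do the injection.
A minimal caller-side sketch of the new convention (illustration only, not part
of the patch; example_msr_write_intercept() is a made-up stand-in for the real
svm/vmx intercepts):

/*
 * Illustration only -- not part of this patch. A hypothetical intercept
 * showing the new convention: vpmu_do_wrmsr() returns 0 on success and
 * non-zero on an invalid access, and the caller, not the vPMU code, is
 * now responsible for injecting the #GP fault.
 */
static int example_msr_write_intercept(unsigned int msr, uint64_t msr_content)
{
    switch ( msr )
    {
    case MSR_P6_EVNTSEL0...MSR_P6_EVNTSEL1:
        if ( vpmu_do_wrmsr(msr, msr_content) )   /* non-zero => fault */
            goto gp_fault;
        break;
    default:
        break;
    }
    return X86EMUL_OKAY;

 gp_fault:
    hvm_inject_hw_exception(TRAP_gp_fault, 0);   /* injection now lives here */
    return X86EMUL_EXCEPTION;
}
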
 xen/arch/x86/hvm/svm/svm.c        |  6 ++--
 xen/arch/x86/hvm/svm/vpmu.c       |  6 ++--
 xen/arch/x86/hvm/vmx/vmx.c        | 26 +++++++++++-----
 xen/arch/x86/hvm/vmx/vpmu_core2.c | 65 +++++++++++++++++----------------
 4 files changed, 54 insertions(+), 49 deletions(-)

diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index 555e5f7..da5af5c 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -1642,7 +1642,8 @@ static int svm_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
     case MSR_AMD_FAM15H_EVNTSEL3:
     case MSR_AMD_FAM15H_EVNTSEL4:
     case MSR_AMD_FAM15H_EVNTSEL5:
-        vpmu_do_rdmsr(msr, msr_content);
+        if ( vpmu_do_rdmsr(msr, msr_content) )
+            goto gpf;
         break;
 
     case MSR_AMD64_DR0_ADDRESS_MASK:
@@ -1793,7 +1794,8 @@ static int svm_msr_write_intercept(unsigned int msr, uint64_t msr_content)
     case MSR_AMD_FAM15H_EVNTSEL3:
     case MSR_AMD_FAM15H_EVNTSEL4:
     case MSR_AMD_FAM15H_EVNTSEL5:
-        vpmu_do_wrmsr(msr, msr_content);
+        if ( vpmu_do_wrmsr(msr, msr_content) )
+            goto gpf;
         break;
 
     case MSR_IA32_MCx_MISC(4): /* Threshold register */
diff --git a/xen/arch/x86/hvm/svm/vpmu.c b/xen/arch/x86/hvm/svm/vpmu.c
index 6f21e39..07dccb6 100644
--- a/xen/arch/x86/hvm/svm/vpmu.c
+++ b/xen/arch/x86/hvm/svm/vpmu.c
@@ -303,7 +303,7 @@ static int amd_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
          is_pmu_enabled(msr_content) && !vpmu_is_set(vpmu, VPMU_RUNNING) )
     {
         if ( !acquire_pmu_ownership(PMU_OWNER_HVM) )
-            return 1;
+            return 0;
         vpmu_set(vpmu, VPMU_RUNNING);
 
         if ( has_hvm_container_domain(v->domain) && is_msr_bitmap_on(vpmu) )
@@ -333,7 +333,7 @@ static int amd_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
 
     /* Write to hw counters */
     wrmsrl(msr, msr_content);
-    return 1;
+    return 0;
 }
 
 static int amd_vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content)
@@ -351,7 +351,7 @@ static int amd_vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content)
 
     rdmsrl(msr, *msr_content);
 
-    return 1;
+    return 0;
 }
 
 static int amd_vpmu_initialise(struct vcpu *v)
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 1533d98..c8dbe80 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -2078,12 +2078,17 @@ static int vmx_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
             *msr_content |= MSR_IA32_MISC_ENABLE_BTS_UNAVAIL |
                             MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL;
         /* Perhaps vpmu will change some bits. */
+        /* FALLTHROUGH */
+    case MSR_P6_PERFCTR0...MSR_P6_PERFCTR1:
+    case MSR_P6_EVNTSEL0...MSR_P6_EVNTSEL1:
+    case MSR_CORE_PERF_FIXED_CTR0...MSR_CORE_PERF_FIXED_CTR2:
+    case MSR_CORE_PERF_FIXED_CTR_CTRL...MSR_CORE_PERF_GLOBAL_OVF_CTRL:
+    case MSR_IA32_PEBS_ENABLE:
+    case MSR_IA32_DS_AREA:
         if ( vpmu_do_rdmsr(msr, msr_content) )
-            goto done;
+            goto gp_fault;
         break;
     default:
-        if ( vpmu_do_rdmsr(msr, msr_content) )
-            break;
         if ( passive_domain_do_rdmsr(msr, msr_content) )
             goto done;
         switch ( long_mode_do_msr_read(msr, msr_content) )
@@ -2256,8 +2261,8 @@ static int vmx_msr_write_intercept(unsigned int msr, uint64_t msr_content)
         if ( msr_content & ~supported )
         {
             /* Perhaps some other bits are supported in vpmu. */
-            if ( !vpmu_do_wrmsr(msr, msr_content) )
-                break;
+            if ( vpmu_do_wrmsr(msr, msr_content) )
+                goto gp_fault;
         }
         if ( msr_content & IA32_DEBUGCTLMSR_LBR )
         {
@@ -2286,9 +2291,16 @@ static int vmx_msr_write_intercept(unsigned int msr, uint64_t msr_content)
         if ( !nvmx_msr_write_intercept(msr, msr_content) )
             goto gp_fault;
         break;
-    default:
+    case MSR_P6_PERFCTR0...MSR_P6_PERFCTR1:
+    case MSR_P6_EVNTSEL0...MSR_P6_EVNTSEL1:
+    case MSR_CORE_PERF_FIXED_CTR0...MSR_CORE_PERF_FIXED_CTR2:
+    case MSR_CORE_PERF_FIXED_CTR_CTRL...MSR_CORE_PERF_GLOBAL_OVF_CTRL:
+    case MSR_IA32_PEBS_ENABLE:
+    case MSR_IA32_DS_AREA:
         if ( vpmu_do_wrmsr(msr, msr_content) )
-            return X86EMUL_OKAY;
+            goto gp_fault;
+        break;
+    default:
         if ( passive_domain_do_wrmsr(msr, msr_content) )
             return X86EMUL_OKAY;
 
diff --git a/xen/arch/x86/hvm/vmx/vpmu_core2.c b/xen/arch/x86/hvm/vmx/vpmu_core2.c
index d964444..60e0474 100644
--- a/xen/arch/x86/hvm/vmx/vpmu_core2.c
+++ b/xen/arch/x86/hvm/vmx/vpmu_core2.c
@@ -458,13 +458,13 @@ static int core2_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
         if ( cpu_has(&current_cpu_data, X86_FEATURE_DSCPL) )
             supported |= IA32_DEBUGCTLMSR_BTS_OFF_OS |
                          IA32_DEBUGCTLMSR_BTS_OFF_USR;
-        if ( msr_content & supported )
+
+        if ( !(msr_content & supported ) ||
+             !vpmu_is_set(vpmu, VPMU_CPU_HAS_BTS) )
         {
-            if ( vpmu_is_set(vpmu, VPMU_CPU_HAS_BTS) )
-                return 1;
-            gdprintk(XENLOG_WARNING, "Debug Store is not supported on this cpu\n");
-            hvm_inject_hw_exception(TRAP_gp_fault, 0);
-            return 0;
+            gdprintk(XENLOG_WARNING,
+                     "Debug Store is not supported on this cpu\n");
+            return 1;
         }
     }
     return 0;
@@ -476,18 +476,17 @@ static int core2_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
     {
     case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
         core2_vpmu_cxt->global_status &= ~msr_content;
-        return 1;
+        return 0;
     case MSR_CORE_PERF_GLOBAL_STATUS:
         gdprintk(XENLOG_INFO, "Can not write readonly MSR: "
                  "MSR_PERF_GLOBAL_STATUS(0x38E)!\n");
-        hvm_inject_hw_exception(TRAP_gp_fault, 0);
         return 1;
     case MSR_IA32_PEBS_ENABLE:
         if ( msr_content & 1 )
             gdprintk(XENLOG_WARNING, "Guest is trying to enable PEBS, "
                      "which is not supported.\n");
         core2_vpmu_cxt->pebs_enable = msr_content;
-        return 1;
+        return 0;
     case MSR_IA32_DS_AREA:
         if ( vpmu_is_set(vpmu, VPMU_CPU_HAS_DS) )
         {
@@ -496,14 +495,13 @@ static int core2_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
                 gdprintk(XENLOG_WARNING,
                          "Illegal address for IA32_DS_AREA: %#" PRIx64 "x\n",
                          msr_content);
-                hvm_inject_hw_exception(TRAP_gp_fault, 0);
                 return 1;
             }
             core2_vpmu_cxt->ds_area = msr_content;
             break;
         }
         gdprintk(XENLOG_WARNING, "Guest setting of DTS is ignored.\n");
-        return 1;
+        return 0;
     case MSR_CORE_PERF_GLOBAL_CTRL:
         global_ctrl = msr_content;
         break;
@@ -541,45 +539,43 @@ static int core2_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
         }
     }
 
-    if ( (global_ctrl & *enabled_cntrs) || (core2_vpmu_cxt->ds_area != 0) )
-        vpmu_set(vpmu, VPMU_RUNNING);
-    else
-        vpmu_reset(vpmu, VPMU_RUNNING);
-
     if ( type != MSR_TYPE_GLOBAL )
     {
         u64 mask;
-        int inject_gp = 0;
+
         switch ( type )
         {
         case MSR_TYPE_ARCH_CTRL:      /* MSR_P6_EVNTSEL[0,...] */
             mask = ~((1ull << 32) - 1);
-            if (msr_content & mask)
-                inject_gp = 1;
+            if ( msr_content & mask )
+                return 1;
             break;
         case MSR_TYPE_CTRL:           /* IA32_FIXED_CTR_CTRL */
             if ( msr == MSR_IA32_DS_AREA )
                 break;
             /* 4 bits per counter, currently 3 fixed counters implemented. */
             mask = ~((1ull << (fixed_pmc_cnt * FIXED_CTR_CTRL_BITS)) - 1);
-            if (msr_content & mask)
-                inject_gp = 1;
+            if ( msr_content & mask )
+                return 1;
             break;
        case MSR_TYPE_COUNTER:        /* IA32_FIXED_CTR[0-2] */
             mask = ~((1ull << core2_get_bitwidth_fix_count()) - 1);
-            if (msr_content & mask)
-                inject_gp = 1;
+            if ( msr_content & mask )
+                return 1;
             break;
         }
-        if (inject_gp)
-            hvm_inject_hw_exception(TRAP_gp_fault, 0);
-        else
-            wrmsrl(msr, msr_content);
+
+        wrmsrl(msr, msr_content);
     }
     else
         vmx_write_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL, msr_content);
 
-    return 1;
+    if ( (global_ctrl & *enabled_cntrs) || (core2_vpmu_cxt->ds_area != 0) )
+        vpmu_set(vpmu, VPMU_RUNNING);
+    else
+        vpmu_reset(vpmu, VPMU_RUNNING);
+
+    return 0;
 }
 
 static int core2_vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content)
@@ -607,19 +603,14 @@ static int core2_vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content)
             rdmsrl(msr, *msr_content);
         }
     }
-    else
+    else if ( msr == MSR_IA32_MISC_ENABLE )
     {
         /* Extension for BTS */
-        if ( msr == MSR_IA32_MISC_ENABLE )
-        {
-            if ( vpmu_is_set(vpmu, VPMU_CPU_HAS_BTS) )
-                *msr_content &= ~MSR_IA32_MISC_ENABLE_BTS_UNAVAIL;
-        }
-        else
-            return 0;
+        if ( vpmu_is_set(vpmu, VPMU_CPU_HAS_BTS) )
+            *msr_content &= ~MSR_IA32_MISC_ENABLE_BTS_UNAVAIL;
     }
 
-    return 1;
+    return 0;
 }
 
 static void core2_vpmu_do_cpuid(unsigned int input,
-- 
1.8.1.4

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel