[Xen-devel] [PATCH v9 16/20] x86/VPMU: Merge vpmu_rdmsr and vpmu_wrmsr
The two routines share most of their logic.

Signed-off-by: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>
---
 xen/arch/x86/hvm/svm/svm.c     |  4 +--
 xen/arch/x86/hvm/vmx/vmx.c     |  6 ++--
 xen/arch/x86/hvm/vpmu.c        | 66 +++++++++++++++++++-----------------
 xen/arch/x86/traps.c           |  4 +--
 xen/include/asm-x86/hvm/vpmu.h |  6 ++--
 5 files changed, 40 insertions(+), 46 deletions(-)

diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index da5af5c..8935404 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -1642,7 +1642,7 @@ static int svm_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
     case MSR_AMD_FAM15H_EVNTSEL3:
     case MSR_AMD_FAM15H_EVNTSEL4:
     case MSR_AMD_FAM15H_EVNTSEL5:
-        if ( vpmu_do_rdmsr(msr, msr_content) )
+        if ( vpmu_do_msr(msr, msr_content, VPMU_MSR_READ) )
             goto gpf;
         break;

@@ -1794,7 +1794,7 @@ static int svm_msr_write_intercept(unsigned int msr, uint64_t msr_content)
     case MSR_AMD_FAM15H_EVNTSEL3:
     case MSR_AMD_FAM15H_EVNTSEL4:
     case MSR_AMD_FAM15H_EVNTSEL5:
-        if ( vpmu_do_wrmsr(msr, msr_content) )
+        if ( vpmu_do_msr(msr, &msr_content, VPMU_MSR_WRITE) )
             goto gpf;
         break;

diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index c8dbe80..7c11550 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -2085,7 +2085,7 @@ static int vmx_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
     case MSR_CORE_PERF_FIXED_CTR_CTRL...MSR_CORE_PERF_GLOBAL_OVF_CTRL:
     case MSR_IA32_PEBS_ENABLE:
     case MSR_IA32_DS_AREA:
-        if ( vpmu_do_rdmsr(msr, msr_content) )
+        if ( vpmu_do_msr(msr, msr_content, VPMU_MSR_READ) )
             goto gp_fault;
         break;
     default:
@@ -2261,7 +2261,7 @@ static int vmx_msr_write_intercept(unsigned int msr, uint64_t msr_content)
         if ( msr_content & ~supported )
         {
             /* Perhaps some other bits are supported in vpmu. */
-            if ( vpmu_do_wrmsr(msr, msr_content) )
+            if ( vpmu_do_msr(msr, &msr_content, VPMU_MSR_WRITE) )
                 goto gp_fault;
         }
         if ( msr_content & IA32_DEBUGCTLMSR_LBR )
@@ -2297,7 +2297,7 @@ static int vmx_msr_write_intercept(unsigned int msr, uint64_t msr_content)
     case MSR_CORE_PERF_FIXED_CTR_CTRL...MSR_CORE_PERF_GLOBAL_OVF_CTRL:
     case MSR_IA32_PEBS_ENABLE:
     case MSR_IA32_DS_AREA:
-        if ( vpmu_do_wrmsr(msr, msr_content) )
+        if ( vpmu_do_msr(msr, &msr_content, VPMU_MSR_WRITE) )
             goto gp_fault;
         break;
     default:
diff --git a/xen/arch/x86/hvm/vpmu.c b/xen/arch/x86/hvm/vpmu.c
index 818f721..c9cf6c0 100644
--- a/xen/arch/x86/hvm/vpmu.c
+++ b/xen/arch/x86/hvm/vpmu.c
@@ -91,57 +91,49 @@ void vpmu_lvtpc_update(uint32_t val)
     apic_write(APIC_LVTPC, vpmu->hw_lapic_lvtpc);
 }

-int vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
+int vpmu_do_msr(unsigned int msr, uint64_t *msr_content, int rw)
 {
     struct vcpu *curr = current;
     struct vpmu_struct *vpmu = vcpu_vpmu(curr);
+    struct arch_vpmu_ops *ops = vpmu->arch_vpmu_ops;
+    int ret = 0;

     if ( !(vpmu_mode & XENPMU_MODE_SELF) )
         return 0;

-    if ( vpmu->arch_vpmu_ops && vpmu->arch_vpmu_ops->do_wrmsr )
+    switch ( rw )
     {
-        int ret = vpmu->arch_vpmu_ops->do_wrmsr(msr, msr_content);
-
-        /*
-         * We may have received a PMU interrupt during WRMSR handling
-         * and since do_wrmsr may load VPMU context we should save
-         * (and unload) it again.
-         */
-        if ( !is_hvm_domain(curr->domain) &&
-             vpmu->xenpmu_data && (vpmu->xenpmu_data->pmu_flags & PMU_CACHED) )
-        {
-            vpmu_set(vpmu, VPMU_CONTEXT_SAVE);
-            vpmu->arch_vpmu_ops->arch_vpmu_save(curr);
-            vpmu_reset(vpmu, VPMU_CONTEXT_SAVE | VPMU_CONTEXT_LOADED);
-        }
-        return ret;
-    }
-    return 0;
-}
+    case VPMU_MSR_READ:
+        if ( !ops || !ops->do_rdmsr )
+            return 0;
+        ret = ops->do_rdmsr(msr, msr_content);
+        break;

-int vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content)
-{
-    struct vcpu *curr = current;
-    struct vpmu_struct *vpmu = vcpu_vpmu(curr);
+    case VPMU_MSR_WRITE:
+        if ( !ops || !ops->do_wrmsr )
+            return 0;
+        ret = ops->do_wrmsr(msr, *msr_content);
+        break;

-    if ( !(vpmu_mode & XENPMU_MODE_SELF) )
+    default:
+        ASSERT(0);
         return 0;
+    }

-    if ( vpmu->arch_vpmu_ops && vpmu->arch_vpmu_ops->do_rdmsr )
+    /*
+     * We may have received a PMU interrupt while handling MSR access
+     * and since do_wr/rdmsr may load VPMU context we should save
+     * (and unload) it again.
+     */
+    if ( !is_hvm_domain(curr->domain) &&
+         vpmu->xenpmu_data && (vpmu->xenpmu_data->pmu_flags & PMU_CACHED) )
     {
-        int ret = vpmu->arch_vpmu_ops->do_rdmsr(msr, msr_content);
-
-        if ( !is_hvm_domain(curr->domain) &&
-             vpmu->xenpmu_data && (vpmu->xenpmu_data->pmu_flags & PMU_CACHED) )
-        {
-            vpmu_set(vpmu, VPMU_CONTEXT_SAVE);
-            vpmu->arch_vpmu_ops->arch_vpmu_save(curr);
-            vpmu_reset(vpmu, VPMU_CONTEXT_SAVE | VPMU_CONTEXT_LOADED);
-        }
-        return ret;
+        vpmu_set(vpmu, VPMU_CONTEXT_SAVE);
+        ops->arch_vpmu_save(curr);
+        vpmu_reset(vpmu, VPMU_CONTEXT_SAVE | VPMU_CONTEXT_LOADED);
     }
-    return 0;
+
+    return ret;
 }

 static struct vcpu *choose_hwdom_vcpu(void)
diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
index f1830d5..4c4292b 100644
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -2575,7 +2575,7 @@ static int emulate_privileged_op(struct cpu_user_regs *regs)
             if ( vpmu_msr ||
                  ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) && !vpmu_msr) )
             {
-                if ( vpmu_do_wrmsr(regs->ecx, msr_content) )
+                if ( vpmu_do_msr(regs->ecx, &msr_content, VPMU_MSR_WRITE) )
                     goto fail;
                 break;
             }
@@ -2698,7 +2698,7 @@ static int emulate_privileged_op(struct cpu_user_regs *regs)
             if ( vpmu_msr ||
                  ((boot_cpu_data.x86_vendor != X86_VENDOR_AMD) && !vpmu_msr) )
             {
-                if ( vpmu_do_rdmsr(regs->ecx, &msr_content) )
+                if ( vpmu_do_msr(regs->ecx, &msr_content, VPMU_MSR_READ) )
                     goto fail;

                 regs->eax = (uint32_t)msr_content;
diff --git a/xen/include/asm-x86/hvm/vpmu.h b/xen/include/asm-x86/hvm/vpmu.h
index 25954c6..429ab27 100644
--- a/xen/include/asm-x86/hvm/vpmu.h
+++ b/xen/include/asm-x86/hvm/vpmu.h
@@ -94,9 +94,11 @@ static inline bool_t vpmu_are_all_set(const struct vpmu_struct *vpmu,
     return !!((vpmu->flags & mask) == mask);
 }

+#define VPMU_MSR_READ  0
+#define VPMU_MSR_WRITE 1
+
 void vpmu_lvtpc_update(uint32_t val);
-int vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content);
-int vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content);
+int vpmu_do_msr(unsigned int msr, uint64_t *msr_content, int rw);
 int vpmu_do_interrupt(struct cpu_user_regs *regs);
 void vpmu_do_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
                    unsigned int *ecx, unsigned int *edx);
--
1.8.1.4
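For readers following the series, the refactoring boils down to a single entry point that dispatches on an access-type flag and then runs the shared epilogue once. Below is a minimal, standalone sketch of that pattern; every name, value, and MSR number in it is invented for illustration, it is not Xen code, and it deliberately omits the PMU context save/unload step that the real vpmu_do_msr() performs.

/*
 * Standalone illustration of the merged read/write MSR accessor pattern.
 * All identifiers here are hypothetical; they only mirror the shape of
 * the vpmu_do_msr() dispatch introduced by this patch.
 */
#include <stdint.h>
#include <stdio.h>

#define MSR_READ  0
#define MSR_WRITE 1

struct msr_ops {
    int (*do_rdmsr)(unsigned int msr, uint64_t *val);
    int (*do_wrmsr)(unsigned int msr, uint64_t val);
};

static uint64_t fake_counter;   /* stands in for a hardware counter MSR */

static int demo_rdmsr(unsigned int msr, uint64_t *val)
{
    (void)msr;
    *val = fake_counter;
    return 0;                   /* 0 == success, mirroring the patch */
}

static int demo_wrmsr(unsigned int msr, uint64_t val)
{
    (void)msr;
    fake_counter = val;
    return 0;
}

/* Single entry point replacing separate rdmsr/wrmsr wrappers. */
static int do_msr(const struct msr_ops *ops, unsigned int msr,
                  uint64_t *val, int rw)
{
    int ret;

    switch ( rw )
    {
    case MSR_READ:
        if ( !ops || !ops->do_rdmsr )
            return 0;
        ret = ops->do_rdmsr(msr, val);
        break;

    case MSR_WRITE:
        if ( !ops || !ops->do_wrmsr )
            return 0;
        ret = ops->do_wrmsr(msr, *val);
        break;

    default:
        return 0;
    }

    /* Shared epilogue lives here once instead of being duplicated. */
    return ret;
}

int main(void)
{
    struct msr_ops ops = { demo_rdmsr, demo_wrmsr };
    uint64_t val = 0x1234;

    do_msr(&ops, 0xc0010201, &val, MSR_WRITE);  /* invented MSR number */
    val = 0;
    do_msr(&ops, 0xc0010201, &val, MSR_READ);
    printf("read back %#llx\n", (unsigned long long)val);
    return 0;
}

Passing the value by pointer for both directions, as the patch does, is what lets the two former routines collapse into one signature; the write path simply dereferences the pointer before calling the vendor hook.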