[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] Re: [Xen-devel] [PATCH v6 14/19] x86/VPMU: Merge vpmu_rdmsr and vpmu_wrmsr
> From: Boris Ostrovsky [mailto:boris.ostrovsky@xxxxxxxxxx] > Sent: Tuesday, May 13, 2014 11:53 PM > > The two routines share most of their logic. > > Signed-off-by: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx> Acked-by: Kevin Tian <kevin.tian@xxxxxxxxx> > --- > xen/arch/x86/hvm/svm/svm.c | 9 ++++++--- > xen/arch/x86/hvm/vmx/vmx.c | 11 +++++++---- > xen/arch/x86/hvm/vpmu.c | 42 > +++++++++++++++--------------------------- > xen/arch/x86/traps.c | 4 ++-- > xen/include/asm-x86/hvm/vpmu.h | 6 ++++-- > 5 files changed, 34 insertions(+), 38 deletions(-) > > diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c > index c23db32..3d652c2 100644 > --- a/xen/arch/x86/hvm/svm/svm.c > +++ b/xen/arch/x86/hvm/svm/svm.c > @@ -1632,7 +1632,7 @@ static int svm_msr_read_intercept(unsigned int msr, > uint64_t *msr_content) > case MSR_AMD_FAM15H_EVNTSEL3: > case MSR_AMD_FAM15H_EVNTSEL4: > case MSR_AMD_FAM15H_EVNTSEL5: > - vpmu_do_rdmsr(msr, msr_content); > + vpmu_do_msr(msr, msr_content, VPMU_MSR_READ); > break; > > case MSR_AMD64_DR0_ADDRESS_MASK: > @@ -1783,9 +1783,12 @@ static int svm_msr_write_intercept(unsigned int > msr, uint64_t msr_content) > case MSR_AMD_FAM15H_EVNTSEL3: > case MSR_AMD_FAM15H_EVNTSEL4: > case MSR_AMD_FAM15H_EVNTSEL5: > - vpmu_do_wrmsr(msr, msr_content); > - break; > + { > + uint64_t msr_val = msr_content; > > + vpmu_do_msr(msr, &msr_val, VPMU_MSR_WRITE); > + break; > + } > case MSR_IA32_MCx_MISC(4): /* Threshold register */ > case MSR_F10_MC4_MISC1 ... MSR_F10_MC4_MISC3: > /* > diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c > index 1c9e742..8588f48 100644 > --- a/xen/arch/x86/hvm/vmx/vmx.c > +++ b/xen/arch/x86/hvm/vmx/vmx.c > @@ -2047,11 +2047,11 @@ static int vmx_msr_read_intercept(unsigned int > msr, uint64_t *msr_content) > *msr_content |= MSR_IA32_MISC_ENABLE_BTS_UNAVAIL | > MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL; > /* Perhaps vpmu will change some bits. 
*/ > - if ( vpmu_do_rdmsr(msr, msr_content) ) > + if ( vpmu_do_msr(msr, msr_content, VPMU_MSR_READ) ) > goto done; > break; > default: > - if ( vpmu_do_rdmsr(msr, msr_content) ) > + if ( vpmu_do_msr(msr, msr_content, VPMU_MSR_READ) ) > break; > if ( passive_domain_do_rdmsr(msr, msr_content) ) > goto done; > @@ -2202,6 +2202,7 @@ void vmx_vlapic_msr_changed(struct vcpu *v) > static int vmx_msr_write_intercept(unsigned int msr, uint64_t msr_content) > { > struct vcpu *v = current; > + uint64_t msr_val; > > HVM_DBG_LOG(DBG_LEVEL_1, "ecx=%#x, msr_value=%#"PRIx64, msr, > msr_content); > > @@ -2225,7 +2226,8 @@ static int vmx_msr_write_intercept(unsigned int > msr, uint64_t msr_content) > if ( msr_content & ~supported ) > { > /* Perhaps some other bits are supported in vpmu. */ > - if ( !vpmu_do_wrmsr(msr, msr_content) ) > + msr_val = msr_content; > + if ( !vpmu_do_msr(msr, &msr_val, VPMU_MSR_WRITE) ) > break; > } > if ( msr_content & IA32_DEBUGCTLMSR_LBR ) > @@ -2256,7 +2258,8 @@ static int vmx_msr_write_intercept(unsigned int > msr, uint64_t msr_content) > goto gp_fault; > break; > default: > - if ( vpmu_do_wrmsr(msr, msr_content) ) > + msr_val = msr_content; > + if ( vpmu_do_msr(msr, &msr_val, VPMU_MSR_WRITE) ) > return X86EMUL_OKAY; > if ( passive_domain_do_wrmsr(msr, msr_content) ) > return X86EMUL_OKAY; > diff --git a/xen/arch/x86/hvm/vpmu.c b/xen/arch/x86/hvm/vpmu.c > index 9995728..896e2be 100644 > --- a/xen/arch/x86/hvm/vpmu.c > +++ b/xen/arch/x86/hvm/vpmu.c > @@ -84,20 +84,29 @@ void vpmu_lvtpc_update(uint32_t val) > apic_write(APIC_LVTPC, vpmu->hw_lapic_lvtpc); > } > > -int vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content) > +int vpmu_do_msr(unsigned int msr, uint64_t *msr_content, uint8_t rw) > { > struct vpmu_struct *vpmu = vcpu_vpmu(current); > > if ( !(vpmu_mode & XENPMU_MODE_ON) ) > return 0; > > - if ( vpmu->arch_vpmu_ops && vpmu->arch_vpmu_ops->do_wrmsr ) > + ASSERT((rw == VPMU_MSR_READ) || (rw == VPMU_MSR_WRITE)); > + > + if ( vpmu->arch_vpmu_ops ) 
> { > - int ret = vpmu->arch_vpmu_ops->do_wrmsr(msr, msr_content); > + int ret; > + > + if ( (rw == VPMU_MSR_READ) && > vpmu->arch_vpmu_ops->do_rdmsr ) > + ret = vpmu->arch_vpmu_ops->do_rdmsr(msr, msr_content); > + else if ( vpmu->arch_vpmu_ops->do_wrmsr ) > + ret = vpmu->arch_vpmu_ops->do_wrmsr(msr, *msr_content); > + else > + return 0; > > /* > - * We may have received a PMU interrupt during WRMSR handling > - * and since do_wrmsr may load VPMU context we should save > + * We may have received a PMU interrupt while handling MSR > access > + * and since do_wr/rdmsr may load VPMU context we should save > * (and unload) it again. > */ > if ( !is_hvm_domain(current->domain) && > @@ -107,31 +116,10 @@ int vpmu_do_wrmsr(unsigned int msr, uint64_t > msr_content) > vpmu->arch_vpmu_ops->arch_vpmu_save(current); > vpmu_reset(vpmu, VPMU_CONTEXT_SAVE | > VPMU_CONTEXT_LOADED); > } > - return ret; > - } > - return 0; > -} > - > -int vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content) > -{ > - struct vpmu_struct *vpmu = vcpu_vpmu(current); > > - if ( !(vpmu_mode & XENPMU_MODE_ON) ) > - return 0; > - > - if ( vpmu->arch_vpmu_ops && vpmu->arch_vpmu_ops->do_rdmsr ) > - { > - int ret = vpmu->arch_vpmu_ops->do_rdmsr(msr, msr_content); > - > - if ( !is_hvm_domain(current->domain) && > - (current->arch.vpmu.xenpmu_data->pmu_flags & > PMU_CACHED) ) > - { > - vpmu_set(vpmu, VPMU_CONTEXT_SAVE); > - vpmu->arch_vpmu_ops->arch_vpmu_save(current); > - vpmu_reset(vpmu, VPMU_CONTEXT_SAVE | > VPMU_CONTEXT_LOADED); > - } > return ret; > } > + > return 0; > } > > diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c > index a0d0ba7..adbdebe 100644 > --- a/xen/arch/x86/traps.c > +++ b/xen/arch/x86/traps.c > @@ -2525,7 +2525,7 @@ static int emulate_privileged_op(struct > cpu_user_regs *regs) > case > MSR_CORE_PERF_FIXED_CTR0...MSR_CORE_PERF_FIXED_CTR2: > case > MSR_CORE_PERF_FIXED_CTR_CTRL...MSR_CORE_PERF_GLOBAL_OVF_CTRL: > case > MSR_AMD_FAM15H_EVNTSEL0...MSR_AMD_FAM15H_PERFCTR5: > - if ( 
!vpmu_do_wrmsr(regs->ecx, msr_content) ) > + if ( !vpmu_do_msr(regs->ecx, &msr_content, > VPMU_MSR_WRITE) ) > goto invalid; > break; > default: > @@ -2638,7 +2638,7 @@ static int emulate_privileged_op(struct > cpu_user_regs *regs) > case > MSR_CORE_PERF_FIXED_CTR0...MSR_CORE_PERF_FIXED_CTR2: > case > MSR_CORE_PERF_FIXED_CTR_CTRL...MSR_CORE_PERF_GLOBAL_OVF_CTRL: > case > MSR_AMD_FAM15H_EVNTSEL0...MSR_AMD_FAM15H_PERFCTR5: > - if ( vpmu_do_rdmsr(regs->ecx, &msr_content) ) > + if ( vpmu_do_msr(regs->ecx, &msr_content, > VPMU_MSR_READ) ) > { > regs->eax = (uint32_t)msr_content; > regs->edx = (uint32_t)(msr_content >> 32); > diff --git a/xen/include/asm-x86/hvm/vpmu.h > b/xen/include/asm-x86/hvm/vpmu.h > index 438a913..bab8779 100644 > --- a/xen/include/asm-x86/hvm/vpmu.h > +++ b/xen/include/asm-x86/hvm/vpmu.h > @@ -78,9 +78,11 @@ struct vpmu_struct { > #define vpmu_is_set_all(_vpmu, _x) (((_vpmu)->flags & (_x)) == (_x)) > #define vpmu_clear(_vpmu) ((_vpmu)->flags = 0) > > +#define VPMU_MSR_READ 0 > +#define VPMU_MSR_WRITE 1 > + > void vpmu_lvtpc_update(uint32_t val); > -int vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content); > -int vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content); > +int vpmu_do_msr(unsigned int msr, uint64_t *msr_content, uint8_t rw); > int vpmu_do_interrupt(struct cpu_user_regs *regs); > void vpmu_do_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx, > unsigned int *ecx, unsigned > int *edx); > -- > 1.8.1.4 _______________________________________________ Xen-devel mailing list Xen-devel@xxxxxxxxxxxxx http://lists.xen.org/xen-devel
|
Lists.xenproject.org is hosted with RackSpace, monitoring our |
servers 24x7x365 and backed by RackSpace's Fanatical Support®.