
Re: [Xen-devel] [PATCH v10 16/20] x86/VPMU: Merge vpmu_rdmsr and vpmu_wrmsr



> From: Boris Ostrovsky [mailto:boris.ostrovsky@xxxxxxxxxx]
> Sent: Wednesday, September 03, 2014 8:41 PM
> 
> The two routines share most of their logic.
> 
> Signed-off-by: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>

Acked-by: Kevin Tian <kevin.tian@xxxxxxxxx>
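
For reference, the consolidated entry point keeps a single four-argument
signature for both directions. A minimal sketch of the resulting call
pattern (the caller context below is hypothetical; vpmu_do_msr(),
VPMU_MSR_READ and VPMU_MSR_WRITE are the names introduced by this patch):

    uint64_t msr_content;

    /* Read: the result is returned through msr_content. */
    if ( vpmu_do_msr(msr, &msr_content, 0, VPMU_MSR_READ) )
        goto gp_fault;

    /* Write: msr_content is passed by address as well, so both
     * directions share one signature; the "supported" mask is
     * only consulted on the write path. */
    if ( vpmu_do_msr(msr, &msr_content, supported, VPMU_MSR_WRITE) )
        goto gp_fault;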

> ---
>  xen/arch/x86/hvm/svm/svm.c     |  4 +--
>  xen/arch/x86/hvm/vmx/vmx.c     |  6 ++--
>  xen/arch/x86/hvm/vpmu.c        | 71 +++++++++++++++++++-----------------------
>  xen/arch/x86/traps.c           |  6 ++--
>  xen/include/asm-x86/hvm/vpmu.h |  7 +++--
>  5 files changed, 46 insertions(+), 48 deletions(-)
> 
> diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
> index 326cad9..5174a82 100644
> --- a/xen/arch/x86/hvm/svm/svm.c
> +++ b/xen/arch/x86/hvm/svm/svm.c
> @@ -1654,7 +1654,7 @@ static int svm_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
>      case MSR_AMD_FAM15H_EVNTSEL3:
>      case MSR_AMD_FAM15H_EVNTSEL4:
>      case MSR_AMD_FAM15H_EVNTSEL5:
> -        if ( vpmu_do_rdmsr(msr, msr_content) )
> +        if ( vpmu_do_msr(msr, msr_content, 0, VPMU_MSR_READ) )
>              goto gpf;
>          break;
> 
> @@ -1806,7 +1806,7 @@ static int svm_msr_write_intercept(unsigned int msr, uint64_t msr_content)
>      case MSR_AMD_FAM15H_EVNTSEL3:
>      case MSR_AMD_FAM15H_EVNTSEL4:
>      case MSR_AMD_FAM15H_EVNTSEL5:
> -        if ( vpmu_do_wrmsr(msr, msr_content, 0) )
> +        if ( vpmu_do_msr(msr, &msr_content, 0, VPMU_MSR_WRITE) )
>              goto gpf;
>          break;
> 
> diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
> index 3c63bb0..f4064ba 100644
> --- a/xen/arch/x86/hvm/vmx/vmx.c
> +++ b/xen/arch/x86/hvm/vmx/vmx.c
> @@ -2083,7 +2083,7 @@ static int vmx_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
>      case MSR_CORE_PERF_FIXED_CTR_CTRL...MSR_CORE_PERF_GLOBAL_OVF_CTRL:
>      case MSR_IA32_PEBS_ENABLE:
>      case MSR_IA32_DS_AREA:
> -        if ( vpmu_do_rdmsr(msr, msr_content) )
> +        if ( vpmu_do_msr(msr, msr_content, 0, VPMU_MSR_READ) )
>              goto gp_fault;
>          break;
>      default:
> @@ -2259,7 +2259,7 @@ static int vmx_msr_write_intercept(unsigned int msr, uint64_t msr_content)
>          if ( msr_content & ~supported )
>          {
>              /* Perhaps some other bits are supported in vpmu. */
> -            if ( vpmu_do_wrmsr(msr, msr_content, supported) )
> +            if ( vpmu_do_msr(msr, &msr_content, supported, VPMU_MSR_WRITE) )
>                  break;
>          }
>          if ( msr_content & IA32_DEBUGCTLMSR_LBR )
> @@ -2293,7 +2293,7 @@ static int vmx_msr_write_intercept(unsigned int msr, uint64_t msr_content)
>      case MSR_CORE_PERF_FIXED_CTR_CTRL...MSR_CORE_PERF_GLOBAL_OVF_CTRL:
>      case MSR_IA32_PEBS_ENABLE:
>      case MSR_IA32_DS_AREA:
> -         if ( vpmu_do_wrmsr(msr, msr_content, 0) )
> +         if ( vpmu_do_msr(msr, &msr_content, 0, VPMU_MSR_WRITE) )
>              goto gp_fault;
>          break;
>      default:
> diff --git a/xen/arch/x86/hvm/vpmu.c b/xen/arch/x86/hvm/vpmu.c
> index edc81be..35a08d2 100644
> --- a/xen/arch/x86/hvm/vpmu.c
> +++ b/xen/arch/x86/hvm/vpmu.c
> @@ -89,57 +89,50 @@ void vpmu_lvtpc_update(uint32_t val)
>          apic_write(APIC_LVTPC, vpmu->hw_lapic_lvtpc);
>  }
> 
> -int vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content, uint64_t supported)
> +int vpmu_do_msr(unsigned int msr, uint64_t *msr_content,
> +                uint64_t supported, int rw)
>  {
>      struct vcpu *curr = current;
>      struct vpmu_struct *vpmu = vcpu_vpmu(curr);
> +    struct arch_vpmu_ops *ops = vpmu->arch_vpmu_ops;
> +    int ret = 0;
> 
>      if ( !(vpmu_mode & XENPMU_MODE_SELF) )
>          return 0;
> 
> -    if ( vpmu->arch_vpmu_ops && vpmu->arch_vpmu_ops->do_wrmsr )
> +    switch ( rw )
>      {
> -        int ret = vpmu->arch_vpmu_ops->do_wrmsr(msr, msr_content, supported);
> -
> -        /*
> -         * We may have received a PMU interrupt during WRMSR handling
> -         * and since do_wrmsr may load VPMU context we should save
> -         * (and unload) it again.
> -         */
> -        if ( !is_hvm_domain(curr->domain) &&
> -             vpmu->xenpmu_data && (vpmu->xenpmu_data->pmu_flags & PMU_CACHED) )
> -        {
> -            vpmu_set(vpmu, VPMU_CONTEXT_SAVE);
> -            vpmu->arch_vpmu_ops->arch_vpmu_save(curr);
> -            vpmu_reset(vpmu, VPMU_CONTEXT_SAVE | VPMU_CONTEXT_LOADED);
> -        }
> -        return ret;
> -    }
> -    return 0;
> -}
> +    case VPMU_MSR_READ:
> +        if ( !ops || !ops->do_rdmsr )
> +            return 0;
> +        ret = ops->do_rdmsr(msr, msr_content);
> +        break;
> 
> -int vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content)
> -{
> -    struct vcpu *curr = current;
> -    struct vpmu_struct *vpmu = vcpu_vpmu(curr);
> +    case VPMU_MSR_WRITE:
> +        if ( !ops || !ops->do_wrmsr )
> +            return 0;
> +        ret = ops->do_wrmsr(msr, *msr_content, supported);
> +        break;
> 
> -    if ( !(vpmu_mode & XENPMU_MODE_SELF) )
> +    default:
> +        ASSERT(0);
>          return 0;
> +    }
> 
> -    if ( vpmu->arch_vpmu_ops && vpmu->arch_vpmu_ops->do_rdmsr )
> +    /*
> +     * We may have received a PMU interrupt while handling MSR access
> +     * and since do_wr/rdmsr may load VPMU context we should save
> +     * (and unload) it again.
> +     */
> +    if ( !is_hvm_domain(curr->domain) &&
> +         vpmu->xenpmu_data && (vpmu->xenpmu_data->pmu_flags & PMU_CACHED) )
>      {
> -        int ret = vpmu->arch_vpmu_ops->do_rdmsr(msr, msr_content);
> -
> -        if ( !is_hvm_domain(curr->domain) &&
> -             vpmu->xenpmu_data && (vpmu->xenpmu_data->pmu_flags & PMU_CACHED) )
> -        {
> -            vpmu_set(vpmu, VPMU_CONTEXT_SAVE);
> -            vpmu->arch_vpmu_ops->arch_vpmu_save(curr);
> -        vpmu_reset(vpmu, VPMU_CONTEXT_SAVE | VPMU_CONTEXT_LOADED);
> -        }
> -        return ret;
> +        vpmu_set(vpmu, VPMU_CONTEXT_SAVE);
> +        ops->arch_vpmu_save(curr);
> +        vpmu_reset(vpmu, VPMU_CONTEXT_SAVE | VPMU_CONTEXT_LOADED);
>      }
> -    return 0;
> +
> +    return ret;
>  }
> 
>  static struct vcpu *choose_hwdom_vcpu(void)
> @@ -199,11 +192,11 @@ int vpmu_do_interrupt(struct cpu_user_regs *regs)
>          vpmu_reset(vpmu, VPMU_CONTEXT_SAVE | VPMU_CONTEXT_LOADED);
> 
>          /* Store appropriate registers in xenpmu_data */
> -        if ( is_pv_32bit_domain(sampled->domain) )
> +        if ( is_pv_32bit_domain(sampling->domain) )
>          {
>              /*
>              * 32-bit dom0 cannot process Xen's addresses (which are 64 bit)
> -             * and therefore we treat it the same way as a non-priviledged
> +             * and therefore we treat it the same way as a non-privileged
>               * PV 32-bit domain.
>               */
>              struct compat_cpu_user_regs *cmp;
> diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
> index bc0c9c3..106f522 100644
> --- a/xen/arch/x86/traps.c
> +++ b/xen/arch/x86/traps.c
> @@ -2578,7 +2578,8 @@ static int emulate_privileged_op(struct cpu_user_regs *regs)
>          case MSR_AMD_FAM15H_EVNTSEL0...MSR_AMD_FAM15H_PERFCTR5:
>                  if ( vpmu_msr || (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) )
>                  {
> -                    if ( vpmu_do_wrmsr(regs->ecx, msr_content, 0) )
> +                    if ( vpmu_do_msr(regs->ecx, &msr_content,
> +                                     0, VPMU_MSR_WRITE) )
>                          goto fail;
>                  }
>                  break;
> @@ -2700,7 +2701,8 @@ static int emulate_privileged_op(struct cpu_user_regs *regs)
>          case MSR_AMD_FAM15H_EVNTSEL0...MSR_AMD_FAM15H_PERFCTR5:
>                  if ( vpmu_msr || (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) )
>                  {
> -                    if ( vpmu_do_rdmsr(regs->ecx, &msr_content) )
> +                    if ( vpmu_do_msr(regs->ecx, &msr_content,
> +                                     0, VPMU_MSR_READ) )
>                          goto fail;
> 
>                      regs->eax = (uint32_t)msr_content;
> diff --git a/xen/include/asm-x86/hvm/vpmu.h b/xen/include/asm-x86/hvm/vpmu.h
> index a70b1a4..977a44c 100644
> --- a/xen/include/asm-x86/hvm/vpmu.h
> +++ b/xen/include/asm-x86/hvm/vpmu.h
> @@ -95,9 +95,12 @@ static inline bool_t vpmu_are_all_set(const struct vpmu_struct *vpmu,
>      return !!((vpmu->flags & mask) == mask);
>  }
> 
> +#define VPMU_MSR_READ  0
> +#define VPMU_MSR_WRITE 1
> +
>  void vpmu_lvtpc_update(uint32_t val);
> -int vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content, uint64_t supported);
> -int vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content);
> +int vpmu_do_msr(unsigned int msr, uint64_t *msr_content,
> +                uint64_t supported, int rw);
>  int vpmu_do_interrupt(struct cpu_user_regs *regs);
>  void vpmu_do_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
>                                         unsigned int *ecx, unsigned int *edx);
> --
> 1.8.1.4

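One more note for anyone adding a new backend: the merged helper still
dispatches through the same arch_vpmu_ops hooks, so existing vendor
implementations need no change. A sketch of the table it expects,
abbreviated to the hooks this patch actually uses (the Intel initializer
shown is illustrative only):

    static struct arch_vpmu_ops core2_vpmu_ops = {
        .do_wrmsr       = core2_vpmu_do_wrmsr,
        .do_rdmsr       = core2_vpmu_do_rdmsr,
        .arch_vpmu_save = core2_vpmu_save,
        /* other hooks (interrupt, load, destroy, ...) elided */
    };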
