
Re: [Xen-devel] [PATCH] vpmu intel: Add cpuid handling when vpmu disabled



On Wed, Mar 20, 2013 at 07:40:58AM +0100, Dietmar Hahn wrote:
> Am Mittwoch 13 März 2013, 10:55:29 schrieb Dietmar Hahn:
> > Even though vpmu is disabled in the hypervisor, in an HVM guest a call to
> > cpuid(0xa) still returns information about usable performance counters.
> > This may confuse guest software that tries to use the counters and finds
> > that nothing happens.
> > This patch clears most bits in registers eax and edx of the cpuid(0xa)
> > instruction for the guest when vpmu is disabled:
> >  - version ID of architectural performance counting
> >  - number of general pmu registers
> >  - width of general pmu registers
> >  - number of fixed pmu registers
> >  - width of fixed pmu registers
> > 
> > Thanks.
> > 
> > Dietmar.
> 
> Ping?

You need to CC the AMD maintainer as well. CC-ing him here.
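
For readers following along, here is a rough sketch (not taken from any
particular guest; the field layout just follows the #defines in the patch
below) of how a guest typically decodes CPUID leaf 0xa. With vpmu disabled,
these fields currently still advertise a working PMU:

    #include <stdint.h>
    #include <stdio.h>

    /* Sketch only: query CPUID leaf 0xa and print the PMU-related fields. */
    static void cpuid(uint32_t leaf, uint32_t *a, uint32_t *b,
                      uint32_t *c, uint32_t *d)
    {
        asm volatile ("cpuid"
                      : "=a" (*a), "=b" (*b), "=c" (*c), "=d" (*d)
                      : "a" (leaf), "c" (0));
    }

    int main(void)
    {
        uint32_t eax, ebx, ecx, edx;

        cpuid(0xa, &eax, &ebx, &ecx, &edx);

        printf("PMU version id:        %u\n", eax & 0xff);          /* eax bits  0-7  */
        printf("general counters:      %u\n", (eax >> 8) & 0xff);   /* eax bits  8-15 */
        printf("general counter width: %u\n", (eax >> 16) & 0xff);  /* eax bits 16-23 */
        printf("fixed counters:        %u\n", edx & 0x1f);          /* edx bits  0-4  */
        printf("fixed counter width:   %u\n", (edx >> 5) & 0xff);   /* edx bits  5-12 */
        return 0;
    }

A guest that sees a non-zero version id here will go on to program the
counters, which is exactly the confusion the patch description mentions.
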
> 
> > 
> > Signed-off-by: Dietmar Hahn <dietmar.hahn@xxxxxxxxxxxxxx>
> > 
> > diff -r a6b81234b189 xen/arch/x86/hvm/svm/vpmu.c
> > --- a/xen/arch/x86/hvm/svm/vpmu.c   Mon Mar 11 16:13:42 2013 +0000
> > +++ b/xen/arch/x86/hvm/svm/vpmu.c   Wed Mar 13 09:54:00 2013 +0100
> > @@ -370,6 +370,10 @@ int svm_vpmu_initialise(struct vcpu *v, 
> >      uint8_t family = current_cpu_data.x86;
> >      int ret = 0;
> >  
> > +    /* vpmu enabled? */
> > +    if (!vpmu_flags)
> > +        return 0;
> > +
> >      switch ( family )
> >      {
> >      case 0x10:
> > diff -r a6b81234b189 xen/arch/x86/hvm/vmx/vpmu_core2.c
> > --- a/xen/arch/x86/hvm/vmx/vpmu_core2.c     Mon Mar 11 16:13:42 2013 +0000
> > +++ b/xen/arch/x86/hvm/vmx/vpmu_core2.c     Wed Mar 13 09:54:00 2013 +0100
> > @@ -727,6 +727,59 @@ struct arch_vpmu_ops core2_vpmu_ops = {
> >      .arch_vpmu_load = core2_vpmu_load
> >  };
> >  
> > +/* cpuid 0xa - eax */

Do you think it might make sense to point out where these are defined
in the Intel SDM?
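
Something roughly like the following could go above the #defines (the exact
volume/chapter reference would need checking against the current SDM, so
treat this as a sketch):

    /*
     * Bit layout of CPUID leaf 0xa (Architectural Performance Monitoring).
     * See the Intel SDM, Vol. 2 (CPUID instruction reference) and Vol. 3,
     * "Architectural Performance Monitoring".
     */
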

> > +#define X86_FEATURE_PMU_VER_OFF   0  /* Version */
> > +#define FEATURE_PMU_VER_BITS      8  /* 8 bits */
> > +#define X86_FEATURE_NUM_GEN_OFF   8  /* Number of general pmu registers */
> > +#define FEATURE_NUM_GEN_BITS      8  /* 8 bits */
> > +#define X86_FEATURE_GEN_WIDTH_OFF 16 /* Width of general pmu registers */
> > +#define FEATURE_GEN_WIDTH_BITS    8  /* 8 bits */
> > +
> > +/* cpuid 0xa - edx */
> > +#define X86_FEATURE_NUM_FIX_OFF   0  /* Number of fixed pmu registers */
> > +#define FEATURE_NUM_FIX_BITS      5  /* 5 bits */
> > +#define X86_FEATURE_FIX_WIDTH_OFF 5  /* Width of fixed pmu registers */
> > +#define FEATURE_FIX_WIDTH_BITS    8  /* 8 bits */
> > +
> > +static void core2_no_vpmu_do_cpuid(unsigned int input,
> > +                                unsigned int *eax, unsigned int *ebx,
> > +                                unsigned int *ecx, unsigned int *edx)
> > +{
> > +    /*
> > +     * As in this case the vpmu is not enabled reset some bits in the
> > +     * pmu part of the cpuid.
> > +     */
> > +    if (input == 0xa)
> > +    {
> > +        *eax &= ~(((1 << FEATURE_PMU_VER_BITS) -1) << X86_FEATURE_PMU_VER_OFF);
> > +        *eax &= ~(((1 << FEATURE_NUM_GEN_BITS) -1) << X86_FEATURE_NUM_GEN_OFF);
> > +        *eax &= ~(((1 << FEATURE_GEN_WIDTH_BITS) -1) << X86_FEATURE_GEN_WIDTH_OFF);
> > +
> > +        *edx &= ~(((1 << FEATURE_NUM_FIX_BITS) -1) << X86_FEATURE_NUM_FIX_OFF);
> > +        *edx &= ~(((1 << FEATURE_FIX_WIDTH_BITS) -1) << X86_FEATURE_FIX_WIDTH_OFF);
> > +    }
> > +}
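
For anyone checking the arithmetic: with the widths and offsets defined
above, the five statements reduce to clearing eax bits 0-23 and edx bits
0-12, i.e. the same effect as:

    *eax &= ~0x00ffffffu;   /* version (0-7), #general (8-15), gen. width (16-23) */
    *edx &= ~0x00001fffu;   /* #fixed (0-4), fixed width (5-12) */

The named masks are more self-documenting, though, so this is just a
cross-check.
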
> > +
> > +/*
> > + * If it's a vpmu msr set it to 0.

.. s/it/its value/

> > + */
> > +static int core2_no_vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content)
> > +{
> > +    int type = -1, index = -1;
> > +    if ( !is_core2_vpmu_msr(msr, &type, &index) )
> > +        return 0;
> > +    *msr_content = 0;
> > +    return 1;
> > +}
> > +
> > +/*
> > + * These functions are used in case vpmu is not enabled.
> > + */
> > +struct arch_vpmu_ops core2_no_vpmu_ops = {
> > +    .do_rdmsr = core2_no_vpmu_do_rdmsr,
> > +    .do_cpuid = core2_no_vpmu_do_cpuid,
> > +};
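
This table only fills in do_rdmsr and do_cpuid, which is why the vpmu.c
hunks below have to grow the per-hook NULL checks. An alternative (purely a
sketch, not something the patch needs to do) would be to provide trivial
stubs for the remaining hooks so the existing checks could stay unchanged,
e.g.:

    /* Hypothetical stubs -- not part of the posted patch. */
    static int core2_no_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
    {
        return 0;   /* not handled */
    }

    static int core2_no_vpmu_do_interrupt(struct cpu_user_regs *regs)
    {
        return 0;   /* nothing to do without a vpmu */
    }

Either way works; the NULL checks are arguably the smaller change.
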
> > +
> >  int vmx_vpmu_initialise(struct vcpu *v, unsigned int vpmu_flags)
> >  {
> >      struct vpmu_struct *vpmu = vcpu_vpmu(v);
> > @@ -734,6 +787,10 @@ int vmx_vpmu_initialise(struct vcpu *v, 
> >      uint8_t cpu_model = current_cpu_data.x86_model;
> >      int ret = 0;
> >  
> > +    vpmu->arch_vpmu_ops = &core2_no_vpmu_ops;
> > +    if ( !vpmu_flags )
> > +        return 0;
> > +
> >      if ( family == 6 )
> >      {
> >          switch ( cpu_model )
> > diff -r a6b81234b189 xen/arch/x86/hvm/vpmu.c
> > --- a/xen/arch/x86/hvm/vpmu.c       Mon Mar 11 16:13:42 2013 +0000
> > +++ b/xen/arch/x86/hvm/vpmu.c       Wed Mar 13 09:54:00 2013 +0100
> > @@ -67,7 +67,7 @@ int vpmu_do_wrmsr(unsigned int msr, uint
> >  {
> >      struct vpmu_struct *vpmu = vcpu_vpmu(current);
> >  
> > -    if ( vpmu->arch_vpmu_ops )
> > +    if ( vpmu->arch_vpmu_ops && vpmu->arch_vpmu_ops->do_wrmsr )
> >          return vpmu->arch_vpmu_ops->do_wrmsr(msr, msr_content);
> >      return 0;
> >  }
> > @@ -76,7 +76,7 @@ int vpmu_do_rdmsr(unsigned int msr, uint
> >  {
> >      struct vpmu_struct *vpmu = vcpu_vpmu(current);
> >  
> > -    if ( vpmu->arch_vpmu_ops )
> > +    if ( vpmu->arch_vpmu_ops && vpmu->arch_vpmu_ops->do_rdmsr )
> >          return vpmu->arch_vpmu_ops->do_rdmsr(msr, msr_content);
> >      return 0;
> >  }
> > @@ -85,7 +85,7 @@ int vpmu_do_interrupt(struct cpu_user_re
> >  {
> >      struct vpmu_struct *vpmu = vcpu_vpmu(current);
> >  
> > -    if ( vpmu->arch_vpmu_ops )
> > +    if ( vpmu->arch_vpmu_ops && vpmu->arch_vpmu_ops->do_interrupt )
> >          return vpmu->arch_vpmu_ops->do_interrupt(regs);

> >      return 0;
> >  }
> > @@ -104,7 +104,7 @@ void vpmu_save(struct vcpu *v)
> >  {
> >      struct vpmu_struct *vpmu = vcpu_vpmu(v);
> >  
> > -    if ( vpmu->arch_vpmu_ops )
> > +    if ( vpmu->arch_vpmu_ops && vpmu->arch_vpmu_ops->arch_vpmu_save )
> >          vpmu->arch_vpmu_ops->arch_vpmu_save(v);
> >  }
> >  
> > @@ -112,7 +112,7 @@ void vpmu_load(struct vcpu *v)
> >  {
> >      struct vpmu_struct *vpmu = vcpu_vpmu(v);
> >  
> > -    if ( vpmu->arch_vpmu_ops )
> > +    if ( vpmu->arch_vpmu_ops && vpmu->arch_vpmu_ops->arch_vpmu_load )
> >          vpmu->arch_vpmu_ops->arch_vpmu_load(v);
> >  }
> >  
> > @@ -121,9 +121,6 @@ void vpmu_initialise(struct vcpu *v)
> >      struct vpmu_struct *vpmu = vcpu_vpmu(v);
> >      uint8_t vendor = current_cpu_data.x86_vendor;
> >  
> > -    if ( !opt_vpmu_enabled )
> > -        return;
> > -
> >      if ( vpmu_is_set(vpmu, VPMU_CONTEXT_ALLOCATED) )
> >          vpmu_destroy(v);
> >      vpmu_clear(vpmu);
> > @@ -153,7 +150,7 @@ void vpmu_destroy(struct vcpu *v)
> >  {
> >      struct vpmu_struct *vpmu = vcpu_vpmu(v);
> >  
> > -    if ( vpmu->arch_vpmu_ops )
> > +    if ( vpmu->arch_vpmu_ops && vpmu->arch_vpmu_ops->arch_vpmu_destroy )
> >          vpmu->arch_vpmu_ops->arch_vpmu_destroy(v);
> >  }
> >  
> > 
> > 
> -- 
> Company details: http://ts.fujitsu.com/imprint.html
> 

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel


 

