
Re: [Xen-devel] [PATCH v5] x86/VPMU: implement ipc and arch filter flags



>>> On 05.01.16 at 02:43, <bgregg@xxxxxxxxxxx> wrote:
> This introduces a way to have a restricted VPMU, by specifying one of two
> predefined groups of PMCs to make available. For secure environments, this
> allows the VPMU to be used without needing to enable all PMCs.
> 
> Signed-off-by: Brendan Gregg <bgregg@xxxxxxxxxxx>
> Reviewed-by: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>
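
(For context, and assuming the vpmu= command line option is the hook the
parsing changes quoted below extend, an admin would pick one of the
restricted modes with e.g.

    vpmu=ipc
or
    vpmu=arch,bts

on the Xen command line.)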

I was about to apply this, with the adjustments noted inline below,

>  static DEFINE_PER_CPU(struct vcpu *, last_vcpu);
>  
> -static void __init parse_vpmu_param(char *s)
> +static int parse_vpmu_param(char *s, int len)

static bool_t __init parse_vpmu_param(char *s, unsigned int len)

>  {
> +    if ( ! *s || ! len )

    if ( !*s || !len )

> +        return 0;
> +    if ( !strncmp(s, "bts", len) )
> +        vpmu_features |= XENPMU_FEATURE_INTEL_BTS;
> +    else if ( !strncmp(s, "ipc", len) )
> +        vpmu_features |= XENPMU_FEATURE_IPC_ONLY;
> +    else if ( !strncmp(s, "arch", len) )
> +        vpmu_features |= XENPMU_FEATURE_ARCH_ONLY;
> +    else
> +        return 1;
> +    return 0;
> +}
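
For clarity, with the two adjustments above folded in the helper would come
out roughly like this (a sketch only, not necessarily the exact text):

static bool_t __init parse_vpmu_param(char *s, unsigned int len)
{
    /* Empty token or zero length: nothing to do. */
    if ( !*s || !len )
        return 0;
    if ( !strncmp(s, "bts", len) )
        vpmu_features |= XENPMU_FEATURE_INTEL_BTS;
    else if ( !strncmp(s, "ipc", len) )
        vpmu_features |= XENPMU_FEATURE_IPC_ONLY;
    else if ( !strncmp(s, "arch", len) )
        vpmu_features |= XENPMU_FEATURE_ARCH_ONLY;
    else
        return 1; /* unknown flag - caller treats non-zero as error */
    return 0;
}
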
> +
> +static void __init parse_vpmu_params(char *s)
> +{
> +    char *sep, *p = s;
> +
>      switch ( parse_bool(s) )
>      {
>      case 0:
>          break;
>      default:
> -        if ( !strcmp(s, "bts") )
> -            vpmu_features |= XENPMU_FEATURE_INTEL_BTS;
> -        else if ( *s )
> +        while (1)
>          {
> -            printk("VPMU: unknown flag: %s - vpmu disabled!\n", s);
> -            break;
> +            sep = strchr(p, ',');
> +            if ( sep == NULL )
> +                sep = strchr(p, 0);
> +            if ( parse_vpmu_param(p, sep - p) )
> +                goto error;
> +            if ( *sep == 0 )

            if ( !*sep )
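
The tail of that loop isn't quoted above; presumably it just stops at the
terminating NUL and otherwise advances past the comma, along the lines of
(sketch only):

            if ( !*sep )
                break;          /* end of the flag list */
            p = sep + 1;        /* step past the ',' to the next token */
        }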

> --- a/xen/arch/x86/cpu/vpmu_intel.c
> +++ b/xen/arch/x86/cpu/vpmu_intel.c
> @@ -604,12 +604,17 @@ static int core2_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content,
>                   "MSR_PERF_GLOBAL_STATUS(0x38E)!\n");
>          return -EINVAL;
>      case MSR_IA32_PEBS_ENABLE:
> +        if ( vpmu_features & (XENPMU_FEATURE_IPC_ONLY |
> +             XENPMU_FEATURE_ARCH_ONLY) )

                              XENPMU_FEATURE_ARCH_ONLY) )

> @@ -656,12 +661,55 @@ static int core2_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content,
>          tmp = msr - MSR_P6_EVNTSEL(0);
>          if ( tmp >= 0 && tmp < arch_pmc_cnt )
>          {
> +            bool_t blocked = 0;
> +            uint64_t umaskevent;
>              struct xen_pmu_cntr_pair *xen_pmu_cntr_pair =
>                  vpmu_reg_pointer(core2_vpmu_cxt, arch_counters);
>  
>              if ( msr_content & ARCH_CTRL_MASK )
>                  return -EINVAL;
>  
> +            /* PMC filters */
> +            umaskevent = msr_content & MSR_IA32_CMT_EVTSEL_UE_MASK;
> +            if ( vpmu_features & XENPMU_FEATURE_IPC_ONLY ||
> +                 vpmu_features & XENPMU_FEATURE_ARCH_ONLY )

            if ( vpmu_features & (XENPMU_FEATURE_IPC_ONLY |
                                  XENPMU_FEATURE_ARCH_ONLY) )

> +            {
> +                blocked = 1;
> +                switch ( umaskevent )
> +                {
> +                /*
> +                 * See the Pre-Defined Architectural Performance Events table
> +                 * from the Intel 64 and IA-32 Architectures Software
> +                 * Developer's Manual, Volume 3B, System Programming Guide,
> +                 * Part 2.
> +                 */
> +                case 0x003c: /* UnHalted Core Cycles */
> +                case 0x013c: /* UnHalted Reference Cycles */
> +                case 0x00c0: /* Instructions Retired */
> +                    blocked = 0;
> +                default:
> +                    break;

I dropped these last two lines - a "default:" holding just a "break;" at
the end of the switch serves no purpose.

> +                }
> +            }
> +
> +            if ( vpmu_features & XENPMU_FEATURE_ARCH_ONLY )
> +            {
> +                /* additional counters beyond IPC only; blocked already set */

                /* Additional counters beyond IPC only; blocked already set. */

> +                switch ( umaskevent )
> +                {
> +                case 0x4f2e: /* Last Level Cache References */
> +                case 0x412e: /* Last Level Cache Misses */
> +                case 0x00c4: /* Branch Instructions Retired */
> +                case 0x00c5: /* All Branch Mispredict Retired */
> +                    blocked = 0;
> +                default:
> +                    break;

Again - dropped the trailing "default: break;" here as well.
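
So with the mask style check and the trailing default/break dropped in both
places, the whole filter section would come out roughly as below (sketch
only, with the SDM table reference comment shortened):

            /* PMC filters */
            umaskevent = msr_content & MSR_IA32_CMT_EVTSEL_UE_MASK;
            if ( vpmu_features & (XENPMU_FEATURE_IPC_ONLY |
                                  XENPMU_FEATURE_ARCH_ONLY) )
            {
                blocked = 1;
                switch ( umaskevent )
                {
                /* Pre-defined architectural events (SDM Vol. 3B). */
                case 0x003c: /* UnHalted Core Cycles */
                case 0x013c: /* UnHalted Reference Cycles */
                case 0x00c0: /* Instructions Retired */
                    blocked = 0;
                }
            }

            if ( vpmu_features & XENPMU_FEATURE_ARCH_ONLY )
            {
                /* Additional counters beyond IPC only; blocked already set. */
                switch ( umaskevent )
                {
                case 0x4f2e: /* Last Level Cache References */
                case 0x412e: /* Last Level Cache Misses */
                case 0x00c4: /* Branch Instructions Retired */
                case 0x00c5: /* All Branch Mispredict Retired */
                    blocked = 0;
                }
            }

(followed, presumably, by the not-quoted rejection of the write while
"blocked" is still set).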

> --- a/xen/include/public/pmu.h
> +++ b/xen/include/public/pmu.h
> @@ -84,9 +84,19 @@ DEFINE_XEN_GUEST_HANDLE(xen_pmu_params_t);
>  
>  /*
>   * PMU features:
> - * - XENPMU_FEATURE_INTEL_BTS: Intel BTS support (ignored on AMD)
> + * - XENPMU_FEATURE_INTEL_BTS:  Intel BTS support (ignored on AMD)
> + * - XENPMU_FEATURE_IPC_ONLY:   Restrict PMCs to the most minimum set possible.
> + *                              Instructions, cycles, and ref cycles. Can be
> + *                              used to calculate instructions-per-cycle (IPC)
> + *                              (ignored on AMD).
> + * - XENPMU_FEATURE_ARCH_ONLY:  Restrict PMCs to the Intel Pre-Defined
> + *                              Architectural Performance Events exposed by
> + *                              cpuid and listed in the Intel developer's manual
> + *                              (ignored on AMD).
>   */
> -#define XENPMU_FEATURE_INTEL_BTS  1
> +#define XENPMU_FEATURE_INTEL_BTS  (1<<0)
> +#define XENPMU_FEATURE_IPC_ONLY   (1<<1)
> +#define XENPMU_FEATURE_ARCH_ONLY  (1<<2)

when I reached this: Do these additions really need to go here?
And if they do, why doesn't do_xenpmu_op()'s XENPMU_feature_set
case get altered to accept the new bits?
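
If memory serves, that case currently validates the incoming value with
something like

        if ( pmu_params.val & ~XENPMU_FEATURE_INTEL_BTS )
            return -EINVAL;

so unless that mask also gains the two new bits, roughly

        if ( pmu_params.val & ~(XENPMU_FEATURE_INTEL_BTS |
                                XENPMU_FEATURE_IPC_ONLY |
                                XENPMU_FEATURE_ARCH_ONLY) )
            return -EINVAL;

a caller of XENPMU_feature_set asking for the new features would still be
refused.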

Jan
