[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [Xen-devel] [PATCH v2 3/6] xen/PMU: Initialization code for Xen PMU



On 06/06/14 18:44, Boris Ostrovsky wrote:
> Map shared data structure that will hold CPU registers, VPMU context, V/PCPU 
> IDs
> of the CPU interrupted by PMU interrupt. Hypervisor fills this information in
> its handler and passes it to the guest for further processing.
>
> Set up PMU VIRQ.
>
> Signed-off-by: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>
> ---
>  arch/x86/include/asm/xen/interface.h |  41 ++++++++++
>  arch/x86/xen/Makefile                |   2 +-
>  arch/x86/xen/pmu.c                   | 146 
> +++++++++++++++++++++++++++++++++++
>  arch/x86/xen/pmu.h                   |  11 +++
>  arch/x86/xen/smp.c                   |  31 +++++++-
>  include/xen/interface/xen.h          |   1 +
>  include/xen/interface/xenpmu.h       |  19 +++++
>  7 files changed, 249 insertions(+), 2 deletions(-)
>  create mode 100644 arch/x86/xen/pmu.c
>  create mode 100644 arch/x86/xen/pmu.h
>
> diff --git a/arch/x86/include/asm/xen/interface.h 
> b/arch/x86/include/asm/xen/interface.h
> index fd9cb76..c4b92d3 100644
> --- a/arch/x86/include/asm/xen/interface.h
> +++ b/arch/x86/include/asm/xen/interface.h
> @@ -169,6 +169,47 @@ struct vcpu_guest_context {
>  #endif
>  };
>  DEFINE_GUEST_HANDLE_STRUCT(vcpu_guest_context);
> +
> +/* AMD PMU registers and structures */
> +struct xen_pmu_amd_ctxt {
> +     uint32_t counters;       /* Offset to counter MSRs */
> +     uint32_t ctrls;          /* Offset to control MSRs */
> +};
> +
> +/* Intel PMU registers and structures */
> +struct xen_pmu_cntr_pair {
> +     uint64_t counter;
> +     uint64_t control;
> +};
> +
> +struct xen_pmu_intel_ctxt {
> +     uint64_t global_ctrl;
> +     uint64_t global_ovf_ctrl;
> +     uint64_t global_status;
> +     uint64_t fixed_ctrl;
> +     uint64_t ds_area;
> +     uint64_t pebs_enable;
> +     uint64_t debugctl;
> +     uint32_t fixed_counters;  /* Offset to fixed counter MSRs */
> +     uint32_t arch_counters;   /* Offset to architectural counter MSRs */
> +};
> +
> +struct xen_arch_pmu {
> +     union {
> +             struct cpu_user_regs regs;
> +             uint8_t pad1[256];
> +     };
> +     union {
> +             uint32_t lapic_lvtpc;
> +             uint64_t pad2;
> +     };
> +     union {
> +             struct xen_pmu_amd_ctxt amd;
> +             struct xen_pmu_intel_ctxt intel;
> +#define XENPMU_CTXT_PAD_SZ  128
> +             uint8_t pad3[XENPMU_CTXT_PAD_SZ];
> +     };
> +};
>  #endif       /* !__ASSEMBLY__ */

You appear to have a define for XENPMU_CTXT_PAD_SZ but not for any other
bits of padding.

Also, I presume there is no sensible way to coalesce the Intel and AMD
variations?

>  
>  /*
> diff --git a/arch/x86/xen/Makefile b/arch/x86/xen/Makefile
> index 96ab2c0..b187df5 100644
> --- a/arch/x86/xen/Makefile
> +++ b/arch/x86/xen/Makefile
> @@ -13,7 +13,7 @@ CFLAGS_mmu.o                        := $(nostackp)
>  obj-y                := enlighten.o setup.o multicalls.o mmu.o irq.o \
>                       time.o xen-asm.o xen-asm_$(BITS).o \
>                       grant-table.o suspend.o platform-pci-unplug.o \
> -                     p2m.o
> +                     p2m.o pmu.o
>  
>  obj-$(CONFIG_EVENT_TRACING) += trace.o
>  
> diff --git a/arch/x86/xen/pmu.c b/arch/x86/xen/pmu.c
> new file mode 100644
> index 0000000..65c3767
> --- /dev/null
> +++ b/arch/x86/xen/pmu.c
> @@ -0,0 +1,146 @@
> +#include <linux/types.h>
> +#include <linux/interrupt.h>
> +
> +#include <asm/xen/hypercall.h>
> +#include <xen/page.h>
> +#include <xen/interface/xen.h>
> +#include <xen/interface/vcpu.h>
> +#include <xen/interface/xenpmu.h>
> +
> +#include "xen-ops.h"
> +#include "pmu.h"
> +
> +/* x86_pmu.handle_irq definition */
> +#include <../kernel/cpu/perf_event.h>

System include with relative path?

> +
> +
> +/* Shared page between hypervisor and domain */
> +DEFINE_PER_CPU(struct xen_pmu_data *, xenpmu_shared);
> +#define get_xenpmu_data()    per_cpu(xenpmu_shared, smp_processor_id());
> +
> +/* perf callbacks*/
> +int xen_is_in_guest(void)
> +{
> +     struct xen_pmu_data *xenpmu_data = get_xenpmu_data();

const

> +
> +     if (!xenpmu_data) {
> +             WARN_ONCE(1, "%s: pmudata not initialized\n", __func__);
> +             return 0;
> +     }
> +
> +     if (!xen_initial_domain() ||
> +         xenpmu_data->domain_id > DOMID_SELF || xenpmu_data->domain_id == 0)
> +             return 0;

Why is dom0 special, and is it sensible to hard-code a 0 here?

> +
> +     return 1;
> +}
> +
> +static int xen_is_user_mode(void)
> +{
> +     struct xen_pmu_data *xenpmu_data = get_xenpmu_data();

const

> +
> +     if (!xenpmu_data) {
> +             WARN_ONCE(1, "%s: pmudata not initialized\n", __func__);
> +             return 0;
> +     }
> +
> +     return ((xenpmu_data->pmu.regs.cs & 3) == 3);
> +}
> +
> +static unsigned long xen_get_guest_ip(void)
> +{
> +     struct xen_pmu_data *xenpmu_data = get_xenpmu_data();

const

> +
> +     if (!xenpmu_data) {
> +             WARN_ONCE(1, "%s: pmudata not initialized\n", __func__);
> +             return 0;
> +     }
> +
> +     return xenpmu_data->pmu.regs.eip;
> +}
> +
> +static struct perf_guest_info_callbacks xen_guest_cbs = {
> +     .is_in_guest            = xen_is_in_guest,
> +     .is_user_mode           = xen_is_user_mode,
> +     .get_guest_ip           = xen_get_guest_ip,
> +};
> +
> +/* Convert registers from Xen's format to Linux' */
> +static void xen_convert_regs(struct cpu_user_regs *xen_regs,
> +                          struct pt_regs *regs)

const xen_regs

> +{
> +     regs->ip = xen_regs->eip;
> +     regs->cs = xen_regs->cs;
> +}
> +
> +irqreturn_t xen_pmu_irq_handler(int irq, void *dev_id)
> +{
> +     int ret = IRQ_NONE;
> +     struct pt_regs regs;
> +     struct xen_pmu_data *xenpmu_data = get_xenpmu_data();

const

~Andrew

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.