[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [PATCH v3 5/6] xen/PMU: Intercept PMU-related MSR and APIC accesses



Provide interfaces for recognizing accesses to PMU-related MSRs and LVTPC APIC
and process these accesses in Xen PMU code.

(The interrupt handler performs XENPMU_flush immediately on entry, since no PMU
emulation is available yet; emulation will be added in a later patch.)

Signed-off-by: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>
---
 arch/x86/xen/enlighten.c       | 26 ++++++++----
 arch/x86/xen/pmu.c             | 95 +++++++++++++++++++++++++++++++++++++++++-
 arch/x86/xen/pmu.h             |  4 ++
 include/xen/interface/xenpmu.h |  2 +
 4 files changed, 118 insertions(+), 9 deletions(-)

diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 57764ce..9ab5f76 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -82,6 +82,7 @@
 #include "mmu.h"
 #include "smp.h"
 #include "multicalls.h"
+#include "pmu.h"
 
 EXPORT_SYMBOL_GPL(hypercall_page);
 
@@ -962,15 +963,12 @@ static u32 xen_apic_read(u32 reg)
        return op.u.pcpu_info.apic_id << 24;
 }
 
-unsigned long long xen_read_pmc(int counter)
-{
-       return 0;
-}
-
 static void xen_apic_write(u32 reg, u32 val)
 {
-       if (reg == APIC_LVTPC)
+       if (reg == APIC_LVTPC) {
+               (void)pmu_apic_update(reg);
                return;
+       }
 
        /* Warn to see if there's any stray references */
        WARN_ON(1);
@@ -1076,6 +1074,17 @@ static inline void xen_write_cr8(unsigned long val)
        BUG_ON(val);
 }
 #endif
+
+static u64 xen_read_msr_safe(unsigned int msr, int *err)
+{
+       u64 val;
+
+       if (pmu_msr_read(msr, &val, err))
+               return val;
+
+       return native_read_msr_safe(msr, err);
+}
+
 static int xen_write_msr_safe(unsigned int msr, unsigned low, unsigned high)
 {
        int ret;
@@ -1116,7 +1125,8 @@ static int xen_write_msr_safe(unsigned int msr, unsigned low, unsigned high)
                break;
 
        default:
-               ret = native_write_msr_safe(msr, low, high);
+               if (!pmu_msr_write(msr, low, high, &ret))
+                       ret = native_write_msr_safe(msr, low, high);
        }
 
        return ret;
@@ -1252,7 +1262,7 @@ static const struct pv_cpu_ops xen_cpu_ops __initconst = {
 
        .wbinvd = native_wbinvd,
 
-       .read_msr = native_read_msr_safe,
+       .read_msr = xen_read_msr_safe,
        .write_msr = xen_write_msr_safe,
 
        .read_tsc = native_read_tsc,
diff --git a/arch/x86/xen/pmu.c b/arch/x86/xen/pmu.c
index b37b439..5ea5cb8 100644
--- a/arch/x86/xen/pmu.c
+++ b/arch/x86/xen/pmu.c
@@ -48,6 +48,8 @@ static __read_mostly int amd_num_counters;
 #define PMU_FIXED_NR_MASK           (((1 << PMU_FIXED_NR_BITS) - 1) \
                                     << PMU_FIXED_NR_SHIFT)
 
+#define INTEL_PMC_TYPE_SHIFT        30
+
 static __read_mostly int intel_num_arch_counters, intel_num_fixed_counters;
 
 
@@ -160,6 +162,91 @@ static bool is_intel_pmu_msr(u32 msr_index, int *type, int *index)
        return false;
 }
 
+bool pmu_msr_read(unsigned int msr, uint64_t *val, int *err)
+{
+
+       if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
+               if (is_amd_pmu_msr(msr)) {
+                       *val = native_read_msr_safe(msr, err);
+                       return true;
+               }
+       } else {
+               int type, index;
+
+               if (is_intel_pmu_msr(msr, &type, &index)) {
+                       *val = native_read_msr_safe(msr, err);
+                       return true;
+               }
+       }
+
+       return false;
+}
+
+bool pmu_msr_write(unsigned int msr, uint32_t low, uint32_t high, int *err)
+{
+       if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
+               if (is_amd_pmu_msr(msr)) {
+                       *err = native_write_msr_safe(msr, low, high);
+                       return true;
+               }
+       } else {
+               int type, index;
+
+               if (is_intel_pmu_msr(msr, &type, &index)) {
+                       *err = native_write_msr_safe(msr, low, high);
+                       return true;
+               }
+       }
+
+       return false;
+}
+
+unsigned long long xen_amd_read_pmc(int counter)
+{
+       uint32_t msr;
+       int err;
+
+       msr = amd_counters_base + (counter * amd_msr_step);
+       return native_read_msr_safe(msr, &err);
+}
+
+unsigned long long xen_intel_read_pmc(int counter)
+{
+       int err;
+       uint32_t msr;
+
+       if (counter & (1<<INTEL_PMC_TYPE_SHIFT))
+               msr = MSR_CORE_PERF_FIXED_CTR0 + (counter & 0xffff);
+       else
+               msr = MSR_IA32_PERFCTR0 + counter;
+
+       return native_read_msr_safe(msr, &err);
+}
+
+unsigned long long xen_read_pmc(int counter)
+{
+       if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
+               return xen_amd_read_pmc(counter);
+       else
+               return xen_intel_read_pmc(counter);
+}
+
+int pmu_apic_update(uint32_t val)
+{
+       int ret;
+       struct xen_pmu_data *xenpmu_data = get_xenpmu_data();
+
+       if (!xenpmu_data) {
+               WARN_ONCE(1, "%s: pmudata not initialized\n", __func__);
+               return -EINVAL;
+       }
+
+       xenpmu_data->pmu.lapic_lvtpc = val;
+       ret = HYPERVISOR_xenpmu_op(XENPMU_lvtpc_set, NULL);
+
+       return ret;
+}
+
 /* perf callbacks*/
 int xen_is_in_guest(void)
 {
@@ -216,7 +303,7 @@ static void xen_convert_regs(const struct cpu_user_regs *xen_regs,
 
 irqreturn_t xen_pmu_irq_handler(int irq, void *dev_id)
 {
-       int ret = IRQ_NONE;
+       int err, ret = IRQ_NONE;
        struct pt_regs regs;
        const struct xen_pmu_data *xenpmu_data = get_xenpmu_data();
 
@@ -225,6 +312,12 @@ irqreturn_t xen_pmu_irq_handler(int irq, void *dev_id)
                return ret;
        }
 
+       err = HYPERVISOR_xenpmu_op(XENPMU_flush, NULL);
+       if (err) {
+               WARN_ONCE(1, "%s failed hypercall, err: %d\n", __func__, err);
+               return ret;
+       }
+
        xen_convert_regs(&xenpmu_data->pmu.regs, &regs);
        if (x86_pmu.handle_irq(&regs))
                ret = IRQ_HANDLED;
diff --git a/arch/x86/xen/pmu.h b/arch/x86/xen/pmu.h
index d52e8db..30bfbcf 100644
--- a/arch/x86/xen/pmu.h
+++ b/arch/x86/xen/pmu.h
@@ -7,5 +7,9 @@ irqreturn_t xen_pmu_irq_handler(int irq, void *dev_id);
 int xen_pmu_init(int cpu);
 void xen_pmu_finish(int cpu);
 bool is_xen_pmu(int cpu);
+bool pmu_msr_read(unsigned int msr, uint64_t *val, int *err);
+bool pmu_msr_write(unsigned int msr, uint32_t low, uint32_t high, int *err);
+int pmu_apic_update(uint32_t reg);
+unsigned long long xen_read_pmc(int counter);
 
 #endif /* __XEN_PMU_H */
diff --git a/include/xen/interface/xenpmu.h b/include/xen/interface/xenpmu.h
index 78073dc..59aaedd 100644
--- a/include/xen/interface/xenpmu.h
+++ b/include/xen/interface/xenpmu.h
@@ -13,6 +13,8 @@
 #define XENPMU_feature_set     3
 #define XENPMU_init            4
 #define XENPMU_finish          5
+#define XENPMU_lvtpc_set       6
+#define XENPMU_flush           7
 
 /* Parameter structure for HYPERVISOR_xenpmu_op call */
 struct xen_pmu_params {
-- 
1.8.1.4


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.