
[Xen-devel] [PATCH v2 5/6] xen/PMU: Intercept PMU-related MSR and APIC accesses



Provide interfaces for recognizing accesses to PMU-related MSRs and to the
APIC LVTPC register, and process these accesses in the Xen PMU code.

Signed-off-by: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>
---
 arch/x86/xen/enlighten.c       | 24 ++++++++++--
 arch/x86/xen/pmu.c             | 84 ++++++++++++++++++++++++++++++++++++++++++
 arch/x86/xen/pmu.h             |  4 ++
 include/xen/interface/xenpmu.h |  1 +
 4 files changed, 110 insertions(+), 3 deletions(-)
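
For reviewers unfamiliar with the pv_ops MSR path, here is a minimal,
standalone userspace sketch (not part of the patch; the MSR number and all
demo_* names are invented for illustration) of the "let the PMU hook claim
the access, otherwise fall back to the native accessor" pattern that
xen_read_msr_safe() in the first hunk below follows:

/*
 * Standalone illustration only -- not part of this patch.  The MSR
 * number and all demo_* names are invented for the example.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_PMU_MSR 0xc1u	/* stand-in for a PMU counter MSR */

/* Plays the role of pmu_msr_read(): claims only the PMU MSR. */
static bool demo_pmu_msr_read(unsigned int msr, uint64_t *val, int *err)
{
	if (msr != DEMO_PMU_MSR)
		return false;	/* not ours, caller falls back */
	*val = 0x1234;
	*err = 0;
	return true;
}

/* Plays the role of native_read_msr_safe(). */
static uint64_t demo_native_read_msr_safe(unsigned int msr, int *err)
{
	*err = 0;
	return 0xdead0000ull | msr;
}

/* Same shape as xen_read_msr_safe() in the hunk below. */
static uint64_t demo_read_msr_safe(unsigned int msr, int *err)
{
	uint64_t val;

	if (demo_pmu_msr_read(msr, &val, err))
		return val;

	return demo_native_read_msr_safe(msr, err);
}

int main(void)
{
	int err;

	printf("PMU MSR   -> %#llx\n",
	       (unsigned long long)demo_read_msr_safe(DEMO_PMU_MSR, &err));
	printf("other MSR -> %#llx\n",
	       (unsigned long long)demo_read_msr_safe(0x10, &err));
	return 0;
}

The write path in the patch is the same pattern in reverse: pmu_msr_write()
either handles the MSR and reports the result through *err, or declines so
that native_write_msr_safe() runs.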

diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 201d09a..a3e4db6 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -82,6 +82,7 @@
 #include "mmu.h"
 #include "smp.h"
 #include "multicalls.h"
+#include "pmu.h"
 
 EXPORT_SYMBOL_GPL(hypercall_page);
 
@@ -964,6 +965,11 @@ static u32 xen_apic_read(u32 reg)
 
 static void xen_apic_write(u32 reg, u32 val)
 {
+       if (reg == APIC_LVTPC) {
+               (void)pmu_apic_update(reg);
+               return;
+       }
+
        /* Warn to see if there's any stray references */
        WARN_ON(1);
 }
@@ -1068,6 +1074,17 @@ static inline void xen_write_cr8(unsigned long val)
        BUG_ON(val);
 }
 #endif
+
+static u64 xen_read_msr_safe(unsigned int msr, int *err)
+{
+       u64 val;
+
+       if (pmu_msr_read(msr, &val, err))
+               return val;
+
+       return native_read_msr_safe(msr, err);
+}
+
 static int xen_write_msr_safe(unsigned int msr, unsigned low, unsigned high)
 {
        int ret;
@@ -1108,7 +1125,8 @@ static int xen_write_msr_safe(unsigned int msr, unsigned low, unsigned high)
                break;
 
        default:
-               ret = native_write_msr_safe(msr, low, high);
+               if (!pmu_msr_write(msr, low, high, &ret))
+                       ret = native_write_msr_safe(msr, low, high);
        }
 
        return ret;
@@ -1244,11 +1262,11 @@ static const struct pv_cpu_ops xen_cpu_ops __initconst = {
 
        .wbinvd = native_wbinvd,
 
-       .read_msr = native_read_msr_safe,
+       .read_msr = xen_read_msr_safe,
        .write_msr = xen_write_msr_safe,
 
        .read_tsc = native_read_tsc,
-       .read_pmc = native_read_pmc,
+       .read_pmc = xen_read_pmc,
 
        .read_tscp = native_read_tscp,
 
diff --git a/arch/x86/xen/pmu.c b/arch/x86/xen/pmu.c
index f1bec27..f92d406 100644
--- a/arch/x86/xen/pmu.c
+++ b/arch/x86/xen/pmu.c
@@ -160,6 +160,90 @@ static bool is_intel_pmu_msr(u32 msr_index, int *type, int *index)
        return false;
 }
 
+bool pmu_msr_read(unsigned int msr, uint64_t *val, int *err)
+{
+
+       if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
+               if (is_amd_pmu_msr(msr)) {
+                       *val = native_read_msr_safe(msr, err);
+                       return true;
+               }
+       } else {
+               int type, index;
+               if (is_intel_pmu_msr(msr, &type, &index)) {
+                       *val = native_read_msr_safe(msr, err);
+                       return true;
+               }
+       }
+
+       return false;
+}
+
+bool pmu_msr_write(unsigned int msr, uint32_t low, uint32_t high, int *err)
+{
+       if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
+               if (is_amd_pmu_msr(msr)) {
+                       *err = native_write_msr_safe(msr, low, high);
+                       return true;
+               }
+       } else {
+               int type, index;
+
+               if (is_intel_pmu_msr(msr, &type, &index)) {
+                       *err = native_write_msr_safe(msr, low, high);
+                       return true;
+               }
+       }
+
+       return false;
+}
+
+unsigned long long xen_amd_read_pmc(int counter)
+{
+       uint32_t msr;
+       int err;
+
+       msr = amd_counters_base + (counter * amd_msr_step);
+       return native_read_msr_safe(msr, &err);
+}
+
+unsigned long long xen_intel_read_pmc(int counter)
+{
+       int err;
+       uint32_t msr;
+
+       if (counter & (1<<30))
+               msr = MSR_CORE_PERF_FIXED_CTR0 + (counter & 0xffff);
+       else
+               msr = MSR_IA32_PERFCTR0 + counter;
+
+       return native_read_msr_safe(msr, &err);
+}
+
+unsigned long long xen_read_pmc(int counter)
+{
+       if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
+               return xen_amd_read_pmc(counter);
+       else
+               return xen_intel_read_pmc(counter);
+}
+
+int pmu_apic_update(uint32_t val)
+{
+       int ret;
+       struct xen_pmu_data *xenpmu_data = get_xenpmu_data();
+
+       if (!xenpmu_data) {
+               WARN_ONCE(1, "%s: pmudata not initialized\n", __func__);
+               return -EINVAL;
+       }
+
+       xenpmu_data->pmu.lapic_lvtpc = val;
+       ret = HYPERVISOR_xenpmu_op(XENPMU_lvtpc_set, NULL);
+
+       return ret;
+}
+
 /* perf callbacks*/
 int xen_is_in_guest(void)
 {
diff --git a/arch/x86/xen/pmu.h b/arch/x86/xen/pmu.h
index d52e8db..30bfbcf 100644
--- a/arch/x86/xen/pmu.h
+++ b/arch/x86/xen/pmu.h
@@ -7,5 +7,9 @@ irqreturn_t xen_pmu_irq_handler(int irq, void *dev_id);
 int xen_pmu_init(int cpu);
 void xen_pmu_finish(int cpu);
 bool is_xen_pmu(int cpu);
+bool pmu_msr_read(unsigned int msr, uint64_t *val, int *err);
+bool pmu_msr_write(unsigned int msr, uint32_t low, uint32_t high, int *err);
+int pmu_apic_update(uint32_t reg);
+unsigned long long xen_read_pmc(int counter);
 
 #endif /* __XEN_PMU_H */
diff --git a/include/xen/interface/xenpmu.h b/include/xen/interface/xenpmu.h
index ed00245..79384e4 100644
--- a/include/xen/interface/xenpmu.h
+++ b/include/xen/interface/xenpmu.h
@@ -13,6 +13,7 @@
 #define XENPMU_feature_set     3
 #define XENPMU_init            4
 #define XENPMU_finish          5
+#define XENPMU_lvtpc_set       6
 
 /* Parameter structure for HYPERVISOR_xenpmu_op call */
 struct xen_pmu_params {
-- 
1.8.1.4

