[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[RFC PATCH 18/22] x86/PMUv4: disable intercept for PERF_GLOBAL_STATUS



From: Edwin Török <edvin.torok@xxxxxxxxxx>

Now that we have a way to set PERF_GLOBAL_STATUS by writing to
PERF_GLOBAL_STATUS_RESET (== PERF_GLOBAL_OVF_CTRL) and
PERF_GLOBAL_STATUS_SET we do not need to intercept this MSR anymore.

We can save/restore its state when saving/loading vPMU state, and
otherwise let the guest read it directly.

This is an optimization; a flag to disable it for debugging purposes may
be worth adding.

Signed-off-by: Edwin Török <edvin.torok@xxxxxxxxxx>
---
 xen/arch/x86/cpu/vpmu_intel.c | 19 ++++++++++++++++++-
 1 file changed, 18 insertions(+), 1 deletion(-)

diff --git a/xen/arch/x86/cpu/vpmu_intel.c b/xen/arch/x86/cpu/vpmu_intel.c
index 5e660af395..59d0b2ca36 100644
--- a/xen/arch/x86/cpu/vpmu_intel.c
+++ b/xen/arch/x86/cpu/vpmu_intel.c
@@ -67,6 +67,7 @@ static bool_t __read_mostly full_width_write;
 
 /* Number of general-purpose and fixed performance counters */
 unsigned int __read_mostly arch_pmc_cnt, fixed_pmc_cnt;
+static unsigned int __read_mostly vpmu_version;
 
 /* Masks used for testing whether and MSR is valid */
 #define ARCH_CTRL_MASK  (~((1ull << 32) - 1) | (1ull << 21) | ARCH_CNTR_PIN_CONTROL)
@@ -228,6 +229,9 @@ static void core2_vpmu_set_msr_bitmap(struct vcpu *v)
 
     vmx_clear_msr_intercept(v, MSR_CORE_PERF_FIXED_CTR_CTRL, VMX_MSR_R);
     vmx_clear_msr_intercept(v, MSR_IA32_DS_AREA, VMX_MSR_R);
+
+    if ( vpmu_version >= 4 )
+        vmx_clear_msr_intercept(v, MSR_CORE_PERF_GLOBAL_STATUS, VMX_MSR_R);
 }
 
 static void core2_vpmu_unset_msr_bitmap(struct vcpu *v)
@@ -250,6 +254,9 @@ static void core2_vpmu_unset_msr_bitmap(struct vcpu *v)
 
     vmx_set_msr_intercept(v, MSR_CORE_PERF_FIXED_CTR_CTRL, VMX_MSR_R);
     vmx_set_msr_intercept(v, MSR_IA32_DS_AREA, VMX_MSR_R);
+
+    if ( vpmu_version >= 4 )
+        vmx_set_msr_intercept(v, MSR_CORE_PERF_GLOBAL_STATUS, VMX_MSR_R);
 }
 
 static inline void __core2_vpmu_save(struct vcpu *v)
@@ -268,7 +275,7 @@ static inline void __core2_vpmu_save(struct vcpu *v)
         rdmsrl(MSR_P6_EVNTSEL(i), xen_pmu_cntr_pair[i].control);
     }
 
-    if ( !is_hvm_vcpu(v) )
+    if ( !is_hvm_vcpu(v) || vpmu_version >= 4 )
         rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, core2_vpmu_cxt->global_status);
     /* Save MSR to private context to make it fork-friendly */
     else if ( mem_sharing_enabled(v->domain) )
@@ -338,6 +345,15 @@ static inline void __core2_vpmu_load(struct vcpu *v)
     else if ( mem_sharing_is_fork(v->domain) )
         vmx_write_guest_msr(v, MSR_CORE_PERF_GLOBAL_CTRL,
                             core2_vpmu_cxt->global_ctrl);
+
+    if ( vpmu_version >= 4 ) {
+        const uint64_t global_status = core2_vpmu_cxt->global_status;
+        const uint64_t reset = (~global_status) & global_ovf_ctrl_mask ;
+        if ( reset )
+            wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, reset);
+        if ( global_status )
+            wrmsrl(MSR_CORE_PERF_GLOBAL_STATUS_SET, global_status);
+    }
 }
 
 static int core2_vpmu_verify(struct vcpu *v)
@@ -917,6 +933,7 @@ const struct arch_vpmu_ops *__init core2_vpmu_init(void)
         printk(XENLOG_INFO "VPMU: PMU version %u is not fully supported. "
                "Emulating version %d\n", version, VPMU_VERSION_MAX);
     }
+    vpmu_version = version;
 
     if ( current_cpu_data.x86 != 6 )
     {
-- 
2.41.0




 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.