
[Xen-changelog] [xen master] Intel/VPMU: Add support for full-width PMC writes



commit 330c2e4e9430855cd7e5dd45b247ccc27bf92c7a
Author:     Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>
AuthorDate: Wed Aug 7 09:51:02 2013 +0200
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Wed Aug 7 09:51:02 2013 +0200

    Intel/VPMU: Add support for full-width PMC writes
    
    A recent Linux commit (069e0c3c405814778c7475d95b9fff5318f39834) added
    support for full-width PMC writes to performance counter registers,
    making these registers the default for perf. Since the current Xen VPMU
    does not support these new MSRs, perf will fail to initialise in guests.
    
    Signed-off-by: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>
    Reviewed-by: Dietmar Hahn <dietmar.hahn@xxxxxxxxxxxxxx>
    Acked-by: Keir Fraser <keir@xxxxxxx>
---
 xen/arch/x86/hvm/vmx/vpmu_core2.c |   41 ++++++++++++++++++++++++++++++++----
 xen/include/asm-x86/msr-index.h   |    2 +-
 2 files changed, 37 insertions(+), 6 deletions(-)
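
The capability that gates the new code paths below is bit 13 (FW_WRITE) of
IA32_PERF_CAPABILITIES (MSR 0x345), which the patch latches into
full_width_write via the rdmsrl() added to vmx_vpmu_initialise(). The
following is a minimal user-space sketch, not part of this patch, that reads
the same MSR through Linux's msr driver and reports whether full-width PMC
writes are available; the device path and the build line are illustrative
assumptions (msr module loaded, running as root), only the bit-13 test
mirrors the patch.

/* Illustrative only -- not part of this patch. Reads IA32_PERF_CAPABILITIES
 * (MSR 0x345) via Linux's msr driver and tests bit 13 (FW_WRITE), the same
 * bit the patch stores in full_width_write. Assumes the msr module is
 * loaded and the program runs as root. Build: cc -o fwcheck fwcheck.c */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define MSR_IA32_PERF_CAPABILITIES 0x345
#define PERF_CAP_FW_WRITE          (1ULL << 13)

int main(void)
{
    uint64_t caps;
    int fd = open("/dev/cpu/0/msr", O_RDONLY);

    if ( fd < 0 )
    {
        perror("open /dev/cpu/0/msr");
        return 1;
    }
    /* The msr driver uses the MSR index as the file offset. */
    if ( pread(fd, &caps, sizeof(caps), MSR_IA32_PERF_CAPABILITIES) != sizeof(caps) )
    {
        perror("pread");
        close(fd);
        return 1;
    }
    close(fd);

    printf("IA32_PERF_CAPABILITIES = %#llx, full-width PMC writes %s\n",
           (unsigned long long)caps,
           (caps & PERF_CAP_FW_WRITE) ? "supported" : "not supported");
    return 0;
}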

diff --git a/xen/arch/x86/hvm/vmx/vpmu_core2.c b/xen/arch/x86/hvm/vmx/vpmu_core2.c
index 15b2036..41a326b 100644
--- a/xen/arch/x86/hvm/vmx/vpmu_core2.c
+++ b/xen/arch/x86/hvm/vmx/vpmu_core2.c
@@ -64,6 +64,10 @@
 #define PMU_FIXED_WIDTH_BITS     8  /* 8 bits 5..12 */
 #define PMU_FIXED_WIDTH_MASK     (((1 << PMU_FIXED_WIDTH_BITS) -1) << PMU_FIXED_WIDTH_SHIFT)
 
+/* Alias registers (0x4c1) for full-width writes to PMCs */
+#define MSR_PMC_ALIAS_MASK       (~(MSR_IA32_PERFCTR0 ^ MSR_IA32_A_PERFCTR0))
+static bool_t __read_mostly full_width_write;
+
 /*
  * QUIRK to workaround an issue on various family 6 cpus.
  * The issue leads to endless PMC interrupt loops on the processor.
@@ -195,6 +199,7 @@ static int core2_get_bitwidth_fix_count(void)
 static int is_core2_vpmu_msr(u32 msr_index, int *type, int *index)
 {
     int i;
+    u32 msr_index_pmc;
 
     for ( i = 0; i < core2_fix_counters.num; i++ )
     {
@@ -224,11 +229,12 @@ static int is_core2_vpmu_msr(u32 msr_index, int *type, int *index)
         return 1;
     }
 
-    if ( (msr_index >= MSR_IA32_PERFCTR0) &&
-         (msr_index < (MSR_IA32_PERFCTR0 + core2_get_pmc_count())) )
+    msr_index_pmc = msr_index & MSR_PMC_ALIAS_MASK;
+    if ( (msr_index_pmc >= MSR_IA32_PERFCTR0) &&
+         (msr_index_pmc < (MSR_IA32_PERFCTR0 + core2_get_pmc_count())) )
     {
         *type = MSR_TYPE_ARCH_COUNTER;
-        *index = msr_index - MSR_IA32_PERFCTR0;
+        *index = msr_index_pmc - MSR_IA32_PERFCTR0;
         return 1;
     }
 
@@ -259,6 +265,13 @@ static void core2_vpmu_set_msr_bitmap(unsigned long *msr_bitmap)
         clear_bit(msraddr_to_bitpos(MSR_IA32_PERFCTR0+i), msr_bitmap);
         clear_bit(msraddr_to_bitpos(MSR_IA32_PERFCTR0+i),
                   msr_bitmap + 0x800/BYTES_PER_LONG);
+
+        if ( full_width_write )
+        {
+            clear_bit(msraddr_to_bitpos(MSR_IA32_A_PERFCTR0 + i), msr_bitmap);
+            clear_bit(msraddr_to_bitpos(MSR_IA32_A_PERFCTR0 + i),
+                      msr_bitmap + 0x800/BYTES_PER_LONG);
+        }
     }
 
     /* Allow Read PMU Non-global Controls Directly. */
@@ -283,7 +296,15 @@ static void core2_vpmu_unset_msr_bitmap(unsigned long *msr_bitmap)
         set_bit(msraddr_to_bitpos(MSR_IA32_PERFCTR0+i), msr_bitmap);
         set_bit(msraddr_to_bitpos(MSR_IA32_PERFCTR0+i),
                 msr_bitmap + 0x800/BYTES_PER_LONG);
+
+        if ( full_width_write )
+        {
+            set_bit(msraddr_to_bitpos(MSR_IA32_A_PERFCTR0 + i), msr_bitmap);
+            set_bit(msraddr_to_bitpos(MSR_IA32_A_PERFCTR0 + i),
+                      msr_bitmap + 0x800/BYTES_PER_LONG);
+        }
     }
+
     for ( i = 0; i < core2_ctrls.num; i++ )
         set_bit(msraddr_to_bitpos(core2_ctrls.msr[i]), msr_bitmap);
     for ( i = 0; i < core2_get_pmc_count(); i++ )
@@ -322,13 +343,18 @@ static int core2_vpmu_save(struct vcpu *v)
 
 static inline void __core2_vpmu_load(struct vcpu *v)
 {
-    int i;
+    unsigned int i, pmc_start;
     struct core2_vpmu_context *core2_vpmu_cxt = vcpu_vpmu(v)->context;
 
     for ( i = 0; i < core2_fix_counters.num; i++ )
         wrmsrl(core2_fix_counters.msr[i], core2_vpmu_cxt->fix_counters[i]);
+
+    if ( full_width_write )
+        pmc_start = MSR_IA32_A_PERFCTR0;
+    else
+        pmc_start = MSR_IA32_PERFCTR0;
     for ( i = 0; i < core2_get_pmc_count(); i++ )
-        wrmsrl(MSR_IA32_PERFCTR0+i, core2_vpmu_cxt->arch_msr_pair[i].counter);
+        wrmsrl(pmc_start + i, core2_vpmu_cxt->arch_msr_pair[i].counter);
 
     for ( i = 0; i < core2_ctrls.num; i++ )
         wrmsrl(core2_ctrls.msr[i], core2_vpmu_cxt->ctrls[i]);
@@ -855,6 +881,11 @@ int vmx_vpmu_initialise(struct vcpu *v, unsigned int vpmu_flags)
 
     if ( family == 6 )
     {
+        u64 caps;
+
+        rdmsrl(MSR_IA32_PERF_CAPABILITIES, caps);
+        full_width_write = (caps >> 13) & 1;
+
         switch ( cpu_model )
         {
         /* Core2: */
diff --git a/xen/include/asm-x86/msr-index.h b/xen/include/asm-x86/msr-index.h
index f500efd..03cb00e 100644
--- a/xen/include/asm-x86/msr-index.h
+++ b/xen/include/asm-x86/msr-index.h
@@ -33,7 +33,7 @@
 
 /* Intel MSRs. Some also available on other CPUs */
 #define MSR_IA32_PERFCTR0              0x000000c1
-#define MSR_IA32_PERFCTR1              0x000000c2
+#define MSR_IA32_A_PERFCTR0            0x000004c1
 #define MSR_FSB_FREQ                   0x000000cd
 
 #define MSR_NHM_SNB_PKG_CST_CFG_CTL    0x000000e2
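
The new MSR_IA32_A_PERFCTR0 definition is what makes the MSR_PMC_ALIAS_MASK
trick in is_core2_vpmu_msr() work: 0xc1 ^ 0x4c1 == 0x400, so masking an
incoming MSR index with ~0x400 collapses the 0x4c1-based full-width aliases
onto the 0xc1-based counters the existing range check already understands.
A self-contained sketch of that mapping follows; the counter count of 4 is
an arbitrary example, not a value taken from the patch.

/* Illustrative only -- demonstrates how MSR_PMC_ALIAS_MASK folds the
 * full-width alias MSRs (0x4c1 + i) onto the legacy PMC MSRs (0xc1 + i).
 * nr_pmcs is a made-up example value. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define MSR_IA32_PERFCTR0   0x000000c1
#define MSR_IA32_A_PERFCTR0 0x000004c1
#define MSR_PMC_ALIAS_MASK  (~(MSR_IA32_PERFCTR0 ^ MSR_IA32_A_PERFCTR0))

int main(void)
{
    const unsigned int nr_pmcs = 4; /* example only */
    unsigned int i;

    for ( i = 0; i < nr_pmcs; i++ )
    {
        uint32_t legacy = MSR_IA32_PERFCTR0 + i;
        uint32_t alias  = MSR_IA32_A_PERFCTR0 + i;

        /* Both addresses resolve to the same counter index. */
        assert((legacy & MSR_PMC_ALIAS_MASK) == legacy);
        assert((alias & MSR_PMC_ALIAS_MASK) == legacy);

        printf("PMC%u: legacy %#x, alias %#x -> index %u\n",
               i, legacy, alias,
               (alias & MSR_PMC_ALIAS_MASK) - MSR_IA32_PERFCTR0);
    }
    return 0;
}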
--
generated by git-patchbot for /home/xen/git/xen.git#master

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog