
[Xen-changelog] [xen master] x86/VPMU: Factor out VPMU common code



commit 426368be82b0184df1b537bb659680026b747a50
Author:     Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>
AuthorDate: Mon Apr 15 11:26:44 2013 +0200
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Mon Apr 15 11:26:44 2013 +0200

    x86/VPMU: Factor out VPMU common code
    
    Factor out common code from SVM and VMX into VPMU.
    
    Signed-off-by: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>
    Reviewed-by: Dietmar Hahn <dietmar.hahn@xxxxxxxxxxxxxx>
    Tested-by: Dietmar Hahn <dietmar.hahn@xxxxxxxxxxxxxx>
    Acked-by: Jun Nakajima <jun.nakajima@xxxxxxxxx>
---
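For readers skimming the diff: after this change the common layer in
xen/arch/x86/hvm/vpmu.c owns the VPMU state-flag checks and the APIC_LVTPC
save/restore, and the SVM and VMX backends only fill in their arch_vpmu_ops
hooks. Below is a minimal standalone sketch of the save-side dispatch
pattern; the types, constants, and helper names are simplified stand-ins
for illustration, not the Xen definitions (the real ones live in
xen/include/asm-x86/hvm/vpmu.h and the apic headers).

    #include <stdint.h>

    struct vcpu;                                /* opaque here */

    struct arch_vpmu_ops {                      /* per-vendor hooks */
        int  (*do_interrupt)(struct vcpu *v);
        void (*arch_vpmu_save)(struct vcpu *v);
        void (*arch_vpmu_load)(struct vcpu *v);
    };

    struct vpmu_struct {
        uint32_t flags;
        uint32_t hw_lapic_lvtpc;    /* moved out of the vendor contexts */
        struct arch_vpmu_ops *arch_vpmu_ops;
    };

    /* Illustrative values only. */
    #define VPMU_CONTEXT_ALLOCATED  0x1u
    #define VPMU_CONTEXT_LOADED     0x2u
    #define APIC_LVT_MASKED         0x10000u
    #define PMU_APIC_VECTOR         0xf8u

    /* Stand-ins for Xen's vcpu_vpmu(), apic_read(), apic_write(). */
    extern struct vpmu_struct *vcpu_vpmu(struct vcpu *v);
    extern uint32_t apic_read_lvtpc(void);
    extern void apic_write_lvtpc(uint32_t val);

    /* Common save path: check the state flags once, run the vendor
     * hook, then do the LVTPC bookkeeping that svm/vpmu.c and
     * vmx/vpmu_core2.c used to duplicate. */
    void vpmu_save_sketch(struct vcpu *v)
    {
        struct vpmu_struct *vpmu = vcpu_vpmu(v);

        if ( !(vpmu->flags & VPMU_CONTEXT_ALLOCATED) ||
             !(vpmu->flags & VPMU_CONTEXT_LOADED) )
            return;

        if ( vpmu->arch_vpmu_ops )
            vpmu->arch_vpmu_ops->arch_vpmu_save(v);

        /* Mask the PMU LVT entry while the context is out of sync. */
        vpmu->hw_lapic_lvtpc = apic_read_lvtpc();
        apic_write_lvtpc(PMU_APIC_VECTOR | APIC_LVT_MASKED);
        vpmu->flags &= ~VPMU_CONTEXT_LOADED;
    }

The load side is the mirror image: vpmu_load() performs the same flag
check, restores hw_lapic_lvtpc, and calls arch_vpmu_load(). A sketch of
the shared interrupt path follows the diff below.
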
 xen/arch/x86/hvm/svm/vpmu.c              |   37 ----------------------
 xen/arch/x86/hvm/vmx/vpmu_core2.c        |   30 +-----------------
 xen/arch/x86/hvm/vpmu.c                  |   50 +++++++++++++++++++++++++++---
 xen/include/asm-x86/hvm/vmx/vpmu_core2.h |    1 -
 xen/include/asm-x86/hvm/vpmu.h           |    1 +
 5 files changed, 47 insertions(+), 72 deletions(-)

diff --git a/xen/arch/x86/hvm/svm/vpmu.c b/xen/arch/x86/hvm/svm/vpmu.c
index 51e5495..f2f2d12 100644
--- a/xen/arch/x86/hvm/svm/vpmu.c
+++ b/xen/arch/x86/hvm/svm/vpmu.c
@@ -87,7 +87,6 @@ static const u32 AMD_F15H_CTRLS[] = {
 struct amd_vpmu_context {
     u64 counters[MAX_NUM_COUNTERS];
     u64 ctrls[MAX_NUM_COUNTERS];
-    u32 hw_lapic_lvtpc;
     bool_t msr_bitmap_set;
 };
 
@@ -171,22 +170,6 @@ static void amd_vpmu_unset_msr_bitmap(struct vcpu *v)
 
 static int amd_vpmu_do_interrupt(struct cpu_user_regs *regs)
 {
-    struct vcpu *v = current;
-    struct vlapic *vlapic = vcpu_vlapic(v);
-    u32 vlapic_lvtpc;
-    unsigned char int_vec;
-
-    if ( !is_vlapic_lvtpc_enabled(vlapic) )
-        return 0;
-
-    vlapic_lvtpc = vlapic_get_reg(vlapic, APIC_LVTPC);
-    int_vec = vlapic_lvtpc & APIC_VECTOR_MASK;
-
-    if ( GET_APIC_DELIVERY_MODE(vlapic_lvtpc) == APIC_MODE_FIXED )
-        vlapic_set_irq(vcpu_vlapic(v), int_vec, 0);
-    else
-        v->nmi_pending = 1;
-
     return 1;
 }
 
@@ -205,17 +188,7 @@ static inline void context_restore(struct vcpu *v)
 
 static void amd_vpmu_restore(struct vcpu *v)
 {
-    struct vpmu_struct *vpmu = vcpu_vpmu(v);
-    struct amd_vpmu_context *ctxt = vpmu->context;
-
-    if ( !(vpmu_is_set(vpmu, VPMU_CONTEXT_ALLOCATED) &&
-           vpmu_is_set(vpmu, VPMU_RUNNING)) )
-        return;
-
-    apic_write(APIC_LVTPC, ctxt->hw_lapic_lvtpc);
     context_restore(v);
-
-    vpmu_set(vpmu, VPMU_CONTEXT_LOADED);
 }
 
 static inline void context_save(struct vcpu *v)
@@ -237,18 +210,10 @@ static void amd_vpmu_save(struct vcpu *v)
     struct vpmu_struct *vpmu = vcpu_vpmu(v);
     struct amd_vpmu_context *ctx = vpmu->context;
 
-    if ( !(vpmu_is_set(vpmu, VPMU_CONTEXT_ALLOCATED) &&
-           vpmu_is_set(vpmu, VPMU_CONTEXT_LOADED)) )
-        return;
-
     context_save(v);
 
     if ( !vpmu_is_set(vpmu, VPMU_RUNNING) && ctx->msr_bitmap_set )
         amd_vpmu_unset_msr_bitmap(v);
-
-    ctx->hw_lapic_lvtpc = apic_read(APIC_LVTPC);
-    apic_write(APIC_LVTPC,  ctx->hw_lapic_lvtpc | APIC_LVT_MASKED);
-    vpmu_reset(vpmu, VPMU_CONTEXT_LOADED);
 }
 
 static void context_update(unsigned int msr, u64 msr_content)
@@ -271,8 +236,6 @@ static void context_update(unsigned int msr, u64 msr_content)
     for ( i = 0; i < num_counters; i++ )
         if ( msr == ctrls[i] )
             ctxt->ctrls[i] = msr_content;
-
-    ctxt->hw_lapic_lvtpc = apic_read(APIC_LVTPC);
 }
 
 static int amd_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
diff --git a/xen/arch/x86/hvm/vmx/vpmu_core2.c b/xen/arch/x86/hvm/vmx/vpmu_core2.c
index 7c86a0b..6195bfc 100644
--- a/xen/arch/x86/hvm/vmx/vpmu_core2.c
+++ b/xen/arch/x86/hvm/vmx/vpmu_core2.c
@@ -305,25 +305,18 @@ static inline void __core2_vpmu_save(struct vcpu *v)
         rdmsrl(core2_fix_counters.msr[i], core2_vpmu_cxt->fix_counters[i]);
     for ( i = 0; i < core2_get_pmc_count(); i++ )
         rdmsrl(MSR_IA32_PERFCTR0+i, core2_vpmu_cxt->arch_msr_pair[i].counter);
-    core2_vpmu_cxt->hw_lapic_lvtpc = apic_read(APIC_LVTPC);
-    apic_write(APIC_LVTPC, PMU_APIC_VECTOR | APIC_LVT_MASKED);
 }
 
 static void core2_vpmu_save(struct vcpu *v)
 {
     struct vpmu_struct *vpmu = vcpu_vpmu(v);
 
-    if ( !(vpmu_is_set(vpmu, VPMU_CONTEXT_ALLOCATED) &&
-           vpmu_is_set(vpmu, VPMU_CONTEXT_LOADED)) )
-        return;
-
     __core2_vpmu_save(v);
 
     /* Unset PMU MSR bitmap to trap lazy load. */
     if ( !vpmu_is_set(vpmu, VPMU_RUNNING) && cpu_has_vmx_msr_bitmap )
         core2_vpmu_unset_msr_bitmap(v->arch.hvm_vmx.msr_bitmap);
 
-    vpmu_reset(vpmu, VPMU_CONTEXT_LOADED);
     return;
 }
 
@@ -341,20 +334,11 @@ static inline void __core2_vpmu_load(struct vcpu *v)
         wrmsrl(core2_ctrls.msr[i], core2_vpmu_cxt->ctrls[i]);
     for ( i = 0; i < core2_get_pmc_count(); i++ )
         wrmsrl(MSR_P6_EVNTSEL0+i, core2_vpmu_cxt->arch_msr_pair[i].control);
-
-    apic_write_around(APIC_LVTPC, core2_vpmu_cxt->hw_lapic_lvtpc);
 }
 
 static void core2_vpmu_load(struct vcpu *v)
 {
-    struct vpmu_struct *vpmu = vcpu_vpmu(v);
-
-    /* Only when PMU is counting, we load PMU context immediately. */
-    if ( !(vpmu_is_set(vpmu, VPMU_CONTEXT_ALLOCATED) &&
-           vpmu_is_set(vpmu, VPMU_RUNNING)) )
-        return;
     __core2_vpmu_load(v);
-    vpmu_set(vpmu, VPMU_CONTEXT_LOADED);
 }
 
 static int core2_vpmu_alloc_resource(struct vcpu *v)
@@ -705,11 +689,8 @@ static int core2_vpmu_do_interrupt(struct cpu_user_regs *regs)
 {
     struct vcpu *v = current;
     u64 msr_content;
-    u32 vlapic_lvtpc;
-    unsigned char int_vec;
     struct vpmu_struct *vpmu = vcpu_vpmu(v);
     struct core2_vpmu_context *core2_vpmu_cxt = vpmu->context;
-    struct vlapic *vlapic = vcpu_vlapic(v);
 
     rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, msr_content);
     if ( msr_content )
@@ -728,18 +709,9 @@ static int core2_vpmu_do_interrupt(struct cpu_user_regs *regs)
             return 0;
     }
 
+    /* HW sets the MASK bit when a performance counter interrupt occurs. */
     apic_write_around(APIC_LVTPC, apic_read(APIC_LVTPC) & ~APIC_LVT_MASKED);
 
-    if ( !is_vlapic_lvtpc_enabled(vlapic) )
-        return 1;
-
-    vlapic_lvtpc = vlapic_get_reg(vlapic, APIC_LVTPC);
-    int_vec = vlapic_lvtpc & APIC_VECTOR_MASK;
-    vlapic_set_reg(vlapic, APIC_LVTPC, vlapic_lvtpc | APIC_LVT_MASKED);
-    if ( GET_APIC_DELIVERY_MODE(vlapic_lvtpc) == APIC_MODE_FIXED )
-        vlapic_set_irq(vcpu_vlapic(v), int_vec, 0);
-    else
-        v->nmi_pending = 1;
     return 1;
 }
 
diff --git a/xen/arch/x86/hvm/vpmu.c b/xen/arch/x86/hvm/vpmu.c
index 3b69580..ff5f065 100644
--- a/xen/arch/x86/hvm/vpmu.c
+++ b/xen/arch/x86/hvm/vpmu.c
@@ -31,7 +31,7 @@
 #include <asm/hvm/vpmu.h>
 #include <asm/hvm/svm/svm.h>
 #include <asm/hvm/svm/vmcb.h>
-
+#include <asm/apic.h>
 
 /*
  * "vpmu" :     vpmu generally enabled
@@ -83,10 +83,31 @@ int vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content)
 
 int vpmu_do_interrupt(struct cpu_user_regs *regs)
 {
-    struct vpmu_struct *vpmu = vcpu_vpmu(current);
+    struct vcpu *v = current;
+    struct vpmu_struct *vpmu = vcpu_vpmu(v);
+
+    if ( vpmu->arch_vpmu_ops )
+    {
+        struct vlapic *vlapic = vcpu_vlapic(v);
+        u32 vlapic_lvtpc;
+        unsigned char int_vec;
+
+        if ( !vpmu->arch_vpmu_ops->do_interrupt(regs) )
+            return 0;
+
+        if ( !is_vlapic_lvtpc_enabled(vlapic) )
+            return 1;
+
+        vlapic_lvtpc = vlapic_get_reg(vlapic, APIC_LVTPC);
+        int_vec = vlapic_lvtpc & APIC_VECTOR_MASK;
+
+        if ( GET_APIC_DELIVERY_MODE(vlapic_lvtpc) == APIC_MODE_FIXED )
+            vlapic_set_irq(vcpu_vlapic(v), int_vec, 0);
+        else
+            v->nmi_pending = 1;
+        return 1;
+    }
 
-    if ( vpmu->arch_vpmu_ops && vpmu->arch_vpmu_ops->do_interrupt )
-        return vpmu->arch_vpmu_ops->do_interrupt(regs);
     return 0;
 }
 
@@ -104,16 +125,35 @@ void vpmu_save(struct vcpu *v)
 {
     struct vpmu_struct *vpmu = vcpu_vpmu(v);
 
-    if ( vpmu->arch_vpmu_ops && vpmu->arch_vpmu_ops->arch_vpmu_save )
+    if ( !(vpmu_is_set(vpmu, VPMU_CONTEXT_ALLOCATED) &&
+           vpmu_is_set(vpmu, VPMU_CONTEXT_LOADED)) )
+        return;
+
+    if ( vpmu->arch_vpmu_ops )
         vpmu->arch_vpmu_ops->arch_vpmu_save(v);
+
+    vpmu->hw_lapic_lvtpc = apic_read(APIC_LVTPC);
+    apic_write(APIC_LVTPC, PMU_APIC_VECTOR | APIC_LVT_MASKED);
+
+    vpmu_reset(vpmu, VPMU_CONTEXT_LOADED);
 }
 
 void vpmu_load(struct vcpu *v)
 {
     struct vpmu_struct *vpmu = vcpu_vpmu(v);
 
+    /* Only when PMU is counting, we load PMU context immediately. */
+    if ( !(vpmu_is_set(vpmu, VPMU_CONTEXT_ALLOCATED) &&
+           vpmu_is_set(vpmu, VPMU_RUNNING)) )
+        return;
+
     if ( vpmu->arch_vpmu_ops && vpmu->arch_vpmu_ops->arch_vpmu_load )
+    {
+        apic_write_around(APIC_LVTPC, vpmu->hw_lapic_lvtpc);
         vpmu->arch_vpmu_ops->arch_vpmu_load(v);
+    }
+
+    vpmu_set(vpmu, VPMU_CONTEXT_LOADED);
 }
 
 void vpmu_initialise(struct vcpu *v)
diff --git a/xen/include/asm-x86/hvm/vmx/vpmu_core2.h b/xen/include/asm-x86/hvm/vmx/vpmu_core2.h
index 4128f2a..60b05fd 100644
--- a/xen/include/asm-x86/hvm/vmx/vpmu_core2.h
+++ b/xen/include/asm-x86/hvm/vmx/vpmu_core2.h
@@ -44,7 +44,6 @@ struct core2_vpmu_context {
     u64 fix_counters[VPMU_CORE2_NUM_FIXED];
     u64 ctrls[VPMU_CORE2_NUM_CTRLS];
     u64 global_ovf_status;
-    u32 hw_lapic_lvtpc;
     struct arch_msr_pair arch_msr_pair[1];
 };
 
diff --git a/xen/include/asm-x86/hvm/vpmu.h b/xen/include/asm-x86/hvm/vpmu.h
index cd31f5e..01be976 100644
--- a/xen/include/asm-x86/hvm/vpmu.h
+++ b/xen/include/asm-x86/hvm/vpmu.h
@@ -62,6 +62,7 @@ int svm_vpmu_initialise(struct vcpu *, unsigned int flags);
 
 struct vpmu_struct {
     u32 flags;
+    u32 hw_lapic_lvtpc;
     void *context;
     struct arch_vpmu_ops *arch_vpmu_ops;
 };
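
The interrupt path in the new xen/arch/x86/hvm/vpmu.c above follows the
same shape: vpmu_do_interrupt() runs the vendor do_interrupt() hook first
(AMD's is reduced to acknowledging the interrupt, while the Core 2 one
still checks MSR_CORE_PERF_GLOBAL_STATUS), and the common code alone
decides between fixed-vector injection through the virtual LAPIC and a
pending NMI. A compressed sketch, reusing the stand-in types from the
note above; the vlapic helpers here are simplified placeholders, not
Xen's API:

    /* Stand-ins for Xen's vlapic accessors and injection helpers. */
    extern int is_lvtpc_enabled(struct vcpu *v);
    extern uint32_t get_lvtpc(struct vcpu *v);
    extern void inject_vector(struct vcpu *v, uint8_t vec);
    extern void set_nmi_pending(struct vcpu *v);

    #define APIC_VECTOR_MASK  0xffu
    #define APIC_MODE_FIXED   0x0u
    /* LVT delivery mode lives in bits 10:8 of the register. */
    #define DELIVERY_MODE(lvt) (((lvt) >> 8) & 0x7u)

    int vpmu_do_interrupt_sketch(struct vcpu *v)
    {
        struct vpmu_struct *vpmu = vcpu_vpmu(v);

        if ( !vpmu->arch_vpmu_ops ||
             !vpmu->arch_vpmu_ops->do_interrupt(v) )
            return 0;                   /* nothing pending for us */

        if ( !is_lvtpc_enabled(v) )
            return 1;                   /* handled; nothing to inject */

        uint32_t lvtpc = get_lvtpc(v);

        if ( DELIVERY_MODE(lvtpc) == APIC_MODE_FIXED )
            inject_vector(v, lvtpc & APIC_VECTOR_MASK);
        else
            set_nmi_pending(v);         /* e.g. NMI delivery mode */

        return 1;
    }
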
--
generated by git-patchbot for /home/xen/git/xen.git#master
