
Re: [Xen-devel] [PATCH v5 RESEND 04/17] intel/VPMU: Clean up Intel VPMU code



On 04/26/2014 04:20 AM, Tian, Kevin wrote:
From: Boris Ostrovsky [mailto:boris.ostrovsky@xxxxxxxxxx]
Sent: Wednesday, April 23, 2014 8:50 PM

Remove struct pmumsr and core2_pmu_enable. Replace static MSR structures with
fields in core2_vpmu_context.

Call core2_get_pmc_count() once, during initialization.

Properly clean up when core2_vpmu_alloc_resource() fails and add routines
to remove MSRs from VMCS.


Signed-off-by: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>
It's a good cleanup in general. Ack, with two small comments below.

Acked-by: Kevin Tian <kevin.tian@xxxxxxxxx>


---
  xen/arch/x86/hvm/vmx/vmcs.c              |  55 ++++++
  xen/arch/x86/hvm/vmx/vpmu_core2.c        | 310 ++++++++++++++-----------------
  xen/include/asm-x86/hvm/vmx/vmcs.h       |   2 +
  xen/include/asm-x86/hvm/vmx/vpmu_core2.h |  19 --
  4 files changed, 199 insertions(+), 187 deletions(-)

diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c
index 44f33cb..5f86b17 100644
--- a/xen/arch/x86/hvm/vmx/vmcs.c
+++ b/xen/arch/x86/hvm/vmx/vmcs.c
@@ -1205,6 +1205,34 @@ int vmx_add_guest_msr(u32 msr)
      return 0;
  }

+void vmx_rm_guest_msr(u32 msr)
+{
+    struct vcpu *curr = current;
+    unsigned int idx, msr_count = curr->arch.hvm_vmx.msr_count;
+    struct vmx_msr_entry *msr_area = curr->arch.hvm_vmx.msr_area;
+
+    if ( msr_area == NULL )
+        return;
+
+    for ( idx = 0; idx < msr_count; idx++ )
+        if ( msr_area[idx].index == msr )
+            break;
+
+    if ( idx == msr_count )
+        return;
+
+    for ( ; idx < msr_count - 1; idx++ )
+    {
+        msr_area[idx].index = msr_area[idx + 1].index;
+        msr_area[idx].data = msr_area[idx + 1].data;
+    }
+    msr_area[msr_count - 1].index = 0;
+
+    curr->arch.hvm_vmx.msr_count = --msr_count;
+    __vmwrite(VM_EXIT_MSR_STORE_COUNT, msr_count);
+    __vmwrite(VM_ENTRY_MSR_LOAD_COUNT, msr_count);
+}
+
  int vmx_add_host_load_msr(u32 msr)
  {
      struct vcpu *curr = current;
@@ -1235,6 +1263,33 @@ int vmx_add_host_load_msr(u32 msr)
      return 0;
  }

+void vmx_rm_host_load_msr(u32 msr)
+{
+    struct vcpu *curr = current;
+    unsigned int idx, msr_count = curr->arch.hvm_vmx.host_msr_count;
+    struct vmx_msr_entry *msr_area = curr->arch.hvm_vmx.host_msr_area;
+
+    if ( msr_area == NULL )
+        return;
+
+    for ( idx = 0; idx < msr_count; idx++ )
+        if ( msr_area[idx].index == msr )
+            break;
+
+    if ( idx == msr_count )
+        return;
+
+    for ( ; idx < msr_count - 1; idx++ )
+    {
+        msr_area[idx].index = msr_area[idx + 1].index;
+        msr_area[idx].data = msr_area[idx + 1].data;
+    }
+    msr_area[msr_count - 1].index = 0;
+
+    curr->arch.hvm_vmx.host_msr_count = --msr_count;
+    __vmwrite(VM_EXIT_MSR_LOAD_COUNT, msr_count);
+}
+
I suggest combining the common code in the above two 'rm' functions: you could pass in an msr_area pointer and count, since only the last few lines actually differ.

I should then also see whether I can merge vmx_add_guest_msr/vmx_add_host_load_msr, since they have similarities too.
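For illustration, the shared removal logic could be factored into a common helper along these lines. This is only a rough sketch based on the code quoted above; the helper name vmx_rm_msr_common and the exact split (each caller keeping its own __vmwrite() calls, which is where the two functions differ) are assumptions, not part of the posted patch.

/*
 * Hypothetical common helper (sketch only, not from the patch):
 * remove 'msr' from the given MSR area and compact the remaining
 * entries.  Returns the new entry count, or a negative value if the
 * MSR was not found or the area is not allocated.
 */
static int vmx_rm_msr_common(struct vmx_msr_entry *msr_area,
                             unsigned int msr_count, u32 msr)
{
    unsigned int idx;

    if ( msr_area == NULL )
        return -1;

    for ( idx = 0; idx < msr_count; idx++ )
        if ( msr_area[idx].index == msr )
            break;

    if ( idx == msr_count )
        return -1;

    for ( ; idx < msr_count - 1; idx++ )
    {
        msr_area[idx].index = msr_area[idx + 1].index;
        msr_area[idx].data = msr_area[idx + 1].data;
    }
    msr_area[msr_count - 1].index = 0;

    return msr_count - 1;
}

/* The guest-MSR variant would then reduce to the differing tail: */
void vmx_rm_guest_msr(u32 msr)
{
    struct vcpu *curr = current;
    int count = vmx_rm_msr_common(curr->arch.hvm_vmx.msr_area,
                                  curr->arch.hvm_vmx.msr_count, msr);

    if ( count < 0 )
        return;

    curr->arch.hvm_vmx.msr_count = count;
    __vmwrite(VM_EXIT_MSR_STORE_COUNT, count);
    __vmwrite(VM_ENTRY_MSR_LOAD_COUNT, count);
}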


  void vmx_set_eoi_exit_bitmap(struct vcpu *v, u8 vector)
  {
      if ( !test_and_set_bit(vector, v->arch.hvm_vmx.eoi_exit_bitmap) )
diff --git a/xen/arch/x86/hvm/vmx/vpmu_core2.c b/xen/arch/x86/hvm/vmx/vpmu_core2.c
index 1e32ff3..513eca4 100644
--- a/xen/arch/x86/hvm/vmx/vpmu_core2.c
+++ b/xen/arch/x86/hvm/vmx/vpmu_core2.c
@@ -69,6 +69,26 @@
  static bool_t __read_mostly full_width_write;

  /*
+ * MSR_CORE_PERF_FIXED_CTR_CTRL contains the configuration of all fixed
+ * counters. 4 bits for every counter.
+ */
+#define FIXED_CTR_CTRL_BITS 4
+#define FIXED_CTR_CTRL_MASK ((1 << FIXED_CTR_CTRL_BITS) - 1)
+
+#define VPMU_CORE2_MAX_FIXED_PMCS     4
+struct core2_vpmu_context {
+    u64 fixed_ctrl;
+    u64 ds_area;
+    u64 pebs_enable;
Just keep it 'pebs' to be consistent.

The name was chosen to reflect the MSR name (MSR_IA32_PEBS_ENABLE).

-boris


+    u64 global_ovf_status;
+    u64 fix_counters[VPMU_CORE2_MAX_FIXED_PMCS];
+    struct arch_msr_pair arch_msr_pair[1];
+};
+
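As an aside on the FIXED_CTR_CTRL_* macros above: with 4 control bits per fixed counter, the per-counter control field can be picked out of fixed_ctrl by a shift and mask. A small illustrative helper (not part of the patch; the function name is hypothetical) might look like this.

/*
 * Illustrative only: extract the 4-bit control field of fixed counter
 * 'i' from the MSR_CORE_PERF_FIXED_CTR_CTRL value.  A non-zero field
 * means the counter is enabled for some ring level.
 */
static inline unsigned int fixed_ctr_ctrl_field(u64 fixed_ctrl, unsigned int i)
{
    return (fixed_ctrl >> (i * FIXED_CTR_CTRL_BITS)) & FIXED_CTR_CTRL_MASK;
}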


