
[Xen-changelog] [xen-unstable] vmx: Enable Core 2 Duo Performance Counters in HVM guest



# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1201687167 0
# Node ID 6ea3db7ae24df1afd8e13e13c8c55ada95dd8d13
# Parent  47b7ec3b4055d59a09a1af26a29e8ef90e0d8d9c
vmx: Enable Core 2 Duo Performance Counters in HVM guest
Signed-off-by: Haitao Shan <haitao.shan@xxxxxxxxx>
---
 xen/arch/x86/apic.c                            |   13 
 xen/arch/x86/hvm/svm/svm.c                     |    8 
 xen/arch/x86/hvm/vmx/Makefile                  |    2 
 xen/arch/x86/hvm/vmx/vmx.c                     |   26 +
 xen/arch/x86/hvm/vmx/vpmu.c                    |  119 ++++++
 xen/arch/x86/hvm/vmx/vpmu_core2.c              |  469 +++++++++++++++++++++++++
 xen/arch/x86/i8259.c                           |    1 
 xen/arch/x86/oprofile/op_model_ppro.c          |    8 
 xen/common/xenoprof.c                          |   41 ++
 xen/include/asm-x86/hvm/hvm.h                  |    6 
 xen/include/asm-x86/hvm/vlapic.h               |    6 
 xen/include/asm-x86/hvm/vmx/vmcs.h             |    4 
 xen/include/asm-x86/hvm/vmx/vpmu.h             |   83 ++++
 xen/include/asm-x86/hvm/vmx/vpmu_core2.h       |   68 +++
 xen/include/asm-x86/irq.h                      |    1 
 xen/include/asm-x86/mach-default/irq_vectors.h |    3 
 xen/include/xen/xenoprof.h                     |    6 
 17 files changed, 861 insertions(+), 3 deletions(-)
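
For reference, the guest-visible interface this changeset virtualizes is the ordinary architectural PMU MSR set: the per-counter event selects (MSR_P6_EVNTSEL0+), the counters themselves (MSR_IA32_PERFCTR0+), and the Core 2 global/fixed control MSRs. A minimal guest-kernel sketch of programming counter 0 follows; it is not part of this changeset, and the wrmsr/rdmsr helpers and the 0x3C "unhalted core cycles" event encoding are illustrative assumptions only.

    /* Illustrative guest-side sketch only -- runs at CPL0 inside the HVM
     * guest.  MSR numbers are the architectural ones; the event encoding
     * (0x3C, unhalted core cycles) is an assumption for the example. */
    #include <stdint.h>

    #define MSR_P6_EVNTSEL0           0x186
    #define MSR_IA32_PERFCTR0         0x0c1
    #define MSR_CORE_PERF_GLOBAL_CTRL 0x38f

    static inline void wrmsr64(uint32_t msr, uint64_t val)
    {
        __asm__ __volatile__ ( "wrmsr" : : "c" (msr),
                               "a" ((uint32_t)val), "d" ((uint32_t)(val >> 32)) );
    }

    static inline uint64_t rdmsr64(uint32_t msr)
    {
        uint32_t lo, hi;
        __asm__ __volatile__ ( "rdmsr" : "=a" (lo), "=d" (hi) : "c" (msr) );
        return ((uint64_t)hi << 32) | lo;
    }

    static uint64_t count_core_cycles_once(void)
    {
        wrmsr64(MSR_IA32_PERFCTR0, 0);                      /* clear PMC0 */
        wrmsr64(MSR_P6_EVNTSEL0, (1u << 22) |               /* EN */
                                 (1u << 17) | (1u << 16) |  /* OS | USR */
                                 0x3c);                     /* core cycles */
        wrmsr64(MSR_CORE_PERF_GLOBAL_CTRL, 1);              /* enable PMC0 */
        /* ... run the workload being measured ... */
        wrmsr64(MSR_CORE_PERF_GLOBAL_CTRL, 0);              /* stop counting */
        return rdmsr64(MSR_IA32_PERFCTR0);                  /* read the count */
    }

Each of these MSR accesses either traps to Xen or, once the vPMU context is loaded, is passed through via the MSR bitmap; the trapped cases are dispatched by vpmu_do_wrmsr()/vpmu_do_rdmsr() in the new vpmu.c to the Core 2 handlers added below.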

diff -r 47b7ec3b4055 -r 6ea3db7ae24d xen/arch/x86/apic.c
--- a/xen/arch/x86/apic.c       Wed Jan 30 09:38:10 2008 +0000
+++ b/xen/arch/x86/apic.c       Wed Jan 30 09:59:27 2008 +0000
@@ -93,6 +93,9 @@ void __init apic_intr_init(void)
     /* IPI vectors for APIC spurious and error interrupts */
     set_intr_gate(SPURIOUS_APIC_VECTOR, spurious_interrupt);
     set_intr_gate(ERROR_APIC_VECTOR, error_interrupt);
+
+    /* Performance Counters Interrupt */
+    set_intr_gate(PMU_APIC_VECTOR, pmu_apic_interrupt);
 
     /* thermal monitor LVT interrupt */
 #ifdef CONFIG_X86_MCE_P4THERMAL
@@ -1227,6 +1230,16 @@ fastcall void smp_error_interrupt(struct
 }
 
 /*
+ * This handles the performance counter (PMU) interrupt.
+ */
+
+fastcall void smp_pmu_apic_interrupt(struct cpu_user_regs *regs)
+{
+    ack_APIC_irq();
+    hvm_do_pmu_interrupt(regs);
+}
+
+/*
  * This initializes the IO-APIC and APIC hardware if this is
  * a UP kernel.
  */
diff -r 47b7ec3b4055 -r 6ea3db7ae24d xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c        Wed Jan 30 09:38:10 2008 +0000
+++ b/xen/arch/x86/hvm/svm/svm.c        Wed Jan 30 09:59:27 2008 +0000
@@ -860,6 +860,11 @@ static int svm_event_pending(struct vcpu
     return vmcb->eventinj.fields.v;
 }
 
+static int svm_do_pmu_interrupt(struct cpu_user_regs *regs)
+{
+    return 0;
+}
+
 static struct hvm_function_table svm_function_table = {
     .name                 = "SVM",
     .cpu_down             = svm_cpu_down,
@@ -882,7 +887,8 @@ static struct hvm_function_table svm_fun
     .set_tsc_offset       = svm_set_tsc_offset,
     .inject_exception     = svm_inject_exception,
     .init_hypercall_page  = svm_init_hypercall_page,
-    .event_pending        = svm_event_pending
+    .event_pending        = svm_event_pending,
+    .do_pmu_interrupt     = svm_do_pmu_interrupt
 };
 
 int start_svm(struct cpuinfo_x86 *c)
diff -r 47b7ec3b4055 -r 6ea3db7ae24d xen/arch/x86/hvm/vmx/Makefile
--- a/xen/arch/x86/hvm/vmx/Makefile     Wed Jan 30 09:38:10 2008 +0000
+++ b/xen/arch/x86/hvm/vmx/Makefile     Wed Jan 30 09:59:27 2008 +0000
@@ -9,3 +9,5 @@ endif
 endif
 obj-y += vmcs.o
 obj-y += vmx.o
+obj-y += vpmu.o
+obj-y += vpmu_core2.o
diff -r 47b7ec3b4055 -r 6ea3db7ae24d xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c        Wed Jan 30 09:38:10 2008 +0000
+++ b/xen/arch/x86/hvm/vmx/vmx.c        Wed Jan 30 09:59:27 2008 +0000
@@ -90,6 +90,8 @@ static int vmx_vcpu_initialise(struct vc
         return rc;
     }
 
+    vpmu_initialise(v);
+
     vmx_install_vlapic_mapping(v);
 
 #ifndef VMXASSIST
@@ -104,6 +106,7 @@ static void vmx_vcpu_destroy(struct vcpu
 static void vmx_vcpu_destroy(struct vcpu *v)
 {
     vmx_destroy_vmcs(v);
+    vpmu_destroy(v);
 }
 
 #ifdef __x86_64__
@@ -742,6 +745,7 @@ static void vmx_ctxt_switch_from(struct 
     vmx_save_guest_msrs(v);
     vmx_restore_host_msrs();
     vmx_save_dr(v);
+    vpmu_save(v);
 }
 
 static void vmx_ctxt_switch_to(struct vcpu *v)
@@ -752,6 +756,7 @@ static void vmx_ctxt_switch_to(struct vc
 
     vmx_restore_guest_msrs(v);
     vmx_restore_dr(v);
+    vpmu_load(v);
 }
 
 static unsigned long vmx_get_segment_base(struct vcpu *v, enum x86_segment seg)
@@ -1117,6 +1122,11 @@ static int vmx_event_pending(struct vcpu
 {
     ASSERT(v == current);
     return (__vmread(VM_ENTRY_INTR_INFO) & INTR_INFO_VALID_MASK);
+}
+
+static int vmx_do_pmu_interrupt(struct cpu_user_regs *regs)
+{
+    return vpmu_do_interrupt(regs);
 }
 
 static struct hvm_function_table vmx_function_table = {
@@ -1141,6 +1151,7 @@ static struct hvm_function_table vmx_fun
     .inject_exception     = vmx_inject_exception,
     .init_hypercall_page  = vmx_init_hypercall_page,
     .event_pending        = vmx_event_pending,
+    .do_pmu_interrupt     = vmx_do_pmu_interrupt,
     .cpu_up               = vmx_cpu_up,
     .cpu_down             = vmx_cpu_down,
 };
@@ -1300,7 +1311,6 @@ void vmx_cpuid_intercept(
 
     case 0x00000006:
     case 0x00000009:
-    case 0x0000000A:
         *eax = *ebx = *ecx = *edx = 0;
         break;
 
@@ -2376,7 +2386,15 @@ static int vmx_do_msr_read(struct cpu_us
         /* No point in letting the guest see real MCEs */
         msr_content = 0;
         break;
+    case MSR_IA32_MISC_ENABLE:
+        rdmsrl(MSR_IA32_MISC_ENABLE, msr_content);
+        /* Debug Store (BTS/PEBS) is not supported. */
+        msr_content |= MSR_IA32_MISC_ENABLE_BTS_UNAVAIL |
+                       MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL;
+        break;
     default:
+        if ( vpmu_do_rdmsr(regs) )
+            goto done;
         switch ( long_mode_do_msr_read(regs) )
         {
             case HNDL_unhandled:
@@ -2583,6 +2601,8 @@ static int vmx_do_msr_write(struct cpu_u
     case MSR_IA32_VMX_BASIC...MSR_IA32_VMX_PROCBASED_CTLS2:
         goto gp_fault;
     default:
+        if ( vpmu_do_wrmsr(regs) )
+            return 1;
         switch ( long_mode_do_msr_write(regs) )
         {
             case HNDL_unhandled:
@@ -2632,6 +2652,7 @@ static void vmx_do_extint(struct cpu_use
     fastcall void smp_call_function_interrupt(void);
     fastcall void smp_spurious_interrupt(struct cpu_user_regs *regs);
     fastcall void smp_error_interrupt(struct cpu_user_regs *regs);
+    fastcall void smp_pmu_apic_interrupt(struct cpu_user_regs *regs);
 #ifdef CONFIG_X86_MCE_P4THERMAL
     fastcall void smp_thermal_interrupt(struct cpu_user_regs *regs);
 #endif
@@ -2661,6 +2682,9 @@ static void vmx_do_extint(struct cpu_use
         break;
     case ERROR_APIC_VECTOR:
         smp_error_interrupt(regs);
+        break;
+    case PMU_APIC_VECTOR:
+        smp_pmu_apic_interrupt(regs);
         break;
 #ifdef CONFIG_X86_MCE_P4THERMAL
     case THERMAL_APIC_VECTOR:
diff -r 47b7ec3b4055 -r 6ea3db7ae24d xen/arch/x86/hvm/vmx/vpmu.c
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/x86/hvm/vmx/vpmu.c       Wed Jan 30 09:59:27 2008 +0000
@@ -0,0 +1,119 @@
+/*
+ * vpmu.c: PMU virtualization for HVM domain.
+ *
+ * Copyright (c) 2007, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * Author: Haitao Shan <haitao.shan@xxxxxxxxx>
+ */
+
+#include <xen/config.h>
+#include <xen/sched.h>
+#include <asm/regs.h>
+#include <asm/types.h>
+#include <asm/msr.h>
+#include <asm/hvm/support.h>
+#include <asm/hvm/vmx/vmx.h>
+#include <asm/hvm/vmx/vmcs.h>
+#include <public/sched.h>
+#include <public/hvm/save.h>
+#include <asm/hvm/vmx/vpmu.h>
+
+inline int vpmu_do_wrmsr(struct cpu_user_regs *regs)
+{
+    struct vpmu_struct *vpmu = vcpu_vpmu(current);
+
+    if ( vpmu->arch_vpmu_ops )
+        return vpmu->arch_vpmu_ops->do_wrmsr(regs);
+    return 0;
+}
+
+inline int vpmu_do_rdmsr(struct cpu_user_regs *regs)
+{
+    struct vpmu_struct *vpmu = vcpu_vpmu(current);
+
+    if ( vpmu->arch_vpmu_ops )
+        return vpmu->arch_vpmu_ops->do_rdmsr(regs);
+    return 0;
+}
+
+inline int vpmu_do_interrupt(struct cpu_user_regs *regs)
+{
+    struct vpmu_struct *vpmu = vcpu_vpmu(current);
+
+    if ( vpmu->arch_vpmu_ops )
+        return vpmu->arch_vpmu_ops->do_interrupt(regs);
+    return 0;
+}
+
+void vpmu_save(struct vcpu *v)
+{
+    struct vpmu_struct *vpmu = vcpu_vpmu(v);
+
+    if ( vpmu->arch_vpmu_ops )
+        vpmu->arch_vpmu_ops->arch_vpmu_save(v);
+}
+
+void vpmu_load(struct vcpu *v)
+{
+    struct vpmu_struct *vpmu = vcpu_vpmu(v);
+
+    if ( vpmu->arch_vpmu_ops )
+        vpmu->arch_vpmu_ops->arch_vpmu_load(v);
+}
+
+extern struct arch_vpmu_ops core2_vpmu_ops;
+inline void vpmu_initialise(struct vcpu *v)
+{
+    struct vpmu_struct *vpmu = vcpu_vpmu(v);
+
+    /* If this is not a fresh initialisation, release all resources
+     * before initialising again.
+     */
+    if ( vpmu->flags & VPMU_CONTEXT_ALLOCATED )
+        vpmu_destroy(v);
+
+    if ( current_cpu_data.x86 == 6 )
+    {
+        switch ( current_cpu_data.x86_model )
+        {
+        case 15:
+        case 23:
+            vpmu->arch_vpmu_ops = &core2_vpmu_ops;
+            dprintk(XENLOG_INFO,
+                    "Core 2 Duo CPU detected for guest PMU usage.\n");
+            break;
+        }
+    }
+
+    if ( !vpmu->arch_vpmu_ops )
+    {
+        dprintk(XENLOG_WARNING, "Unsupport CPU model for guest PMU usage.\n");
+        return;
+    }
+
+    vpmu->flags = 0;
+    vpmu->context = NULL;
+    vpmu->arch_vpmu_ops->arch_vpmu_initialise(v);
+}
+
+inline void vpmu_destroy(struct vcpu *v)
+{
+    struct vpmu_struct *vpmu = vcpu_vpmu(v);
+
+    if ( vpmu->arch_vpmu_ops )
+        vpmu->arch_vpmu_ops->arch_vpmu_destroy(v);
+}
+
diff -r 47b7ec3b4055 -r 6ea3db7ae24d xen/arch/x86/hvm/vmx/vpmu_core2.c
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/x86/hvm/vmx/vpmu_core2.c Wed Jan 30 09:59:27 2008 +0000
@@ -0,0 +1,469 @@
+/*
+ * vpmu_core2.c: CORE 2 specific PMU virtualization for HVM domain.
+ *
+ * Copyright (c) 2007, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * Author: Haitao Shan <haitao.shan@xxxxxxxxx>
+ */
+
+#include <xen/config.h>
+#include <xen/sched.h>
+#include <asm/system.h>
+#include <asm/regs.h>
+#include <asm/types.h>
+#include <asm/msr.h>
+#include <asm/msr-index.h>
+#include <asm/hvm/support.h>
+#include <asm/hvm/vlapic.h>
+#include <asm/hvm/vmx/vmx.h>
+#include <asm/hvm/vmx/vmcs.h>
+#include <public/sched.h>
+#include <public/hvm/save.h>
+#include <asm/hvm/vmx/vpmu.h>
+#include <asm/hvm/vmx/vpmu_core2.h>
+
+static int arch_pmc_cnt = 0;
+
+static int core2_get_pmc_count(void)
+{
+    u32 eax, ebx, ecx, edx;
+
+    if ( arch_pmc_cnt )
+        return arch_pmc_cnt;
+
+    cpuid(0xa, &eax, &ebx, &ecx, &edx);
+    return arch_pmc_cnt = (eax & 0xff00) >> 8;
+}
+
+static int is_core2_vpmu_msr(u32 msr_index, int *type, int *index)
+{
+    int i;
+
+    for ( i=0; i < core2_counters.num; i++ )
+        if ( core2_counters.msr[i] == msr_index )
+        {
+            *type = MSR_TYPE_COUNTER;
+            *index = i;
+            return 1;
+        }
+    for ( i=0; i < core2_ctrls.num; i++ )
+        if ( core2_ctrls.msr[i] == msr_index )
+        {
+            *type = MSR_TYPE_CTRL;
+            *index = i;
+            return 1;
+        }
+
+    if ( msr_index == MSR_CORE_PERF_GLOBAL_CTRL ||
+         msr_index == MSR_CORE_PERF_GLOBAL_STATUS ||
+         msr_index == MSR_CORE_PERF_GLOBAL_OVF_CTRL )
+    {
+        *type = MSR_TYPE_GLOBAL;
+        return 1;
+    }
+
+    if ( msr_index >= MSR_IA32_PERFCTR0 &&
+         msr_index < MSR_IA32_PERFCTR0 + core2_get_pmc_count() )
+    {
+        *type = MSR_TYPE_ARCH_COUNTER;
+        *index = msr_index - MSR_IA32_PERFCTR0;
+        return 1;
+    }
+    if ( msr_index >= MSR_P6_EVNTSEL0 &&
+         msr_index < MSR_P6_EVNTSEL0 + core2_get_pmc_count() )
+    {
+        *type = MSR_TYPE_ARCH_CTRL;
+        *index = msr_index - MSR_P6_EVNTSEL0;
+        return 1;
+    }
+    return 0;
+}
+
+static void core2_vpmu_set_msr_bitmap(char *msr_bitmap)
+{
+    int i;
+
+    /* Allow the guest to read/write PMU counter MSRs directly. */
+    for ( i=0; i < core2_counters.num; i++ )
+    {
+        clear_bit(msraddr_to_bitpos(core2_counters.msr[i]), msr_bitmap);
+        clear_bit(msraddr_to_bitpos(core2_counters.msr[i]), msr_bitmap + 0x800);
+    }
+    for ( i=0; i < core2_get_pmc_count(); i++ )
+    {
+        clear_bit(msraddr_to_bitpos(MSR_IA32_PERFCTR0+i), msr_bitmap);
+        clear_bit(msraddr_to_bitpos(MSR_IA32_PERFCTR0+i), msr_bitmap + 0x800);
+    }
+    /* Allow the guest to read PMU non-global control MSRs directly. */
+    for ( i=0; i < core2_ctrls.num; i++ )
+        clear_bit(msraddr_to_bitpos(core2_ctrls.msr[i]), msr_bitmap);
+    for ( i=0; i < core2_get_pmc_count(); i++ )
+        clear_bit(msraddr_to_bitpos(MSR_P6_EVNTSEL0+i), msr_bitmap);
+}
+
+static void core2_vpmu_unset_msr_bitmap(char *msr_bitmap)
+{
+    int i;
+
+    /* Undo all the changes to msr bitmap. */
+    for ( i=0; i < core2_counters.num; i++ )
+    {
+        set_bit(msraddr_to_bitpos(core2_counters.msr[i]), msr_bitmap);
+        set_bit(msraddr_to_bitpos(core2_counters.msr[i]), msr_bitmap + 0x800);
+    }
+    for ( i=0; i < core2_get_pmc_count(); i++ )
+    {
+        set_bit(msraddr_to_bitpos(MSR_IA32_PERFCTR0+i), msr_bitmap);
+        set_bit(msraddr_to_bitpos(MSR_IA32_PERFCTR0+i), msr_bitmap + 0x800);
+    }
+    for ( i=0; i < core2_ctrls.num; i++ )
+        set_bit(msraddr_to_bitpos(core2_ctrls.msr[i]), msr_bitmap);
+    for ( i=0; i < core2_get_pmc_count(); i++ )
+        set_bit(msraddr_to_bitpos(MSR_P6_EVNTSEL0+i), msr_bitmap);
+}
+
+static inline void __core2_vpmu_save(struct vcpu *v)
+{
+    int i;
+    struct core2_vpmu_context *core2_vpmu_cxt = vcpu_vpmu(v)->context;
+
+    for ( i=0; i < core2_counters.num; i++ )
+        rdmsrl(core2_counters.msr[i], core2_vpmu_cxt->counters[i]);
+    for ( i=0; i < core2_get_pmc_count(); i++ )
+        rdmsrl(MSR_IA32_PERFCTR0+i, core2_vpmu_cxt->arch_msr_pair[i].counter);
+    core2_vpmu_cxt->hw_lapic_lvtpc = apic_read(APIC_LVTPC);
+    apic_write(APIC_LVTPC, LVTPC_HVM_PMU | APIC_LVT_MASKED);
+}
+
+static void core2_vpmu_save(struct vcpu *v)
+{
+    struct vpmu_struct *vpmu = vcpu_vpmu(v);
+
+    if ( !((vpmu->flags & VPMU_CONTEXT_ALLOCATED) &&
+           (vpmu->flags & VPMU_CONTEXT_LOADED)) )
+        return;
+
+    __core2_vpmu_save(v);
+
+    /* Unset the PMU MSR bitmap so later accesses trap, enabling lazy reload. */
+    if ( !(vpmu->flags & VPMU_RUNNING) && cpu_has_vmx_msr_bitmap )
+        core2_vpmu_unset_msr_bitmap(v->arch.hvm_vmx.msr_bitmap);
+
+    vpmu->flags &= ~VPMU_CONTEXT_LOADED;
+    return;
+}
+
+static inline void __core2_vpmu_load(struct vcpu *v)
+{
+    int i;
+    struct core2_vpmu_context *core2_vpmu_cxt = vcpu_vpmu(v)->context;
+
+    for ( i=0; i < core2_counters.num; i++ )
+        wrmsrl(core2_counters.msr[i], core2_vpmu_cxt->counters[i]);
+    for ( i=0; i < core2_get_pmc_count(); i++ )
+        wrmsrl(MSR_IA32_PERFCTR0+i, core2_vpmu_cxt->arch_msr_pair[i].counter);
+
+    for ( i=0; i < core2_ctrls.num; i++ )
+        wrmsrl(core2_ctrls.msr[i], core2_vpmu_cxt->ctrls[i]);
+    for ( i=0; i < core2_get_pmc_count(); i++ )
+        wrmsrl(MSR_P6_EVNTSEL0+i, core2_vpmu_cxt->arch_msr_pair[i].control);
+
+    apic_write_around(APIC_LVTPC, core2_vpmu_cxt->hw_lapic_lvtpc);
+}
+
+static void core2_vpmu_load(struct vcpu *v)
+{
+    struct vpmu_struct *vpmu = vcpu_vpmu(v);
+
+    /* Only load the PMU context immediately when the PMU is actively counting. */
+    if ( !((vpmu->flags & VPMU_CONTEXT_ALLOCATED) &&
+           (vpmu->flags & VPMU_RUNNING)) )
+        return;
+    __core2_vpmu_load(v);
+    vpmu->flags |= VPMU_CONTEXT_LOADED;
+}
+
+static int core2_vpmu_alloc_resource(struct vcpu *v)
+{
+    struct vpmu_struct *vpmu = vcpu_vpmu(v);
+    struct core2_vpmu_context *core2_vpmu_cxt;
+    struct core2_pmu_enable *pmu_enable;
+
+    if ( !acquire_pmu_ownership(PMU_OWNER_HVM) )
+        return 0;
+
+    wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
+    if ( vmx_add_host_load_msr(v, MSR_CORE_PERF_GLOBAL_CTRL) )
+        return 0;
+
+    if ( vmx_add_guest_msr(v, MSR_CORE_PERF_GLOBAL_CTRL) )
+        return 0;
+    vmx_write_guest_msr(v, MSR_CORE_PERF_GLOBAL_CTRL, -1ULL);
+
+    pmu_enable = xmalloc_bytes(sizeof(struct core2_pmu_enable) +
+                 (core2_get_pmc_count()-1)*sizeof(char));
+    if ( !pmu_enable )
+        goto out1;
+    memset(pmu_enable, 0, sizeof(struct core2_pmu_enable) +
+                 (core2_get_pmc_count()-1)*sizeof(char));
+
+    core2_vpmu_cxt = xmalloc_bytes(sizeof(struct core2_vpmu_context) +
+                    (core2_get_pmc_count()-1)*sizeof(struct arch_msr_pair));
+    if ( !core2_vpmu_cxt )
+        goto out2;
+    memset(core2_vpmu_cxt, 0, sizeof(struct core2_vpmu_context) +
+                    (core2_get_pmc_count()-1)*sizeof(struct arch_msr_pair));
+    core2_vpmu_cxt->pmu_enable = pmu_enable;
+    vpmu->context = (void *)core2_vpmu_cxt;
+
+    return 1;
+ out2:
+    xfree(pmu_enable);
+ out1:
+    dprintk(XENLOG_WARNING, "Insufficient memory for PMU, PMU feature is \
+            unavailable on domain %d vcpu %d.\n",
+            v->vcpu_id, v->domain->domain_id);
+    return 0;
+}
+
+static void core2_vpmu_save_msr_context(struct vcpu *v, int type,
+                                       int index, u64 msr_data)
+{
+    struct core2_vpmu_context *core2_vpmu_cxt = vcpu_vpmu(v)->context;
+
+    switch ( type )
+    {
+    case MSR_TYPE_CTRL:
+        core2_vpmu_cxt->ctrls[index] = msr_data;
+        break;
+    case MSR_TYPE_ARCH_CTRL:
+        core2_vpmu_cxt->arch_msr_pair[index].control = msr_data;
+        break;
+    }
+}
+
+static int core2_vpmu_msr_common_check(u32 msr_index, int *type, int *index)
+{
+    struct vpmu_struct *vpmu = vcpu_vpmu(current);
+
+    if ( !is_core2_vpmu_msr(msr_index, type, index) )
+        return 0;
+
+    if ( unlikely(!(vpmu->flags & VPMU_CONTEXT_ALLOCATED)) &&
+         !core2_vpmu_alloc_resource(current) )
+        return 0;
+    vpmu->flags |= VPMU_CONTEXT_ALLOCATED;
+
+    /* Do the lazy load work. */
+    if ( !(vpmu->flags & VPMU_CONTEXT_LOADED) )
+    {
+        __core2_vpmu_load(current);
+        vpmu->flags |= VPMU_CONTEXT_LOADED;
+        if ( cpu_has_vmx_msr_bitmap )
+            core2_vpmu_set_msr_bitmap(current->arch.hvm_vmx.msr_bitmap);
+    }
+    return 1;
+}
+
+static int core2_vpmu_do_wrmsr(struct cpu_user_regs *regs)
+{
+    u32 ecx = regs->ecx;
+    u64 msr_content, global_ctrl, non_global_ctrl;
+    char pmu_enable = 0;
+    int i, tmp;
+    int type = -1, index = -1;
+    struct vcpu *v = current;
+    struct vpmu_struct *vpmu = vcpu_vpmu(v);
+    struct core2_vpmu_context *core2_vpmu_cxt = NULL;
+
+    if ( !core2_vpmu_msr_common_check(ecx, &type, &index) )
+        return 0;
+
+    msr_content = (u32)regs->eax | ((u64)regs->edx << 32);
+    core2_vpmu_cxt = vpmu->context;
+    switch ( ecx )
+    {
+    case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
+        core2_vpmu_cxt->global_ovf_status &= ~msr_content;
+        return 1;
+    case MSR_CORE_PERF_GLOBAL_STATUS:
+        dprintk(XENLOG_INFO, "Can not write readonly MSR: \
+                            MSR_PERF_GLOBAL_STATUS(0x38E)!\n");
+        vmx_inject_hw_exception(current, TRAP_gp_fault, 0);
+        return 1;
+    case MSR_IA32_PEBS_ENABLE:
+        if ( msr_content & 1 )
+            dprintk(XENLOG_WARNING, "Guest is trying to enable PEBS, \
+                    which is not supported.\n");
+        return 1;
+    case MSR_IA32_DS_AREA:
+        dprintk(XENLOG_WARNING, "Guest setting of DTS is ignored.\n");
+        return 1;
+    case MSR_CORE_PERF_GLOBAL_CTRL:
+        global_ctrl = msr_content;
+        for ( i = 0; i < core2_get_pmc_count(); i++ )
+        {
+            rdmsrl(MSR_P6_EVNTSEL0+i, non_global_ctrl);
+            core2_vpmu_cxt->pmu_enable->arch_pmc_enable[i] =
+                    global_ctrl & (non_global_ctrl >> 22) & 1;
+            global_ctrl >>= 1;
+        }
+
+        rdmsrl(MSR_CORE_PERF_FIXED_CTR_CTRL, non_global_ctrl);
+        global_ctrl = msr_content >> 32;
+        for ( i = 0; i < 3; i++ )
+        {
+            core2_vpmu_cxt->pmu_enable->fixed_ctr_enable[i] =
+                (global_ctrl & 1) & ((non_global_ctrl & 0x3)? 1: 0);
+            non_global_ctrl >>= 4;
+            global_ctrl >>= 1;
+        }
+        break;
+    case MSR_CORE_PERF_FIXED_CTR_CTRL:
+        non_global_ctrl = msr_content;
+        vmx_read_guest_msr(v, MSR_CORE_PERF_GLOBAL_CTRL, &global_ctrl);
+        global_ctrl >>= 32;
+        for ( i = 0; i < 3; i++ )
+        {
+            core2_vpmu_cxt->pmu_enable->fixed_ctr_enable[i] =
+                (global_ctrl & 1) & ((non_global_ctrl & 0x3)? 1: 0);
+            non_global_ctrl >>= 4;
+            global_ctrl >>= 1;
+        }
+        break;
+    default:
+        tmp = ecx - MSR_P6_EVNTSEL0;
+        vmx_read_guest_msr(v, MSR_CORE_PERF_GLOBAL_CTRL, &global_ctrl);
+        if ( tmp >= 0 && tmp < core2_get_pmc_count() )
+            core2_vpmu_cxt->pmu_enable->arch_pmc_enable[tmp] =
+                (global_ctrl >> tmp) & (msr_content >> 22) & 1;
+    }
+
+    for ( i = 0; i < 3; i++ )
+        pmu_enable |= core2_vpmu_cxt->pmu_enable->fixed_ctr_enable[i];
+    for ( i = 0; i < core2_get_pmc_count(); i++ )
+        pmu_enable |= core2_vpmu_cxt->pmu_enable->arch_pmc_enable[i];
+    if ( pmu_enable )
+        vpmu->flags |= VPMU_RUNNING;
+    else
+        vpmu->flags &= ~VPMU_RUNNING;
+
+    /* Set up the LVTPC entry in the local APIC. */
+    if ( vpmu->flags & VPMU_RUNNING &&
+         is_vlapic_lvtpc_enabled(vcpu_vlapic(v)) )
+        apic_write_around(APIC_LVTPC, LVTPC_HVM_PMU);
+    else
+        apic_write_around(APIC_LVTPC, LVTPC_HVM_PMU | APIC_LVT_MASKED);
+
+    core2_vpmu_save_msr_context(v, type, index, msr_content);
+    if ( type != MSR_TYPE_GLOBAL )
+        wrmsrl(ecx, msr_content);
+    else
+        vmx_write_guest_msr(v, MSR_CORE_PERF_GLOBAL_CTRL, msr_content);
+
+    return 1;
+}
+
+static int core2_vpmu_do_rdmsr(struct cpu_user_regs *regs)
+{
+    u64 msr_content = 0;
+    int type = -1, index = -1;
+    struct vcpu *v = current;
+    struct vpmu_struct *vpmu = vcpu_vpmu(v);
+    struct core2_vpmu_context *core2_vpmu_cxt = NULL;
+
+    if ( !core2_vpmu_msr_common_check(regs->ecx, &type, &index) )
+        return 0;
+
+    core2_vpmu_cxt = vpmu->context;
+    switch ( regs->ecx )
+    {
+    case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
+        break;
+    case MSR_CORE_PERF_GLOBAL_STATUS:
+        msr_content = core2_vpmu_cxt->global_ovf_status;
+        break;
+    case MSR_CORE_PERF_GLOBAL_CTRL:
+        vmx_read_guest_msr(v, MSR_CORE_PERF_GLOBAL_CTRL, &msr_content);
+        break;
+    default:
+        rdmsrl(regs->ecx, msr_content);
+    }
+
+    regs->eax = msr_content & 0xFFFFFFFF;
+    regs->edx = msr_content >> 32;
+    return 1;
+}
+
+static int core2_vpmu_do_interrupt(struct cpu_user_regs *regs)
+{
+    struct vcpu *v = current;
+    u64 msr_content;
+    u32 vlapic_lvtpc;
+    unsigned char int_vec;
+    struct vpmu_struct *vpmu = vcpu_vpmu(v);
+    struct core2_vpmu_context *core2_vpmu_cxt = vpmu->context;
+    struct vlapic *vlapic = vcpu_vlapic(v);
+
+    rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, msr_content);
+    if ( !msr_content )
+        return 0;
+    core2_vpmu_cxt->global_ovf_status |= msr_content;
+    wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, 0xC000000700000003ULL);
+
+    apic_write_around(APIC_LVTPC, apic_read(APIC_LVTPC) & ~APIC_LVT_MASKED);
+
+    if ( !is_vlapic_lvtpc_enabled(vlapic) )
+        return 1;
+
+    vlapic_lvtpc = vlapic_get_reg(vlapic, APIC_LVTPC);
+    int_vec = vlapic_lvtpc & APIC_VECTOR_MASK;
+    vlapic_set_reg(vlapic, APIC_LVTPC, vlapic_lvtpc | APIC_LVT_MASKED);
+    if ( GET_APIC_DELIVERY_MODE(vlapic_lvtpc) == APIC_MODE_FIXED )
+        vlapic_set_irq(vcpu_vlapic(v), int_vec, 0);
+    else
+        test_and_set_bool(v->nmi_pending);
+    return 1;
+}
+
+static void core2_vpmu_initialise(struct vcpu *v)
+{
+}
+
+static void core2_vpmu_destroy(struct vcpu *v)
+{
+    struct vpmu_struct *vpmu = vcpu_vpmu(v);
+    struct core2_vpmu_context *core2_vpmu_cxt = vpmu->context;
+
+    if ( !vpmu->flags & VPMU_CONTEXT_ALLOCATED )
+        return;
+    xfree(core2_vpmu_cxt->pmu_enable);
+    xfree(vpmu->context);
+    if ( cpu_has_vmx_msr_bitmap )
+        core2_vpmu_unset_msr_bitmap(v->arch.hvm_vmx.msr_bitmap);
+    release_pmu_ownership(PMU_OWNER_HVM);
+}
+
+struct arch_vpmu_ops core2_vpmu_ops = {
+    .do_wrmsr = core2_vpmu_do_wrmsr,
+    .do_rdmsr = core2_vpmu_do_rdmsr,
+    .do_interrupt = core2_vpmu_do_interrupt,
+    .arch_vpmu_initialise = core2_vpmu_initialise,
+    .arch_vpmu_destroy = core2_vpmu_destroy,
+    .arch_vpmu_save = core2_vpmu_save,
+    .arch_vpmu_load = core2_vpmu_load
+};
+
diff -r 47b7ec3b4055 -r 6ea3db7ae24d xen/arch/x86/i8259.c
--- a/xen/arch/x86/i8259.c      Wed Jan 30 09:38:10 2008 +0000
+++ b/xen/arch/x86/i8259.c      Wed Jan 30 09:59:27 2008 +0000
@@ -72,6 +72,7 @@ BUILD_SMP_INTERRUPT(apic_timer_interrupt
 BUILD_SMP_INTERRUPT(apic_timer_interrupt,LOCAL_TIMER_VECTOR)
 BUILD_SMP_INTERRUPT(error_interrupt,ERROR_APIC_VECTOR)
 BUILD_SMP_INTERRUPT(spurious_interrupt,SPURIOUS_APIC_VECTOR)
+BUILD_SMP_INTERRUPT(pmu_apic_interrupt,PMU_APIC_VECTOR)
 BUILD_SMP_INTERRUPT(thermal_interrupt,THERMAL_APIC_VECTOR)
 
 #define IRQ(x,y) \
diff -r 47b7ec3b4055 -r 6ea3db7ae24d xen/arch/x86/oprofile/op_model_ppro.c
--- a/xen/arch/x86/oprofile/op_model_ppro.c     Wed Jan 30 09:38:10 2008 +0000
+++ b/xen/arch/x86/oprofile/op_model_ppro.c     Wed Jan 30 09:59:27 2008 +0000
@@ -41,6 +41,7 @@
 #define CTRL_SET_EVENT(val, e) (val |= e)
 
 static unsigned long reset_value[NUM_COUNTERS];
+int ppro_has_global_ctrl = 0;
  
 static void ppro_fill_in_addresses(struct op_msrs * const msrs)
 {
@@ -134,6 +135,11 @@ static void ppro_start(struct op_msrs co
                        CTRL_WRITE(low, high, msrs, i);
                }
        }
+    /* The Global Control MSR is enabled by default at system power-on.
+     * However, this may no longer hold by the time xenoprof starts running.
+     */
+    if ( ppro_has_global_ctrl )
+        wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, (1<<NUM_COUNTERS) - 1);
 }
 
 
@@ -149,6 +155,8 @@ static void ppro_stop(struct op_msrs con
                CTRL_SET_INACTIVE(low);
                CTRL_WRITE(low, high, msrs, i);
        }
+    if ( ppro_has_global_ctrl )
+        wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
 }
 
 
diff -r 47b7ec3b4055 -r 6ea3db7ae24d xen/common/xenoprof.c
--- a/xen/common/xenoprof.c     Wed Jan 30 09:38:10 2008 +0000
+++ b/xen/common/xenoprof.c     Wed Jan 30 09:59:27 2008 +0000
@@ -23,6 +23,10 @@
 /* Lock protecting the following global state */
 static DEFINE_SPINLOCK(xenoprof_lock);
 
+static DEFINE_SPINLOCK(pmu_owner_lock);
+int pmu_owner = 0;
+int pmu_hvm_refcount = 0;
+
 static struct domain *active_domains[MAX_OPROF_DOMAINS];
 static int active_ready[MAX_OPROF_DOMAINS];
 static unsigned int adomains;
@@ -43,6 +47,37 @@ static u64 passive_samples;
 static u64 passive_samples;
 static u64 idle_samples;
 static u64 others_samples;
+
+int acquire_pmu_ownership(int pmu_ownership)
+{
+    spin_lock(&pmu_owner_lock);
+    if ( pmu_owner == PMU_OWNER_NONE )
+    {
+        pmu_owner = pmu_ownership;
+        goto out;
+    }
+
+    if ( pmu_owner == pmu_ownership )
+        goto out;
+
+    spin_unlock(&pmu_owner_lock);
+    return 0;
+ out:
+    if ( pmu_owner == PMU_OWNER_HVM )
+        pmu_hvm_refcount++;
+    spin_unlock(&pmu_owner_lock);
+    return 1;
+}
+
+void release_pmu_ownership(int pmu_ownership)
+{
+    spin_lock(&pmu_owner_lock);
+    if ( pmu_ownership == PMU_OWNER_HVM )
+        pmu_hvm_refcount--;
+    if ( !pmu_hvm_refcount )
+        pmu_owner = PMU_OWNER_NONE;
+    spin_unlock(&pmu_owner_lock);
+}
 
 int is_active(struct domain *d)
 {
@@ -649,6 +684,11 @@ int do_xenoprof_op(int op, XEN_GUEST_HAN
         break;
 
     case XENOPROF_get_buffer:
+        if ( !acquire_pmu_ownership(PMU_OWNER_XENOPROF) )
+        {
+            ret = -EBUSY;
+            break;
+        }
         ret = xenoprof_op_get_buffer(arg);
         break;
 
@@ -786,6 +826,7 @@ int do_xenoprof_op(int op, XEN_GUEST_HAN
             break;
         x = current->domain->xenoprof;
         unshare_xenoprof_page_with_guest(x);
+        release_pmu_ownership(PMU_OWNER_XENOPROF);
         break;
     }
 
diff -r 47b7ec3b4055 -r 6ea3db7ae24d xen/include/asm-x86/hvm/hvm.h
--- a/xen/include/asm-x86/hvm/hvm.h     Wed Jan 30 09:38:10 2008 +0000
+++ b/xen/include/asm-x86/hvm/hvm.h     Wed Jan 30 09:59:27 2008 +0000
@@ -119,6 +119,7 @@ struct hvm_function_table {
     void (*init_hypercall_page)(struct domain *d, void *hypercall_page);
 
     int  (*event_pending)(struct vcpu *v);
+    int  (*do_pmu_interrupt)(struct cpu_user_regs *regs);
 
     int  (*cpu_up)(void);
     void (*cpu_down)(void);
@@ -233,6 +234,11 @@ static inline int hvm_event_pending(stru
 static inline int hvm_event_pending(struct vcpu *v)
 {
     return hvm_funcs.event_pending(v);
+}
+
+static inline int hvm_do_pmu_interrupt(struct cpu_user_regs *regs)
+{
+    return hvm_funcs.do_pmu_interrupt(regs);
 }
 
 /* These reserved bits in lower 32 remain 0 after any load of CR0 */
diff -r 47b7ec3b4055 -r 6ea3db7ae24d xen/include/asm-x86/hvm/vlapic.h
--- a/xen/include/asm-x86/hvm/vlapic.h  Wed Jan 30 09:38:10 2008 +0000
+++ b/xen/include/asm-x86/hvm/vlapic.h  Wed Jan 30 09:59:27 2008 +0000
@@ -71,6 +71,12 @@ static inline void vlapic_set_reg(
     *((uint32_t *)(&vlapic->regs->data[reg])) = val;
 }
 
+static inline int is_vlapic_lvtpc_enabled(struct vlapic *vlapic)
+{
+    return vlapic_enabled(vlapic) &&
+           !(vlapic_get_reg(vlapic, APIC_LVTPC) & APIC_LVT_MASKED);
+}
+
 int vlapic_set_irq(struct vlapic *vlapic, uint8_t vec, uint8_t trig);
 
 int vlapic_has_pending_irq(struct vcpu *v);
diff -r 47b7ec3b4055 -r 6ea3db7ae24d xen/include/asm-x86/hvm/vmx/vmcs.h
--- a/xen/include/asm-x86/hvm/vmx/vmcs.h        Wed Jan 30 09:38:10 2008 +0000
+++ b/xen/include/asm-x86/hvm/vmx/vmcs.h        Wed Jan 30 09:59:27 2008 +0000
@@ -22,6 +22,7 @@
 #include <asm/config.h>
 #include <asm/hvm/io.h>
 #include <asm/hvm/vmx/cpu.h>
+#include <asm/hvm/vmx/vpmu.h>
 
 #ifdef VMXASSIST
 #include <public/hvm/vmx_assist.h>
@@ -75,6 +76,9 @@ struct arch_vmx_struct {
 
     /* Cache of cpu execution control. */
     u32                  exec_control;
+
+    /* PMU */
+    struct vpmu_struct   vpmu;
 
 #ifdef __x86_64__
     struct vmx_msr_state msr_state;
diff -r 47b7ec3b4055 -r 6ea3db7ae24d xen/include/asm-x86/hvm/vmx/vpmu.h
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-x86/hvm/vmx/vpmu.h        Wed Jan 30 09:59:27 2008 +0000
@@ -0,0 +1,83 @@
+/*
+ * vpmu.h: PMU virtualization for HVM domain.
+ *
+ * Copyright (c) 2007, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * Author: Haitao Shan <haitao.shan@xxxxxxxxx>
+ */
+
+#ifndef __ASM_X86_HVM_VPMU_H_
+#define __ASM_X86_HVM_VPMU_H_
+
+#define msraddr_to_bitpos(x) (((x)&0xffff) + ((x)>>31)*0x2000)
+#define vcpu_vpmu(vcpu)   (&(vcpu)->arch.hvm_vcpu.u.vmx.vpmu)
+#define vpmu_vcpu(vpmu)   (container_of((vpmu), struct vcpu, \
+                                          arch.hvm_vcpu.u.vmx.vpmu))
+#define vpmu_domain(vpmu) (vpmu_vcpu(vpmu)->domain)
+
+#define MSR_TYPE_COUNTER            0
+#define MSR_TYPE_CTRL               1
+#define MSR_TYPE_GLOBAL             2
+#define MSR_TYPE_ARCH_COUNTER       3
+#define MSR_TYPE_ARCH_CTRL          4
+
+#define LVTPC_HVM_PMU            0xf8
+
+struct pmumsr {
+    unsigned int num;
+    u32 *msr;
+};
+
+struct msr_load_store_entry {
+    u32 msr_index;
+    u32 msr_reserved;
+    u64 msr_data;
+};
+
+/* Arch specific operations shared by all vpmus */
+struct arch_vpmu_ops {
+    int (*do_wrmsr)(struct cpu_user_regs *regs);
+    int (*do_rdmsr)(struct cpu_user_regs *regs);
+    int (*do_interrupt)(struct cpu_user_regs *regs);
+    void (*arch_vpmu_initialise)(struct vcpu *v);
+    void (*arch_vpmu_destroy)(struct vcpu *v);
+    void (*arch_vpmu_save)(struct vcpu *v);
+    void (*arch_vpmu_load)(struct vcpu *v);
+};
+
+struct vpmu_struct {
+    u32 flags;
+    void *context;
+    struct arch_vpmu_ops *arch_vpmu_ops;
+};
+
+#define VPMU_CONTEXT_ALLOCATED              0x1
+#define VPMU_CONTEXT_LOADED                 0x2
+#define VPMU_RUNNING                        0x4
+
+inline int vpmu_do_wrmsr(struct cpu_user_regs *regs);
+inline int vpmu_do_rdmsr(struct cpu_user_regs *regs);
+inline int vpmu_do_interrupt(struct cpu_user_regs *regs);
+inline void vpmu_initialise(struct vcpu *v);
+inline void vpmu_destroy(struct vcpu *v);
+inline void vpmu_save(struct vcpu *v);
+inline void vpmu_load(struct vcpu *v);
+
+extern int acquire_pmu_ownership(int pmu_ownership);
+extern void release_pmu_ownership(int pmu_ownership);
+
+#endif /* __ASM_X86_HVM_VPMU_H_*/
+
diff -r 47b7ec3b4055 -r 6ea3db7ae24d xen/include/asm-x86/hvm/vmx/vpmu_core2.h
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-x86/hvm/vmx/vpmu_core2.h  Wed Jan 30 09:59:27 2008 +0000
@@ -0,0 +1,68 @@
+
+/*
+ * vpmu_core2.h: CORE 2 specific PMU virtualization for HVM domain.
+ *
+ * Copyright (c) 2007, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * Author: Haitao Shan <haitao.shan@xxxxxxxxx>
+ */
+
+#ifndef __ASM_X86_HVM_VPMU_CORE_H_
+#define __ASM_X86_HVM_VPMU_CORE_H_
+
+/* Core 2 Non-architectural Performance Counter MSRs. */
+u32 core2_counters_msr[] =   {
+    MSR_CORE_PERF_FIXED_CTR0,
+    MSR_CORE_PERF_FIXED_CTR1,
+    MSR_CORE_PERF_FIXED_CTR2};
+
+/* Core 2 Non-architectural Performance Control MSRs. */
+u32 core2_ctrls_msr[] = {
+    MSR_CORE_PERF_FIXED_CTR_CTRL,
+    MSR_IA32_PEBS_ENABLE,
+    MSR_IA32_DS_AREA};
+
+struct pmumsr core2_counters = {
+    3,
+    core2_counters_msr
+};
+
+struct pmumsr core2_ctrls = {
+    3,
+    core2_ctrls_msr
+};
+
+struct arch_msr_pair {
+    u64 counter;
+    u64 control;
+};
+
+struct core2_pmu_enable {
+    char fixed_ctr_enable[3];
+    char arch_pmc_enable[1];
+};
+
+struct core2_vpmu_context {
+    struct core2_pmu_enable *pmu_enable;
+    u64 counters[3];
+    u64 ctrls[3];
+    u64 global_ovf_status;
+    u32 hw_lapic_lvtpc;
+    struct arch_msr_pair arch_msr_pair[1];
+};
+
+#endif /* __ASM_X86_HVM_VPMU_CORE_H_ */
+
diff -r 47b7ec3b4055 -r 6ea3db7ae24d xen/include/asm-x86/irq.h
--- a/xen/include/asm-x86/irq.h Wed Jan 30 09:38:10 2008 +0000
+++ b/xen/include/asm-x86/irq.h Wed Jan 30 09:59:27 2008 +0000
@@ -28,6 +28,7 @@ fastcall void call_function_interrupt(vo
 fastcall void call_function_interrupt(void);
 fastcall void apic_timer_interrupt(void);
 fastcall void error_interrupt(void);
+fastcall void pmu_apic_interrupt(void);
 fastcall void spurious_interrupt(void);
 fastcall void thermal_interrupt(void);
 
diff -r 47b7ec3b4055 -r 6ea3db7ae24d xen/include/asm-x86/mach-default/irq_vectors.h
--- a/xen/include/asm-x86/mach-default/irq_vectors.h    Wed Jan 30 09:38:10 2008 +0000
+++ b/xen/include/asm-x86/mach-default/irq_vectors.h    Wed Jan 30 09:59:27 2008 +0000
@@ -9,13 +9,14 @@
 #define CALL_FUNCTION_VECTOR   0xfb
 #define THERMAL_APIC_VECTOR    0xfa
 #define LOCAL_TIMER_VECTOR     0xf9
+#define PMU_APIC_VECTOR        0xf8
 
 /*
  * High-priority dynamically-allocated vectors. For interrupts that
  * must be higher priority than any guest-bound interrupt.
  */
 #define FIRST_HIPRIORITY_VECTOR        0xf0
-#define LAST_HIPRIORITY_VECTOR  0xf8
+#define LAST_HIPRIORITY_VECTOR  0xf7
 
 /* Legacy PIC uses vectors 0xe0-0xef. */
 #define FIRST_LEGACY_VECTOR    0xe0
diff -r 47b7ec3b4055 -r 6ea3db7ae24d xen/include/xen/xenoprof.h
--- a/xen/include/xen/xenoprof.h        Wed Jan 30 09:38:10 2008 +0000
+++ b/xen/include/xen/xenoprof.h        Wed Jan 30 09:59:27 2008 +0000
@@ -69,4 +69,10 @@ int xenoprof_add_trace(struct domain *d,
 int xenoprof_add_trace(struct domain *d, struct vcpu *v, 
                        unsigned long eip, int mode);
 
+#define PMU_OWNER_NONE          0
+#define PMU_OWNER_XENOPROF      1
+#define PMU_OWNER_HVM           2
+int acquire_pmu_ownership(int pmu_ownership);
+void release_pmu_ownership(int pmu_ownership);
+
 #endif  /* __XEN__XENOPROF_H__ */
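
One further note on the ownership helpers added to xenoprof.c: the physical counters can be driven either by xenoprof or by an HVM guest's vPMU, never both, so each side brackets its use of the hardware with acquire/release. A hypothetical caller on the HVM side (sketch only; the real call sites are core2_vpmu_alloc_resource(), core2_vpmu_destroy() and do_xenoprof_op() above) would follow this pattern:

    /* Sketch of the ownership protocol introduced by this patch. */
    if ( !acquire_pmu_ownership(PMU_OWNER_HVM) )
        return 0;   /* xenoprof (or another owner) already holds the PMU */

    /* ... program and use the physical counters for this HVM vcpu ... */

    release_pmu_ownership(PMU_OWNER_HVM);   /* drops the HVM refcount; the
                                             * owner reverts to PMU_OWNER_NONE
                                             * once the count reaches zero. */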
