[Xen-changelog] [xen-4.1-testing] hvm: vpmu: Add support for AMD Family 15h processors
# HG changeset patch
# User Jacob Shin <jacob.shin@xxxxxxx>
# Date 1334218093 -3600
# Node ID 7d9df818d302a593ab2e1b3e795ee8f369af96ae
# Parent  0aa6bc8f38a9a270f910f37e664ea6fcbece0073
hvm: vpmu: Add support for AMD Family 15h processors

AMD Family 15h CPU mirrors legacy K7 performance monitor counters to
a new location, and adds 2 new counters. This patch updates HVM VPMU
to take advantage of the new counters.

Signed-off-by: Jacob Shin <jacob.shin@xxxxxxx>
xen-unstable changeset:   23306:e787d4f2e5ac
xen-unstable date:        Mon May 09 09:54:46 2011 +0100

xenoprof: Add support for AMD Family 15h processors

AMD Family 15h CPU mirrors legacy K7 performance monitor counters to
a new location, and adds 2 new counters. This patch updates xenoprof
to take advantage of the new counters.

Signed-off-by: Jacob Shin <jacob.shin@xxxxxxx>

Rename fam15h -> amd_fam15h in a few places, as suggested by Jan Beulich.

Signed-off-by: Keir Fraser <keir@xxxxxxx>
xen-unstable changeset:   23305:014ee4e09644
xen-unstable date:        Mon May 09 09:53:07 2011 +0100

xenoprof: Update cpu_type to sync with upstream oprofile

Update xenoprof's cpu_type to match upstream oprofile. Currently AMD
Family 11h ~ Family 15h are broken due to string mismatches.

Signed-off-by: Jacob Shin <jacob.shin@xxxxxxx>
xen-unstable changeset:   23304:8981b582be3e
xen-unstable date:        Mon May 09 09:49:14 2011 +0100
---

diff -r 0aa6bc8f38a9 -r 7d9df818d302 xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c        Thu Apr 12 09:06:02 2012 +0100
+++ b/xen/arch/x86/hvm/svm/svm.c        Thu Apr 12 09:08:13 2012 +0100
@@ -1142,6 +1142,18 @@ static int svm_msr_read_intercept(unsign
     case MSR_K7_EVNTSEL1:
     case MSR_K7_EVNTSEL2:
     case MSR_K7_EVNTSEL3:
+    case MSR_AMD_FAM15H_PERFCTR0:
+    case MSR_AMD_FAM15H_PERFCTR1:
+    case MSR_AMD_FAM15H_PERFCTR2:
+    case MSR_AMD_FAM15H_PERFCTR3:
+    case MSR_AMD_FAM15H_PERFCTR4:
+    case MSR_AMD_FAM15H_PERFCTR5:
+    case MSR_AMD_FAM15H_EVNTSEL0:
+    case MSR_AMD_FAM15H_EVNTSEL1:
+    case MSR_AMD_FAM15H_EVNTSEL2:
+    case MSR_AMD_FAM15H_EVNTSEL3:
+    case MSR_AMD_FAM15H_EVNTSEL4:
+    case MSR_AMD_FAM15H_EVNTSEL5:
         vpmu_do_rdmsr(msr, msr_content);
         break;
 
@@ -1237,6 +1249,18 @@ static int svm_msr_write_intercept(unsig
     case MSR_K7_EVNTSEL1:
     case MSR_K7_EVNTSEL2:
     case MSR_K7_EVNTSEL3:
+    case MSR_AMD_FAM15H_PERFCTR0:
+    case MSR_AMD_FAM15H_PERFCTR1:
+    case MSR_AMD_FAM15H_PERFCTR2:
+    case MSR_AMD_FAM15H_PERFCTR3:
+    case MSR_AMD_FAM15H_PERFCTR4:
+    case MSR_AMD_FAM15H_PERFCTR5:
+    case MSR_AMD_FAM15H_EVNTSEL0:
+    case MSR_AMD_FAM15H_EVNTSEL1:
+    case MSR_AMD_FAM15H_EVNTSEL2:
+    case MSR_AMD_FAM15H_EVNTSEL3:
+    case MSR_AMD_FAM15H_EVNTSEL4:
+    case MSR_AMD_FAM15H_EVNTSEL5:
         vpmu_do_wrmsr(msr, msr_content);
         break;
 
diff -r 0aa6bc8f38a9 -r 7d9df818d302 xen/arch/x86/hvm/svm/vpmu.c
--- a/xen/arch/x86/hvm/svm/vpmu.c       Thu Apr 12 09:06:02 2012 +0100
+++ b/xen/arch/x86/hvm/svm/vpmu.c       Thu Apr 12 09:08:13 2012 +0100
@@ -36,7 +36,9 @@
 #include <public/hvm/save.h>
 #include <asm/hvm/vpmu.h>
 
-#define NUM_COUNTERS 4
+#define F10H_NUM_COUNTERS 4
+#define F15H_NUM_COUNTERS 6
+#define MAX_NUM_COUNTERS F15H_NUM_COUNTERS
 
 #define MSR_F10H_EVNTSEL_GO_SHIFT   40
 #define MSR_F10H_EVNTSEL_EN_SHIFT   22
@@ -47,6 +49,11 @@
 #define set_guest_mode(msr) (msr |= (1ULL << MSR_F10H_EVNTSEL_GO_SHIFT))
 #define is_overflowed(msr) (!((msr) & (1ULL << (MSR_F10H_COUNTER_LENGTH-1))))
 
+static int __read_mostly num_counters = 0;
+static u32 __read_mostly *counters = NULL;
+static u32 __read_mostly *ctrls = NULL;
+static bool_t __read_mostly k7_counters_mirrored = 0;
+
 /* PMU Counter MSRs. */
 u32 AMD_F10H_COUNTERS[] = {
     MSR_K7_PERFCTR0,
@@ -63,10 +70,28 @@ u32 AMD_F10H_CTRLS[] = {
     MSR_K7_EVNTSEL3
 };
 
+u32 AMD_F15H_COUNTERS[] = {
+    MSR_AMD_FAM15H_PERFCTR0,
+    MSR_AMD_FAM15H_PERFCTR1,
+    MSR_AMD_FAM15H_PERFCTR2,
+    MSR_AMD_FAM15H_PERFCTR3,
+    MSR_AMD_FAM15H_PERFCTR4,
+    MSR_AMD_FAM15H_PERFCTR5
+};
+
+u32 AMD_F15H_CTRLS[] = {
+    MSR_AMD_FAM15H_EVNTSEL0,
+    MSR_AMD_FAM15H_EVNTSEL1,
+    MSR_AMD_FAM15H_EVNTSEL2,
+    MSR_AMD_FAM15H_EVNTSEL3,
+    MSR_AMD_FAM15H_EVNTSEL4,
+    MSR_AMD_FAM15H_EVNTSEL5
+};
+
 /* storage for context switching */
 struct amd_vpmu_context {
-    u64 counters[NUM_COUNTERS];
-    u64 ctrls[NUM_COUNTERS];
+    u64 counters[MAX_NUM_COUNTERS];
+    u64 ctrls[MAX_NUM_COUNTERS];
     u32 hw_lapic_lvtpc;
 };
 
@@ -78,10 +103,45 @@ static inline int get_pmu_reg_type(u32 a
     if ( (addr >= MSR_K7_PERFCTR0) && (addr <= MSR_K7_PERFCTR3) )
         return MSR_TYPE_COUNTER;
 
+    if ( (addr >= MSR_AMD_FAM15H_EVNTSEL0) &&
+         (addr <= MSR_AMD_FAM15H_PERFCTR5 ) )
+    {
+        if (addr & 1)
+            return MSR_TYPE_COUNTER;
+        else
+            return MSR_TYPE_CTRL;
+    }
+
     /* unsupported registers */
     return -1;
 }
 
+static inline u32 get_fam15h_addr(u32 addr)
+{
+    switch ( addr )
+    {
+    case MSR_K7_PERFCTR0:
+        return MSR_AMD_FAM15H_PERFCTR0;
+    case MSR_K7_PERFCTR1:
+        return MSR_AMD_FAM15H_PERFCTR1;
+    case MSR_K7_PERFCTR2:
+        return MSR_AMD_FAM15H_PERFCTR2;
+    case MSR_K7_PERFCTR3:
+        return MSR_AMD_FAM15H_PERFCTR3;
+    case MSR_K7_EVNTSEL0:
+        return MSR_AMD_FAM15H_EVNTSEL0;
+    case MSR_K7_EVNTSEL1:
+        return MSR_AMD_FAM15H_EVNTSEL1;
+    case MSR_K7_EVNTSEL2:
+        return MSR_AMD_FAM15H_EVNTSEL2;
+    case MSR_K7_EVNTSEL3:
+        return MSR_AMD_FAM15H_EVNTSEL3;
+    default:
+        break;
+    }
+
+    return addr;
+}
 
 static int amd_vpmu_do_interrupt(struct cpu_user_regs *regs)
 {
@@ -110,12 +170,12 @@ static inline void context_restore(struc
     struct vpmu_struct *vpmu = vcpu_vpmu(v);
     struct amd_vpmu_context *ctxt = vpmu->context;
 
-    for ( i = 0; i < NUM_COUNTERS; i++ )
-        wrmsrl(AMD_F10H_CTRLS[i], ctxt->ctrls[i]);
+    for ( i = 0; i < num_counters; i++ )
+        wrmsrl(ctrls[i], ctxt->ctrls[i]);
 
-    for ( i = 0; i < NUM_COUNTERS; i++ )
+    for ( i = 0; i < num_counters; i++ )
     {
-        wrmsrl(AMD_F10H_COUNTERS[i], ctxt->counters[i]);
+        wrmsrl(counters[i], ctxt->counters[i]);
 
         /* Force an interrupt to allow guest reset the counter,
         if the value is positive */
@@ -147,11 +207,11 @@ static inline void context_save(struct v
     struct vpmu_struct *vpmu = vcpu_vpmu(v);
     struct amd_vpmu_context *ctxt = vpmu->context;
 
-    for ( i = 0; i < NUM_COUNTERS; i++ )
-        rdmsrl(AMD_F10H_COUNTERS[i], ctxt->counters[i]);
+    for ( i = 0; i < num_counters; i++ )
+        rdmsrl(counters[i], ctxt->counters[i]);
 
-    for ( i = 0; i < NUM_COUNTERS; i++ )
-        rdmsrl(AMD_F10H_CTRLS[i], ctxt->ctrls[i]);
+    for ( i = 0; i < num_counters; i++ )
+        rdmsrl(ctrls[i], ctxt->ctrls[i]);
 }
 
 static void amd_vpmu_save(struct vcpu *v)
@@ -175,12 +235,18 @@ static void context_update(unsigned int
     struct vpmu_struct *vpmu = vcpu_vpmu(v);
     struct amd_vpmu_context *ctxt = vpmu->context;
 
-    for ( i = 0; i < NUM_COUNTERS; i++ )
-        if ( msr == AMD_F10H_COUNTERS[i] )
+    if ( k7_counters_mirrored &&
+        ((msr >= MSR_K7_EVNTSEL0) && (msr <= MSR_K7_PERFCTR3)) )
+    {
+        msr = get_fam15h_addr(msr);
+    }
+
+    for ( i = 0; i < num_counters; i++ )
+        if ( msr == counters[i] )
             ctxt->counters[i] = msr_content;
 
-    for ( i = 0; i < NUM_COUNTERS; i++ )
-        if ( msr == AMD_F10H_CTRLS[i] )
+    for ( i = 0; i < num_counters; i++ )
+        if ( msr == ctrls[i] )
             ctxt->ctrls[i] = msr_content;
 
     ctxt->hw_lapic_lvtpc = apic_read(APIC_LVTPC);
@@ -235,10 +301,31 @@ static void amd_vpmu_initialise(struct v
 {
     struct amd_vpmu_context *ctxt = NULL;
     struct vpmu_struct *vpmu = vcpu_vpmu(v);
+    __u8 family = current_cpu_data.x86;
 
     if ( vpmu->flags & VPMU_CONTEXT_ALLOCATED )
         return;
 
+    if ( counters == NULL )
+    {
+        switch ( family )
+        {
+        case 0x15:
+            num_counters = F15H_NUM_COUNTERS;
+            counters = AMD_F15H_COUNTERS;
+            ctrls = AMD_F15H_CTRLS;
+            k7_counters_mirrored = 1;
+            break;
+        case 0x10:
+        default:
+            num_counters = F10H_NUM_COUNTERS;
+            counters = AMD_F10H_COUNTERS;
+            ctrls = AMD_F10H_CTRLS;
+            k7_counters_mirrored = 0;
+            break;
+        }
+    }
+
     ctxt = xmalloc_bytes(sizeof(struct amd_vpmu_context));
 
     if ( !ctxt )
diff -r 0aa6bc8f38a9 -r 7d9df818d302 xen/arch/x86/hvm/vpmu.c
--- a/xen/arch/x86/hvm/vpmu.c   Thu Apr 12 09:06:02 2012 +0100
+++ b/xen/arch/x86/hvm/vpmu.c   Thu Apr 12 09:08:13 2012 +0100
@@ -101,6 +101,7 @@ void vpmu_initialise(struct vcpu *v)
         switch ( family )
         {
         case 0x10:
+        case 0x15:
             vpmu->arch_vpmu_ops = &amd_vpmu_ops;
             break;
         default:
diff -r 0aa6bc8f38a9 -r 7d9df818d302 xen/arch/x86/oprofile/nmi_int.c
--- a/xen/arch/x86/oprofile/nmi_int.c   Thu Apr 12 09:06:02 2012 +0100
+++ b/xen/arch/x86/oprofile/nmi_int.c   Thu Apr 12 09:08:13 2012 +0100
@@ -30,7 +30,7 @@
 struct op_counter_config counter_config[OP_MAX_COUNTER];
 struct op_ibs_config ibs_config;
 
-static struct op_x86_model_spec const *__read_mostly model;
+struct op_x86_model_spec const *__read_mostly model;
 static struct op_msrs cpu_msrs[NR_CPUS];
 static unsigned long saved_lvtpc[NR_CPUS];
 
@@ -435,19 +435,19 @@ static int __init nmi_init(void)
                        break;
                case 0x11:
                        model = &op_athlon_spec;
-                       cpu_type = "x86-64/family11";
+                       cpu_type = "x86-64/family11h";
                        break;
                case 0x12:
                        model = &op_athlon_spec;
-                       cpu_type = "x86-64/family12";
+                       cpu_type = "x86-64/family12h";
                        break;
                case 0x14:
                        model = &op_athlon_spec;
-                       cpu_type = "x86-64/family14";
+                       cpu_type = "x86-64/family14h";
                        break;
                case 0x15:
-                       model = &op_athlon_spec;
-                       cpu_type = "x86-64/family15";
+                       model = &op_amd_fam15h_spec;
+                       cpu_type = "x86-64/family15h";
                        break;
                }
                break;
diff -r 0aa6bc8f38a9 -r 7d9df818d302 xen/arch/x86/oprofile/op_model_athlon.c
--- a/xen/arch/x86/oprofile/op_model_athlon.c   Thu Apr 12 09:06:02 2012 +0100
+++ b/xen/arch/x86/oprofile/op_model_athlon.c   Thu Apr 12 09:08:13 2012 +0100
@@ -24,8 +24,13 @@
 #include "op_x86_model.h"
 #include "op_counter.h"
 
-#define NUM_COUNTERS 4
-#define NUM_CONTROLS 4
+#define K7_NUM_COUNTERS 4
+#define K7_NUM_CONTROLS 4
+
+#define FAM15H_NUM_COUNTERS 6
+#define FAM15H_NUM_CONTROLS 6
+
+#define MAX_COUNTERS FAM15H_NUM_COUNTERS
 
 #define CTR_READ(msr_content,msrs,c) do {rdmsrl(msrs->counters[(c)].addr, (msr_content));} while (0)
 #define CTR_WRITE(l,msrs,c) do {wrmsr(msrs->counters[(c)].addr, -(unsigned int)(l), -1);} while (0)
@@ -44,9 +49,10 @@
 #define CTRL_SET_HOST_ONLY(val, h) (val |= ((h & 0x1ULL) << 41))
 #define CTRL_SET_GUEST_ONLY(val, h) (val |= ((h & 0x1ULL) << 40))
 
-static unsigned long reset_value[NUM_COUNTERS];
+static unsigned long reset_value[MAX_COUNTERS];
 
 extern char svm_stgi_label[];
+extern struct op_x86_model_spec const *__read_mostly model;
 
 #ifdef CONFIG_X86_64
 u32 ibs_caps = 0;
@@ -175,26 +181,44 @@ static void athlon_fill_in_addresses(str
        msrs->controls[3].addr = MSR_K7_EVNTSEL3;
 }
 
-
+static void fam15h_fill_in_addresses(struct op_msrs * const msrs)
+{
+       msrs->counters[0].addr = MSR_AMD_FAM15H_PERFCTR0;
+       msrs->counters[1].addr = MSR_AMD_FAM15H_PERFCTR1;
+       msrs->counters[2].addr = MSR_AMD_FAM15H_PERFCTR2;
+       msrs->counters[3].addr = MSR_AMD_FAM15H_PERFCTR3;
+       msrs->counters[4].addr = MSR_AMD_FAM15H_PERFCTR4;
+       msrs->counters[5].addr = MSR_AMD_FAM15H_PERFCTR5;
+
+       msrs->controls[0].addr = MSR_AMD_FAM15H_EVNTSEL0;
+       msrs->controls[1].addr = MSR_AMD_FAM15H_EVNTSEL1;
+       msrs->controls[2].addr = MSR_AMD_FAM15H_EVNTSEL2;
+       msrs->controls[3].addr = MSR_AMD_FAM15H_EVNTSEL3;
+       msrs->controls[4].addr = MSR_AMD_FAM15H_EVNTSEL4;
+       msrs->controls[5].addr = MSR_AMD_FAM15H_EVNTSEL5;
+}
+
 static void athlon_setup_ctrs(struct op_msrs const * const msrs)
 {
        uint64_t msr_content;
        int i;
+       unsigned int const nr_ctrs = model->num_counters;
+       unsigned int const nr_ctrls = model->num_controls;
 
        /* clear all counters */
-       for (i = 0 ; i < NUM_CONTROLS; ++i) {
+       for (i = 0 ; i < nr_ctrls; ++i) {
                CTRL_READ(msr_content, msrs, i);
                CTRL_CLEAR(msr_content);
                CTRL_WRITE(msr_content, msrs, i);
        }
 
        /* avoid a false detection of ctr overflows in NMI handler */
-       for (i = 0; i < NUM_COUNTERS; ++i) {
+       for (i = 0; i < nr_ctrs; ++i) {
                CTR_WRITE(1, msrs, i);
        }
 
        /* enable active counters */
-       for (i = 0; i < NUM_COUNTERS; ++i) {
+       for (i = 0; i < nr_ctrs; ++i) {
                if (counter_config[i].enabled) {
                        reset_value[i] = counter_config[i].count;
@@ -300,6 +324,7 @@ static int athlon_check_ctrs(unsigned in
        int mode = 0;
        struct vcpu *v = current;
        struct cpu_user_regs *guest_regs = guest_cpu_user_regs();
+       unsigned int const nr_ctrs = model->num_counters;
 
        if (!guest_mode(regs) &&
            (regs->eip == (unsigned long)svm_stgi_label)) {
@@ -312,7 +337,7 @@ static int athlon_check_ctrs(unsigned in
                mode = xenoprofile_get_mode(v, regs);
        }
 
-       for (i = 0 ; i < NUM_COUNTERS; ++i) {
+       for (i = 0 ; i < nr_ctrs; ++i) {
                CTR_READ(msr_content, msrs, i);
                if (CTR_OVERFLOWED(msr_content)) {
                        xenoprof_log_event(current, regs, eip, mode, i);
@@ -373,7 +398,8 @@ static void athlon_start(struct op_msrs
 {
        uint64_t msr_content;
        int i;
-       for (i = 0 ; i < NUM_COUNTERS ; ++i) {
+       unsigned int const nr_ctrs = model->num_counters;
+       for (i = 0 ; i < nr_ctrs ; ++i) {
                if (reset_value[i]) {
                        CTRL_READ(msr_content, msrs, i);
                        CTRL_SET_ACTIVE(msr_content);
@@ -401,10 +427,11 @@ static void athlon_stop(struct op_msrs c
 {
        uint64_t msr_content;
        int i;
+       unsigned int const nr_ctrs = model->num_counters;
 
        /* Subtle: stop on all counters to avoid race with
         * setting our pm callback */
-       for (i = 0 ; i < NUM_COUNTERS ; ++i) {
+       for (i = 0 ; i < nr_ctrs ; ++i) {
                CTRL_READ(msr_content, msrs, i);
                CTRL_SET_INACTIVE(msr_content);
                CTRL_WRITE(msr_content, msrs, i);
@@ -512,11 +539,21 @@ void __init ibs_init(void)
 #endif /* CONFIG_X86_64 */
 
 struct op_x86_model_spec const op_athlon_spec = {
-       .num_counters = NUM_COUNTERS,
-       .num_controls = NUM_CONTROLS,
+       .num_counters = K7_NUM_COUNTERS,
+       .num_controls = K7_NUM_CONTROLS,
        .fill_in_addresses = &athlon_fill_in_addresses,
        .setup_ctrs = &athlon_setup_ctrs,
        .check_ctrs = &athlon_check_ctrs,
        .start = &athlon_start,
        .stop = &athlon_stop
 };
+
+struct op_x86_model_spec const op_amd_fam15h_spec = {
+       .num_counters = FAM15H_NUM_COUNTERS,
+       .num_controls = FAM15H_NUM_CONTROLS,
+       .fill_in_addresses = &fam15h_fill_in_addresses,
+       .setup_ctrs = &athlon_setup_ctrs,
+       .check_ctrs = &athlon_check_ctrs,
+       .start = &athlon_start,
+       .stop = &athlon_stop
+};
diff -r 0aa6bc8f38a9 -r 7d9df818d302 xen/arch/x86/oprofile/op_x86_model.h
--- a/xen/arch/x86/oprofile/op_x86_model.h      Thu Apr 12 09:06:02 2012 +0100
+++ b/xen/arch/x86/oprofile/op_x86_model.h      Thu Apr 12 09:08:13 2012 +0100
@@ -48,6 +48,7 @@ extern struct op_x86_model_spec op_arch_
 extern struct op_x86_model_spec const op_p4_spec;
 extern struct op_x86_model_spec const op_p4_ht2_spec;
 extern struct op_x86_model_spec const op_athlon_spec;
+extern struct op_x86_model_spec const op_amd_fam15h_spec;
 
 void arch_perfmon_setup_counters(void);
 #endif /* OP_X86_MODEL_H */
diff -r 0aa6bc8f38a9 -r 7d9df818d302 xen/include/asm-x86/msr-index.h
--- a/xen/include/asm-x86/msr-index.h   Thu Apr 12 09:06:02 2012 +0100
+++ b/xen/include/asm-x86/msr-index.h   Thu Apr 12 09:08:13 2012 +0100
@@ -223,6 +223,19 @@
 #define MSR_K8_VM_CR                    0xc0010114
 #define MSR_K8_VM_HSAVE_PA              0xc0010117
 
+#define MSR_AMD_FAM15H_EVNTSEL0         0xc0010200
+#define MSR_AMD_FAM15H_PERFCTR0         0xc0010201
+#define MSR_AMD_FAM15H_EVNTSEL1         0xc0010202
+#define MSR_AMD_FAM15H_PERFCTR1         0xc0010203
+#define MSR_AMD_FAM15H_EVNTSEL2         0xc0010204
+#define MSR_AMD_FAM15H_PERFCTR2         0xc0010205
+#define MSR_AMD_FAM15H_EVNTSEL3         0xc0010206
+#define MSR_AMD_FAM15H_PERFCTR3         0xc0010207
+#define MSR_AMD_FAM15H_EVNTSEL4         0xc0010208
+#define MSR_AMD_FAM15H_PERFCTR4         0xc0010209
+#define MSR_AMD_FAM15H_EVNTSEL5         0xc001020a
+#define MSR_AMD_FAM15H_PERFCTR5         0xc001020b
+
 #define MSR_K8_FEATURE_MASK             0xc0011004
 #define MSR_K8_EXT_FEATURE_MASK         0xc0011005
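For reference (not part of the changeset): the idea the patch relies on is that Family 15h exposes the four legacy K7 counter/event-select pairs at a mirrored MSR range, 0xc0010200-0xc001020b, with the event-select register at the even address and the counter at the odd one, plus two extra pairs with no K7 equivalent. The VPMU and xenoprof code above therefore pick their MSR tables by CPU family and, when k7_counters_mirrored is set, translate legacy K7 accesses to the new addresses. The stand-alone C sketch below illustrates only that address mapping, assuming the MSR values added to msr-index.h in this changeset; the helper k7_to_fam15h() and the little test program are hypothetical, the in-tree equivalent being get_fam15h_addr() in xen/arch/x86/hvm/svm/vpmu.c.

/* Sketch of the K7 -> Fam15h MSR mirroring (illustrative only). */
#include <stdint.h>
#include <stdio.h>

#define MSR_K7_EVNTSEL0          0xc0010000u  /* ..EVNTSEL3 = 0xc0010003 */
#define MSR_K7_PERFCTR0          0xc0010004u  /* ..PERFCTR3 = 0xc0010007 */
#define MSR_AMD_FAM15H_EVNTSEL0  0xc0010200u  /* even addresses: event select */
#define MSR_AMD_FAM15H_PERFCTR0  0xc0010201u  /* odd addresses: counter */

/* Hypothetical helper: map a legacy K7 perf MSR to its Fam15h alias. */
static uint32_t k7_to_fam15h(uint32_t msr)
{
    /* The first four Fam15h counter pairs alias the legacy K7 registers;
     * pairs 4 and 5 are new and have no K7 equivalent. */
    if (msr >= MSR_K7_PERFCTR0 && msr <= MSR_K7_PERFCTR0 + 3)
        return MSR_AMD_FAM15H_PERFCTR0 + 2 * (msr - MSR_K7_PERFCTR0);
    if (msr >= MSR_K7_EVNTSEL0 && msr <= MSR_K7_EVNTSEL0 + 3)
        return MSR_AMD_FAM15H_EVNTSEL0 + 2 * (msr - MSR_K7_EVNTSEL0);
    return msr;  /* not a mirrored register */
}

int main(void)
{
    /* K7 PERFCTR2 (0xc0010006) maps to Fam15h PERFCTR2 (0xc0010205). */
    printf("0x%08x -> 0x%08x\n",
           MSR_K7_PERFCTR0 + 2, k7_to_fam15h(MSR_K7_PERFCTR0 + 2));
    return 0;
}

This is also why the SVM MSR intercept lists grow: a Fam15h guest may program either the legacy K7 addresses or the mirrored ones, and context_update() folds both onto the same saved counter slot.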
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog