[Xen-changelog] [xen-unstable] x86, hvm, xenoprof: Add full support of HVM guests to xenoprofile on Intel P6.
# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1225114275 0
# Node ID 2a022ee37392715647abf75e44823d087d0e1d45
# Parent 4413d53a8320809e93142ed599a81e1bfe5ae900
x86, hvm, xenoprof: Add full support of HVM guests to xenoprofile on Intel P6.

Signed-off-by: Ronghui Duan <ronghui.duan@xxxxxxxxx>
---
 xen/arch/x86/hvm/vmx/vmx.c               |    6 +
 xen/arch/x86/hvm/vmx/vpmu_core2.c        |   20 ++++++
 xen/arch/x86/oprofile/nmi_int.c          |   51 +++++++++++++++
 xen/arch/x86/oprofile/op_model_ppro.c    |  103 ++++++++++++++++++++++++++++++-
 xen/arch/x86/oprofile/op_x86_model.h     |    5 +
 xen/common/xenoprof.c                    |    2 
 xen/include/asm-x86/hvm/vmx/vpmu.h       |    2 
 xen/include/asm-x86/hvm/vmx/vpmu_core2.h |   22 ------
 xen/include/xen/xenoprof.h               |    3 
 9 files changed, 187 insertions(+), 27 deletions(-)

diff -r 4413d53a8320 -r 2a022ee37392 xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c	Mon Oct 27 13:29:35 2008 +0000
+++ b/xen/arch/x86/hvm/vmx/vmx.c	Mon Oct 27 13:31:15 2008 +0000
@@ -26,6 +26,7 @@
 #include <xen/domain_page.h>
 #include <xen/hypercall.h>
 #include <xen/perfc.h>
+#include <xen/xenoprof.h>
 #include <asm/current.h>
 #include <asm/io.h>
 #include <asm/regs.h>
@@ -132,6 +133,7 @@ static void vmx_vcpu_destroy(struct vcpu
 {
     vmx_destroy_vmcs(v);
     vpmu_destroy(v);
+    passive_domain_destroy(v);
 }
 
 #ifdef __x86_64__
@@ -1666,6 +1668,8 @@ static int vmx_msr_read_intercept(struct
     default:
         if ( vpmu_do_rdmsr(regs) )
             goto done;
+        if ( passive_domain_do_rdmsr(regs) )
+            goto done;
         switch ( long_mode_do_msr_read(regs) )
         {
         case HNDL_unhandled:
@@ -1860,6 +1864,8 @@ static int vmx_msr_write_intercept(struc
             goto gp_fault;
     default:
         if ( vpmu_do_wrmsr(regs) )
+            return X86EMUL_OKAY;
+        if ( passive_domain_do_wrmsr(regs) )
             return X86EMUL_OKAY;
 
         if ( wrmsr_viridian_regs(ecx, regs->eax, regs->edx) )
diff -r 4413d53a8320 -r 2a022ee37392 xen/arch/x86/hvm/vmx/vpmu_core2.c
--- a/xen/arch/x86/hvm/vmx/vpmu_core2.c	Mon Oct 27 13:29:35 2008 +0000
+++ b/xen/arch/x86/hvm/vmx/vpmu_core2.c	Mon Oct 27 13:31:15 2008 +0000
@@ -35,6 +35,26 @@
 #include <asm/hvm/vmx/vpmu.h>
 #include <asm/hvm/vmx/vpmu_core2.h>
 
+u32 core2_counters_msr[] = {
+    MSR_CORE_PERF_FIXED_CTR0,
+    MSR_CORE_PERF_FIXED_CTR1,
+    MSR_CORE_PERF_FIXED_CTR2};
+
+/* Core 2 Non-architectual Performance Control MSRs. */
+u32 core2_ctrls_msr[] = {
+    MSR_CORE_PERF_FIXED_CTR_CTRL,
+    MSR_IA32_PEBS_ENABLE,
+    MSR_IA32_DS_AREA};
+
+struct pmumsr core2_counters = {
+    3,
+    core2_counters_msr
+};
+
+struct pmumsr core2_ctrls = {
+    3,
+    core2_ctrls_msr
+};
 static int arch_pmc_cnt;
 
 static int core2_get_pmc_count(void)
diff -r 4413d53a8320 -r 2a022ee37392 xen/arch/x86/oprofile/nmi_int.c
--- a/xen/arch/x86/oprofile/nmi_int.c	Mon Oct 27 13:29:35 2008 +0000
+++ b/xen/arch/x86/oprofile/nmi_int.c	Mon Oct 27 13:31:15 2008 +0000
@@ -36,6 +36,55 @@ static char *cpu_type;
 static char *cpu_type;
 
 extern int is_active(struct domain *d);
+extern int is_passive(struct domain *d);
+
+int passive_domain_do_rdmsr(struct cpu_user_regs *regs)
+{
+    u64 msr_content;
+    int type, index;
+    struct vpmu_struct *vpmu = vcpu_vpmu(current);
+
+    if ( model->is_arch_pmu_msr == NULL )
+        return 0;
+    if ( !model->is_arch_pmu_msr((u64)regs->ecx, &type, &index) )
+        return 0;
+    if ( !(vpmu->flags & PASSIVE_DOMAIN_ALLOCATED) )
+        if ( ! model->allocated_msr(current) )
+            return 0;
+
+    model->load_msr(current, type, index, &msr_content);
+    regs->eax = msr_content & 0xFFFFFFFF;
+    regs->edx = msr_content >> 32;
+    return 1;
+}
+
+
+int passive_domain_do_wrmsr(struct cpu_user_regs *regs)
+{
+    u64 msr_content;
+    int type, index;
+    struct vpmu_struct *vpmu = vcpu_vpmu(current);
+
+    if ( model->is_arch_pmu_msr == NULL )
+        return 0;
+    if ( !model->is_arch_pmu_msr((u64)regs->ecx, &type, &index) )
+        return 0;
+
+    if ( !(vpmu->flags & PASSIVE_DOMAIN_ALLOCATED) )
+        if ( ! model->allocated_msr(current) )
+            return 0;
+
+    msr_content = (u32)regs->eax | ((u64)regs->edx << 32);
+    model->save_msr(current, type, index, msr_content);
+    return 1;
+}
+
+void passive_domain_destroy(struct vcpu *v)
+{
+    struct vpmu_struct *vpmu = vcpu_vpmu(v);
+    if ( vpmu->flags & PASSIVE_DOMAIN_ALLOCATED )
+        model->free_msr(v);
+}
 
 static int nmi_callback(struct cpu_user_regs *regs, int cpu)
 {
@@ -46,6 +95,8 @@ static int nmi_callback(struct cpu_user_
     if ( ovf && is_active(current->domain) && !xen_mode )
         send_guest_vcpu_virq(current, VIRQ_XENOPROF);
 
+    if ( ovf == 2 )
+        test_and_set_bool(current->nmi_pending);
     return 1;
 }
diff -r 4413d53a8320 -r 2a022ee37392 xen/arch/x86/oprofile/op_model_ppro.c
--- a/xen/arch/x86/oprofile/op_model_ppro.c	Mon Oct 27 13:29:35 2008 +0000
+++ b/xen/arch/x86/oprofile/op_model_ppro.c	Mon Oct 27 13:31:15 2008 +0000
@@ -18,6 +18,8 @@
 #include <xen/sched.h>
 #include <asm/regs.h>
 #include <asm/current.h>
+#include <asm/hvm/vmx/vpmu.h>
+#include <asm/hvm/vmx/vpmu_core2.h>
 
 #include "op_x86_model.h"
 #include "op_counter.h"
@@ -39,9 +41,11 @@
 #define CTRL_SET_KERN(val,k) (val |= ((k & 1) << 17))
 #define CTRL_SET_UM(val, m) (val |= (m << 8))
 #define CTRL_SET_EVENT(val, e) (val |= e)
-
+#define IS_ACTIVE(val) (val & (1 << 22) )
+#define IS_ENABLE(val) (val & (1 << 20) )
 static unsigned long reset_value[NUM_COUNTERS];
 int ppro_has_global_ctrl = 0;
+extern int is_passive(struct domain *d);
 
 static void ppro_fill_in_addresses(struct op_msrs * const msrs)
 {
@@ -103,6 +107,7 @@ static int ppro_check_ctrs(unsigned int
     int ovf = 0;
     unsigned long eip = regs->eip;
     int mode = xenoprofile_get_mode(current, regs);
+    struct arch_msr_pair *msrs_content = vcpu_vpmu(current)->context;
 
     for (i = 0 ; i < NUM_COUNTERS; ++i) {
         if (!reset_value[i])
@@ -111,7 +116,18 @@ static int ppro_check_ctrs(unsigned int
         if (CTR_OVERFLOWED(low)) {
             xenoprof_log_event(current, regs, eip, mode, i);
             CTR_WRITE(reset_value[i], msrs, i);
-            ovf = 1;
+            if ( is_passive(current->domain) && (mode != 2) &&
+                 (vcpu_vpmu(current)->flags & PASSIVE_DOMAIN_ALLOCATED) )
+            {
+                if ( IS_ACTIVE(msrs_content[i].control) )
+                {
+                    msrs_content[i].counter = (low | (unsigned long)high << 32);
+                    if ( IS_ENABLE(msrs_content[i].control) )
+                        ovf = 2;
+                }
+            }
+            if ( !ovf )
+                ovf = 1;
         }
     }
 
@@ -159,6 +175,82 @@ static void ppro_stop(struct op_msrs con
     wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
 }
 
+static int ppro_is_arch_pmu_msr(u64 msr_index, int *type, int *index)
+{
+    if ( (msr_index >= MSR_IA32_PERFCTR0) &&
+         (msr_index < (MSR_IA32_PERFCTR0 + NUM_COUNTERS)) )
+    {
+        *type = MSR_TYPE_ARCH_COUNTER;
+        *index = msr_index - MSR_IA32_PERFCTR0;
+        return 1;
+    }
+    if ( (msr_index >= MSR_P6_EVNTSEL0) &&
+         (msr_index < (MSR_P6_EVNTSEL0 + NUM_CONTROLS)) )
+    {
+        *type = MSR_TYPE_ARCH_CTRL;
+        *index = msr_index - MSR_P6_EVNTSEL0;
+        return 1;
+    }
+
+    return 0;
+}
+
+static int ppro_allocate_msr(struct vcpu *v)
+{
+    struct vpmu_struct *vpmu = vcpu_vpmu(v);
+    struct arch_msr_pair *msr_content;
+
+    msr_content = xmalloc_bytes( sizeof(struct arch_msr_pair) * NUM_COUNTERS );
+    if ( !msr_content )
+        goto out;
+    memset(msr_content, 0, sizeof(struct arch_msr_pair) * NUM_COUNTERS);
+    vpmu->context = (void *)msr_content;
+    vpmu->flags = 0;
+    vpmu->flags |= PASSIVE_DOMAIN_ALLOCATED;
+    return 1;
+out:
+    gdprintk(XENLOG_WARNING, "Insufficient memory for oprofile, oprofile is "
+             "unavailable on domain %d vcpu %d.\n",
+             v->vcpu_id, v->domain->domain_id);
+    return 0;
+}
+
+static void ppro_free_msr(struct vcpu *v)
+{
+    struct vpmu_struct *vpmu = vcpu_vpmu(v);
+
+    xfree(vpmu->context);
+    vpmu->flags &= ~PASSIVE_DOMAIN_ALLOCATED;
+}
+
+static void ppro_load_msr(struct vcpu *v, int type, int index, u64 *msr_content)
+{
+    struct arch_msr_pair *msrs = vcpu_vpmu(v)->context;
+    switch ( type )
+    {
+    case MSR_TYPE_ARCH_COUNTER:
+        *msr_content = msrs[index].counter;
+        break;
+    case MSR_TYPE_ARCH_CTRL:
+        *msr_content = msrs[index].control;
+        break;
+    }
+}
+
+static void ppro_save_msr(struct vcpu *v, int type, int index, u64 msr_content)
+{
+    struct arch_msr_pair *msrs = vcpu_vpmu(v)->context;
+
+    switch ( type )
+    {
+    case MSR_TYPE_ARCH_COUNTER:
+        msrs[index].counter = msr_content;
+        break;
+    case MSR_TYPE_ARCH_CTRL:
+        msrs[index].control = msr_content;
+        break;
+    }
+}
 
 struct op_x86_model_spec const op_ppro_spec = {
     .num_counters = NUM_COUNTERS,
@@ -167,5 +259,10 @@ struct op_x86_model_spec const op_ppro_s
     .setup_ctrs = &ppro_setup_ctrs,
     .check_ctrs = &ppro_check_ctrs,
     .start = &ppro_start,
-    .stop = &ppro_stop
+    .stop = &ppro_stop,
+    .is_arch_pmu_msr = &ppro_is_arch_pmu_msr,
+    .allocated_msr = &ppro_allocate_msr,
+    .free_msr = &ppro_free_msr,
+    .load_msr = &ppro_load_msr,
+    .save_msr = &ppro_save_msr
 };
diff -r 4413d53a8320 -r 2a022ee37392 xen/arch/x86/oprofile/op_x86_model.h
--- a/xen/arch/x86/oprofile/op_x86_model.h	Mon Oct 27 13:29:35 2008 +0000
+++ b/xen/arch/x86/oprofile/op_x86_model.h	Mon Oct 27 13:31:15 2008 +0000
@@ -41,6 +41,11 @@ struct op_x86_model_spec {
                        struct cpu_user_regs * const regs);
     void (*start)(struct op_msrs const * const msrs);
     void (*stop)(struct op_msrs const * const msrs);
+    int (*is_arch_pmu_msr)(u64 msr_index, int *type, int *index);
+    int (*allocated_msr)(struct vcpu *v);
+    void (*free_msr)(struct vcpu *v);
+    void (*load_msr)(struct vcpu * const v, int type, int index, u64 *msr_content);
+    void (*save_msr)(struct vcpu * const v, int type, int index, u64 msr_content);
 };
 
 extern struct op_x86_model_spec const op_ppro_spec;
diff -r 4413d53a8320 -r 2a022ee37392 xen/common/xenoprof.c
--- a/xen/common/xenoprof.c	Mon Oct 27 13:29:35 2008 +0000
+++ b/xen/common/xenoprof.c	Mon Oct 27 13:31:15 2008 +0000
@@ -85,7 +85,7 @@ int is_active(struct domain *d)
     return ((x != NULL) && (x->domain_type == XENOPROF_DOMAIN_ACTIVE));
 }
 
-static int is_passive(struct domain *d)
+int is_passive(struct domain *d)
 {
     struct xenoprof *x = d->xenoprof;
     return ((x != NULL) && (x->domain_type == XENOPROF_DOMAIN_PASSIVE));
diff -r 4413d53a8320 -r 2a022ee37392 xen/include/asm-x86/hvm/vmx/vpmu.h
--- a/xen/include/asm-x86/hvm/vmx/vpmu.h	Mon Oct 27 13:29:35 2008 +0000
+++ b/xen/include/asm-x86/hvm/vmx/vpmu.h	Mon Oct 27 13:31:15 2008 +0000
@@ -67,7 +67,7 @@ struct vpmu_struct {
 #define VPMU_CONTEXT_ALLOCATED 0x1
 #define VPMU_CONTEXT_LOADED 0x2
 #define VPMU_RUNNING 0x4
-
+#define PASSIVE_DOMAIN_ALLOCATED 0x8
 int vpmu_do_wrmsr(struct cpu_user_regs *regs);
 int vpmu_do_rdmsr(struct cpu_user_regs *regs);
 int vpmu_do_interrupt(struct cpu_user_regs *regs);
diff -r 4413d53a8320 -r 2a022ee37392 xen/include/asm-x86/hvm/vmx/vpmu_core2.h
--- a/xen/include/asm-x86/hvm/vmx/vpmu_core2.h	Mon Oct 27 13:29:35 2008 +0000
+++ b/xen/include/asm-x86/hvm/vmx/vpmu_core2.h	Mon Oct 27 13:31:15 2008 +0000
@@ -23,28 +23,6 @@
 #ifndef __ASM_X86_HVM_VPMU_CORE_H_
 #define __ASM_X86_HVM_VPMU_CORE_H_
 
-/* Core 2 Non-architectual Performance Counter MSRs. */
-u32 core2_counters_msr[] = {
-    MSR_CORE_PERF_FIXED_CTR0,
-    MSR_CORE_PERF_FIXED_CTR1,
-    MSR_CORE_PERF_FIXED_CTR2};
-
-/* Core 2 Non-architectual Performance Control MSRs. */
-u32 core2_ctrls_msr[] = {
-    MSR_CORE_PERF_FIXED_CTR_CTRL,
-    MSR_IA32_PEBS_ENABLE,
-    MSR_IA32_DS_AREA};
-
-struct pmumsr core2_counters = {
-    3,
-    core2_counters_msr
-};
-
-struct pmumsr core2_ctrls = {
-    3,
-    core2_ctrls_msr
-};
-
 struct arch_msr_pair {
     u64 counter;
     u64 control;
diff -r 4413d53a8320 -r 2a022ee37392 xen/include/xen/xenoprof.h
--- a/xen/include/xen/xenoprof.h	Mon Oct 27 13:29:35 2008 +0000
+++ b/xen/include/xen/xenoprof.h	Mon Oct 27 13:31:15 2008 +0000
@@ -75,4 +75,7 @@ int acquire_pmu_ownship(int pmu_ownershi
 int acquire_pmu_ownship(int pmu_ownership);
 void release_pmu_ownship(int pmu_ownership);
 
+int passive_domain_do_rdmsr(struct cpu_user_regs *regs);
+int passive_domain_do_wrmsr(struct cpu_user_regs *regs);
+void passive_domain_destroy(struct vcpu *v);
 #endif /* __XEN__XENOPROF_H__ */
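For readers who want to see the control flow in isolation: the patch routes a passive HVM guest's PMU MSR accesses through the new op_x86_model_spec hooks, so a classified counter or event-select register is read from and written to a lazily allocated per-vCPU shadow context rather than the hardware. The sketch below is a minimal, compilable user-space illustration of that hook sequence; it is not part of the changeset, and its types and constants (struct msr_pair, struct pmu_model, NUM_COUNTERS, the main() harness) are simplified stand-ins for Xen's real vcpu/vpmu structures. Only the MSR index bases (0xc1 for IA32_PERFCTR0, 0x186 for P6_EVNTSEL0) match the architectural values used in the patch.

/* Standalone illustration only -- not part of the changeset above. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define NUM_COUNTERS        2
#define MSR_IA32_PERFCTR0   0x00c1   /* architectural counter base      */
#define MSR_P6_EVNTSEL0     0x0186   /* architectural event-select base */

enum { MSR_TYPE_COUNTER, MSR_TYPE_CTRL };

/* Stand-in for the per-vCPU context the patch allocates lazily. */
struct msr_pair { uint64_t counter, control; };

/* Stand-in for the new hooks added to op_x86_model_spec. */
struct pmu_model {
    int  (*is_arch_pmu_msr)(uint64_t msr, int *type, int *index);
    void (*load_msr)(struct msr_pair *ctx, int type, int index, uint64_t *val);
    void (*save_msr)(struct msr_pair *ctx, int type, int index, uint64_t val);
};

static int ppro_is_arch_pmu_msr(uint64_t msr, int *type, int *index)
{
    if ( msr >= MSR_IA32_PERFCTR0 && msr < MSR_IA32_PERFCTR0 + NUM_COUNTERS )
    {
        *type = MSR_TYPE_COUNTER;
        *index = (int)(msr - MSR_IA32_PERFCTR0);
        return 1;
    }
    if ( msr >= MSR_P6_EVNTSEL0 && msr < MSR_P6_EVNTSEL0 + NUM_COUNTERS )
    {
        *type = MSR_TYPE_CTRL;
        *index = (int)(msr - MSR_P6_EVNTSEL0);
        return 1;
    }
    return 0;   /* not a PMU MSR: the normal MSR path would handle it */
}

static void ppro_load_msr(struct msr_pair *ctx, int type, int index, uint64_t *val)
{
    *val = (type == MSR_TYPE_COUNTER) ? ctx[index].counter : ctx[index].control;
}

static void ppro_save_msr(struct msr_pair *ctx, int type, int index, uint64_t val)
{
    if ( type == MSR_TYPE_COUNTER )
        ctx[index].counter = val;
    else
        ctx[index].control = val;
}

static const struct pmu_model model = {
    .is_arch_pmu_msr = ppro_is_arch_pmu_msr,
    .load_msr        = ppro_load_msr,
    .save_msr        = ppro_save_msr,
};

int main(void)
{
    /* Zero-filled context, analogous to what ppro_allocate_msr() sets up. */
    struct msr_pair *ctx = calloc(NUM_COUNTERS, sizeof(*ctx));
    int type, index;
    uint64_t val;

    if ( ctx == NULL )
        return 1;

    /* A guest WRMSR to EVNTSEL0 is classified and saved to the shadow copy. */
    if ( model.is_arch_pmu_msr(MSR_P6_EVNTSEL0, &type, &index) )
        model.save_msr(ctx, type, index, 0x004300c0);   /* arbitrary example value */

    /* A later RDMSR of the same register reads the saved copy back. */
    if ( model.is_arch_pmu_msr(MSR_P6_EVNTSEL0, &type, &index) )
    {
        model.load_msr(ctx, type, index, &val);
        printf("EVNTSEL0 round-trips as 0x%08llx\n", (unsigned long long)val);
    }

    free(ctx);
    return 0;
}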