[Xen-devel] [PATCH v19 10/14] x86/VPMU: Add support for PMU register handling on PV guests

Intercept accesses to PMU MSRs and process them in the VPMU module. If
vpmu ops for a VCPU are not initialized (which is the case, for example,
for PV guests that are not "VPMU-enlightened"), accesses to these MSRs
will return failure. Dump VPMU state for all domains (HVM and PV) when
requested.

Signed-off-by: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>
Acked-by: Jan Beulich <jbeulich@xxxxxxxx>
Acked-by: Kevin Tian <kevin.tian@xxxxxxxxx>
Reviewed-by: Dietmar Hahn <dietmar.hahn@xxxxxxxxxxxxxx>
Tested-by: Dietmar Hahn <dietmar.hahn@xxxxxxxxxxxxxx>
---
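[Editor's note -- placed below the "---" separator, which git-am ignores,
so the commit itself is unchanged. The MSR_CORE_PERF_GLOBAL_OVF_CTRL hunk
in vpmu_core2.c below rejects writes that touch reserved bits, using a
mask built from the counter counts. The standalone C sketch here only
illustrates how that mask is composed; FIXED_PMC_CNT and ARCH_PMC_CNT are
made-up example values, not what Xen reads from CPUID leaf 0xa.]

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative counts; Xen derives the real ones from CPUID leaf 0xa. */
    #define FIXED_PMC_CNT 3
    #define ARCH_PMC_CNT  4

    int main(void)
    {
        /*
         * Bits a guest may legitimately write in IA32_PERF_GLOBAL_OVF_CTRL:
         *  - bits 63:62 (the 0xC000000000000000 constant in the patch),
         *  - one bit per fixed-function counter, starting at bit 32,
         *  - one bit per general-purpose counter, starting at bit 0.
         */
        uint64_t mask = 0xC000000000000000ULL |
                        (((1ULL << FIXED_PMC_CNT) - 1) << 32) |
                        ((1ULL << ARCH_PMC_CNT) - 1);

        uint64_t ok  = 1ULL << 32; /* clears fixed counter 0's overflow bit */
        uint64_t bad = 1ULL << 40; /* reserved bit: the patch returns 1 (#GP) */

        printf("mask = %#" PRIx64 "\n", mask);
        printf("ok  -> %s\n", (ok  & ~mask) ? "reject" : "accept");
        printf("bad -> %s\n", (bad & ~mask) ? "reject" : "accept");
        return 0;
    }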
 xen/arch/x86/domain.c             |  3 +--
 xen/arch/x86/hvm/vmx/vpmu_core2.c | 49 +++++++++++++++++++++++++++++++------
 xen/arch/x86/hvm/vpmu.c           |  3 +++
 xen/arch/x86/traps.c              | 51 +++++++++++++++++++++++++++++++++++++--
 4 files changed, 95 insertions(+), 11 deletions(-)

diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index c7f8210..a48d824 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -2065,8 +2065,7 @@ void arch_dump_vcpu_info(struct vcpu *v)
 {
     paging_dump_vcpu_info(v);

-    if ( is_hvm_vcpu(v) )
-        vpmu_dump(v);
+    vpmu_dump(v);
 }

 void domain_cpuid(
diff --git a/xen/arch/x86/hvm/vmx/vpmu_core2.c b/xen/arch/x86/hvm/vmx/vpmu_core2.c
index d10e3e7..66d7bc0 100644
--- a/xen/arch/x86/hvm/vmx/vpmu_core2.c
+++ b/xen/arch/x86/hvm/vmx/vpmu_core2.c
@@ -27,6 +27,7 @@
 #include <asm/regs.h>
 #include <asm/types.h>
 #include <asm/apic.h>
+#include <asm/traps.h>
 #include <asm/msr.h>
 #include <asm/msr-index.h>
 #include <asm/hvm/support.h>
@@ -299,12 +300,18 @@ static inline void __core2_vpmu_save(struct vcpu *v)
         rdmsrl(MSR_CORE_PERF_FIXED_CTR0 + i, fixed_counters[i]);
     for ( i = 0; i < arch_pmc_cnt; i++ )
         rdmsrl(MSR_IA32_PERFCTR0 + i, xen_pmu_cntr_pair[i].counter);
+
+    if ( !has_hvm_container_vcpu(v) )
+        rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, core2_vpmu_cxt->global_status);
 }

 static int core2_vpmu_save(struct vcpu *v)
 {
     struct vpmu_struct *vpmu = vcpu_vpmu(v);

+    if ( !has_hvm_container_vcpu(v) )
+        wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
+
     if ( !vpmu_are_all_set(vpmu, VPMU_CONTEXT_SAVE | VPMU_CONTEXT_LOADED) )
         return 0;

@@ -342,6 +349,13 @@ static inline void __core2_vpmu_load(struct vcpu *v)
     wrmsrl(MSR_CORE_PERF_FIXED_CTR_CTRL, core2_vpmu_cxt->fixed_ctrl);
     wrmsrl(MSR_IA32_DS_AREA, core2_vpmu_cxt->ds_area);
     wrmsrl(MSR_IA32_PEBS_ENABLE, core2_vpmu_cxt->pebs_enable);
+
+    if ( !has_hvm_container_vcpu(v) )
+    {
+        wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, core2_vpmu_cxt->global_ovf_ctrl);
+        core2_vpmu_cxt->global_ovf_ctrl = 0;
+        wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, core2_vpmu_cxt->global_ctrl);
+    }
 }

 static void core2_vpmu_load(struct vcpu *v)
@@ -442,7 +456,6 @@ static int core2_vpmu_msr_common_check(u32 msr_index, int *type, int *index)
 static int core2_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content,
                                uint64_t supported)
 {
-    u64 global_ctrl;
     int i, tmp;
     int type = -1, index = -1;
     struct vcpu *v = current;
@@ -486,7 +499,12 @@ static int core2_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content,
     switch ( msr )
     {
     case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
+        if ( msr_content & ~(0xC000000000000000 |
+                             (((1ULL << fixed_pmc_cnt) - 1) << 32) |
+                             ((1ULL << arch_pmc_cnt) - 1)) )
+            return 1;
         core2_vpmu_cxt->global_status &= ~msr_content;
+        wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, msr_content);
         return 0;
     case MSR_CORE_PERF_GLOBAL_STATUS:
         gdprintk(XENLOG_INFO, "Can not write readonly MSR: "
@@ -514,14 +532,18 @@ static int core2_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content,
         gdprintk(XENLOG_WARNING, "Guest setting of DTS is ignored.\n");
         return 0;
     case MSR_CORE_PERF_GLOBAL_CTRL:
-        global_ctrl = msr_content;
+        core2_vpmu_cxt->global_ctrl = msr_content;
         break;
     case MSR_CORE_PERF_FIXED_CTR_CTRL:
         if ( msr_content &
              ( ~((1ull << (fixed_pmc_cnt * FIXED_CTR_CTRL_BITS)) - 1)) )
             return 1;
-        vmx_read_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL, &global_ctrl);
+        if ( has_hvm_container_vcpu(v) )
+            vmx_read_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL,
+                               &core2_vpmu_cxt->global_ctrl);
+        else
+            rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, core2_vpmu_cxt->global_ctrl);
         *enabled_cntrs &= ~(((1ULL << fixed_pmc_cnt) - 1) << 32);
         if ( msr_content != 0 )
         {
@@ -546,7 +568,11 @@ static int core2_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content,
             if ( msr_content & (~((1ull << 32) - 1)) )
                 return 1;

-            vmx_read_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL, &global_ctrl);
+            if ( has_hvm_container_vcpu(v) )
+                vmx_read_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL,
+                                   &core2_vpmu_cxt->global_ctrl);
+            else
+                rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, core2_vpmu_cxt->global_ctrl);

             if ( msr_content & (1ULL << 22) )
                 *enabled_cntrs |= 1ULL << tmp;
@@ -560,9 +586,15 @@ static int core2_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content,
     if ( type != MSR_TYPE_GLOBAL )
         wrmsrl(msr, msr_content);
     else
-        vmx_write_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL, msr_content);
+    {
+        if ( has_hvm_container_vcpu(v) )
+            vmx_write_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL, msr_content);
+        else
+            wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, msr_content);
+    }

-    if ( (global_ctrl & *enabled_cntrs) || (core2_vpmu_cxt->ds_area != 0) )
+    if ( (core2_vpmu_cxt->global_ctrl & *enabled_cntrs) ||
+         (core2_vpmu_cxt->ds_area != 0) )
         vpmu_set(vpmu, VPMU_RUNNING);
     else
         vpmu_reset(vpmu, VPMU_RUNNING);
@@ -589,7 +621,10 @@ static int core2_vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content)
         *msr_content = core2_vpmu_cxt->global_status;
         break;
     case MSR_CORE_PERF_GLOBAL_CTRL:
-        vmx_read_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL, msr_content);
+        if ( has_hvm_container_vcpu(v) )
+            vmx_read_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL, msr_content);
+        else
+            rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, *msr_content);
         break;
     default:
         rdmsrl(msr, *msr_content);
diff --git a/xen/arch/x86/hvm/vpmu.c b/xen/arch/x86/hvm/vpmu.c
index ea63b6d..26eda34 100644
--- a/xen/arch/x86/hvm/vpmu.c
+++ b/xen/arch/x86/hvm/vpmu.c
@@ -121,6 +121,9 @@ int vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content)

     if ( vpmu->arch_vpmu_ops && vpmu->arch_vpmu_ops->do_rdmsr )
         return vpmu->arch_vpmu_ops->do_rdmsr(msr, msr_content);
+    else
+        *msr_content = 0;
+
     return 0;
 }
diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
index 7a2e2d4..1eb7bb4 100644
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -72,6 +72,7 @@
 #include <asm/apic.h>
 #include <asm/mc146818rtc.h>
 #include <asm/hpet.h>
+#include <asm/hvm/vpmu.h>
 #include <public/arch-x86/cpuid.h>
 #include <xsm/xsm.h>

@@ -968,8 +969,10 @@ void pv_cpuid(struct cpu_user_regs *regs)
             __clear_bit(X86_FEATURE_TOPOEXT % 32, &c);
         break;

+    case 0x0000000a: /* Architectural Performance Monitor Features (Intel) */
+        break;
+
     case 0x00000005: /* MONITOR/MWAIT */
-    case 0x0000000a: /* Architectural Performance Monitor Features */
     case 0x0000000b: /* Extended Topology Enumeration */
     case 0x8000000a: /* SVM revision and features */
     case 0x8000001b: /* Instruction Based Sampling */
@@ -985,6 +988,9 @@ void pv_cpuid(struct cpu_user_regs *regs)
     }

  out:
+    /* VPMU may decide to modify some of the leaves */
+    vpmu_do_cpuid(regs->eax, &a, &b, &c, &d);
+
     regs->eax = a;
     regs->ebx = b;
     regs->ecx = c;
@@ -2009,6 +2015,7 @@ static int emulate_privileged_op(struct cpu_user_regs *regs)
     char io_emul_stub[32];
     void (*io_emul)(struct cpu_user_regs *) __attribute__((__regparm__(1)));
     uint64_t val;
+    bool_t vpmu_msr;

     if ( !read_descriptor(regs->cs, v, regs,
                           &code_base, &code_limit, &ar,
@@ -2499,7 +2506,7 @@ static int emulate_privileged_op(struct cpu_user_regs *regs)
         uint32_t eax = regs->eax;
         uint32_t edx = regs->edx;
         uint64_t msr_content = ((uint64_t)edx << 32) | eax;
-
+        vpmu_msr = 0;
         switch ( regs->_ecx )
         {
         case MSR_FS_BASE:
@@ -2636,6 +2643,22 @@ static int emulate_privileged_op(struct cpu_user_regs *regs)
             if ( v->arch.debugreg[7] & DR7_ACTIVE_MASK )
                 wrmsrl(regs->_ecx, msr_content);
             break;
+        case MSR_P6_PERFCTR(0)...MSR_P6_PERFCTR(7):
+        case MSR_P6_EVNTSEL(0)...MSR_P6_EVNTSEL(3):
+        case MSR_CORE_PERF_FIXED_CTR0...MSR_CORE_PERF_FIXED_CTR2:
+        case MSR_CORE_PERF_FIXED_CTR_CTRL...MSR_CORE_PERF_GLOBAL_OVF_CTRL:
+            if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL )
+            {
+                vpmu_msr = 1;
+        case MSR_AMD_FAM15H_EVNTSEL0...MSR_AMD_FAM15H_PERFCTR5:
+                if ( vpmu_msr || (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) )
+                {
+                    if ( vpmu_do_wrmsr(regs->ecx, msr_content, 0) )
+                        goto fail;
+                }
+                break;
+            }
+            /*FALLTHROUGH*/
         default:
             if ( wrmsr_hypervisor_regs(regs->ecx, msr_content) == 1 )
@@ -2671,6 +2694,7 @@ static int emulate_privileged_op(struct cpu_user_regs *regs)
         break;

     case 0x32: /* RDMSR */
+        vpmu_msr = 0;
         switch ( regs->_ecx )
         {
         case MSR_FS_BASE:
@@ -2738,6 +2762,29 @@ static int emulate_privileged_op(struct cpu_user_regs *regs)
                 [regs->_ecx - MSR_AMD64_DR1_ADDRESS_MASK + 1];
             regs->edx = 0;
             break;
+        case MSR_IA32_PERF_CAPABILITIES:
+            /* No extra capabilities are supported */
+            regs->eax = regs->edx = 0;
+            break;
+        case MSR_P6_PERFCTR(0)...MSR_P6_PERFCTR(7):
+        case MSR_P6_EVNTSEL(0)...MSR_P6_EVNTSEL(3):
+        case MSR_CORE_PERF_FIXED_CTR0...MSR_CORE_PERF_FIXED_CTR2:
+        case MSR_CORE_PERF_FIXED_CTR_CTRL...MSR_CORE_PERF_GLOBAL_OVF_CTRL:
+            if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL )
+            {
+                vpmu_msr = 1;
+        case MSR_AMD_FAM15H_EVNTSEL0...MSR_AMD_FAM15H_PERFCTR5:
+                if ( vpmu_msr || (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) )
+                {
+                    if ( vpmu_do_rdmsr(regs->ecx, &val) )
+                        goto fail;
+
+                    regs->eax = (uint32_t)val;
+                    regs->edx = (uint32_t)(val >> 32);
+                }
+                break;
+            }
+            /*FALLTHROUGH*/
         default:
             if ( rdmsr_hypervisor_regs(regs->ecx, &val) )
--
1.8.1.4
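
[Editor's note, after the patch: both MSR switches in traps.c use an
unusual construct -- the AMD case label sits inside the body of the Intel
vendor check, so both vendors share one vpmu_do_wrmsr()/vpmu_do_rdmsr()
call site while non-matching accesses fall through to the default
handlers. The sketch below restates the WRMSR flow as straight-line C,
purely as a reading aid; the enum tags and the stub handler are
inventions of this note, not Xen code.]

    #include <stdint.h>
    #include <stdio.h>

    enum vendor { VENDOR_INTEL, VENDOR_AMD, VENDOR_OTHER };
    enum pmu_range { RANGE_INTEL, RANGE_AMD, RANGE_NONE };

    /* Stub standing in for vpmu_do_wrmsr(); non-zero means "fault". */
    static int vpmu_do_wrmsr_stub(uint32_t msr, uint64_t val,
                                  uint64_t supported)
    {
        (void)msr; (void)val; (void)supported;
        return 0;
    }

    /*
     * Return values mirror the three exits of the patched switch:
     *   1 -> handled by the VPMU path (possibly as a silent no-op),
     *   0 -> fall through to wrmsr_hypervisor_regs(),
     *  -1 -> "goto fail" (the emulator raises a fault).
     */
    static int pmu_wrmsr_flow(enum pmu_range range, enum vendor vendor,
                              uint32_t msr, uint64_t msr_content)
    {
        int vpmu_msr = 0;

        if ( range == RANGE_NONE )
            return 0;

        if ( range == RANGE_INTEL )
        {
            if ( vendor != VENDOR_INTEL )
                return 0;  /* falls through to "default:" in the patch */
            vpmu_msr = 1;
        }

        if ( vpmu_msr || vendor == VENDOR_AMD )
            return vpmu_do_wrmsr_stub(msr, msr_content, 0) ? -1 : 1;

        /* AMD-range MSR on a non-AMD CPU: swallowed by the "break". */
        return 1;
    }

    int main(void)
    {
        /* An AMD Fam15h PMU MSR written on an Intel CPU is ignored;
           0xc0010200 is used here only as an example MSR number. */
        printf("%d\n", pmu_wrmsr_flow(RANGE_AMD, VENDOR_INTEL,
                                      0xc0010200, 0));
        return 0;
    }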