[Xen-devel] [PATCH v21 10/14] x86/VPMU: Use pre-computed masks when checking validity of MSRs
No need to compute those masks on every MSR access.

Signed-off-by: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>
---
New patch in v21

 xen/arch/x86/hvm/vmx/vpmu_core2.c | 24 +++++++++++++++---------
 1 file changed, 15 insertions(+), 9 deletions(-)

diff --git a/xen/arch/x86/hvm/vmx/vpmu_core2.c b/xen/arch/x86/hvm/vmx/vpmu_core2.c
index e5478a1..49f6771 100644
--- a/xen/arch/x86/hvm/vmx/vpmu_core2.c
+++ b/xen/arch/x86/hvm/vmx/vpmu_core2.c
@@ -83,6 +83,11 @@ static bool_t __read_mostly full_width_write;
 /* Number of general-purpose and fixed performance counters */
 static unsigned int __read_mostly arch_pmc_cnt, fixed_pmc_cnt;
 
+/* Masks used for testing whether an MSR is valid */
+#define ARCH_CTRL_MASK (~((1ull << 32) - 1) | (1ull << 21))
+static uint64_t __read_mostly fixed_ctrl_mask, fixed_counters_mask;
+static uint64_t __read_mostly global_ovf_ctrl_mask;
+
 /*
  * QUIRK to workaround an issue on various family 6 cpus.
  * The issue leads to endless PMC interrupt loops on the processor.
@@ -479,9 +484,7 @@ static int core2_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content,
 
     ASSERT(!supported);
 
-    if ( type == MSR_TYPE_COUNTER &&
-         (msr_content &
-          ~((1ull << core2_get_bitwidth_fix_count()) - 1)) )
+    if ( (type == MSR_TYPE_COUNTER) && (msr_content & fixed_counters_mask) )
         /* Writing unsupported bits to a fixed counter */
         return 1;
 
@@ -490,9 +493,7 @@ static int core2_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content,
     switch ( msr )
     {
     case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
-        if ( msr_content & ~(0xC000000000000000 |
-                             (((1ULL << fixed_pmc_cnt) - 1) << 32) |
-                             ((1ULL << arch_pmc_cnt) - 1)) )
+        if ( msr_content & global_ovf_ctrl_mask )
             return 1;
         core2_vpmu_cxt->global_status &= ~msr_content;
         wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, msr_content);
@@ -526,8 +527,7 @@ static int core2_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content,
             core2_vpmu_cxt->global_ctrl = msr_content;
         break;
     case MSR_CORE_PERF_FIXED_CTR_CTRL:
-        if ( msr_content &
-             ( ~((1ull << (fixed_pmc_cnt * FIXED_CTR_CTRL_BITS)) - 1)) )
+        if ( msr_content & fixed_ctrl_mask )
             return 1;
 
         if ( has_hvm_container_vcpu(v) )
@@ -556,7 +556,7 @@ static int core2_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content,
             struct xen_pmu_cntr_pair *xen_pmu_cntr_pair =
                 vpmu_reg_pointer(core2_vpmu_cxt, arch_counters);
 
-            if ( msr_content & (~((1ull << 32) - 1)) )
+            if ( msr_content & ARCH_CTRL_MASK )
                 return 1;
 
             if ( has_hvm_container_vcpu(v) )
@@ -915,6 +915,12 @@ int __init core2_vpmu_init(void)
     rdmsrl(MSR_IA32_PERF_CAPABILITIES, caps);
     full_width_write = (caps >> 13) & 1;
 
+    fixed_ctrl_mask = ~((1ull << (fixed_pmc_cnt * FIXED_CTR_CTRL_BITS)) - 1);
+    fixed_counters_mask = ~((1ull << core2_get_bitwidth_fix_count()) - 1);
+    global_ovf_ctrl_mask = ~(0xC000000000000000 |
+                             (((1ULL << fixed_pmc_cnt) - 1) << 32) |
+                             ((1ULL << arch_pmc_cnt) - 1));
+
    check_pmc_quirk();
 
    if ( sizeof(struct xen_pmu_data) + sizeof(uint64_t) * fixed_pmc_cnt +
-- 
1.8.1.4
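For readers who want to see the pattern in isolation, here is a minimal user-space sketch of the same idea. This is not Xen code: the counter counts, the FIXED_CTR_CTRL_BITS value, and the helper names (init_masks, fixed_ctrl_write_ok) are made-up example values rather than anything read from CPUID the way the hypervisor does it. The point it illustrates is the one the patch makes: build the masks once at initialization, so each MSR write check on the hot path reduces to a single AND against a pre-computed mask.

#include <stdint.h>
#include <stdio.h>

/* Example values only -- the real counts come from CPUID leaf 0xa. */
#define ARCH_PMC_CNT        4
#define FIXED_PMC_CNT       3
#define FIXED_CTR_CTRL_BITS 4

/* Computed once at start-up, analogous to what the patch does in
 * core2_vpmu_init(). */
static uint64_t fixed_ctrl_mask;
static uint64_t global_ovf_ctrl_mask;

static void init_masks(void)
{
    /* Bits above the defined fixed-counter control fields are reserved. */
    fixed_ctrl_mask = ~((1ull << (FIXED_PMC_CNT * FIXED_CTR_CTRL_BITS)) - 1);
    /* Allowed bits: two top status bits, one bit per fixed counter at
     * bit 32, one bit per general-purpose counter at bit 0. */
    global_ovf_ctrl_mask = ~(0xC000000000000000ull |
                             (((1ull << FIXED_PMC_CNT) - 1) << 32) |
                             ((1ull << ARCH_PMC_CNT) - 1));
}

/* A write is rejected if it sets any bit outside the supported set. */
static int fixed_ctrl_write_ok(uint64_t msr_content)
{
    return !(msr_content & fixed_ctrl_mask);
}

int main(void)
{
    init_masks();
    /* 0x333 only touches the low 12 defined control bits -> accepted. */
    printf("0x333     -> %s\n", fixed_ctrl_write_ok(0x333) ? "ok" : "reject");
    /* Bit 24 lies outside the 3 * 4 defined control bits -> rejected. */
    printf("0x1000000 -> %s\n", fixed_ctrl_write_ok(0x1000000) ? "ok" : "reject");
    return 0;
}

The trade-off is the same one the patch makes: a one-time initialization cost in exchange for simpler and cheaper validity checks on every subsequent wrmsr.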