[Xen-devel] [PATCH v3 03/16] x86/VPMU: Minor VPMU cleanup
Update macros that modify VPMU flags to allow changing multiple bits at once.

Make sure that we only touch the MSR bitmap on HVM guests (both VMX and
SVM). This is needed by subsequent PMU patches.

Signed-off-by: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx>
---
 xen/arch/x86/hvm/svm/vpmu.c       | 14 +++++++++-----
 xen/arch/x86/hvm/vmx/vpmu_core2.c |  9 +++------
 xen/arch/x86/hvm/vpmu.c           | 11 +++--------
 xen/include/asm-x86/hvm/vpmu.h    |  9 +++++----
 4 files changed, 20 insertions(+), 23 deletions(-)

diff --git a/xen/arch/x86/hvm/svm/vpmu.c b/xen/arch/x86/hvm/svm/vpmu.c
index bec40d8..842bce7 100644
--- a/xen/arch/x86/hvm/svm/vpmu.c
+++ b/xen/arch/x86/hvm/svm/vpmu.c
@@ -236,7 +236,8 @@ static int amd_vpmu_save(struct vcpu *v)
 
     context_save(v);
 
-    if ( !vpmu_is_set(vpmu, VPMU_RUNNING) && ctx->msr_bitmap_set )
+    if ( is_hvm_domain(v->domain) &&
+         !vpmu_is_set(vpmu, VPMU_RUNNING) && ctx->msr_bitmap_set )
         amd_vpmu_unset_msr_bitmap(v);
 
     return 1;
@@ -276,7 +277,7 @@ static int amd_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
     struct vpmu_struct *vpmu = vcpu_vpmu(v);
 
     /* For all counters, enable guest only mode for HVM guest */
-    if ( (get_pmu_reg_type(msr) == MSR_TYPE_CTRL) &&
+    if ( is_hvm_domain(v->domain) && (get_pmu_reg_type(msr) == MSR_TYPE_CTRL) &&
         !(is_guest_mode(msr_content)) )
    {
        set_guest_mode(msr_content);
@@ -292,7 +293,8 @@ static int amd_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
         apic_write(APIC_LVTPC, PMU_APIC_VECTOR);
         vpmu->hw_lapic_lvtpc = PMU_APIC_VECTOR;
 
-        if ( !((struct amd_vpmu_context *)vpmu->context)->msr_bitmap_set )
+        if ( is_hvm_domain(v->domain) &&
+             !((struct amd_vpmu_context *)vpmu->context)->msr_bitmap_set )
             amd_vpmu_set_msr_bitmap(v);
     }
 
@@ -303,7 +305,8 @@ static int amd_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
         apic_write(APIC_LVTPC, PMU_APIC_VECTOR | APIC_LVT_MASKED);
         vpmu->hw_lapic_lvtpc = PMU_APIC_VECTOR | APIC_LVT_MASKED;
         vpmu_reset(vpmu, VPMU_RUNNING);
-        if ( ((struct amd_vpmu_context *)vpmu->context)->msr_bitmap_set )
+        if ( is_hvm_domain(v->domain) &&
+             ((struct amd_vpmu_context *)vpmu->context)->msr_bitmap_set )
             amd_vpmu_unset_msr_bitmap(v);
         release_pmu_ownship(PMU_OWNER_HVM);
     }
@@ -395,7 +398,8 @@ static void amd_vpmu_destroy(struct vcpu *v)
     if ( !vpmu_is_set(vpmu, VPMU_CONTEXT_ALLOCATED) )
         return;
 
-    if ( ((struct amd_vpmu_context *)vpmu->context)->msr_bitmap_set )
+    if ( is_hvm_domain(v->domain) &&
+         ((struct amd_vpmu_context *)vpmu->context)->msr_bitmap_set )
         amd_vpmu_unset_msr_bitmap(v);
 
     xfree(vpmu->context);
diff --git a/xen/arch/x86/hvm/vmx/vpmu_core2.c b/xen/arch/x86/hvm/vmx/vpmu_core2.c
index ee26362..a1f1561 100644
--- a/xen/arch/x86/hvm/vmx/vpmu_core2.c
+++ b/xen/arch/x86/hvm/vmx/vpmu_core2.c
@@ -326,10 +326,7 @@ static int core2_vpmu_save(struct vcpu *v)
 {
     struct vpmu_struct *vpmu = vcpu_vpmu(v);
 
-    if ( !vpmu_is_set(vpmu, VPMU_CONTEXT_SAVE) )
-        return 0;
-
-    if ( !vpmu_is_set(vpmu, VPMU_CONTEXT_LOADED) )
+    if ( !vpmu_is_set_all(vpmu, VPMU_CONTEXT_SAVE | VPMU_CONTEXT_LOADED) )
         return 0;
 
     __core2_vpmu_save(v);
@@ -446,7 +443,7 @@ static int core2_vpmu_msr_common_check(u32 msr_index, int *type, int *index)
     {
         __core2_vpmu_load(current);
         vpmu_set(vpmu, VPMU_CONTEXT_LOADED);
-        if ( cpu_has_vmx_msr_bitmap )
+        if ( cpu_has_vmx_msr_bitmap && is_hvm_domain(current->domain) )
             core2_vpmu_set_msr_bitmap(current->arch.hvm_vmx.msr_bitmap);
     }
     return 1;
@@ -813,7 +810,7 @@ static void core2_vpmu_destroy(struct vcpu *v)
         return;
     xfree(core2_vpmu_cxt->pmu_enable);
     xfree(vpmu->context);
-    if ( cpu_has_vmx_msr_bitmap )
+    if ( cpu_has_vmx_msr_bitmap && is_hvm_domain(v->domain) )
         core2_vpmu_unset_msr_bitmap(v->arch.hvm_vmx.msr_bitmap);
     release_pmu_ownship(PMU_OWNER_HVM);
     vpmu_reset(vpmu, VPMU_CONTEXT_ALLOCATED);
diff --git a/xen/arch/x86/hvm/vpmu.c b/xen/arch/x86/hvm/vpmu.c
index a4e3664..d6a9ff6 100644
--- a/xen/arch/x86/hvm/vpmu.c
+++ b/xen/arch/x86/hvm/vpmu.c
@@ -127,10 +127,7 @@ static void vpmu_save_force(void *arg)
     struct vcpu *v = (struct vcpu *)arg;
     struct vpmu_struct *vpmu = vcpu_vpmu(v);
 
-    if ( !vpmu_is_set(vpmu, VPMU_CONTEXT_ALLOCATED) )
-        return;
-
-    if ( !vpmu_is_set(vpmu, VPMU_CONTEXT_LOADED) )
+    if ( !vpmu_is_set_all(vpmu, VPMU_CONTEXT_ALLOCATED | VPMU_CONTEXT_LOADED) )
         return;
 
     vpmu_set(vpmu, VPMU_CONTEXT_SAVE);
@@ -138,8 +135,7 @@ static void vpmu_save_force(void *arg)
     if ( vpmu->arch_vpmu_ops )
         (void)vpmu->arch_vpmu_ops->arch_vpmu_save(v);
 
-    vpmu_reset(vpmu, VPMU_CONTEXT_SAVE);
-    vpmu_reset(vpmu, VPMU_CONTEXT_LOADED);
+    vpmu_reset(vpmu, VPMU_CONTEXT_SAVE | VPMU_CONTEXT_LOADED);
 
     per_cpu(last_vcpu, smp_processor_id()) = NULL;
 }
@@ -149,8 +145,7 @@ void vpmu_save(struct vcpu *v)
     struct vpmu_struct *vpmu = vcpu_vpmu(v);
     int pcpu = smp_processor_id();
 
-    if ( !(vpmu_is_set(vpmu, VPMU_CONTEXT_ALLOCATED) &&
-           vpmu_is_set(vpmu, VPMU_CONTEXT_LOADED)) )
+    if ( !vpmu_is_set_all(vpmu, VPMU_CONTEXT_ALLOCATED | VPMU_CONTEXT_LOADED) )
         return;
 
     vpmu->last_pcpu = pcpu;
diff --git a/xen/include/asm-x86/hvm/vpmu.h b/xen/include/asm-x86/hvm/vpmu.h
index 40f63fb..2a713be 100644
--- a/xen/include/asm-x86/hvm/vpmu.h
+++ b/xen/include/asm-x86/hvm/vpmu.h
@@ -81,10 +81,11 @@ struct vpmu_struct {
 #define VPMU_CPU_HAS_DS                     0x100 /* Has Debug Store */
 #define VPMU_CPU_HAS_BTS                    0x200 /* Has Branch Trace Store */
 
-#define vpmu_set(_vpmu, _x)    ((_vpmu)->flags |= (_x))
-#define vpmu_reset(_vpmu, _x)  ((_vpmu)->flags &= ~(_x))
-#define vpmu_is_set(_vpmu, _x) ((_vpmu)->flags & (_x))
-#define vpmu_clear(_vpmu)      ((_vpmu)->flags = 0)
+#define vpmu_set(_vpmu, _x)         ((_vpmu)->flags |= (_x))
+#define vpmu_reset(_vpmu, _x)       ((_vpmu)->flags &= ~(_x))
+#define vpmu_is_set(_vpmu, _x)      ((_vpmu)->flags & (_x))
+#define vpmu_is_set_all(_vpmu, _x)  (((_vpmu)->flags & (_x)) == (_x))
+#define vpmu_clear(_vpmu)           ((_vpmu)->flags = 0)
 
 int vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content);
 int vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content);
-- 
1.8.1.4

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel
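
For readers skimming the macro change above, here is a minimal standalone
sketch (not part of the patch; the reduced struct and flag values are
illustrative stand-ins for the real definitions in vpmu.h) showing why the
new vpmu_is_set_all() is needed when several bits are tested together, and
how the widened vpmu_reset() clears multiple bits in one call:

/* Minimal sketch, compilable outside Xen.  Flag values mirror the style
 * of vpmu.h but are illustrative, not authoritative. */
#include <stdio.h>

#define VPMU_CONTEXT_ALLOCATED  0x1
#define VPMU_CONTEXT_LOADED     0x2

struct vpmu_struct { unsigned int flags; };

#define vpmu_set(_vpmu, _x)         ((_vpmu)->flags |= (_x))
#define vpmu_reset(_vpmu, _x)       ((_vpmu)->flags &= ~(_x))
#define vpmu_is_set(_vpmu, _x)      ((_vpmu)->flags & (_x))
#define vpmu_is_set_all(_vpmu, _x)  (((_vpmu)->flags & (_x)) == (_x))

int main(void)
{
    struct vpmu_struct vpmu = { 0 };
    unsigned int both = VPMU_CONTEXT_ALLOCATED | VPMU_CONTEXT_LOADED;

    vpmu_set(&vpmu, VPMU_CONTEXT_ALLOCATED);   /* only one of the two bits */

    /* vpmu_is_set() is true if ANY requested bit is set: prints 1 */
    printf("is_set:     %d\n", !!vpmu_is_set(&vpmu, both));

    /* vpmu_is_set_all() requires ALL requested bits: prints 0, so a
     * collapsed guard like the one in vpmu_save_force() still bails out */
    printf("is_set_all: %d\n", !!vpmu_is_set_all(&vpmu, both));

    /* vpmu_reset() now clears several bits in one call: prints 0 */
    vpmu_set(&vpmu, VPMU_CONTEXT_LOADED);
    vpmu_reset(&vpmu, both);
    printf("flags:      %#x\n", vpmu.flags);

    return 0;
}

Replacing the two back-to-back vpmu_is_set() tests with a single
vpmu_is_set_all() keeps the any-bit semantics of vpmu_is_set() available
for callers that genuinely want an OR-test.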