[xen master] x86/vPMU: invoke <vendor>_vpmu_initialise() through a hook as well
commit 8c20aca6751bf40f2d385f79d702813eb6a3cb27
Author:     Jan Beulich <jbeulich@xxxxxxxx>
AuthorDate: Fri Dec  3 11:21:14 2021 +0100
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Fri Dec  3 11:21:14 2021 +0100

    x86/vPMU: invoke <vendor>_vpmu_initialise() through a hook as well

    I see little point in having an open-coded switch() statement to
    achieve the same; like other vendor-specific operations the function
    can be supplied in the respective ops structure instances.

    Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
    Reviewed-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
 xen/arch/x86/cpu/vpmu.c       | 25 ++++++++++---------------
 xen/arch/x86/cpu/vpmu_amd.c   | 28 +++++++++++++---------------
 xen/arch/x86/cpu/vpmu_intel.c | 28 ++++++++++++----------------
 xen/include/asm-x86/vpmu.h    |  3 +--
 4 files changed, 36 insertions(+), 48 deletions(-)

diff --git a/xen/arch/x86/cpu/vpmu.c b/xen/arch/x86/cpu/vpmu.c
index eacf12c480..64a8f45c16 100644
--- a/xen/arch/x86/cpu/vpmu.c
+++ b/xen/arch/x86/cpu/vpmu.c
@@ -455,21 +455,11 @@ static int vpmu_arch_initialise(struct vcpu *v)
 
     ASSERT(!(vpmu->flags & ~VPMU_AVAILABLE) && !vpmu->context);
 
-    if ( !vpmu_available(v) )
+    if ( !vpmu_available(v) || vpmu_mode == XENPMU_MODE_OFF )
         return 0;
 
-    switch ( vendor )
+    if ( !vpmu_ops.initialise )
     {
-    case X86_VENDOR_AMD:
-    case X86_VENDOR_HYGON:
-        ret = svm_vpmu_initialise(v);
-        break;
-
-    case X86_VENDOR_INTEL:
-        ret = vmx_vpmu_initialise(v);
-        break;
-
-    default:
         if ( vpmu_mode != XENPMU_MODE_OFF )
         {
             printk(XENLOG_G_WARNING "VPMU: Unknown CPU vendor %d. "
@@ -480,12 +470,17 @@ static int vpmu_arch_initialise(struct vcpu *v)
         return -EINVAL;
     }
 
-    vpmu->hw_lapic_lvtpc = PMU_APIC_VECTOR | APIC_LVT_MASKED;
-
+    ret = alternative_call(vpmu_ops.initialise, v);
     if ( ret )
+    {
         printk(XENLOG_G_WARNING "VPMU: Initialization failed for %pv\n", v);
+        return ret;
+    }
 
-    return ret;
+    vpmu->hw_lapic_lvtpc = PMU_APIC_VECTOR | APIC_LVT_MASKED;
+    vpmu_set(vpmu, VPMU_INITIALIZED);
+
+    return 0;
 }
 
 static void get_vpmu(struct vcpu *v)
diff --git a/xen/arch/x86/cpu/vpmu_amd.c b/xen/arch/x86/cpu/vpmu_amd.c
index 629a55f411..5bb4227662 100644
--- a/xen/arch/x86/cpu/vpmu_amd.c
+++ b/xen/arch/x86/cpu/vpmu_amd.c
@@ -483,24 +483,11 @@ static void amd_vpmu_dump(const struct vcpu *v)
     }
 }
 
-static const struct arch_vpmu_ops __initconstrel amd_vpmu_ops = {
-    .do_wrmsr = amd_vpmu_do_wrmsr,
-    .do_rdmsr = amd_vpmu_do_rdmsr,
-    .do_interrupt = amd_vpmu_do_interrupt,
-    .arch_vpmu_destroy = amd_vpmu_destroy,
-    .arch_vpmu_save = amd_vpmu_save,
-    .arch_vpmu_load = amd_vpmu_load,
-    .arch_vpmu_dump = amd_vpmu_dump
-};
-
-int svm_vpmu_initialise(struct vcpu *v)
+static int svm_vpmu_initialise(struct vcpu *v)
 {
     struct xen_pmu_amd_ctxt *ctxt;
     struct vpmu_struct *vpmu = vcpu_vpmu(v);
 
-    if ( vpmu_mode == XENPMU_MODE_OFF )
-        return 0;
-
     if ( !counters )
         return -EINVAL;
 
@@ -529,11 +516,22 @@ int svm_vpmu_initialise(struct vcpu *v)
                          offsetof(struct xen_pmu_amd_ctxt, regs));
     }
 
-    vpmu_set(vpmu, VPMU_INITIALIZED | VPMU_CONTEXT_ALLOCATED);
+    vpmu_set(vpmu, VPMU_CONTEXT_ALLOCATED);
 
     return 0;
 }
 
+static const struct arch_vpmu_ops __initconstrel amd_vpmu_ops = {
+    .initialise = svm_vpmu_initialise,
+    .do_wrmsr = amd_vpmu_do_wrmsr,
+    .do_rdmsr = amd_vpmu_do_rdmsr,
+    .do_interrupt = amd_vpmu_do_interrupt,
+    .arch_vpmu_destroy = amd_vpmu_destroy,
+    .arch_vpmu_save = amd_vpmu_save,
+    .arch_vpmu_load = amd_vpmu_load,
+    .arch_vpmu_dump = amd_vpmu_dump,
+};
+
 static const struct arch_vpmu_ops *__init common_init(void)
 {
     unsigned int i;
diff --git a/xen/arch/x86/cpu/vpmu_intel.c b/xen/arch/x86/cpu/vpmu_intel.c
index 75d6689984..c44e81c756 100644
--- a/xen/arch/x86/cpu/vpmu_intel.c
+++ b/xen/arch/x86/cpu/vpmu_intel.c
@@ -819,25 +819,12 @@ static void core2_vpmu_destroy(struct vcpu *v)
     vpmu_clear(vpmu);
 }
 
-static const struct arch_vpmu_ops __initconstrel core2_vpmu_ops = {
-    .do_wrmsr = core2_vpmu_do_wrmsr,
-    .do_rdmsr = core2_vpmu_do_rdmsr,
-    .do_interrupt = core2_vpmu_do_interrupt,
-    .arch_vpmu_destroy = core2_vpmu_destroy,
-    .arch_vpmu_save = core2_vpmu_save,
-    .arch_vpmu_load = core2_vpmu_load,
-    .arch_vpmu_dump = core2_vpmu_dump
-};
-
-int vmx_vpmu_initialise(struct vcpu *v)
+static int vmx_vpmu_initialise(struct vcpu *v)
 {
     struct vpmu_struct *vpmu = vcpu_vpmu(v);
     u64 msr_content;
     static bool_t ds_warned;
 
-    if ( vpmu_mode == XENPMU_MODE_OFF )
-        return 0;
-
     if ( v->domain->arch.cpuid->basic.pmu_version <= 1 ||
          v->domain->arch.cpuid->basic.pmu_version >= 6 )
         return -EINVAL;
@@ -893,11 +880,20 @@ int vmx_vpmu_initialise(struct vcpu *v)
     if ( is_pv_vcpu(v) && !core2_vpmu_alloc_resource(v) )
         return -EIO;
 
-    vpmu_set(vpmu, VPMU_INITIALIZED);
-
     return 0;
 }
 
+static const struct arch_vpmu_ops __initconstrel core2_vpmu_ops = {
+    .initialise = vmx_vpmu_initialise,
+    .do_wrmsr = core2_vpmu_do_wrmsr,
+    .do_rdmsr = core2_vpmu_do_rdmsr,
+    .do_interrupt = core2_vpmu_do_interrupt,
+    .arch_vpmu_destroy = core2_vpmu_destroy,
+    .arch_vpmu_save = core2_vpmu_save,
+    .arch_vpmu_load = core2_vpmu_load,
+    .arch_vpmu_dump = core2_vpmu_dump,
+};
+
 const struct arch_vpmu_ops *__init core2_vpmu_init(void)
 {
     unsigned int version = 0;
diff --git a/xen/include/asm-x86/vpmu.h b/xen/include/asm-x86/vpmu.h
index aca143c151..8cfa2cf599 100644
--- a/xen/include/asm-x86/vpmu.h
+++ b/xen/include/asm-x86/vpmu.h
@@ -39,6 +39,7 @@
 
 /* Arch specific operations shared by all vpmus */
 struct arch_vpmu_ops {
+    int (*initialise)(struct vcpu *v);
     int (*do_wrmsr)(unsigned int msr, uint64_t msr_content,
                     uint64_t supported);
    int (*do_rdmsr)(unsigned int msr, uint64_t *msr_content);
@@ -50,10 +51,8 @@ struct arch_vpmu_ops {
 };
 
 const struct arch_vpmu_ops *core2_vpmu_init(void);
-int vmx_vpmu_initialise(struct vcpu *);
 const struct arch_vpmu_ops *amd_vpmu_init(void);
 const struct arch_vpmu_ops *hygon_vpmu_init(void);
-int svm_vpmu_initialise(struct vcpu *);
 
 struct vpmu_struct {
     u32 flags;
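What the patch applies is ordinary C function-pointer dispatch: each vendor file fills in a const ops structure, and the generic vPMU code calls through the hook instead of switch()ing on the CPU vendor, with an absent hook standing in for "unknown vendor". The sketch below illustrates that idiom in isolation; every identifier in it (demo_ops, demo_amd_initialise and so on) is a hypothetical stand-in rather than Xen's actual API, and it omits Xen's alternative_call(), which additionally patches the indirect call into a direct one during boot.

/*
 * Minimal, self-contained sketch of the ops-structure dispatch the
 * commit adopts.  All identifiers are hypothetical stand-ins, not
 * Xen symbols.
 */
#include <stdio.h>

struct demo_vcpu { int id; };

/* Analogous to struct arch_vpmu_ops: vendor code fills in the hooks. */
struct demo_ops {
    int (*initialise)(struct demo_vcpu *v);
};

static int demo_amd_initialise(struct demo_vcpu *v)
{
    printf("AMD-style vPMU init for vcpu %d\n", v->id);
    return 0;
}

static const struct demo_ops demo_amd_ops = {
    .initialise = demo_amd_initialise,
};

/* Chosen once at boot by vendor probing; NULL if the vendor is unknown. */
static const struct demo_ops *demo_ops;

/* Generic code: a missing hook replaces the old "default:" vendor check. */
static int demo_arch_initialise(struct demo_vcpu *v)
{
    if ( !demo_ops || !demo_ops->initialise )
        return -1;
    return demo_ops->initialise(v);
}

int main(void)
{
    struct demo_vcpu v = { .id = 0 };

    demo_ops = &demo_amd_ops;   /* stands in for boot-time vendor probing */
    return demo_arch_initialise(&v);
}

One consequence visible in the hunks above is ordering: because the ops structure initializers now name the initialise functions, amd_vpmu_ops and core2_vpmu_ops move below their respective function definitions, and those functions can in turn become static.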
--
generated by git-patchbot for /home/xen/git/xen.git#master