[xen staging] x86/vPMU: convert vendor hook invocations to altcall
commit d95cf9d6057cf3c65192fbc0bf6d334602ecb34b
Author:     Jan Beulich <jbeulich@xxxxxxxx>
AuthorDate: Fri Dec 3 11:20:24 2021 +0100
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Fri Dec 3 11:20:24 2021 +0100

    x86/vPMU: convert vendor hook invocations to altcall

    At least some vPMU functions will be invoked (and hence can further
    be speculated into) even in the vPMU-disabled case. Convert vpmu_ops
    to the standard single-instance model, which is a prerequisite to
    engaging the alternative_call() machinery, and convert all respective
    calls. Note that this requires vpmu_init() to become a pre-SMP
    initcall. This change then also helps performance.

    To replace a few vpmu->arch_vpmu_ops NULL checks, introduce a new
    VPMU_INITIALIZED state, such that this state can be checked for in
    the absence of any other suitable vpmu_is_set() checks.

    While adding the inclusion of xen/err.h, also prune other xen/*.h
    inclusions.

    Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
    Reviewed-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
 xen/arch/x86/cpu/vpmu.c       | 71 +++++++++++++++++++++----------------------
 xen/arch/x86/cpu/vpmu_amd.c   | 21 ++++++-------
 xen/arch/x86/cpu/vpmu_intel.c | 16 +++++-----
 xen/include/asm-x86/vpmu.h    | 28 ++++++++---------
 4 files changed, 66 insertions(+), 70 deletions(-)

diff --git a/xen/arch/x86/cpu/vpmu.c b/xen/arch/x86/cpu/vpmu.c
index 16e91a3694..eacf12c480 100644
--- a/xen/arch/x86/cpu/vpmu.c
+++ b/xen/arch/x86/cpu/vpmu.c
@@ -17,12 +17,12 @@
  *
  * Author: Haitao Shan <haitao.shan@xxxxxxxxx>
  */
-#include <xen/sched.h>
-#include <xen/xenoprof.h>
-#include <xen/event.h>
-#include <xen/guest_access.h>
 #include <xen/cpu.h>
+#include <xen/err.h>
 #include <xen/param.h>
+#include <xen/event.h>
+#include <xen/guest_access.h>
+#include <xen/sched.h>
 #include <asm/regs.h>
 #include <asm/types.h>
 #include <asm/msr.h>
@@ -49,6 +49,7 @@ CHECK_pmu_params;
 static unsigned int __read_mostly opt_vpmu_enabled;
 unsigned int __read_mostly vpmu_mode = XENPMU_MODE_OFF;
 unsigned int __read_mostly vpmu_features = 0;
+static struct arch_vpmu_ops __read_mostly vpmu_ops;

 static DEFINE_SPINLOCK(vpmu_lock);
 static unsigned vpmu_count;
@@ -120,7 +121,6 @@ int vpmu_do_msr(unsigned int msr, uint64_t *msr_content,
 {
     struct vcpu *curr = current;
     struct vpmu_struct *vpmu;
-    const struct arch_vpmu_ops *ops;
     int ret = 0;

     /*
@@ -133,14 +133,13 @@ int vpmu_do_msr(unsigned int msr, uint64_t *msr_content,
         goto nop;

     vpmu = vcpu_vpmu(curr);
-    ops = vpmu->arch_vpmu_ops;
-    if ( !ops )
+    if ( !vpmu_is_set(vpmu, VPMU_INITIALIZED) )
         goto nop;

-    if ( is_write && ops->do_wrmsr )
-        ret = ops->do_wrmsr(msr, *msr_content, supported);
-    else if ( !is_write && ops->do_rdmsr )
-        ret = ops->do_rdmsr(msr, msr_content);
+    if ( is_write && vpmu_ops.do_wrmsr )
+        ret = alternative_call(vpmu_ops.do_wrmsr, msr, *msr_content, supported);
+    else if ( !is_write && vpmu_ops.do_rdmsr )
+        ret = alternative_call(vpmu_ops.do_rdmsr, msr, msr_content);
     else
         goto nop;

@@ -153,7 +152,7 @@ int vpmu_do_msr(unsigned int msr, uint64_t *msr_content,
          vpmu_is_set(vpmu, VPMU_CACHED) )
     {
         vpmu_set(vpmu, VPMU_CONTEXT_SAVE);
-        ops->arch_vpmu_save(curr, 0);
+        alternative_vcall(vpmu_ops.arch_vpmu_save, curr, 0);
         vpmu_reset(vpmu, VPMU_CONTEXT_SAVE | VPMU_CONTEXT_LOADED);
     }

@@ -202,7 +201,7 @@ void vpmu_do_interrupt(struct cpu_user_regs *regs)
         sampling = sampled;

     vpmu = vcpu_vpmu(sampling);
-    if ( !vpmu->arch_vpmu_ops )
+    if ( !vpmu_is_set(vpmu, VPMU_INITIALIZED) )
         return;

     /* PV(H) guest */
@@ -220,7 +219,7 @@ void vpmu_do_interrupt(struct cpu_user_regs *regs)
         /* PV guest will be reading PMU MSRs from xenpmu_data */
         vpmu_set(vpmu, VPMU_CONTEXT_SAVE | VPMU_CONTEXT_LOADED);
-        vpmu->arch_vpmu_ops->arch_vpmu_save(sampling, 1);
+        alternative_vcall(vpmu_ops.arch_vpmu_save, sampling, 1);
         vpmu_reset(vpmu, VPMU_CONTEXT_SAVE | VPMU_CONTEXT_LOADED);

         if ( is_hvm_vcpu(sampled) )
@@ -321,7 +320,7 @@ void vpmu_do_interrupt(struct cpu_user_regs *regs)
     /* We don't support (yet) HVM dom0 */
     ASSERT(sampling == sampled);

-    if ( !vpmu->arch_vpmu_ops->do_interrupt(regs) ||
+    if ( !alternative_call(vpmu_ops.do_interrupt, regs) ||
          !is_vlapic_lvtpc_enabled(vlapic) )
         return;

@@ -349,8 +348,7 @@ static void vpmu_save_force(void *arg)

     vpmu_set(vpmu, VPMU_CONTEXT_SAVE);

-    if ( vpmu->arch_vpmu_ops )
-        (void)vpmu->arch_vpmu_ops->arch_vpmu_save(v, 0);
+    alternative_vcall(vpmu_ops.arch_vpmu_save, v, 0);

     vpmu_reset(vpmu, VPMU_CONTEXT_SAVE);

@@ -368,9 +366,8 @@ void vpmu_save(struct vcpu *v)
     vpmu->last_pcpu = pcpu;
     per_cpu(last_vcpu, pcpu) = v;

-    if ( vpmu->arch_vpmu_ops )
-        if ( vpmu->arch_vpmu_ops->arch_vpmu_save(v, 0) )
-            vpmu_reset(vpmu, VPMU_CONTEXT_LOADED);
+    if ( alternative_call(vpmu_ops.arch_vpmu_save, v, 0) )
+        vpmu_reset(vpmu, VPMU_CONTEXT_LOADED);

     apic_write(APIC_LVTPC, PMU_APIC_VECTOR | APIC_LVT_MASKED);
 }
@@ -426,13 +423,13 @@ int vpmu_load(struct vcpu *v, bool_t from_guest)
          vpmu_is_set(vpmu, VPMU_CACHED)) )
         return 0;

-    if ( vpmu->arch_vpmu_ops && vpmu->arch_vpmu_ops->arch_vpmu_load )
+    if ( vpmu_ops.arch_vpmu_load )
     {
         int ret;

         apic_write(APIC_LVTPC, vpmu->hw_lapic_lvtpc);
         /* Arch code needs to set VPMU_CONTEXT_LOADED */
-        ret = vpmu->arch_vpmu_ops->arch_vpmu_load(v, from_guest);
+        ret = alternative_call(vpmu_ops.arch_vpmu_load, v, from_guest);
         if ( ret )
         {
             apic_write(APIC_LVTPC, vpmu->hw_lapic_lvtpc | APIC_LVT_MASKED);
@@ -572,7 +569,7 @@ static void vpmu_arch_destroy(struct vcpu *v)
         on_selected_cpus(cpumask_of(vpmu->last_pcpu),
                          vpmu_clear_last, v, 1);

-    if ( vpmu->arch_vpmu_ops && vpmu->arch_vpmu_ops->arch_vpmu_destroy )
+    if ( vpmu_ops.arch_vpmu_destroy )
     {
         /*
          * Unload VPMU first if VPMU_CONTEXT_LOADED being set.
@@ -582,7 +579,7 @@ static void vpmu_arch_destroy(struct vcpu *v)
             on_selected_cpus(cpumask_of(vcpu_vpmu(v)->last_pcpu),
                              vpmu_save_force, v, 1);

-        vpmu->arch_vpmu_ops->arch_vpmu_destroy(v);
+        alternative_vcall(vpmu_ops.arch_vpmu_destroy, v);
     }

     vpmu_reset(vpmu, VPMU_CONTEXT_ALLOCATED);
@@ -689,10 +686,9 @@ static void pvpmu_finish(struct domain *d, xen_pmu_params_t *params)
 /* Dump some vpmu information to console. Used in keyhandler dump_domains(). */
 void vpmu_dump(struct vcpu *v)
 {
-    struct vpmu_struct *vpmu = vcpu_vpmu(v);
-
-    if ( vpmu->arch_vpmu_ops && vpmu->arch_vpmu_ops->arch_vpmu_dump )
-        vpmu->arch_vpmu_ops->arch_vpmu_dump(v);
+    if ( vpmu_is_set(vcpu_vpmu(v), VPMU_INITIALIZED) &&
+         vpmu_ops.arch_vpmu_dump )
+        alternative_vcall(vpmu_ops.arch_vpmu_dump, v);
 }

 long do_xenpmu_op(unsigned int op, XEN_GUEST_HANDLE_PARAM(xen_pmu_params_t) arg)
@@ -870,6 +866,7 @@ static struct notifier_block cpu_nfb = {
 static int __init vpmu_init(void)
 {
     int vendor = current_cpu_data.x86_vendor;
+    const struct arch_vpmu_ops *ops = NULL;

     if ( !opt_vpmu_enabled )
         return 0;
@@ -886,36 +883,36 @@ static int __init vpmu_init(void)
     switch ( vendor )
     {
     case X86_VENDOR_AMD:
-        if ( amd_vpmu_init() )
-            vpmu_mode = XENPMU_MODE_OFF;
+        ops = amd_vpmu_init();
         break;

     case X86_VENDOR_HYGON:
-        if ( hygon_vpmu_init() )
-            vpmu_mode = XENPMU_MODE_OFF;
+        ops = hygon_vpmu_init();
         break;

     case X86_VENDOR_INTEL:
-        if ( core2_vpmu_init() )
-            vpmu_mode = XENPMU_MODE_OFF;
+        ops = core2_vpmu_init();
         break;

     default:
         printk(XENLOG_WARNING "VPMU: Unknown CPU vendor: %d. "
                "Turning VPMU off.\n", vendor);
-        vpmu_mode = XENPMU_MODE_OFF;
         break;
     }

-    if ( vpmu_mode != XENPMU_MODE_OFF )
+    if ( !IS_ERR_OR_NULL(ops) )
     {
+        vpmu_ops = *ops;
         register_cpu_notifier(&cpu_nfb);
         printk(XENLOG_INFO "VPMU: version " __stringify(XENPMU_VER_MAJ) "."
                __stringify(XENPMU_VER_MIN) "\n");
     }
     else
+    {
+        vpmu_mode = XENPMU_MODE_OFF;
         opt_vpmu_enabled = 0;
+    }

     return 0;
 }
-__initcall(vpmu_init);
+presmp_initcall(vpmu_init);
diff --git a/xen/arch/x86/cpu/vpmu_amd.c b/xen/arch/x86/cpu/vpmu_amd.c
index 38972089ab..629a55f411 100644
--- a/xen/arch/x86/cpu/vpmu_amd.c
+++ b/xen/arch/x86/cpu/vpmu_amd.c
@@ -21,9 +21,9 @@
  *
  */

-#include <xen/xenoprof.h>
+#include <xen/err.h>
 #include <xen/sched.h>
-#include <xen/irq.h>
+#include <xen/xenoprof.h>
 #include <asm/apic.h>
 #include <asm/vpmu.h>
 #include <asm/hvm/save.h>
@@ -483,7 +483,7 @@ static void amd_vpmu_dump(const struct vcpu *v)
     }
 }

-static const struct arch_vpmu_ops amd_vpmu_ops = {
+static const struct arch_vpmu_ops __initconstrel amd_vpmu_ops = {
     .do_wrmsr = amd_vpmu_do_wrmsr,
     .do_rdmsr = amd_vpmu_do_rdmsr,
     .do_interrupt = amd_vpmu_do_interrupt,
@@ -529,13 +529,12 @@ int svm_vpmu_initialise(struct vcpu *v)
                              offsetof(struct xen_pmu_amd_ctxt, regs));
     }

-    vpmu->arch_vpmu_ops = &amd_vpmu_ops;
+    vpmu_set(vpmu, VPMU_INITIALIZED | VPMU_CONTEXT_ALLOCATED);

-    vpmu_set(vpmu, VPMU_CONTEXT_ALLOCATED);
     return 0;
 }

-static int __init common_init(void)
+static const struct arch_vpmu_ops *__init common_init(void)
 {
     unsigned int i;

@@ -543,7 +542,7 @@ static int __init common_init(void)
     {
         printk(XENLOG_WARNING "VPMU: Unsupported CPU family %#x\n",
                current_cpu_data.x86);
-        return -EINVAL;
+        return ERR_PTR(-EINVAL);
     }

     if ( sizeof(struct xen_pmu_data) +
@@ -553,7 +552,7 @@ static int __init common_init(void)
                "VPMU: Register bank does not fit into VPMU shared page\n");
         counters = ctrls = NULL;
         num_counters = 0;
-        return -ENOSPC;
+        return ERR_PTR(-ENOSPC);
     }

     for ( i = 0; i < num_counters; i++ )
@@ -562,10 +561,10 @@ static int __init common_init(void)
     {
         ctrl_rsvd[i] &= CTRL_RSVD_MASK;
     }

-    return 0;
+    return &amd_vpmu_ops;
 }

-int __init amd_vpmu_init(void)
+const struct arch_vpmu_ops *__init amd_vpmu_init(void)
 {
     switch ( current_cpu_data.x86 )
     {
@@ -592,7 +591,7 @@ int __init amd_vpmu_init(void)
     return common_init();
 }

-int __init hygon_vpmu_init(void)
+const struct arch_vpmu_ops *__init hygon_vpmu_init(void)
 {
     switch ( current_cpu_data.x86 )
     {
diff --git a/xen/arch/x86/cpu/vpmu_intel.c b/xen/arch/x86/cpu/vpmu_intel.c
index f59cae5438..75d6689984 100644
--- a/xen/arch/x86/cpu/vpmu_intel.c
+++ b/xen/arch/x86/cpu/vpmu_intel.c
@@ -18,9 +18,9 @@
  * Author: Haitao Shan <haitao.shan@xxxxxxxxx>
  */

+#include <xen/err.h>
 #include <xen/sched.h>
 #include <xen/xenoprof.h>
-#include <xen/irq.h>
 #include <asm/system.h>
 #include <asm/regs.h>
 #include <asm/types.h>
@@ -819,7 +819,7 @@ static void core2_vpmu_destroy(struct vcpu *v)
     vpmu_clear(vpmu);
 }

-static const struct arch_vpmu_ops core2_vpmu_ops = {
+static const struct arch_vpmu_ops __initconstrel core2_vpmu_ops = {
     .do_wrmsr = core2_vpmu_do_wrmsr,
     .do_rdmsr = core2_vpmu_do_rdmsr,
     .do_interrupt = core2_vpmu_do_interrupt,
@@ -893,12 +893,12 @@ int vmx_vpmu_initialise(struct vcpu *v)
     if ( is_pv_vcpu(v) && !core2_vpmu_alloc_resource(v) )
         return -EIO;

-    vpmu->arch_vpmu_ops = &core2_vpmu_ops;
+    vpmu_set(vpmu, VPMU_INITIALIZED);

     return 0;
 }

-int __init core2_vpmu_init(void)
+const struct arch_vpmu_ops *__init core2_vpmu_init(void)
 {
     unsigned int version = 0;
     unsigned int i;
@@ -921,13 +921,13 @@ int __init core2_vpmu_init(void)
     default:
         printk(XENLOG_WARNING "VPMU: PMU version %u is not supported\n",
                version);
-        return -EINVAL;
+        return ERR_PTR(-EINVAL);
     }

     if ( current_cpu_data.x86 != 6 )
     {
         printk(XENLOG_WARNING "VPMU: only family 6 is supported\n");
-        return -EINVAL;
+        return ERR_PTR(-EINVAL);
     }

     arch_pmc_cnt = core2_get_arch_pmc_count();
@@ -972,9 +972,9 @@ int __init core2_vpmu_init(void)
         printk(XENLOG_WARNING
                "VPMU: Register bank does not fit into VPMU share page\n");
         arch_pmc_cnt = fixed_pmc_cnt = 0;
-        return -ENOSPC;
+        return ERR_PTR(-ENOSPC);
     }

-    return 0;
+    return &core2_vpmu_ops;
 }
diff --git a/xen/include/asm-x86/vpmu.h b/xen/include/asm-x86/vpmu.h
index 4b0a6ba3da..aca143c151 100644
--- a/xen/include/asm-x86/vpmu.h
+++ b/xen/include/asm-x86/vpmu.h
@@ -49,10 +49,10 @@ struct arch_vpmu_ops {
     void (*arch_vpmu_dump)(const struct vcpu *);
 };

-int core2_vpmu_init(void);
+const struct arch_vpmu_ops *core2_vpmu_init(void);
 int vmx_vpmu_initialise(struct vcpu *);
-int amd_vpmu_init(void);
-int hygon_vpmu_init(void);
+const struct arch_vpmu_ops *amd_vpmu_init(void);
+const struct arch_vpmu_ops *hygon_vpmu_init(void);
 int svm_vpmu_initialise(struct vcpu *);

 struct vpmu_struct {
@@ -61,25 +61,25 @@ struct vpmu_struct {
     u32 hw_lapic_lvtpc;
     void *context;      /* May be shared with PV guest */
     void *priv_context; /* hypervisor-only */
-    const struct arch_vpmu_ops *arch_vpmu_ops;
     struct xen_pmu_data *xenpmu_data;
     spinlock_t vpmu_lock;
 };

 /* VPMU states */
-#define VPMU_CONTEXT_ALLOCATED              0x1
-#define VPMU_CONTEXT_LOADED                 0x2
-#define VPMU_RUNNING                        0x4
-#define VPMU_CONTEXT_SAVE                   0x8    /* Force context save */
-#define VPMU_FROZEN                         0x10   /* Stop counters while VCPU is not running */
-#define VPMU_PASSIVE_DOMAIN_ALLOCATED       0x20
+#define VPMU_INITIALIZED                    0x0001
+#define VPMU_CONTEXT_ALLOCATED              0x0002
+#define VPMU_CONTEXT_LOADED                 0x0004
+#define VPMU_RUNNING                        0x0008
+#define VPMU_CONTEXT_SAVE                   0x0010 /* Force context save */
+#define VPMU_FROZEN                         0x0020 /* Stop counters while VCPU is not running */
+#define VPMU_PASSIVE_DOMAIN_ALLOCATED       0x0040
 /* PV(H) guests: VPMU registers are accessed by guest from shared page */
-#define VPMU_CACHED                         0x40
-#define VPMU_AVAILABLE                      0x80
+#define VPMU_CACHED                         0x0080
+#define VPMU_AVAILABLE                      0x0100

 /* Intel-specific VPMU features */
-#define VPMU_CPU_HAS_DS                     0x100  /* Has Debug Store */
-#define VPMU_CPU_HAS_BTS                    0x200  /* Has Branch Trace Store */
+#define VPMU_CPU_HAS_DS                     0x1000 /* Has Debug Store */
+#define VPMU_CPU_HAS_BTS                    0x2000 /* Has Branch Trace Store */

 static inline void vpmu_set(struct vpmu_struct *vpmu, const u32 mask)
 {
--
generated by git-patchbot for /home/xen/git/xen.git#staging
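
To see the pattern this patch applies in isolation: before the change, every
hook was reached via a per-vCPU function-pointer table
(vpmu->arch_vpmu_ops->hook(...)), an indirect call that remains a speculation
target even when vPMU is disabled. After the change, all call sites go through
one static instance (vpmu_ops), filled in exactly once at boot, which is what
alternative_call()/alternative_vcall() need in order to patch each call site
into a direct call when alternatives are applied; per the commit message, that
is also why vpmu_init() must become a pre-SMP initcall, so vpmu_ops is
populated before the patching happens. Below is a minimal standalone C sketch
of this conversion, under clearly stated assumptions: struct vcpu is reduced
to a stub, ERR_PTR()/IS_ERR_OR_NULL() are reimplemented stand-ins for xen/err.h,
and alternative_vcall() is modeled as a plain macro rather than Xen's
self-patching machinery.

/*
 * Illustrative sketch only, not Xen source.  In Xen, alternative_vcall()
 * expands to a call that gets rewritten into a direct call at boot once
 * alternatives are applied; here it is an ordinary macro so the example
 * compiles and runs standalone.
 */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for Xen's xen/err.h helpers. */
#define ERR_PTR(e)        ((void *)(long)(e))
#define IS_ERR_OR_NULL(p) (!(p) || (unsigned long)(p) >= (unsigned long)-4095)

/* Plain-call model of Xen's boot-time-patched call macro. */
#define alternative_vcall(func, ...) ((func)(__VA_ARGS__))

struct vcpu { int id; };                /* stub, not the real struct vcpu */

struct arch_vpmu_ops {
    void (*arch_vpmu_save)(struct vcpu *v, bool to_guest);
};

/*
 * The single writable instance every call site uses; this replaces the
 * per-vCPU vpmu->arch_vpmu_ops pointer removed by the patch.
 */
static struct arch_vpmu_ops vpmu_ops;

static void amd_vpmu_save(struct vcpu *v, bool to_guest)
{
    printf("saving vPMU state of vCPU %d (to_guest=%d)\n", v->id, to_guest);
}

/*
 * Vendor table is const; in Xen it is additionally __initconstrel, since
 * its contents are copied into vpmu_ops and never needed again after boot.
 */
static const struct arch_vpmu_ops amd_ops = {
    .arch_vpmu_save = amd_vpmu_save,
};

/* Vendor init now returns the ops table (or ERR_PTR()) instead of an int. */
static const struct arch_vpmu_ops *amd_vpmu_init(void)
{
    if ( !amd_ops.arch_vpmu_save )      /* stand-in for a real HW check */
        return ERR_PTR(-EINVAL);
    return &amd_ops;
}

static int vpmu_init(void)
{
    const struct arch_vpmu_ops *ops = amd_vpmu_init();

    if ( IS_ERR_OR_NULL(ops) )
        return -ENODEV;                 /* Xen instead forces vpmu_mode off */

    vpmu_ops = *ops;   /* populated once, before call sites are "patched" */
    return 0;
}

int main(void)
{
    struct vcpu v = { .id = 0 };

    if ( vpmu_init() )
        return 1;

    /* Call site: was vpmu->arch_vpmu_ops->arch_vpmu_save(&v, 0). */
    alternative_vcall(vpmu_ops.arch_vpmu_save, &v, false);
    return 0;
}

The ERR_PTR() convention in the sketch mirrors what the patch adopts for the
vendor init functions: a valid ops pointer on success, an encoded errno
otherwise, tested with IS_ERR_OR_NULL() in vpmu_init().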