Re: [PATCH 1/3] x86/vPMU: convert vendor hook invocations to altcall
On 29/11/2021 09:10, Jan Beulich wrote:
> --- a/xen/arch/x86/cpu/vpmu.c
> +++ b/xen/arch/x86/cpu/vpmu.c
> @@ -17,12 +17,12 @@
>   *
>   * Author: Haitao Shan <haitao.shan@xxxxxxxxx>
>   */
> -#include <xen/sched.h>
> -#include <xen/xenoprof.h>
> -#include <xen/event.h>
> -#include <xen/guest_access.h>
>  #include <xen/cpu.h>
> +#include <xen/err.h>
>  #include <xen/param.h>
> +#include <xen/event.h>
> +#include <xen/guest_access.h>
> +#include <xen/sched.h>
>  #include <asm/regs.h>
>  #include <asm/types.h>
>  #include <asm/msr.h>
> @@ -49,6 +49,7 @@ CHECK_pmu_params;
>  static unsigned int __read_mostly opt_vpmu_enabled;
>  unsigned int __read_mostly vpmu_mode = XENPMU_MODE_OFF;
>  unsigned int __read_mostly vpmu_features = 0;
> +static struct arch_vpmu_ops __read_mostly vpmu_ops;

Thoughts on renaming this to just struct vpmu_ops?  The arch_ really is
quite wrong, and you touch every impacted line in this patch, other
than the main struct name itself.

[edit] There are other misuses of arch_.  Perhaps best to defer this to
a later change.

> @@ -133,14 +133,13 @@ int vpmu_do_msr(unsigned int msr, uint64
>          goto nop;
>
>      vpmu = vcpu_vpmu(curr);
> -    ops = vpmu->arch_vpmu_ops;
> -    if ( !ops )
> +    if ( !vpmu_is_set(vpmu, VPMU_INITIALIZED) )
>          goto nop;
>
> -    if ( is_write && ops->do_wrmsr )
> -        ret = ops->do_wrmsr(msr, *msr_content, supported);
> -    else if ( !is_write && ops->do_rdmsr )
> -        ret = ops->do_rdmsr(msr, msr_content);
> +    if ( is_write && vpmu_ops.do_wrmsr )
> +        ret = alternative_call(vpmu_ops.do_wrmsr, msr, *msr_content, supported);
> +    else if ( !is_write && vpmu_ops.do_rdmsr )
> +        ret = alternative_call(vpmu_ops.do_rdmsr, msr, msr_content);

Elsewhere, you've dropped the function pointer NULL checks.  Why not
here?

> --- a/xen/include/asm-x86/vpmu.h
> +++ b/xen/include/asm-x86/vpmu.h
> @@ -61,25 +61,25 @@ struct vpmu_struct {
>      u32 hw_lapic_lvtpc;
>      void *context;      /* May be shared with PV guest */
>      void *priv_context; /* hypervisor-only */
> -    const struct arch_vpmu_ops *arch_vpmu_ops;
>      struct xen_pmu_data *xenpmu_data;
>      spinlock_t vpmu_lock;
>  };
>
>  /* VPMU states */
> -#define VPMU_CONTEXT_ALLOCATED              0x1
> -#define VPMU_CONTEXT_LOADED                 0x2
> -#define VPMU_RUNNING                        0x4
> -#define VPMU_CONTEXT_SAVE                   0x8    /* Force context save */
> -#define VPMU_FROZEN                         0x10   /* Stop counters while VCPU is not running */
> -#define VPMU_PASSIVE_DOMAIN_ALLOCATED       0x20
> +#define VPMU_INITIALIZED                    0x1
> +#define VPMU_CONTEXT_ALLOCATED              0x2
> +#define VPMU_CONTEXT_LOADED                 0x4
> +#define VPMU_RUNNING                        0x8
> +#define VPMU_CONTEXT_SAVE                   0x10   /* Force context save */
> +#define VPMU_FROZEN                         0x20   /* Stop counters while VCPU is not running */
> +#define VPMU_PASSIVE_DOMAIN_ALLOCATED       0x40
>  /* PV(H) guests: VPMU registers are accessed by guest from shared page */
> -#define VPMU_CACHED                         0x40
> -#define VPMU_AVAILABLE                      0x80
> +#define VPMU_CACHED                         0x80
> +#define VPMU_AVAILABLE                      0x100
>
>  /* Intel-specific VPMU features */
> -#define VPMU_CPU_HAS_DS                     0x100  /* Has Debug Store */
> -#define VPMU_CPU_HAS_BTS                    0x200  /* Has Branch Trace Store */
> +#define VPMU_CPU_HAS_DS                     0x1000 /* Has Debug Store */
> +#define VPMU_CPU_HAS_BTS                    0x2000 /* Has Branch Trace Store */

Seeing as you're shuffling each of these, how about adding some leading
0's for alignment?

~Andrew
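
[Editor's note] For context, the conversion under discussion follows
roughly the sketch below.  This is illustrative only, not the actual
Xen code: the struct and hook names are simplified, the function name
and error value are made up, and alternative_call() is used on its
Xen semantics of patching an indirect call site into a direct call
once alternatives are applied at boot.

    /* Minimal sketch of the altcall pattern (hypothetical names). */
    struct vpmu_ops {
        int (*do_wrmsr)(unsigned int msr, uint64_t val, uint64_t supported);
        int (*do_rdmsr)(unsigned int msr, uint64_t *val);
    };

    /*
     * One static ops instance, filled in once at boot with the vendor
     * (Intel/AMD) implementation.  Because the targets never change
     * afterwards, each alternative_call() site can be rewritten from an
     * indirect into a direct call during alternatives processing,
     * avoiding retpoline overhead on speculation-hardened builds.
     */
    static struct vpmu_ops __read_mostly vpmu_ops;

    int example_wrmsr(unsigned int msr, uint64_t val, uint64_t supported)
    {
        if ( !vpmu_ops.do_wrmsr )   /* hook not provided by this vendor */
            return -ENOSYS;         /* illustrative error value */

        /*
         * Semantically vpmu_ops.do_wrmsr(msr, val, supported), but the
         * call becomes direct once alternatives have been applied.
         */
        return alternative_call(vpmu_ops.do_wrmsr, msr, val, supported);
    }

This also bears on the NULL-check question above: one plausible reason
such checks can be dropped at other call sites is that those hooks are
guaranteed to be populated when the vendor ops are registered, though
the thread does not spell that out.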
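[Editor's note] Applied to the values in the hunk above, the suggested
zero-padding would look something like the following (purely cosmetic;
not part of the posted patch):

    #define VPMU_INITIALIZED                    0x0001
    #define VPMU_CONTEXT_ALLOCATED              0x0002
    #define VPMU_CONTEXT_LOADED                 0x0004
    #define VPMU_RUNNING                        0x0008
    #define VPMU_CONTEXT_SAVE                   0x0010 /* Force context save */
    #define VPMU_FROZEN                         0x0020 /* Stop counters while VCPU is not running */
    #define VPMU_PASSIVE_DOMAIN_ALLOCATED       0x0040
    /* PV(H) guests: VPMU registers are accessed by guest from shared page */
    #define VPMU_CACHED                         0x0080
    #define VPMU_AVAILABLE                      0x0100

    /* Intel-specific VPMU features */
    #define VPMU_CPU_HAS_DS                     0x1000 /* Has Debug Store */
    #define VPMU_CPU_HAS_BTS                    0x2000 /* Has Branch Trace Store */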