[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] Re: [Xen-devel] [PATCH v3 01/11] xen: sched: make implementing .alloc_pdata optional
On 4/7/2016 9:23 PM, Dario Faggioli wrote: > The .alloc_pdata scheduler hook must, before this change, > be implemented by all schedulers --even those ones that > don't need to allocate anything. > > Make it possible to just use the SCHED_OP(), like for > the other hooks, by using ERR_PTR() and IS_ERR() for > error reporting. This: > - makes NULL a variant of success; > - allows for errors other than ENOMEM to be properly > communicated (if ever necessary). > > This, in turn, means that schedulers not needing to > allocate any per-pCPU data, can avoid implementing the > hook. In fact, the artificial implementation of > .alloc_pdata in the ARINC653 is removed (and, while there, > nuke .free_pdata too, as it is equally useless). > > Signed-off-by: Dario Faggioli <dario.faggioli@xxxxxxxxxx> > Reviewed-by: Meng Xu <mengxu@xxxxxxxxxxxxx> > Reviewed-by: Juergen Gross <jgross@xxxxxxxx> > Acked-by: George Dunlap <george.dunlap@xxxxxxxxxxxxx> Acked-by: Robert VanVossen <robert.vanvossen@xxxxxxxxxxxxxxx> > --- > Cc: Robert VanVossen <robert.vanvossen@xxxxxxxxxxxxxxx> > Cc: Josh Whitehead <josh.whitehead@xxxxxxxxxxxxxxx> > Cc: Jan Beulich <JBeulich@xxxxxxxx> > --- > Changes from v1: > * only update sd->sched_priv if alloc_pdata does not return > IS_ERR, so that xfree() can always be safely called on > sd->sched_priv itself, as requested during review; > * xen/err.h included in .c files that actually need it, > instead than in sched-if.h. 
> --- > xen/common/sched_arinc653.c | 31 ------------------------------- > xen/common/sched_credit.c | 5 +++-- > xen/common/sched_credit2.c | 2 +- > xen/common/sched_rt.c | 8 ++++---- > xen/common/schedule.c | 27 +++++++++++++++++---------- > 5 files changed, 25 insertions(+), 48 deletions(-) > > diff --git a/xen/common/sched_arinc653.c b/xen/common/sched_arinc653.c > index 8a11a2f..b79fcdf 100644 > --- a/xen/common/sched_arinc653.c > +++ b/xen/common/sched_arinc653.c > @@ -456,34 +456,6 @@ a653sched_free_vdata(const struct scheduler *ops, void > *priv) > } > > /** > - * This function allocates scheduler-specific data for a physical CPU > - * > - * We do not actually make use of any per-CPU data but the hypervisor expects > - * a non-NULL return value > - * > - * @param ops Pointer to this instance of the scheduler structure > - * > - * @return Pointer to the allocated data > - */ > -static void * > -a653sched_alloc_pdata(const struct scheduler *ops, int cpu) > -{ > - /* return a non-NULL value to keep schedule.c happy */ > - return SCHED_PRIV(ops); > -} > - > -/** > - * This function frees scheduler-specific data for a physical CPU > - * > - * @param ops Pointer to this instance of the scheduler structure > - */ > -static void > -a653sched_free_pdata(const struct scheduler *ops, void *pcpu, int cpu) > -{ > - /* nop */ > -} > - > -/** > * This function allocates scheduler-specific data for a domain > * > * We do not actually make use of any per-domain data but the hypervisor > @@ -737,9 +709,6 @@ static const struct scheduler sched_arinc653_def = { > .free_vdata = a653sched_free_vdata, > .alloc_vdata = a653sched_alloc_vdata, > > - .free_pdata = a653sched_free_pdata, > - .alloc_pdata = a653sched_alloc_pdata, > - > .free_domdata = a653sched_free_domdata, > .alloc_domdata = a653sched_alloc_domdata, > > diff --git a/xen/common/sched_credit.c b/xen/common/sched_credit.c > index 4c4927f..63a4a63 100644 > --- a/xen/common/sched_credit.c > +++ b/xen/common/sched_credit.c > 
@@ -23,6 +23,7 @@ > #include <xen/errno.h> > #include <xen/keyhandler.h> > #include <xen/trace.h> > +#include <xen/err.h> > > > /* > @@ -532,12 +533,12 @@ csched_alloc_pdata(const struct scheduler *ops, int cpu) > /* Allocate per-PCPU info */ > spc = xzalloc(struct csched_pcpu); > if ( spc == NULL ) > - return NULL; > + return ERR_PTR(-ENOMEM); > > if ( !alloc_cpumask_var(&spc->balance_mask) ) > { > xfree(spc); > - return NULL; > + return ERR_PTR(-ENOMEM); > } > > spin_lock_irqsave(&prv->lock, flags); > diff --git a/xen/common/sched_credit2.c b/xen/common/sched_credit2.c > index b8c8e40..e97d8be 100644 > --- a/xen/common/sched_credit2.c > +++ b/xen/common/sched_credit2.c > @@ -2047,7 +2047,7 @@ csched2_alloc_pdata(const struct scheduler *ops, int > cpu) > printk("%s: cpu %d not online yet, deferring initializatgion\n", > __func__, cpu); > > - return (void *)1; > + return NULL; > } > > static void > diff --git a/xen/common/sched_rt.c b/xen/common/sched_rt.c > index 321b0a5..aece318 100644 > --- a/xen/common/sched_rt.c > +++ b/xen/common/sched_rt.c > @@ -29,6 +29,7 @@ > #include <xen/cpu.h> > #include <xen/keyhandler.h> > #include <xen/trace.h> > +#include <xen/err.h> > #include <xen/guest_access.h> > > /* > @@ -681,7 +682,7 @@ rt_alloc_pdata(const struct scheduler *ops, int cpu) > spin_unlock_irqrestore(old_lock, flags); > > if ( !alloc_cpumask_var(&_cpumask_scratch[cpu]) ) > - return NULL; > + return ERR_PTR(-ENOMEM); > > if ( prv->repl_timer == NULL ) > { > @@ -689,13 +690,12 @@ rt_alloc_pdata(const struct scheduler *ops, int cpu) > prv->repl_timer = xzalloc(struct timer); > > if ( prv->repl_timer == NULL ) > - return NULL; > + return ERR_PTR(-ENOMEM); > > init_timer(prv->repl_timer, repl_timer_handler, (void *)ops, cpu); > } > > - /* 1 indicates alloc. 
succeed in schedule.c */ > - return (void *)1; > + return NULL; > } > > static void > diff --git a/xen/common/schedule.c b/xen/common/schedule.c > index b7dee16..1941613 100644 > --- a/xen/common/schedule.c > +++ b/xen/common/schedule.c > @@ -37,6 +37,7 @@ > #include <xen/event.h> > #include <public/sched.h> > #include <xsm/xsm.h> > +#include <xen/err.h> > > /* opt_sched: scheduler - default to configured value */ > static char __initdata opt_sched[10] = CONFIG_SCHED_DEFAULT; > @@ -1462,6 +1463,7 @@ static void poll_timer_fn(void *data) > static int cpu_schedule_up(unsigned int cpu) > { > struct schedule_data *sd = &per_cpu(schedule_data, cpu); > + void *sched_priv; > > per_cpu(scheduler, cpu) = &ops; > spin_lock_init(&sd->_lock); > @@ -1500,9 +1502,16 @@ static int cpu_schedule_up(unsigned int cpu) > if ( idle_vcpu[cpu] == NULL ) > return -ENOMEM; > > - if ( (ops.alloc_pdata != NULL) && > - ((sd->sched_priv = ops.alloc_pdata(&ops, cpu)) == NULL) ) > - return -ENOMEM; > + /* > + * We don't want to risk calling xfree() on an sd->sched_priv > + * (e.g., inside free_pdata, from cpu_schedule_down() called > + * during CPU_UP_CANCELLED) that contains an IS_ERR value. 
> + */ > + sched_priv = SCHED_OP(&ops, alloc_pdata, cpu); > + if ( IS_ERR(sched_priv) ) > + return PTR_ERR(sched_priv); > + > + sd->sched_priv = sched_priv; > > return 0; > } > @@ -1512,8 +1521,7 @@ static void cpu_schedule_down(unsigned int cpu) > struct schedule_data *sd = &per_cpu(schedule_data, cpu); > struct scheduler *sched = per_cpu(scheduler, cpu); > > - if ( sd->sched_priv != NULL ) > - SCHED_OP(sched, free_pdata, sd->sched_priv, cpu); > + SCHED_OP(sched, free_pdata, sd->sched_priv, cpu); > SCHED_OP(sched, free_vdata, idle_vcpu[cpu]->sched_priv); > > idle_vcpu[cpu]->sched_priv = NULL; > @@ -1608,9 +1616,8 @@ void __init scheduler_init(void) > idle_domain->max_vcpus = nr_cpu_ids; > if ( alloc_vcpu(idle_domain, 0, 0) == NULL ) > BUG(); > - if ( ops.alloc_pdata && > - !(this_cpu(schedule_data).sched_priv = ops.alloc_pdata(&ops, 0)) ) > - BUG(); > + this_cpu(schedule_data).sched_priv = SCHED_OP(&ops, alloc_pdata, 0); > + BUG_ON(IS_ERR(this_cpu(schedule_data).sched_priv)); > SCHED_OP(&ops, init_pdata, this_cpu(schedule_data).sched_priv, 0); > } > > @@ -1653,8 +1660,8 @@ int schedule_cpu_switch(unsigned int cpu, struct > cpupool *c) > > idle = idle_vcpu[cpu]; > ppriv = SCHED_OP(new_ops, alloc_pdata, cpu); > - if ( ppriv == NULL ) > - return -ENOMEM; > + if ( IS_ERR(ppriv) ) > + return PTR_ERR(ppriv); > SCHED_OP(new_ops, init_pdata, ppriv, cpu); > vpriv = SCHED_OP(new_ops, alloc_vdata, idle, idle->domain->sched_priv); > if ( vpriv == NULL ) > _______________________________________________ Xen-devel mailing list Xen-devel@xxxxxxxxxxxxx http://lists.xen.org/xen-devel
Lists.xenproject.org is hosted with RackSpace, monitoring our infrastructure.