[Xen-changelog] [xen master] xen/sched: let sched_switch_sched() return new lock address
commit 07513e15e6e7e5163bf4f59c747825cce748531c
Author:     Juergen Gross <jgross@xxxxxxxx>
AuthorDate: Tue May 28 12:32:16 2019 +0200
Commit:     Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
CommitDate: Wed Jun 12 12:27:46 2019 +0100

    xen/sched: let sched_switch_sched() return new lock address

    Instead of setting the scheduler percpu lock address in each of the
    switch_sched instances of the different schedulers, do that in
    schedule_cpu_switch(), which is the single caller of that function.
    For that purpose let sched_switch_sched() just return the new lock
    address.

    This allows setting the new struct scheduler and struct schedule_data
    values in the percpu area in schedule_cpu_switch() instead of in the
    schedulers, too.

    It should be noted that in credit2 the lock used to be set while still
    holding the global scheduler write lock, which will no longer be true
    with the new scheme applied. This is actually no problem, as the write
    lock is meant to guard the call of init_pdata(), which is still the
    case.

    While there, turn the full barrier, which was overkill, into an
    smp_wmb(), matching the acquire barrier implicit in taking the lock.

    Signed-off-by: Juergen Gross <jgross@xxxxxxxx>
    Reviewed-by: Dario Faggioli <dfaggioli@xxxxxxxx>
---
 xen/common/sched_arinc653.c | 14 ++------------
 xen/common/sched_credit.c   | 13 ++-----------
 xen/common/sched_credit2.c  | 15 +++------------
 xen/common/sched_null.c     | 16 ++++------------
 xen/common/sched_rt.c       | 12 ++----------
 xen/common/schedule.c       | 18 +++++++++++++++---
 xen/include/xen/sched-if.h  |  9 +++++----
 7 files changed, 33 insertions(+), 64 deletions(-)

diff --git a/xen/common/sched_arinc653.c b/xen/common/sched_arinc653.c
index a4c6d00b81..72b988ea5f 100644
--- a/xen/common/sched_arinc653.c
+++ b/xen/common/sched_arinc653.c
@@ -630,7 +630,7 @@ a653sched_pick_cpu(const struct scheduler *ops, struct vcpu *vc)
  * @param pdata scheduler specific PCPU data (we don't have any)
  * @param vdata scheduler specific VCPU data of the idle vcpu
  */
-static void
+static spinlock_t *
 a653_switch_sched(struct scheduler *new_ops, unsigned int cpu,
                   void *pdata, void *vdata)
 {
@@ -641,17 +641,7 @@ a653_switch_sched(struct scheduler *new_ops, unsigned int cpu,
 
     idle_vcpu[cpu]->sched_priv = vdata;
 
-    per_cpu(scheduler, cpu) = new_ops;
-    per_cpu(schedule_data, cpu).sched_priv = NULL; /* no pdata */
-
-    /*
-     * (Re?)route the lock to its default location. We actually do not use
-     * it, but if we leave it pointing to where it does now (i.e., the
-     * runqueue lock for this PCPU in the default scheduler), we'd be
-     * causing unnecessary contention on that lock (in cases where it is
-     * shared among multiple PCPUs, like in Credit2 and RTDS).
-     */
-    sd->schedule_lock = &sd->_lock;
+    return &sd->_lock;
 }
 
 /**
diff --git a/xen/common/sched_credit.c b/xen/common/sched_credit.c
index 07e442cc8f..3c0d7c7267 100644
--- a/xen/common/sched_credit.c
+++ b/xen/common/sched_credit.c
@@ -633,7 +633,7 @@ csched_init_pdata(const struct scheduler *ops, void *pdata, int cpu)
 }
 
 /* Change the scheduler of cpu to us (Credit). */
-static void
+static spinlock_t *
 csched_switch_sched(struct scheduler *new_ops, unsigned int cpu,
                     void *pdata, void *vdata)
 {
@@ -655,16 +655,7 @@ csched_switch_sched(struct scheduler *new_ops, unsigned int cpu,
     init_pdata(prv, pdata, cpu);
     spin_unlock(&prv->lock);
 
-    per_cpu(scheduler, cpu) = new_ops;
-    per_cpu(schedule_data, cpu).sched_priv = pdata;
-
-    /*
-     * (Re?)route the lock to the per pCPU lock as /last/ thing. In fact,
-     * if it is free (and it can be) we want that anyone that manages
-     * taking it, finds all the initializations we've done above in place.
-     */
-    smp_mb();
-    sd->schedule_lock = &sd->_lock;
+    return &sd->_lock;
 }
 
 #ifndef NDEBUG
diff --git a/xen/common/sched_credit2.c b/xen/common/sched_credit2.c
index 9c1c3b4e08..8e4381d8a7 100644
--- a/xen/common/sched_credit2.c
+++ b/xen/common/sched_credit2.c
@@ -3855,7 +3855,7 @@ csched2_init_pdata(const struct scheduler *ops, void *pdata, int cpu)
 }
 
 /* Change the scheduler of cpu to us (Credit2). */
-static void
+static spinlock_t *
 csched2_switch_sched(struct scheduler *new_ops, unsigned int cpu,
                      void *pdata, void *vdata)
 {
@@ -3888,18 +3888,9 @@ csched2_switch_sched(struct scheduler *new_ops, unsigned int cpu,
      */
     ASSERT(per_cpu(schedule_data, cpu).schedule_lock != &prv->rqd[rqi].lock);
 
-    per_cpu(scheduler, cpu) = new_ops;
-    per_cpu(schedule_data, cpu).sched_priv = pdata;
-
-    /*
-     * (Re?)route the lock to the per pCPU lock as /last/ thing. In fact,
-     * if it is free (and it can be) we want that anyone that manages
-     * taking it, find all the initializations we've done above in place.
-     */
-    smp_mb();
-    per_cpu(schedule_data, cpu).schedule_lock = &prv->rqd[rqi].lock;
-
     write_unlock(&prv->lock);
+
+    return &prv->rqd[rqi].lock;
 }
 
 static void
diff --git a/xen/common/sched_null.c b/xen/common/sched_null.c
index c9700f1732..c02c1b9c1f 100644
--- a/xen/common/sched_null.c
+++ b/xen/common/sched_null.c
@@ -380,8 +380,9 @@ static void vcpu_deassign(struct null_private *prv, struct vcpu *v,
 }
 
 /* Change the scheduler of cpu to us (null). */
-static void null_switch_sched(struct scheduler *new_ops, unsigned int cpu,
-                              void *pdata, void *vdata)
+static spinlock_t *null_switch_sched(struct scheduler *new_ops,
+                                     unsigned int cpu,
+                                     void *pdata, void *vdata)
 {
     struct schedule_data *sd = &per_cpu(schedule_data, cpu);
     struct null_private *prv = null_priv(new_ops);
@@ -400,16 +401,7 @@ static void null_switch_sched(struct scheduler *new_ops, unsigned int cpu,
 
     init_pdata(prv, cpu);
 
-    per_cpu(scheduler, cpu) = new_ops;
-    per_cpu(schedule_data, cpu).sched_priv = pdata;
-
-    /*
-     * (Re?)route the lock to the per pCPU lock as /last/ thing. In fact,
-     * if it is free (and it can be) we want that anyone that manages
-     * taking it, finds all the initializations we've done above in place.
-     */
-    smp_mb();
-    sd->schedule_lock = &sd->_lock;
+    return &sd->_lock;
 }
 
 static void null_vcpu_insert(const struct scheduler *ops, struct vcpu *v)
diff --git a/xen/common/sched_rt.c b/xen/common/sched_rt.c
index f1b81f0373..0acfc3d702 100644
--- a/xen/common/sched_rt.c
+++ b/xen/common/sched_rt.c
@@ -729,7 +729,7 @@ rt_init_pdata(const struct scheduler *ops, void *pdata, int cpu)
 }
 
 /* Change the scheduler of cpu to us (RTDS). */
-static void
+static spinlock_t *
 rt_switch_sched(struct scheduler *new_ops, unsigned int cpu,
                 void *pdata, void *vdata)
 {
@@ -761,16 +761,8 @@ rt_switch_sched(struct scheduler *new_ops, unsigned int cpu,
     }
 
     idle_vcpu[cpu]->sched_priv = vdata;
-    per_cpu(scheduler, cpu) = new_ops;
-    per_cpu(schedule_data, cpu).sched_priv = NULL; /* no pdata */
 
-    /*
-     * (Re?)route the lock to the per pCPU lock as /last/ thing. In fact,
-     * if it is free (and it can be) we want that anyone that manages
-     * taking it, find all the initializations we've done above in place.
-     */
-    smp_mb();
-    per_cpu(schedule_data, cpu).schedule_lock = &prv->lock;
+    return &prv->lock;
 }
 
 static void
diff --git a/xen/common/schedule.c b/xen/common/schedule.c
index 047f7672a3..25f6ab388d 100644
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -1812,7 +1812,8 @@ int schedule_cpu_switch(unsigned int cpu, struct cpupool *c)
     struct scheduler *old_ops = per_cpu(scheduler, cpu);
     struct scheduler *new_ops = (c == NULL) ? &ops : c->sched;
     struct cpupool *old_pool = per_cpu(cpupool, cpu);
-    spinlock_t * old_lock;
+    struct schedule_data *sd = &per_cpu(schedule_data, cpu);
+    spinlock_t *old_lock, *new_lock;
 
     /*
      * pCPUs only move from a valid cpupool to free (i.e., out of any pool),
@@ -1870,8 +1871,19 @@ int schedule_cpu_switch(unsigned int cpu, struct cpupool *c)
     old_lock = pcpu_schedule_lock_irq(cpu);
 
     vpriv_old = idle->sched_priv;
-    ppriv_old = per_cpu(schedule_data, cpu).sched_priv;
-    sched_switch_sched(new_ops, cpu, ppriv, vpriv);
+    ppriv_old = sd->sched_priv;
+    new_lock = sched_switch_sched(new_ops, cpu, ppriv, vpriv);
+
+    per_cpu(scheduler, cpu) = new_ops;
+    sd->sched_priv = ppriv;
+
+    /*
+     * The data above is protected under new_lock, which may be unlocked.
+     * Another CPU can take new_lock as soon as sd->schedule_lock is visible,
+     * and must observe all prior initialisation.
+     */
+    smp_wmb();
+    sd->schedule_lock = new_lock;
 
     /* _Not_ pcpu_schedule_unlock(): schedule_lock may have changed! */
     spin_unlock_irq(old_lock);
diff --git a/xen/include/xen/sched-if.h b/xen/include/xen/sched-if.h
index b3c3e189d9..b8e2b2e49e 100644
--- a/xen/include/xen/sched-if.h
+++ b/xen/include/xen/sched-if.h
@@ -153,7 +153,7 @@ struct scheduler {
     /* Idempotent. */
     void         (*free_domdata)   (const struct scheduler *, void *);
 
-    void         (*switch_sched)   (struct scheduler *, unsigned int,
+    spinlock_t * (*switch_sched)   (struct scheduler *, unsigned int,
                                     void *, void *);
 
     /* Activate / deactivate vcpus in a cpu pool */
@@ -195,10 +195,11 @@ static inline void sched_deinit(struct scheduler *s)
     s->deinit(s);
 }
 
-static inline void sched_switch_sched(struct scheduler *s, unsigned int cpu,
-                                      void *pdata, void *vdata)
+static inline spinlock_t *sched_switch_sched(struct scheduler *s,
+                                             unsigned int cpu,
+                                             void *pdata, void *vdata)
 {
-    s->switch_sched(s, cpu, pdata, vdata);
+    return s->switch_sched(s, cpu, pdata, vdata);
 }
 
 static inline void sched_dump_settings(const struct scheduler *s)
--
generated by git-patchbot for /home/xen/git/xen.git#master
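The ordering the patch relies on deserves spelling out. The writer fills in
the per-CPU scheduler fields first, issues smp_wmb(), and only then publishes
the new lock address through sd->schedule_lock. A reader that observes the new
pointer and acquires the lock is guaranteed, by the acquire semantics of
taking the lock, to see the completed initialisation; it must also re-check
the pointer after acquiring, since the active lock can be switched underneath
it. That re-check is why schedule_cpu_switch() above releases with
spin_unlock_irq(old_lock) rather than pcpu_schedule_unlock(), and it mirrors
the retry loop in Xen's pcpu_schedule_lock() helpers. Below is a minimal
standalone sketch of this publish/re-check scheme using C11 atomics; it is not
the Xen implementation, and the names (struct pcpu, switch_lock, lock_pcpu)
are illustrative only.

#include <stdatomic.h>
#include <stdint.h>

struct pcpu {
    void *sched_priv;               /* scheduler-private per-CPU data */
    atomic_uintptr_t schedule_lock; /* address of the currently active lock */
};

/* Writer: initialise the data first, then publish the new lock address. */
static void switch_lock(struct pcpu *pc, void *new_priv, atomic_flag *new_lock)
{
    pc->sched_priv = new_priv;

    /*
     * The release store stands in for smp_wmb() plus the plain pointer
     * assignment: all stores above become visible before the new pointer.
     */
    atomic_store_explicit(&pc->schedule_lock, (uintptr_t)new_lock,
                          memory_order_release);
}

/* Reader: acquire the lock, then verify it is still the active one. */
static atomic_flag *lock_pcpu(struct pcpu *pc)
{
    for ( ;; )
    {
        atomic_flag *lock = (atomic_flag *)
            atomic_load_explicit(&pc->schedule_lock, memory_order_acquire);

        while ( atomic_flag_test_and_set_explicit(lock, memory_order_acquire) )
            ;                       /* spin until the lock is taken */

        /* The active lock may have been switched while we were spinning. */
        if ( (uintptr_t)lock ==
             atomic_load_explicit(&pc->schedule_lock, memory_order_acquire) )
            return lock;            /* caller now holds the current lock */

        atomic_flag_clear_explicit(lock, memory_order_release);
    }
}

A caller that wins lock_pcpu() holds whatever lock schedule_lock currently
designates, exactly the invariant the commit preserves while moving the
publication into schedule_cpu_switch().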