[Xen-changelog] [xen staging] xen/sched: move per-cpu variable scheduler to struct sched_resource
commit b6eeeccf46d7c3c15b0aa94c6426de6ee9b83304
Author:     Juergen Gross <jgross@xxxxxxxx>
AuthorDate: Wed Oct 2 09:27:34 2019 +0200
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Fri Oct 4 12:58:49 2019 +0200

    xen/sched: move per-cpu variable scheduler to struct sched_resource

    Having a pointer to struct scheduler in struct sched_resource instead
    of a per-cpu variable is enough.

    Signed-off-by: Juergen Gross <jgross@xxxxxxxx>
    Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx>
    Reviewed-by: Dario Faggioli <dfaggioli@xxxxxxxx>
---
 xen/common/sched_credit.c  | 18 +++++++++++-------
 xen/common/sched_credit2.c |  3 ++-
 xen/common/schedule.c      | 15 +++++++--------
 xen/include/xen/sched-if.h |  2 +-
 4 files changed, 21 insertions(+), 17 deletions(-)
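The mechanical change is the same at every call site: the scheduler ops
pointer is no longer read out of a dedicated per-cpu variable but out of
the CPU's sched_resource, which most callers already look up anyway. A
minimal sketch of the two access patterns (the wrapper functions are
illustrative only, not code from the tree; per_cpu(), get_sched_res()
and the scheduler field are as in the patch below):

    /* Before: a dedicated per-cpu variable holds the ops pointer. */
    static const struct scheduler *sched_ops_old(unsigned int cpu)
    {
        return per_cpu(scheduler, cpu);
    }

    /*
     * After: the ops pointer lives in the per-CPU scheduling resource,
     * so a single get_sched_res() lookup serves this and whatever other
     * per-CPU scheduling state the caller needs.
     */
    static const struct scheduler *sched_ops_new(unsigned int cpu)
    {
        return get_sched_res(cpu)->scheduler;
    }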
diff --git a/xen/common/sched_credit.c b/xen/common/sched_credit.c
index a6dff8ec62..86603adcb6 100644
--- a/xen/common/sched_credit.c
+++ b/xen/common/sched_credit.c
@@ -352,9 +352,10 @@ DEFINE_PER_CPU(unsigned int, last_tickle_cpu);
 static inline void __runq_tickle(struct csched_unit *new)
 {
     unsigned int cpu = sched_unit_master(new->unit);
+    struct sched_resource *sr = get_sched_res(cpu);
     struct sched_unit *unit = new->unit;
     struct csched_unit * const cur = CSCHED_UNIT(curr_on_cpu(cpu));
-    struct csched_private *prv = CSCHED_PRIV(per_cpu(scheduler, cpu));
+    struct csched_private *prv = CSCHED_PRIV(sr->scheduler);
     cpumask_t mask, idle_mask, *online;
     int balance_step, idlers_empty;
 
@@ -931,7 +932,8 @@ csched_unit_acct(struct csched_private *prv, unsigned int cpu)
 {
     struct sched_unit *currunit = current->sched_unit;
     struct csched_unit * const svc = CSCHED_UNIT(currunit);
-    const struct scheduler *ops = per_cpu(scheduler, cpu);
+    struct sched_resource *sr = get_sched_res(cpu);
+    const struct scheduler *ops = sr->scheduler;
 
     ASSERT( sched_unit_master(currunit) == cpu );
     ASSERT( svc->sdom != NULL );
@@ -987,8 +989,7 @@ csched_unit_acct(struct csched_private *prv, unsigned int cpu)
          * idlers. But, if we are here, it means there is someone running
          * on it, and hence the bit must be zero already.
          */
-        ASSERT(!cpumask_test_cpu(cpu,
-                                 CSCHED_PRIV(per_cpu(scheduler, cpu))->idlers));
+        ASSERT(!cpumask_test_cpu(cpu, CSCHED_PRIV(ops)->idlers));
         cpu_raise_softirq(cpu, SCHEDULE_SOFTIRQ);
     }
 }
@@ -1083,6 +1084,7 @@ csched_unit_sleep(const struct scheduler *ops, struct sched_unit *unit)
 {
     struct csched_unit * const svc = CSCHED_UNIT(unit);
     unsigned int cpu = sched_unit_master(unit);
+    struct sched_resource *sr = get_sched_res(cpu);
 
     SCHED_STAT_CRANK(unit_sleep);
 
@@ -1095,7 +1097,7 @@ csched_unit_sleep(const struct scheduler *ops, struct sched_unit *unit)
          * But, we are here because unit is going to sleep while running on cpu,
          * so the bit must be zero already.
          */
-        ASSERT(!cpumask_test_cpu(cpu, CSCHED_PRIV(per_cpu(scheduler, cpu))->idlers));
+        ASSERT(!cpumask_test_cpu(cpu, CSCHED_PRIV(sr->scheduler)->idlers));
         cpu_raise_softirq(cpu, SCHEDULE_SOFTIRQ);
     }
     else if ( __unit_on_runq(svc) )
@@ -1575,8 +1577,9 @@ static void
 csched_tick(void *_cpu)
 {
     unsigned int cpu = (unsigned long)_cpu;
+    struct sched_resource *sr = get_sched_res(cpu);
     struct csched_pcpu *spc = CSCHED_PCPU(cpu);
-    struct csched_private *prv = CSCHED_PRIV(per_cpu(scheduler, cpu));
+    struct csched_private *prv = CSCHED_PRIV(sr->scheduler);
 
     spc->tick++;
 
@@ -1601,7 +1604,8 @@ csched_tick(void *_cpu)
 static struct csched_unit *
 csched_runq_steal(int peer_cpu, int cpu, int pri, int balance_step)
 {
-    const struct csched_private * const prv = CSCHED_PRIV(per_cpu(scheduler, cpu));
+    struct sched_resource *sr = get_sched_res(cpu);
+    const struct csched_private * const prv = CSCHED_PRIV(sr->scheduler);
     const struct csched_pcpu * const peer_pcpu = CSCHED_PCPU(peer_cpu);
     struct csched_unit *speer;
     struct list_head *iter;
diff --git a/xen/common/sched_credit2.c b/xen/common/sched_credit2.c
index d51df05887..af58ee161d 100644
--- a/xen/common/sched_credit2.c
+++ b/xen/common/sched_credit2.c
@@ -3268,8 +3268,9 @@ runq_candidate(struct csched2_runqueue_data *rqd,
                unsigned int *skipped)
 {
     struct list_head *iter, *temp;
+    struct sched_resource *sr = get_sched_res(cpu);
     struct csched2_unit *snext = NULL;
-    struct csched2_private *prv = csched2_priv(per_cpu(scheduler, cpu));
+    struct csched2_private *prv = csched2_priv(sr->scheduler);
     bool yield = false, soft_aff_preempt = false;
 
     *skipped = 0;
diff --git a/xen/common/schedule.c b/xen/common/schedule.c
index a6e41be9b3..6da96695c2 100644
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -75,7 +75,6 @@ static void vcpu_singleshot_timer_fn(void *data);
 static void poll_timer_fn(void *data);
 
 /* This is global for now so that private implementations can reach it */
-DEFINE_PER_CPU(struct scheduler *, scheduler);
 DEFINE_PER_CPU_READ_MOSTLY(struct sched_resource *, sched_res);
 static DEFINE_PER_CPU_READ_MOSTLY(unsigned int, sched_res_idx);
 
@@ -200,7 +199,7 @@ static inline struct scheduler *unit_scheduler(const struct sched_unit *unit)
      */
     ASSERT(is_idle_domain(d));
 
-    return per_cpu(scheduler, unit->res->master_cpu);
+    return unit->res->scheduler;
 }
 
 static inline struct scheduler *vcpu_scheduler(const struct vcpu *v)
@@ -1921,8 +1920,8 @@ static bool sched_tasklet_check(unsigned int cpu)
 static struct sched_unit *do_schedule(struct sched_unit *prev, s_time_t now,
                                       unsigned int cpu)
 {
-    struct scheduler *sched = per_cpu(scheduler, cpu);
     struct sched_resource *sr = get_sched_res(cpu);
+    struct scheduler *sched = sr->scheduler;
     struct sched_unit *next;
 
     /* get policy-specific decision on scheduling... */
@@ -2346,7 +2345,7 @@ static int cpu_schedule_up(unsigned int cpu)
     sr->cpus = cpumask_of(cpu);
     set_sched_res(cpu, sr);
 
-    per_cpu(scheduler, cpu) = &sched_idle_ops;
+    sr->scheduler = &sched_idle_ops;
     spin_lock_init(&sr->_lock);
     sr->schedule_lock = &sched_free_cpu_lock;
     init_timer(&sr->s_timer, s_timer_fn, NULL, cpu);
@@ -2557,7 +2556,7 @@ int schedule_cpu_switch(unsigned int cpu, struct cpupool *c)
 {
     struct vcpu *idle;
     void *ppriv, *ppriv_old, *vpriv, *vpriv_old;
-    struct scheduler *old_ops = per_cpu(scheduler, cpu);
+    struct scheduler *old_ops = get_sched_res(cpu)->scheduler;
     struct scheduler *new_ops = (c == NULL) ? &sched_idle_ops : c->sched;
     struct cpupool *old_pool = per_cpu(cpupool, cpu);
     struct sched_resource *sd = get_sched_res(cpu);
@@ -2621,7 +2620,7 @@ int schedule_cpu_switch(unsigned int cpu, struct cpupool *c)
     ppriv_old = sd->sched_priv;
     new_lock = sched_switch_sched(new_ops, cpu, ppriv, vpriv);
 
-    per_cpu(scheduler, cpu) = new_ops;
+    sd->scheduler = new_ops;
     sd->sched_priv = ppriv;
 
     /*
@@ -2721,7 +2720,7 @@ void sched_tick_suspend(void)
     struct scheduler *sched;
     unsigned int cpu = smp_processor_id();
 
-    sched = per_cpu(scheduler, cpu);
+    sched = get_sched_res(cpu)->scheduler;
     sched_do_tick_suspend(sched, cpu);
     rcu_idle_enter(cpu);
     rcu_idle_timer_start();
@@ -2734,7 +2733,7 @@ void sched_tick_resume(void)
     rcu_idle_timer_stop();
     rcu_idle_exit(cpu);
 
-    sched = per_cpu(scheduler, cpu);
+    sched = get_sched_res(cpu)->scheduler;
     sched_do_tick_resume(sched, cpu);
 }
 
diff --git a/xen/include/xen/sched-if.h b/xen/include/xen/sched-if.h
index 021c1d7c2c..01821b3e5b 100644
--- a/xen/include/xen/sched-if.h
+++ b/xen/include/xen/sched-if.h
@@ -36,6 +36,7 @@ extern const cpumask_t *sched_res_mask;
  * as the rest of the struct. Just have the scheduler point to the
  * one it wants (This may be the one right in front of it).*/
 struct sched_resource {
+    struct scheduler   *scheduler;
     spinlock_t         *schedule_lock,
                        _lock;
     struct sched_unit  *curr;
@@ -49,7 +50,6 @@ struct sched_resource {
     const cpumask_t    *cpus;           /* cpus covered by this struct */
 };
 
-DECLARE_PER_CPU(struct scheduler *, scheduler);
 DECLARE_PER_CPU(struct cpupool *, cpupool);
 DECLARE_PER_CPU(struct sched_resource *, sched_res);
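The pattern the patch applies generalizes beyond Xen: when several pieces
of per-CPU state are always consumed together, folding them into one
per-CPU descriptor replaces parallel lookups with a single one. A
self-contained sketch of the idea in plain C (all names here are
hypothetical and deliberately not Xen code):

    #include <stdio.h>

    #define NR_CPUS 4

    struct ops { const char *name; };

    /*
     * One descriptor per CPU: the ops pointer lives next to the rest of
     * the per-CPU scheduling state, mirroring struct sched_resource
     * after this commit.
     */
    struct cpu_res {
        struct ops  *ops;      /* previously: its own per-CPU variable */
        unsigned int tick;     /* stand-in for the remaining fields */
    };

    static struct cpu_res resources[NR_CPUS];

    static struct cpu_res *get_res(unsigned int cpu)
    {
        return &resources[cpu];
    }

    int main(void)
    {
        static struct ops idle_ops = { "idle" };

        /* Initialization mirrors cpu_schedule_up() setting
         * sr->scheduler = &sched_idle_ops; in the patch. */
        for (unsigned int cpu = 0; cpu < NR_CPUS; cpu++)
            get_res(cpu)->ops = &idle_ops;

        printf("cpu0 ops: %s\n", get_res(0)->ops->name);
        return 0;
    }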
--
generated by git-patchbot for /home/xen/git/xen.git#staging