[Xen-devel] [PATCH] xen: sched: get rid of cpupool_scheduler_cpumask()
and of (almost every) direct use of cpupool_online_cpumask().

In fact, what we really want, most of the time, is the set of valid
pCPUs of the cpupool a certain domain is part of. Furthermore, when
called with a NULL pool as argument, cpupool_scheduler_cpumask() does
more harm than good, as it returns the bitmask of free pCPUs!

This commit, therefore:
 * gets rid of cpupool_scheduler_cpumask(), in favour of
   cpupool_domain_cpumask(), which makes it more evident what we
   are after, and accommodates some sanity checking;
 * replaces some of the calls to cpupool_online_cpumask() with
   calls to the new function too.

Signed-off-by: Dario Faggioli <dario.faggioli@xxxxxxxxxx>
Acked-by: Juergen Gross <jgross@xxxxxxxx>
Acked-by: Joshua Whitehead <josh.whitehead@xxxxxxxxxxxxxxx>
Reviewed-by: Meng Xu <mengxu@xxxxxxxxxxxxx>
---
Cc: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
Cc: Robert VanVossen <robert.vanvossen@xxxxxxxxxxxxxxx>
Cc: Jan Beulich <jbeulich@xxxxxxxx>
---
 xen/common/domain.c         |  5 +++--
 xen/common/domctl.c         |  4 ++--
 xen/common/sched_arinc653.c |  2 +-
 xen/common/sched_credit.c   |  8 +++-----
 xen/common/sched_rt.c       | 12 ++++++------
 xen/common/schedule.c       |  2 +-
 xen/include/xen/sched-if.h  | 12 ++++++++++--
 7 files changed, 26 insertions(+), 19 deletions(-)

diff --git a/xen/common/domain.c b/xen/common/domain.c
index 8e7a282..cda60a9 100644
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -191,7 +191,8 @@ struct vcpu *alloc_vcpu(
     /* Must be called after making new vcpu visible to for_each_vcpu(). */
     vcpu_check_shutdown(v);
 
-    domain_update_node_affinity(d);
+    if ( !is_idle_domain(d) )
+        domain_update_node_affinity(d);
 
     return v;
 }
@@ -444,7 +445,7 @@ void domain_update_node_affinity(struct domain *d)
         return;
     }
 
-    online = cpupool_online_cpumask(d->cpupool);
+    online = cpupool_domain_cpumask(d);
 
     spin_lock(&d->node_affinity_lock);
 
diff --git a/xen/common/domctl.c b/xen/common/domctl.c
index 9e0fef5..08de32d 100644
--- a/xen/common/domctl.c
+++ b/xen/common/domctl.c
@@ -664,7 +664,7 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
             goto maxvcpu_out;
 
         ret = -ENOMEM;
-        online = cpupool_online_cpumask(d->cpupool);
+        online = cpupool_domain_cpumask(d);
         if ( max > d->max_vcpus )
         {
             struct vcpu **vcpus;
@@ -757,7 +757,7 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
         if ( op->cmd == XEN_DOMCTL_setvcpuaffinity )
         {
             cpumask_var_t new_affinity, old_affinity;
-            cpumask_t *online = cpupool_online_cpumask(v->domain->cpupool);;
+            cpumask_t *online = cpupool_domain_cpumask(v->domain);;
 
             /*
              * We want to be able to restore hard affinity if we are trying
diff --git a/xen/common/sched_arinc653.c b/xen/common/sched_arinc653.c
index cff5da9..dbe02ed 100644
--- a/xen/common/sched_arinc653.c
+++ b/xen/common/sched_arinc653.c
@@ -667,7 +667,7 @@ a653sched_pick_cpu(const struct scheduler *ops, struct vcpu *vc)
      * If present, prefer vc's current processor, else
      * just find the first valid vcpu .
      */
-    online = cpupool_scheduler_cpumask(vc->domain->cpupool);
+    online = cpupool_domain_cpumask(vc->domain);
 
     cpu = cpumask_first(online);
 
diff --git a/xen/common/sched_credit.c b/xen/common/sched_credit.c
index a1945ac..27a6439 100644
--- a/xen/common/sched_credit.c
+++ b/xen/common/sched_credit.c
@@ -309,7 +309,7 @@ __runq_remove(struct csched_vcpu *svc)
 static inline int
 __vcpu_has_soft_affinity(const struct vcpu *vc, const cpumask_t *mask)
 {
-    return !cpumask_subset(cpupool_online_cpumask(vc->domain->cpupool),
+    return !cpumask_subset(cpupool_domain_cpumask(vc->domain),
                            vc->cpu_soft_affinity) &&
            !cpumask_subset(vc->cpu_hard_affinity, vc->cpu_soft_affinity) &&
            cpumask_intersects(vc->cpu_soft_affinity, mask);
@@ -372,9 +372,7 @@ __runq_tickle(unsigned int cpu, struct csched_vcpu *new)
     ASSERT(cur);
     cpumask_clear(&mask);
 
-    /* cpu is vc->processor, so it must be in a cpupool. */
-    ASSERT(per_cpu(cpupool, cpu) != NULL);
-    online = cpupool_online_cpumask(per_cpu(cpupool, cpu));
+    online = cpupool_domain_cpumask(new->sdom->dom);
     cpumask_and(&idle_mask, prv->idlers, online);
     idlers_empty = cpumask_empty(&idle_mask);
 
@@ -641,7 +639,7 @@ _csched_cpu_pick(const struct scheduler *ops, struct vcpu *vc, bool_t commit)
     int balance_step;
 
     /* Store in cpus the mask of online cpus on which the domain can run */
-    online = cpupool_scheduler_cpumask(vc->domain->cpupool);
+    online = cpupool_domain_cpumask(vc->domain);
     cpumask_and(&cpus, vc->cpu_hard_affinity, online);
 
     for_each_csched_balance_step( balance_step )
diff --git a/xen/common/sched_rt.c b/xen/common/sched_rt.c
index 4372486..08611c8 100644
--- a/xen/common/sched_rt.c
+++ b/xen/common/sched_rt.c
@@ -256,7 +256,7 @@ rt_dump_vcpu(const struct scheduler *ops, const struct rt_vcpu *svc)
      */
     mask = _cpumask_scratch[svc->vcpu->processor];
 
-    cpupool_mask = cpupool_scheduler_cpumask(svc->vcpu->domain->cpupool);
+    cpupool_mask = cpupool_domain_cpumask(svc->vcpu->domain);
     cpumask_and(mask, cpupool_mask, svc->vcpu->cpu_hard_affinity);
     cpulist_scnprintf(keyhandler_scratch, sizeof(keyhandler_scratch), mask);
     printk("[%5d.%-2u] cpu %u, (%"PRI_stime", %"PRI_stime"),"
@@ -673,7 +673,7 @@ rt_cpu_pick(const struct scheduler *ops, struct vcpu *vc)
     cpumask_t *online;
     int cpu;
 
-    online = cpupool_scheduler_cpumask(vc->domain->cpupool);
+    online = cpupool_domain_cpumask(vc->domain);
     cpumask_and(&cpus, online, vc->cpu_hard_affinity);
 
     cpu = cpumask_test_cpu(vc->processor, &cpus)
@@ -753,7 +753,7 @@ __runq_pick(const struct scheduler *ops, const cpumask_t *mask)
         iter_svc = __q_elem(iter);
 
         /* mask cpu_hard_affinity & cpupool & mask */
-        online = cpupool_scheduler_cpumask(iter_svc->vcpu->domain->cpupool);
+        online = cpupool_domain_cpumask(iter_svc->vcpu->domain);
         cpumask_and(&cpu_common, online, iter_svc->vcpu->cpu_hard_affinity);
         cpumask_and(&cpu_common, mask, &cpu_common);
         if ( cpumask_empty(&cpu_common) )
@@ -965,7 +965,7 @@ runq_tickle(const struct scheduler *ops, struct rt_vcpu *new)
     if ( new == NULL || is_idle_vcpu(new->vcpu) )
         return;
 
-    online = cpupool_scheduler_cpumask(new->vcpu->domain->cpupool);
+    online = cpupool_domain_cpumask(new->vcpu->domain);
     cpumask_and(&not_tickled, online, new->vcpu->cpu_hard_affinity);
     cpumask_andnot(&not_tickled, &not_tickled, &prv->tickled);
 
@@ -1078,7 +1078,7 @@ rt_vcpu_wake(const struct scheduler *ops, struct vcpu *vc)
     ASSERT(!list_empty(&prv->sdom));
     sdom = list_entry(prv->sdom.next, struct rt_dom, sdom_elem);
-    online = cpupool_scheduler_cpumask(sdom->dom->cpupool);
+    online = cpupool_domain_cpumask(sdom->dom);
     snext = __runq_pick(ops, online); /* pick snext from ALL valid cpus */
 
     runq_tickle(ops, snext);
@@ -1113,7 +1113,7 @@ rt_context_saved(const struct scheduler *ops, struct vcpu *vc)
     ASSERT(!list_empty(&prv->sdom));
     sdom = list_entry(prv->sdom.next, struct rt_dom, sdom_elem);
-    online = cpupool_scheduler_cpumask(sdom->dom->cpupool);
+    online = cpupool_domain_cpumask(sdom->dom);
     snext = __runq_pick(ops, online); /* pick snext from ALL cpus */
 
     runq_tickle(ops, snext);
diff --git a/xen/common/schedule.c b/xen/common/schedule.c
index 3eefed7..5ffa1a1 100644
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -79,7 +79,7 @@ static struct scheduler __read_mostly ops;
 
 #define DOM2OP(_d)    (((_d)->cpupool == NULL) ? &ops : ((_d)->cpupool->sched))
 #define VCPU2OP(_v)   (DOM2OP((_v)->domain))
-#define VCPU2ONLINE(_v) cpupool_online_cpumask((_v)->domain->cpupool)
+#define VCPU2ONLINE(_v) cpupool_domain_cpumask((_v)->domain)
 
 static inline void trace_runstate_change(struct vcpu *v, int new_state)
 {
diff --git a/xen/include/xen/sched-if.h b/xen/include/xen/sched-if.h
index dbe7cab..493d43f 100644
--- a/xen/include/xen/sched-if.h
+++ b/xen/include/xen/sched-if.h
@@ -182,9 +182,17 @@ struct cpupool
     atomic_t         refcnt;
 };
 
-#define cpupool_scheduler_cpumask(_pool) \
-    (((_pool) == NULL) ? &cpupool_free_cpus : (_pool)->cpu_valid)
 #define cpupool_online_cpumask(_pool) \
     (((_pool) == NULL) ? &cpu_online_map : (_pool)->cpu_valid)
 
+static inline cpumask_t* cpupool_domain_cpumask(struct domain *d)
+{
+    /*
+     * d->cpupool is NULL only for the idle domain, and no one should
+     * be interested in calling this for the idle domain.
+     */
+    ASSERT(d->cpupool != NULL);
+    return d->cpupool->cpu_valid;
+}
+
 #endif /* __XEN_SCHED_IF_H__ */
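For readers following along: every call site touched above converges on
one idiom, namely intersecting the domain's cpupool mask with the vCPU's
hard affinity to get the pCPUs the vCPU may actually run on. Below is a
minimal sketch of that idiom; vcpu_runnable_cpus() is a hypothetical
helper, not part of this patch, and it assumes only the cpumask
primitives already used in the hunks plus the new
cpupool_domain_cpumask() from sched-if.h.

    /*
     * Hypothetical helper (illustration only): compute in 'mask' the
     * set of pCPUs on which vCPU 'v' can run, i.e. the valid pCPUs of
     * its domain's cpupool, intersected with its hard affinity.
     */
    static void vcpu_runnable_cpus(const struct vcpu *v, cpumask_t *mask)
    {
        /*
         * Unlike cpupool_scheduler_cpumask(NULL), which silently handed
         * back the free pCPUs, this ASSERTs (on debug builds) if it is
         * ever called for the idle domain, whose cpupool is NULL.
         */
        cpumask_t *online = cpupool_domain_cpumask(v->domain);

        cpumask_and(mask, online, v->cpu_hard_affinity);
    }

That difference is the point of the change: with the old macro, a bogus
caller could quietly end up scheduling on pCPUs belonging to no pool,
whereas the new helper turns the same mistake into an immediate,
debuggable failure.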