[Xen-changelog] [xen master] xen: sched: factor affinity helpers out of sched_credit.c
commit cf3dd5afadfd88dbb6e891a2a29ccfda6788b339
Author:     Dario Faggioli <dario.faggioli@xxxxxxxxxx>
AuthorDate: Wed Jul 26 15:55:27 2017 +0100
Commit:     George Dunlap <george.dunlap@xxxxxxxxxx>
CommitDate: Wed Jul 26 15:55:27 2017 +0100

    xen: sched: factor affinity helpers out of sched_credit.c

    In fact, we want to be able to use them from any scheduler.

    While there, make the moved code use 'v' for struct vcpu*
    variables, like it should be done everywhere.

    No functional change intended.

    Signed-off-by: Dario Faggioli <dario.faggioli@xxxxxxxxxx>
    Signed-off-by: Justin T. Weaver <jtweaver@xxxxxxxxxx>
    Reviewed-by: George Dunlap <george.dunlap@xxxxxxxxxx>
---
 xen/common/sched_credit.c  | 97 +++++++---------------------------------------
 xen/include/xen/sched-if.h | 64 ++++++++++++++++++++++++++++++
 2 files changed, 79 insertions(+), 82 deletions(-)

diff --git a/xen/common/sched_credit.c b/xen/common/sched_credit.c
index 4f6330e..4fdaa08 100644
--- a/xen/common/sched_credit.c
+++ b/xen/common/sched_credit.c
@@ -136,27 +136,6 @@
 #define TRC_CSCHED_RATELIMIT     TRC_SCHED_CLASS_EVT(CSCHED, 10)
 #define TRC_CSCHED_STEAL_CHECK   TRC_SCHED_CLASS_EVT(CSCHED, 11)
 
-
-/*
- * Hard and soft affinity load balancing.
- *
- * Idea is each vcpu has some pcpus that it prefers, some that it does not
- * prefer but is OK with, and some that it cannot run on at all. The first
- * set of pcpus are the ones that are both in the soft affinity *and* in the
- * hard affinity; the second set of pcpus are the ones that are in the hard
- * affinity but *not* in the soft affinity; the third set of pcpus are the
- * ones that are not in the hard affinity.
- *
- * We implement a two step balancing logic. Basically, every time there is
- * the need to decide where to run a vcpu, we first check the soft affinity
- * (well, actually, the && between soft and hard affinity), to see if we can
- * send it where it prefers to (and can) run on. However, if the first step
- * does not find any suitable and free pcpu, we fall back checking the hard
- * affinity.
- */
-#define CSCHED_BALANCE_SOFT_AFFINITY    0
-#define CSCHED_BALANCE_HARD_AFFINITY    1
-
 /*
  * Boot parameters
  */
@@ -342,52 +321,6 @@ runq_remove(struct csched_vcpu *svc)
     __runq_remove(svc);
 }
 
-#define for_each_csched_balance_step(step) \
-    for ( (step) = 0; (step) <= CSCHED_BALANCE_HARD_AFFINITY; (step)++ )
-
-
-/*
- * Hard affinity balancing is always necessary and must never be skipped.
- * But soft affinity need only be considered when it has a functionally
- * different effect than other constraints (such as hard affinity, cpus
- * online, or cpupools).
- *
- * Soft affinity only needs to be considered if:
- *  * The cpus in the cpupool are not a subset of soft affinity
- *  * The hard affinity is not a subset of soft affinity
- *  * There is an overlap between the soft affinity and the mask which is
- *    currently being considered.
- */
-static inline int __vcpu_has_soft_affinity(const struct vcpu *vc,
-                                           const cpumask_t *mask)
-{
-    return !cpumask_subset(cpupool_domain_cpumask(vc->domain),
-                           vc->cpu_soft_affinity) &&
-           !cpumask_subset(vc->cpu_hard_affinity, vc->cpu_soft_affinity) &&
-           cpumask_intersects(vc->cpu_soft_affinity, mask);
-}
-
-/*
- * Each csched-balance step uses its own cpumask. This function determines
- * which one (given the step) and copies it in mask. For the soft affinity
- * balancing step, the pcpus that are not part of vc's hard affinity are
- * filtered out from the result, to avoid running a vcpu where it would
- * like, but is not allowed to!
- */
-static void
-csched_balance_cpumask(const struct vcpu *vc, int step, cpumask_t *mask)
-{
-    if ( step == CSCHED_BALANCE_SOFT_AFFINITY )
-    {
-        cpumask_and(mask, vc->cpu_soft_affinity, vc->cpu_hard_affinity);
-
-        if ( unlikely(cpumask_empty(mask)) )
-            cpumask_copy(mask, vc->cpu_hard_affinity);
-    }
-    else /* step == CSCHED_BALANCE_HARD_AFFINITY */
-        cpumask_copy(mask, vc->cpu_hard_affinity);
-}
-
 static void burn_credits(struct csched_vcpu *svc, s_time_t now)
 {
     s_time_t delta;
@@ -452,18 +385,18 @@ static inline void __runq_tickle(struct csched_vcpu *new)
      * Soft and hard affinity balancing loop. For vcpus without
      * a useful soft affinity, consider hard affinity only.
      */
-    for_each_csched_balance_step( balance_step )
+    for_each_affinity_balance_step( balance_step )
     {
         int new_idlers_empty;
 
-        if ( balance_step == CSCHED_BALANCE_SOFT_AFFINITY
-             && !__vcpu_has_soft_affinity(new->vcpu,
-                                          new->vcpu->cpu_hard_affinity) )
+        if ( balance_step == BALANCE_SOFT_AFFINITY
+             && !has_soft_affinity(new->vcpu,
+                                   new->vcpu->cpu_hard_affinity) )
             continue;
 
         /* Are there idlers suitable for new (for this balance step)? */
-        csched_balance_cpumask(new->vcpu, balance_step,
-                               cpumask_scratch_cpu(cpu));
+        affinity_balance_cpumask(new->vcpu, balance_step,
+                                 cpumask_scratch_cpu(cpu));
         cpumask_and(cpumask_scratch_cpu(cpu), cpumask_scratch_cpu(cpu),
                     &idle_mask);
         new_idlers_empty = cpumask_empty(cpumask_scratch_cpu(cpu));
@@ -474,7 +407,7 @@ static inline void __runq_tickle(struct csched_vcpu *new)
          * hard affinity as well, before taking final decisions.
          */
         if ( new_idlers_empty
-             && balance_step == CSCHED_BALANCE_SOFT_AFFINITY )
+             && balance_step == BALANCE_SOFT_AFFINITY )
             continue;
 
         /*
@@ -800,7 +733,7 @@ _csched_cpu_pick(const struct scheduler *ops, struct vcpu *vc, bool_t commit)
     online = cpupool_domain_cpumask(vc->domain);
     cpumask_and(&cpus, vc->cpu_hard_affinity, online);
 
-    for_each_csched_balance_step( balance_step )
+    for_each_affinity_balance_step( balance_step )
     {
         /*
          * We want to pick up a pcpu among the ones that are online and
@@ -820,12 +753,12 @@ _csched_cpu_pick(const struct scheduler *ops, struct vcpu *vc, bool_t commit)
          * cpus and, if the result is empty, we just skip the soft affinity
          * balancing step all together.
          */
-        if ( balance_step == CSCHED_BALANCE_SOFT_AFFINITY
-             && !__vcpu_has_soft_affinity(vc, &cpus) )
+        if ( balance_step == BALANCE_SOFT_AFFINITY
+             && !has_soft_affinity(vc, &cpus) )
             continue;
 
         /* Pick an online CPU from the proper affinity mask */
-        csched_balance_cpumask(vc, balance_step, &cpus);
+        affinity_balance_cpumask(vc, balance_step, &cpus);
         cpumask_and(&cpus, &cpus, online);
 
         /* If present, prefer vc's current processor */
@@ -1721,11 +1654,11 @@ csched_runq_steal(int peer_cpu, int cpu, int pri, int balance_step)
          * or counter.
          */
         if ( vc->is_running ||
-             (balance_step == CSCHED_BALANCE_SOFT_AFFINITY
-              && !__vcpu_has_soft_affinity(vc, vc->cpu_hard_affinity)) )
+             (balance_step == BALANCE_SOFT_AFFINITY
+              && !has_soft_affinity(vc, vc->cpu_hard_affinity)) )
             continue;
 
-        csched_balance_cpumask(vc, balance_step, cpumask_scratch);
+        affinity_balance_cpumask(vc, balance_step, cpumask_scratch);
         if ( __csched_vcpu_is_migrateable(vc, cpu, cpumask_scratch) )
         {
             /* We got a candidate. Grab it! */
@@ -1785,7 +1718,7 @@ csched_load_balance(struct csched_private *prv, int cpu,
      *  1. any "soft-affine work" to steal first,
      *  2. if not finding anything, any "hard-affine work" to steal.
      */
-    for_each_csched_balance_step( bstep )
+    for_each_affinity_balance_step( bstep )
     {
         /*
          * We peek at the non-idling CPUs in a node-wise fashion. In fact,
diff --git a/xen/include/xen/sched-if.h b/xen/include/xen/sched-if.h
index c32ee7a..c4a4935 100644
--- a/xen/include/xen/sched-if.h
+++ b/xen/include/xen/sched-if.h
@@ -208,4 +208,68 @@ static inline cpumask_t* cpupool_domain_cpumask(struct domain *d)
     return d->cpupool->cpu_valid;
 }
 
+/*
+ * Hard and soft affinity load balancing.
+ *
+ * Idea is each vcpu has some pcpus that it prefers, some that it does not
+ * prefer but is OK with, and some that it cannot run on at all. The first
+ * set of pcpus are the ones that are both in the soft affinity *and* in the
+ * hard affinity; the second set of pcpus are the ones that are in the hard
+ * affinity but *not* in the soft affinity; the third set of pcpus are the
+ * ones that are not in the hard affinity.
+ *
+ * We implement a two step balancing logic. Basically, every time there is
+ * the need to decide where to run a vcpu, we first check the soft affinity
+ * (well, actually, the && between soft and hard affinity), to see if we can
+ * send it where it prefers to (and can) run on. However, if the first step
+ * does not find any suitable and free pcpu, we fall back checking the hard
+ * affinity.
+ */
+#define BALANCE_SOFT_AFFINITY    0
+#define BALANCE_HARD_AFFINITY    1
+
+#define for_each_affinity_balance_step(step) \
+    for ( (step) = 0; (step) <= BALANCE_HARD_AFFINITY; (step)++ )
+
+/*
+ * Hard affinity balancing is always necessary and must never be skipped.
+ * But soft affinity need only be considered when it has a functionally
+ * different effect than other constraints (such as hard affinity, cpus
+ * online, or cpupools).
+ *
+ * Soft affinity only needs to be considered if:
+ *  * The cpus in the cpupool are not a subset of soft affinity
+ *  * The hard affinity is not a subset of soft affinity
+ *  * There is an overlap between the soft affinity and the mask which is
+ *    currently being considered.
+ */
+static inline int has_soft_affinity(const struct vcpu *v,
+                                    const cpumask_t *mask)
+{
+    return !cpumask_subset(cpupool_domain_cpumask(v->domain),
+                           v->cpu_soft_affinity) &&
+           !cpumask_subset(v->cpu_hard_affinity, v->cpu_soft_affinity) &&
+           cpumask_intersects(v->cpu_soft_affinity, mask);
+}
+
+/*
+ * This function copies in mask the cpumask that should be used for a
+ * particular affinity balancing step. For the soft affinity one, the pcpus
+ * that are not part of vc's hard affinity are filtered out from the result,
+ * to avoid running a vcpu where it would like, but is not allowed to!
+ */
+static inline void
+affinity_balance_cpumask(const struct vcpu *v, int step, cpumask_t *mask)
+{
+    if ( step == BALANCE_SOFT_AFFINITY )
+    {
+        cpumask_and(mask, v->cpu_soft_affinity, v->cpu_hard_affinity);
+
+        if ( unlikely(cpumask_empty(mask)) )
+            cpumask_copy(mask, v->cpu_hard_affinity);
+    }
+    else /* step == BALANCE_HARD_AFFINITY */
+        cpumask_copy(mask, v->cpu_hard_affinity);
+}
+
 #endif /* __XEN_SCHED_IF_H__ */
--
generated by git-patchbot for /home/xen/git/xen.git#master
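
Editor's note: for readers not familiar with the scheduler internals, below is a minimal, self-contained sketch of the two-step balancing logic that the comment moved into sched-if.h describes. It is not Xen code: plain uint64_t bitmasks stand in for cpumask_t, the vcpu/pcpu structures and locking are omitted, and the helper names balance_mask() and pick_idle_cpu() are invented for illustration; only the BALANCE_* steps and the soft-then-hard fallback mirror the helpers added by this commit.

/* Standalone model of the two-step affinity balancing (not Xen code). */
#include <stdint.h>
#include <stdio.h>

#define BALANCE_SOFT_AFFINITY 0
#define BALANCE_HARD_AFFINITY 1

#define for_each_affinity_balance_step(step) \
    for ( (step) = 0; (step) <= BALANCE_HARD_AFFINITY; (step)++ )

/*
 * Rough analogue of affinity_balance_cpumask(): pick the mask to use for
 * this step. The soft step uses soft & hard; if that is empty, fall back
 * to the hard affinity so the vcpu is never placed where it cannot run.
 */
static uint64_t balance_mask(uint64_t soft, uint64_t hard, int step)
{
    if ( step == BALANCE_SOFT_AFFINITY )
    {
        uint64_t m = soft & hard;
        return m ? m : hard;
    }
    return hard;
}

/* Return the lowest-numbered idle pcpu acceptable for the vcpu, or -1. */
static int pick_idle_cpu(uint64_t soft, uint64_t hard, uint64_t idlers)
{
    int step;

    for_each_affinity_balance_step( step )
    {
        uint64_t candidates = balance_mask(soft, hard, step) & idlers;

        if ( candidates )
            return __builtin_ctzll(candidates); /* GCC/Clang builtin */
        /* Soft step found nothing suitable: retry with hard affinity. */
    }
    return -1;
}

int main(void)
{
    /* vcpu prefers pcpus 0-1, may run on 0-3; only pcpus 2 and 3 are idle. */
    uint64_t soft = 0x3, hard = 0xf, idlers = 0xc;

    printf("picked pcpu %d\n", pick_idle_cpu(soft, hard, idlers));
    return 0;
}

Run as-is this should print "picked pcpu 2": the soft step finds no idler inside soft & hard (pcpus 0-1 are busy), so the hard step falls back to the full hard affinity, where pcpu 2 is the first idle candidate.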