
[Xen-changelog] [xen master] xen: sched: improve checking soft-affinity



commit 5e9e3f88fb88fa62b5fc7a9f80d9ef2a95fb0636
Author:     Dario Faggioli <dfaggioli@xxxxxxxx>
AuthorDate: Wed Mar 21 17:17:46 2018 +0000
Commit:     George Dunlap <george.dunlap@xxxxxxxxxx>
CommitDate: Wed Mar 21 17:19:08 2018 +0000

    xen: sched: improve checking soft-affinity
    
    The function has_soft_affinity() determines whether the soft-affinity
    of a vcpu will have any effect -- that is, whether it makes any
    difference, scheduling-wise, compared to an empty soft-affinity mask.
    
    The function takes a custom cpumask as its second parameter, for
    flexibility; but that mask differs from the vCPU's hard-affinity in
    only one case. Getting rid of that parameter not only simplifies the
    function, but also enables optimizing the soft-affinity check.
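    
    For illustration, here is a minimal, self-contained sketch of the three
    conditions the simplified check boils down to (this is not Xen code:
    plain bitmasks stand in for cpumask_t, and all names are made up for
    the sake of the example):
    
        #include <stdbool.h>
        #include <stdio.h>
    
        /* Toy model: each bit of an unsigned long stands for one pCPU. */
        typedef unsigned long toy_mask_t;
    
        static bool toy_subset(toy_mask_t a, toy_mask_t b) /* a subset of b? */
        {
            return (a & ~b) == 0;
        }
    
        /*
         * Soft affinity only matters if the cpupool's cpus are not a
         * subset of it, the hard affinity is not a subset of it, and it
         * overlaps the hard affinity.
         */
        static bool toy_has_soft_affinity(toy_mask_t pool, toy_mask_t hard,
                                          toy_mask_t soft)
        {
            return !toy_subset(pool, soft) && !toy_subset(hard, soft) &&
                   (soft & hard) != 0;
        }
    
        int main(void)
        {
            toy_mask_t pool = 0xf, hard = 0xf;
    
            /* Soft affinity spanning everything: behaves like an empty mask. */
            printf("%d\n", toy_has_soft_affinity(pool, hard, 0xf)); /* 0 */
            /* Soft affinity restricted to pCPUs 0-1: it does matter. */
            printf("%d\n", toy_has_soft_affinity(pool, hard, 0x3)); /* 1 */
            /* Soft affinity disjoint from hard affinity: ignored. */
            printf("%d\n", toy_has_soft_affinity(pool, 0x3, 0xc)); /* 0 */
            return 0;
        }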
    
    The change is mostly mechanical, with the exception of
    sched_credit.c:_csched_cpu_pick(), which was the one case where we
    passed in something other than the existing hard-affinity.
    
    Signed-off-by: Dario Faggioli <dfaggioli@xxxxxxxx>
    Reviewed-by: George Dunlap <george.dunlap@xxxxxxxxxx>
---
 xen/common/sched_credit.c  | 74 ++++++++++++++++++++--------------------------
 xen/common/sched_credit2.c | 10 +++----
 xen/common/sched_null.c    |  8 ++---
 xen/include/xen/sched-if.h |  8 ++---
 4 files changed, 42 insertions(+), 58 deletions(-)

diff --git a/xen/common/sched_credit.c b/xen/common/sched_credit.c
index fb7638f809..9bc638c09c 100644
--- a/xen/common/sched_credit.c
+++ b/xen/common/sched_credit.c
@@ -410,8 +410,7 @@ static inline void __runq_tickle(struct csched_vcpu *new)
             int new_idlers_empty;
 
             if ( balance_step == BALANCE_SOFT_AFFINITY
-                 && !has_soft_affinity(new->vcpu,
-                                       new->vcpu->cpu_hard_affinity) )
+                 && !has_soft_affinity(new->vcpu) )
                 continue;
 
             /* Are there idlers suitable for new (for this balance step)? */
@@ -733,50 +732,42 @@ __csched_vcpu_is_migrateable(const struct csched_private *prv, struct vcpu *vc,
 static int
 _csched_cpu_pick(const struct scheduler *ops, struct vcpu *vc, bool_t commit)
 {
-    cpumask_t cpus;
+    /* We must always use vc->processor's scratch space */
+    cpumask_t *cpus = cpumask_scratch_cpu(vc->processor);
     cpumask_t idlers;
-    cpumask_t *online;
+    cpumask_t *online = cpupool_domain_cpumask(vc->domain);
     struct csched_pcpu *spc = NULL;
     int cpu = vc->processor;
     int balance_step;
 
-    /* Store in cpus the mask of online cpus on which the domain can run */
-    online = cpupool_domain_cpumask(vc->domain);
-    cpumask_and(&cpus, vc->cpu_hard_affinity, online);
-
     for_each_affinity_balance_step( balance_step )
     {
+        affinity_balance_cpumask(vc, balance_step, cpus);
+        cpumask_and(cpus, online, cpus);
         /*
          * We want to pick up a pcpu among the ones that are online and
-         * can accommodate vc, which is basically what we computed above
-         * and stored in cpus. As far as hard affinity is concerned,
-         * there always will be at least one of these pcpus, hence cpus
-         * is never empty and the calls to cpumask_cycle() and
-         * cpumask_test_cpu() below are ok.
+         * can accommodate vc. As far as hard affinity is concerned, there
+         * always will be at least one of these pcpus in the scratch cpumask,
+         * hence, the calls to cpumask_cycle() and cpumask_test_cpu() below
+         * are ok.
          *
-         * On the other hand, when considering soft affinity too, it
-         * is possible for the mask to become empty (for instance, if the
-         * domain has been put in a cpupool that does not contain any of the
-         * pcpus in its soft affinity), which would result in the ASSERT()-s
-         * inside cpumask_*() operations triggering (in debug builds).
+         * On the other hand, when considering soft affinity, it is possible
+         * that the mask is empty (for instance, if the domain has been put
+         * in a cpupool that does not contain any of the pcpus in its soft
+         * affinity), which would result in the ASSERT()-s inside cpumask_*()
+         * operations triggering (in debug builds).
          *
-         * Therefore, in this case, we filter the soft affinity mask against
-         * cpus and, if the result is empty, we just skip the soft affinity
+         * Therefore, if that is the case, we just skip the soft affinity
          * balancing step altogether.
          */
-        if ( balance_step == BALANCE_SOFT_AFFINITY
-             && !has_soft_affinity(vc, &cpus) )
+        if ( balance_step == BALANCE_SOFT_AFFINITY &&
+             (!has_soft_affinity(vc) || cpumask_empty(cpus)) )
             continue;
 
-        /* Pick an online CPU from the proper affinity mask */
-        affinity_balance_cpumask(vc, balance_step, &cpus);
-        cpumask_and(&cpus, &cpus, online);
-
         /* If present, prefer vc's current processor */
-        cpu = cpumask_test_cpu(vc->processor, &cpus)
-                ? vc->processor
-                : cpumask_cycle(vc->processor, &cpus);
-        ASSERT(cpumask_test_cpu(cpu, &cpus));
+        cpu = cpumask_test_cpu(vc->processor, cpus)
+                ? vc->processor : cpumask_cycle(vc->processor, cpus);
+        ASSERT(cpumask_test_cpu(cpu, cpus));
 
         /*
          * Try to find an idle processor within the above constraints.
@@ -797,7 +788,7 @@ _csched_cpu_pick(const struct scheduler *ops, struct vcpu *vc, bool_t commit)
         cpumask_and(&idlers, &cpu_online_map, CSCHED_PRIV(ops)->idlers);
         if ( vc->processor == cpu && is_runq_idle(cpu) )
             __cpumask_set_cpu(cpu, &idlers);
-        cpumask_and(&cpus, &cpus, &idlers);
+        cpumask_and(cpus, &idlers, cpus);
 
         /*
          * It is important that cpu points to an idle processor, if a suitable
@@ -811,18 +802,18 @@ _csched_cpu_pick(const struct scheduler *ops, struct vcpu *vc, bool_t commit)
          * Notice that cpumask_test_cpu() is quicker than cpumask_empty(), so
          * we check for it first.
          */
-        if ( !cpumask_test_cpu(cpu, &cpus) && !cpumask_empty(&cpus) )
-            cpu = cpumask_cycle(cpu, &cpus);
-        __cpumask_clear_cpu(cpu, &cpus);
+        if ( !cpumask_test_cpu(cpu, cpus) && !cpumask_empty(cpus) )
+            cpu = cpumask_cycle(cpu, cpus);
+        __cpumask_clear_cpu(cpu, cpus);
 
-        while ( !cpumask_empty(&cpus) )
+        while ( !cpumask_empty(cpus) )
         {
             cpumask_t cpu_idlers;
             cpumask_t nxt_idlers;
             int nxt, weight_cpu, weight_nxt;
             int migrate_factor;
 
-            nxt = cpumask_cycle(cpu, &cpus);
+            nxt = cpumask_cycle(cpu, cpus);
 
             if ( cpumask_test_cpu(cpu, per_cpu(cpu_core_mask, nxt)) )
             {
@@ -852,14 +843,14 @@ _csched_cpu_pick(const struct scheduler *ops, struct vcpu *vc, bool_t commit)
                  weight_cpu > weight_nxt :
                  weight_cpu * migrate_factor < weight_nxt )
             {
-                cpumask_and(&nxt_idlers, &cpus, &nxt_idlers);
+                cpumask_and(&nxt_idlers, &nxt_idlers, cpus);
                 spc = CSCHED_PCPU(nxt);
                 cpu = cpumask_cycle(spc->idle_bias, &nxt_idlers);
-                cpumask_andnot(&cpus, &cpus, per_cpu(cpu_sibling_mask, cpu));
+                cpumask_andnot(cpus, cpus, per_cpu(cpu_sibling_mask, cpu));
             }
             else
             {
-                cpumask_andnot(&cpus, &cpus, &nxt_idlers);
+                cpumask_andnot(cpus, cpus, &nxt_idlers);
             }
         }
 
@@ -1660,9 +1651,8 @@ csched_runq_steal(int peer_cpu, int cpu, int pri, int balance_step)
          * vCPUs with useful soft affinities in some sort of bitmap
          * or counter.
          */
-        if ( vc->is_running ||
-             (balance_step == BALANCE_SOFT_AFFINITY
-              && !has_soft_affinity(vc, vc->cpu_hard_affinity)) )
+        if ( vc->is_running || (balance_step == BALANCE_SOFT_AFFINITY &&
+                                !has_soft_affinity(vc)) )
             continue;
 
         affinity_balance_cpumask(vc, balance_step, cpumask_scratch);
diff --git a/xen/common/sched_credit2.c b/xen/common/sched_credit2.c
index 0e5a8fc541..9a3e71f1c8 100644
--- a/xen/common/sched_credit2.c
+++ b/xen/common/sched_credit2.c
@@ -700,8 +700,7 @@ static int get_fallback_cpu(struct csched2_vcpu *svc)
     {
         int cpu = v->processor;
 
-        if ( bs == BALANCE_SOFT_AFFINITY &&
-             !has_soft_affinity(v, v->cpu_hard_affinity) )
+        if ( bs == BALANCE_SOFT_AFFINITY && !has_soft_affinity(v) )
             continue;
 
         affinity_balance_cpumask(v, bs, cpumask_scratch_cpu(cpu));
@@ -1484,8 +1483,7 @@ runq_tickle(const struct scheduler *ops, struct csched2_vcpu *new, s_time_t now)
     for_each_affinity_balance_step( bs )
     {
         /* Just skip first step, if we don't have a soft affinity */
-        if ( bs == BALANCE_SOFT_AFFINITY &&
-             !has_soft_affinity(new->vcpu, new->vcpu->cpu_hard_affinity) )
+        if ( bs == BALANCE_SOFT_AFFINITY && !has_soft_affinity(new->vcpu) )
             continue;
 
         affinity_balance_cpumask(new->vcpu, bs, cpumask_scratch_cpu(cpu));
@@ -2285,7 +2283,7 @@ csched2_cpu_pick(const struct scheduler *ops, struct vcpu *vc)
      *
      * Find both runqueues in one pass.
      */
-    has_soft = has_soft_affinity(vc, vc->cpu_hard_affinity);
+    has_soft = has_soft_affinity(vc);
     for_each_cpu(i, &prv->active_queues)
     {
         struct csched2_runqueue_data *rqd;
@@ -3307,7 +3305,7 @@ runq_candidate(struct csched2_runqueue_data *rqd,
     }
 
     /* If scurr has a soft-affinity, let's check whether cpu is part of it */
-    if ( has_soft_affinity(scurr->vcpu, scurr->vcpu->cpu_hard_affinity) )
+    if ( has_soft_affinity(scurr->vcpu) )
     {
         affinity_balance_cpumask(scurr->vcpu, BALANCE_SOFT_AFFINITY,
                                  cpumask_scratch);
diff --git a/xen/common/sched_null.c b/xen/common/sched_null.c
index 58ddf7d889..784db71027 100644
--- a/xen/common/sched_null.c
+++ b/xen/common/sched_null.c
@@ -278,8 +278,7 @@ static unsigned int pick_cpu(struct null_private *prv, struct vcpu *v)
 
     for_each_affinity_balance_step( bs )
     {
-        if ( bs == BALANCE_SOFT_AFFINITY &&
-             !has_soft_affinity(v, v->cpu_hard_affinity) )
+        if ( bs == BALANCE_SOFT_AFFINITY && !has_soft_affinity(v) )
             continue;
 
         affinity_balance_cpumask(v, bs, cpumask_scratch_cpu(cpu));
@@ -491,8 +490,7 @@ static void _vcpu_remove(struct null_private *prv, struct vcpu *v)
     {
         list_for_each_entry( wvc, &prv->waitq, waitq_elem )
         {
-            if ( bs == BALANCE_SOFT_AFFINITY &&
-                 !has_soft_affinity(wvc->vcpu, wvc->vcpu->cpu_hard_affinity) )
+            if ( bs == BALANCE_SOFT_AFFINITY && !has_soft_affinity(wvc->vcpu) )
                 continue;
 
             if ( vcpu_check_affinity(wvc->vcpu, cpu, bs) )
@@ -761,7 +759,7 @@ static struct task_slice null_schedule(const struct scheduler *ops,
             list_for_each_entry( wvc, &prv->waitq, waitq_elem )
             {
                 if ( bs == BALANCE_SOFT_AFFINITY &&
-                     !has_soft_affinity(wvc->vcpu, wvc->vcpu->cpu_hard_affinity) )
+                     !has_soft_affinity(wvc->vcpu) )
                     continue;
 
                 if ( vcpu_check_affinity(wvc->vcpu, cpu, bs) )
diff --git a/xen/include/xen/sched-if.h b/xen/include/xen/sched-if.h
index 926d063ccf..65b4538114 100644
--- a/xen/include/xen/sched-if.h
+++ b/xen/include/xen/sched-if.h
@@ -266,16 +266,14 @@ static inline cpumask_t* cpupool_domain_cpumask(struct domain *d)
  * Soft affinity only needs to be considered if:
  * * The cpus in the cpupool are not a subset of soft affinity
  * * The hard affinity is not a subset of soft affinity
- * * There is an overlap between the soft affinity and the mask which is
- *   currently being considered.
+ * * There is an overlap between the soft and hard affinity masks
  */
-static inline int has_soft_affinity(const struct vcpu *v,
-                                    const cpumask_t *mask)
+static inline int has_soft_affinity(const struct vcpu *v)
 {
     return !cpumask_subset(cpupool_domain_cpumask(v->domain),
                            v->cpu_soft_affinity) &&
            !cpumask_subset(v->cpu_hard_affinity, v->cpu_soft_affinity) &&
-           cpumask_intersects(v->cpu_soft_affinity, mask);
+           cpumask_intersects(v->cpu_soft_affinity, v->cpu_hard_affinity);
 }
 
 /*
--
generated by git-patchbot for /home/xen/git/xen.git#master

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/xen-changelog

 

