
Re: [Xen-devel] [PATCH 4/4] xen: sched: get rid of cpupool_scheduler_cpumask()



On 6/26/2015 10:08 AM, Juergen Gross wrote:
On 06/25/2015 02:15 PM, Dario Faggioli wrote:
and of (almost every) direct use of cpupool_online_cpumask().

In fact, what we really want, most of the time, is the
set of valid pCPUs of the cpupool a certain domain is
part of. Furthermore, when called with a NULL pool as
argument, cpupool_scheduler_cpumask() does more harm
than good, by returning the bitmask of free pCPUs!

This commit, therefore:
  * gets rid of cpupool_scheduler_cpumask(), in favour of
    cpupool_domain_cpumask(), which makes it more evident
    what we are after, and accommodates some sanity checking;
  * replaces some of the calls to cpupool_online_cpumask()
    with calls to the new function too.
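
For reference, below is a minimal, self-contained toy model of the change
(NOT Xen code: the types, the bitmask representation, the mask values and
the shortened helper names are made up purely for illustration). It shows
why silently falling back to the free-pCPU mask on a NULL pool is rarely
what a scheduler wants, and what the new helper asserts instead:

    /* Toy model of cpupool_scheduler_cpumask() vs. cpupool_domain_cpumask(). */
    #include <assert.h>
    #include <stdio.h>

    typedef unsigned long cpumask_t;              /* one bit per pCPU (toy) */

    struct cpupool { cpumask_t cpu_valid; };
    struct domain  { struct cpupool *cpupool; };

    static cpumask_t cpupool_free_cpus = 0xf0UL;  /* pCPUs in no pool (toy) */

    /* Old-style helper: a NULL pool silently yields the free pCPUs. */
    static cpumask_t *scheduler_cpumask(struct cpupool *pool)
    {
        return pool ? &pool->cpu_valid : &cpupool_free_cpus;
    }

    /*
     * New-style helper: only the idle domain has no pool, and it has no
     * business calling this, so we can simply assert.
     */
    static cpumask_t *domain_cpumask(struct domain *d)
    {
        assert(d->cpupool != NULL);
        return &d->cpupool->cpu_valid;
    }

    int main(void)
    {
        struct cpupool pool = { .cpu_valid = 0x0fUL };
        struct domain dom = { .cpupool = &pool };

        printf("old helper, NULL pool:  %#lx (free pCPUs!)\n",
               *scheduler_cpumask(NULL));
        printf("new helper, dom's pool: %#lx\n", *domain_cpumask(&dom));
        return 0;
    }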

Signed-off-by: Dario Faggioli <dario.faggioli@xxxxxxxxxx>

Acked-by: Juergen Gross <jgross@xxxxxxxx>


Acked-by: Joshua Whitehead <josh.whitehead@xxxxxxxxxxxxxxx>

---
Cc: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
Cc: Juergen Gross <jgross@xxxxxxxx>
Cc: Robert VanVossen <robert.vanvossen@xxxxxxxxxxxxxxx>
Cc: Josh Whitehead <josh.whitehead@xxxxxxxxxxxxxxx>
Cc: Meng Xu <mengxu@xxxxxxxxxxxxx>
Cc: Sisu Xi <xisisu@xxxxxxxxx>
---
  xen/common/domain.c         |    5 +++--
  xen/common/domctl.c         |    4 ++--
  xen/common/sched_arinc653.c |    2 +-
  xen/common/sched_credit.c   |    6 +++---
  xen/common/sched_rt.c       |   12 ++++++------
  xen/common/sched_sedf.c     |    2 +-
  xen/common/schedule.c       |    2 +-
  xen/include/xen/sched-if.h  |   12 ++++++++++--
  8 files changed, 27 insertions(+), 18 deletions(-)

diff --git a/xen/common/domain.c b/xen/common/domain.c
index 3bc52e6..c20accb 100644
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -184,7 +184,8 @@ struct vcpu *alloc_vcpu(
      /* Must be called after making new vcpu visible to for_each_vcpu(). */
      vcpu_check_shutdown(v);

-    domain_update_node_affinity(d);
+    if ( !is_idle_domain(d) )
+        domain_update_node_affinity(d);

      return v;
  }
@@ -437,7 +438,7 @@ void domain_update_node_affinity(struct domain *d)
          return;
      }

-    online = cpupool_online_cpumask(d->cpupool);
+    online = cpupool_domain_cpumask(d);

      spin_lock(&d->node_affinity_lock);

diff --git a/xen/common/domctl.c b/xen/common/domctl.c
index 2a2d203..a399aa6 100644
--- a/xen/common/domctl.c
+++ b/xen/common/domctl.c
@@ -664,7 +664,7 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
              goto maxvcpu_out;

          ret = -ENOMEM;
-        online = cpupool_online_cpumask(d->cpupool);
+        online = cpupool_domain_cpumask(d);
          if ( max > d->max_vcpus )
          {
              struct vcpu **vcpus;
@@ -748,7 +748,7 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
          if ( op->cmd == XEN_DOMCTL_setvcpuaffinity )
          {
              cpumask_var_t new_affinity, old_affinity;
-            cpumask_t *online = cpupool_online_cpumask(v->domain->cpupool);;
+            cpumask_t *online = cpupool_domain_cpumask(v->domain);;

              /*
             * We want to be able to restore hard affinity if we are trying
diff --git a/xen/common/sched_arinc653.c b/xen/common/sched_arinc653.c
index cff5da9..dbe02ed 100644
--- a/xen/common/sched_arinc653.c
+++ b/xen/common/sched_arinc653.c
@@ -667,7 +667,7 @@ a653sched_pick_cpu(const struct scheduler *ops, struct vcpu *vc)
       * If present, prefer vc's current processor, else
       * just find the first valid vcpu .
       */
-    online = cpupool_scheduler_cpumask(vc->domain->cpupool);
+    online = cpupool_domain_cpumask(vc->domain);

      cpu = cpumask_first(online);

diff --git a/xen/common/sched_credit.c b/xen/common/sched_credit.c
index a1945ac..8c36635 100644
--- a/xen/common/sched_credit.c
+++ b/xen/common/sched_credit.c
@@ -309,7 +309,7 @@ __runq_remove(struct csched_vcpu *svc)
  static inline int __vcpu_has_soft_affinity(const struct vcpu *vc,
                                             const cpumask_t *mask)
  {
-    return !cpumask_subset(cpupool_online_cpumask(vc->domain->cpupool),
+    return !cpumask_subset(cpupool_domain_cpumask(vc->domain),
                             vc->cpu_soft_affinity) &&
             !cpumask_subset(vc->cpu_hard_affinity, vc->cpu_soft_affinity) &&
             cpumask_intersects(vc->cpu_soft_affinity, mask);
@@ -374,7 +374,7 @@ __runq_tickle(unsigned int cpu, struct csched_vcpu *new)

      /* cpu is vc->processor, so it must be in a cpupool. */
      ASSERT(per_cpu(cpupool, cpu) != NULL);
-    online = cpupool_online_cpumask(per_cpu(cpupool, cpu));
+    online = cpupool_domain_cpumask(new->sdom->dom);
      cpumask_and(&idle_mask, prv->idlers, online);
      idlers_empty = cpumask_empty(&idle_mask);

@@ -641,7 +641,7 @@ _csched_cpu_pick(const struct scheduler *ops, struct vcpu *vc, bool_t commit)
      int balance_step;

      /* Store in cpus the mask of online cpus on which the domain can run */
-    online = cpupool_scheduler_cpumask(vc->domain->cpupool);
+    online = cpupool_domain_cpumask(vc->domain);
      cpumask_and(&cpus, vc->cpu_hard_affinity, online);

      for_each_csched_balance_step( balance_step )
diff --git a/xen/common/sched_rt.c b/xen/common/sched_rt.c
index 4372486..08611c8 100644
--- a/xen/common/sched_rt.c
+++ b/xen/common/sched_rt.c
@@ -256,7 +256,7 @@ rt_dump_vcpu(const struct scheduler *ops, const struct rt_vcpu *svc)
       */
      mask = _cpumask_scratch[svc->vcpu->processor];

-    cpupool_mask = cpupool_scheduler_cpumask(svc->vcpu->domain->cpupool);
+    cpupool_mask = cpupool_domain_cpumask(svc->vcpu->domain);
      cpumask_and(mask, cpupool_mask, svc->vcpu->cpu_hard_affinity);
      cpulist_scnprintf(keyhandler_scratch, sizeof(keyhandler_scratch), mask);
      printk("[%5d.%-2u] cpu %u, (%"PRI_stime", %"PRI_stime"),"
@@ -673,7 +673,7 @@ rt_cpu_pick(const struct scheduler *ops, struct vcpu *vc)
      cpumask_t *online;
      int cpu;

-    online = cpupool_scheduler_cpumask(vc->domain->cpupool);
+    online = cpupool_domain_cpumask(vc->domain);
      cpumask_and(&cpus, online, vc->cpu_hard_affinity);

      cpu = cpumask_test_cpu(vc->processor, &cpus)
@@ -753,7 +753,7 @@ __runq_pick(const struct scheduler *ops, const cpumask_t *mask)
          iter_svc = __q_elem(iter);

          /* mask cpu_hard_affinity & cpupool & mask */
-        online = cpupool_scheduler_cpumask(iter_svc->vcpu->domain->cpupool);
+        online = cpupool_domain_cpumask(iter_svc->vcpu->domain);
          cpumask_and(&cpu_common, online, iter_svc->vcpu->cpu_hard_affinity);
          cpumask_and(&cpu_common, mask, &cpu_common);
          if ( cpumask_empty(&cpu_common) )
@@ -965,7 +965,7 @@ runq_tickle(const struct scheduler *ops, struct rt_vcpu *new)
      if ( new == NULL || is_idle_vcpu(new->vcpu) )
          return;

-    online = cpupool_scheduler_cpumask(new->vcpu->domain->cpupool);
+    online = cpupool_domain_cpumask(new->vcpu->domain);
      cpumask_and(&not_tickled, online, new->vcpu->cpu_hard_affinity);
      cpumask_andnot(&not_tickled, &not_tickled, &prv->tickled);

@@ -1078,7 +1078,7 @@ rt_vcpu_wake(const struct scheduler *ops, struct vcpu *vc)

      ASSERT(!list_empty(&prv->sdom));
      sdom = list_entry(prv->sdom.next, struct rt_dom, sdom_elem);
-    online = cpupool_scheduler_cpumask(sdom->dom->cpupool);
+    online = cpupool_domain_cpumask(sdom->dom);
      snext = __runq_pick(ops, online); /* pick snext from ALL valid cpus */

      runq_tickle(ops, snext);
@@ -1113,7 +1113,7 @@ rt_context_saved(const struct scheduler *ops, struct vcpu *vc)

          ASSERT(!list_empty(&prv->sdom));
          sdom = list_entry(prv->sdom.next, struct rt_dom, sdom_elem);
-        online = cpupool_scheduler_cpumask(sdom->dom->cpupool);
+        online = cpupool_domain_cpumask(sdom->dom);
          snext = __runq_pick(ops, online); /* pick snext from ALL cpus */

          runq_tickle(ops, snext);
diff --git a/xen/common/sched_sedf.c b/xen/common/sched_sedf.c
index cad5b39..396b651 100644
--- a/xen/common/sched_sedf.c
+++ b/xen/common/sched_sedf.c
@@ -383,7 +383,7 @@ static int sedf_pick_cpu(const struct scheduler *ops, struct vcpu *v)
      cpumask_t online_affinity;
      cpumask_t *online;

-    online = cpupool_scheduler_cpumask(v->domain->cpupool);
+    online = cpupool_domain_cpumask(v->domain);
      cpumask_and(&online_affinity, v->cpu_hard_affinity, online);
      return cpumask_cycle(v->vcpu_id % cpumask_weight(&online_affinity) - 1,
                           &online_affinity);
diff --git a/xen/common/schedule.c b/xen/common/schedule.c
index 4ffcd98..7ad298a 100644
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -80,7 +80,7 @@ static struct scheduler __read_mostly ops;

  #define DOM2OP(_d)    (((_d)->cpupool == NULL) ? &ops : ((_d)->cpupool->sched))
  #define VCPU2OP(_v)   (DOM2OP((_v)->domain))
-#define VCPU2ONLINE(_v) cpupool_online_cpumask((_v)->domain->cpupool)
+#define VCPU2ONLINE(_v) cpupool_domain_cpumask((_v)->domain)

  static inline void trace_runstate_change(struct vcpu *v, int new_state)
  {
diff --git a/xen/include/xen/sched-if.h b/xen/include/xen/sched-if.h
index 7cc25c6..20af791 100644
--- a/xen/include/xen/sched-if.h
+++ b/xen/include/xen/sched-if.h
@@ -183,9 +183,17 @@ struct cpupool
      atomic_t         refcnt;
  };

-#define cpupool_scheduler_cpumask(_pool) \
-    (((_pool) == NULL) ? &cpupool_free_cpus : (_pool)->cpu_valid)
  #define cpupool_online_cpumask(_pool) \
      (((_pool) == NULL) ? &cpu_online_map : (_pool)->cpu_valid)

+static inline cpumask_t* cpupool_domain_cpumask(struct domain *d)
+{
+    /*
+     * d->cpupool == NULL only for the idle domain, which should
+     * have no interest in calling into this.
+     */
+    ASSERT(d->cpupool != NULL);
+    return cpupool_online_cpumask(d->cpupool);
+}
+
  #endif /* __XEN_SCHED_IF_H__ */





_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel


 

