
[Xen-devel] [PATCH v2 41/48] xen/sched: prepare per-cpupool scheduling granularity



On- and offlining cpus with core scheduling is rather complicated, as
the cpus are taken on- or offline one by one, while scheduling wants
to handle them per core.

As the future plan is to be able to select the scheduling granularity
per cpupool, prepare for that by storing the granularity in struct
cpupool and struct sched_resource (the latter is needed for free cpus,
which are not associated with any cpupool). Free cpus will always use
granularity 1.
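
For illustration only (not part of the diff below), the lookups added
in this patch all follow the same "free cpus fall back to granularity
1" pattern, which could later be wrapped in a small helper along the
lines of the following sketch; the name cpupool_get_granularity() is
purely hypothetical here:

    /*
     * Hypothetical helper: effective scheduling granularity of a
     * cpupool. Free cpus (no cpupool) always use granularity 1.
     */
    static inline unsigned int cpupool_get_granularity(const struct cpupool *c)
    {
        return c ? c->granularity : 1;
    }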

Store the selected granularity option (cpu, core or socket) in the
cpupool as well, as we will need it to select the appropriate cpu mask
when populating the cpupool with cpus.
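
To sketch the intended use (this mapping is not introduced here; the
helper name sched_get_opt_cpumask() and its exact shape are only an
assumption about a follow-up patch), the stored option could translate
to a cpu mask roughly like this:

    /* Illustrative only: cpu mask matching the granularity option. */
    static const cpumask_t *sched_get_opt_cpumask(enum sched_gran opt,
                                                  unsigned int cpu)
    {
        switch ( opt )
        {
        case SCHED_GRAN_cpu:
            return cpumask_of(cpu);
        case SCHED_GRAN_core:
            return per_cpu(cpu_sibling_mask, cpu);
        case SCHED_GRAN_socket:
            return per_cpu(cpu_core_mask, cpu);
        default:
            ASSERT_UNREACHABLE();
            return cpumask_of(cpu);
        }
    }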

This will make on- and offlining of cpus much easier and avoid
writing code which would need to be thrown away later.

Signed-off-by: Juergen Gross <jgross@xxxxxxxx>
---
V1: new patch
---
 xen/common/cpupool.c       |  2 ++
 xen/common/schedule.c      | 23 +++++++++++++++--------
 xen/include/xen/sched-if.h | 12 ++++++++++++
 3 files changed, 29 insertions(+), 8 deletions(-)

diff --git a/xen/common/cpupool.c b/xen/common/cpupool.c
index bace684464..8789fde3c4 100644
--- a/xen/common/cpupool.c
+++ b/xen/common/cpupool.c
@@ -177,6 +177,8 @@ static struct cpupool *cpupool_create(
             return NULL;
         }
     }
+    c->granularity = sched_granularity;
+    c->opt_granularity = opt_sched_granularity;
 
     *q = c;
 
diff --git a/xen/common/schedule.c b/xen/common/schedule.c
index b94fd2431a..7823b48e32 100644
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -56,7 +56,8 @@ int sched_ratelimit_us = SCHED_DEFAULT_RATELIMIT_US;
 integer_param("sched_ratelimit_us", sched_ratelimit_us);
 
 /* Number of vcpus per struct sched_unit. */
-static unsigned int __read_mostly sched_granularity = 1;
+enum sched_gran __read_mostly opt_sched_granularity = SCHED_GRAN_cpu;
+unsigned int __read_mostly sched_granularity = 1;
 bool __read_mostly sched_disable_smt_switching;
 const cpumask_t *sched_res_mask = &cpumask_all;
 
@@ -409,10 +410,10 @@ static struct sched_unit *sched_alloc_unit(struct vcpu *v)
 {
     struct sched_unit *unit, **prev_unit;
     struct domain *d = v->domain;
+    unsigned int gran = d->cpupool ? d->cpupool->granularity : 1;
 
     for_each_sched_unit ( d, unit )
-        if ( unit->vcpu_list->vcpu_id / sched_granularity ==
-             v->vcpu_id / sched_granularity )
+        if ( unit->vcpu_list->vcpu_id / gran == v->vcpu_id / gran )
             break;
 
     if ( unit )
@@ -1805,11 +1806,11 @@ static void sched_switch_units(struct sched_resource *sd,
         if ( is_idle_unit(prev) )
         {
             prev->runstate_cnt[RUNSTATE_running] = 0;
-            prev->runstate_cnt[RUNSTATE_runnable] = sched_granularity;
+            prev->runstate_cnt[RUNSTATE_runnable] = sd->granularity;
         }
         if ( is_idle_unit(next) )
         {
-            next->runstate_cnt[RUNSTATE_running] = sched_granularity;
+            next->runstate_cnt[RUNSTATE_running] = sd->granularity;
             next->runstate_cnt[RUNSTATE_runnable] = 0;
         }
     }
@@ -2072,11 +2073,12 @@ static struct sched_unit *sched_wait_rendezvous_in(struct sched_unit *prev,
 {
     struct sched_unit *next;
     struct vcpu *v;
+    unsigned int gran = get_sched_res(cpu)->granularity;
 
     if ( !--prev->rendezvous_in_cnt )
     {
         next = do_schedule(prev, now, cpu);
-        atomic_set(&next->rendezvous_out_cnt, sched_granularity + 1);
+        atomic_set(&next->rendezvous_out_cnt, gran + 1);
         return next;
     }
 
@@ -2196,6 +2198,7 @@ static void schedule(void)
     struct sched_resource *sd;
     spinlock_t           *lock;
     int cpu = smp_processor_id();
+    unsigned int          gran = get_sched_res(cpu)->granularity;
 
     ASSERT_NOT_IN_ATOMIC();
 
@@ -2221,11 +2224,11 @@ static void schedule(void)
 
     stop_timer(&sd->s_timer);
 
-    if ( sched_granularity > 1 )
+    if ( gran > 1 )
     {
         cpumask_t mask;
 
-        prev->rendezvous_in_cnt = sched_granularity;
+        prev->rendezvous_in_cnt = gran;
         cpumask_andnot(&mask, sd->cpus, cpumask_of(cpu));
         cpumask_raise_softirq(&mask, SCHED_SLAVE_SOFTIRQ);
         next = sched_wait_rendezvous_in(prev, &lock, cpu, now);
@@ -2291,6 +2294,9 @@ static int cpu_schedule_up(unsigned int cpu)
     init_timer(&sd->s_timer, s_timer_fn, NULL, cpu);
     atomic_set(&sd->urgent_count, 0);
 
+    /* We start with cpu granularity. */
+    sd->granularity = 1;
+
     /* Boot CPU is dealt with later in scheduler_init(). */
     if ( cpu == 0 )
         return 0;
@@ -2581,6 +2587,7 @@ int schedule_cpu_switch(unsigned int cpu, struct cpupool *c)
     sched_free_vdata(old_ops, vpriv_old);
     sched_free_pdata(old_ops, ppriv_old, cpu);
 
+    get_sched_res(cpu)->granularity = c ? c->granularity : 1;
     get_sched_res(cpu)->cpupool = c;
     /* When a cpu is added to a pool, trigger it to go pick up some work */
     if ( c != NULL )
diff --git a/xen/include/xen/sched-if.h b/xen/include/xen/sched-if.h
index 73b19a2763..606a0d4a25 100644
--- a/xen/include/xen/sched-if.h
+++ b/xen/include/xen/sched-if.h
@@ -25,6 +25,15 @@ extern int sched_ratelimit_us;
 /* Scheduling resource mask. */
 extern const cpumask_t *sched_res_mask;
 
+/* Number of vcpus per struct sched_unit. */
+enum sched_gran {
+    SCHED_GRAN_cpu,
+    SCHED_GRAN_core,
+    SCHED_GRAN_socket
+};
+extern enum sched_gran opt_sched_granularity;
+extern unsigned int sched_granularity;
+
 /*
  * In order to allow a scheduler to remap the lock->cpu mapping,
  * we have a per-cpu pointer, along with a pre-allocated set of
@@ -47,6 +56,7 @@ struct sched_resource {
     struct timer        s_timer;        /* scheduling timer                */
     atomic_t            urgent_count;   /* how many urgent vcpus           */
     unsigned int        processor;
+    unsigned int        granularity;
     const cpumask_t    *cpus;           /* cpus covered by this struct     */
 };
 
@@ -531,6 +541,8 @@ struct cpupool
     struct cpupool   *next;
     struct scheduler *sched;
     atomic_t         refcnt;
+    unsigned int     granularity;
+    enum sched_gran  opt_granularity;
 };
 
 #define cpupool_online_cpumask(_pool) \
-- 
2.16.4

