
[PATCH v2 6/7] cpupool: create the 'cpupool sync' infrastructure



If we want to make live changes to the configuration of (typically)
the scheduler of a cpupool, we need things to be quiet in that pool.

Not necessarily as quiet as with stop machine, but we at least need
to make sure that domains are neither running nor sitting in the
runqueues of the scheduler itself.

In fact, we need exactly such a mechanism for changing "on the fly"
which CPUs are assigned to which runqueue in a Credit2 cpupool (see
the following changes). Therefore, instead of doing something specific
for that use case, let's implement a generic mechanism.

The reason is, of course, that it may turn out to be useful for
other purposes in the future. But even for this specific case, it is
much easier and cleaner to just cede control to the cpupool code,
instead of trying to do everything inside the scheduler.

Within the new cpupool_sync() function, we want to pause all the
domains of a pool, including, potentially, the one calling the
function. Therefore, we defer the pausing, the actual work and the
unpausing to a tasklet.
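
Just to illustrate how the new interface is meant to be used (this is
only a sketch, and both the callback name and the helper it calls are
made up for the example), a scheduler wanting to reorganize its
runqueues would do something like this:

    /*
     * Runs in the cpupool_sync() tasklet, with all the domains of the
     * pool paused. The argument is the pool's 'struct scheduler *'.
     */
    static void csched2_reorg_runqueues(void *arg)
    {
        struct scheduler *ops = arg;

        /* Nothing is running or queued, so runqueues can be rebuilt. */
        rebuild_runqueues(ops); /* made up helper, for the example only */
    }

    /* From the path triggering the reconfiguration, 'c' being the pool: */
    cpupool_sync(c, csched2_reorg_runqueues);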

Suggested-by: Juergen Gross <jgross@xxxxxxxx>
Signed-off-by: Dario Faggioli <dfaggioli@xxxxxxxx>
---
Cc: Juergen Gross <jgross@xxxxxxxx>
Cc: George Dunlap <george.dunlap@xxxxxxxxxx>
Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Cc: Jan Beulich <jbeulich@xxxxxxxx>
Cc: Julien Grall <julien@xxxxxxx>
Cc: Stefano Stabellini <sstabellini@xxxxxxxxxx>
---
Changes from v1:
* new patch
---
 xen/common/sched/cpupool.c |   52 ++++++++++++++++++++++++++++++++++++++++++++
 xen/common/sched/private.h |    6 +++++
 xen/include/xen/sched.h    |    1 +
 3 files changed, 59 insertions(+)

diff --git a/xen/common/sched/cpupool.c b/xen/common/sched/cpupool.c
index 7ea641ca26..122c371c7a 100644
--- a/xen/common/sched/cpupool.c
+++ b/xen/common/sched/cpupool.c
@@ -234,6 +234,42 @@ void cpupool_put(struct cpupool *pool)
     free_cpupool_struct(pool);
 }
 
+void do_cpupool_sync(void *arg)
+{
+    struct cpupool *c = arg;
+    struct domain *d;
+
+
+    spin_lock(&cpupool_lock);
+
+    /*
+     * With this second call (and this time to domain_pause()) we basically
+     * make sure that all the domains have actually stopped running.
+     */
+    rcu_read_lock(&domlist_read_lock);
+    for_each_domain_in_cpupool(d, c)
+        domain_pause(d);
+    rcu_read_unlock(&domlist_read_lock);
+
+    /*
+     * Let's invoke the function that the caller provided. We pass a reference
+     * to our own scheduler as a parameter, with which it should easily reach
+     * anything it needs.
+     */
+    c->sync_ctl.func(c->sched);
+
+    /* We called pause twice, so we need to do the same with unpause. */
+    rcu_read_lock(&domlist_read_lock);
+    for_each_domain_in_cpupool(d, c)
+    {
+        domain_unpause(d);
+        domain_unpause(d);
+    }
+    rcu_read_unlock(&domlist_read_lock);
+
+    spin_unlock(&cpupool_lock);
+}
+
 /*
  * create a new cpupool with specified poolid and scheduler
  * returns pointer to new cpupool structure if okay, NULL else
@@ -292,6 +328,8 @@ static struct cpupool *cpupool_create(
 
     *q = c;
 
+    tasklet_init(&c->sync_ctl.tasklet, do_cpupool_sync, c);
+
     spin_unlock(&cpupool_lock);
 
     debugtrace_printk("Created cpupool %d with scheduler %s (%s)\n",
@@ -332,6 +370,7 @@ static int cpupool_destroy(struct cpupool *c)
         return -EBUSY;
     }
     *q = c->next;
+    tasklet_kill(&c->sync_ctl.tasklet);
     spin_unlock(&cpupool_lock);
 
     cpupool_put(c);
@@ -372,6 +411,19 @@ int cpupool_move_domain(struct domain *d, struct cpupool *c)
     return ret;
 }
 
+void cpupool_sync(struct cpupool *c, void (*func)(void*))
+{
+    struct domain *d;
+
+    rcu_read_lock(&domlist_read_lock);
+    for_each_domain_in_cpupool(d, c)
+        domain_pause_nosync(d);
+    rcu_read_unlock(&domlist_read_lock);
+
+    c->sync_ctl.func = func;
+    tasklet_schedule_on_cpu(&c->sync_ctl.tasklet, cpumask_first(c->cpu_valid));
+}
+
 /*
  * assign a specific cpu to a cpupool
  * cpupool_lock must be held
diff --git a/xen/common/sched/private.h b/xen/common/sched/private.h
index df50976eb2..4705c8b119 100644
--- a/xen/common/sched/private.h
+++ b/xen/common/sched/private.h
@@ -503,6 +503,11 @@ static inline void sched_unit_unpause(const struct sched_unit *unit)
 #define REGISTER_SCHEDULER(x) static const struct scheduler *x##_entry \
   __used_section(".data.schedulers") = &x;
 
+struct cpupool_sync_ctl {
+    struct tasklet tasklet;
+    void (*func)(void*);
+};
+
 struct cpupool
 {
     int              cpupool_id;
@@ -514,6 +519,7 @@ struct cpupool
     struct scheduler *sched;
     atomic_t         refcnt;
     enum sched_gran  gran;
+    struct cpupool_sync_ctl sync_ctl;
 };
 
 static inline cpumask_t *cpupool_domain_master_cpumask(const struct domain *d)
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index ac53519d7f..e2a233c96c 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -1061,6 +1061,7 @@ extern enum cpufreq_controller {
 } cpufreq_controller;
 
 int cpupool_move_domain(struct domain *d, struct cpupool *c);
+void cpupool_sync(struct cpupool *c, void (*func)(void*));
 int cpupool_do_sysctl(struct xen_sysctl_cpupool_op *op);
 int cpupool_get_id(const struct domain *d);
 const cpumask_t *cpupool_valid_cpus(const struct cpupool *pool);
