
[Xen-devel] [PATCH 15/16] xen: sched: scratch space for cpumasks on Credit2



like what's there already in both Credit1 and RTDS. In
fact, when playing with affinity, a lot of cpumask
manipulation is necessary inside various functions.

To avoid having a lot of cpumask_var_t on the stack,
this patch introduces a global scratch area.
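
For illustration only (the helper below is hypothetical,
and not part of this patch), the scratch area enables a
usage pattern like the following, assuming the standard
cpumask API and the vcpu's hard affinity mask:

    /*
     * Sketch: compute the pCPUs a vCPU can run on, without
     * putting a cpumask_var_t on the stack and without any
     * runtime allocation. cpumask_scratch is this pCPU's
     * entry in the scratch array introduced below.
     */
    static void get_usable_cpus(const struct vcpu *v,
                                const cpumask_t *online)
    {
        cpumask_t *mask = cpumask_scratch;

        cpumask_and(mask, v->cpu_hard_affinity, online);
        /* ... use mask for placement decisions ... */
    }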

Signed-off-by: Dario Faggioli <dario.faggioli@xxxxxxxxxx>
---
Cc: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
---
 xen/common/sched_credit2.c |   50 ++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 50 insertions(+)

diff --git a/xen/common/sched_credit2.c b/xen/common/sched_credit2.c
index 07b8c67..a650216 100644
--- a/xen/common/sched_credit2.c
+++ b/xen/common/sched_credit2.c
@@ -238,6 +238,23 @@ static void parse_credit2_runqueue(const char *s)
 custom_param("credit2_runqueue", parse_credit2_runqueue);
 
 /*
+ * Scratch space, useful to avoid having too many cpumask_var_t on the stack.
+ *
+ * We want to only allocate the array the first time an instance of this
+ * scheduler is used, and avoid reallocating it (e.g., when more instances
+ * are activated inside new cpupools) or leaking it (e.g., when the last
+ * instance is de-inited).
+ *
+ * Counting the number of active Credit2 instances is all we need, and it
+ * does not even have to happen via atomic_t operations, as the counter
+ * only changes during boot, or under the cpupool_lock.
+ */
+static cpumask_var_t *_cpumask_scratch;
+#define cpumask_scratch _cpumask_scratch[smp_processor_id()]
+
+static unsigned int nr_csched2_ops;
+
+/*
  * Per-runqueue data
  */
 struct csched2_runqueue_data {
@@ -2166,6 +2183,15 @@ csched2_switch_sched(struct scheduler *new_ops, unsigned int cpu,
     spin_unlock_irq(&prv->lock);
 }
 
+static void *
+csched2_alloc_pdata(const struct scheduler *ops, int cpu)
+{
+    if ( !zalloc_cpumask_var(&_cpumask_scratch[cpu]) )
+        return ERR_PTR(-ENOMEM);
+
+    return NULL;
+}
+
 static void
 csched2_free_pdata(const struct scheduler *ops, void *pcpu, int cpu)
 {
@@ -2205,6 +2231,9 @@ csched2_free_pdata(const struct scheduler *ops, void *pcpu, int cpu)
 
     spin_unlock_irqrestore(&prv->lock, flags);
 
+    free_cpumask_var(_cpumask_scratch[cpu]);
+    _cpumask_scratch[cpu] = NULL;
+
     return;
 }
 
@@ -2239,6 +2268,19 @@ csched2_init(struct scheduler *ops)
     if ( prv == NULL )
         return -ENOMEM;
     ops->sched_data = prv;
+
+    ASSERT( _cpumask_scratch == NULL || nr_csched2_ops > 0 );
+    if ( !_cpumask_scratch )
+    {
+        _cpumask_scratch = xmalloc_array(cpumask_var_t, nr_cpu_ids);
+        if ( !_cpumask_scratch )
+        {
+            xfree(prv);
+            return -ENOMEM;
+        }
+    }
+    nr_csched2_ops++;
+
     spin_lock_init(&prv->lock);
     INIT_LIST_HEAD(&prv->sdom);
 
@@ -2259,6 +2301,13 @@ csched2_deinit(struct scheduler *ops)
 {
     struct csched2_private *prv;
 
+    ASSERT( _cpumask_scratch && nr_csched2_ops > 0 );
+    if ( (--nr_csched2_ops) == 0 )
+    {
+        xfree(_cpumask_scratch);
+        _cpumask_scratch = NULL;
+    }
+
     prv = CSCHED2_PRIV(ops);
     ops->sched_data = NULL;
     xfree(prv);
@@ -2293,6 +2342,7 @@ static const struct scheduler sched_credit2_def = {
     .alloc_vdata    = csched2_alloc_vdata,
     .free_vdata     = csched2_free_vdata,
     .init_pdata     = csched2_init_pdata,
+    .alloc_pdata    = csched2_alloc_pdata,
     .free_pdata     = csched2_free_pdata,
     .switch_sched   = csched2_switch_sched,
     .alloc_domdata  = csched2_alloc_domdata,


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel

 

