
[Xen-changelog] [xen master] sched: clarify use cases of schedule_cpu_switch()



commit a8c6c623192e3610033e8c79533bd0995038c4dc
Author:     Dario Faggioli <dario.faggioli@xxxxxxxxxx>
AuthorDate: Tue Nov 24 14:49:09 2015 +0100
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Tue Nov 24 14:49:09 2015 +0100

    sched: clarify use cases of schedule_cpu_switch()
    
    schedule_cpu_switch() is meant to be used only for moving
    pCPUs from a cpupool to no cpupool, and from there back
    into a cpupool, *not* for moving them directly from one
    cpupool to another.
    
    This is inherent in the way the function is implemented
    and called, but it is not obvious just from looking at
    the code.
    
    Make it more evident by:
     - adding commentary and ASSERT()s;
     - updating the cpupool per-CPU variable (mapping pCPUs to
       pools) directly in schedule_cpu_switch(), rather than
       in various places in cpupool.c.
    
    Signed-off-by: Dario Faggioli <dario.faggioli@xxxxxxxxxx>
    Acked-by: Juergen Gross <jgross@xxxxxxxx>
    Reviewed-by: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
---
 xen/common/cpupool.c  |    7 -------
 xen/common/schedule.c |   31 ++++++++++++++++++++++++++++++-
 2 files changed, 30 insertions(+), 8 deletions(-)

diff --git a/xen/common/cpupool.c b/xen/common/cpupool.c
index e79850b..8e7b723 100644
--- a/xen/common/cpupool.c
+++ b/xen/common/cpupool.c
@@ -261,19 +261,13 @@ int cpupool_move_domain(struct domain *d, struct cpupool *c)
 static int cpupool_assign_cpu_locked(struct cpupool *c, unsigned int cpu)
 {
     int ret;
-    struct cpupool *old;
     struct domain *d;
 
     if ( (cpupool_moving_cpu == cpu) && (c != cpupool_cpu_moving) )
         return -EBUSY;
-    old = per_cpu(cpupool, cpu);
-    per_cpu(cpupool, cpu) = c;
     ret = schedule_cpu_switch(cpu, c);
     if ( ret )
-    {
-        per_cpu(cpupool, cpu) = old;
         return ret;
-    }
 
     cpumask_clear_cpu(cpu, &cpupool_free_cpus);
     if (cpupool_moving_cpu == cpu)
@@ -326,7 +320,6 @@ static long cpupool_unassign_cpu_helper(void *info)
             cpumask_clear_cpu(cpu, &cpupool_free_cpus);
             goto out;
         }
-        per_cpu(cpupool, cpu) = NULL;
         cpupool_moving_cpu = -1;
         cpupool_put(cpupool_cpu_moving);
         cpupool_cpu_moving = NULL;
diff --git a/xen/common/schedule.c b/xen/common/schedule.c
index f38df71..92057eb 100644
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -1485,6 +1485,17 @@ void __init scheduler_init(void)
         BUG();
 }
 
+/*
+ * Move a pCPU outside of the influence of the scheduler of its current
+ * cpupool, or subject it to the scheduler of a new cpupool.
+ *
+ * For the pCPUs that are removed from their cpupool, their scheduler becomes
+ * &ops (the default scheduler, selected at boot, which also services the
+ * default cpupool). However, as these pCPUs are not really part of any pool,
+ * there won't be any scheduling event on them, not even from the default
+ * scheduler. Basically, they will just sit idle until they are explicitly
+ * added back to a cpupool.
+ */
 int schedule_cpu_switch(unsigned int cpu, struct cpupool *c)
 {
     struct vcpu *idle;
@@ -1492,9 +1503,24 @@ int schedule_cpu_switch(unsigned int cpu, struct cpupool *c)
     void *ppriv, *ppriv_old, *vpriv, *vpriv_old;
     struct scheduler *old_ops = per_cpu(scheduler, cpu);
     struct scheduler *new_ops = (c == NULL) ? &ops : c->sched;
+    struct cpupool *old_pool = per_cpu(cpupool, cpu);
+
+    /*
+     * pCPUs only move from a valid cpupool to free (i.e., out of any pool),
+     * or from free to a valid cpupool. In the former case (which happens when
+     * c is NULL), we want the CPU to have been marked as free already, as
+     * well as to not be valid for the source pool any longer, when we get to
+     * here. In the latter case (which happens when c is a valid cpupool), we
+     * want the CPU to still be marked as free, as well as to not yet be valid
+     * for the destination pool.
+     */
+    ASSERT(c != old_pool && (c != NULL || old_pool != NULL));
+    ASSERT(cpumask_test_cpu(cpu, &cpupool_free_cpus));
+    ASSERT((c == NULL && !cpumask_test_cpu(cpu, old_pool->cpu_valid)) ||
+           (c != NULL && !cpumask_test_cpu(cpu, c->cpu_valid)));
 
     if ( old_ops == new_ops )
-        return 0;
+        goto out;
 
     idle = idle_vcpu[cpu];
     ppriv = SCHED_OP(new_ops, alloc_pdata, cpu);
@@ -1522,6 +1548,9 @@ int schedule_cpu_switch(unsigned int cpu, struct cpupool *c)
     SCHED_OP(old_ops, free_vdata, vpriv_old);
     SCHED_OP(old_ops, free_pdata, ppriv_old, cpu);
 
+ out:
+    per_cpu(cpupool, cpu) = c;
+
     return 0;
 }
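
For readers of the patch, the sketch below illustrates the only call sequence
the new comment and ASSERT()s permit: a direct pool-to-pool move is really two
schedule_cpu_switch() calls, passing through the "free" (no pool) state. The
helper move_cpu_to_pool() is hypothetical and does not exist in the tree; the
cpumask bookkeeping that cpupool.c performs around each call (putting the pCPU
in cpupool_free_cpus and adjusting the pools' cpu_valid bits, which is exactly
what the new ASSERT()s check) is omitted, as is error recovery.

/*
 * Illustration only -- not part of the patch or of cpupool.c.
 */
static int move_cpu_to_pool(unsigned int cpu, struct cpupool *to)
{
    int ret;

    /*
     * Step 1: pull the pCPU out of its current pool.  It is handed to the
     * default scheduler (&ops) but, being in no pool, it just sits idle.
     */
    ret = schedule_cpu_switch(cpu, NULL);
    if ( ret )
        return ret;

    /*
     * Step 2: subject the now-free pCPU to the destination pool's
     * scheduler; as of this patch, this also updates per_cpu(cpupool, cpu).
     */
    return schedule_cpu_switch(cpu, to);
}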
 
--
generated by git-patchbot for /home/xen/git/xen.git#master

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog


 

