
[Xen-devel] [PATCH RFC 37/49] xen/sched: Change vcpu_migrate_*() to operate on schedule item



Now that vcpu_migrate_start() and vcpu_migrate_finish() are used only
to ensure a vcpu is running on a suitable processor, they can be
switched to operate on schedule items instead of vcpus.

While doing that, rename them accordingly and make the _start()
variant static.

vcpu_move_locked() is switched to operate on a schedule item, too.
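
For reference, the intended calling pattern after the rename is the one
documented in the comment kept in schedule.c; a minimal sketch (the
caller shown here is hypothetical, only the helpers are real) looks
like this:

    /* Hypothetical caller illustrating the documented lock/unlock pattern. */
    static void move_item_example(struct sched_item *item)
    {
        spinlock_t *lock = item_schedule_lock_irq(item);

        /* Flag the item's vcpus as migrating and put them to sleep. */
        sched_item_migrate_start(item);
        item_schedule_unlock_irq(lock, item);

        /* Do the move now, or leave it to context_saved() if still running. */
        sched_item_migrate_finish(item);
    }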

Signed-off-by: Juergen Gross <jgross@xxxxxxxx>
---
 xen/common/schedule.c | 107 +++++++++++++++++++++++++++++---------------------
 1 file changed, 62 insertions(+), 45 deletions(-)

diff --git a/xen/common/schedule.c b/xen/common/schedule.c
index 7c7735bf33..22e43d88cc 100644
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -687,38 +687,43 @@ void vcpu_unblock(struct vcpu *v)
 }
 
 /*
- * Do the actual movement of a vcpu from old to new CPU. Locks for *both*
+ * Do the actual movement of an item from old to new CPU. Locks for *both*
  * CPUs needs to have been taken already when calling this!
  */
-static void vcpu_move_locked(struct vcpu *v, unsigned int new_cpu)
+static void sched_item_move_locked(struct sched_item *item,
+                                   unsigned int new_cpu)
 {
-    unsigned int old_cpu = v->processor;
+    unsigned int old_cpu = item->res->processor;
+    struct vcpu *v;
 
     /*
      * Transfer urgency status to new CPU before switching CPUs, as
      * once the switch occurs, v->is_urgent is no longer protected by
      * the per-CPU scheduler lock we are holding.
      */
-    if ( unlikely(v->is_urgent) && (old_cpu != new_cpu) )
+    for_each_sched_item_vcpu ( item, v )
     {
-        atomic_inc(&per_cpu(sched_res, new_cpu)->urgent_count);
-        atomic_dec(&per_cpu(sched_res, old_cpu)->urgent_count);
+        if ( unlikely(v->is_urgent) && (old_cpu != new_cpu) )
+        {
+            atomic_inc(&per_cpu(sched_res, new_cpu)->urgent_count);
+            atomic_dec(&per_cpu(sched_res, old_cpu)->urgent_count);
+        }
     }
 
     /*
      * Actual CPU switch to new CPU.  This is safe because the lock
-     * pointer cant' change while the current lock is held.
+     * pointer can't change while the current lock is held.
      */
-    if ( vcpu_scheduler(v)->migrate )
-        SCHED_OP(vcpu_scheduler(v), migrate, v->sched_item, new_cpu);
+    if ( vcpu_scheduler(item->vcpu)->migrate )
+        SCHED_OP(vcpu_scheduler(item->vcpu), migrate, item, new_cpu);
     else
-        sched_set_res(v->sched_item, per_cpu(sched_res, new_cpu));
+        sched_set_res(item, per_cpu(sched_res, new_cpu));
 }
 
 /*
  * Initiating migration
  *
- * In order to migrate, we need the vcpu in question to have stopped
+ * In order to migrate, we need the item in question to have stopped
  * running and had SCHED_OP(sleep) called (to take it off any
  * runqueues, for instance); and if it is currently running, it needs
  * to be scheduled out.  Finally, we need to hold the scheduling locks
@@ -734,36 +739,45 @@ static void vcpu_move_locked(struct vcpu *v, unsigned int new_cpu)
  * should be called like this:
  *
  *     lock = item_schedule_lock_irq(item);
- *     vcpu_migrate_start(v);
+ *     sched_item_migrate_start(item);
  *     item_schedule_unlock_irq(lock, item)
- *     vcpu_migrate_finish(v);
+ *     sched_item_migrate_finish(item);
  *
- * vcpu_migrate_finish() will do the work now if it can, or simply
- * return if it can't (because v is still running); in that case
- * vcpu_migrate_finish() will be called by context_saved().
+ * sched_item_migrate_finish() will do the work now if it can, or simply
+ * return if it can't (because item is still running); in that case
+ * sched_item_migrate_finish() will be called by context_saved().
  */
-void vcpu_migrate_start(struct vcpu *v)
+static void sched_item_migrate_start(struct sched_item *item)
 {
-    set_bit(_VPF_migrating, &v->pause_flags);
-    vcpu_sleep_nosync_locked(v);
+    struct vcpu *v;
+
+    for_each_sched_item_vcpu ( item, v )
+    {
+        set_bit(_VPF_migrating, &v->pause_flags);
+        vcpu_sleep_nosync_locked(v);
+    }
 }
 
-static void vcpu_migrate_finish(struct vcpu *v)
+static void sched_item_migrate_finish(struct sched_item *item)
 {
     unsigned long flags;
     unsigned int old_cpu, new_cpu;
     spinlock_t *old_lock, *new_lock;
     bool_t pick_called = 0;
+    struct vcpu *v;
 
     /*
-     * If the vcpu is currently running, this will be handled by
+     * If the item is currently running, this will be handled by
      * context_saved(); and in any case, if the bit is cleared, then
      * someone else has already done the work so we don't need to.
      */
-    if ( vcpu_running(v) || !test_bit(_VPF_migrating, &v->pause_flags) )
-        return;
+    for_each_sched_item_vcpu ( item, v )
+    {
+        if ( vcpu_running(v) || !test_bit(_VPF_migrating, &v->pause_flags) )
+            return;
+    }
 
-    old_cpu = new_cpu = v->processor;
+    old_cpu = new_cpu = item->res->processor;
     for ( ; ; )
     {
         /*
@@ -776,7 +790,7 @@ static void vcpu_migrate_finish(struct vcpu *v)
 
         sched_spin_lock_double(old_lock, new_lock, &flags);
 
-        old_cpu = v->processor;
+        old_cpu = item->res->processor;
         if ( old_lock == per_cpu(sched_res, old_cpu)->schedule_lock )
         {
             /*
@@ -785,15 +799,15 @@ static void vcpu_migrate_finish(struct vcpu *v)
              */
             if ( pick_called &&
                  (new_lock == per_cpu(sched_res, new_cpu)->schedule_lock) &&
-                 cpumask_test_cpu(new_cpu, v->sched_item->cpu_hard_affinity) &&
-                 cpumask_test_cpu(new_cpu, v->domain->cpupool->cpu_valid) )
+                 cpumask_test_cpu(new_cpu, item->cpu_hard_affinity) &&
+                 cpumask_test_cpu(new_cpu, item->domain->cpupool->cpu_valid) )
                 break;
 
             /* Select a new CPU. */
-            new_cpu = SCHED_OP(vcpu_scheduler(v), pick_resource,
-                               v->sched_item)->processor;
+            new_cpu = SCHED_OP(vcpu_scheduler(item->vcpu), pick_resource,
+                               item)->processor;
             if ( (new_lock == per_cpu(sched_res, new_cpu)->schedule_lock) &&
-                 cpumask_test_cpu(new_cpu, v->domain->cpupool->cpu_valid) )
+                 cpumask_test_cpu(new_cpu, item->domain->cpupool->cpu_valid) )
                 break;
             pick_called = 1;
         }
@@ -814,22 +828,26 @@ static void vcpu_migrate_finish(struct vcpu *v)
      * because they both happen in (different) spinlock regions, and those
      * regions are strictly serialised.
      */
-    if ( vcpu_running(v) ||
-         !test_and_clear_bit(_VPF_migrating, &v->pause_flags) )
+    for_each_sched_item_vcpu ( item, v )
     {
-        sched_spin_unlock_double(old_lock, new_lock, flags);
-        return;
+        if ( vcpu_running(v) ||
+             !test_and_clear_bit(_VPF_migrating, &v->pause_flags) )
+        {
+            sched_spin_unlock_double(old_lock, new_lock, flags);
+            return;
+        }
     }
 
-    vcpu_move_locked(v, new_cpu);
+    sched_item_move_locked(item, new_cpu);
 
     sched_spin_unlock_double(old_lock, new_lock, flags);
 
     if ( old_cpu != new_cpu )
-        sched_move_irqs(v->sched_item);
+        sched_move_irqs(item);
 
     /* Wake on new CPU. */
-    vcpu_wake(v);
+    for_each_sched_item_vcpu ( item, v )
+        vcpu_wake(v);
 }
 
 /*
@@ -970,10 +988,9 @@ int cpu_disable_scheduler(unsigned int cpu)
              *  * the scheduler will always find a suitable solution, or
              *    things would have failed before getting in here.
              */
-            vcpu_migrate_start(item->vcpu);
+            sched_item_migrate_start(item);
             item_schedule_unlock_irqrestore(lock, flags, item);
-
-            vcpu_migrate_finish(item->vcpu);
+            sched_item_migrate_finish(item);
 
             /*
              * The only caveat, in this case, is that if a vcpu active in
@@ -1064,14 +1081,14 @@ static int vcpu_set_affinity(
             ASSERT(which == item->cpu_soft_affinity);
             sched_set_affinity(v, NULL, affinity);
         }
-        vcpu_migrate_start(v);
+        sched_item_migrate_start(item);
     }
 
     item_schedule_unlock_irq(lock, item);
 
     domain_update_node_affinity(v->domain);
 
-    vcpu_migrate_finish(v);
+    sched_item_migrate_finish(item);
 
     return ret;
 }
@@ -1318,13 +1335,13 @@ int vcpu_pin_override(struct vcpu *v, int cpu)
     }
 
     if ( ret == 0 )
-        vcpu_migrate_start(v);
+        sched_item_migrate_start(item);
 
     item_schedule_unlock_irq(lock, item);
 
     domain_update_node_affinity(v->domain);
 
-    vcpu_migrate_finish(v);
+    sched_item_migrate_finish(item);
 
     return ret;
 }
@@ -1709,7 +1726,7 @@ void context_saved(struct vcpu *prev)
 
     SCHED_OP(vcpu_scheduler(prev), context_saved, prev->sched_item);
 
-    vcpu_migrate_finish(prev);
+    sched_item_migrate_finish(prev->sched_item);
 }
 
 /* The scheduler timer: force a run through the scheduler */
-- 
2.16.4

