[Xen-devel] [PATCH 08/19] xen: credit2: when tickling, check idle cpus first



If there are idle pCPUs, it's always better to try to
"ship" the new vCPU there, rather than letting it
preempt a vCPU on a currently busy one.

This commit also adds a cpumask_test_or_cycle() helper
function, to make it easier to express the preference for
the pCPU where the vCPU was running before.
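
For illustration only (and not as part of the patch itself), below is a
minimal, standalone sketch of the idle-first step and of the behaviour
the helper is meant to have. The plain unsigned bitmasks, test_or_cycle()
and NR_CPUS are hypothetical stand-ins for Xen's cpumask_t API:

    #include <stdio.h>

    #define NR_CPUS 8

    /* Stand-in for cpumask_test_or_cycle(): return n itself if it is in
     * the mask, else the next set bit after n (wrapping around); NR_CPUS
     * means "no suitable cpu". */
    static int test_or_cycle(int n, unsigned int mask)
    {
        int i;

        if ( mask & (1u << n) )
            return n;
        for ( i = 1; i < NR_CPUS; i++ )
            if ( mask & (1u << ((n + i) % NR_CPUS)) )
                return (n + i) % NR_CPUS;
        return NR_CPUS;
    }

    int main(void)
    {
        unsigned int idle     = 0x28; /* cpus 3 and 5 are idle */
        unsigned int tickled  = 0x20; /* cpu 5 has already been tickled */
        unsigned int affinity = 0xff; /* the vcpu may run anywhere */
        int prev_cpu = 3;             /* where the vcpu was running before */

        /* Step 1 of the new runq_tickle(): consider idle, non-tickled,
         * affine cpus first, preferring the cpu the vcpu last ran on. */
        unsigned int mask = (idle & ~tickled) & affinity;
        int target = test_or_cycle(prev_cpu, mask);

        if ( target < NR_CPUS )
            printf("tickle idle cpu %d\n", target); /* -> "tickle idle cpu 3" */
        else
            printf("no usable idle cpu, fall back to comparing credits\n");

        return 0;
    }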

Signed-off-by: Dario Faggioli <dario.faggioli@xxxxxxxxxx>
---
Cc: George Dunlap <george.dunlap@xxxxxxxxxx>
Cc: Anshul Makkar <anshul.makkar@xxxxxxxxxx>
Cc: David Vrabel <david.vrabel@xxxxxxxxxx>
---
 xen/common/sched_credit2.c |   68 +++++++++++++++++++++++++++++---------------
 xen/include/xen/cpumask.h  |    8 +++++
 2 files changed, 53 insertions(+), 23 deletions(-)

diff --git a/xen/common/sched_credit2.c b/xen/common/sched_credit2.c
index b73d034..af28e7b 100644
--- a/xen/common/sched_credit2.c
+++ b/xen/common/sched_credit2.c
@@ -568,9 +568,23 @@ __runq_remove(struct csched2_vcpu *svc)
 
 void burn_credits(struct csched2_runqueue_data *rqd, struct csched2_vcpu *, s_time_t);
 
-/* Check to see if the item on the runqueue is higher priority than what's
- * currently running; if so, wake up the processor */
-static /*inline*/ void
+/*
+ * Check what processor it is best to 'wake', for picking up a vcpu that has
+ * just been put (back) in the runqueue. Logic is as follows:
+ *  1. if there are idle processors in the runq, wake one of them;
+ *  2. if there are no idle processors, check the one where the vcpu was
+ *     running before, to see if we can preempt what's running there now
+ *     (and hence do just one migration);
+ *  3. last resort: check all processors and see if the vcpu is entitled
+ *     to preempt any of the other vcpus running on them (this requires
+ *     two migrations, and that is indeed why it is left as the last resort).
+ *
+ * Note that when we say 'idle processors' what we really mean is (pretty
+ * much always) both _idle_ and _not_already_tickled_. In fact, if a
+ * processor has been tickled, it will run csched2_schedule() shortly, and
+ * pick up some work, so it would be wrong to consider it idle.
+ */
+static void
 runq_tickle(const struct scheduler *ops, struct csched2_vcpu *new, s_time_t now)
 {
     int i, ipid=-1;
@@ -584,22 +598,14 @@ runq_tickle(const struct scheduler *ops, struct csched2_vcpu *new, s_time_t now)
 
     BUG_ON(new->rqd != rqd);
 
-    /* Look at the cpu it's running on first */
-    cur = CSCHED2_VCPU(curr_on_cpu(cpu));
-    burn_credits(rqd, cur, now);
-
-    if ( cur->credit < new->credit )
-    {
-        ipid = cpu;
-        goto tickle;
-    }
-    
-    /* Get a mask of idle, but not tickled, that new is allowed to run on. */
+    /*
+     * Get a mask of idle, but not tickled, processors that new is
+     * allowed to run on. If that's not empty, choose someone from there
+     * (preferably, the one where new was already running).
+     */
     cpumask_andnot(&mask, &rqd->idle, &rqd->tickled);
     cpumask_and(&mask, &mask, new->vcpu->cpu_hard_affinity);
-    
-    /* If it's not empty, choose one */
-    i = cpumask_cycle(cpu, &mask);
+    i = cpumask_test_or_cycle(cpu, &mask);
     if ( i < nr_cpu_ids )
     {
         SCHED_STAT_CRANK(tickled_idle_cpu);
@@ -607,12 +613,26 @@ runq_tickle(const struct scheduler *ops, struct csched2_vcpu *new, s_time_t now)
         goto tickle;
     }
 
-    /* Otherwise, look for the non-idle cpu with the lowest credit,
-     * skipping cpus which have been tickled but not scheduled yet,
-     * that new is allowed to run on. */
+    /*
+     * Otherwise, look for the non-idle (and non-tickled) processor with
+     * the lowest credit, among the ones new is allowed to run on. Again,
+     * the cpu where it was running would be the best candidate.
+     */
     cpumask_andnot(&mask, &rqd->active, &rqd->idle);
     cpumask_andnot(&mask, &mask, &rqd->tickled);
     cpumask_and(&mask, &mask, new->vcpu->cpu_hard_affinity);
+    if ( cpumask_test_cpu(cpu, &mask) )
+    {
+        cur = CSCHED2_VCPU(curr_on_cpu(cpu));
+        burn_credits(rqd, cur, now);
+
+        if ( cur->credit < new->credit )
+        {
+            SCHED_STAT_CRANK(tickled_busy_cpu);
+            ipid = cpu;
+            goto tickle;
+        }
+    }
 
     for_each_cpu(i, &mask)
     {
@@ -624,7 +644,7 @@ runq_tickle(const struct scheduler *ops, struct csched2_vcpu *new, s_time_t now)
 
         BUG_ON(is_idle_vcpu(cur->vcpu));
 
-        /* Update credits for current to see if we want to preempt */
+        /* Update credits for current to see if we want to preempt. */
         burn_credits(rqd, cur, now);
 
         if ( cur->credit < lowest )
@@ -647,8 +667,10 @@ runq_tickle(const struct scheduler *ops, struct csched2_vcpu *new, s_time_t now)
         }
     }
 
-    /* Only switch to another processor if the credit difference is greater
-     * than the migrate resistance */
+    /*
+     * Only switch to another processor if the credit difference is
+     * greater than the migrate resistance.
+     */
     if ( ipid == -1 || lowest + CSCHED2_MIGRATE_RESIST > new->credit )
     {
         SCHED_STAT_CRANK(tickled_no_cpu);
diff --git a/xen/include/xen/cpumask.h b/xen/include/xen/cpumask.h
index 0e7108c..3f340d6 100644
--- a/xen/include/xen/cpumask.h
+++ b/xen/include/xen/cpumask.h
@@ -266,6 +266,14 @@ static inline int cpumask_cycle(int n, const cpumask_t *srcp)
     return nxt;
 }
 
+static inline int cpumask_test_or_cycle(int n, const cpumask_t *srcp)
+{
+    if ( cpumask_test_cpu(n, srcp) )
+        return n;
+
+    return cpumask_cycle(n, srcp);
+}
+
 static inline unsigned int cpumask_any(const cpumask_t *srcp)
 {
     unsigned int cpu = cpumask_first(srcp);

