
[Xen-changelog] [xen-unstable] scheduler: small csched_cpu_pick() adjustments



# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1256727399 0
# Node ID cff23354d026bd359241535c77d90b11604a6538
# Parent  de4b6b7f55db934e45f74c84953ac28efcb96803
scheduler: small csched_cpu_pick() adjustments

When csched_cpu_pick() decides to move a vCPU to a different pCPU, it
has so far, in the vast majority of cases, selected the first
core/thread of the most idle socket/core. When there are many
short-running entities, this generally leads to them not being evenly
distributed (since primary cores/threads get preferred), making
subsequent migrations more likely. Instead, candidate cores/threads
should be treated as symmetrically as possible, and hence this patch
changes the selection logic to cycle through all candidates.
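
To make the rotation concrete, here is a minimal standalone sketch
(plain C, not Xen code) of the idea: starting just past a remembered
per-pCPU bias, scan the idle mask circularly, take the first hit, and
remember where the scan stopped. The fixed 8-CPU bitmask and the
cycle_idle_cpu() helper are illustrative stand-ins for Xen's cpumask_t
and cycle_cpu().

#include <stdio.h>

#define NR_CPUS 8

/* Return the first idle CPU strictly after 'bias', wrapping around. */
static int cycle_idle_cpu(unsigned int bias, unsigned int idle_mask)
{
    for (unsigned int i = 1; i <= NR_CPUS; i++) {
        unsigned int cpu = (bias + i) % NR_CPUS;
        if (idle_mask & (1u << cpu))
            return (int)cpu;
    }
    return -1; /* no idle CPU in the mask */
}

int main(void)
{
    unsigned int idle_mask = 0x36;   /* CPUs 1, 2, 4 and 5 are idle */
    unsigned int bias = NR_CPUS - 1; /* initial bias, as in csched_pcpu_init() */

    /* Successive picks rotate through all idle CPUs: 1, 2, 4, 5, ... */
    for (int n = 0; n < 4; n++) {
        int cpu = cycle_idle_cpu(bias, idle_mask);
        printf("pick %d -> cpu %d\n", n, cpu);
        bias = (unsigned int)cpu; /* remember where the scan stopped */
    }
    return 0;
}

Always restarting the scan just past the previous pick is what spreads
short-lived entities over all idle candidates instead of piling them
onto the first one.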

Further, since csched_cpu_pick() never moves a vCPU between threads of
the same core (and since the weights calculated for individual threads
of the same core are always identical), all siblings of the chosen
pCPU can be removed at once from the mask that still needs to be
examined, rather than just the selected pCPU, without affecting the
outcome.
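
A small sketch of that mask-shrinking step, again plain C rather than
Xen code; the two-threads-per-core layout assumed below is purely
illustrative, where Xen instead uses per_cpu(cpu_sibling_map, cpu)
with cpus_andnot().

#include <stdio.h>

/*
 * Remove every thread of 'cpu's core from the candidate mask in one
 * step, mirroring cpus_andnot(cpus, cpus, per_cpu(cpu_sibling_map, cpu)).
 * Assumes two threads per core, siblings paired as (0,1), (2,3), ...
 */
static unsigned int drop_siblings(unsigned int candidates, unsigned int cpu)
{
    unsigned int sibling_mask = 3u << (cpu & ~1u); /* cpu and its HT sibling */
    return candidates & ~sibling_mask;
}

int main(void)
{
    unsigned int cpus = 0xff;      /* CPUs 0-7 still to be examined */
    cpus = drop_siblings(cpus, 4); /* choosing CPU 4 also drops CPU 5 */
    printf("remaining mask: 0x%02x\n", cpus); /* prints 0xcf */
    return 0;
}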

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxxxx>
---
 xen/common/sched_credit.c |   18 ++++++++++++++----
 1 file changed, 14 insertions(+), 4 deletions(-)

diff -r de4b6b7f55db -r cff23354d026 xen/common/sched_credit.c
--- a/xen/common/sched_credit.c Wed Oct 28 10:55:53 2009 +0000
+++ b/xen/common/sched_credit.c Wed Oct 28 10:56:39 2009 +0000
@@ -110,6 +110,7 @@ struct csched_pcpu {
     uint32_t runq_sort_last;
     struct timer ticker;
     unsigned int tick;
+    unsigned int idle_bias;
 };
 
 /*
@@ -298,6 +299,7 @@ csched_pcpu_init(int cpu)
     init_timer(&spc->ticker, csched_tick, (void *)(unsigned long)cpu, cpu);
     INIT_LIST_HEAD(&spc->runq);
     spc->runq_sort_last = csched_priv.runq_sort;
+    spc->idle_bias = NR_CPUS - 1;
     per_cpu(schedule_data, cpu).sched_priv = spc;
 
     /* Start off idling... */
@@ -379,7 +381,7 @@ __csched_vcpu_is_migrateable(struct vcpu
 }
 
 static int
-csched_cpu_pick(struct vcpu *vc)
+_csched_cpu_pick(struct vcpu *vc, bool_t commit)
 {
     cpumask_t cpus;
     cpumask_t idlers;
@@ -438,8 +440,10 @@ csched_cpu_pick(struct vcpu *vc)
         if ( ( (weight_cpu < weight_nxt) ^ sched_smt_power_savings )
                 && (weight_cpu != weight_nxt) )
         {
-            cpu = nxt;
-            cpu_clear(cpu, cpus);
+            cpu = cycle_cpu(CSCHED_PCPU(nxt)->idle_bias, nxt_idlers);
+            if ( commit )
+                CSCHED_PCPU(nxt)->idle_bias = cpu;
+            cpus_andnot(cpus, cpus, per_cpu(cpu_sibling_map, cpu));
         }
         else
         {
@@ -448,6 +452,12 @@ csched_cpu_pick(struct vcpu *vc)
     }
 
     return cpu;
+}
+
+static int
+csched_cpu_pick(struct vcpu *vc)
+{
+    return _csched_cpu_pick(vc, 1);
 }
 
 static inline void
@@ -529,7 +539,7 @@ csched_vcpu_acct(unsigned int cpu)
     {
         __csched_vcpu_acct_start(svc);
     }
-    else if ( csched_cpu_pick(current) != cpu )
+    else if ( _csched_cpu_pick(current, 0) != cpu )
     {
         CSCHED_VCPU_STAT_CRANK(svc, migrate_r);
         CSCHED_STAT_CRANK(migrate_running);
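
One detail worth noting in the hunks above: _csched_cpu_pick() gains a
"commit" flag so that the query from csched_vcpu_acct() -- which only
asks whether the vCPU would be placed elsewhere, via
_csched_cpu_pick(current, 0) -- does not advance idle_bias and thus
does not perturb the rotation, while the real pick path passes 1. A
compact sketch of that probe-vs-commit pattern (illustrative names,
not Xen code):

#include <stdio.h>

/* Probe-vs-commit sketch: only a committed pick advances the bias. */
struct picker { unsigned int idle_bias; };

static int pick_cpu(struct picker *p, unsigned int idle_mask, int commit)
{
    for (unsigned int i = 1; i <= 8; i++) {      /* 8 CPUs, illustrative */
        unsigned int cpu = (p->idle_bias + i) % 8;
        if (idle_mask & (1u << cpu)) {
            if (commit)
                p->idle_bias = cpu;              /* advance rotation state */
            return (int)cpu;
        }
    }
    return -1;                                   /* no idle CPU */
}

int main(void)
{
    struct picker p = { .idle_bias = 7 };
    printf("probe:  %d\n", pick_cpu(&p, 0x36, 0)); /* 1; bias stays 7 */
    printf("probe:  %d\n", pick_cpu(&p, 0x36, 0)); /* 1 again: unchanged */
    printf("commit: %d\n", pick_cpu(&p, 0x36, 1)); /* 1; bias becomes 1 */
    printf("commit: %d\n", pick_cpu(&p, 0x36, 1)); /* 2: rotation advanced */
    return 0;
}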

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog