
[Xen-changelog] [xen-unstable] [XEN] Optimize credit scheduler load balancing logic



# HG changeset patch
# User Emmanuel Ackaouy <ack@xxxxxxxxxxxxx>
# Node ID 32e4952c063866165d9cab913f67b57f8e78aded
# Parent  8eb8c0085604f3671e062c8d7f0054b42a20986b
[XEN] Optimize credit scheduler load balancing logic
When looking for remote work, only look at or grab a remote CPU's
lock when that CPU is not idling.
Signed-off-by: Emmanuel Ackaouy <ack@xxxxxxxxxxxxx>
---
 xen/common/sched_credit.c |   34 +++++++++++++++++++++++++---------
 1 files changed, 25 insertions(+), 9 deletions(-)
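
For readers skimming the diff below: the new logic first builds a mask of
online, non-idling CPUs (minus the local one) and then walks that mask
starting at the neighbouring CPU and wrapping around, so a remote CPU's lock
is only considered when that CPU actually has work. The following is a
minimal, self-contained sketch of that traversal; it is an editor's
illustration, not part of the changeset, and worker_scan plus the plain
64-bit masks are hypothetical stand-ins for Xen's cpumask_t helpers
(cpus_andnot, next_cpu, first_cpu) and the real per-CPU locking.

/*
 * Illustrative sketch only -- not part of the changeset.  Models the new
 * scan order on a plain 64-bit mask so the neighbour-first wrap-around
 * is easy to follow with a compiler and printf.
 */
#include <stdio.h>
#include <stdint.h>

#define NR_CPUS 8

static void worker_scan(uint64_t online, uint64_t idlers, int cpu)
{
    /* Peek only at online, non-idling CPUs, and never at ourselves. */
    uint64_t workers = online & ~idlers & ~(1ULL << cpu);
    int peer_cpu = cpu;

    while (workers != 0)
    {
        /* Next candidate above peer_cpu; wrap to the lowest set bit,
         * mirroring next_cpu() returning NR_CPUS and the first_cpu()
         * fallback in the patch. */
        int next = -1;
        for (int i = peer_cpu + 1; i < NR_CPUS && next < 0; i++)
            if (workers & (1ULL << i))
                next = i;
        for (int i = 0; i < NR_CPUS && next < 0; i++)
            if (workers & (1ULL << i))
                next = i;

        peer_cpu = next;
        workers &= ~(1ULL << peer_cpu);

        /* Only at this point would the real code touch peer_cpu's lock. */
        printf("would inspect CPU %d\n", peer_cpu);
    }
}

int main(void)
{
    /* CPUs 0-7 online, CPUs 2 and 5 idle, we are CPU 3:
     * prints CPUs 4, 6, 7, 0, 1 in that order. */
    worker_scan(0xffULL, (1ULL << 2) | (1ULL << 5), 3);
    return 0;
}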

diff -r 8eb8c0085604 -r 32e4952c0638 xen/common/sched_credit.c
--- a/xen/common/sched_credit.c Mon Nov 06 16:36:51 2006 +0000
+++ b/xen/common/sched_credit.c Mon Nov 06 16:55:56 2006 +0000
@@ -955,8 +955,10 @@ static struct csched_vcpu *
 static struct csched_vcpu *
 csched_load_balance(int cpu, struct csched_vcpu *snext)
 {
+    struct csched_vcpu *speer;
     struct csched_pcpu *spc;
-    struct csched_vcpu *speer;
+    struct vcpu *peer_vcpu;
+    cpumask_t workers;
     int peer_cpu;
 
     if ( snext->pri == CSCHED_PRI_IDLE )
@@ -966,15 +968,23 @@ csched_load_balance(int cpu, struct csch
     else
         CSCHED_STAT_CRANK(load_balance_other);
 
+    /*
+     * Peek at non-idling CPUs in the system
+     */
+    cpus_andnot(workers, cpu_online_map, csched_priv.idlers);
+    cpu_clear(cpu, workers);
+
     peer_cpu = cpu;
     BUG_ON( peer_cpu != snext->vcpu->processor );
 
-    while ( 1 )
-    {
-        /* For each PCPU in the system starting with our neighbour... */
-        peer_cpu = (peer_cpu + 1) % csched_priv.ncpus;
-        if ( peer_cpu == cpu )
-            break;
+    while ( !cpus_empty(workers) )
+    {
+        /* For each CPU of interest, starting with our neighbour... */
+        peer_cpu = next_cpu(peer_cpu, workers);
+        if ( peer_cpu == NR_CPUS )
+            peer_cpu = first_cpu(workers);
+
+        cpu_clear(peer_cpu, workers);
 
         /*
          * Get ahold of the scheduler lock for this peer CPU.
@@ -990,13 +1000,19 @@ csched_load_balance(int cpu, struct csch
         }
 
         spc = CSCHED_PCPU(peer_cpu);
+        peer_vcpu = per_cpu(schedule_data, peer_cpu).curr;
+
         if ( unlikely(spc == NULL) )
         {
             CSCHED_STAT_CRANK(steal_peer_down);
             speer = NULL;
         }
-        else if ( is_idle_vcpu(per_cpu(schedule_data, peer_cpu).curr) )
-        {
+        else if ( unlikely(is_idle_vcpu(peer_vcpu)) )
+        {
+            /*
+             * Don't steal from an idle CPU's runq because that CPU is
+             * about to pick up the work itself.
+             */
             CSCHED_STAT_CRANK(steal_peer_idle);
             speer = NULL;
         }
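
To make the effect of the filter concrete (again an editor's sketch, not
part of the changeset, using a made-up 8-CPU configuration): under the old
loop every other online CPU had its scheduler lock taken before the peer was
found to be down or idle, whereas the new loop intersects with the shared
idlers mask first, so idle peers never cost a remote lock at all.

#include <stdio.h>
#include <stdint.h>

/* Count the set bits in a small mask; good enough for this sketch. */
static int count_bits(uint64_t m)
{
    int n = 0;
    for (; m != 0; m &= m - 1)
        n++;
    return n;
}

int main(void)
{
    uint64_t online = 0xffULL;                     /* CPUs 0-7 online      */
    uint64_t busy   = (1ULL << 1) | (1ULL << 4);   /* only 1 and 4 working */
    uint64_t idlers = online & ~busy;              /* everyone else idles  */
    int cpu = 3;                                   /* CPU going idle here  */

    /* Old loop: every other online CPU's lock was grabbed before the
     * peer was discovered to be down or idle. */
    int old_locks = count_bits(online & ~(1ULL << cpu));

    /* New loop: only online, non-idling peers are ever considered. */
    int new_locks = count_bits(online & ~idlers & ~(1ULL << cpu));

    printf("old loop: %d remote runqueue locks\n", old_locks);
    printf("new loop: %d remote runqueue locks\n", new_locks);
    return 0;
}

On this sketched configuration the old scan would have taken 7 remote locks
and the new one takes 2, which is the point of peeking at csched_priv.idlers
before touching any peer.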

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog