
[Xen-changelog] [xen-unstable] Improve I/O performance when competing with CPU intensive workloads.



# HG changeset patch
# User Emmanuel Ackaouy <ack@xxxxxxxxxxxxx>
# Node ID 1940ee13f9d6ab1be2c614a0fbf7769536a056d2
# Parent  4a320d26fc24bc49ae24f31dec2bf006a9ddc7a8
Improve I/O performance when competing with CPU intensive workloads.
Allow non-CPU-consuming, wake-to-run latency-sensitive VCPUs to
preempt CPU-consuming ones.

Signed-off-by: Emmanuel Ackaouy <ack@xxxxxxxxxxxxx>
---
 xen/common/sched_credit.c |   30 +++++++++++++++++++++++++++++-
 1 files changed, 29 insertions(+), 1 deletion(-)

diff -r 4a320d26fc24 -r 1940ee13f9d6 xen/common/sched_credit.c
--- a/xen/common/sched_credit.c Thu Oct 26 16:56:16 2006 +0100
+++ b/xen/common/sched_credit.c Fri Oct 27 15:44:27 2006 +0100
@@ -46,6 +46,7 @@
 /*
  * Priorities
  */
+#define CSCHED_PRI_TS_BOOST      0      /* time-share waking up */
 #define CSCHED_PRI_TS_UNDER     -1      /* time-share w/ credits */
 #define CSCHED_PRI_TS_OVER      -2      /* time-share w/o credits */
 #define CSCHED_PRI_IDLE         -64     /* idle */
@@ -410,6 +411,14 @@ csched_vcpu_acct(struct csched_vcpu *svc
 
         spin_unlock_irqrestore(&csched_priv.lock, flags);
     }
+
+    /*
+     * If this VCPU's priority was boosted when it last awoke, reset it.
+     * If the VCPU is found here, then it's consuming a non-negligible
+     * amount of CPU resources and should no longer be boosted.
+     */
+    if ( svc->pri == CSCHED_PRI_TS_BOOST )
+        svc->pri = CSCHED_PRI_TS_UNDER;
 }
 
 static inline void
@@ -566,6 +575,25 @@ csched_vcpu_wake(struct vcpu *vc)
     else
         CSCHED_STAT_CRANK(vcpu_wake_not_runnable);
 
+    /*
+     * We temporarily boost the priority of waking VCPUs!
+     *
+     * If this VCPU consumes a non-negligible amount of CPU, it
+     * will eventually find itself in the credit accounting code
+     * path where its priority will be reset to normal.
+     *
+     * If, on the other hand, the VCPU consumes little CPU and
+     * blocks and is awoken frequently (doing I/O for example), its
+     * priority will remain boosted, optimizing its wake-to-run
+     * latencies.
+     *
+     * This allows wake-to-run latency-sensitive VCPUs to preempt
+     * more CPU-intensive VCPUs without impacting overall
+     * system fairness.
+     */
+    if ( svc->pri == CSCHED_PRI_TS_UNDER )
+        svc->pri = CSCHED_PRI_TS_BOOST;
+
     /* Put the VCPU on the runq and tickle CPUs */
     __runq_insert(cpu, svc);
     __runq_tickle(cpu, svc);
@@ -659,7 +687,7 @@ csched_runq_sort(unsigned int cpu)
         next = elem->next;
         svc_elem = __runq_elem(elem);
 
-        if ( svc_elem->pri == CSCHED_PRI_TS_UNDER )
+        if ( svc_elem->pri >= CSCHED_PRI_TS_UNDER )
         {
             /* does elem need to move up the runq? */
             if ( elem->prev != last_under )
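
For readers skimming the diff, the idea can be summarized as follows: the
new CSCHED_PRI_TS_BOOST priority is granted in the wakeup path
(csched_vcpu_wake) and revoked in the accounting path (csched_vcpu_acct).
The sketch below is not the Xen code itself; struct vcpu, vcpu_wake() and
vcpu_acct() are simplified stand-ins that ignore locking, per-CPU runqueues
and the credit arithmetic, and exist only to show the boost lifecycle.

/*
 * Minimal standalone sketch of the boost mechanism introduced by this
 * patch. It models only the priority transitions, not the scheduler.
 */
#include <stdio.h>

#define CSCHED_PRI_TS_BOOST   0   /* time-share, recently woken   */
#define CSCHED_PRI_TS_UNDER  -1   /* time-share, with credits     */
#define CSCHED_PRI_TS_OVER   -2   /* time-share, out of credits   */

struct vcpu { const char *name; int pri; };

/* Wakeup path: a VCPU that still has credits gets a temporary boost. */
static void vcpu_wake(struct vcpu *v)
{
    if ( v->pri == CSCHED_PRI_TS_UNDER )
        v->pri = CSCHED_PRI_TS_BOOST;
}

/* Accounting path: a VCPU caught consuming CPU time loses the boost. */
static void vcpu_acct(struct vcpu *v)
{
    if ( v->pri == CSCHED_PRI_TS_BOOST )
        v->pri = CSCHED_PRI_TS_UNDER;
}

int main(void)
{
    struct vcpu io_vcpu  = { "io-bound",  CSCHED_PRI_TS_UNDER };
    struct vcpu cpu_vcpu = { "cpu-bound", CSCHED_PRI_TS_UNDER };

    /* The I/O-bound VCPU blocks and wakes often and is never seen by
     * the accounting path, so it keeps the boosted priority. */
    vcpu_wake(&io_vcpu);

    /* The CPU-bound VCPU wakes too, but accounting soon demotes it. */
    vcpu_wake(&cpu_vcpu);
    vcpu_acct(&cpu_vcpu);

    printf("%s pri=%d, %s pri=%d\n",
           io_vcpu.name, io_vcpu.pri, cpu_vcpu.name, cpu_vcpu.pri);
    return 0;
}

Because CSCHED_PRI_TS_BOOST (0) sorts above CSCHED_PRI_TS_UNDER (-1), a
boosted VCPU is inserted ahead of CPU-bound VCPUs on the runqueue and can
preempt them, which is also why csched_runq_sort() now moves entries with
pri >= CSCHED_PRI_TS_UNDER up the queue instead of testing for strict
equality with CSCHED_PRI_TS_UNDER.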

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog