
[Xen-devel] [RFC][PATCH 2/4] sched: change the handling of credits over upper bound



With this patch applied, the credit scheduler no longer resets a vcpu's credits to 0 when they would exceed the upper bound; instead, the credits are capped at the bound. This prevents a vcpu from missing the chance to become active again.

The difference between this patch and the previous one is when a vcpu is put back on the active list: with this patch, a vcpu is put back on the active list only in csched_acct().
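
To make the changed behavior concrete, here is a minimal standalone sketch (illustration only, not part of the patch). It assumes the stock credit scheduler parameters of 100 credits per tick and 3 ticks per accounting period, so the upper bound CSCHED_CREDITS_PER_TSLICE comes to 300 credits; the helper names (cap_credit_old, cap_credit_new, should_rejoin_active) are hypothetical.

#include <stdio.h>

#define CSCHED_CREDITS_PER_TICK   100  /* assumed stock value */
#define CSCHED_TICKS_PER_ACCT     3    /* assumed stock value */
#define CSCHED_CREDITS_PER_TSLICE \
    (CSCHED_CREDITS_PER_TICK * CSCHED_TICKS_PER_ACCT)

/* Old behaviour: credits over the bound were thrown away entirely. */
static int cap_credit_old(int credit)
{
    return (credit > CSCHED_CREDITS_PER_TSLICE) ? 0 : credit;
}

/* New behaviour: credits are clipped to the upper bound instead. */
static int cap_credit_new(int credit)
{
    return (credit > CSCHED_CREDITS_PER_TSLICE)
               ? CSCHED_CREDITS_PER_TSLICE : credit;
}

/*
 * An inactive (capped) vcpu is put back on the active list by
 * csched_acct() once at least one tick's worth of credits has been
 * consumed; this mirrors the threshold test added in the patch.
 */
static int should_rejoin_active(int credit)
{
    return credit <= CSCHED_CREDITS_PER_TICK * (CSCHED_TICKS_PER_ACCT - 1);
}

int main(void)
{
    int credit = 350; /* hypothetical value above the 300-credit bound */

    printf("old: %d -> %d\n", credit, cap_credit_old(credit)); /* -> 0   */
    printf("new: %d -> %d\n", credit, cap_credit_new(credit)); /* -> 300 */
    printf("rejoin at 300? %d\n", should_rejoin_active(300));  /* 0      */
    printf("rejoin at 200? %d\n", should_rejoin_active(200));  /* 1      */
    return 0;
}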

Best regards,
Naoki Nishiguchi
diff -r b431367fc717 xen/common/sched_credit.c
--- a/xen/common/sched_credit.c Wed Dec 17 16:00:48 2008 +0900
+++ b/xen/common/sched_credit.c Wed Dec 17 16:01:30 2008 +0900
@@ -197,6 +197,7 @@ struct csched_vcpu {
 struct csched_vcpu {
     struct list_head runq_elem;
     struct list_head active_vcpu_elem;
+    struct list_head inactive_vcpu_elem;
     struct csched_dom *sdom;
     struct vcpu *vcpu;
     atomic_t credit;
@@ -232,6 +233,7 @@ struct csched_private {
 struct csched_private {
     spinlock_t lock;
     struct list_head active_sdom;
+    struct list_head inactive_vcpu;
     uint32_t ncpus;
     unsigned int master;
     cpumask_t idlers;
@@ -485,12 +487,9 @@ csched_cpu_pick(struct vcpu *vc)
 }
 
 static inline void
-__csched_vcpu_acct_start(struct csched_vcpu *svc)
+__csched_vcpu_acct_start_locked(struct csched_vcpu *svc)
 {
     struct csched_dom * const sdom = svc->sdom;
-    unsigned long flags;
-
-    spin_lock_irqsave(&csched_priv.lock, flags);
 
     if ( list_empty(&svc->active_vcpu_elem) )
     {
@@ -499,14 +498,13 @@ __csched_vcpu_acct_start(struct csched_v
 
         sdom->active_vcpu_count++;
         list_add(&svc->active_vcpu_elem, &sdom->active_vcpu);
+        list_del_init(&svc->inactive_vcpu_elem);
         if ( list_empty(&sdom->active_sdom_elem) )
         {
             list_add(&sdom->active_sdom_elem, &csched_priv.active_sdom);
             csched_priv.weight += sdom->weight;
         }
     }
-
-    spin_unlock_irqrestore(&csched_priv.lock, flags);
 }
 
 static inline void
@@ -521,6 +519,7 @@ __csched_vcpu_acct_stop_locked(struct cs
 
     sdom->active_vcpu_count--;
     list_del_init(&svc->active_vcpu_elem);
+    list_add(&svc->inactive_vcpu_elem, &csched_priv.inactive_vcpu);
     if ( list_empty(&sdom->active_vcpu) )
     {
         BUG_ON( csched_priv.weight < sdom->weight );
@@ -546,18 +545,12 @@ csched_vcpu_acct(unsigned int cpu)
         svc->pri = CSCHED_PRI_TS_UNDER;
 
     /*
-     * Put this VCPU and domain back on the active list if it was
-     * idling.
-     *
      * If it's been active a while, check if we'd be better off
      * migrating it to run elsewhere (see multi-core and multi-thread
      * support in csched_cpu_pick()).
      */
-    if ( list_empty(&svc->active_vcpu_elem) )
-    {
-        __csched_vcpu_acct_start(svc);
-    }
-    else if ( csched_cpu_pick(current) != cpu )
+    if ( !list_empty(&svc->active_vcpu_elem) &&
+         csched_cpu_pick(current) != cpu )
     {
         CSCHED_VCPU_STAT_CRANK(svc, migrate_r);
         CSCHED_STAT_CRANK(migrate_running);
@@ -582,6 +575,7 @@ csched_vcpu_init(struct vcpu *vc)
 
     INIT_LIST_HEAD(&svc->runq_elem);
     INIT_LIST_HEAD(&svc->active_vcpu_elem);
+    INIT_LIST_HEAD(&svc->inactive_vcpu_elem);
     svc->sdom = sdom;
     svc->vcpu = vc;
     atomic_set(&svc->credit, 0);
@@ -597,6 +591,16 @@ csched_vcpu_init(struct vcpu *vc)
             return -1;
     }
 
+    /* Add vcpu to the inactive list so that accounting can later activate it */
+    if ( !is_idle_vcpu(vc) )
+    {
+        unsigned long flags;
+
+        spin_lock_irqsave(&csched_priv.lock, flags);
+        list_add(&svc->inactive_vcpu_elem, &csched_priv.inactive_vcpu);
+        spin_unlock_irqrestore(&csched_priv.lock, flags);
+    }
+
     CSCHED_VCPU_CHECK(vc);
     return 0;
 }
@@ -617,6 +621,9 @@ csched_vcpu_destroy(struct vcpu *vc)
 
     if ( !list_empty(&svc->active_vcpu_elem) )
         __csched_vcpu_acct_stop_locked(svc);
+
+    if ( !list_empty(&svc->inactive_vcpu_elem) )
+        list_del_init(&svc->inactive_vcpu_elem);
 
     spin_unlock_irqrestore(&csched_priv.lock, flags);
 
@@ -835,6 +842,18 @@ csched_acct(void)
 
     spin_lock_irqsave(&csched_priv.lock, flags);
 
+    /* Put vcpus back on the active list once a tick's worth of credits is consumed. */
+    list_for_each_safe( iter_vcpu, next_vcpu, &csched_priv.inactive_vcpu )
+    {
+        svc = list_entry(iter_vcpu, struct csched_vcpu, inactive_vcpu_elem);
+
+        if ( atomic_read(&svc->credit)
+             <= CSCHED_CREDITS_PER_TICK * (CSCHED_TICKS_PER_ACCT - 1) )
+        {
+            __csched_vcpu_acct_start_locked(svc);
+        }
+    }
+
     weight_total = csched_priv.weight;
     credit_total = csched_priv.credit;
 
@@ -991,7 +1010,7 @@ csched_acct(void)
                 if ( credit > CSCHED_CREDITS_PER_TSLICE )
                 {
                     __csched_vcpu_acct_stop_locked(svc);
-                    credit = 0;
+                    credit = CSCHED_CREDITS_PER_TSLICE;
                     atomic_set(&svc->credit, credit);
                 }
             }
@@ -1353,6 +1372,17 @@ csched_dump(void)
             csched_dump_vcpu(svc);
         }
     }
+
+    printk("inactive vcpus:\n");
+    loop = 0;
+    list_for_each( iter_svc, &csched_priv.inactive_vcpu )
+    {
+        struct csched_vcpu *svc;
+        svc = list_entry(iter_svc, struct csched_vcpu, inactive_vcpu_elem);
+
+        printk("\t%3d: ", ++loop);
+        csched_dump_vcpu(svc);
+    }
 }
 
 static void
@@ -1360,6 +1390,7 @@ csched_init(void)
 {
     spin_lock_init(&csched_priv.lock);
     INIT_LIST_HEAD(&csched_priv.active_sdom);
+    INIT_LIST_HEAD(&csched_priv.inactive_vcpu);
     csched_priv.ncpus = 0;
     csched_priv.master = UINT_MAX;
     cpus_clear(csched_priv.idlers);
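
A note on the locking structure of this change: the old __csched_vcpu_acct_start() took csched_priv.lock itself and could be called from csched_vcpu_acct() on every scheduler tick. The renamed __csched_vcpu_acct_start_locked() is instead called only from csched_acct(), which already holds the lock, so a capped vcpu is moved back to the active list at most once per accounting period. With the stock parameters assumed in the sketch above, a vcpu capped at 300 credits becomes active again once its credits drop to 200 or below, i.e. after it has consumed at least one tick's worth of credits.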