[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-changelog] [xen-unstable] credit2: Migrate request infrastructure



# HG changeset patch
# User Keir Fraser <keir@xxxxxxx>
# Date 1293179464 0
# Node ID df310dcd19cb7b5840a1f73431abb7aa87531122
# Parent  6a970abb346f1767523649240a6307e55dfcd76c
credit2: Migrate request infrastructure

Put in infrastructure to allow a vcpu to request to migrate to a
specific runqueue.  This will allow a load balancer to choose running
VMs to migrate, and know they will go where expected when the VM is
descheduled.

Signed-off-by: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
---
 xen/common/sched_credit2.c |   38 +++++++++++++++++++++++++++++++++++---
 1 files changed, 35 insertions(+), 3 deletions(-)

diff -r 6a970abb346f -r df310dcd19cb xen/common/sched_credit2.c
--- a/xen/common/sched_credit2.c        Fri Dec 24 08:30:42 2010 +0000
+++ b/xen/common/sched_credit2.c        Fri Dec 24 08:31:04 2010 +0000
@@ -157,6 +157,12 @@
  */
 #define __CSFLAG_delayed_runq_add 2
 #define CSFLAG_delayed_runq_add (1<<__CSFLAG_delayed_runq_add)
+/* CSFLAG_runq_migrate_request: This vcpu is being migrated as a result of a
+ * credit2-initiated runq migrate request; migrate it to the runqueue indicated
+ * in the svc struct. 
+ */
+#define __CSFLAG_runq_migrate_request 3
+#define CSFLAG_runq_migrate_request (1<<__CSFLAG_runq_migrate_request)
 
 
 int opt_migrate_resist=500;
@@ -247,6 +253,8 @@ struct csched_vcpu {
     /* Individual contribution to load */
     s_time_t load_last_update;  /* Last time average was updated */
     s_time_t avgload;           /* Decaying queue load */
+
+    struct csched_runqueue_data *migrate_rqd; /* Pre-determined rqd to which 
to migrate */
 };
 
 /*
@@ -974,10 +982,10 @@ csched_context_saved(const struct schedu
      * it seems a bit pointless; especially as we have plenty of
      * bits free.
      */
-    if ( test_bit(__CSFLAG_delayed_runq_add, &svc->flags) )
+    if ( test_and_clear_bit(__CSFLAG_delayed_runq_add, &svc->flags)
+         && likely(vcpu_runnable(vc)) )
     {
         BUG_ON(__vcpu_on_runq(svc));
-        clear_bit(__CSFLAG_delayed_runq_add, &svc->flags);
 
         runq_insert(ops, vc->processor, svc);
         runq_tickle(ops, vc->processor, svc, now);
@@ -1015,9 +1023,32 @@ choose_cpu(const struct scheduler *ops, 
 
     if ( !spin_trylock(&prv->lock) )
     {
+        if ( test_and_clear_bit(__CSFLAG_runq_migrate_request, &svc->flags) )
+        {
+            d2printk("d%dv%d -\n", svc->vcpu->domain->domain_id, 
svc->vcpu->vcpu_id);
+            clear_bit(__CSFLAG_runq_migrate_request, &svc->flags);
+        }
         /* Leave it where it is for now.  When we actually pay attention
          * to affinity we'll have to figure something out... */
         return vc->processor;
+    }
+
+    /* First check to see if we're here because someone else suggested a place
+     * for us to move. */
+    if ( test_and_clear_bit(__CSFLAG_runq_migrate_request, &svc->flags) )
+    {
+        if ( unlikely(svc->migrate_rqd->id < 0) )
+        {
+            printk("%s: Runqueue migrate aborted because target runqueue 
disappeared!\n",
+                   __func__);
+            /* Fall-through to normal cpu pick */
+        }
+        else
+        {
+            d2printk("d%dv%d +\n", svc->vcpu->domain->domain_id, 
svc->vcpu->vcpu_id);
+            new_cpu = first_cpu(svc->migrate_rqd->active);
+            goto out_up;
+        }
     }
 
     /* FIXME: Pay attention to cpu affinity */                                 
                                                     
@@ -1053,7 +1084,8 @@ choose_cpu(const struct scheduler *ops, 
         BUG_ON(cpus_empty(prv->rqd[min_rqi].active));
         new_cpu = first_cpu(prv->rqd[min_rqi].active);
     }
- 
+
+out_up:
     spin_unlock(&prv->lock);
 
     return new_cpu;

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.