[Xen-devel] [PATCH RFC V2 28/45] xen: switch from for_each_vcpu() to for_each_sched_item()



Where appropriate, switch from for_each_vcpu() to for_each_sched_item()
in order to prepare for core scheduling.

Signed-off-by: Juergen Gross <jgross@xxxxxxxx>
---
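Reviewer note (below the fold, not part of the commit message): the
conversions in this patch rely on the sched_item iterators introduced
earlier in this series. Their exact definitions live elsewhere in the
series; the shape assumed by the loops below is roughly the following
sketch (the list-linkage details here are an assumption for
illustration, not a quote of the series):

    /* Iterate over all sched_items of domain d. */
    #define for_each_sched_item(d, i)                                 \
        for ( (i) = (d)->sched_item_list; (i) != NULL;                \
              (i) = (i)->next_in_list )

    /* Iterate over the vcpus assigned to one sched_item. */
    #define for_each_sched_item_vcpu(i, v)                            \
        for ( (v) = (i)->vcpu;                                        \
              (v) != NULL && (v)->sched_item == (i);                  \
              (v) = (v)->next_in_list )

With one vcpu per sched_item (the only granularity existing at this
point of the series) each for_each_sched_item() loop visits the same
set of entities as the for_each_vcpu() loop it replaces, so no change
in behaviour is intended.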
 xen/common/domain.c   |   9 ++---
 xen/common/schedule.c | 107 ++++++++++++++++++++++++++------------------------
 2 files changed, 59 insertions(+), 57 deletions(-)
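One hunk below changes structure rather than just names: in
sched_move_domain() the per-vcpu timers are still migrated per vcpu,
while the item is placed as a whole, so the vcpus of an item end up
spread over consecutive pcpus of the new pool and the item itself
lands on the first of them. A condensed sketch of that placement logic
(simplified from the hunk below; allocation, locking and error
handling omitted):

    unsigned int new_p = cpumask_first(c->cpu_valid);

    for_each_sched_item ( d, item )
    {
        unsigned int item_p = new_p;   /* first pcpu of this item's group */

        /* Move each vcpu's timers to that vcpu's prospective pcpu. */
        for_each_sched_item_vcpu ( item, v )
        {
            migrate_timer(&v->periodic_timer, new_p);
            migrate_timer(&v->singleshot_timer, new_p);
            migrate_timer(&v->poll_timer, new_p);
            new_p = cpumask_cycle(new_p, c->cpu_valid);
        }

        /* The item as a whole runs on the first pcpu of its group. */
        sched_set_res(item, per_cpu(sched_res, item_p));
    }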

diff --git a/xen/common/domain.c b/xen/common/domain.c
index 78a838fab3..d0f9e5e86a 100644
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -510,7 +510,7 @@ void domain_update_node_affinity(struct domain *d)
     cpumask_var_t dom_cpumask, dom_cpumask_soft;
     cpumask_t *dom_affinity;
     const cpumask_t *online;
-    struct vcpu *v;
+    struct sched_item *item;
     unsigned int cpu;
 
     /* Do we have vcpus already? If not, no need to update node-affinity. */
@@ -543,12 +543,11 @@ void domain_update_node_affinity(struct domain *d)
          * and the full mask of where it would prefer to run (the union of
          * the soft affinity of all its various vcpus). Let's build them.
          */
-        for_each_vcpu ( d, v )
+        for_each_sched_item ( d, item )
         {
-            cpumask_or(dom_cpumask, dom_cpumask,
-                       v->sched_item->cpu_hard_affinity);
+            cpumask_or(dom_cpumask, dom_cpumask, item->cpu_hard_affinity);
             cpumask_or(dom_cpumask_soft, dom_cpumask_soft,
-                       v->sched_item->cpu_soft_affinity);
+                       item->cpu_soft_affinity);
         }
         /* Filter out non-online cpus */
         cpumask_and(dom_cpumask, dom_cpumask, online);
diff --git a/xen/common/schedule.c b/xen/common/schedule.c
index 5368d66cfc..bc0554f2da 100644
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -423,16 +423,17 @@ static void sched_move_irqs(struct sched_item *item)
 int sched_move_domain(struct domain *d, struct cpupool *c)
 {
     struct vcpu *v;
+    struct sched_item *item;
     unsigned int new_p;
-    void **vcpu_priv;
+    void **item_priv;
     void *domdata;
-    void *vcpudata;
+    void *itemdata;
     struct scheduler *old_ops;
     void *old_domdata;
 
-    for_each_vcpu ( d, v )
+    for_each_sched_item ( d, item )
     {
-        if ( v->sched_item->affinity_broken )
+        if ( item->affinity_broken )
             return -EBUSY;
     }
 
@@ -440,22 +441,21 @@ int sched_move_domain(struct domain *d, struct cpupool *c)
     if ( IS_ERR(domdata) )
         return PTR_ERR(domdata);
 
-    vcpu_priv = xzalloc_array(void *, d->max_vcpus);
-    if ( vcpu_priv == NULL )
+    item_priv = xzalloc_array(void *, d->max_vcpus);
+    if ( item_priv == NULL )
     {
         sched_free_domdata(c->sched, domdata);
         return -ENOMEM;
     }
 
-    for_each_vcpu ( d, v )
+    for_each_sched_item ( d, item )
     {
-        vcpu_priv[v->vcpu_id] = sched_alloc_vdata(c->sched, v->sched_item,
-                                                  domdata);
-        if ( vcpu_priv[v->vcpu_id] == NULL )
+        item_priv[item->item_id] = sched_alloc_vdata(c->sched, item, domdata);
+        if ( item_priv[item->item_id] == NULL )
         {
-            for_each_vcpu ( d, v )
-                xfree(vcpu_priv[v->vcpu_id]);
-            xfree(vcpu_priv);
+            for_each_sched_item ( d, item )
+                xfree(item_priv[item->item_id]);
+            xfree(item_priv);
             sched_free_domdata(c->sched, domdata);
             return -ENOMEM;
         }
@@ -466,30 +466,35 @@ int sched_move_domain(struct domain *d, struct cpupool *c)
     old_ops = dom_scheduler(d);
     old_domdata = d->sched_priv;
 
-    for_each_vcpu ( d, v )
+    for_each_sched_item ( d, item )
     {
-        sched_remove_item(old_ops, v->sched_item);
+        sched_remove_item(old_ops, item);
     }
 
     d->cpupool = c;
     d->sched_priv = domdata;
 
     new_p = cpumask_first(c->cpu_valid);
-    for_each_vcpu ( d, v )
+    for_each_sched_item ( d, item )
     {
         spinlock_t *lock;
+        unsigned int item_p = new_p;
 
-        vcpudata = v->sched_item->priv;
+        itemdata = item->priv;
 
-        migrate_timer(&v->periodic_timer, new_p);
-        migrate_timer(&v->singleshot_timer, new_p);
-        migrate_timer(&v->poll_timer, new_p);
+        for_each_sched_item_vcpu ( item, v )
+        {
+            migrate_timer(&v->periodic_timer, new_p);
+            migrate_timer(&v->singleshot_timer, new_p);
+            migrate_timer(&v->poll_timer, new_p);
+            new_p = cpumask_cycle(new_p, c->cpu_valid);
+        }
 
-        lock = item_schedule_lock_irq(v->sched_item);
+        lock = item_schedule_lock_irq(item);
 
-        sched_set_affinity(v, &cpumask_all, &cpumask_all);
+        sched_set_affinity(item->vcpu, &cpumask_all, &cpumask_all);
 
-        sched_set_res(v->sched_item, per_cpu(sched_res, new_p));
+        sched_set_res(item, per_cpu(sched_res, item_p));
         /*
          * With v->processor modified we must not
          * - make any further changes assuming we hold the scheduler lock,
@@ -497,15 +502,13 @@ int sched_move_domain(struct domain *d, struct cpupool *c)
          */
         spin_unlock_irq(lock);
 
-        v->sched_item->priv = vcpu_priv[v->vcpu_id];
+        item->priv = item_priv[item->item_id];
         if ( !d->is_dying )
-            sched_move_irqs(v->sched_item);
+            sched_move_irqs(item);
 
-        new_p = cpumask_cycle(new_p, c->cpu_valid);
+        sched_insert_item(c->sched, item);
 
-        sched_insert_item(c->sched, v->sched_item);
-
-        sched_free_vdata(old_ops, vcpudata);
+        sched_free_vdata(old_ops, itemdata);
     }
 
     domain_update_node_affinity(d);
@@ -514,7 +517,7 @@ int sched_move_domain(struct domain *d, struct cpupool *c)
 
     sched_free_domdata(old_ops, old_domdata);
 
-    xfree(vcpu_priv);
+    xfree(item_priv);
 
     return 0;
 }
@@ -819,15 +822,14 @@ void vcpu_force_reschedule(struct vcpu *v)
 void restore_vcpu_affinity(struct domain *d)
 {
     unsigned int cpu = smp_processor_id();
-    struct vcpu *v;
+    struct sched_item *item;
 
     ASSERT(system_state == SYS_STATE_resume);
 
-    for_each_vcpu ( d, v )
+    for_each_sched_item ( d, item )
     {
         spinlock_t *lock;
-        unsigned int old_cpu = v->processor;
-        struct sched_item *item = v->sched_item;
+        unsigned int old_cpu = sched_item_cpu(item);
         struct sched_resource *res;
 
         ASSERT(!item_runnable(item));
@@ -846,7 +848,8 @@ void restore_vcpu_affinity(struct domain *d)
         {
             if ( item->affinity_broken )
             {
-                sched_set_affinity(v, item->cpu_hard_affinity_saved, NULL);
+                sched_set_affinity(item->vcpu, item->cpu_hard_affinity_saved,
+                                   NULL);
                 item->affinity_broken = 0;
                 cpumask_and(cpumask_scratch_cpu(cpu), item->cpu_hard_affinity,
                             cpupool_domain_cpumask(d));
@@ -854,8 +857,8 @@ void restore_vcpu_affinity(struct domain *d)
 
             if ( cpumask_empty(cpumask_scratch_cpu(cpu)) )
             {
-                printk(XENLOG_DEBUG "Breaking affinity for %pv\n", v);
-                sched_set_affinity(v, &cpumask_all, NULL);
+                printk(XENLOG_DEBUG "Breaking affinity for %pv\n", item->vcpu);
+                sched_set_affinity(item->vcpu, &cpumask_all, NULL);
                 cpumask_and(cpumask_scratch_cpu(cpu), item->cpu_hard_affinity,
                             cpupool_domain_cpumask(d));
             }
@@ -865,12 +868,12 @@ void restore_vcpu_affinity(struct domain *d)
         sched_set_res(item, res);
 
         lock = item_schedule_lock_irq(item);
-        res = sched_pick_resource(vcpu_scheduler(v), item);
+        res = sched_pick_resource(vcpu_scheduler(item->vcpu), item);
         sched_set_res(item, res);
         spin_unlock_irq(lock);
 
-        if ( old_cpu != v->processor )
-            sched_move_irqs(v->sched_item);
+        if ( old_cpu != sched_item_cpu(item) )
+            sched_move_irqs(item);
     }
 
     domain_update_node_affinity(d);
@@ -884,7 +887,6 @@ void restore_vcpu_affinity(struct domain *d)
 int cpu_disable_scheduler(unsigned int cpu)
 {
     struct domain *d;
-    struct vcpu *v;
     struct cpupool *c;
     cpumask_t online_affinity;
     int ret = 0;
@@ -895,10 +897,11 @@ int cpu_disable_scheduler(unsigned int cpu)
 
     for_each_domain_in_cpupool ( d, c )
     {
-        for_each_vcpu ( d, v )
+        struct sched_item *item;
+
+        for_each_sched_item ( d, item )
         {
             unsigned long flags;
-            struct sched_item *item = v->sched_item;
             spinlock_t *lock = item_schedule_lock_irqsave(item, &flags);
 
            cpumask_and(&online_affinity, item->cpu_hard_affinity, c->cpu_valid);
@@ -913,14 +916,14 @@ int cpu_disable_scheduler(unsigned int cpu)
                     break;
                 }
 
-                printk(XENLOG_DEBUG "Breaking affinity for %pv\n", v);
+                printk(XENLOG_DEBUG "Breaking affinity for %pv\n", item->vcpu);
 
-                sched_set_affinity(v, &cpumask_all, NULL);
+                sched_set_affinity(item->vcpu, &cpumask_all, NULL);
             }
 
-            if ( v->processor != cpu )
+            if ( sched_item_cpu(item) != sched_get_resource_cpu(cpu) )
             {
-                /* The vcpu is not on this cpu, so we can move on. */
+                /* The item is not on this cpu, so we can move on. */
                 item_schedule_unlock_irqrestore(lock, flags, item);
                 continue;
             }
@@ -933,17 +936,17 @@ int cpu_disable_scheduler(unsigned int cpu)
              *  * the scheduler will always find a suitable solution, or
              *    things would have failed before getting in here.
              */
-            vcpu_migrate_start(v);
+            vcpu_migrate_start(item->vcpu);
             item_schedule_unlock_irqrestore(lock, flags, item);
 
-            vcpu_migrate_finish(v);
+            vcpu_migrate_finish(item->vcpu);
 
             /*
             * The only caveat, in this case, is a vcpu active in the
             * hypervisor which isn't migratable. If so, the caller should
             * try again after releasing and reacquiring all locks.
              */
-            if ( v->processor == cpu )
+            if ( sched_item_cpu(item) == sched_get_resource_cpu(cpu) )
                 ret = -EAGAIN;
         }
     }
@@ -954,16 +957,16 @@ int cpu_disable_scheduler(unsigned int cpu)
 static int cpu_disable_scheduler_check(unsigned int cpu)
 {
     struct domain *d;
-    struct vcpu *v;
     struct cpupool *c;
+    struct sched_item *item;
 
     c = per_cpu(cpupool, cpu);
     if ( c == NULL )
         return 0;
 
     for_each_domain_in_cpupool ( d, c )
-        for_each_vcpu ( d, v )
-            if ( v->sched_item->affinity_broken )
+        for_each_sched_item ( d, item )
+            if ( item->affinity_broken )
                 return -EADDRINUSE;
 
     return 0;
-- 
2.16.4

