[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-changelog] [xen master] credit2: replace cpumask_first() uses



commit a6aed7607de96904a96b8800b454511cf8334979
Author:     Jan Beulich <jbeulich@xxxxxxxx>
AuthorDate: Fri Aug 23 15:05:39 2013 +0200
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Fri Aug 23 15:05:39 2013 +0200

    credit2: replace cpumask_first() uses
    
    ... with cpumask_any() or cpumask_cycle().
    
    In one case this also allows elimination of a cpumask_empty() call,
    and while doing this I also spotted a redundant use of
    cpumask_weight(). (When running on big systems, operations on CPU masks
    aren't cheap enough to use them carelessly.)
    
    Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
    Acked-by: Keir Fraser <keir@xxxxxxx>
    Reviewed-by: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
---
 xen/common/sched_credit2.c |   18 ++++++++++--------
 1 files changed, 10 insertions(+), 8 deletions(-)

diff --git a/xen/common/sched_credit2.c b/xen/common/sched_credit2.c
index 825ec98..ae0abc2 100644
--- a/xen/common/sched_credit2.c
+++ b/xen/common/sched_credit2.c
@@ -515,9 +515,10 @@ runq_tickle(const struct scheduler *ops, unsigned int cpu, struct csched_vcpu *n
     cpumask_andnot(&mask, &rqd->idle, &rqd->tickled);
     
     /* If it's not empty, choose one */
-    if ( !cpumask_empty(&mask) )
+    i = cpumask_cycle(cpu, &mask);
+    if ( i < nr_cpu_ids )
     {
-        ipid = cpumask_first(&mask);
+        ipid = i;
         goto tickle;
     }
 
@@ -1091,7 +1092,7 @@ choose_cpu(const struct scheduler *ops, struct vcpu *vc)
         else
         {
             d2printk("d%dv%d +\n", svc->vcpu->domain->domain_id, svc->vcpu->vcpu_id);
-            new_cpu = cpumask_first(&svc->migrate_rqd->active);
+            new_cpu = cpumask_cycle(vc->processor, &svc->migrate_rqd->active);
             goto out_up;
         }
     }
@@ -1138,8 +1139,8 @@ choose_cpu(const struct scheduler *ops, struct vcpu *vc)
         new_cpu = vc->processor;
     else
     {
-        BUG_ON(cpumask_empty(&prv->rqd[min_rqi].active));
-        new_cpu = cpumask_first(&prv->rqd[min_rqi].active);
+        new_cpu = cpumask_cycle(vc->processor, &prv->rqd[min_rqi].active);
+        BUG_ON(new_cpu >= nr_cpu_ids);
     }
 
 out_up:
@@ -1219,7 +1220,7 @@ void migrate(const struct scheduler *ops,
             on_runq=1;
         }
         __runq_deassign(svc);
-        svc->vcpu->processor = cpumask_first(&trqd->active);
+        svc->vcpu->processor = cpumask_any(&trqd->active);
         __runq_assign(svc, trqd);
         if ( on_runq )
         {
@@ -1299,8 +1300,9 @@ retry:
             load_max = st.orqd->b_avgload;
 
         cpus_max = cpumask_weight(&st.lrqd->active);
-        if ( cpumask_weight(&st.orqd->active) > cpus_max )
-            cpus_max = cpumask_weight(&st.orqd->active);
+        i = cpumask_weight(&st.orqd->active);
+        if ( i > cpus_max )
+            cpus_max = i;
 
         /* If we're under 100% capacaty, only shift if load difference
          * is > 1.  otherwise, shift if under 12.5% */
--
generated by git-patchbot for /home/xen/git/xen.git#master

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.