
[Xen-changelog] [xen staging] xen: credit2: avoid using cpumask_weight() in hot-paths



commit 1585ed3c702e680ae492d852c8cff62cf300df99
Author:     Dario Faggioli <dfaggioli@xxxxxxxx>
AuthorDate: Mon Jul 29 12:49:09 2019 +0200
Commit:     George Dunlap <george.dunlap@xxxxxxxxxx>
CommitDate: Tue Jul 30 12:20:14 2019 +0100

    xen: credit2: avoid using cpumask_weight() in hot-paths
    
    cpumask_weight() is known to be expensive. In Credit2, we use it in
    load-balancing, but only for knowing how many CPUs are active in a
    runqueue.
    
    Keeping such a count in an integer field of the per-runqueue data
    structure completely avoids the need for cpumask_weight().
    
    While there, remove as many other uses of it as we can, even if they
    are not in hot paths.
    
    Signed-off-by: Dario Faggioli <dfaggioli@xxxxxxxx>
    Reviewed-by: Andrii Anisov <andrii_anisov@xxxxxxxx>
    Reviewed-by: George Dunlap <george.dunlap@xxxxxxxxxx>
---
 xen/common/sched_credit2.c | 19 +++++++++++++------
 1 file changed, 13 insertions(+), 6 deletions(-)
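
As a rough illustration of the idea (a minimal, hypothetical C sketch, not
the actual Xen code -- 'struct runqueue', 'rq_add_cpu()' and the other names
below are invented for this example): the integer counter is updated together
with the CPU bitmap at the cold points where CPUs join or leave, so hot paths
read the cached count instead of recounting set bits the way cpumask_weight()
does.

#include <assert.h>
#include <stdio.h>
#include <string.h>

#define NR_CPUS        64
#define BITS_PER_LONG  (8 * sizeof(unsigned long))

/* Illustrative stand-in for the per-runqueue data structure. */
struct runqueue {
    unsigned long active[(NR_CPUS + BITS_PER_LONG - 1) / BITS_PER_LONG];
    unsigned int nr_cpus;               /* cached weight of 'active' */
};

/* Cold path: a CPU joins the runqueue; bitmap and counter stay in sync. */
static void rq_add_cpu(struct runqueue *rqd, unsigned int cpu)
{
    rqd->active[cpu / BITS_PER_LONG] |= 1UL << (cpu % BITS_PER_LONG);
    rqd->nr_cpus++;
}

/* Cold path: a CPU leaves the runqueue. */
static void rq_del_cpu(struct runqueue *rqd, unsigned int cpu)
{
    rqd->active[cpu / BITS_PER_LONG] &= ~(1UL << (cpu % BITS_PER_LONG));
    rqd->nr_cpus--;
}

/* Hot path: O(1) read of the cached count, no scan over the bitmap. */
static unsigned int rq_nr_cpus(const struct runqueue *rqd)
{
    return rqd->nr_cpus;
}

int main(void)
{
    struct runqueue rqd;

    memset(&rqd, 0, sizeof(rqd));
    rq_add_cpu(&rqd, 0);
    rq_add_cpu(&rqd, 3);
    assert(rq_nr_cpus(&rqd) == 2);      /* what a bitmap popcount would return */
    rq_del_cpu(&rqd, 0);
    printf("active CPUs: %u\n", rq_nr_cpus(&rqd));
    return 0;
}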

diff --git a/xen/common/sched_credit2.c b/xen/common/sched_credit2.c
index 8e4381d8a7..fbdc4618cb 100644
--- a/xen/common/sched_credit2.c
+++ b/xen/common/sched_credit2.c
@@ -466,6 +466,7 @@ struct csched2_runqueue_data {
     spinlock_t lock;           /* Lock for this runqueue                     */
 
     struct list_head runq;     /* Ordered list of runnable vms               */
+    unsigned int nr_cpus;      /* How many CPUs are sharing this runqueue    */
     int id;                    /* ID of this runqueue (-1 if invalid)        */
 
     int load;                  /* Instantaneous load (num of non-idle vcpus) */
@@ -2613,8 +2614,8 @@ retry:
         if ( st.orqd->b_avgload > load_max )
             load_max = st.orqd->b_avgload;
 
-        cpus_max = cpumask_weight(&st.lrqd->active);
-        i = cpumask_weight(&st.orqd->active);
+        cpus_max = st.lrqd->nr_cpus;
+        i = st.orqd->nr_cpus;
         if ( i > cpus_max )
             cpus_max = i;
 
@@ -3697,7 +3698,7 @@ csched2_dump(const struct scheduler *ops)
                "\tinstload           = %d\n"
                "\taveload            = %"PRI_stime" (~%"PRI_stime"%%)\n",
                i,
-               cpumask_weight(&prv->rqd[i].active),
+               prv->rqd[i].nr_cpus,
                nr_cpu_ids, cpumask_bits(&prv->rqd[i].active),
                prv->rqd[i].max_weight,
                prv->rqd[i].pick_bias,
@@ -3815,7 +3816,7 @@ init_pdata(struct csched2_private *prv, struct csched2_pcpu *spc,
 
     __cpumask_set_cpu(cpu, &spc->sibling_mask);
 
-    if ( cpumask_weight(&rqd->active) > 0 )
+    if ( rqd->nr_cpus > 0 )
         for_each_cpu ( rcpu, per_cpu(cpu_sibling_mask, cpu) )
             if ( cpumask_test_cpu(rcpu, &rqd->active) )
             {
@@ -3828,7 +3829,10 @@ init_pdata(struct csched2_private *prv, struct csched2_pcpu *spc,
     __cpumask_set_cpu(cpu, &prv->initialized);
     __cpumask_set_cpu(cpu, &rqd->smt_idle);
 
-    if ( cpumask_weight(&rqd->active) == 1 )
+    rqd->nr_cpus++;
+    ASSERT(cpumask_weight(&rqd->active) == rqd->nr_cpus);
+
+    if ( rqd->nr_cpus == 1 )
         rqd->pick_bias = cpu;
 
     return spc->runq_id;
@@ -3934,7 +3938,10 @@ csched2_deinit_pdata(const struct scheduler *ops, void *pcpu, int cpu)
     for_each_cpu ( rcpu, &rqd->active )
         __cpumask_clear_cpu(cpu, &csched2_pcpu(rcpu)->sibling_mask);
 
-    if ( cpumask_empty(&rqd->active) )
+    rqd->nr_cpus--;
+    ASSERT(cpumask_weight(&rqd->active) == rqd->nr_cpus);
+
+    if ( rqd->nr_cpus == 0 )
     {
         printk(XENLOG_INFO " No cpus left on runqueue, disabling\n");
         deactivate_runqueue(prv, spc->runq_id);
--
generated by git-patchbot for /home/xen/git/xen.git#staging

