
[Xen-changelog] [xen stable-4.11] xen/sched: fix credit2 smt idle handling



commit 6afaac227541df272e9793c2f3af9c2e3a2e39f9
Author:     Juergen Gross <jgross@xxxxxxxx>
AuthorDate: Wed May 15 09:45:58 2019 +0200
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Wed May 15 09:45:58 2019 +0200

    xen/sched: fix credit2 smt idle handling
    
    Credit2's smt_idle_mask_set() and smt_idle_mask_clear() are used to
    identify idle cores to which vcpus can be moved. A core is considered
    idle when all of its siblings are known to be running the idle vcpu.
    
    Unfortunately, the information about which vcpu is running on a cpu
    is tracked per runqueue. So if not all siblings of a core are in the
    same runqueue, that core will never be regarded as idle: the sibling
    outside the runqueue is never known to be running the idle vcpu.
    
    Fix this by using a credit2-specific cpumask of siblings in which
    only those cpus are set which are in the same runqueue as the cpu
    in question.
    
    Signed-off-by: Juergen Gross <jgross@xxxxxxxx>
    Reviewed-by: Dario Faggioli <dfaggioli@xxxxxxxx>
    master commit: 753ba43d6d16e688f688e01e1c77463ea2c6ec9f
    master date: 2019-03-29 18:28:21 +0000
---
 xen/common/sched_credit2.c | 25 +++++++++++++++++++++----
 1 file changed, 21 insertions(+), 4 deletions(-)

diff --git a/xen/common/sched_credit2.c b/xen/common/sched_credit2.c
index e6decf104b..f0de34d1b8 100644
--- a/xen/common/sched_credit2.c
+++ b/xen/common/sched_credit2.c
@@ -510,6 +510,7 @@ struct csched2_private {
  * Physical CPU
  */
 struct csched2_pcpu {
+    cpumask_t sibling_mask;            /* Siblings in the same runqueue      */
     int runq_id;
 };
 
@@ -662,7 +663,7 @@ static inline
 void smt_idle_mask_set(unsigned int cpu, const cpumask_t *idlers,
                        cpumask_t *mask)
 {
-    const cpumask_t *cpu_siblings = per_cpu(cpu_sibling_mask, cpu);
+    const cpumask_t *cpu_siblings = &csched2_pcpu(cpu)->sibling_mask;
 
     if ( cpumask_subset(cpu_siblings, idlers) )
         cpumask_or(mask, mask, cpu_siblings);
@@ -674,10 +675,10 @@ void smt_idle_mask_set(unsigned int cpu, const cpumask_t *idlers,
 static inline
 void smt_idle_mask_clear(unsigned int cpu, cpumask_t *mask)
 {
-    const cpumask_t *cpu_siblings = per_cpu(cpu_sibling_mask, cpu);
+    const cpumask_t *cpu_siblings = &csched2_pcpu(cpu)->sibling_mask;
 
     if ( cpumask_subset(cpu_siblings, mask) )
-        cpumask_andnot(mask, mask, per_cpu(cpu_sibling_mask, cpu));
+        cpumask_andnot(mask, mask, cpu_siblings);
 }
 
 /*
@@ -3803,6 +3804,7 @@ init_pdata(struct csched2_private *prv, struct csched2_pcpu *spc,
            unsigned int cpu)
 {
     struct csched2_runqueue_data *rqd;
+    unsigned int rcpu;
 
     ASSERT(rw_is_write_locked(&prv->lock));
     ASSERT(!cpumask_test_cpu(cpu, &prv->initialized));
@@ -3820,12 +3822,23 @@ init_pdata(struct csched2_private *prv, struct csched2_pcpu *spc,
         printk(XENLOG_INFO " First cpu on runqueue, activating\n");
         activate_runqueue(prv, spc->runq_id);
     }
-    
+
     __cpumask_set_cpu(cpu, &rqd->idle);
     __cpumask_set_cpu(cpu, &rqd->active);
     __cpumask_set_cpu(cpu, &prv->initialized);
     __cpumask_set_cpu(cpu, &rqd->smt_idle);
 
+    /* On the boot cpu we are called before cpu_sibling_mask has been set up. */
+    if ( cpu == 0 && system_state < SYS_STATE_active )
+        __cpumask_set_cpu(cpu, &csched2_pcpu(cpu)->sibling_mask);
+    else
+        for_each_cpu ( rcpu, per_cpu(cpu_sibling_mask, cpu) )
+            if ( cpumask_test_cpu(rcpu, &rqd->active) )
+            {
+                __cpumask_set_cpu(cpu, &csched2_pcpu(rcpu)->sibling_mask);
+                __cpumask_set_cpu(rcpu, &csched2_pcpu(cpu)->sibling_mask);
+            }
+
     if ( cpumask_weight(&rqd->active) == 1 )
         rqd->pick_bias = cpu;
 
@@ -3907,6 +3920,7 @@ csched2_deinit_pdata(const struct scheduler *ops, void *pcpu, int cpu)
     struct csched2_private *prv = csched2_priv(ops);
     struct csched2_runqueue_data *rqd;
     struct csched2_pcpu *spc = pcpu;
+    unsigned int rcpu;
 
     write_lock_irqsave(&prv->lock, flags);
 
@@ -3933,6 +3947,9 @@ csched2_deinit_pdata(const struct scheduler *ops, void *pcpu, int cpu)
 
     printk(XENLOG_INFO "Removing cpu %d from runqueue %d\n", cpu, 
spc->runq_id);
 
+    for_each_cpu ( rcpu, &rqd->active )
+        __cpumask_clear_cpu(cpu, &csched2_pcpu(rcpu)->sibling_mask);
+
     __cpumask_clear_cpu(cpu, &rqd->idle);
     __cpumask_clear_cpu(cpu, &rqd->smt_idle);
     __cpumask_clear_cpu(cpu, &rqd->active);
--
generated by git-patchbot for /home/xen/git/xen.git#stable-4.11
