
[Xen-devel] [RFC PATCH v1 11/16] xen: Credit1: support sched_smt_cosched in _csched_cpu_pick()



If sched_smt_cosched is enabled, take it into account when choosing on
which pcpu to run a vcpu (e.g., when doing a migration).

Basically, we can only run vcpus of domain A on pcpus of cores where
other vcpus of domain A are already running (and, conversely, we
absolutely don't want to run a vcpu of domain A on pcpus of cores
where vcpus of other domains are running!).
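
In a nutshell, the gating check added below amounts to a predicate
like the following (a minimal sketch only: the csched_core structure,
its sdom member and the sched_smt_cosched flag come from earlier
patches in this series, and the helper name is made up for
illustration):

    /*
     * Sketch: a vcpu of domain d may be placed on a core iff SMT
     * co-scheduling is disabled, the core currently has no owning
     * domain, or the core is already owned by d itself.
     */
    static inline bool core_ok_for_domain(const struct csched_core *core,
                                          const struct domain *d)
    {
        return !sched_smt_cosched ||  /* feature disabled: anything goes */
               core->sdom == NULL ||  /* core is idle / unowned          */
               core->sdom->dom == d;  /* core already runs this domain   */
    }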

Signed-off-by: Dario Faggioli <dfaggioli@xxxxxxxx>
---
Cc: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
---
 xen/common/sched_credit.c |   25 ++++++++++++++++---------
 1 file changed, 16 insertions(+), 9 deletions(-)

diff --git a/xen/common/sched_credit.c b/xen/common/sched_credit.c
index 22327b61fb..81a2c8b384 100644
--- a/xen/common/sched_credit.c
+++ b/xen/common/sched_credit.c
@@ -822,7 +822,7 @@ _csched_cpu_pick(const struct scheduler *ops, struct vcpu *vc, bool_t commit)
     cpumask_t *cpus = cpumask_scratch_cpu(vc->processor);
     cpumask_t idlers;
     cpumask_t *online = cpupool_domain_cpumask(vc->domain);
-    struct csched_pcpu *spc = NULL;
+    struct csched_pcpu *spc = NULL, *nspc = NULL;
     int cpu = vc->processor;
     int balance_step;
 
@@ -900,6 +900,7 @@ _csched_cpu_pick(const struct scheduler *ops, struct vcpu *vc, bool_t commit)
             int migrate_factor;
 
             nxt = cpumask_cycle(cpu, cpus);
+            nspc = CSCHED_PCPU(nxt);
 
             if ( cpumask_test_cpu(cpu, per_cpu(cpu_core_mask, nxt)) )
             {
@@ -929,15 +930,21 @@ _csched_cpu_pick(const struct scheduler *ops, struct vcpu *vc, bool_t commit)
                  weight_cpu > weight_nxt :
                  weight_cpu * migrate_factor < weight_nxt )
             {
-                cpumask_and(&nxt_idlers, &nxt_idlers, cpus);
-                spc = CSCHED_PCPU(nxt);
-                cpu = cpumask_cycle(spc->idle_bias, &nxt_idlers);
-                cpumask_andnot(cpus, cpus, per_cpu(cpu_sibling_mask, cpu));
-            }
-            else
-            {
-                cpumask_andnot(cpus, cpus, &nxt_idlers);
+                spin_lock(&nspc->core->lock);
+                if ( !sched_smt_cosched ||
+                     nspc->core->sdom == NULL || nspc->core->sdom->dom == vc->domain )
+                {
+                    cpumask_and(&nxt_idlers, &nxt_idlers, cpus);
+                    spc = CSCHED_PCPU(nxt);
+                    cpu = cpumask_cycle(spc->idle_bias, &nxt_idlers);
+                    cpumask_andnot(cpus, cpus, per_cpu(cpu_sibling_mask, cpu));
+                    spin_unlock(&nspc->core->lock);
+                    continue;
+                }
+                spin_unlock(&nspc->core->lock);
             }
+
+            cpumask_andnot(cpus, cpus, &nxt_idlers);
         }
 
         /* Stop if cpu is idle */

