
[Xen-changelog] [xen stable-4.6] Revert "xen: Remove buggy initial placement algorithm"



commit 4282362cf3c06976dd305752e42f45d3bd1ccba1
Author:     Jan Beulich <jbeulich@xxxxxxxx>
AuthorDate: Mon Aug 8 09:53:44 2016 +0200
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Mon Aug 8 09:53:44 2016 +0200

    Revert "xen: Remove buggy initial placement algorithm"
    
    This reverts commit 715242a2764570680c4f9f5b039e390a8a78a642,
    as its prereq had further (so far unidentified) dependencies.
---
 xen/common/domctl.c | 50 +++++++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 49 insertions(+), 1 deletion(-)

diff --git a/xen/common/domctl.c b/xen/common/domctl.c
index efed3df..7f959f3 100644
--- a/xen/common/domctl.c
+++ b/xen/common/domctl.c
@@ -216,6 +216,54 @@ void getdomaininfo(struct domain *d, struct xen_domctl_getdomaininfo *info)
     memcpy(info->handle, d->handle, sizeof(xen_domain_handle_t));
 }
 
+static unsigned int default_vcpu0_location(cpumask_t *online)
+{
+    struct domain *d;
+    struct vcpu   *v;
+    unsigned int   i, cpu, nr_cpus, *cnt;
+    cpumask_t      cpu_exclude_map;
+
+    /* Do an initial CPU placement. Pick the least-populated CPU. */
+    nr_cpus = cpumask_last(&cpu_online_map) + 1;
+    cnt = xzalloc_array(unsigned int, nr_cpus);
+    if ( cnt )
+    {
+        rcu_read_lock(&domlist_read_lock);
+        for_each_domain ( d )
+            for_each_vcpu ( d, v )
+                if ( !test_bit(_VPF_down, &v->pause_flags)
+                     && ((cpu = v->processor) < nr_cpus) )
+                    cnt[cpu]++;
+        rcu_read_unlock(&domlist_read_lock);
+    }
+
+    /*
+     * If we're on a HT system, we only auto-allocate to a non-primary HT. We
+     * favour high numbered CPUs in the event of a tie.
+     */
+    cpumask_copy(&cpu_exclude_map, per_cpu(cpu_sibling_mask, 0));
+    cpu = cpumask_first(&cpu_exclude_map);
+    i = cpumask_next(cpu, &cpu_exclude_map);
+    if ( i < nr_cpu_ids )
+        cpu = i;
+    for_each_cpu(i, online)
+    {
+        if ( cpumask_test_cpu(i, &cpu_exclude_map) )
+            continue;
+        if ( (i == cpumask_first(per_cpu(cpu_sibling_mask, i))) &&
+             (cpumask_next(i, per_cpu(cpu_sibling_mask, i)) < nr_cpu_ids) )
+            continue;
+        cpumask_or(&cpu_exclude_map, &cpu_exclude_map,
+                   per_cpu(cpu_sibling_mask, i));
+        if ( !cnt || cnt[i] <= cnt[cpu] )
+            cpu = i;
+    }
+
+    xfree(cnt);
+
+    return cpu;
+}
+
 bool_t domctl_lock_acquire(void)
 {
     /*
@@ -639,7 +687,7 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
                 continue;
 
             cpu = (i == 0) ?
-                cpumask_any(online) :
+                default_vcpu0_location(online) :
                 cpumask_cycle(d->vcpu[i-1]->processor, online);
 
             if ( alloc_vcpu(d, i, cpu) == NULL )
--
generated by git-patchbot for /home/xen/git/xen.git#stable-4.6

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxx
https://lists.xenproject.org/xen-changelog
