[Xen-changelog] [xen stable-4.10] xen/shim: allow DomU to have as many vcpus as available



commit 321ef983a06bc14570b79da1ab60344e3feb2c2b
Author:     Roger Pau Monne <roger.pau@xxxxxxxxxx>
AuthorDate: Thu Jan 11 11:41:21 2018 +0000
Commit:     Roger Pau Monne <roger.pau@xxxxxxxxxx>
CommitDate: Fri Jan 12 15:47:32 2018 +0000

    xen/shim: allow DomU to have as many vcpus as available
    
    Since the shim wires the VCPUOP_{up/down} hypercalls to plugging and
    unplugging physical CPUs on the shim itself, start the shim DomU with
    only the BSP online, and let the guest bring up the other CPUs as it
    needs them.
    
    Signed-off-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>
    ---
    Changes since v1:
     - Fix single line comment style.
     - Print Dom%u d->domain_id.
     - Change position of __start_xen comment.
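    
    For readers following along, the wiring the description refers to can
    be pictured with the sketch below.  It is illustrative only: apart from
    VCPUOP_up/VCPUOP_down and Xen's cpu_up()/cpu_down(), the names (notably
    shim_vcpu_op()) and the 1:1 vcpu_id -> shim pCPU mapping shown here are
    assumptions, not the actual shim code.
    
    /*
     * Hypothetical sketch: a shim forwarding the guest's VCPUOP_up/down
     * to physical CPU hotplug, assuming vCPU i runs on shim pCPU i.
     */
    static long shim_vcpu_op(int cmd, unsigned int vcpu_id)
    {
        switch ( cmd )
        {
        case VCPUOP_up:
            /* Guest onlines a vCPU: plug the matching shim pCPU. */
            return cpu_up(vcpu_id);
    
        case VCPUOP_down:
            /* Guest offlines a vCPU: unplug the matching shim pCPU. */
            return cpu_down(vcpu_id);
    
        default:
            return -ENOSYS;
        }
    }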
---
 xen/arch/x86/dom0_build.c    | 30 +++++++++++++++++++++++++++---
 xen/arch/x86/pv/dom0_build.c |  2 +-
 xen/arch/x86/setup.c         | 28 ++++++++++++++++++----------
 3 files changed, 46 insertions(+), 14 deletions(-)

diff --git a/xen/arch/x86/dom0_build.c b/xen/arch/x86/dom0_build.c
index d77c6b40de..b4f4a4ac4a 100644
--- a/xen/arch/x86/dom0_build.c
+++ b/xen/arch/x86/dom0_build.c
@@ -138,9 +138,18 @@ struct vcpu *__init dom0_setup_vcpu(struct domain *d,
 
     if ( v )
     {
-        if ( !d->is_pinned && !dom0_affinity_relaxed )
-            cpumask_copy(v->cpu_hard_affinity, &dom0_cpus);
-        cpumask_copy(v->cpu_soft_affinity, &dom0_cpus);
+        if ( pv_shim )
+        {
+
+            cpumask_setall(v->cpu_hard_affinity);
+            cpumask_setall(v->cpu_soft_affinity);
+        }
+        else
+        {
+            if ( !d->is_pinned && !dom0_affinity_relaxed )
+                cpumask_copy(v->cpu_hard_affinity, &dom0_cpus);
+            cpumask_copy(v->cpu_soft_affinity, &dom0_cpus);
+        }
     }
 
     return v;
@@ -153,6 +162,21 @@ unsigned int __init dom0_max_vcpus(void)
     unsigned int i, max_vcpus, limit;
     nodeid_t node;
 
+    if ( pv_shim )
+    {
+        nodes_setall(dom0_nodes);
+
+        /*
+         * When booting in shim mode APs are not started until the guest brings
+         * other vCPUs up.
+         */
+        cpumask_set_cpu(0, &dom0_cpus);
+
+        /* On PV shim mode allow the guest to have as many CPUs as available. */
+        return nr_cpu_ids;
+    }
+
+
     for ( i = 0; i < dom0_nr_pxms; ++i )
         if ( (node = pxm_to_node(dom0_pxms[i])) != NUMA_NO_NODE )
             node_set(node, dom0_nodes);
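
The guest side of the same handshake (illustrative only, not part of this
patch) is a normal PV vCPU bring-up; with this change the shim answers the
final VCPUOP_up by hot-plugging a physical CPU for itself.  In the sketch
below, bring_up_vcpu() is a hypothetical helper, ctxt is assumed to be a
fully prepared vcpu_guest_context, and HYPERVISOR_vcpu_op() is the usual
Linux PV-guest hypercall wrapper:

#include <xen/interface/vcpu.h>   /* VCPUOP_initialise, VCPUOP_up */

static int bring_up_vcpu(unsigned int vcpu, struct vcpu_guest_context *ctxt)
{
    int rc = HYPERVISOR_vcpu_op(VCPUOP_initialise, vcpu, ctxt);

    if ( rc == 0 )
        /* In shim mode this now results in a physical cpu_up(). */
        rc = HYPERVISOR_vcpu_op(VCPUOP_up, vcpu, NULL);

    return rc;
}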
diff --git a/xen/arch/x86/pv/dom0_build.c b/xen/arch/x86/pv/dom0_build.c
index ebcb47bf26..5d8909fa13 100644
--- a/xen/arch/x86/pv/dom0_build.c
+++ b/xen/arch/x86/pv/dom0_build.c
@@ -701,7 +701,7 @@ int __init dom0_construct_pv(struct domain *d,
     for ( i = 0; i < XEN_LEGACY_MAX_VCPUS; i++ )
         shared_info(d, vcpu_info[i].evtchn_upcall_mask) = 1;
 
-    printk("Dom0 has maximum %u VCPUs\n", d->max_vcpus);
+    printk("Dom%u has maximum %u VCPUs\n", d->domain_id, d->max_vcpus);
 
     cpu = v->processor;
     for ( i = 1; i < d->max_vcpus; i++ )
diff --git a/xen/arch/x86/setup.c b/xen/arch/x86/setup.c
index 7091c38047..cf07e5045d 100644
--- a/xen/arch/x86/setup.c
+++ b/xen/arch/x86/setup.c
@@ -1584,18 +1584,26 @@ void __init noreturn __start_xen(unsigned long mbi_p)
 
     do_presmp_initcalls();
 
-    for_each_present_cpu ( i )
+    /*
+     * NB: when running as a PV shim VCPUOP_up/down is wired to the shim
+     * physical cpu_add/remove functions, so launch the guest with only
+     * the BSP online and let it bring up the other CPUs as required.
+     */
+    if ( !pv_shim )
     {
-        /* Set up cpu_to_node[]. */
-        srat_detect_node(i);
-        /* Set up node_to_cpumask based on cpu_to_node[]. */
-        numa_add_cpu(i);        
-
-        if ( (num_online_cpus() < max_cpus) && !cpu_online(i) )
+        for_each_present_cpu ( i )
         {
-            int ret = cpu_up(i);
-            if ( ret != 0 )
-                printk("Failed to bring up CPU %u (error %d)\n", i, ret);
+            /* Set up cpu_to_node[]. */
+            srat_detect_node(i);
+            /* Set up node_to_cpumask based on cpu_to_node[]. */
+            numa_add_cpu(i);
+
+            if ( (num_online_cpus() < max_cpus) && !cpu_online(i) )
+            {
+                int ret = cpu_up(i);
+                if ( ret != 0 )
+                    printk("Failed to bring up CPU %u (error %d)\n", i, ret);
+            }
         }
     }
 
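The net effect of the setup.c hunk can be stated as a boot-time invariant
(illustrative, not code from the patch): in shim mode __start_xen() brings
nothing but the BSP online, so every further pCPU appears only in response
to the guest's VCPUOP_up.

    ASSERT(!pv_shim || num_online_cpus() == 1);
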
--
generated by git-patchbot for /home/xen/git/xen.git#stable-4.10
