
[Xen-devel] [PATCH v2 58/62] xen/shim: allow DomU to have as many vcpus as available

From: Roger Pau Monne <roger.pau@xxxxxxxxxx>

Since the shim VCPUOP_{up,down} hypercalls are wired to plugging and
unplugging physical CPUs on the shim itself, start the shim DomU with
only the BSP online and let the guest bring up other vCPUs as it needs
them.
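
For illustration, the guest-side sequence that relies on this wiring looks
roughly as follows: a PV guest kernel initialises a secondary vCPU's
context and then issues VCPUOP_up.  This is a sketch only; the header
path and prepare_vcpu_context() are guest-specific placeholders, not part
of this patch.

#include <xen/interface/vcpu.h>  /* VCPUOP_initialise, VCPUOP_up; path is guest-specific */

/* Guest-specific placeholder: fill in the new vCPU's initial state. */
void prepare_vcpu_context(struct vcpu_guest_context *ctxt, unsigned int vcpu);

static int bring_up_vcpu(unsigned int vcpu)
{
    struct vcpu_guest_context ctxt = {};
    int rc;

    /* Entry point, stack and initial registers for the new vCPU. */
    prepare_vcpu_context(&ctxt, vcpu);

    rc = HYPERVISOR_vcpu_op(VCPUOP_initialise, vcpu, &ctxt);
    if ( rc )
        return rc;

    /* With this series, this is where the shim plugs in a physical CPU. */
    return HYPERVISOR_vcpu_op(VCPUOP_up, vcpu, NULL);
}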

Signed-off-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>
---
Changes since v1:
 - Fix single line comment style.
 - Print Dom%u d->domain_id.
 - Change position of __start_xen comment.
---
 xen/arch/x86/dom0_build.c    | 30 +++++++++++++++++++++++++++---
 xen/arch/x86/pv/dom0_build.c |  2 +-
 xen/arch/x86/setup.c         | 28 ++++++++++++++++++----------
 3 files changed, 46 insertions(+), 14 deletions(-)

diff --git a/xen/arch/x86/dom0_build.c b/xen/arch/x86/dom0_build.c
index 593bdbc18b..17cb1272c1 100644
--- a/xen/arch/x86/dom0_build.c
+++ b/xen/arch/x86/dom0_build.c
@@ -138,9 +138,18 @@ struct vcpu *__init dom0_setup_vcpu(struct domain *d,
 
     if ( v )
     {
-        if ( !d->is_pinned && !dom0_affinity_relaxed )
-            cpumask_copy(v->cpu_hard_affinity, &dom0_cpus);
-        cpumask_copy(v->cpu_soft_affinity, &dom0_cpus);
+        if ( pv_shim )
+        {
+
+            cpumask_setall(v->cpu_hard_affinity);
+            cpumask_setall(v->cpu_soft_affinity);
+        }
+        else
+        {
+            if ( !d->is_pinned && !dom0_affinity_relaxed )
+                cpumask_copy(v->cpu_hard_affinity, &dom0_cpus);
+            cpumask_copy(v->cpu_soft_affinity, &dom0_cpus);
+        }
     }
 
     return v;
@@ -153,6 +162,21 @@ unsigned int __init dom0_max_vcpus(void)
     unsigned int i, max_vcpus, limit;
     nodeid_t node;
 
+    if ( pv_shim )
+    {
+        nodes_setall(dom0_nodes);
+
+        /*
+         * When booting in shim mode APs are not started until the guest brings
+         * other vCPUs up.
+         */
+        cpumask_set_cpu(0, &dom0_cpus);
+
+        /* On PV shim mode allow the guest to have as many CPUs as available. */
+        return nr_cpu_ids;
+    }
+
+
     for ( i = 0; i < dom0_nr_pxms; ++i )
         if ( (node = pxm_to_node(dom0_pxms[i])) != NUMA_NO_NODE )
             node_set(node, dom0_nodes);
diff --git a/xen/arch/x86/pv/dom0_build.c b/xen/arch/x86/pv/dom0_build.c
index e0b6b14e58..e7065bfde3 100644
--- a/xen/arch/x86/pv/dom0_build.c
+++ b/xen/arch/x86/pv/dom0_build.c
@@ -696,7 +696,7 @@ int __init dom0_construct_pv(struct domain *d,
     for ( i = 0; i < XEN_LEGACY_MAX_VCPUS; i++ )
         shared_info(d, vcpu_info[i].evtchn_upcall_mask) = 1;
 
-    printk("Dom0 has maximum %u VCPUs\n", d->max_vcpus);
+    printk("Dom%u has maximum %u VCPUs\n", d->domain_id, d->max_vcpus);
 
     cpu = v->processor;
     for ( i = 1; i < d->max_vcpus; i++ )
diff --git a/xen/arch/x86/setup.c b/xen/arch/x86/setup.c
index 0f4282c42f..08796f4256 100644
--- a/xen/arch/x86/setup.c
+++ b/xen/arch/x86/setup.c
@@ -1584,18 +1584,26 @@ void __init noreturn __start_xen(unsigned long mbi_p)
 
     do_presmp_initcalls();
 
-    for_each_present_cpu ( i )
+    /*
+     * NB: when running as a PV shim VCPUOP_up/down is wired to the shim
+     * physical cpu_add/remove functions, so launch the guest with only
+     * the BSP online and let it bring up the other CPUs as required.
+     */
+    if ( !pv_shim )
     {
-        /* Set up cpu_to_node[]. */
-        srat_detect_node(i);
-        /* Set up node_to_cpumask based on cpu_to_node[]. */
-        numa_add_cpu(i);        
-
-        if ( (num_online_cpus() < max_cpus) && !cpu_online(i) )
+        for_each_present_cpu ( i )
         {
-            int ret = cpu_up(i);
-            if ( ret != 0 )
-                printk("Failed to bring up CPU %u (error %d)\n", i, ret);
+            /* Set up cpu_to_node[]. */
+            srat_detect_node(i);
+            /* Set up node_to_cpumask based on cpu_to_node[]. */
+            numa_add_cpu(i);
+
+            if ( (num_online_cpus() < max_cpus) && !cpu_online(i) )
+            {
+                int ret = cpu_up(i);
+                if ( ret != 0 )
+                    printk("Failed to bring up CPU %u (error %d)\n", i, ret);
+            }
         }
     }
 
-- 
2.11.0
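
The comment added to __start_xen above describes the shim side of this
wiring.  Conceptually (a hypothetical sketch only, assuming the 1:1
vCPU-to-pCPU mapping used in shim mode; the real handler in the shim code
differs in detail), the VCPUOP_up path plugs in the matching physical CPU
before the vCPU is allowed to run:

static long shim_vcpu_up_sketch(struct domain *d, unsigned int vcpuid)
{
    int rc = 0;

    if ( vcpuid >= d->max_vcpus || d->vcpu[vcpuid] == NULL )
        return -ENOENT;

    /* Plug the matching physical CPU before the vCPU becomes runnable. */
    if ( !cpu_online(vcpuid) )
        rc = cpu_up(vcpuid);

    /* ...followed by the normal VCPUOP_up handling of d->vcpu[vcpuid]. */
    return rc;
}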

