[Xen-devel] [PATCH RFC v1 66/74] xen/shim: allow DomU to have as many vcpus as available



From: Roger Pau Monne <roger.pau@xxxxxxxxxx>

Since the shim's VCPUOP_{up,down} hypercalls are wired to plugging and
unplugging physical CPUs on the shim itself, start the shim DomU with
only the BSP online and let the guest bring up the other vCPUs as it
needs them.

Signed-off-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>
---
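For reference, the guest-visible flow this change relies on: a PV guest
brings a secondary vCPU online by registering its initial state with
VCPUOP_initialise and then calling VCPUOP_up, which under the shim now
also plugs the matching physical CPU. A minimal guest-side sketch
against the public vcpu.h ABI follows; HYPERVISOR_vcpu_op() is the
usual guest hypercall wrapper and fill_vcpu_context() is a hypothetical
helper, neither is introduced by this patch:

/* Sketch only: guest-side vCPU bring-up using the Xen public ABI. */
#include <xen/interface/vcpu.h>     /* VCPUOP_initialise, VCPUOP_up */

static int bring_up_vcpu(unsigned int vcpu, struct vcpu_guest_context *ctxt)
{
    int rc;

    /* Hypothetical helper: set entry point, stack and page tables. */
    fill_vcpu_context(ctxt, vcpu);

    /* Register the initial register state of the new vCPU. */
    rc = HYPERVISOR_vcpu_op(VCPUOP_initialise, vcpu, ctxt);
    if ( rc )
        return rc;

    /* Make it runnable; with this series the shim onlines a pCPU too. */
    return HYPERVISOR_vcpu_op(VCPUOP_up, vcpu, NULL);
}

VCPUOP_down is the symmetric call and, with this series, results in the
corresponding shim CPU being unplugged.
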
 xen/arch/x86/dom0_build.c    | 30 +++++++++++++++++++++++++++---
 xen/arch/x86/pv/dom0_build.c |  3 ++-
 xen/arch/x86/setup.c         | 28 ++++++++++++++++++----------
 3 files changed, 47 insertions(+), 14 deletions(-)

diff --git a/xen/arch/x86/dom0_build.c b/xen/arch/x86/dom0_build.c
index 30347bcc67..d54155b1a4 100644
--- a/xen/arch/x86/dom0_build.c
+++ b/xen/arch/x86/dom0_build.c
@@ -138,9 +138,17 @@ struct vcpu *__init dom0_setup_vcpu(struct domain *d,
 
     if ( v )
     {
-        if ( !d->is_pinned && !dom0_affinity_relaxed )
-            cpumask_copy(v->cpu_hard_affinity, &dom0_cpus);
-        cpumask_copy(v->cpu_soft_affinity, &dom0_cpus);
+        if ( pv_shim )
+        {
+            cpumask_setall(v->cpu_hard_affinity);
+            cpumask_setall(v->cpu_soft_affinity);
+        }
+        else
+        {
+            if ( !d->is_pinned && !dom0_affinity_relaxed )
+                cpumask_copy(v->cpu_hard_affinity, &dom0_cpus);
+            cpumask_copy(v->cpu_soft_affinity, &dom0_cpus);
+        }
     }
 
     return v;
@@ -153,6 +162,22 @@ unsigned int __init dom0_max_vcpus(void)
     unsigned int i, max_vcpus, limit;
     nodeid_t node;
 
+    if ( pv_shim )
+    {
+        nodes_setall(dom0_nodes);
+
+        /*
+         * When booting in shim mode, APs are not started until the guest
+         * brings the other vCPUs up.
+         */
+        cpumask_set_cpu(0, &dom0_cpus);
+
+        /*
+         * In PV shim mode allow the guest to have as many vCPUs as available.
+         */
+        return nr_cpu_ids;
+    }
+
     for ( i = 0; i < dom0_nr_pxms; ++i )
         if ( (node = pxm_to_node(dom0_pxms[i])) != NUMA_NO_NODE )
             node_set(node, dom0_nodes);
diff --git a/xen/arch/x86/pv/dom0_build.c b/xen/arch/x86/pv/dom0_build.c
index f5a793b1d2..2bee988f54 100644
--- a/xen/arch/x86/pv/dom0_build.c
+++ b/xen/arch/x86/pv/dom0_build.c
@@ -695,7 +695,8 @@ int __init dom0_construct_pv(struct domain *d,
     for ( i = 0; i < XEN_LEGACY_MAX_VCPUS; i++ )
         shared_info(d, vcpu_info[i].evtchn_upcall_mask) = 1;
 
-    printk("Dom0 has maximum %u VCPUs\n", d->max_vcpus);
+    printk("%s has maximum %u VCPUs\n", pv_shim ? "DomU" : "Dom0",
+           d->max_vcpus);
 
     cpu = v->processor;
     for ( i = 1; i < d->max_vcpus; i++ )
diff --git a/xen/arch/x86/setup.c b/xen/arch/x86/setup.c
index 34d746395b..aa6703d542 100644
--- a/xen/arch/x86/setup.c
+++ b/xen/arch/x86/setup.c
@@ -1580,20 +1580,28 @@ void __init noreturn __start_xen(unsigned long mbi_p)
 
     do_presmp_initcalls();
 
-    for_each_present_cpu ( i )
+    if ( !pv_shim )
     {
-        /* Set up cpu_to_node[]. */
-        srat_detect_node(i);
-        /* Set up node_to_cpumask based on cpu_to_node[]. */
-        numa_add_cpu(i);        
-
-        if ( (num_online_cpus() < max_cpus) && !cpu_online(i) )
+        for_each_present_cpu ( i )
         {
-            int ret = cpu_up(i);
-            if ( ret != 0 )
-                printk("Failed to bring up CPU %u (error %d)\n", i, ret);
+            /* Set up cpu_to_node[]. */
+            srat_detect_node(i);
+            /* Set up node_to_cpumask based on cpu_to_node[]. */
+            numa_add_cpu(i);
+
+            if ( (num_online_cpus() < max_cpus) && !cpu_online(i) )
+            {
+                int ret = cpu_up(i);
+                if ( ret != 0 )
+                    printk("Failed to bring up CPU %u (error %d)\n", i, ret);
+            }
         }
     }
+    /*
+     * NB: when running as a PV shim, VCPUOP_{up,down} are wired to the
+     * shim's physical cpu_add/remove functions, so launch the guest with
+     * only the BSP online and let it bring up the other CPUs as required.
+     */
 
     printk("Brought up %ld CPUs\n", (long)num_online_cpus());
     smp_cpus_done();
-- 
2.11.0
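
To illustrate the other side of the wiring the NB comment refers to,
the shim's handling of VCPUOP_up conceptually forwards to the physical
CPU hotplug path before making the vCPU runnable. A rough sketch under
the assumption of a 1:1 vCPU-to-pCPU mapping; shim_vcpu_up() is an
illustrative name, not the actual handler from this series:

/* Sketch only: possible shim-side VCPUOP_up handling. */
static long shim_vcpu_up(struct vcpu *v)
{
    unsigned int cpu = v->vcpu_id;  /* assumed 1:1 vCPU -> pCPU mapping */
    long rc = 0;

    /* First bring the matching physical CPU online in the shim. */
    if ( !cpu_online(cpu) )
        rc = cpu_up(cpu);

    /* Then the usual VCPUOP_up logic marks the vCPU as runnable. */
    if ( !rc && test_and_clear_bit(_VPF_down, &v->pause_flags) )
        vcpu_wake(v);

    return rc;
}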

