
Re: [Xen-devel] [PATCH v2 19/21] xen/arm: introduce create_domUs



Hi Stefano,

On 07/07/18 00:12, Stefano Stabellini wrote:
Call a new function, "create_domUs", from start_xen to start DomU VMs.

Introduce support for the "xen,domain" compatible node in the device tree.
Create new DomU VMs based on the information found in the device tree under
"xen,domain". Call construct_domU for each domain.

Introduce a simple global variable named max_init_domid to keep track of
the initially allocated domids.

Move the discard_initial_modules after DomUs have been built

Nit: Missing full stop.


Signed-off-by: Stefano Stabellini <stefanos@xxxxxxxxxx>
CC: andrew.cooper3@xxxxxxxxxx
CC: jbeulich@xxxxxxxx
---
Changes in v2:
- coding style
- set nr_spis to 32
- introduce create_domUs
---
  xen/arch/arm/domain_build.c | 38 +++++++++++++++++++++++++++++++++++---
  xen/arch/arm/setup.c        |  8 +++++++-
  xen/include/asm-arm/setup.h |  3 +++
  xen/include/asm-x86/setup.h |  2 ++
  4 files changed, 47 insertions(+), 4 deletions(-)

diff --git a/xen/arch/arm/domain_build.c b/xen/arch/arm/domain_build.c
index d7e9040..9f58002 100644
--- a/xen/arch/arm/domain_build.c
+++ b/xen/arch/arm/domain_build.c
@@ -7,6 +7,7 @@
  #include <asm/irq.h>
  #include <asm/regs.h>
  #include <xen/errno.h>
+#include <xen/err.h>
  #include <xen/device_tree.h>
  #include <xen/libfdt/libfdt.h>
  #include <xen/guest_access.h>
@@ -2542,6 +2543,39 @@ static int __init construct_domU(struct domain *d, struct dt_device_node *node)
      return rc;
  }
+void __init create_domUs(void)
+{
+    struct dt_device_node *node;
+    struct dt_device_node *chosen = dt_find_node_by_name(dt_host, "chosen");

Newline here.

+    if ( chosen != NULL )
+    {
+        dt_for_each_child_node(chosen, node)
+        {
+            struct domain *d;
+            struct xen_domctl_createdomain d_cfg = {};
+
+            if ( !dt_device_is_compatible(node, "xen,domain") )
+                continue;
+
+            d_cfg.arch.gic_version = XEN_DOMCTL_CONFIG_GIC_NATIVE;
+            d_cfg.arch.nr_spis = 32;

You can set those fields directly when defining d_cfg above.
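
Something along these lines (untested):

    struct xen_domctl_createdomain d_cfg = {
        /* Initialise the arch fields at definition time. */
        .arch.gic_version = XEN_DOMCTL_CONFIG_GIC_NATIVE,
        .arch.nr_spis = 32,
    };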

+
+            d = domain_create(max_init_domid++, &d_cfg);
+            if ( IS_ERR(d) )
+                panic("Error creating domU");

It is probably worth adding the node name to the message, so the user knows which guest failed.
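
Something like (untested):

    if ( IS_ERR(d) )
        panic("Error creating domain %s", dt_node_name(node));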

+
+            d->is_privileged = 0;
+            d->is_console = 1;
+            d->target = NULL;
+
+            if ( construct_domU(d, node) != 0 )
+                printk("Could not set up DOMU guest OS");

Shouldn't this be a panic? Also, the message is a little odd: "DOMU guest" is redundant, and the function does more than just load the kernel.

Lastly, you probably want to add the node name to the message, so the user knows which guest failed.
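
E.g. (untested):

    if ( construct_domU(d, node) != 0 )
        panic("Could not set up domain %s", dt_node_name(node));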

+
+            domain_unpause_by_systemcontroller(d);

If a domain is bound to CPU0, it will not boot until CPU0 is done creating all the domains. Is that what you want?
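
If the goal is to only unpause once all the domUs have been created, a rough sketch (untested) would be to drop the unpause from the loop and walk the domain list afterwards:

    struct domain *d;

    /* Unpause every domU only after the creation loop has finished. */
    for_each_domain ( d )
        if ( d->domain_id != 0 )
            domain_unpause_by_systemcontroller(d);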

+        }
+    }
+}
+
  int __init construct_dom0(struct domain *d)
  {
      struct kernel_info kinfo = {};
@@ -2592,9 +2626,7 @@ int __init construct_dom0(struct domain *d)
          return rc;
-    rc = __construct_domain(d, &kinfo);
-    discard_initial_modules();
-    return rc;
+    return __construct_domain(d, &kinfo);
  }

  /*
diff --git a/xen/arch/arm/setup.c b/xen/arch/arm/setup.c
index 7739a80..0b08af2 100644
--- a/xen/arch/arm/setup.c
+++ b/xen/arch/arm/setup.c
@@ -64,6 +64,8 @@ static unsigned long opt_xenheap_megabytes __initdata;
  integer_param("xenheap_megabytes", opt_xenheap_megabytes);
  #endif
+domid_t __read_mostly max_init_domid = 0;
+
  static __used void init_done(void)
  {
      free_init_memory();
@@ -863,7 +865,7 @@ void __init start_xen(unsigned long boot_phys_offset,
      dom0_cfg.arch.gic_version = XEN_DOMCTL_CONFIG_GIC_NATIVE;
      dom0_cfg.arch.nr_spis = gic_number_lines() - 32;
-    dom0 = domain_create(0, &dom0_cfg);
+    dom0 = domain_create(max_init_domid++, &dom0_cfg);
      if ( IS_ERR(dom0) || (alloc_dom0_vcpu0(dom0) == NULL) )
              panic("Error creating domain 0");
@@ -889,6 +891,10 @@ void __init start_xen(unsigned long boot_phys_offset,
      domain_unpause_by_systemcontroller(dom0);

Why do you unpause Dom0 and then create the guests? It feels to me like you want to create all the guests and then unpause Dom0. Dom0 would likely get blocked anyway, as CPU0 will be busy creating the domains.
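
I.e. something like (untested):

    create_domUs();

    domain_unpause_by_systemcontroller(dom0);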

+    create_domUs();
+
+    discard_initial_modules();

I think it would be better to move that into init_done(). That is where all the initial memory is freed.
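
E.g. (untested, assuming init_done() still just frees the init memory and enters the idle loop):

    static __used void init_done(void)
    {
        /* The boot modules are only needed while domains are being built. */
        discard_initial_modules();
        free_init_memory();
        startup_cpu_idle_loop();
    }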

+
      /* Switch on to the dynamically allocated stack for the idle vcpu
       * since the static one we're running on is about to be freed. */
      memcpy(idle_vcpu[0]->arch.cpu_info, get_cpu_info(),
diff --git a/xen/include/asm-arm/setup.h b/xen/include/asm-arm/setup.h
index 5ecfe27..21b9729 100644
--- a/xen/include/asm-arm/setup.h
+++ b/xen/include/asm-arm/setup.h
@@ -56,6 +56,8 @@ struct bootinfo {
  extern struct bootinfo bootinfo;

+extern domid_t max_init_domid;
+
  void arch_init_memory(void);
  void copy_from_paddr(void *dst, paddr_t paddr, unsigned long len);
@@ -72,6 +74,7 @@ void acpi_create_efi_mmap_table(struct domain *d,
  int acpi_make_efi_nodes(void *fdt, struct membank tbl_add[]);
  int construct_dom0(struct domain *d);
+void __init create_domUs(void);
  void discard_initial_modules(void);
  void dt_unreserved_regions(paddr_t s, paddr_t e,
diff --git a/xen/include/asm-x86/setup.h b/xen/include/asm-x86/setup.h
index 19232af..2fb9529 100644
--- a/xen/include/asm-x86/setup.h
+++ b/xen/include/asm-x86/setup.h
@@ -73,4 +73,6 @@ extern bool opt_dom0_shadow;
  #endif
  extern bool dom0_pvh;
+#define max_init_domid (1)
+
  #endif


Cheers,

--
Julien Grall
