
[Xen-devel] [PATCH v5 23/28] libxc: allow creating domains without emulated devices.



Introduce a new flag in xc_dom_image that controls whether emulated devices
are enabled for the domain. When the flag is unset, the VGA hole, the
hvm_info page and the ioreq server pages are not created. At the moment
libxl unconditionally sets it to true for all HVM domains.
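
For illustration, a minimal toolstack-side sketch of how a caller could use
the new fields (hypothetical snippet, not part of this patch; everything
apart from the two fields introduced here is the usual xc_dom_* setup and
is elided):

    struct xc_dom_image *dom;

    dom = xc_dom_allocate(xch, NULL, NULL);
    if ( dom == NULL )
        return -1;

    /*
     * Build an HVM guest without emulated devices: this skips the VGA
     * hole, the hvm_info page and the ioreq server pages.
     */
    dom->device_model = false;
    dom->vga_hole_size = 0;

Callers that do want a device model would instead leave device_model set to
true and pass a non-zero vga_hole_size, as libxl does below.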

Signed-off-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>
Acked-by: Wei Liu <wei.liu2@xxxxxxxxxx>
Cc: Ian Jackson <ian.jackson@xxxxxxxxxxxxx>
Cc: Stefano Stabellini <stefano.stabellini@xxxxxxxxxxxxx>
Cc: Ian Campbell <ian.campbell@xxxxxxxxxx>
Cc: Wei Liu <wei.liu2@xxxxxxxxxx>
---
Changes since v4:
 - Store the size of the VGA hole inside the xc_dom_image struct and set it
   from libxl.
 - Rename dom->emulation to dom->device_model (no functional change).
 - Add Wei Liu's Acked-by.

Changes since v3:
 - Explain the meaning of the "emulation" xc_dom_image field.
---
 tools/libxc/include/xc_dom.h |  4 +++
 tools/libxc/xc_dom_x86.c     | 73 ++++++++++++++++++++++++--------------------
 tools/libxl/libxl_dom.c      |  2 ++
 tools/libxl/libxl_internal.h |  1 +
 4 files changed, 47 insertions(+), 33 deletions(-)

diff --git a/tools/libxc/include/xc_dom.h b/tools/libxc/include/xc_dom.h
index cda40d9..507b323 100644
--- a/tools/libxc/include/xc_dom.h
+++ b/tools/libxc/include/xc_dom.h
@@ -193,6 +193,10 @@ struct xc_dom_image {
     xen_pfn_t mmio_size;
     xen_pfn_t lowmem_end;
     xen_pfn_t highmem_end;
+    xen_pfn_t vga_hole_size;
+
+    /* If unset, disables setup of the VGA hole, hvm_info and IOREQ pages. */
+    bool device_model;
 
     /* Extra ACPI tables passed to HVMLOADER */
     struct xc_hvm_firmware_module acpi_module;
diff --git a/tools/libxc/xc_dom_x86.c b/tools/libxc/xc_dom_x86.c
index 0a4ac58..fb71717 100644
--- a/tools/libxc/xc_dom_x86.c
+++ b/tools/libxc/xc_dom_x86.c
@@ -49,8 +49,6 @@
 #define X86_CR0_PE 0x01
 #define X86_CR0_ET 0x10
 
-#define VGA_HOLE_SIZE (0x20)
-
 #define SPECIALPAGE_PAGING   0
 #define SPECIALPAGE_ACCESS   1
 #define SPECIALPAGE_SHARING  2
@@ -522,12 +520,15 @@ static int alloc_magic_pages_hvm(struct xc_dom_image *dom)
     xen_pfn_t ioreq_server_array[NR_IOREQ_SERVER_PAGES];
     xc_interface *xch = dom->xch;
 
-    if ( (hvm_info_page = xc_map_foreign_range(
-              xch, domid, PAGE_SIZE, PROT_READ | PROT_WRITE,
-              HVM_INFO_PFN)) == NULL )
-        goto error_out;
-    build_hvm_info(hvm_info_page, dom);
-    munmap(hvm_info_page, PAGE_SIZE);
+    if ( dom->device_model )
+    {
+        if ( (hvm_info_page = xc_map_foreign_range(
+                  xch, domid, PAGE_SIZE, PROT_READ | PROT_WRITE,
+                  HVM_INFO_PFN)) == NULL )
+            goto error_out;
+        build_hvm_info(hvm_info_page, dom);
+        munmap(hvm_info_page, PAGE_SIZE);
+    }
 
     /* Allocate and clear special pages. */
     for ( i = 0; i < NR_SPECIAL_PAGES; i++ )
@@ -559,30 +560,33 @@ static int alloc_magic_pages_hvm(struct xc_dom_image *dom)
     xc_hvm_param_set(xch, domid, HVM_PARAM_SHARING_RING_PFN,
                      special_pfn(SPECIALPAGE_SHARING));
 
-    /*
-     * Allocate and clear additional ioreq server pages. The default
-     * server will use the IOREQ and BUFIOREQ special pages above.
-     */
-    for ( i = 0; i < NR_IOREQ_SERVER_PAGES; i++ )
-        ioreq_server_array[i] = ioreq_server_pfn(i);
-
-    rc = xc_domain_populate_physmap_exact(xch, domid, NR_IOREQ_SERVER_PAGES, 0,
-                                          0, ioreq_server_array);
-    if ( rc != 0 )
+    if ( dom->device_model )
     {
-        DOMPRINTF("Could not allocate ioreq server pages.");
-        goto error_out;
-    }
+        /*
+         * Allocate and clear additional ioreq server pages. The default
+         * server will use the IOREQ and BUFIOREQ special pages above.
+         */
+        for ( i = 0; i < NR_IOREQ_SERVER_PAGES; i++ )
+            ioreq_server_array[i] = ioreq_server_pfn(i);
 
-    if ( xc_clear_domain_pages(xch, domid, ioreq_server_pfn(0),
-                               NR_IOREQ_SERVER_PAGES) )
+        rc = xc_domain_populate_physmap_exact(xch, domid, NR_IOREQ_SERVER_PAGES, 0,
+                                              0, ioreq_server_array);
+        if ( rc != 0 )
+        {
+            DOMPRINTF("Could not allocate ioreq server pages.");
             goto error_out;
+        }
 
-    /* Tell the domain where the pages are and how many there are */
-    xc_hvm_param_set(xch, domid, HVM_PARAM_IOREQ_SERVER_PFN,
-                     ioreq_server_pfn(0));
-    xc_hvm_param_set(xch, domid, HVM_PARAM_NR_IOREQ_SERVER_PAGES,
-                     NR_IOREQ_SERVER_PAGES);
+        if ( xc_clear_domain_pages(xch, domid, ioreq_server_pfn(0),
+                                   NR_IOREQ_SERVER_PAGES) )
+            goto error_out;
+
+        /* Tell the domain where the pages are and how many there are */
+        xc_hvm_param_set(xch, domid, HVM_PARAM_IOREQ_SERVER_PFN,
+                         ioreq_server_pfn(0));
+        xc_hvm_param_set(xch, domid, HVM_PARAM_NR_IOREQ_SERVER_PAGES,
+                         NR_IOREQ_SERVER_PAGES);
+    }
 
     /*
      * Identity-map page table is required for running with CR0.PG=0 when
@@ -1319,7 +1323,8 @@ static int meminit_hvm(struct xc_dom_image *dom)
      * allocated is pointless.
      */
     if ( claim_enabled ) {
-        rc = xc_domain_claim_pages(xch, domid, target_pages - VGA_HOLE_SIZE);
+        rc = xc_domain_claim_pages(xch, domid,
+                                   target_pages - dom->vga_hole_size);
         if ( rc != 0 )
         {
             DOMPRINTF("Could not allocate memory for HVM guest as we cannot 
claim memory!");
@@ -1335,7 +1340,8 @@ static int meminit_hvm(struct xc_dom_image *dom)
          * tot_pages will be target_pages - VGA_HOLE_SIZE after
          * this call.
          */
-        rc = xc_domain_set_pod_target(xch, domid, target_pages - VGA_HOLE_SIZE,
+        rc = xc_domain_set_pod_target(xch, domid,
+                                      target_pages - dom->vga_hole_size,
                                       NULL, NULL, NULL);
         if ( rc != 0 )
         {
@@ -1354,8 +1360,9 @@ static int meminit_hvm(struct xc_dom_image *dom)
      * Under 2MB mode, we allocate pages in batches of no more than 8MB to 
      * ensure that we can be preempted and hence dom0 remains responsive.
      */
-    rc = xc_domain_populate_physmap_exact(
-        xch, domid, 0xa0, 0, memflags, &dom->p2m_host[0x00]);
+    if ( dom->device_model )
+        rc = xc_domain_populate_physmap_exact(
+            xch, domid, 0xa0, 0, memflags, &dom->p2m_host[0x00]);
 
     stat_normal_pages = 0;
     for ( vmemid = 0; vmemid < nr_vmemranges; vmemid++ )
@@ -1374,7 +1381,7 @@ static int meminit_hvm(struct xc_dom_image *dom)
          * 0xA0000-0xC0000. Note that 0x00000-0xA0000 is populated just
          * before this loop.
          */
-        if ( vmemranges[vmemid].start == 0 )
+        if ( vmemranges[vmemid].start == 0 && dom->device_model )
         {
             cur_pages = 0xc0;
             stat_normal_pages += 0xc0;
diff --git a/tools/libxl/libxl_dom.c b/tools/libxl/libxl_dom.c
index d2cf9e3..4a214e7 100644
--- a/tools/libxl/libxl_dom.c
+++ b/tools/libxl/libxl_dom.c
@@ -987,6 +987,8 @@ int libxl__build_hvm(libxl__gc *gc, uint32_t domid,
     dom->lowmem_end = lowmem_end;
     dom->highmem_end = highmem_end;
     dom->mmio_start = mmio_start;
+    dom->vga_hole_size = LIBXL_VGA_HOLE_SIZE;
+    dom->device_model = true;
 
     rc = libxl__domain_device_construct_rdm(gc, d_config,
                                             info->u.hvm.rdm_mem_boundary_memkb*1024,
diff --git a/tools/libxl/libxl_internal.h b/tools/libxl/libxl_internal.h
index ea89f1f..294d442 100644
--- a/tools/libxl/libxl_internal.h
+++ b/tools/libxl/libxl_internal.h
@@ -100,6 +100,7 @@
 #define LIBXL_HVM_EXTRA_MEMORY 2048
 #define LIBXL_MIN_DOM0_MEM (128*1024)
 #define LIBXL_INVALID_GFN (~(uint64_t)0)
+#define LIBXL_VGA_HOLE_SIZE 0x20
 /* use 0 as the domid of the toolstack domain for now */
 #define LIBXL_TOOLSTACK_DOMID 0
 #define QEMU_SIGNATURE "DeviceModelRecord0002"
-- 
1.9.5 (Apple Git-50.3)

