[Xen-devel] [PATCH v4 24/31] libxc: allow creating domains without emulated devices.
Introduce a new flag in xc_dom_image that controls whether emulated
devices are set up for the guest. When the flag is clear, the VGA hole,
the hvm_info page and the ioreq server pages are not created. libxl
unconditionally sets it to true for all HVM domains at the moment.
Signed-off-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>
Cc: Ian Jackson <ian.jackson@xxxxxxxxxxxxx>
Cc: Stefano Stabellini <stefano.stabellini@xxxxxxxxxxxxx>
Cc: Ian Campbell <ian.campbell@xxxxxxxxxx>
Cc: Wei Liu <wei.liu2@xxxxxxxxxx>
---
Changes since v3:
- Explain the meaning of the "emulation" xc_dom_image field.
---
tools/libxc/include/xc_dom.h | 3 ++
tools/libxc/xc_dom_x86.c | 71 +++++++++++++++++++++++++-------------------
tools/libxl/libxl_dom.c | 1 +
3 files changed, 44 insertions(+), 31 deletions(-)
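For illustration, a libxc caller other than libxl could clear the flag to
build an HVM guest without any emulated devices. The helper below is only a
sketch under stated assumptions: the function name and the trimmed build flow
are hypothetical, and only the dom->emulation field comes from this patch.

    #include <stdbool.h>
    #include <xenctrl.h>
    #include "xc_dom.h"

    /* Hypothetical helper: prepare a domain image with no emulated devices. */
    static struct xc_dom_image *prepare_no_emu_dom(xc_interface *xch)
    {
        struct xc_dom_image *dom;

        /* Allocate an empty domain image; no cmdline or features yet. */
        dom = xc_dom_allocate(xch, NULL /* cmdline */, NULL /* features */);
        if ( dom == NULL )
            return NULL;

        /*
         * Clear the new flag: skip the hvm_info page, the ioreq server
         * pages and the VGA MMIO hole for this guest.
         */
        dom->emulation = false;

        /* The usual xc_dom_* parse/build calls would follow here. */
        return dom;
    }

libxl keeps the existing behaviour by setting dom->emulation = true for every
HVM domain (see the libxl_dom.c hunk below).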
diff --git a/tools/libxc/include/xc_dom.h b/tools/libxc/include/xc_dom.h
index cda40d9..99225cf 100644
--- a/tools/libxc/include/xc_dom.h
+++ b/tools/libxc/include/xc_dom.h
@@ -194,6 +194,9 @@ struct xc_dom_image {
xen_pfn_t lowmem_end;
xen_pfn_t highmem_end;
+ /* If set, the hvm_info page, ioreq pages and VGA MMIO hole are set up. */
+ bool emulation;
+
/* Extra ACPI tables passed to HVMLOADER */
struct xc_hvm_firmware_module acpi_module;
diff --git a/tools/libxc/xc_dom_x86.c b/tools/libxc/xc_dom_x86.c
index 18e3340..b587b12 100644
--- a/tools/libxc/xc_dom_x86.c
+++ b/tools/libxc/xc_dom_x86.c
@@ -522,12 +522,15 @@ static int alloc_magic_pages_hvm(struct xc_dom_image *dom)
xen_pfn_t ioreq_server_array[NR_IOREQ_SERVER_PAGES];
xc_interface *xch = dom->xch;
- if ( (hvm_info_page = xc_map_foreign_range(
- xch, domid, PAGE_SIZE, PROT_READ | PROT_WRITE,
- HVM_INFO_PFN)) == NULL )
- goto error_out;
- build_hvm_info(hvm_info_page, dom);
- munmap(hvm_info_page, PAGE_SIZE);
+ if ( dom->emulation )
+ {
+ if ( (hvm_info_page = xc_map_foreign_range(
+ xch, domid, PAGE_SIZE, PROT_READ | PROT_WRITE,
+ HVM_INFO_PFN)) == NULL )
+ goto error_out;
+ build_hvm_info(hvm_info_page, dom);
+ munmap(hvm_info_page, PAGE_SIZE);
+ }
/* Allocate and clear special pages. */
for ( i = 0; i < NR_SPECIAL_PAGES; i++ )
@@ -559,30 +562,33 @@ static int alloc_magic_pages_hvm(struct xc_dom_image *dom)
xc_hvm_param_set(xch, domid, HVM_PARAM_SHARING_RING_PFN,
special_pfn(SPECIALPAGE_SHARING));
- /*
- * Allocate and clear additional ioreq server pages. The default
- * server will use the IOREQ and BUFIOREQ special pages above.
- */
- for ( i = 0; i < NR_IOREQ_SERVER_PAGES; i++ )
- ioreq_server_array[i] = ioreq_server_pfn(i);
-
- rc = xc_domain_populate_physmap_exact(xch, domid, NR_IOREQ_SERVER_PAGES, 0,
- 0, ioreq_server_array);
- if ( rc != 0 )
+ if ( dom->emulation )
{
- DOMPRINTF("Could not allocate ioreq server pages.");
- goto error_out;
- }
+ /*
+ * Allocate and clear additional ioreq server pages. The default
+ * server will use the IOREQ and BUFIOREQ special pages above.
+ */
+ for ( i = 0; i < NR_IOREQ_SERVER_PAGES; i++ )
+ ioreq_server_array[i] = ioreq_server_pfn(i);
- if ( xc_clear_domain_pages(xch, domid, ioreq_server_pfn(0),
- NR_IOREQ_SERVER_PAGES) )
+ rc = xc_domain_populate_physmap_exact(xch, domid, NR_IOREQ_SERVER_PAGES, 0,
+ 0, ioreq_server_array);
+ if ( rc != 0 )
+ {
+ DOMPRINTF("Could not allocate ioreq server pages.");
goto error_out;
+ }
- /* Tell the domain where the pages are and how many there are */
- xc_hvm_param_set(xch, domid, HVM_PARAM_IOREQ_SERVER_PFN,
- ioreq_server_pfn(0));
- xc_hvm_param_set(xch, domid, HVM_PARAM_NR_IOREQ_SERVER_PAGES,
- NR_IOREQ_SERVER_PAGES);
+ if ( xc_clear_domain_pages(xch, domid, ioreq_server_pfn(0),
+ NR_IOREQ_SERVER_PAGES) )
+ goto error_out;
+
+ /* Tell the domain where the pages are and how many there are */
+ xc_hvm_param_set(xch, domid, HVM_PARAM_IOREQ_SERVER_PFN,
+ ioreq_server_pfn(0));
+ xc_hvm_param_set(xch, domid, HVM_PARAM_NR_IOREQ_SERVER_PAGES,
+ NR_IOREQ_SERVER_PAGES);
+ }
/*
* Identity-map page table is required for running with CR0.PG=0 when
@@ -1320,7 +1326,8 @@ static int meminit_hvm(struct xc_dom_image *dom)
* allocated is pointless.
*/
if ( claim_enabled ) {
- rc = xc_domain_claim_pages(xch, domid, target_pages - VGA_HOLE_SIZE);
+ rc = xc_domain_claim_pages(xch, domid, target_pages -
+ (dom->emulation ? VGA_HOLE_SIZE : 0));
if ( rc != 0 )
{
DOMPRINTF("Could not allocate memory for HVM guest as we cannot
claim memory!");
@@ -1336,7 +1343,8 @@ static int meminit_hvm(struct xc_dom_image *dom)
* tot_pages will be target_pages - VGA_HOLE_SIZE after
* this call.
*/
- rc = xc_domain_set_pod_target(xch, domid, target_pages - VGA_HOLE_SIZE,
+ rc = xc_domain_set_pod_target(xch, domid, target_pages -
+ (dom->emulation ? VGA_HOLE_SIZE : 0),
NULL, NULL, NULL);
if ( rc != 0 )
{
@@ -1355,8 +1363,9 @@ static int meminit_hvm(struct xc_dom_image *dom)
* Under 2MB mode, we allocate pages in batches of no more than 8MB to
* ensure that we can be preempted and hence dom0 remains responsive.
*/
- rc = xc_domain_populate_physmap_exact(
- xch, domid, 0xa0, 0, memflags, &dom->p2m_host[0x00]);
+ if ( dom->emulation )
+ rc = xc_domain_populate_physmap_exact(
+ xch, domid, 0xa0, 0, memflags, &dom->p2m_host[0x00]);
stat_normal_pages = 0;
for ( vmemid = 0; vmemid < nr_vmemranges; vmemid++ )
@@ -1375,7 +1384,7 @@ static int meminit_hvm(struct xc_dom_image *dom)
* 0xA0000-0xC0000. Note that 0x00000-0xA0000 is populated just
* before this loop.
*/
- if ( vmemranges[vmemid].start == 0 )
+ if ( vmemranges[vmemid].start == 0 && dom->emulation )
{
cur_pages = 0xc0;
stat_normal_pages += 0xc0;
diff --git a/tools/libxl/libxl_dom.c b/tools/libxl/libxl_dom.c
index bf9b65f..8b8e079 100644
--- a/tools/libxl/libxl_dom.c
+++ b/tools/libxl/libxl_dom.c
@@ -987,6 +987,7 @@ int libxl__build_hvm(libxl__gc *gc, uint32_t domid,
dom->lowmem_end = lowmem_end;
dom->highmem_end = highmem_end;
dom->mmio_start = mmio_start;
+ dom->emulation = true;
rc = libxl__domain_device_construct_rdm(gc, d_config,
info->u.hvm.rdm_mem_boundary_memkb*1024,
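For reference, the arithmetic in the meminit_hvm hunks above works out as
follows (assuming VGA_HOLE_SIZE is 0x20 pages, i.e. the 0xA0000-0xC0000
hole, as defined in xc_dom_x86.c): a guest with target_pages = 0x40000
(1GiB) and emulation enabled claims 0x40000 - 0x20 pages, populates pfns
0x00-0x9f with the explicit 0xa0-page call, and resumes the populate loop
at pfn 0xc0. With dom->emulation clear, the full 0x40000 pages are claimed
and low memory is populated contiguously with no hole.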
--
1.9.5 (Apple Git-50.3)