
[PATCH v2 3/4] tools/init-dom0less: Avoid hardcoding GUEST_MAGIC_BASE


  • To: <xen-devel@xxxxxxxxxxxxxxxxxxxx>
  • From: Henry Wang <xin.wang2@xxxxxxx>
  • Date: Sat, 11 May 2024 08:56:10 +0800
  • Cc: Henry Wang <xin.wang2@xxxxxxx>, Anthony PERARD <anthony@xxxxxxxxxxxxxx>, Alec Kwapis <alec.kwapis@xxxxxxxxxxxxx>
  • Delivery-date: Sat, 11 May 2024 00:56:39 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xenproject.org>

The GUEST_MAGIC_BASE in the init-dom0less application is currently
hardcoded, which leads to failures for 1:1 direct-mapped Dom0less
DomUs.

Since the guest magic region is now allocated by the hypervisor,
instead of hardcoding the guest magic pages region, use
xc_hvm_param_get() to retrieve the guest magic region PFN and
calculate the XenStore PFN from it. Setting the max mem is no longer
necessary, so drop the call to xc_domain_setmaxmem(). Rename
alloc_xs_page() to get_xs_page() to reflect these changes.

Take the opportunity to make some coding style improvements where
possible.
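
For illustration, the new lookup reduces to a single hypercall plus an
offset calculation. A minimal standalone sketch (assuming the
HVM_PARAM_HV_RSRV_BASE_PFN key introduced earlier in this series;
lookup_xs_pfn() is a made-up name, linked against libxenctrl):

    #include <stdint.h>
    #include <xenctrl.h>

    #define XENSTORE_PFN_OFFSET 1  /* same offset init-dom0less uses */

    static int lookup_xs_pfn(xc_interface *xch, uint32_t domid,
                             uint64_t *xenstore_pfn)
    {
        uint64_t magic_base_pfn;
        int rc;

        /* Ask the hypervisor where it placed the guest magic region. */
        rc = xc_hvm_param_get(xch, domid, HVM_PARAM_HV_RSRV_BASE_PFN,
                              &magic_base_pfn);
        if (rc < 0)
            return rc;

        /* The XenStore page sits at a fixed offset in that region. */
        *xenstore_pfn = magic_base_pfn + XENSTORE_PFN_OFFSET;
        return 0;
    }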

Reported-by: Alec Kwapis <alec.kwapis@xxxxxxxxxxxxx>
Signed-off-by: Henry Wang <xin.wang2@xxxxxxx>
---
v2:
- Update HVMOP keys name.
---
 tools/helpers/init-dom0less.c | 40 +++++++++++++++--------------------
 1 file changed, 17 insertions(+), 23 deletions(-)

diff --git a/tools/helpers/init-dom0less.c b/tools/helpers/init-dom0less.c
index fee93459c4..04039a2a66 100644
--- a/tools/helpers/init-dom0less.c
+++ b/tools/helpers/init-dom0less.c
@@ -19,24 +19,20 @@
 #define XENSTORE_PFN_OFFSET 1
 #define STR_MAX_LENGTH 128
 
-static int alloc_xs_page(struct xc_interface_core *xch,
-                         libxl_dominfo *info,
-                         uint64_t *xenstore_pfn)
+static int get_xs_page(struct xc_interface_core *xch, libxl_dominfo *info,
+                       uint64_t *xenstore_pfn)
 {
     int rc;
-    const xen_pfn_t base = GUEST_MAGIC_BASE >> XC_PAGE_SHIFT;
-    xen_pfn_t p2m = (GUEST_MAGIC_BASE >> XC_PAGE_SHIFT) + XENSTORE_PFN_OFFSET;
+    xen_pfn_t magic_base_pfn;
 
-    rc = xc_domain_setmaxmem(xch, info->domid,
-                             info->max_memkb + (XC_PAGE_SIZE/1024));
-    if (rc < 0)
-        return rc;
-
-    rc = xc_domain_populate_physmap_exact(xch, info->domid, 1, 0, 0, &p2m);
-    if (rc < 0)
-        return rc;
+    rc = xc_hvm_param_get(xch, info->domid, HVM_PARAM_HV_RSRV_BASE_PFN,
+                          &magic_base_pfn);
+    if (rc < 0) {
+        printf("Failed to get HVM_PARAM_HV_RSRV_BASE_PFN\n");
+        return 1;
+    }
 
-    *xenstore_pfn = base + XENSTORE_PFN_OFFSET;
+    *xenstore_pfn = magic_base_pfn + XENSTORE_PFN_OFFSET;
     rc = xc_clear_domain_page(xch, info->domid, *xenstore_pfn);
     if (rc < 0)
         return rc;
@@ -100,6 +96,7 @@ static bool do_xs_write_vm(struct xs_handle *xsh, xs_transaction_t t,
  */
 static int create_xenstore(struct xs_handle *xsh,
                            libxl_dominfo *info, libxl_uuid uuid,
+                           xen_pfn_t xenstore_pfn,
                            evtchn_port_t xenstore_port)
 {
     domid_t domid;
@@ -145,8 +142,7 @@ static int create_xenstore(struct xs_handle *xsh,
     rc = snprintf(target_memkb_str, STR_MAX_LENGTH, "%"PRIu64, info->current_memkb);
     if (rc < 0 || rc >= STR_MAX_LENGTH)
         return rc;
-    rc = snprintf(ring_ref_str, STR_MAX_LENGTH, "%lld",
-                  (GUEST_MAGIC_BASE >> XC_PAGE_SHIFT) + XENSTORE_PFN_OFFSET);
+    rc = snprintf(ring_ref_str, STR_MAX_LENGTH, "%"PRIu_xen_pfn, xenstore_pfn);
     if (rc < 0 || rc >= STR_MAX_LENGTH)
         return rc;
     rc = snprintf(xenstore_port_str, STR_MAX_LENGTH, "%u", xenstore_port);
@@ -245,9 +241,9 @@ static int init_domain(struct xs_handle *xsh,
     if (!xenstore_evtchn)
         return 0;
 
-    /* Alloc xenstore page */
-    if (alloc_xs_page(xch, info, &xenstore_pfn) != 0) {
-        printf("Error on alloc magic pages\n");
+    /* Get xenstore page */
+    if (get_xs_page(xch, info, &xenstore_pfn) != 0) {
+        printf("Error on getting xenstore page\n");
         return 1;
     }
 
@@ -278,13 +274,11 @@ static int init_domain(struct xs_handle *xsh,
     if (rc < 0)
         return rc;
 
-    rc = create_xenstore(xsh, info, uuid, xenstore_evtchn);
+    rc = create_xenstore(xsh, info, uuid, xenstore_pfn, xenstore_evtchn);
     if (rc)
         err(1, "writing to xenstore");
 
-    rc = xs_introduce_domain(xsh, info->domid,
-            (GUEST_MAGIC_BASE >> XC_PAGE_SHIFT) + XENSTORE_PFN_OFFSET,
-            xenstore_evtchn);
+    rc = xs_introduce_domain(xsh, info->domid, xenstore_pfn, xenstore_evtchn);
     if (!rc)
         err(1, "xs_introduce_domain");
     return 0;
-- 
2.34.1