
[PATCH 3/3] tools/init-dom0less: Avoid hardcoding GUEST_MAGIC_BASE


  • To: <xen-devel@xxxxxxxxxxxxxxxxxxxx>
  • From: Henry Wang <xin.wang2@xxxxxxx>
  • Date: Fri, 26 Apr 2024 11:14:55 +0800
  • Cc: Henry Wang <xin.wang2@xxxxxxx>, Anthony PERARD <anthony.perard@xxxxxxxxxx>, Alec Kwapis <alec.kwapis@xxxxxxxxxxxxx>
  • Delivery-date: Fri, 26 Apr 2024 03:15:26 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xenproject.org>

Currently the GUEST_MAGIC_BASE in the init-dom0less application is
hardcoded, which leads to failures for 1:1 direct-mapped Dom0less
DomUs, where the magic region is not guaranteed to sit at the static
GUEST_MAGIC_BASE address.

Since the guest magic region is now allocated by the hypervisor,
instead of hardcoding the guest magic pages region, use
xc_hvm_param_get() to retrieve the guest magic region base PFN, and
calculate the XenStore PFN from it. Setting the max mem is no longer
needed, so drop the call to xc_domain_setmaxmem(). Rename
alloc_xs_page() to get_xs_page() to reflect the change.

Take the opportunity to make some coding style improvements where possible.

Reported-by: Alec Kwapis <alec.kwapis@xxxxxxxxxxxxx>
Signed-off-by: Henry Wang <xin.wang2@xxxxxxx>
---
 tools/helpers/init-dom0less.c | 38 +++++++++++++++--------------------
 1 file changed, 16 insertions(+), 22 deletions(-)

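(Note for reviewers, not part of the patch: below is a minimal,
self-contained sketch of the lookup this patch switches to, assuming
the HVM_PARAM_MAGIC_BASE_PFN parameter introduced earlier in this
series; lookup_xenstore_pfn() is a hypothetical helper name used only
for illustration, not code from this patch.)

#include <stdint.h>
#include <stdio.h>

#include <xenctrl.h>

#define XENSTORE_PFN_OFFSET 1

static int lookup_xenstore_pfn(xc_interface *xch, uint32_t domid,
                               xen_pfn_t *xenstore_pfn)
{
    uint64_t magic_base_pfn;
    int rc;

    /* Ask the hypervisor where the guest magic region was placed. */
    rc = xc_hvm_param_get(xch, domid, HVM_PARAM_MAGIC_BASE_PFN,
                          &magic_base_pfn);
    if (rc < 0) {
        fprintf(stderr, "Failed to get HVM_PARAM_MAGIC_BASE_PFN\n");
        return rc;
    }

    /* The xenstore page lives at a fixed offset within the magic region. */
    *xenstore_pfn = magic_base_pfn + XENSTORE_PFN_OFFSET;
    return 0;
}

On success, *xenstore_pfn holds the PFN that init-dom0less then passes
on to create_xenstore() and xs_introduce_domain(), as in the hunks
below.
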
diff --git a/tools/helpers/init-dom0less.c b/tools/helpers/init-dom0less.c
index fee93459c4..7f6953a818 100644
--- a/tools/helpers/init-dom0less.c
+++ b/tools/helpers/init-dom0less.c
@@ -19,24 +19,20 @@
 #define XENSTORE_PFN_OFFSET 1
 #define STR_MAX_LENGTH 128
 
-static int alloc_xs_page(struct xc_interface_core *xch,
-                         libxl_dominfo *info,
-                         uint64_t *xenstore_pfn)
+static int get_xs_page(struct xc_interface_core *xch, libxl_dominfo *info,
+                       uint64_t *xenstore_pfn)
 {
     int rc;
-    const xen_pfn_t base = GUEST_MAGIC_BASE >> XC_PAGE_SHIFT;
-    xen_pfn_t p2m = (GUEST_MAGIC_BASE >> XC_PAGE_SHIFT) + XENSTORE_PFN_OFFSET;
+    xen_pfn_t magic_base_pfn;
 
-    rc = xc_domain_setmaxmem(xch, info->domid,
-                             info->max_memkb + (XC_PAGE_SIZE/1024));
-    if (rc < 0)
-        return rc;
-
-    rc = xc_domain_populate_physmap_exact(xch, info->domid, 1, 0, 0, &p2m);
-    if (rc < 0)
-        return rc;
+    rc = xc_hvm_param_get(xch, info->domid, HVM_PARAM_MAGIC_BASE_PFN,
+                          &magic_base_pfn);
+    if (rc < 0) {
+        printf("Failed to get HVM_PARAM_MAGIC_BASE_PFN\n");
+        return 1;
+    }
 
-    *xenstore_pfn = base + XENSTORE_PFN_OFFSET;
+    *xenstore_pfn = magic_base_pfn + XENSTORE_PFN_OFFSET;
     rc = xc_clear_domain_page(xch, info->domid, *xenstore_pfn);
     if (rc < 0)
         return rc;
@@ -100,6 +96,7 @@ static bool do_xs_write_vm(struct xs_handle *xsh, xs_transaction_t t,
  */
 static int create_xenstore(struct xs_handle *xsh,
                            libxl_dominfo *info, libxl_uuid uuid,
+                           xen_pfn_t xenstore_pfn,
                            evtchn_port_t xenstore_port)
 {
     domid_t domid;
@@ -145,8 +142,7 @@ static int create_xenstore(struct xs_handle *xsh,
     rc = snprintf(target_memkb_str, STR_MAX_LENGTH, "%"PRIu64, info->current_memkb);
     if (rc < 0 || rc >= STR_MAX_LENGTH)
         return rc;
-    rc = snprintf(ring_ref_str, STR_MAX_LENGTH, "%lld",
-                  (GUEST_MAGIC_BASE >> XC_PAGE_SHIFT) + XENSTORE_PFN_OFFSET);
+    rc = snprintf(ring_ref_str, STR_MAX_LENGTH, "%"PRIu_xen_pfn, xenstore_pfn);
     if (rc < 0 || rc >= STR_MAX_LENGTH)
         return rc;
     rc = snprintf(xenstore_port_str, STR_MAX_LENGTH, "%u", xenstore_port);
@@ -245,8 +241,8 @@ static int init_domain(struct xs_handle *xsh,
     if (!xenstore_evtchn)
         return 0;
 
-    /* Alloc xenstore page */
-    if (alloc_xs_page(xch, info, &xenstore_pfn) != 0) {
+    /* Get xenstore page */
+    if (get_xs_page(xch, info, &xenstore_pfn) != 0) {
         printf("Error on alloc magic pages\n");
         return 1;
     }
@@ -278,13 +274,11 @@ static int init_domain(struct xs_handle *xsh,
     if (rc < 0)
         return rc;
 
-    rc = create_xenstore(xsh, info, uuid, xenstore_evtchn);
+    rc = create_xenstore(xsh, info, uuid, xenstore_pfn, xenstore_evtchn);
     if (rc)
         err(1, "writing to xenstore");
 
-    rc = xs_introduce_domain(xsh, info->domid,
-            (GUEST_MAGIC_BASE >> XC_PAGE_SHIFT) + XENSTORE_PFN_OFFSET,
-            xenstore_evtchn);
+    rc = xs_introduce_domain(xsh, info->domid, xenstore_pfn, xenstore_evtchn);
     if (!rc)
         err(1, "xs_introduce_domain");
     return 0;
-- 
2.34.1