[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[PATCH v2 3/5] xen/arm: Move make_resv_memory_node()


  • To: xen-devel@xxxxxxxxxxxxxxxxxxxx
  • From: Koichiro Den <den@xxxxxxxxxxxxx>
  • Date: Sat, 5 Jul 2025 23:27:01 +0900
  • Arc-authentication-results: i=1; mx.microsoft.com 1; spf=pass smtp.mailfrom=valinux.co.jp; dmarc=pass action=none header.from=valinux.co.jp; dkim=pass header.d=valinux.co.jp; arc=none
  • Arc-message-signature: i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com; s=arcselector10001; h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-AntiSpam-MessageData-ChunkCount:X-MS-Exchange-AntiSpam-MessageData-0:X-MS-Exchange-AntiSpam-MessageData-1; bh=hfc+H3nJRyfmmBSNsiPB3+yojQAp4BzUKW664k7xZCk=; b=C4u3RGM/14feG1Ir0XBLjcE7ZM4aT7v3wQPt8qgafnjQLAenkZuVasgVnElBHfmKnhfIWIRcVnqRqjUe+ON0teGamhk6zecIwK19gsd6R0Y0chr/GTTXuBm9PJZcb24gWL9u2N8MbCnIco46fDD2LBWCMmtr578XQzFd/4pyLbLglZ+rFxMtXRnDQaHr7fLQPRzOOuwieApmHvGmdQYKeSQHmNOIl66p6ZVZmnEcxe4xrILHQK3t1MMewdknMRUbTl7LJCKpi02EOgcso+ExR24TFuoGcwdFEpxEzqpBJkbiLJak91N5+xnh35oh5HsUjuClLgTbDW4QNpPnHDZVJw==
  • Arc-seal: i=1; a=rsa-sha256; s=arcselector10001; d=microsoft.com; cv=none; b=D1Ci8QXkeNpueR4sH/BC7nPkxhB41muMULi+jIlRf+DJxw2cZebvScqgqTQ/5NM8t6k+8+uj9Sc0EwO8UdtGwUD0XknKnNXShdLLIVL4ow0fUXqkY6Qbnacg/n75F7MeN/Wutg/SD0InhblgDPMv85Bk9Ax+mkUgmg37tJsMVO1uksLPPq4b1yGqIBT2jYtZ/Fe7igk4bQXqtJGrMe0EeB6jEkeyJMmWA8khA4Ajb8J8rhYDoZ+/P9z5ZrOURYgms1OZaOtwlDv5a3IN6Sg/PT62Ok3xP8J//+4LkB2tCWvKcY0U+ArfWiBt6eRZ122RAzO77Z/1LcQfa3p5MkPipQ==
  • Authentication-results: dkim=none (message not signed) header.d=none;dmarc=none action=none header.from=valinux.co.jp;
  • Cc: Koichiro Den <den@xxxxxxxxxxxxx>, Stefano Stabellini <sstabellini@xxxxxxxxxx>, Julien Grall <julien@xxxxxxx>, Bertrand Marquis <bertrand.marquis@xxxxxxx>, Michal Orzel <michal.orzel@xxxxxxx>, Volodymyr Babchuk <Volodymyr_Babchuk@xxxxxxxx>, Andrew Cooper <andrew.cooper3@xxxxxxxxxx>, Anthony PERARD <anthony.perard@xxxxxxxxxx>, Jan Beulich <jbeulich@xxxxxxxx>, Roger Pau Monné <roger.pau@xxxxxxxxxx>
  • Delivery-date: Sat, 05 Jul 2025 14:27:44 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xenproject.org>

The /reserved-memory node is inherently not specific to static-shmem.
Move it to a more generic domain build context. While at it, add an
empty kernel_info_get_shm_mem_const() for the CONFIG_STATIC_SHM=n case,
as it can now be invoked in such a case.

No functional change.

Signed-off-by: Koichiro Den <den@xxxxxxxxxxxxx>
---
 xen/arch/arm/domain_build.c           | 40 +++++++++++++++++++++++++++
 xen/common/device-tree/static-shmem.c | 40 ---------------------------
 xen/include/xen/fdt-domain-build.h    |  2 ++
 xen/include/xen/static-shmem.h        | 15 ++++------
 4 files changed, 48 insertions(+), 49 deletions(-)

diff --git a/xen/arch/arm/domain_build.c b/xen/arch/arm/domain_build.c
index 5fbc26f70988..e063d0d4076e 100644
--- a/xen/arch/arm/domain_build.c
+++ b/xen/arch/arm/domain_build.c
@@ -1564,6 +1564,46 @@ int __init make_chosen_node(const struct kernel_info 
*kinfo)
     return res;
 }
 
+int __init make_resv_memory_node(const struct kernel_info *kinfo, int 
addrcells,
+                                 int sizecells)
+{
+    const struct membanks *mem = kernel_info_get_shm_mem_const(kinfo);
+    void *fdt = kinfo->fdt;
+    int res = 0;
+    /* Placeholder for reserved-memory\0 */
+    const char resvbuf[16] = "reserved-memory";
+
+    if ( !mem || mem->nr_banks == 0 )
+        /* No shared memory provided. */
+        return 0;
+
+    dt_dprintk("Create reserved-memory node\n");
+
+    res = fdt_begin_node(fdt, resvbuf);
+    if ( res )
+        return res;
+
+    res = fdt_property(fdt, "ranges", NULL, 0);
+    if ( res )
+        return res;
+
+    res = fdt_property_cell(fdt, "#address-cells", addrcells);
+    if ( res )
+        return res;
+
+    res = fdt_property_cell(fdt, "#size-cells", sizecells);
+    if ( res )
+        return res;
+
+    res = make_shm_resv_memory_node(kinfo, addrcells, sizecells);
+    if ( res )
+        return res;
+
+    res = fdt_end_node(fdt);
+
+    return res;
+}
+
 static int __init handle_node(struct domain *d, struct kernel_info *kinfo,
                               struct dt_device_node *node,
                               p2m_type_t p2mt)
diff --git a/xen/common/device-tree/static-shmem.c 
b/xen/common/device-tree/static-shmem.c
index 8023c0a484c1..7eede97fa25d 100644
--- a/xen/common/device-tree/static-shmem.c
+++ b/xen/common/device-tree/static-shmem.c
@@ -730,46 +730,6 @@ int __init process_shm_node(const void *fdt, int node, 
uint32_t address_cells,
     return 0;
 }
 
-int __init make_resv_memory_node(const struct kernel_info *kinfo, int 
addrcells,
-                                 int sizecells)
-{
-    const struct membanks *mem = kernel_info_get_shm_mem_const(kinfo);
-    void *fdt = kinfo->fdt;
-    int res = 0;
-    /* Placeholder for reserved-memory\0 */
-    const char resvbuf[16] = "reserved-memory";
-
-    if ( mem->nr_banks == 0 )
-        /* No shared memory provided. */
-        return 0;
-
-    dt_dprintk("Create reserved-memory node\n");
-
-    res = fdt_begin_node(fdt, resvbuf);
-    if ( res )
-        return res;
-
-    res = fdt_property(fdt, "ranges", NULL, 0);
-    if ( res )
-        return res;
-
-    res = fdt_property_cell(fdt, "#address-cells", addrcells);
-    if ( res )
-        return res;
-
-    res = fdt_property_cell(fdt, "#size-cells", sizecells);
-    if ( res )
-        return res;
-
-    res = make_shm_resv_memory_node(kinfo, addrcells, sizecells);
-    if ( res )
-        return res;
-
-    res = fdt_end_node(fdt);
-
-    return res;
-}
-
 void __init early_print_info_shmem(void)
 {
     const struct membanks *shmem = bootinfo_get_shmem();
diff --git a/xen/include/xen/fdt-domain-build.h 
b/xen/include/xen/fdt-domain-build.h
index 45981dbec0b8..e9418857e386 100644
--- a/xen/include/xen/fdt-domain-build.h
+++ b/xen/include/xen/fdt-domain-build.h
@@ -25,6 +25,8 @@ int construct_domain(struct domain *d, struct kernel_info 
*kinfo);
 int construct_hwdom(struct kernel_info *kinfo,
                     const struct dt_device_node *node);
 int make_chosen_node(const struct kernel_info *kinfo);
+int make_resv_memory_node(const struct kernel_info *kinfo,
+                          int addrcells, int sizecells);
 int make_cpus_node(const struct domain *d, void *fdt);
 int make_hypervisor_node(struct domain *d, const struct kernel_info *kinfo,
                          int addrcells, int sizecells);
diff --git a/xen/include/xen/static-shmem.h b/xen/include/xen/static-shmem.h
index 76a49869126c..9e7500ed2721 100644
--- a/xen/include/xen/static-shmem.h
+++ b/xen/include/xen/static-shmem.h
@@ -11,9 +11,6 @@
 /* Worst case /memory node reg element: (addrcells + sizecells) */
 #define DT_MEM_NODE_REG_RANGE_SIZE ((NR_MEM_BANKS + NR_SHMEM_BANKS) * 4)
 
-int make_resv_memory_node(const struct kernel_info *kinfo, int addrcells,
-                          int sizecells);
-
 int process_shm(struct domain *d, struct kernel_info *kinfo,
                 const struct dt_device_node *node);
 
@@ -50,12 +47,6 @@ kernel_info_get_shm_mem_const(const struct kernel_info 
*kinfo)
 /* Worst case /memory node reg element: (addrcells + sizecells) */
 #define DT_MEM_NODE_REG_RANGE_SIZE (NR_MEM_BANKS * 4)
 
-static inline int make_resv_memory_node(const struct kernel_info *kinfo,
-                                        int addrcells, int sizecells)
-{
-    return 0;
-}
-
 static inline int process_shm(struct domain *d, struct kernel_info *kinfo,
                               const struct dt_device_node *node)
 {
@@ -80,6 +71,12 @@ static inline void shm_mem_node_fill_reg_range(const struct 
kernel_info *kinfo,
                                                __be32 *reg, int *nr_cells,
                                                int addrcells, int sizecells) 
{};
 
+static inline const struct membanks *
+kernel_info_get_shm_mem_const(const struct kernel_info *kinfo)
+{
+    return NULL;
+}
+
 #endif /* CONFIG_STATIC_SHM */
 
 #endif /* XEN_STATIC_SHMEM_H */
-- 
2.48.1




 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.