|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [xen master] xen/arm: skip holes in physical address space when setting up frametable
commit 10d417b8b62efa5093707c1dfe9bd48e0abeb331
Author: Michal Orzel <michal.orzel@xxxxxxx>
AuthorDate: Wed May 6 10:41:37 2026 +0200
Commit: Michal Orzel <michal.orzel@xxxxxxx>
CommitDate: Fri May 8 08:52:27 2026 +0200
xen/arm: skip holes in physical address space when setting up frametable
Refactor setup_frametable_mappings() into init_frametable(), modeled
after x86's implementation. Instead of mapping one contiguous frametable
covering ram_start to ram_end (including holes), iterate the
pdx_group_valid bitmap to allocate and map frametable memory only for
valid PDX groups, skipping gaps in the physical address space.
This reduces memory consumption on systems with sparse RAM layouts by
not allocating frametable entries for non-existent memory regions.
The chunk allocator rounds chunk_size up to PAGE_SIZE only, rather than
to a larger mapping granularity, to avoid overshooting past chunk
boundaries into subsequent gaps or valid regions. This rounding has no
impact for in-loop chunks, given that their size is a multiple of
PDX_GROUP_COUNT * sizeof(struct page_info), which is itself always a
multiple of PAGE_SIZE (enforced via BUILD_BUG_ON). The rounding matters only for the last
out-of-loop chunk.
Physical allocations prefer 32MB alignment so that map_pages_to_xen()
can use the contiguous bit for larger TLB entries where virtual
alignment also permits. Fall back to 2MB if the chunk is smaller than
32MB.
Add a comment explaining why we don't use pdx_to_page(). For complete
discussion see [1].
As ram_end is no longer needed by init_frametable(), drop the now-dead
ram_end/bank_end computation from setup_mm().
Update the MPU implementation to match the new init_frametable()
signature. Since MPU has no virtual address translation (ma == va),
hole-skipping is not possible and the frametable remains a single
contiguous allocation.
[1]
https://lore.kernel.org/xen-devel/20260430125103.401811-1-michal.orzel@xxxxxxx/T/#m803025eb6720a1425443dd0f8e72be93ef02f344
Signed-off-by: Michal Orzel <michal.orzel@xxxxxxx>
Reviewed-by: Luca Fancellu <luca.fancellu@xxxxxxx>
Tested-by: Luca Fancellu <luca.fancellu@xxxxxxx>
Acked-by: Stefano Stabellini <sstabellini@xxxxxxxxxx>
---
xen/arch/arm/arm32/mmu/mm.c | 3 +-
xen/arch/arm/include/asm/mm.h | 4 +-
xen/arch/arm/mm.c | 5 +--
xen/arch/arm/mmu/mm.c | 100 ++++++++++++++++++++++++++++++++----------
xen/arch/arm/mpu/mm.c | 23 +++++-----
5 files changed, 93 insertions(+), 42 deletions(-)
diff --git a/xen/arch/arm/arm32/mmu/mm.c b/xen/arch/arm/arm32/mmu/mm.c
index 5e4766ddcf..0b595baa11 100644
--- a/xen/arch/arm/arm32/mmu/mm.c
+++ b/xen/arch/arm/arm32/mmu/mm.c
@@ -178,8 +178,7 @@ void __init setup_mm(void)
setup_directmap_mappings(mfn_x(directmap_mfn_start), xenheap_pages);
- /* Frame table covers all of RAM region, including holes */
- setup_frametable_mappings(ram_start, ram_end);
+ init_frametable(ram_start);
/*
* The allocators may need to use map_domain_page() (such as for
diff --git a/xen/arch/arm/include/asm/mm.h b/xen/arch/arm/include/asm/mm.h
index 72a6928624..2eb8465aa9 100644
--- a/xen/arch/arm/include/asm/mm.h
+++ b/xen/arch/arm/include/asm/mm.h
@@ -196,8 +196,8 @@ extern void *early_fdt_map(paddr_t fdt_paddr);
extern void remove_early_mappings(void);
/* Prepare the memory subystem to bring-up the given secondary CPU */
extern int prepare_secondary_mm(int cpu);
-/* Map a frame table to cover physical addresses ps through pe */
-extern void setup_frametable_mappings(paddr_t ps, paddr_t pe);
+/* Map a frame table */
+void init_frametable(paddr_t ram_start);
/* Helper function to setup memory management */
void setup_mm_helper(void);
/* map a physical range in virtual memory */
diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
index 6eddbcf912..65aea71c43 100644
--- a/xen/arch/arm/mm.c
+++ b/xen/arch/arm/mm.c
@@ -33,7 +33,6 @@ void __init setup_mm(void)
{
const struct membanks *banks = bootinfo_get_mem();
paddr_t ram_start = INVALID_PADDR;
- paddr_t ram_end = 0;
paddr_t ram_size = 0;
unsigned int i;
@@ -42,11 +41,9 @@ void __init setup_mm(void)
for ( i = 0; i < banks->nr_banks; i++ )
{
const struct membank *bank = &banks->bank[i];
- paddr_t bank_end = bank->start + bank->size;
ram_size = ram_size + bank->size;
ram_start = min(ram_start, bank->start);
- ram_end = max(ram_end, bank_end);
}
total_pages = ram_size >> PAGE_SHIFT;
@@ -62,7 +59,7 @@ void __init setup_mm(void)
setup_mm_helper();
- setup_frametable_mappings(ram_start, ram_end);
+ init_frametable(ram_start);
init_staticmem_pages();
init_sharedmem_pages();
diff --git a/xen/arch/arm/mmu/mm.c b/xen/arch/arm/mmu/mm.c
index 6604f3bf4e..10c7dfb1ef 100644
--- a/xen/arch/arm/mmu/mm.c
+++ b/xen/arch/arm/mmu/mm.c
@@ -1,23 +1,61 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
+#include <xen/bitops.h>
#include <xen/init.h>
#include <xen/lib.h>
#include <xen/macros.h>
#include <xen/mm.h>
#include <xen/mm-frame.h>
#include <xen/pdx.h>
+#include <xen/sizes.h>
#include <xen/string.h>
-/* Map a frame table to cover physical addresses ps through pe */
-void __init setup_frametable_mappings(paddr_t ps, paddr_t pe)
+static void __init init_frametable_chunk(unsigned long pdx_s,
+ unsigned long pdx_e)
{
- unsigned long nr_pdxs = mfn_to_pdx(mfn_add(maddr_to_mfn(pe), -1)) -
- mfn_to_pdx(maddr_to_mfn(ps)) + 1;
- unsigned long frametable_size = nr_pdxs * sizeof(struct page_info);
- mfn_t base_mfn;
- const unsigned long mapping_size = frametable_size < MB(32) ? MB(2)
- : MB(32);
+ unsigned long nr_pdxs = pdx_e - pdx_s;
+ unsigned long chunk_size = nr_pdxs * sizeof(struct page_info);
+ unsigned long pfn_align;
+ struct page_info *pg;
int rc;
+ mfn_t base_mfn;
+
+ /*
+ * In-loop chunks span whole PDX groups, which are always page-size
+ * aligned. The last chunk ending at max_pdx may not be, so round up.
+ */
+ chunk_size = ROUNDUP(chunk_size, PAGE_SIZE);
+
+ /*
+ * Try to align the allocation to the contiguous mapping size so that
+ * map_pages_to_xen() can use the contiguous bit.
+ */
+ pfn_align = ((chunk_size >= MB(32)) ? MB(32) : MB(2)) >> PAGE_SHIFT;
+
+ base_mfn = alloc_boot_pages(chunk_size >> PAGE_SHIFT, pfn_align);
+
+ /*
+ * Resolve the frametable VA via mfn_to_page(pdx_to_mfn(...)) rather
+ * than pdx_to_page() because the generic pdx_to_page() does not subtract
+ * frametable_base_pdx. There's more work to be done to make it generic, so
+ * for now route through mfn_to_page(), which on Arm applies the
+ * frametable_base_pdx offset and yields the correct VA.
+ */
+ pg = mfn_to_page(pdx_to_mfn(pdx_s));
+ rc = map_pages_to_xen((unsigned long)pg, base_mfn,
+ chunk_size >> PAGE_SHIFT,
+ PAGE_HYPERVISOR_RW | _PAGE_BLOCK);
+ if ( rc )
+ panic("Unable to setup the frametable mappings\n");
+
+ memset(pg, 0, nr_pdxs * sizeof(struct page_info));
+ memset(pg + nr_pdxs, -1,
+ chunk_size - nr_pdxs * sizeof(struct page_info));
+}
+
+void __init init_frametable(paddr_t ram_start)
+{
+ unsigned int sidx, nidx, max_idx;
/*
* The size of paddr_t should be sufficient for the complete range of
@@ -26,24 +64,40 @@ void __init setup_frametable_mappings(paddr_t ps, paddr_t
pe)
BUILD_BUG_ON((sizeof(paddr_t) * BITS_PER_BYTE) < PADDR_BITS);
BUILD_BUG_ON(sizeof(struct page_info) != PAGE_INFO_SIZE);
- if ( frametable_size > FRAMETABLE_SIZE )
- panic("The frametable cannot cover the physical region %#"PRIpaddr" -
%#"PRIpaddr"\n",
- ps, pe);
+ /* init_frametable_chunk() allocation alignment assumes 4KB granule */
+ BUILD_BUG_ON(PAGE_SIZE != SZ_4K);
- frametable_base_pdx = mfn_to_pdx(maddr_to_mfn(ps));
- /* Round up to 2M or 32M boundary, as appropriate. */
- frametable_size = ROUNDUP(frametable_size, mapping_size);
- base_mfn = alloc_boot_pages(frametable_size >> PAGE_SHIFT, 32<<(20-12));
+ /* In-loop chunks must produce page-aligned frametable regions */
+ BUILD_BUG_ON((PDX_GROUP_COUNT * sizeof(struct page_info)) % PAGE_SIZE);
- rc = map_pages_to_xen(FRAMETABLE_VIRT_START, base_mfn,
- frametable_size >> PAGE_SHIFT,
- PAGE_HYPERVISOR_RW | _PAGE_BLOCK);
- if ( rc )
- panic("Unable to setup the frametable mappings.\n");
+ max_idx = DIV_ROUND_UP(max_pdx, PDX_GROUP_COUNT);
+ frametable_base_pdx = mfn_to_pdx(maddr_to_mfn(ram_start));
+
+ /*
+ * Mapping address in init_frametable_chunk must be page-aligned
+ * for map_pages_to_xen(). Aligning to PDX_GROUP_COUNT guarantees this
+ * because PDX_GROUP_COUNT * sizeof(page_info) is always a multiple of
+ * PAGE_SIZE by construction.
+ */
+ frametable_base_pdx = ROUNDDOWN(frametable_base_pdx, PDX_GROUP_COUNT);
+
+ if ( (max_pdx - frametable_base_pdx) > FRAMETABLE_NR )
+ panic("Frametable too small\n");
+
+ for ( sidx = (frametable_base_pdx / PDX_GROUP_COUNT); ; sidx = nidx )
+ {
+ unsigned int eidx;
+
+ eidx = find_next_zero_bit(pdx_group_valid, max_idx, sidx);
+ nidx = find_next_bit(pdx_group_valid, max_idx, eidx);
+
+ if ( nidx >= max_idx )
+ break;
+
+ init_frametable_chunk(sidx * PDX_GROUP_COUNT, eidx * PDX_GROUP_COUNT);
+ }
- memset(&frame_table[0], 0, nr_pdxs * sizeof(struct page_info));
- memset(&frame_table[nr_pdxs], -1,
- frametable_size - (nr_pdxs * sizeof(struct page_info)));
+ init_frametable_chunk(sidx * PDX_GROUP_COUNT, max_pdx);
}
/*
diff --git a/xen/arch/arm/mpu/mm.c b/xen/arch/arm/mpu/mm.c
index aff88bd3a9..9c568831c1 100644
--- a/xen/arch/arm/mpu/mm.c
+++ b/xen/arch/arm/mpu/mm.c
@@ -186,16 +186,15 @@ static int is_mm_attr_match(pr_t *region, unsigned int
attributes)
return 0;
}
-/* Map a frame table to cover physical addresses ps through pe */
-void __init setup_frametable_mappings(paddr_t ps, paddr_t pe)
+/*
+ * Allocate a contiguous frame table covering ram_start through max_pdx.
+ * Unlike the MMU version, MPU cannot skip holes because there is no virtual
+ * address translation (ma == va).
+ */
+void __init init_frametable(paddr_t ram_start)
{
+ unsigned long nr_pdxs, frametable_size;
mfn_t base_mfn;
- paddr_t aligned_ps = ROUNDUP(ps, PAGE_SIZE);
- paddr_t aligned_pe = ROUNDDOWN(pe, PAGE_SIZE);
-
- unsigned long nr_pdxs = mfn_to_pdx(mfn_add(maddr_to_mfn(aligned_pe), -1)) -
- mfn_to_pdx(maddr_to_mfn(aligned_ps)) + 1;
- unsigned long frametable_size = nr_pdxs * sizeof(struct page_info);
/*
* The size of paddr_t should be sufficient for the complete range of
@@ -204,11 +203,13 @@ void __init setup_frametable_mappings(paddr_t ps, paddr_t
pe)
BUILD_BUG_ON((sizeof(paddr_t) * BITS_PER_BYTE) < PADDR_BITS);
BUILD_BUG_ON(sizeof(struct page_info) != PAGE_INFO_SIZE);
+ frametable_base_pdx = mfn_to_pdx(maddr_to_mfn(ram_start));
+ nr_pdxs = max_pdx - frametable_base_pdx;
+ frametable_size = nr_pdxs * sizeof(struct page_info);
+
if ( frametable_size > FRAMETABLE_SIZE )
- panic("The frametable cannot cover the physical region %#"PRIpaddr" -
%#"PRIpaddr"\n",
- ps, pe);
+ panic("Frametable too small\n");
- frametable_base_pdx = paddr_to_pdx(aligned_ps);
frametable_size = ROUNDUP(frametable_size, PAGE_SIZE);
base_mfn = alloc_boot_pages(frametable_size >> PAGE_SHIFT, 1);
--
generated by git-patchbot for /home/xen/git/xen.git#master
|
![]() |
Lists.xenproject.org is hosted with RackSpace, monitoring our |