From michal.orzel@xxxxxxx Mon Jun 30 02:22:46 2025
Date: Mon, 30 Jun 2025 11:22:27 +0200
From: Michal Orzel
Subject: [PATCH] xen/arm: Take into account PDX grouping for setting up frametable

At the moment, we don't take into account the pdx_group_valid bitmap,
which marks the PDX groups containing valid RAM ranges. We populate
the bitmap using set_pdx_range(), but we then set up the frametable to
cover all of RAM, including holes, wasting a lot of memory (even
gigabytes on some platforms with large holes).

Follow the example of x86, where this bitmap is used to initialize the
frametable in chunks covering only the valid RAM ranges. On Arm we
additionally apply an offset (similar to the direct map), where the
starting index into the bitmap comes from frametable_base_pdx. The
mapping size remains the same as before, i.e. 2MB or 32MB.

Signed-off-by: Michal Orzel
---
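(Not part of the patch: a standalone, simplified sketch for reviewers
of the loop shape used by init_frametable() below. The bitmap, MAX_IDX
and find_next_bit_toy() are made up for illustration; they stand in
for pdx_group_valid, max_idx and Xen's find_next_bit()/
find_next_zero_bit(). Groups 6-9 model a RAM hole whose frametable
chunk is never allocated or mapped.)

/* toy_pdx_walk.c - illustration only, not part of the patch. */
#include <stdio.h>

#define MAX_IDX 16 /* hypothetical number of PDX groups */

/* 1 = group contains valid RAM; groups 6-9 model a large hole. */
static const int pdx_group_valid[MAX_IDX] =
    { 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1 };

/* Simplified stand-in for Xen's find_next_{zero_,}bit(). */
static int find_next_bit_toy(int from, int val)
{
    while ( from < MAX_IDX && pdx_group_valid[from] != val )
        from++;
    return from;
}

int main(void)
{
    int sidx, eidx, nidx;

    /*
     * Mirrors the for-loop in init_frametable(); here the starting
     * group (frametable_base_pdx / PDX_GROUP_COUNT) is simply 0.
     */
    for ( sidx = 0; ; sidx = nidx )
    {
        eidx = find_next_bit_toy(sidx, 0); /* end of current valid run */
        nidx = find_next_bit_toy(eidx, 1); /* start of next valid run */

        if ( nidx >= MAX_IDX )
            break;

        printf("chunk: groups [%d, %d)\n", sidx, eidx);
    }

    /*
     * The real code ends the final chunk at max_pdx, which always
     * falls within the last valid group.
     */
    printf("last chunk: groups [%d, %d)\n", sidx, eidx);

    return 0;
}

This prints "chunk: groups [0, 6)" and "last chunk: groups [10, 16)":
groups 6-9 are skipped entirely, so with real PDX group sizes their
share of the frametable is never backed by memory.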
 xen/arch/arm/arm32/mmu/mm.c   |  4 +-
 xen/arch/arm/arm64/mmu/mm.c   |  3 +-
 xen/arch/arm/include/asm/mm.h |  4 +-
 xen/arch/arm/mmu/mm.c         | 69 ++++++++++++++++++++++-------------
 4 files changed, 50 insertions(+), 30 deletions(-)

diff --git a/xen/arch/arm/arm32/mmu/mm.c b/xen/arch/arm/arm32/mmu/mm.c
index 956693232a1b..80b3572e0041 100644
--- a/xen/arch/arm/arm32/mmu/mm.c
+++ b/xen/arch/arm/arm32/mmu/mm.c
@@ -188,10 +188,10 @@ void __init setup_mm(void)
 
     setup_directmap_mappings(mfn_x(directmap_mfn_start), xenheap_pages);
 
-    /* Frame table covers all of RAM region, including holes */
-    setup_frametable_mappings(ram_start, ram_end);
     max_page = PFN_DOWN(ram_end);
 
+    init_frametable();
+
     /*
      * The allocators may need to use map_domain_page() (such as for
      * scrubbing pages). So we need to prepare the domheap area first.
diff --git a/xen/arch/arm/arm64/mmu/mm.c b/xen/arch/arm/arm64/mmu/mm.c
index c1efa1348aee..8bfa263be91e 100644
--- a/xen/arch/arm/arm64/mmu/mm.c
+++ b/xen/arch/arm/arm64/mmu/mm.c
@@ -277,9 +277,10 @@ void __init setup_mm(void)
     directmap_mfn_start = maddr_to_mfn(ram_start);
     directmap_mfn_end = maddr_to_mfn(ram_end);
 
-    setup_frametable_mappings(ram_start, ram_end);
     max_page = PFN_DOWN(ram_end);
 
+    init_frametable();
+
     init_staticmem_pages();
     init_sharedmem_pages();
 }
diff --git a/xen/arch/arm/include/asm/mm.h b/xen/arch/arm/include/asm/mm.h
index a0d8e5afe977..5f41da4b1c32 100644
--- a/xen/arch/arm/include/asm/mm.h
+++ b/xen/arch/arm/include/asm/mm.h
@@ -211,8 +211,8 @@ extern void *early_fdt_map(paddr_t fdt_paddr);
 extern void remove_early_mappings(void);
 /* Prepare the memory subystem to bring-up the given secondary CPU */
 extern int prepare_secondary_mm(int cpu);
-/* Map a frame table to cover physical addresses ps through pe */
-extern void setup_frametable_mappings(paddr_t ps, paddr_t pe);
+/* Map a frame table */
+void init_frametable(void);
 /* map a physical range in virtual memory */
 void __iomem *ioremap_attr(paddr_t start, size_t len, unsigned int attributes);
 
diff --git a/xen/arch/arm/mmu/mm.c b/xen/arch/arm/mmu/mm.c
index 9c50479c6373..69617a4986a5 100644
--- a/xen/arch/arm/mmu/mm.c
+++ b/xen/arch/arm/mmu/mm.c
@@ -10,16 +10,35 @@
 
 unsigned long frametable_virt_end __read_mostly;
 
-/* Map a frame table to cover physical addresses ps through pe */
-void __init setup_frametable_mappings(paddr_t ps, paddr_t pe)
+static void __init
+init_frametable_chunk(unsigned long pdx_s, unsigned long pdx_e)
 {
-    unsigned long nr_pdxs = mfn_to_pdx(mfn_add(maddr_to_mfn(pe), -1)) -
-                            mfn_to_pdx(maddr_to_mfn(ps)) + 1;
-    unsigned long frametable_size = nr_pdxs * sizeof(struct page_info);
-    mfn_t base_mfn;
-    const unsigned long mapping_size = frametable_size < MB(32) ? MB(2)
-                                                                : MB(32);
+    unsigned long nr_pdxs = pdx_e - pdx_s;
+    unsigned long chunk_size = nr_pdxs * sizeof(struct page_info);
+    const unsigned long mapping_size = chunk_size < MB(32) ? MB(2) : MB(32);
+    unsigned long virt;
     int rc;
+    mfn_t base_mfn;
+
+    /* Round up to 2M or 32M boundary, as appropriate. */
+    chunk_size = ROUNDUP(chunk_size, mapping_size);
+    base_mfn = alloc_boot_pages(chunk_size >> PAGE_SHIFT, 32 << (20 - 12));
+
+    virt = (unsigned long)pdx_to_page(pdx_s);
+    rc = map_pages_to_xen(virt, base_mfn, chunk_size >> PAGE_SHIFT,
+                          PAGE_HYPERVISOR_RW | _PAGE_BLOCK);
+    if ( rc )
+        panic("Unable to setup the frametable mappings\n");
+
+    memset(&frame_table[pdx_s], 0, nr_pdxs * sizeof(struct page_info));
+    memset(&frame_table[pdx_e], -1,
+           chunk_size - nr_pdxs * sizeof(struct page_info));
+}
+
+void __init init_frametable(void)
+{
+    unsigned int sidx, eidx, nidx;
+    unsigned int max_idx;
 
     /*
      * The size of paddr_t should be sufficient for the complete range of
@@ -28,27 +47,27 @@ void __init setup_frametable_mappings(paddr_t ps, paddr_t pe)
     BUILD_BUG_ON((sizeof(paddr_t) * BITS_PER_BYTE) < PADDR_BITS);
     BUILD_BUG_ON(sizeof(struct page_info) != PAGE_INFO_SIZE);
 
-    if ( frametable_size > FRAMETABLE_SIZE )
-        panic("The frametable cannot cover the physical region %#"PRIpaddr" - %#"PRIpaddr"\n",
-              ps, pe);
+    frametable_base_pdx = mfn_to_pdx(directmap_mfn_start);
 
-    frametable_base_pdx = mfn_to_pdx(maddr_to_mfn(ps));
-    /* Round up to 2M or 32M boundary, as appropriate. */
-    frametable_size = ROUNDUP(frametable_size, mapping_size);
-    base_mfn = alloc_boot_pages(frametable_size >> PAGE_SHIFT, 32<<(20-12));
+    max_pdx = pfn_to_pdx(max_page - 1) + 1;
 
-    rc = map_pages_to_xen(FRAMETABLE_VIRT_START, base_mfn,
-                          frametable_size >> PAGE_SHIFT,
-                          PAGE_HYPERVISOR_RW | _PAGE_BLOCK);
-    if ( rc )
-        panic("Unable to setup the frametable mappings.\n");
+    if ( max_pdx > FRAMETABLE_NR )
+        panic("Frametable too small\n");
+
+    max_idx = DIV_ROUND_UP(max_pdx, PDX_GROUP_COUNT);
+
+    for ( sidx = (frametable_base_pdx / PDX_GROUP_COUNT); ; sidx = nidx )
+    {
+        eidx = find_next_zero_bit(pdx_group_valid, max_idx, sidx);
+        nidx = find_next_bit(pdx_group_valid, max_idx, eidx);
+
+        if ( nidx >= max_idx )
+            break;
 
-    memset(&frame_table[0], 0, nr_pdxs * sizeof(struct page_info));
-    memset(&frame_table[nr_pdxs], -1,
-           frametable_size - (nr_pdxs * sizeof(struct page_info)));
+        init_frametable_chunk(sidx * PDX_GROUP_COUNT, eidx * PDX_GROUP_COUNT);
+    }
 
-    frametable_virt_end = FRAMETABLE_VIRT_START + (nr_pdxs *
-                          sizeof(struct page_info));
+    init_frametable_chunk(sidx * PDX_GROUP_COUNT, max_pdx);
 }
 
 /*
-- 
2.25.1