[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] Re: [PATCH early-RFC 2/5] xen/arm64: Rework the memory layout
Hi Julien, > On 9 Mar 2022, at 12:20, Julien Grall <julien@xxxxxxx> wrote: > > From: Julien Grall <jgrall@xxxxxxxxxx> > > Xen is currently not fully compliant with the Arm because it will I think you wanted to say “arm arm” here. > switch the TTBR with the MMU on. > > In order to be compliant, we need to disable the MMU before > switching the TTBR. The implication is the page-tables should > contain an identity mapping of the code switching the TTBR. > > If we don't rework the memory layout, we would need to find a > virtual address that matches a physical address and doesn't clash > with the static virtual regions. This can be a bit tricky. This sentence is a bit misleading. Even with the rework you still need to do that; it is just that by moving the Xen virtual address higher you make sure that any physical memory under 512GB can be mapped 1:1 without clashing with other Xen mappings (unless Xen is loaded in memory at a physical address above 512GB, which would end in the same issue). I think it should be rephrased. > > On arm64, the memory layout has plenty of unused space. In most of > the case we expect Xen to be loaded in low memory. > > The memory layout is reshuffled to keep the 0th slot free. Xen will now 0th slot of first level of page table. > be loaded at (512GB + 2MB). This requires a slight tweak of the boot > code as XEN_VIRT_START cannot be used as an immediate. > > Signed-off-by: Julien Grall <jgrall@xxxxxxxxxx> > > --- > > TODO: > - I vaguely recall that one of the early platform we supported add > the memory starting in high memory (> 1TB). I need to check > whether the new layout will be fine. I think we have some Juno with some memory like that, tell me if you need help here. 
> - Update the documentation to reflect the new layout > --- > xen/arch/arm/arm64/head.S | 3 ++- > xen/arch/arm/include/asm/config.h | 20 ++++++++++++++------ > xen/arch/arm/mm.c | 14 +++++++------- > 3 files changed, 23 insertions(+), 14 deletions(-) > > diff --git a/xen/arch/arm/arm64/head.S b/xen/arch/arm/arm64/head.S > index 66d862fc8137..878649280d73 100644 > --- a/xen/arch/arm/arm64/head.S > +++ b/xen/arch/arm/arm64/head.S > @@ -594,7 +594,8 @@ create_page_tables: > * need an additional 1:1 mapping, the virtual mapping will > * suffice. > */ > - cmp x19, #XEN_VIRT_START > + ldr x0, =XEN_VIRT_START > + cmp x19, x0 A comment in the code would be good here to prevent someone reverting this. > bne 1f > ret > 1: > diff --git a/xen/arch/arm/include/asm/config.h > b/xen/arch/arm/include/asm/config.h > index 5db28a8dbd56..b2f31a914103 100644 > --- a/xen/arch/arm/include/asm/config.h > +++ b/xen/arch/arm/include/asm/config.h > @@ -107,8 +107,20 @@ > * Unused > */ > > +#ifdef CONFIG_ARM_32 > + > #define COMMON_VIRT_START _AT(vaddr_t, 0) > > +#else > + > +#define SLOT0_ENTRY_BITS 39 > +#define SLOT0(slot) (_AT(vaddr_t,slot) << SLOT0_ENTRY_BITS) > +#define SLOT0_ENTRY_SIZE SLOT0(1) > + > +#define COMMON_VIRT_START SLOT(1) > + > +#endif > + > #define XEN_VIRT_START (COMMON_VIRT_START + MB(2)) > #define XEN_SLOT_SIZE MB(2) > #define XEN_VIRT_END (XEN_VIRT_START + XEN_SLOT_SIZE) > @@ -161,14 +173,10 @@ > > #else /* ARM_64 */ > > -#define SLOT0_ENTRY_BITS 39 > -#define SLOT0(slot) (_AT(vaddr_t,slot) << SLOT0_ENTRY_BITS) > -#define SLOT0_ENTRY_SIZE SLOT0(1) > - > -#define VMAP_VIRT_START GB(1) > +#define VMAP_VIRT_START (SLOT0(1) + GB(1)) > #define VMAP_VIRT_END (VMAP_VIRT_START + GB(1)) > > -#define FRAMETABLE_VIRT_START GB(32) > +#define FRAMETABLE_VIRT_START (SLOT0(1) + GB(32)) > #define FRAMETABLE_SIZE GB(32) > #define FRAMETABLE_NR (FRAMETABLE_SIZE / sizeof(*frame_table)) > #define FRAMETABLE_VIRT_END (FRAMETABLE_VIRT_START + FRAMETABLE_SIZE - 1) > diff --git 
a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c > index 6b7c41d827ca..75ed9a3ce249 100644 > --- a/xen/arch/arm/mm.c > +++ b/xen/arch/arm/mm.c > @@ -187,11 +187,10 @@ static void __init __maybe_unused build_assertions(void) > BUILD_BUG_ON(DIRECTMAP_VIRT_START & ~FIRST_MASK); > #endif > /* Page table structure constraints */ > -#ifdef CONFIG_ARM_64 > - BUILD_BUG_ON(zeroeth_table_offset(XEN_VIRT_START)); > -#endif Don’t you want to enforce the opposite now ? Check that it is not on slot 0 ? > BUILD_BUG_ON(first_table_offset(XEN_VIRT_START)); > +#ifdef CONFIG_ARM_32 > BUILD_BUG_ON(second_linear_offset(XEN_VIRT_START) >= XEN_PT_LPAE_ENTRIES); > +#endif > #ifdef CONFIG_DOMAIN_PAGE > BUILD_BUG_ON(DOMHEAP_VIRT_START & ~FIRST_MASK); > #endif > @@ -611,10 +610,11 @@ void __init setup_pagetables(unsigned long > boot_phys_offset) > phys_offset = boot_phys_offset; > > #ifdef CONFIG_ARM_64 > - p = (void *) xen_pgtable; > - p[0] = pte_of_xenaddr((uintptr_t)xen_first); > - p[0].pt.table = 1; > - p[0].pt.xn = 0; > + pte = pte_of_xenaddr((uintptr_t)xen_first); > + pte.pt.table = 1; > + pte.pt.xn = 0; > + xen_pgtable[zeroeth_table_offset(XEN_VIRT_START)] = pte; > + > p = (void *) xen_first; > #else > p = (void *) cpu0_pgtable; > -- > 2.32.0 > Cheers Bertrand
|
Lists.xenproject.org is hosted with RackSpace, monitoring our |