
Re: [PATCH 5/9] xen/arm: Rework the code mapping Xen to avoid relying on the size of Xen


  • To: Julien Grall <julien@xxxxxxx>, <xen-devel@xxxxxxxxxxxxxxxxxxxx>
  • From: Michal Orzel <michal.orzel@xxxxxxx>
  • Date: Mon, 26 Jun 2023 13:43:21 +0200
  • Cc: <Luca.Fancellu@xxxxxxx>, <Henry.Wang@xxxxxxx>, Julien Grall <jgrall@xxxxxxxxxx>, Stefano Stabellini <sstabellini@xxxxxxxxxx>, "Bertrand Marquis" <bertrand.marquis@xxxxxxx>, Volodymyr Babchuk <Volodymyr_Babchuk@xxxxxxxx>
  • Delivery-date: Mon, 26 Jun 2023 11:43:48 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xenproject.org>


On 25/06/2023 22:49, Julien Grall wrote:
> 
> 
> From: Julien Grall <jgrall@xxxxxxxxxx>
> 
> At the moment, the maximum size of Xen binary we can support is 2MB.
> This is what we reserved in the virtual address but also what all
> the code in Xen relies on as we only allocate one L3 page-table.
> 
> When feature like UBSAN (will be enabled in a follow-up patch) and GCOV
> are enabled, the binary will be way over 2MB.
> 
> The code is now reworked so it doesn't realy on a specific size but
s/realy/rely

> will instead look at the reversed size and compute the number of
> page-table to allocate/map.
> 
> While at it, replace any reference to 4KB mappings with a more
> generic word because the page-size may change in the future.
> 
> Also fix the typo s/tlb/tbl/ in code move in arm32/head.S
> 
> Signed-off-by: Julien Grall <jgrall@xxxxxxxxxx>
> ---
>  xen/arch/arm/arm32/head.S         | 61 ++++++++++++++++++++++++-------
>  xen/arch/arm/arm64/head.S         | 51 +++++++++++++++++++++-----
>  xen/arch/arm/include/asm/config.h |  1 +
>  xen/arch/arm/include/asm/lpae.h   |  8 ++--
>  xen/arch/arm/mm.c                 | 24 +++++++-----
>  5 files changed, 108 insertions(+), 37 deletions(-)
> 
> diff --git a/xen/arch/arm/arm32/head.S b/xen/arch/arm/arm32/head.S
> index 5e3692eb3abf..5448671de897 100644
> --- a/xen/arch/arm/arm32/head.S
> +++ b/xen/arch/arm/arm32/head.S
> @@ -373,35 +373,55 @@ ENDPROC(cpu_init)
>  .endm
> 
>  /*
> - * Macro to create a page table entry in \ptbl to \tbl
> + * Macro to create a page table entry in \ptbl to \tbl (physical
> + * address)
>   *
>   * ptbl:    table symbol where the entry will be created
> - * tbl:     table symbol to point to
> + * tbl:     physical address of the table to point to
>   * virt:    virtual address
>   * lvl:     page-table level
>   *
>   * Preserves \virt
> - * Clobbers r1 - r4
> + * Clobbers \tbl, r1 - r3
>   *
>   * Also use r10 for the phys offset.
This no longer applies.

>   *
> - * Note that \virt should be in a register other than r1 - r4
> + * Note that \tbl and \virt should be in a register other than r1 - r3
>   */
> -.macro create_table_entry, ptbl, tbl, virt, lvl
> -        get_table_slot r1, \virt, \lvl  /* r1 := slot in \tlb */
> -        lsl   r1, r1, #3                /* r1 := slot offset in \tlb */
> -
> -        load_paddr r4, \tbl
> +.macro create_table_entry_from_paddr, ptbl, tbl, virt, lvl
> +        get_table_slot r1, \virt, \lvl  /* r1 := slot in \tbl */
> +        lsl   r1, r1, #3                /* r1 := slot offset in \tbl */
> 
>          movw  r2, #PT_PT             /* r2:r3 := right for linear PT */
> -        orr   r2, r2, r4             /*           + \tlb paddr */
> +        orr   r2, r2, \tbl           /*           + \tbl paddr */
>          mov   r3, #0
> 
> -        adr_l r4, \ptbl
> +        adr_l \tbl, \ptbl            /* \tbl := (v,p)addr of \ptbl */
> 
> -        strd  r2, r3, [r4, r1]
> +        strd  r2, r3, [\tbl, r1]
>  .endm
> 
> +
> +/*
> + * Macro to create a page table entry in \ptbl to \tbl (symbol)
> + *
> + * ptbl:    table symbol where the entry will be created
> + * tbl:     table symbol to point to
> + * virt:    virtual address
> + * lvl:     page-table level
> + *
> + * Preserves \virt
> + * Clobbers r1 - r4
> + *
> + * Also use r10 for the phys offset.
> + *
> + * Note that \virt should be in a register other than r1 - r4
> + */
> +.macro create_table_entry, ptbl, tbl, virt, lvl
> +        load_paddr r4, \tbl
> +        create_table_entry_from_paddr \ptbl, r4, \virt, \lvl
> + .endm
> +
>  /*
>   * Macro to create a mapping entry in \tbl to \paddr. Only mapping in 3rd
>   * level table (i.e page granularity) is supported.
> @@ -451,13 +471,26 @@ ENDPROC(cpu_init)
>   * Output:
>   *   r12: Was a temporary mapping created?
>   *
> - * Clobbers r0 - r4
> + * Clobbers r0 - r5
>   */
>  create_page_tables:
>          /* Prepare the page-tables for mapping Xen */
>          mov_w r0, XEN_VIRT_START
>          create_table_entry boot_pgtable, boot_second, r0, 1
> -        create_table_entry boot_second, boot_third, r0, 2
> +
> +        /*
> +         * We need to use a stash register because
> +         * create_table_entry_paddr() will clobber the register storing
> +         * the physical address of the table to point to.
> +         */
> +        load_paddr r5, boot_third
> +        mov_w r4, XEN_VIRT_START
Can you just reuse r0, which is already set to XEN_VIRT_START, to avoid
repeating this operation over and over again?

> +.rept XEN_NR_ENTRIES(2)
> +        mov   r0, r5                        /* r0 := paddr(l3 table) */
> +        create_table_entry_from_paddr boot_second, r0, r4, 2
> +        add   r4, r4, #XEN_PT_LEVEL_SIZE(2) /* r4 := Next vaddr */
> +        add   r5, r5, #PAGE_SIZE            /* r5 := Next table */
> +.endr
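(Not part of my review, but for readers skimming the archive: a small
standalone C model of what this .rept loop does; the same pattern is used
in the arm64 part below. All names and constants here are made up for
illustration only, the real code uses the assembly macros above.)

    #include <stdint.h>
    #include <stddef.h>

    #define MODEL_PAGE_SIZE     4096UL
    #define MODEL_LPAE_ENTRIES  512UL
    #define MODEL_L2_SHIFT      21                      /* 2MB per level-2 entry */
    #define MODEL_L2_SIZE       (1UL << MODEL_L2_SHIFT)
    #define MODEL_PT_PT         0x3UL                   /* "table" descriptor bits */

    /*
     * Write one level-2 entry per 2MB chunk of the Xen virtual area, each
     * pointing at the next 4KB page of the level-3 table block.
     */
    static void model_link_third(uint64_t *second, uint64_t third_paddr,
                                 uint64_t virt_start, size_t nr_entries)
    {
        for ( size_t i = 0; i < nr_entries; i++ )
        {
            uint64_t virt = virt_start + i * MODEL_L2_SIZE;
            size_t slot = (virt >> MODEL_L2_SHIFT) & (MODEL_LPAE_ENTRIES - 1);

            second[slot] = (third_paddr + i * MODEL_PAGE_SIZE) | MODEL_PT_PT;
        }
    }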
> 
>          /*
>           * Find the size of Xen in pages and multiply by the size of a
> diff --git a/xen/arch/arm/arm64/head.S b/xen/arch/arm/arm64/head.S
> index 66bc85d4c39e..c9e2e36506d9 100644
> --- a/xen/arch/arm/arm64/head.S
> +++ b/xen/arch/arm/arm64/head.S
> @@ -490,6 +490,31 @@ ENDPROC(cpu_init)
>          ubfx  \slot, \virt, #XEN_PT_LEVEL_SHIFT(\lvl), #XEN_PT_LPAE_SHIFT
>  .endm
> 
> +/*
> + * Macro to create a page table entry in \ptbl to \tbl
> + * ptbl:    table symbol where the entry will be created
> + * tbl:     physical address of the table to point to
> + * virt:    virtual address
> + * lvl:     page-table level
> + * tmp1:    scratch register
> + * tmp2:    scratch register
> + *
> + * Preserves \virt
> + * Clobbers \tbl, \tmp1, \tmp2
> + *
> + * Note that all parameters using registers should be distinct.
> + */
> +.macro create_table_entry_from_paddr, ptbl, tbl, virt, lvl, tmp1, tmp2
> +        get_table_slot \tmp1, \virt, \lvl   /* \tmp1 := slot in \tlb */
s/tlb/tbl

> +
> +        mov   \tmp2, #PT_PT                 /* \tmp2 := right for linear PT */
> +        orr   \tmp2, \tmp2, \tbl            /*          + \tlb */
s/tlb/tbl

> +
> +        adr_l \tbl, \ptbl                   /* \tlb := address(\ptbl) */
s/tlb/tbl

> +
> +        str   \tmp2, [\tbl, \tmp1, lsl #3]
> +.endm
> +
>  /*
>   * Macro to create a page table entry in \ptbl to \tbl
>   *
> @@ -509,15 +534,8 @@ ENDPROC(cpu_init)
>   * Note that all parameters using registers should be distinct.
>   */
>  .macro create_table_entry, ptbl, tbl, virt, lvl, tmp1, tmp2, tmp3
> -        get_table_slot \tmp1, \virt, \lvl   /* \tmp1 := slot in \tlb */
> -
> -        load_paddr \tmp2, \tbl
> -        mov   \tmp3, #PT_PT                 /* \tmp3 := right for linear PT */
> -        orr   \tmp3, \tmp3, \tmp2           /*          + \tlb paddr */
> -
> -        adr_l \tmp2, \ptbl
> -
> -        str   \tmp3, [\tmp2, \tmp1, lsl #3]
> +        load_paddr \tmp1, \tbl
> +        create_table_entry_from_paddr \ptbl, \tmp1, \virt, \lvl, \tmp2, \tmp3
>  .endm
> 
>  /*
> @@ -570,7 +588,20 @@ create_page_tables:
>          ldr   x0, =XEN_VIRT_START
>          create_table_entry boot_pgtable, boot_first, x0, 0, x1, x2, x3
>          create_table_entry boot_first, boot_second, x0, 1, x1, x2, x3
> -        create_table_entry boot_second, boot_third, x0, 2, x1, x2, x3
> +
> +        /*
> +         * We need to use a stash register because
> +         * create_table_entry_paddr() will clobber the register storing
> +         * the physical address of the table to point to.
> +         */
> +        load_paddr x4, boot_third
> +        ldr   x1, =XEN_VIRT_START
Can you just reuse x0, which is already set to XEN_VIRT_START, to avoid
repeating this operation over and over again?

> +.rept XEN_NR_ENTRIES(2)
> +        mov   x0, x4                            /* x0 := paddr(l3 table) */
> +        create_table_entry_from_paddr boot_second, x0, x1, 2, x2, x3
> +        add   x1, x1, #XEN_PT_LEVEL_SIZE(2)     /* x1 := Next vaddr */
> +        add   x4, x4, #PAGE_SIZE                /* x4 := Next table */
> +.endr
> 
>          /*
>           * Find the size of Xen in pages and multiply by the size of a
> diff --git a/xen/arch/arm/include/asm/config.h b/xen/arch/arm/include/asm/config.h
> index c969e6da589d..6d246ab23c48 100644
> --- a/xen/arch/arm/include/asm/config.h
> +++ b/xen/arch/arm/include/asm/config.h
> @@ -125,6 +125,7 @@
>  #endif
> 
>  #define XEN_VIRT_SIZE           _AT(vaddr_t, MB(2))
> +#define XEN_NR_ENTRIES(lvl)     (XEN_VIRT_SIZE / XEN_PT_LEVEL_SIZE(lvl))
> 
>  #define FIXMAP_VIRT_START       (XEN_VIRT_START + XEN_VIRT_SIZE)
>  #define FIXMAP_VIRT_SIZE        _AT(vaddr_t, MB(2))
> diff --git a/xen/arch/arm/include/asm/lpae.h b/xen/arch/arm/include/asm/lpae.h
> index 7d2f6fd1bd5a..93e824f01125 100644
> --- a/xen/arch/arm/include/asm/lpae.h
> +++ b/xen/arch/arm/include/asm/lpae.h
> @@ -267,15 +267,17 @@ lpae_t mfn_to_xen_entry(mfn_t mfn, unsigned int attr);
> 
>  /*
>   * Macros to define page-tables:
> - *  - DEFINE_BOOT_PAGE_TABLE is used to define page-table that are used
> + *  - DEFINE_BOOT_PAGE_TABLE{,S} are used to define page-table that are used
s/page-table/page-table(s)/, or maybe use the same wording as for the runtime
page-table macros.

>   *  in assembly code before BSS is zeroed.
>   *  - DEFINE_PAGE_TABLE{,S} are used to define one or multiple
>   *  page-tables to be used after BSS is zeroed (typically they are only used
>   *  in C).
>   */
> -#define DEFINE_BOOT_PAGE_TABLE(name)                                          \
> +#define DEFINE_BOOT_PAGE_TABLES(name, nr)                                     \
>  lpae_t __aligned(PAGE_SIZE) __section(".data.page_aligned")                   \
> -    name[XEN_PT_LPAE_ENTRIES]
> +    name[XEN_PT_LPAE_ENTRIES * (nr)]
> +
> +#define DEFINE_BOOT_PAGE_TABLE(name) DEFINE_BOOT_PAGE_TABLES(name, 1)
> 
>  #define DEFINE_PAGE_TABLES(name, nr)                    \
>  lpae_t __aligned(PAGE_SIZE) name[XEN_PT_LPAE_ENTRIES * (nr)]
> diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
> index e460249736c3..2b2ff6015ebd 100644
> --- a/xen/arch/arm/mm.c
> +++ b/xen/arch/arm/mm.c
> @@ -53,8 +53,8 @@ mm_printk(const char *fmt, ...) {}
>   * to the CPUs own pagetables.
>   *
>   * These pagetables have a very simple structure. They include:
> - *  - 2MB worth of 4K mappings of xen at XEN_VIRT_START, boot_first and
> - *    boot_second are used to populate the tables down to boot_third
> + *  - XEN_VIRT_SIZE worth of L3 mappings of xen at XEN_VIRT_START, boot_first
> + *    and boot_second are used to populate the tables down to boot_third
>   *    which contains the actual mapping.
>   *  - a 1:1 mapping of xen at its current physical address. This uses a
>   *    section mapping at whichever of boot_{pgtable,first,second}
> @@ -79,7 +79,7 @@ DEFINE_BOOT_PAGE_TABLE(boot_first_id);
>  DEFINE_BOOT_PAGE_TABLE(boot_second_id);
>  DEFINE_BOOT_PAGE_TABLE(boot_third_id);
>  DEFINE_BOOT_PAGE_TABLE(boot_second);
> -DEFINE_BOOT_PAGE_TABLE(boot_third);
> +DEFINE_BOOT_PAGE_TABLES(boot_third, XEN_NR_ENTRIES(2));
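(For reference, given the new macro earlier in this patch, the line above
expands to roughly the following, i.e. still a single page-sized table while
XEN_VIRT_SIZE stays at 2MB:)

    lpae_t __aligned(PAGE_SIZE) __section(".data.page_aligned")
        boot_third[XEN_PT_LPAE_ENTRIES * (XEN_NR_ENTRIES(2))];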
> 
>  /* Main runtime page tables */
> 
> @@ -115,7 +115,7 @@ DEFINE_BOOT_PAGE_TABLE(xen_fixmap);
>   * Third level page table used to map Xen itself with the XN bit set
>   * as appropriate.
>   */
> -static DEFINE_PAGE_TABLE(xen_xenmap);
> +static DEFINE_PAGE_TABLES(xen_xenmap, XEN_NR_ENTRIES(2));
> 
>  /* Non-boot CPUs use this to find the correct pagetables. */
>  uint64_t init_ttbr;
> @@ -518,15 +518,15 @@ void __init setup_pagetables(unsigned long boot_phys_offset)
>      p[0].pt.table = 1;
>      p[0].pt.xn = 0;
> 
> -    /* Break up the Xen mapping into 4k pages and protect them separately. */
> -    for ( i = 0; i < XEN_PT_LPAE_ENTRIES; i++ )
> +    /* Break up the Xen mapping into pages and protect them separately. */
> +    for ( i = 0; i < XEN_NR_ENTRIES(3); i++ )
>      {
>          vaddr_t va = XEN_VIRT_START + (i << PAGE_SHIFT);
> 
>          if ( !is_kernel(va) )
>              break;
>          pte = pte_of_xenaddr(va);
> -        pte.pt.table = 1; /* 4k mappings always have this bit set */
> +        pte.pt.table = 1; /* third level mappings always have this bit set */
>          if ( is_kernel_text(va) || is_kernel_inittext(va) )
>          {
>              pte.pt.xn = 0;
> @@ -539,10 +539,14 @@ void __init setup_pagetables(unsigned long boot_phys_offset)
> 
>      /* Initialise xen second level entries ... */
>      /* ... Xen's text etc */
> +    for ( i = 0; i < XEN_NR_ENTRIES(2); i++ )
> +    {
> +        vaddr_t va = XEN_VIRT_START + i * XEN_PT_LEVEL_SIZE(2);
For consistency with the loop above, maybe use (i << XEN_PT_LEVEL_SHIFT(2))?
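
(Either spelling should generate the same code, assuming the usual
relationship between the two macros:)

    /* XEN_PT_LEVEL_SIZE(lvl) is (1 << XEN_PT_LEVEL_SHIFT(lvl)), so: */
    vaddr_t va_mul   = XEN_VIRT_START + i * XEN_PT_LEVEL_SIZE(2);
    vaddr_t va_shift = XEN_VIRT_START + (i << XEN_PT_LEVEL_SHIFT(2));
    /* va_mul == va_shift */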

These are all just minor comments, so:
Reviewed-by: Michal Orzel <michal.orzel@xxxxxxx>

~Michal



 

