
Re: [Xen-devel] [PATCH MM-PART2 RESEND v2 15/19] xen/arm: mm: Introduce DEFINE_PAGE_TABLE{, S} and use it



On Tue, 14 May 2019, Julien Grall wrote:
> We have multiple static page-tables defined in arch/arm/mm.c. The
> current way to define them is difficult to read and does not help when
> making modifications.
> 
> Two new helpers DEFINE_PAGE_TABLES (to define multiple page-tables) and
> DEFINE_PAGE_TABLE (alias of DEFINE_PAGE_TABLES(..., 1)) are introduced
> and now used to define static page-tables.
> 
> Note that the alignment requested by DEFINE_PAGE_TABLES() differs from
> what is currently used for some of the static page-tables. This is fine
> because page-tables are only required to be aligned to the page size.
> 
> Signed-off-by: Julien Grall <julien.grall@xxxxxxx>

Reviewed-by: Stefano Stabellini <sstabellini@xxxxxxxxxx>
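
For reference, a quick sketch of what the new helpers expand to (lpae_t,
LPAE_ENTRIES and PAGE_SIZE are the existing Xen definitions; the names
below are taken from the patch):

    /* DEFINE_PAGE_TABLE(boot_pgtable); expands to: */
    lpae_t __aligned(PAGE_SIZE) boot_pgtable[LPAE_ENTRIES * (1)];

    /* DEFINE_PAGE_TABLES(cpu0_dommap, DOMHEAP_SECOND_PAGES); expands to: */
    lpae_t __aligned(PAGE_SIZE) cpu0_dommap[LPAE_ENTRIES * (DOMHEAP_SECOND_PAGES)];

So the multi-page case is now only page-aligned, rather than aligned to the
full allocation size as before; as the commit message notes, page alignment
is all that is required.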


> ---
>     Changes in v2:
>         - Patch in replacement of "Use the shorter version
>         __aligned(PAGE_SIZE) to align page-tables".
> ---
>  xen/arch/arm/mm.c | 32 ++++++++++++++++++--------------
>  1 file changed, 18 insertions(+), 14 deletions(-)
> 
> diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
> index 6db7dda0da..9a5f2e1c3f 100644
> --- a/xen/arch/arm/mm.c
> +++ b/xen/arch/arm/mm.c
> @@ -50,6 +50,11 @@ struct domain *dom_xen, *dom_io, *dom_cow;
>  #undef mfn_to_virt
>  #define mfn_to_virt(mfn) __mfn_to_virt(mfn_x(mfn))
>  
> +#define DEFINE_PAGE_TABLES(name, nr)                    \
> +lpae_t __aligned(PAGE_SIZE) name[LPAE_ENTRIES * (nr)]
> +
> +#define DEFINE_PAGE_TABLE(name) DEFINE_PAGE_TABLES(name, 1)
> +
>  /* Static start-of-day pagetables that we use before the allocators
>   * are up. These are used by all CPUs during bringup before switching
>   * to the CPUs own pagetables.
> @@ -73,13 +78,13 @@ struct domain *dom_xen, *dom_io, *dom_cow;
>   * Finally, if EARLY_PRINTK is enabled then xen_fixmap will be mapped
>   * by the CPU once it has moved off the 1:1 mapping.
>   */
> -lpae_t boot_pgtable[LPAE_ENTRIES] __attribute__((__aligned__(4096)));
> +DEFINE_PAGE_TABLE(boot_pgtable);
>  #ifdef CONFIG_ARM_64
> -lpae_t boot_first[LPAE_ENTRIES] __attribute__((__aligned__(4096)));
> -lpae_t boot_first_id[LPAE_ENTRIES] __attribute__((__aligned__(4096)));
> +DEFINE_PAGE_TABLE(boot_first);
> +DEFINE_PAGE_TABLE(boot_first_id);
>  #endif
> -lpae_t boot_second[LPAE_ENTRIES]  __attribute__((__aligned__(4096)));
> -lpae_t boot_third[LPAE_ENTRIES]  __attribute__((__aligned__(4096)));
> +DEFINE_PAGE_TABLE(boot_second);
> +DEFINE_PAGE_TABLE(boot_third);
>  
>  /* Main runtime page tables */
>  
> @@ -93,8 +98,8 @@ lpae_t boot_third[LPAE_ENTRIES]  __attribute__((__aligned__(4096)));
>  
>  #ifdef CONFIG_ARM_64
>  #define HYP_PT_ROOT_LEVEL 0
> -lpae_t xen_pgtable[LPAE_ENTRIES] __attribute__((__aligned__(4096)));
> -lpae_t xen_first[LPAE_ENTRIES] __attribute__((__aligned__(4096)));
> +static DEFINE_PAGE_TABLE(xen_pgtable);
> +static DEFINE_PAGE_TABLE(xen_first);
>  #define THIS_CPU_PGTABLE xen_pgtable
>  #else
>  #define HYP_PT_ROOT_LEVEL 1
> @@ -107,17 +112,16 @@ static DEFINE_PER_CPU(lpae_t *, xen_pgtable);
>   * DOMHEAP_VIRT_START...DOMHEAP_VIRT_END in 2MB chunks. */
>  static DEFINE_PER_CPU(lpae_t *, xen_dommap);
>  /* Root of the trie for cpu0, other CPU's PTs are dynamically allocated */
> -lpae_t cpu0_pgtable[LPAE_ENTRIES] __attribute__((__aligned__(4096)));
> +static DEFINE_PAGE_TABLE(cpu0_pgtable);
>  /* cpu0's domheap page tables */
> -lpae_t cpu0_dommap[LPAE_ENTRIES*DOMHEAP_SECOND_PAGES]
> -    __attribute__((__aligned__(4096*DOMHEAP_SECOND_PAGES)));
> +static DEFINE_PAGE_TABLES(cpu0_dommap, DOMHEAP_SECOND_PAGES);
>  #endif
>  
>  #ifdef CONFIG_ARM_64
>  /* The first page of the first level mapping of the xenheap. The
>   * subsequent xenheap first level pages are dynamically allocated, but
>   * we need this one to bootstrap ourselves. */
> -lpae_t xenheap_first_first[LPAE_ENTRIES] __attribute__((__aligned__(4096)));
> +static DEFINE_PAGE_TABLE(xenheap_first_first);
>  /* The zeroeth level slot which uses xenheap_first_first. Used because
>   * setup_xenheap_mappings otherwise relies on mfn_to_virt which isn't
>   * valid for a non-xenheap mapping. */
> @@ -131,12 +135,12 @@ static __initdata int xenheap_first_first_slot = -1;
>   * addresses from 0 to 0x7fffffff. Offsets into it are calculated
>   * with second_linear_offset(), not second_table_offset().
>   */
> -lpae_t xen_second[LPAE_ENTRIES*2] __attribute__((__aligned__(4096*2)));
> +static DEFINE_PAGE_TABLES(xen_second, 2);
>  /* First level page table used for fixmap */
> -lpae_t xen_fixmap[LPAE_ENTRIES] __attribute__((__aligned__(4096)));
> +DEFINE_PAGE_TABLE(xen_fixmap);
>  /* First level page table used to map Xen itself with the XN bit set
>   * as appropriate. */
> -static lpae_t xen_xenmap[LPAE_ENTRIES] __attribute__((__aligned__(4096)));
> +static DEFINE_PAGE_TABLE(xen_xenmap);
>  
>  /* Non-boot CPUs use this to find the correct pagetables. */
>  uint64_t init_ttbr;
> -- 
> 2.11.0
> 

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel

 

