
Re: [PATCH v4 3/3] xen/ppc: Implement initial Radix MMU support


  • To: Shawn Anastasio <sanastasio@xxxxxxxxxxxxxxxxxxxxx>
  • From: Jan Beulich <jbeulich@xxxxxxxx>
  • Date: Wed, 23 Aug 2023 16:04:43 +0200
  • Cc: Timothy Pearson <tpearson@xxxxxxxxxxxxxxxxxxxxx>, Andrew Cooper <andrew.cooper3@xxxxxxxxxx>, xen-devel@xxxxxxxxxxxxxxxxxxxx
  • Delivery-date: Wed, 23 Aug 2023 14:04:55 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xenproject.org>

On 23.08.2023 01:03, Shawn Anastasio wrote:
> Add code to construct early identity-mapped page tables as well as the
> required process and partition tables to enable the MMU.
> 
> Signed-off-by: Shawn Anastasio <sanastasio@xxxxxxxxxxxxxxxxxxxxx>

Acked-by: Jan Beulich <jbeulich@xxxxxxxx>
with two nits, which I'll be happy to take care of while committing,
so long as you agree:

> --- /dev/null
> +++ b/xen/arch/ppc/include/asm/page.h
> @@ -0,0 +1,180 @@
> +#ifndef _ASM_PPC_PAGE_H
> +#define _ASM_PPC_PAGE_H
> +
> +#include <xen/types.h>
> +
> +#include <asm/bitops.h>
> +#include <asm/byteorder.h>
> +
> +#define PDE_VALID     PPC_BIT(0)
> +#define PDE_NLB_MASK  0x1ffffffffffffe0UL
> +#define PDE_NLS_MASK  0x1f
> +
> +#define PTE_VALID     PPC_BIT(0)
> +#define PTE_LEAF      PPC_BIT(1)
> +#define PTE_REFERENCE PPC_BIT(55)
> +#define PTE_CHANGE    PPC_BIT(56)
> +
> +/* PTE Attributes */
> +#define PTE_ATT_SAO            PPC_BIT(59) /* Strong Access Ordering */
> +#define PTE_ATT_NON_IDEMPOTENT PPC_BIT(58)
> +#define PTE_ATT_TOLERANT       (PPC_BIT(58) | PPC_BIT(59))
> +
> +/* PTE Encoded Access Authority */
> +#define PTE_EAA_PRIVILEGED PPC_BIT(60)
> +#define PTE_EAA_READ       PPC_BIT(61)
> +#define PTE_EAA_WRITE      PPC_BIT(62)
> +#define PTE_EAA_EXECUTE    PPC_BIT(63)
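
(Aside, for readers less familiar with Power: PPC_BIT() uses the ISA's
MSB-0 bit numbering. Assuming the usual Xen/Linux shape of the macro,

    #define PPC_BIT(nr) (1UL << (63 - (nr)))

PTE_VALID above is thus the most significant bit of the 64-bit entry,
and PTE_EAA_EXECUTE the least significant one.)
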
> +
> +/* Field shifts/masks */
> +#define PTE_RPN_MASK  0x1fffffffffff000UL
> +#define PTE_ATT_MASK  0x30UL
> +#define PTE_EAA_MASK  0xfUL
> +
> +#define PTE_XEN_BASE (PTE_VALID | PTE_EAA_PRIVILEGED | PTE_REFERENCE)
> +#define PTE_XEN_RW   (PTE_XEN_BASE | PTE_EAA_READ | PTE_EAA_WRITE | PTE_CHANGE)
> +#define PTE_XEN_RO   (PTE_XEN_BASE | PTE_EAA_READ)
> +#define PTE_XEN_RX   (PTE_XEN_BASE | PTE_EAA_READ | PTE_EAA_EXECUTE)
> +
> +/*
> + * Radix Tree layout for 64KB pages:
> + *
> + * [ L1 (ROOT) PAGE DIRECTORY (8192 * sizeof(pde_t)) ]
> + *                     |
> + *                     |
> + *                     v
> + *    [ L2 PAGE DIRECTORY (512 * sizeof(pde_t)) ]
> + *                     |
> + *                     |
> + *                     v
> + *    [ L3 PAGE DIRECTORY (512 * sizeof(pde_t)) ]
> + *                     |
> + *                     |
> + *                     v
> + *      [ L4 PAGE TABLE (32 * sizeof(pte_t)) ]
> + *                     |
> + *                     |
> + *                     v
> + *            [ PAGE TABLE ENTRY ]
> + */
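
(As a sanity check on the figures below: the four index widths plus
the 16-bit offset of a 64K page cover 13 + 9 + 9 + 5 + 16 = 52 bits
of virtual address, which matches the 4PB quoted for the root level,
since 2^52 bytes = 4PiB.)
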
> +
> +#define XEN_PT_ENTRIES_LOG2_LVL_1 13 /* 2**13 entries, maps 2**13 * 512GB = 4PB */
> +#define XEN_PT_ENTRIES_LOG2_LVL_2 9  /* 2**9  entries, maps 2**9  * 1GB = 512GB */
> +#define XEN_PT_ENTRIES_LOG2_LVL_3 9  /* 2**9  entries, maps 2**9  * 1GB = 512GB */
> +#define XEN_PT_ENTRIES_LOG2_LVL_4 5  /* 2**5  entries, maps 2**5  * 64K = 2MB */
> +
> +#define XEN_PT_SHIFT_LVL_1    (XEN_PT_SHIFT_LVL_2 + XEN_PT_ENTRIES_LOG2_LVL_2)
> +#define XEN_PT_SHIFT_LVL_2    (XEN_PT_SHIFT_LVL_3 + XEN_PT_ENTRIES_LOG2_LVL_3)
> +#define XEN_PT_SHIFT_LVL_3    (XEN_PT_SHIFT_LVL_4 + XEN_PT_ENTRIES_LOG2_LVL_4)
> +#define XEN_PT_SHIFT_LVL_4    PAGE_SHIFT
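
(To make the recursion concrete: with PAGE_SHIFT == 16 for 64K pages,
these expand to

    XEN_PT_SHIFT_LVL_4 = 16
    XEN_PT_SHIFT_LVL_3 = 16 + 5 = 21
    XEN_PT_SHIFT_LVL_2 = 21 + 9 = 30
    XEN_PT_SHIFT_LVL_1 = 30 + 9 = 39

so level 1 indexes va[51:39] and level 4 indexes va[20:16], leaving
va[15:0] as the byte offset within the page.)
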
> +
> +#define XEN_PT_ENTRIES_LOG2_LVL(lvl) (XEN_PT_ENTRIES_LOG2_LVL_##lvl)
> +#define XEN_PT_SHIFT_LVL(lvl)        (XEN_PT_SHIFT_LVL_##lvl)
> +#define XEN_PT_ENTRIES_LVL(lvl)      (1UL << XEN_PT_ENTRIES_LOG2_LVL(lvl))
> +#define XEN_PT_SIZE_LVL(lvl)         (sizeof(uint64_t) * XEN_PT_ENTRIES_LVL(lvl))
> +#define XEN_PT_MASK_LVL(lvl)         (XEN_PT_ENTRIES_LVL(lvl) - 1)
> +#define XEN_PT_INDEX_LVL(lvl, va)    (((va) >> XEN_PT_SHIFT_LVL(lvl)) & XEN_PT_MASK_LVL(lvl))
> +
> +/*
> + * Calculate the index of the provided virtual address in the provided
> + * page table struct
> + */
> +#define pt_index(pt, va) _Generic((pt), \
> +    struct lvl1_pd * : XEN_PT_INDEX_LVL(1, (va)), \
> +    struct lvl2_pd * : XEN_PT_INDEX_LVL(2, (va)), \
> +    struct lvl3_pd * : XEN_PT_INDEX_LVL(3, (va)), \
> +    struct lvl4_pt * : XEN_PT_INDEX_LVL(4, (va)))
> +
> +#define pt_entry(pt, va) (&((pt)->entries[pt_index((pt), (va))]))
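
(For the archives, a hypothetical use, not taken from the patch, of
how _Generic lets the same spelling work at every level:

    struct lvl1_pd *root;              /* root page directory */
    struct lvl2_pd *l2;                /* next-level directory */
    pde_t *pde1 = pt_entry(root, va);  /* XEN_PT_INDEX_LVL(1, va) */
    pde_t *pde2 = pt_entry(l2, va);    /* XEN_PT_INDEX_LVL(2, va) */

Passing any other pointer type fails to compile, which is a nice
property for table-walking code.)
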
> +
> +typedef struct
> +{
> +    __be64 pde;
> +} pde_t;
> +
> +typedef struct
> +{
> +    __be64 pte;
> +} pte_t;
> +
> +struct lvl1_pd
> +{
> +    pde_t entries[XEN_PT_ENTRIES_LVL(1)];
> +} __aligned(XEN_PT_SIZE_LVL(1));
> +
> +struct lvl2_pd
> +{
> +    pde_t entries[XEN_PT_ENTRIES_LVL(2)];
> +} __aligned(XEN_PT_SIZE_LVL(2));
> +
> +struct lvl3_pd
> +{
> +    pde_t entries[XEN_PT_ENTRIES_LVL(3)];
> +} __aligned(XEN_PT_SIZE_LVL(3));
> +
> +struct lvl4_pt
> +{
> +    pte_t entries[XEN_PT_ENTRIES_LVL(4)];
> +} __aligned(XEN_PT_SIZE_LVL(4));
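
(The table sizes, and hence the natural alignments requested above,
work out to:

    struct lvl1_pd: 8192 * 8 bytes = 64K
    struct lvl2_pd:  512 * 8 bytes =  4K
    struct lvl3_pd:  512 * 8 bytes =  4K
    struct lvl4_pt:   32 * 8 bytes = 256 bytes

matching, as I understand it, the radix requirement that each table be
aligned to its own size.)
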
> +
> +static inline pte_t paddr_to_pte(paddr_t paddr, unsigned long flags)
> +{
> +    paddr_t paddr_aligned = paddr & PTE_RPN_MASK;
> +
> +    return (pte_t){ .pte = cpu_to_be64(paddr_aligned | flags | PTE_LEAF) };
> +}
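
(The cpu_to_be64() is what keeps this header usable from a
little-endian build, since the hardware walks the tables in big-endian
layout. A hypothetical leaf mapping, merely to illustrate the flag
plumbing:

    /* Map one 64K page at paddr, Xen read/write (sketch only): */
    *pt_entry(l4, va) = paddr_to_pte(paddr, PTE_XEN_RW);

with l4 being the struct lvl4_pt * covering va's 2MB region.)
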
> +
> +static inline pde_t paddr_to_pde(paddr_t paddr, unsigned long flags, unsigned long nls)

Nit: Overlong line.

> --- a/xen/arch/ppc/include/asm/processor.h
> +++ b/xen/arch/ppc/include/asm/processor.h
> @@ -133,6 +133,40 @@ struct cpu_user_regs
>      uint32_t entry_vector;
>  };
> 
> +static __inline__ void sync(void)
> +{
> +    asm volatile ( "sync" );
> +}
> +
> +static __inline__ void isync(void)
> +{
> +    asm volatile ( "isync" );
> +}

Why __inline__, not inline, ...

> +static inline unsigned long mfmsr(void)

... as you have here and below?

Jan
