[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [Xen-devel] [PATCH v2 5/7] xen/arm: Gather all TLB flush helpers in tlbflush.h



On Wed, 8 May 2019, Julien Grall wrote:
> At the moment, TLB helpers are scattered in 2 headers: page.h (for
> Xen TLB helpers) and tlbflush.h (for guest TLB helpers).
> 
> This patch is gathering all of them in tlbflush. This will help to
> uniformize and update the logic of the helpers in follow-up patches.
> 
> Signed-off-by: Julien Grall <julien.grall@xxxxxxx>
> Reviewed-by: Andrii Anisov <andrii_anisov@xxxxxxxx>

Acked-by: Stefano Stabellini <sstabellini@xxxxxxxxxx>


> ---
>     Changes in v2:
>         - Add Andrii's reviewed-by
> ---
>  xen/include/asm-arm/arm32/flushtlb.h | 22 +++++++++++++++++++++
>  xen/include/asm-arm/arm32/page.h     | 22 ---------------------
>  xen/include/asm-arm/arm64/flushtlb.h | 23 ++++++++++++++++++++++
>  xen/include/asm-arm/arm64/page.h     | 23 ----------------------
>  xen/include/asm-arm/flushtlb.h       | 38 ++++++++++++++++++++++++++++++++++++
>  xen/include/asm-arm/page.h           | 38 ------------------------------------
>  6 files changed, 83 insertions(+), 83 deletions(-)
> 
> diff --git a/xen/include/asm-arm/arm32/flushtlb.h b/xen/include/asm-arm/arm32/flushtlb.h
> index 22e100eccf..b629db61cb 100644
> --- a/xen/include/asm-arm/arm32/flushtlb.h
> +++ b/xen/include/asm-arm/arm32/flushtlb.h
> @@ -45,6 +45,28 @@ static inline void flush_all_guests_tlb(void)
>      isb();
>  }
>  
> +/* Flush all hypervisor mappings from the TLB of the local processor. */
> +static inline void flush_xen_tlb_local(void)
> +{
> +    asm volatile("dsb;" /* Ensure preceding are visible */
> +                 CMD_CP32(TLBIALLH)
> +                 "dsb;" /* Ensure completion of the TLB flush */
> +                 "isb;"
> +                 : : : "memory");
> +}
> +
> +/* Flush TLB of local processor for address va. */
> +static inline void __flush_xen_tlb_one_local(vaddr_t va)
> +{
> +    asm volatile(STORE_CP32(0, TLBIMVAH) : : "r" (va) : "memory");
> +}
> +
> +/* Flush TLB of all processors in the inner-shareable domain for address va. */
> +static inline void __flush_xen_tlb_one(vaddr_t va)
> +{
> +    asm volatile(STORE_CP32(0, TLBIMVAHIS) : : "r" (va) : "memory");
> +}
> +
>  #endif /* __ASM_ARM_ARM32_FLUSHTLB_H__ */
>  /*
>   * Local variables:
> diff --git a/xen/include/asm-arm/arm32/page.h b/xen/include/asm-arm/arm32/page.h
> index 0b41b9214b..715a9e4fef 100644
> --- a/xen/include/asm-arm/arm32/page.h
> +++ b/xen/include/asm-arm/arm32/page.h
> @@ -61,28 +61,6 @@ static inline void invalidate_icache_local(void)
>      isb();                      /* Synchronize fetched instruction stream. */
>  }
>  
> -/* Flush all hypervisor mappings from the TLB of the local processor. */
> -static inline void flush_xen_tlb_local(void)
> -{
> -    asm volatile("dsb;" /* Ensure preceding are visible */
> -                 CMD_CP32(TLBIALLH)
> -                 "dsb;" /* Ensure completion of the TLB flush */
> -                 "isb;"
> -                 : : : "memory");
> -}
> -
> -/* Flush TLB of local processor for address va. */
> -static inline void __flush_xen_tlb_one_local(vaddr_t va)
> -{
> -    asm volatile(STORE_CP32(0, TLBIMVAH) : : "r" (va) : "memory");
> -}
> -
> -/* Flush TLB of all processors in the inner-shareable domain for address va. */
> -static inline void __flush_xen_tlb_one(vaddr_t va)
> -{
> -    asm volatile(STORE_CP32(0, TLBIMVAHIS) : : "r" (va) : "memory");
> -}
> -
>  /* Ask the MMU to translate a VA for us */
>  static inline uint64_t __va_to_par(vaddr_t va)
>  {
> diff --git a/xen/include/asm-arm/arm64/flushtlb.h b/xen/include/asm-arm/arm64/flushtlb.h
> index adbbd5c522..2fed34b2ec 100644
> --- a/xen/include/asm-arm/arm64/flushtlb.h
> +++ b/xen/include/asm-arm/arm64/flushtlb.h
> @@ -45,6 +45,29 @@ static inline void flush_all_guests_tlb(void)
>          : : : "memory");
>  }
>  
> +/* Flush all hypervisor mappings from the TLB of the local processor. */
> +static inline void flush_xen_tlb_local(void)
> +{
> +    asm volatile (
> +        "dsb    sy;"                    /* Ensure visibility of PTE writes */
> +        "tlbi   alle2;"                 /* Flush hypervisor TLB */
> +        "dsb    sy;"                    /* Ensure completion of TLB flush */
> +        "isb;"
> +        : : : "memory");
> +}
> +
> +/* Flush TLB of local processor for address va. */
> +static inline void  __flush_xen_tlb_one_local(vaddr_t va)
> +{
> +    asm volatile("tlbi vae2, %0;" : : "r" (va>>PAGE_SHIFT) : "memory");
> +}
> +
> +/* Flush TLB of all processors in the inner-shareable domain for address va. */
> +static inline void __flush_xen_tlb_one(vaddr_t va)
> +{
> +    asm volatile("tlbi vae2is, %0;" : : "r" (va>>PAGE_SHIFT) : "memory");
> +}
> +
>  #endif /* __ASM_ARM_ARM64_FLUSHTLB_H__ */
>  /*
>   * Local variables:
> diff --git a/xen/include/asm-arm/arm64/page.h b/xen/include/asm-arm/arm64/page.h
> index 31d04ecf76..0cba266373 100644
> --- a/xen/include/asm-arm/arm64/page.h
> +++ b/xen/include/asm-arm/arm64/page.h
> @@ -45,29 +45,6 @@ static inline void invalidate_icache_local(void)
>      isb();
>  }
>  
> -/* Flush all hypervisor mappings from the TLB of the local processor. */
> -static inline void flush_xen_tlb_local(void)
> -{
> -    asm volatile (
> -        "dsb    sy;"                    /* Ensure visibility of PTE writes */
> -        "tlbi   alle2;"                 /* Flush hypervisor TLB */
> -        "dsb    sy;"                    /* Ensure completion of TLB flush */
> -        "isb;"
> -        : : : "memory");
> -}
> -
> -/* Flush TLB of local processor for address va. */
> -static inline void  __flush_xen_tlb_one_local(vaddr_t va)
> -{
> -    asm volatile("tlbi vae2, %0;" : : "r" (va>>PAGE_SHIFT) : "memory");
> -}
> -
> -/* Flush TLB of all processors in the inner-shareable domain for address va. */
> -static inline void __flush_xen_tlb_one(vaddr_t va)
> -{
> -    asm volatile("tlbi vae2is, %0;" : : "r" (va>>PAGE_SHIFT) : "memory");
> -}
> -
>  /* Ask the MMU to translate a VA for us */
>  static inline uint64_t __va_to_par(vaddr_t va)
>  {
> diff --git a/xen/include/asm-arm/flushtlb.h b/xen/include/asm-arm/flushtlb.h
> index 83ff9fa8b3..ab1aae5c90 100644
> --- a/xen/include/asm-arm/flushtlb.h
> +++ b/xen/include/asm-arm/flushtlb.h
> @@ -28,6 +28,44 @@ static inline void page_set_tlbflush_timestamp(struct page_info *page)
>  /* Flush specified CPUs' TLBs */
>  void flush_tlb_mask(const cpumask_t *mask);
>  
> +/*
> + * Flush a range of VA's hypervisor mappings from the TLB of the local
> + * processor.
> + */
> +static inline void flush_xen_tlb_range_va_local(vaddr_t va,
> +                                                unsigned long size)
> +{
> +    vaddr_t end = va + size;
> +
> +    dsb(sy); /* Ensure preceding are visible */
> +    while ( va < end )
> +    {
> +        __flush_xen_tlb_one_local(va);
> +        va += PAGE_SIZE;
> +    }
> +    dsb(sy); /* Ensure completion of the TLB flush */
> +    isb();
> +}
> +
> +/*
> + * Flush a range of VA's hypervisor mappings from the TLB of all
> + * processors in the inner-shareable domain.
> + */
> +static inline void flush_xen_tlb_range_va(vaddr_t va,
> +                                          unsigned long size)
> +{
> +    vaddr_t end = va + size;
> +
> +    dsb(sy); /* Ensure preceding are visible */
> +    while ( va < end )
> +    {
> +        __flush_xen_tlb_one(va);
> +        va += PAGE_SIZE;
> +    }
> +    dsb(sy); /* Ensure completion of the TLB flush */
> +    isb();
> +}
> +
>  #endif /* __ASM_ARM_FLUSHTLB_H__ */
>  /*
>   * Local variables:
> diff --git a/xen/include/asm-arm/page.h b/xen/include/asm-arm/page.h
> index 195345e24a..2bcdb0f1a5 100644
> --- a/xen/include/asm-arm/page.h
> +++ b/xen/include/asm-arm/page.h
> @@ -233,44 +233,6 @@ static inline int clean_and_invalidate_dcache_va_range
>              : : "r" (_p), "m" (*_p));                                   \
>  } while (0)
>  
> -/*
> - * Flush a range of VA's hypervisor mappings from the TLB of the local
> - * processor.
> - */
> -static inline void flush_xen_tlb_range_va_local(vaddr_t va,
> -                                                unsigned long size)
> -{
> -    vaddr_t end = va + size;
> -
> -    dsb(sy); /* Ensure preceding are visible */
> -    while ( va < end )
> -    {
> -        __flush_xen_tlb_one_local(va);
> -        va += PAGE_SIZE;
> -    }
> -    dsb(sy); /* Ensure completion of the TLB flush */
> -    isb();
> -}
> -
> -/*
> - * Flush a range of VA's hypervisor mappings from the TLB of all
> - * processors in the inner-shareable domain.
> - */
> -static inline void flush_xen_tlb_range_va(vaddr_t va,
> -                                          unsigned long size)
> -{
> -    vaddr_t end = va + size;
> -
> -    dsb(sy); /* Ensure preceding are visible */
> -    while ( va < end )
> -    {
> -        __flush_xen_tlb_one(va);
> -        va += PAGE_SIZE;
> -    }
> -    dsb(sy); /* Ensure completion of the TLB flush */
> -    isb();
> -}
> -
>  /* Flush the dcache for an entire page. */
>  void flush_page_to_ram(unsigned long mfn, bool sync_icache);
>  
> -- 
> 2.11.0
> 

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel

 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.