[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [PATCH v4 08/12] arm64: mm: replace TIF_LAZY_MMU with in_lazy_mmu_mode()



On 29/10/2025 10:09, Kevin Brodsky wrote:
> The generic lazy_mmu layer now tracks whether a task is in lazy MMU
> mode. As a result we no longer need a TIF flag for that purpose -
> let's use the new in_lazy_mmu_mode() helper instead.
> 
> Signed-off-by: Kevin Brodsky <kevin.brodsky@xxxxxxx>
> ---
>  arch/arm64/include/asm/pgtable.h     | 16 +++-------------
>  arch/arm64/include/asm/thread_info.h |  3 +--
>  2 files changed, 4 insertions(+), 15 deletions(-)
> 
> diff --git a/arch/arm64/include/asm/pgtable.h 
> b/arch/arm64/include/asm/pgtable.h
> index 535435248923..61ca88f94551 100644
> --- a/arch/arm64/include/asm/pgtable.h
> +++ b/arch/arm64/include/asm/pgtable.h
> @@ -62,30 +62,21 @@ static inline void emit_pte_barriers(void)
>  
>  static inline void queue_pte_barriers(void)
>  {
> -     unsigned long flags;
> -
>       if (in_interrupt()) {
>               emit_pte_barriers();
>               return;
>       }
>  
> -     flags = read_thread_flags();
> -
> -     if (flags & BIT(TIF_LAZY_MMU)) {
> -             /* Avoid the atomic op if already set. */
> -             if (!(flags & BIT(TIF_LAZY_MMU_PENDING)))
> -                     set_thread_flag(TIF_LAZY_MMU_PENDING);
> -     } else {
> +     if (in_lazy_mmu_mode())
> +             test_and_set_thread_flag(TIF_LAZY_MMU_PENDING);

This removes the optimization that skips the atomic set operation when the bit
is already set. I think that optimization should remain.

> +     else
>               emit_pte_barriers();
> -     }
>  }
>  
>  static inline void arch_enter_lazy_mmu_mode(void)
>  {
>       if (in_interrupt())
>               return;

Why are you keeping this test? Surely it can go?

> -
> -     set_thread_flag(TIF_LAZY_MMU);
>  }
>  
>  static inline void arch_flush_lazy_mmu_mode(void)
> @@ -103,7 +94,6 @@ static inline void arch_leave_lazy_mmu_mode(void)
>               return;
>  
>       arch_flush_lazy_mmu_mode();
> -     clear_thread_flag(TIF_LAZY_MMU);
>  }
>  
>  #ifdef CONFIG_TRANSPARENT_HUGEPAGE
> diff --git a/arch/arm64/include/asm/thread_info.h 
> b/arch/arm64/include/asm/thread_info.h
> index f241b8601ebd..4ff8da0767d9 100644
> --- a/arch/arm64/include/asm/thread_info.h
> +++ b/arch/arm64/include/asm/thread_info.h
> @@ -84,8 +84,7 @@ void arch_setup_new_exec(void);
>  #define TIF_SME_VL_INHERIT   28      /* Inherit SME vl_onexec across exec */
>  #define TIF_KERNEL_FPSTATE   29      /* Task is in a kernel mode FPSIMD 
> section */
>  #define TIF_TSC_SIGSEGV              30      /* SIGSEGV on counter-timer 
> access */
> -#define TIF_LAZY_MMU         31      /* Task in lazy mmu mode */
> -#define TIF_LAZY_MMU_PENDING 32      /* Ops pending for lazy mmu mode exit */
> +#define TIF_LAZY_MMU_PENDING 31      /* Ops pending for lazy mmu mode exit */
>  
>  #define _TIF_SIGPENDING              (1 << TIF_SIGPENDING)
>  #define _TIF_NEED_RESCHED    (1 << TIF_NEED_RESCHED)




 


Rackspace

Lists.xenproject.org is hosted by RackSpace, which monitors our
servers 24x7x365 and backs them with RackSpace's Fanatical Support®.