[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [PATCH v4 01/12] powerpc/64s: Do not re-activate batched TLB flush



On 29/10/2025 10:08, Kevin Brodsky wrote:
> From: Alexander Gordeev <agordeev@xxxxxxxxxxxxx>
> 
> Since commit b9ef323ea168 ("powerpc/64s: Disable preemption in hash
> lazy mmu mode") a task cannot be preempted while in lazy MMU mode.
> Therefore, the batch re-activation code is never called, so remove it.
> 
> Signed-off-by: Alexander Gordeev <agordeev@xxxxxxxxxxxxx>
> Signed-off-by: Kevin Brodsky <kevin.brodsky@xxxxxxx>

Reviewed-by: Ryan Roberts <ryan.roberts@xxxxxxx>

> ---
>  arch/powerpc/include/asm/thread_info.h |  2 --
>  arch/powerpc/kernel/process.c          | 25 -------------------------
>  2 files changed, 27 deletions(-)
> 
> diff --git a/arch/powerpc/include/asm/thread_info.h 
> b/arch/powerpc/include/asm/thread_info.h
> index b0f200aba2b3..97f35f9b1a96 100644
> --- a/arch/powerpc/include/asm/thread_info.h
> +++ b/arch/powerpc/include/asm/thread_info.h
> @@ -154,12 +154,10 @@ void arch_setup_new_exec(void);
>  /* Don't move TLF_NAPPING without adjusting the code in entry_32.S */
>  #define TLF_NAPPING          0       /* idle thread enabled NAP mode */
>  #define TLF_SLEEPING         1       /* suspend code enabled SLEEP mode */
> -#define TLF_LAZY_MMU         3       /* tlb_batch is active */
>  #define TLF_RUNLATCH         4       /* Is the runlatch enabled? */
>  
>  #define _TLF_NAPPING         (1 << TLF_NAPPING)
>  #define _TLF_SLEEPING                (1 << TLF_SLEEPING)
> -#define _TLF_LAZY_MMU                (1 << TLF_LAZY_MMU)
>  #define _TLF_RUNLATCH                (1 << TLF_RUNLATCH)
>  
>  #ifndef __ASSEMBLER__
> diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
> index eb23966ac0a9..9237dcbeee4a 100644
> --- a/arch/powerpc/kernel/process.c
> +++ b/arch/powerpc/kernel/process.c
> @@ -1281,9 +1281,6 @@ struct task_struct *__switch_to(struct task_struct 
> *prev,
>  {
>       struct thread_struct *new_thread, *old_thread;
>       struct task_struct *last;
> -#ifdef CONFIG_PPC_64S_HASH_MMU
> -     struct ppc64_tlb_batch *batch;
> -#endif
>  
>       new_thread = &new->thread;
>       old_thread = &current->thread;
> @@ -1291,14 +1288,6 @@ struct task_struct *__switch_to(struct task_struct 
> *prev,
>       WARN_ON(!irqs_disabled());
>  
>  #ifdef CONFIG_PPC_64S_HASH_MMU
> -     batch = this_cpu_ptr(&ppc64_tlb_batch);
> -     if (batch->active) {
> -             current_thread_info()->local_flags |= _TLF_LAZY_MMU;
> -             if (batch->index)
> -                     __flush_tlb_pending(batch);
> -             batch->active = 0;
> -     }
> -
>       /*
>        * On POWER9 the copy-paste buffer can only paste into
>        * foreign real addresses, so unprivileged processes can not
> @@ -1369,20 +1358,6 @@ struct task_struct *__switch_to(struct task_struct 
> *prev,
>        */
>  
>  #ifdef CONFIG_PPC_BOOK3S_64
> -#ifdef CONFIG_PPC_64S_HASH_MMU
> -     /*
> -      * This applies to a process that was context switched while inside
> -      * arch_enter_lazy_mmu_mode(), to re-activate the batch that was
> -      * deactivated above, before _switch(). This will never be the case
> -      * for new tasks.
> -      */
> -     if (current_thread_info()->local_flags & _TLF_LAZY_MMU) {
> -             current_thread_info()->local_flags &= ~_TLF_LAZY_MMU;
> -             batch = this_cpu_ptr(&ppc64_tlb_batch);
> -             batch->active = 1;
> -     }
> -#endif
> -
>       /*
>        * Math facilities are masked out of the child MSR in copy_thread.
>        * A new task does not need to restore_math because it will




 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.