
Re: [PATCH v2 1/9] mm/huge_memory: move more common code into insert_pmd()



On Thu, Jul 17, 2025 at 01:52:04PM +0200, David Hildenbrand wrote:
> Let's clean it all further up.
>
> No functional change intended.
>
> Reviewed-by: Oscar Salvador <osalvador@xxxxxxx>
> Reviewed-by: Alistair Popple <apopple@xxxxxxxxxx>
> Signed-off-by: David Hildenbrand <david@xxxxxxxxxx>

Very nice!

Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@xxxxxxxxxx>

> ---
>  mm/huge_memory.c | 72 ++++++++++++++++--------------------------------
>  1 file changed, 24 insertions(+), 48 deletions(-)
>
> diff --git a/mm/huge_memory.c b/mm/huge_memory.c
> index fe17b0a157cda..1178760d2eda4 100644
> --- a/mm/huge_memory.c
> +++ b/mm/huge_memory.c
> @@ -1390,15 +1390,25 @@ struct folio_or_pfn {
>       bool is_folio;
>  };
>
> -static int insert_pmd(struct vm_area_struct *vma, unsigned long addr,
> +static vm_fault_t insert_pmd(struct vm_area_struct *vma, unsigned long addr,
>               pmd_t *pmd, struct folio_or_pfn fop, pgprot_t prot,
> -             bool write, pgtable_t pgtable)
> +             bool write)
>  {
>       struct mm_struct *mm = vma->vm_mm;
> +     pgtable_t pgtable = NULL;
> +     spinlock_t *ptl;
>       pmd_t entry;
>
> -     lockdep_assert_held(pmd_lockptr(mm, pmd));
> +     if (addr < vma->vm_start || addr >= vma->vm_end)
> +             return VM_FAULT_SIGBUS;
>
> +     if (arch_needs_pgtable_deposit()) {
> +             pgtable = pte_alloc_one(vma->vm_mm);
> +             if (!pgtable)
> +                     return VM_FAULT_OOM;
> +     }
> +
> +     ptl = pmd_lock(mm, pmd);
>       if (!pmd_none(*pmd)) {
>               const unsigned long pfn = fop.is_folio ? folio_pfn(fop.folio) :
>                                         fop.pfn;
> @@ -1406,15 +1416,14 @@ static int insert_pmd(struct vm_area_struct *vma, unsigned long addr,
>               if (write) {
>                       if (pmd_pfn(*pmd) != pfn) {
>                               WARN_ON_ONCE(!is_huge_zero_pmd(*pmd));
> -                             return -EEXIST;
> +                             goto out_unlock;
>                       }
>                       entry = pmd_mkyoung(*pmd);
>                       entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
>                       if (pmdp_set_access_flags(vma, addr, pmd, entry, 1))
>                               update_mmu_cache_pmd(vma, addr, pmd);
>               }
> -
> -             return -EEXIST;
> +             goto out_unlock;
>       }
>
>       if (fop.is_folio) {
> @@ -1435,11 +1444,17 @@ static int insert_pmd(struct vm_area_struct *vma, unsigned long addr,
>       if (pgtable) {
>               pgtable_trans_huge_deposit(mm, pmd, pgtable);
>               mm_inc_nr_ptes(mm);
> +             pgtable = NULL;
>       }
>
>       set_pmd_at(mm, addr, pmd, entry);
>       update_mmu_cache_pmd(vma, addr, pmd);
> -     return 0;
> +
> +out_unlock:
> +     spin_unlock(ptl);
> +     if (pgtable)
> +             pte_free(mm, pgtable);
> +     return VM_FAULT_NOPAGE;
>  }
>
>  /**
> @@ -1461,9 +1476,6 @@ vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, unsigned long pfn,
>       struct folio_or_pfn fop = {
>               .pfn = pfn,
>       };
> -     pgtable_t pgtable = NULL;
> -     spinlock_t *ptl;
> -     int error;
>
>       /*
>        * If we had pmd_special, we could avoid all these restrictions,
> @@ -1475,25 +1487,9 @@ vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, unsigned long pfn,
>                                               (VM_PFNMAP|VM_MIXEDMAP));
>       BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
>
> -     if (addr < vma->vm_start || addr >= vma->vm_end)
> -             return VM_FAULT_SIGBUS;
> -
> -     if (arch_needs_pgtable_deposit()) {
> -             pgtable = pte_alloc_one(vma->vm_mm);
> -             if (!pgtable)
> -                     return VM_FAULT_OOM;
> -     }
> -
>       pfnmap_setup_cachemode_pfn(pfn, &pgprot);
>
> -     ptl = pmd_lock(vma->vm_mm, vmf->pmd);
> -     error = insert_pmd(vma, addr, vmf->pmd, fop, pgprot, write,
> -                        pgtable);
> -     spin_unlock(ptl);
> -     if (error && pgtable)
> -             pte_free(vma->vm_mm, pgtable);
> -
> -     return VM_FAULT_NOPAGE;
> +     return insert_pmd(vma, addr, vmf->pmd, fop, pgprot, write);
>  }
>  EXPORT_SYMBOL_GPL(vmf_insert_pfn_pmd);
>
> @@ -1502,35 +1498,15 @@ vm_fault_t vmf_insert_folio_pmd(struct vm_fault *vmf, struct folio *folio,
>  {
>       struct vm_area_struct *vma = vmf->vma;
>       unsigned long addr = vmf->address & PMD_MASK;
> -     struct mm_struct *mm = vma->vm_mm;
>       struct folio_or_pfn fop = {
>               .folio = folio,
>               .is_folio = true,
>       };
> -     spinlock_t *ptl;
> -     pgtable_t pgtable = NULL;
> -     int error;
> -
> -     if (addr < vma->vm_start || addr >= vma->vm_end)
> -             return VM_FAULT_SIGBUS;
>
>       if (WARN_ON_ONCE(folio_order(folio) != PMD_ORDER))
>               return VM_FAULT_SIGBUS;
>
> -     if (arch_needs_pgtable_deposit()) {
> -             pgtable = pte_alloc_one(vma->vm_mm);
> -             if (!pgtable)
> -                     return VM_FAULT_OOM;
> -     }
> -
> -     ptl = pmd_lock(mm, vmf->pmd);
> -     error = insert_pmd(vma, addr, vmf->pmd, fop, vma->vm_page_prot,
> -                        write, pgtable);
> -     spin_unlock(ptl);
> -     if (error && pgtable)
> -             pte_free(mm, pgtable);
> -
> -     return VM_FAULT_NOPAGE;
> +     return insert_pmd(vma, addr, vmf->pmd, fop, vma->vm_page_prot, write);
>  }
>  EXPORT_SYMBOL_GPL(vmf_insert_folio_pmd);
>
> --
> 2.50.1
>



 

