Re: [PATCH 3/6] hugetlb: make hugetlb_fault_mutex_hash() take PAGE_SIZE index
From: Usama Arif
Date: Fri Apr 10 2026 - 07:34:32 EST
On Thu, 9 Apr 2026 17:41:54 -0600 Jane Chu <jane.chu@xxxxxxxxxx> wrote:
> hugetlb_fault_mutex_hash() is used to serialize faults and page cache
> operations on the same hugetlb file offset. The helper currently expects
> its index argument in hugetlb page granularity, so callers have to
> open-code conversions from the PAGE_SIZE-based indices commonly used
> in the rest of MM helpers.
>
> Change hugetlb_fault_mutex_hash() to take a PAGE_SIZE-based index
> instead, and perform the hugetlb-granularity conversion inside the helper.
> Update all callers accordingly.
>
> This makes the helper interface consistent with filemap_get_folio()
> and linear_page_index(), while preserving the same lock selection for
> a given hugetlb file offset.
>
> Signed-off-by: Jane Chu <jane.chu@xxxxxxxxxx>
> ---
> fs/hugetlbfs/inode.c | 19 ++++++++++---------
> mm/hugetlb.c | 28 +++++++++++++++++++---------
> mm/memfd.c | 11 ++++++-----
> mm/userfaultfd.c | 7 +++----
> 4 files changed, 38 insertions(+), 27 deletions(-)
>
> diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
> index cf79fb830377..e24e9bf54e14 100644
> --- a/fs/hugetlbfs/inode.c
> +++ b/fs/hugetlbfs/inode.c
> @@ -575,7 +575,7 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
> struct address_space *mapping = &inode->i_data;
> const pgoff_t end = lend >> PAGE_SHIFT;
> struct folio_batch fbatch;
> - pgoff_t next, index;
> + pgoff_t next, idx;
> int i, freed = 0;
> bool truncate_op = (lend == LLONG_MAX);
>
> @@ -586,15 +586,15 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
> struct folio *folio = fbatch.folios[i];
> u32 hash = 0;
>
> - index = folio->index >> huge_page_order(h);
> - hash = hugetlb_fault_mutex_hash(mapping, index);
> + hash = hugetlb_fault_mutex_hash(mapping, folio->index);
> mutex_lock(&hugetlb_fault_mutex_table[hash]);
>
> /*
> * Remove folio that was part of folio_batch.
> */
> + idx = folio->index >> huge_page_order(h);
> remove_inode_single_folio(h, inode, mapping, folio,
> - index, truncate_op);
> + idx, truncate_op);
> freed++;
>
> mutex_unlock(&hugetlb_fault_mutex_table[hash]);
> @@ -734,7 +734,7 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
> struct mm_struct *mm = current->mm;
> loff_t hpage_size = huge_page_size(h);
> unsigned long hpage_shift = huge_page_shift(h);
> - pgoff_t start, index, end;
> + pgoff_t start, end, idx, index;
> int error;
> u32 hash;
>
> @@ -774,7 +774,7 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
> vm_flags_init(&pseudo_vma, VM_HUGETLB | VM_MAYSHARE | VM_SHARED);
> pseudo_vma.vm_file = file;
>
> - for (index = start; index < end; index++) {
> + for (idx = start; idx < end; idx++) {
> /*
> * This is supposed to be the vaddr where the page is being
> * faulted in, but we have no vaddr here.
> @@ -794,14 +794,15 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
> }
>
> /* addr is the offset within the file (zero based) */
> - addr = index * hpage_size;
> + addr = idx * hpage_size;
>
> /* mutex taken here, fault path and hole punch */
> + index = idx << huge_page_order(h);
> hash = hugetlb_fault_mutex_hash(mapping, index);
> mutex_lock(&hugetlb_fault_mutex_table[hash]);
>
> /* See if already present in mapping to avoid alloc/free */
> - folio = filemap_get_folio(mapping, index << huge_page_order(h));
> + folio = filemap_get_folio(mapping, index);
> if (!IS_ERR(folio)) {
> folio_put(folio);
> mutex_unlock(&hugetlb_fault_mutex_table[hash]);
> @@ -824,7 +825,7 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
> }
> folio_zero_user(folio, addr);
> __folio_mark_uptodate(folio);
> - error = hugetlb_add_to_page_cache(folio, mapping, index);
> + error = hugetlb_add_to_page_cache(folio, mapping, idx);
> if (unlikely(error)) {
> restore_reserve_on_error(h, &pseudo_vma, addr, folio);
> folio_put(folio);
> diff --git a/mm/hugetlb.c b/mm/hugetlb.c
> index 38b39eaf46cc..9d5ae1f87850 100644
> --- a/mm/hugetlb.c
> +++ b/mm/hugetlb.c
> @@ -5515,7 +5515,7 @@ static vm_fault_t hugetlb_wp(struct vm_fault *vmf)
> */
> if (cow_from_owner) {
> struct address_space *mapping = vma->vm_file->f_mapping;
> - pgoff_t idx;
> + pgoff_t index;
> u32 hash;
>
> folio_put(old_folio);
> @@ -5528,8 +5528,9 @@ static vm_fault_t hugetlb_wp(struct vm_fault *vmf)
> *
> * Reacquire both after unmap operation.
> */
> - idx = vma_hugecache_offset(h, vma, vmf->address);
> - hash = hugetlb_fault_mutex_hash(mapping, idx);
> + index = linear_page_index(vma, vmf->address);
> + hash = hugetlb_fault_mutex_hash(mapping, index);
> +
> hugetlb_vma_unlock_read(vma);
> mutex_unlock(&hugetlb_fault_mutex_table[hash]);
>
> @@ -5664,6 +5665,10 @@ static inline vm_fault_t hugetlb_handle_userfault(struct vm_fault *vmf,
> unsigned long reason)
> {
> u32 hash;
> + pgoff_t index;
> +
> + index = linear_page_index((const struct vm_area_struct *)vmf, vmf->address);
This is supposed to be linear_page_index(vmf->vma, vmf->address), right?
Casting the struct vm_fault pointer itself to struct vm_area_struct * will
pass garbage to linear_page_index() — the first argument should be the
vma embedded in the fault, i.e. vmf->vma.