[PATCH 5/6] hugetlb: make hugetlb_add_to_page_cache() use PAGE_SIZE-based index

From: Jane Chu

Date: Thu Apr 09 2026 - 19:44:50 EST


hugetlb_add_to_page_cache() currently takes a parameter named 'idx',
but internally converts it from hugetlb page units into PAGE_SIZE-based
page-cache index units before calling __filemap_add_folio().

Make hugetlb_add_to_page_cache() take a PAGE_SIZE-based index directly
and update its callers accordingly. This removes the internal shift,
keeps the index units consistent with filemap_lock_folio() and
__filemap_add_folio(), and simplifies the surrounding code. Note that
the i_size bounds check in hugetlb_mfill_atomic_pte() is likewise
converted from huge_page_shift() to PAGE_SHIFT so that both sides of
the comparison remain in PAGE_SIZE units.

Signed-off-by: Jane Chu <jane.chu@xxxxxxxxxx>
---
fs/hugetlbfs/inode.c | 2 +-
include/linux/hugetlb.h | 2 +-
mm/hugetlb.c | 21 ++++++++-------------
mm/memfd.c | 2 +-
4 files changed, 11 insertions(+), 16 deletions(-)

diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index e24e9bf54e14..a72d46ff7980 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -825,7 +825,7 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
}
folio_zero_user(folio, addr);
__folio_mark_uptodate(folio);
- error = hugetlb_add_to_page_cache(folio, mapping, idx);
+ error = hugetlb_add_to_page_cache(folio, mapping, index);
if (unlikely(error)) {
restore_reserve_on_error(h, &pseudo_vma, addr, folio);
folio_put(folio);
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 71691a2b6855..a51a5e12859c 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -713,7 +713,7 @@ struct folio *alloc_hugetlb_folio_reserve(struct hstate *h, int preferred_nid,
nodemask_t *nmask, gfp_t gfp_mask);

int hugetlb_add_to_page_cache(struct folio *folio, struct address_space *mapping,
- pgoff_t idx);
+ pgoff_t index);
void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma,
unsigned long address, struct folio *folio);

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 138e5ecf818e..47ef41b6fb2e 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -5625,15 +5625,14 @@ bool hugetlbfs_pagecache_present(struct hstate *h,
}

int hugetlb_add_to_page_cache(struct folio *folio, struct address_space *mapping,
- pgoff_t idx)
+ pgoff_t index)
{
struct inode *inode = mapping->host;
struct hstate *h = hstate_inode(inode);
int err;

- idx <<= huge_page_order(h);
__folio_set_locked(folio);
- err = __filemap_add_folio(mapping, folio, idx, GFP_KERNEL, NULL);
+ err = __filemap_add_folio(mapping, folio, index, GFP_KERNEL, NULL);

if (unlikely(err)) {
__folio_clear_locked(folio);
@@ -5724,7 +5723,7 @@ static vm_fault_t hugetlb_no_page(struct address_space *mapping,
* before we get page_table_lock.
*/
new_folio = false;
- folio = filemap_lock_folio(mapping, vmf->pgoff << huge_page_order(h));
+ folio = filemap_lock_folio(mapping, index);
if (IS_ERR(folio)) {
size = i_size_read(mapping->host) >> huge_page_shift(h);
if (vmf->pgoff >= size)
@@ -5788,8 +5787,7 @@ static vm_fault_t hugetlb_no_page(struct address_space *mapping,
new_folio = true;

if (vma->vm_flags & VM_MAYSHARE) {
- int err = hugetlb_add_to_page_cache(folio, mapping,
- vmf->pgoff);
+ int err = hugetlb_add_to_page_cache(folio, mapping, index);
if (err) {
/*
* err can't be -EEXIST which implies someone
@@ -6173,7 +6171,6 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
uffd_flags_t flags,
struct folio **foliop)
{
- pgoff_t idx;
spinlock_t *ptl;
struct folio *folio;
pte_t _dst_pte, dst_ptep;
@@ -6183,13 +6180,11 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
struct mm_struct *dst_mm = dst_vma->vm_mm;
bool wp_enabled = (flags & MFILL_ATOMIC_WP);
int vm_shared = dst_vma->vm_flags & VM_SHARED;
+ pgoff_t index = linear_page_index(dst_vma, dst_addr);
struct address_space *mapping = dst_vma->vm_file->f_mapping;
bool is_continue = uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE);
int ret = -ENOMEM;

- idx = linear_page_index(dst_vma, dst_addr);
- idx >>= huge_page_order(h);
-
if (uffd_flags_mode_is(flags, MFILL_ATOMIC_POISON)) {
ptl = huge_pte_lock(h, dst_mm, dst_pte);

@@ -6211,7 +6206,7 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte,

if (is_continue) {
ret = -EFAULT;
- folio = filemap_lock_folio(mapping, idx << huge_page_order(h));
+ folio = filemap_lock_folio(mapping, index);
if (IS_ERR(folio))
goto out;
folio_in_pagecache = true;
@@ -6307,7 +6302,7 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
/* Add shared, newly allocated pages to the page cache. */
if (vm_shared && !is_continue) {
ret = -EFAULT;
- if (idx >= (i_size_read(mapping->host) >> huge_page_shift(h)))
+ if (index >= (i_size_read(mapping->host) >> PAGE_SHIFT))
goto out_release_nounlock;

/*
@@ -6316,7 +6311,7 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
* hugetlb_fault_mutex_table that here must be hold by
* the caller.
*/
- ret = hugetlb_add_to_page_cache(folio, mapping, idx);
+ ret = hugetlb_add_to_page_cache(folio, mapping, index);
if (ret)
goto out_release_nounlock;
folio_in_pagecache = true;
diff --git a/mm/memfd.c b/mm/memfd.c
index 911ff8220d05..56c8833c4195 100644
--- a/mm/memfd.c
+++ b/mm/memfd.c
@@ -122,7 +122,7 @@ struct folio *memfd_alloc_folio(struct file *memfd, pgoff_t index)

err = hugetlb_add_to_page_cache(folio,
memfd->f_mapping,
- idx);
+ index);

mutex_unlock(&hugetlb_fault_mutex_table[hash]);

--
2.43.5