Re: [PATCH 1/6] hugetlb: open-code hugetlb folio lookup index conversion
From: jane . chu
Date: Mon Apr 13 2026 - 12:31:40 EST
On 4/13/2026 9:22 AM, Oscar Salvador wrote:
On Thu, Apr 09, 2026 at 05:41:52PM -0600, Jane Chu wrote:
This patch removes `filemap_lock_hugetlb_folio()` and open-codes
the index conversion at each call site, making it explicit when
hugetlb code is translating a hugepage index into the base-page index
expected by `filemap_lock_folio()`. As part of that cleanup,
it also uses a base-page index directly in `hugetlbfs_zero_partial_page()`,
where the byte offset is already page-granular. Overall, the change
makes the indexing model more obvious at the call sites and avoids
hiding the huge-index to base-index conversion inside a helper.
Suggested-by: David Hildenbrand <david@xxxxxxxxxx>
Signed-off-by: Jane Chu <jane.chu@xxxxxxxxxx>
It is kind of funny that most of the patch is s/index/idx noise.
Checking mm/hugetlb* and fs/hugetlbfs/* we do have a mix of index/idx but
I would say that idx predominates, so I am ok with going with that one.
Indeed, the fact that both 'idx' and 'index' can represent either a huge-page index or a base-page index has forced me to deliberately memorize which one means what in each local context. I thought that reserving 'index' for base-page granularity and 'idx' for huge-page granularity could make things easier for readers.
Acked-by: Oscar Salvador <osalvador@xxxxxxx>
thanks,
-jane
---
fs/hugetlbfs/inode.c | 20 ++++++++++----------
include/linux/hugetlb.h | 12 ------------
mm/hugetlb.c | 4 ++--
3 files changed, 12 insertions(+), 24 deletions(-)
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index cd6b22f6e2b1..cf79fb830377 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -242,9 +242,9 @@ static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to)
struct hstate *h = hstate_file(file);
struct address_space *mapping = file->f_mapping;
struct inode *inode = mapping->host;
- unsigned long index = iocb->ki_pos >> huge_page_shift(h);
+ unsigned long idx = iocb->ki_pos >> huge_page_shift(h);
unsigned long offset = iocb->ki_pos & ~huge_page_mask(h);
- unsigned long end_index;
+ unsigned long end_idx;
loff_t isize;
ssize_t retval = 0;
@@ -257,10 +257,10 @@ static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to)
isize = i_size_read(inode);
if (!isize)
break;
- end_index = (isize - 1) >> huge_page_shift(h);
- if (index > end_index)
+ end_idx = (isize - 1) >> huge_page_shift(h);
+ if (idx > end_idx)
break;
- if (index == end_index) {
+ if (idx == end_idx) {
nr = ((isize - 1) & ~huge_page_mask(h)) + 1;
if (nr <= offset)
break;
@@ -268,7 +268,7 @@ static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to)
nr = nr - offset;
/* Find the folio */
- folio = filemap_lock_hugetlb_folio(h, mapping, index);
+ folio = filemap_lock_folio(mapping, idx << huge_page_order(h));
if (IS_ERR(folio)) {
/*
* We have a HOLE, zero out the user-buffer for the
@@ -307,10 +307,10 @@ static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to)
retval = -EFAULT;
break;
}
- index += offset >> huge_page_shift(h);
+ idx += offset >> huge_page_shift(h);
offset &= ~huge_page_mask(h);
}
- iocb->ki_pos = ((loff_t)index << huge_page_shift(h)) + offset;
+ iocb->ki_pos = ((loff_t)idx << huge_page_shift(h)) + offset;
return retval;
}
@@ -652,10 +652,10 @@ static void hugetlbfs_zero_partial_page(struct hstate *h,
loff_t start,
loff_t end)
{
- pgoff_t idx = start >> huge_page_shift(h);
+ pgoff_t index = start >> PAGE_SHIFT;
struct folio *folio;
- folio = filemap_lock_hugetlb_folio(h, mapping, idx);
+ folio = filemap_lock_folio(mapping, index);
if (IS_ERR(folio))
return;
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 9c098a02a09e..c64c6e5e50f5 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -829,12 +829,6 @@ static inline unsigned int blocks_per_huge_page(struct hstate *h)
return huge_page_size(h) / 512;
}
-static inline struct folio *filemap_lock_hugetlb_folio(struct hstate *h,
- struct address_space *mapping, pgoff_t idx)
-{
- return filemap_lock_folio(mapping, idx << huge_page_order(h));
-}
-
#include <asm/hugetlb.h>
#ifndef is_hugepage_only_range
@@ -1106,12 +1100,6 @@ static inline struct hugepage_subpool *hugetlb_folio_subpool(struct folio *folio
return NULL;
}
-static inline struct folio *filemap_lock_hugetlb_folio(struct hstate *h,
- struct address_space *mapping, pgoff_t idx)
-{
- return NULL;
-}
-
static inline int isolate_or_dissolve_huge_folio(struct folio *folio,
struct list_head *list)
{
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index a786034ac95c..38b39eaf46cc 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -5724,7 +5724,7 @@ static vm_fault_t hugetlb_no_page(struct address_space *mapping,
* before we get page_table_lock.
*/
new_folio = false;
- folio = filemap_lock_hugetlb_folio(h, mapping, vmf->pgoff);
+ folio = filemap_lock_folio(mapping, vmf->pgoff << huge_page_order(h));
if (IS_ERR(folio)) {
size = i_size_read(mapping->host) >> huge_page_shift(h);
if (vmf->pgoff >= size)
@@ -6208,7 +6208,7 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
if (is_continue) {
ret = -EFAULT;
- folio = filemap_lock_hugetlb_folio(h, mapping, idx);
+ folio = filemap_lock_folio(mapping, idx << huge_page_order(h));
if (IS_ERR(folio))
goto out;
folio_in_pagecache = true;
--
2.43.5