[PATCH 2/2] mm: mincore: use folio_pte_batch() to batch process large folios

From: Baolin Wang
Date: Tue Mar 25 2025 - 23:40:11 EST


When I tested the mincore() syscall, I observed that it takes longer with
64K mTHP enabled on my Arm64 server. The reason is that mincore_pte_range()
still checks each PTE individually, even when the PTEs are contiguous,
which is not efficient.

Thus we can use folio_pte_batch() to get the number of present contiguous
PTEs in one batch, which improves the performance. I tested the mincore()
syscall with 1G of anonymous memory populated with 64K mTHP, and observed
an obvious performance improvement:

		w/o patch	w/ patch	changes
		6022us		1115us		+81%
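For reference, the measurement can be reproduced with a userspace sketch
along these lines (not part of the patch; the memset() population, the
gettimeofday() timing and the 1G size are illustrative assumptions, and the
64K mTHP policy has to be enabled via sysfs beforehand):

/* Hypothetical reproduction sketch, not part of this patch. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/time.h>
#include <unistd.h>

int main(void)
{
	size_t len = 1UL << 30;		/* 1G of anonymous memory */
	size_t nr_pages = len / sysconf(_SC_PAGESIZE);
	unsigned char *vec = malloc(nr_pages);
	struct timeval t1, t2;
	char *buf;

	buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (buf == MAP_FAILED || !vec)
		return 1;

	memset(buf, 1, len);		/* fault in, allowing mTHP allocation */

	gettimeofday(&t1, NULL);
	if (mincore(buf, len, vec))
		perror("mincore");
	gettimeofday(&t2, NULL);

	printf("mincore() took %ld us\n",
	       (t2.tv_sec - t1.tv_sec) * 1000000L + (t2.tv_usec - t1.tv_usec));
	return 0;
}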

Moreover, I also tested mincore() with mTHP/THP disabled, and did not
see any obvious regression.

Signed-off-by: Baolin Wang <baolin.wang@xxxxxxxxxxxxxxxxx>
---
mm/mincore.c | 27 ++++++++++++++++++++++-----
1 file changed, 22 insertions(+), 5 deletions(-)

diff --git a/mm/mincore.c b/mm/mincore.c
index 832f29f46767..88be180b5550 100644
--- a/mm/mincore.c
+++ b/mm/mincore.c
@@ -21,6 +21,7 @@

#include <linux/uaccess.h>
#include "swap.h"
+#include "internal.h"

static int mincore_hugetlb(pte_t *pte, unsigned long hmask, unsigned long addr,
			unsigned long end, struct mm_walk *walk)
@@ -105,6 +106,7 @@ static int mincore_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
	pte_t *ptep;
	unsigned char *vec = walk->private;
	int nr = (end - addr) >> PAGE_SHIFT;
+	int step, i;

	ptl = pmd_trans_huge_lock(pmd, vma);
	if (ptl) {
@@ -118,16 +120,31 @@ static int mincore_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
		walk->action = ACTION_AGAIN;
		return 0;
	}
-	for (; addr != end; ptep++, addr += PAGE_SIZE) {
+	for (; addr != end; ptep += step, addr += step * PAGE_SIZE) {
		pte_t pte = ptep_get(ptep);

+		step = 1;
		/* We need to do cache lookup too for pte markers */
		if (pte_none_mostly(pte))
			__mincore_unmapped_range(addr, addr + PAGE_SIZE,
						 vma, vec);
-		else if (pte_present(pte))
-			*vec = 1;
-		else { /* pte is a swap entry */
+		else if (pte_present(pte)) {
+			if (pte_batch_hint(ptep, pte) > 1) {
+				struct folio *folio = vm_normal_folio(vma, addr, pte);
+
+				if (folio && folio_test_large(folio)) {
+					const fpb_t fpb_flags = FPB_IGNORE_DIRTY |
+						FPB_IGNORE_SOFT_DIRTY;
+					int max_nr = (end - addr) / PAGE_SIZE;
+
+					step = folio_pte_batch(folio, addr, ptep, pte,
+							max_nr, fpb_flags, NULL, NULL, NULL);
+				}
+			}
+
+			for (i = 0; i < step; i++)
+				vec[i] = 1;
+		} else { /* pte is a swap entry */
			swp_entry_t entry = pte_to_swp_entry(pte);

			if (non_swap_entry(entry)) {
@@ -146,7 +163,7 @@ static int mincore_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
#endif
			}
		}
-		vec++;
+		vec += step;
	}
	pte_unmap_unlock(ptep - 1, ptl);
out:
--
2.43.5