Re: [PATCH 4/5] mm: support batched checking of the young flag for MGLRU
From: David Hildenbrand (Arm)
Date: Wed Feb 25 2026 - 09:31:36 EST
On 2/24/26 02:56, Baolin Wang wrote:
> Use the batched helper clear_young_ptes_notify() to check and clear the
> young flag, improving performance during large folio reclamation when
> MGLRU is enabled.
>
> Meanwhile, we can also support batched checking of the young and dirty
> flags when MGLRU walks the mm's page tables to update the folios'
> generation counters. Since MGLRU also checks the PTE dirty bit, use
> folio_pte_batch_flags() with FPB_MERGE_YOUNG_DIRTY set to detect batches
> of PTEs mapping a large folio.
>
> Then we can remove ptep_clear_young_notify(), since it no longer has any
> users.
>
> Signed-off-by: Baolin Wang <baolin.wang@xxxxxxxxxxxxxxxxx>
> ---
[...]
>
> -static inline int ptep_clear_young_notify(struct vm_area_struct *vma,
> - unsigned long addr, pte_t *ptep)
> -{
> - return clear_young_ptes_notify(vma, addr, ptep, 1);
> -}
> -
> static inline int pmdp_clear_young_notify(struct vm_area_struct *vma,
> unsigned long addr, pmd_t *pmdp)
> {
> @@ -1847,12 +1841,6 @@ static inline int pmdp_clear_young_notify(struct vm_area_struct *vma,
> #define clear_young_ptes_notify test_and_clear_young_ptes
> #define pmdp_clear_young_notify pmdp_test_and_clear_young
>
> -static inline int ptep_clear_young_notify(struct vm_area_struct *vma,
> - unsigned long addr, pte_t *ptep)
> -{
> - return test_and_clear_young_ptes(vma, addr, ptep, 1);
> -}
> -
Oh, we remove the last user, nice.
> #endif /* CONFIG_MMU_NOTIFIER */
>
> #endif /* __MM_INTERNAL_H */
> diff --git a/mm/rmap.c b/mm/rmap.c
> index be785dfc9336..1c147251ae28 100644
> --- a/mm/rmap.c
> +++ b/mm/rmap.c
> @@ -958,25 +958,21 @@ static bool folio_referenced_one(struct folio *folio,
> return false;
> }
>
> + if (pvmw.pte && folio_test_large(folio)) {
> + unsigned long end_addr = pmd_addr_end(address, vma->vm_end);
> + unsigned int max_nr = (end_addr - address) >> PAGE_SHIFT;
Both could be const.
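I.e., simply:

const unsigned long end_addr = pmd_addr_end(address, vma->vm_end);
const unsigned int max_nr = (end_addr - address) >> PAGE_SHIFT;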
> + pte_t pteval = ptep_get(pvmw.pte);
I wonder if there could be a way to avoid this ptep_get() by letting
page_vma_mapped_walk() just provide the last value it used (in
check_pte() I guess). Something for another patch.
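Roughly (completely untested; "pteval" is a made-up field name), we could
stash the value in the walk state:

struct page_vma_mapped_walk {
	...
	/* Last PTE value read for pvmw->pte, filled in by check_pte(). */
	pte_t pteval;
};

and let check_pte() store what it already read via ptep_get(), so callers
like folio_referenced_one() could reuse pvmw.pteval instead of issuing
another ptep_get().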
> +
> + nr = folio_pte_batch(folio, pvmw.pte, pteval, max_nr);
> + ptes += nr;
> + }
> +
> if (lru_gen_enabled() && pvmw.pte) {
> - if (lru_gen_look_around(&pvmw))
> + if (lru_gen_look_around(&pvmw, nr))
> referenced++;
> } else if (pvmw.pte) {
> - if (folio_test_large(folio)) {
> - unsigned long end_addr = pmd_addr_end(address, vma->vm_end);
> - unsigned int max_nr = (end_addr - address) >> PAGE_SHIFT;
> - pte_t pteval = ptep_get(pvmw.pte);
> -
> - nr = folio_pte_batch(folio, pvmw.pte,
> - pteval, max_nr);
> - }
> -
> - ptes += nr;
> if (clear_flush_young_ptes_notify(vma, address, pvmw.pte, nr))
> referenced++;
> - /* Skip the batched PTEs */
> - pvmw.pte += nr - 1;
> - pvmw.address += (nr - 1) * PAGE_SIZE;
> } else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
> if (pmdp_clear_flush_young_notify(vma, address,
> pvmw.pmd))
> @@ -995,6 +991,12 @@ static bool folio_referenced_one(struct folio *folio,
> page_vma_mapped_walk_done(&pvmw);
> break;
> }
> +
> + /* Skip the batched PTEs */
> + if (nr > 1) {
> + pvmw.pte += nr - 1;
> + pvmw.address += (nr - 1) * PAGE_SIZE;
> + }
As nr >= 1, you can just unconditionally do:
pvmw.pte += nr - 1;
pvmw.address += (nr - 1) * PAGE_SIZE;
> }
>
> if (referenced)
> diff --git a/mm/vmscan.c b/mm/vmscan.c
> index 728868c61750..d83962468b2e 100644
> --- a/mm/vmscan.c
> +++ b/mm/vmscan.c
> @@ -3494,6 +3494,7 @@ static bool walk_pte_range(pmd_t *pmd, unsigned long start, unsigned long end,
> struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec);
> DEFINE_MAX_SEQ(walk->lruvec);
> int gen = lru_gen_from_seq(max_seq);
> + unsigned int nr;
> pmd_t pmdval;
>
> pte = pte_offset_map_rw_nolock(args->mm, pmd, start & PMD_MASK, &pmdval, &ptl);
> @@ -3512,11 +3513,13 @@ static bool walk_pte_range(pmd_t *pmd, unsigned long start, unsigned long end,
>
> lazy_mmu_mode_enable();
> restart:
> - for (i = pte_index(start), addr = start; addr != end; i++, addr += PAGE_SIZE) {
> + for (i = pte_index(start), addr = start; addr != end; i += nr, addr += nr * PAGE_SIZE) {
> unsigned long pfn;
> struct folio *folio;
> - pte_t ptent = ptep_get(pte + i);
> + pte_t *ptep = pte + i;
> + pte_t ptent = ptep_get(ptep);
Existing "pte vs ptent" vs. "ptep vs. pte" is already confusing.
Combining them into "pte vs. ptep vs. ptent" is no good.
If you need another variable, call it "cur_pte". Or rename "pte" to
"start_pte".
>
> + nr = 1;
> total++;
> walk->mm_stats[MM_LEAF_TOTAL]++;
>
> @@ -3528,7 +3531,14 @@ static bool walk_pte_range(pmd_t *pmd, unsigned long start, unsigned long end,
> if (!folio)
> continue;
>
> - if (!ptep_clear_young_notify(args->vma, addr, pte + i))
> + if (folio_test_large(folio)) {
> + unsigned int max_nr = (end - addr) >> PAGE_SHIFT;
> +
> + nr = folio_pte_batch_flags(folio, NULL, ptep, &ptent,
> + max_nr, FPB_MERGE_YOUNG_DIRTY);
> + }
> +
> + if (!clear_young_ptes_notify(args->vma, addr, ptep, nr))
> continue;
>
> if (last != folio) {
> @@ -4186,7 +4196,7 @@ static void lru_gen_age_node(struct pglist_data *pgdat, struct scan_control *sc)
> * the PTE table to the Bloom filter. This forms a feedback loop between the
> * eviction and the aging.
> */
> -bool lru_gen_look_around(struct page_vma_mapped_walk *pvmw)
> +bool lru_gen_look_around(struct page_vma_mapped_walk *pvmw, unsigned int batched)
What is "batched"? Did you mean "nr_ptes" ? Or just the initial value
for "nr" ?
[...]
>
> - if (!ptep_clear_young_notify(vma, addr, pte + i))
> + if (folio_test_large(folio)) {
> + unsigned int max_nr = (end - addr) >> PAGE_SHIFT;
Can be const.
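I.e.:

const unsigned int max_nr = (end - addr) >> PAGE_SHIFT;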
> +
> + nr = folio_pte_batch_flags(folio, NULL, ptep, &ptent,
> + max_nr, FPB_MERGE_YOUNG_DIRTY);
> + }
I guess we might benefit from an FPB_MERGE_YOUNG (merging only the young
bit) here. But this should work.
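I.e. (assuming we'd introduce such a flag; it doesn't exist yet):

nr = folio_pte_batch_flags(folio, NULL, ptep, &ptent,
			   max_nr, FPB_MERGE_YOUNG);

so we wouldn't bother accumulating the dirty state if it isn't actually
consumed here.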
--
Cheers,
David