[PATCH v3 11/14] mm/mglru: remove no longer used reclaim argument for folio protection
From: Kairui Song via B4 Relay
Date: Thu Apr 02 2026 - 14:58:40 EST
From: Kairui Song <kasong@xxxxxxxxxxx>
Dirty folios under reclaim are now handled after isolation rather than
before, since dirty reactivation must first take the folio off the LRU;
this also helps unify the dirty handling logic.

As a result, the `reclaiming` argument of folio_inc_gen() — which only
set PG_reclaim for folio_end_writeback() — is no longer needed: all
remaining callers pass false. Remove it.
Signed-off-by: Kairui Song <kasong@xxxxxxxxxxx>
---
mm/vmscan.c | 11 ++++-------
1 file changed, 4 insertions(+), 7 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index bd2bf45826de..9bd0a3b94855 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -3220,7 +3220,7 @@ static int folio_update_gen(struct folio *folio, int gen)
}
/* protect pages accessed multiple times through file descriptors */
-static int folio_inc_gen(struct lruvec *lruvec, struct folio *folio, bool reclaiming)
+static int folio_inc_gen(struct lruvec *lruvec, struct folio *folio)
{
int type = folio_is_file_lru(folio);
struct lru_gen_folio *lrugen = &lruvec->lrugen;
@@ -3239,9 +3239,6 @@ static int folio_inc_gen(struct lruvec *lruvec, struct folio *folio, bool reclai
new_flags = old_flags & ~(LRU_GEN_MASK | LRU_REFS_FLAGS);
new_flags |= (new_gen + 1UL) << LRU_GEN_PGOFF;
- /* for folio_end_writeback() */
- if (reclaiming)
- new_flags |= BIT(PG_reclaim);
} while (!try_cmpxchg(&folio->flags.f, &old_flags, new_flags));
lru_gen_update_size(lruvec, folio, old_gen, new_gen);
@@ -3855,7 +3852,7 @@ static bool inc_min_seq(struct lruvec *lruvec, int type, int swappiness)
VM_WARN_ON_ONCE_FOLIO(folio_is_file_lru(folio) != type, folio);
VM_WARN_ON_ONCE_FOLIO(folio_zonenum(folio) != zone, folio);
- new_gen = folio_inc_gen(lruvec, folio, false);
+ new_gen = folio_inc_gen(lruvec, folio);
list_move_tail(&folio->lru, &lrugen->folios[new_gen][type][zone]);
/* don't count the workingset being lazily promoted */
@@ -4607,7 +4604,7 @@ static bool sort_folio(struct lruvec *lruvec, struct folio *folio, struct scan_c
/* protected */
if (tier > tier_idx || refs + workingset == BIT(LRU_REFS_WIDTH) + 1) {
- gen = folio_inc_gen(lruvec, folio, false);
+ gen = folio_inc_gen(lruvec, folio);
list_move(&folio->lru, &lrugen->folios[gen][type][zone]);
/* don't count the workingset being lazily promoted */
@@ -4622,7 +4619,7 @@ static bool sort_folio(struct lruvec *lruvec, struct folio *folio, struct scan_c
/* ineligible */
if (zone > sc->reclaim_idx) {
- gen = folio_inc_gen(lruvec, folio, false);
+ gen = folio_inc_gen(lruvec, folio);
list_move_tail(&folio->lru, &lrugen->folios[gen][type][zone]);
return true;
}
--
2.53.0