Re: [PATCH 1/8] mm/mglru: consolidate common code for retrieving evictable size
From: Axel Rasmussen
Date: Fri Mar 20 2026 - 16:03:28 EST
For what it's worth, I applied the full series and ran it through some
basic functional testing, I didn't see any bugs or regressions from
that.
Unfortunately, the best signal would be actually deploying it under
some real serving workloads, but the latency for me to do that + get
results is like order(weeks) and I suspect you don't want to wait that
long. :)
This particular commit looks good besides one minor nitpick:
Reviewed-by: Axel Rasmussen <axelrasmussen@xxxxxxxxxx>
On Wed, Mar 18, 2026 at 7:19 PM Chen Ridong <chenridong@xxxxxxxxxxxxxxx> wrote:
>
>
>
> On 2026/3/18 3:08, Kairui Song via B4 Relay wrote:
> > From: Kairui Song <kasong@xxxxxxxxxxx>
> >
> > Merge commonly used code for counting evictable folios in a lruvec.
> >
> > No behavior change.
> >
> > Signed-off-by: Kairui Song <kasong@xxxxxxxxxxx>
>
> Reviewed-by: Chen Ridong <chenridong@xxxxxxxxxxxxxxx>
>
> > ---
> > mm/vmscan.c | 42 +++++++++++++++++-------------------------
> > 1 file changed, 17 insertions(+), 25 deletions(-)
> >
> > diff --git a/mm/vmscan.c b/mm/vmscan.c
> > index 33287ba4a500..d7fc7f1fe06d 100644
> > --- a/mm/vmscan.c
> > +++ b/mm/vmscan.c
> > @@ -4078,27 +4078,33 @@ static void set_initial_priority(struct pglist_data *pgdat, struct scan_control
> > sc->priority = clamp(priority, DEF_PRIORITY / 2, DEF_PRIORITY);
> > }
> >
> > -static bool lruvec_is_sizable(struct lruvec *lruvec, struct scan_control *sc)
> > +static long lruvec_evictable_size(struct lruvec *lruvec, int swappiness)
> > {
Since `total` is unsigned long, should this function likewise return
`unsigned long`? It seems ideal to avoid conversions unless there's a
good reason to do so.
> > int gen, type, zone;
> > - unsigned long total = 0;
> > - int swappiness = get_swappiness(lruvec, sc);
> > + unsigned long seq, total = 0;
> > struct lru_gen_folio *lrugen = &lruvec->lrugen;
> > - struct mem_cgroup *memcg = lruvec_memcg(lruvec);
> > DEFINE_MAX_SEQ(lruvec);
> > DEFINE_MIN_SEQ(lruvec);
> >
> > for_each_evictable_type(type, swappiness) {
> > - unsigned long seq;
> > -
> > for (seq = min_seq[type]; seq <= max_seq; seq++) {
> > gen = lru_gen_from_seq(seq);
> > -
> > for (zone = 0; zone < MAX_NR_ZONES; zone++)
> > total += max(READ_ONCE(lrugen->nr_pages[gen][type][zone]), 0L);
> > }
> > }
> >
> > + return total;
> > +}
> > +
> > +static bool lruvec_is_sizable(struct lruvec *lruvec, struct scan_control *sc)
> > +{
> > + unsigned long total;
> > + int swappiness = get_swappiness(lruvec, sc);
> > + struct mem_cgroup *memcg = lruvec_memcg(lruvec);
> > +
> > + total = lruvec_evictable_size(lruvec, swappiness);
> > +
> > /* whether the size is big enough to be helpful */
> > return mem_cgroup_online(memcg) ? (total >> sc->priority) : total;
> > }
> > @@ -4921,9 +4927,6 @@ static int evict_folios(unsigned long nr_to_scan, struct lruvec *lruvec,
> > static bool should_run_aging(struct lruvec *lruvec, unsigned long max_seq,
> > int swappiness, unsigned long *nr_to_scan)
> > {
> > - int gen, type, zone;
> > - unsigned long size = 0;
> > - struct lru_gen_folio *lrugen = &lruvec->lrugen;
> > DEFINE_MIN_SEQ(lruvec);
> >
> > *nr_to_scan = 0;
> > @@ -4931,18 +4934,7 @@ static bool should_run_aging(struct lruvec *lruvec, unsigned long max_seq,
> > if (evictable_min_seq(min_seq, swappiness) + MIN_NR_GENS > max_seq)
> > return true;
> >
> > - for_each_evictable_type(type, swappiness) {
> > - unsigned long seq;
> > -
> > - for (seq = min_seq[type]; seq <= max_seq; seq++) {
> > - gen = lru_gen_from_seq(seq);
> > -
> > - for (zone = 0; zone < MAX_NR_ZONES; zone++)
> > - size += max(READ_ONCE(lrugen->nr_pages[gen][type][zone]), 0L);
> > - }
> > - }
> > -
> > - *nr_to_scan = size;
> > + *nr_to_scan = lruvec_evictable_size(lruvec, swappiness);
> > /* better to run aging even though eviction is still possible */
> > return evictable_min_seq(min_seq, swappiness) + MIN_NR_GENS == max_seq;
> > }
> > @@ -4954,7 +4946,7 @@ static bool should_run_aging(struct lruvec *lruvec, unsigned long max_seq,
> > */
> > static long get_nr_to_scan(struct lruvec *lruvec, struct scan_control *sc, int swappiness)
> > {
> > - bool success;
> > + bool need_aging;
>
> I have suffered a lot because of this name. Thank you.
>
> > unsigned long nr_to_scan;
> > struct mem_cgroup *memcg = lruvec_memcg(lruvec);
> > DEFINE_MAX_SEQ(lruvec);
> > @@ -4962,7 +4954,7 @@ static long get_nr_to_scan(struct lruvec *lruvec, struct scan_control *sc, int s
> > if (mem_cgroup_below_min(sc->target_mem_cgroup, memcg))
> > return -1;
> >
> > - success = should_run_aging(lruvec, max_seq, swappiness, &nr_to_scan);
> > + need_aging = should_run_aging(lruvec, max_seq, swappiness, &nr_to_scan);
> >
> > /* try to scrape all its memory if this memcg was deleted */
> > if (nr_to_scan && !mem_cgroup_online(memcg))
> > @@ -4971,7 +4963,7 @@ static long get_nr_to_scan(struct lruvec *lruvec, struct scan_control *sc, int s
> > nr_to_scan = apply_proportional_protection(memcg, sc, nr_to_scan);
> >
> > /* try to get away with not aging at the default priority */
> > - if (!success || sc->priority == DEF_PRIORITY)
> > + if (!need_aging || sc->priority == DEF_PRIORITY)
> > return nr_to_scan >> sc->priority;
> >
> > /* stop scanning this lruvec as it's low on cold folios */
> >
>
> --
> Best regards,
> Ridong
>