Re: [PATCH v2 7/7] mm: switch deferred split shrinker to list_lru

From: Johannes Weiner

Date: Fri Mar 20 2026 - 12:07:21 EST


On Thu, Mar 19, 2026 at 08:21:21AM +0100, David Hildenbrand (Arm) wrote:
> Of course :) If list_lru lock helpers would be the right thing to do, it
> might be better placed in this series.

I think this is slightly more promising. See below. The callsites in
huge_memory.c look nicer. But the double folio_nid() and folio_memcg()
lookups (when the caller needs them too) are kind of unfortunate; and
it feels like a lot of API for 4 callsites. Thoughts?
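
To make the double-lookup point concrete: at a callsite that also needs
the nid/memcg for the list operation itself, e.g. the requeue path in
deferred_split_scan(), it ends up looking roughly like this (sketch only,
not a separate hunk):

	struct list_lru_one *l;
	unsigned long flags;

	/* folio_nid()/folio_memcg() are resolved inside the helper... */
	l = folio_list_lru_lock_irqsave(folio, &deferred_split_lru, &flags);
	/* ...and again here, because __list_lru_add() needs them too */
	__list_lru_add(&deferred_split_lru, l, &folio->_deferred_list,
		       folio_nid(folio), folio_memcg(folio));
	folio_list_lru_unlock_irqrestore(folio, l, &flags);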

 include/linux/list_lru.h |  8 ++++++++
 mm/huge_memory.c         | 43 +++++++++++++++----------------------------
 mm/list_lru.c            | 29 +++++++++++++++++++++++++++++
 3 files changed, 52 insertions(+), 28 deletions(-)

diff --git a/include/linux/list_lru.h b/include/linux/list_lru.h
index 4bd29b61c59a..6b734d08fa1b 100644
--- a/include/linux/list_lru.h
+++ b/include/linux/list_lru.h
@@ -123,6 +123,14 @@ struct list_lru_one *list_lru_lock_irqsave(struct list_lru *lru, int nid,
 void list_lru_unlock_irqrestore(struct list_lru_one *l,
				unsigned long *irq_flags);
 
+struct list_lru_one *folio_list_lru_lock(struct folio *folio,
+					 struct list_lru *lru);
+void folio_list_lru_unlock(struct folio *folio, struct list_lru_one *l);
+struct list_lru_one *folio_list_lru_lock_irqsave(struct folio *folio,
+						 struct list_lru *lru, unsigned long *flags);
+void folio_list_lru_unlock_irqrestore(struct folio *folio,
+				      struct list_lru_one *l, unsigned long *flags);
+
 /* Caller-locked variants, see list_lru_add() etc for documentation */
 bool __list_lru_add(struct list_lru *lru, struct list_lru_one *l,
		    struct list_head *item, int nid, struct mem_cgroup *memcg);
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index e90d08db219d..6996ef224e24 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -3768,11 +3768,8 @@ static int __folio_freeze_and_split_unmapped(struct folio *folio, unsigned int n
	VM_WARN_ON_ONCE(!mapping && end);
	/* Prevent deferred_split_scan() touching ->_refcount */
	dequeue_deferred = folio_test_anon(folio) && old_order > 1;
-	if (dequeue_deferred) {
-		rcu_read_lock();
-		l = list_lru_lock(&deferred_split_lru,
-			  folio_nid(folio), folio_memcg(folio));
-	}
+	if (dequeue_deferred)
+		l = folio_list_lru_lock(folio, &deferred_split_lru);
	if (folio_ref_freeze(folio, folio_cache_ref_count(folio) + 1)) {
		struct swap_cluster_info *ci = NULL;
		struct lruvec *lruvec;
@@ -3785,8 +3782,7 @@ static int __folio_freeze_and_split_unmapped(struct folio *folio, unsigned int n
				mod_mthp_stat(old_order,
					      MTHP_STAT_NR_ANON_PARTIALLY_MAPPED, -1);
			}
-			list_lru_unlock(l);
-			rcu_read_unlock();
+			folio_list_lru_unlock(folio, l);
		}
 
		if (mapping) {
@@ -3889,10 +3885,8 @@ static int __folio_freeze_and_split_unmapped(struct folio *folio, unsigned int n
		if (ci)
			swap_cluster_unlock(ci);
	} else {
-		if (dequeue_deferred) {
-			list_lru_unlock(l);
-			rcu_read_unlock();
-		}
+		if (dequeue_deferred)
+			folio_list_lru_unlock(folio, l);
		return -EAGAIN;
	}
 
@@ -4276,8 +4270,7 @@ bool __folio_unqueue_deferred_split(struct folio *folio)
	WARN_ON_ONCE(folio_ref_count(folio));
	WARN_ON_ONCE(!mem_cgroup_disabled() && !folio_memcg_charged(folio));
 
-	rcu_read_lock();
-	l = list_lru_lock_irqsave(&deferred_split_lru, nid, folio_memcg(folio), &flags);
+	l = folio_list_lru_lock_irqsave(folio, &deferred_split_lru, &flags);
	if (__list_lru_del(&deferred_split_lru, l, &folio->_deferred_list, nid)) {
		if (folio_test_partially_mapped(folio)) {
			folio_clear_partially_mapped(folio);
@@ -4286,7 +4279,6 @@ bool __folio_unqueue_deferred_split(struct folio *folio)
		}
		unqueued = true;
	}
-	list_lru_unlock_irqrestore(l, &flags);
-	rcu_read_unlock();
+	folio_list_lru_unlock_irqrestore(folio, l, &flags);
 
	return unqueued;	/* useful for debug warnings */
@@ -4297,7 +4290,6 @@ void deferred_split_folio(struct folio *folio, bool partially_mapped)
 {
	struct list_lru_one *l;
	int nid;
-	struct mem_cgroup *memcg;
	unsigned long flags;
 
	/*
@@ -4322,9 +4314,7 @@ void deferred_split_folio(struct folio *folio, bool partially_mapped)
 
	nid = folio_nid(folio);
 
-	rcu_read_lock();
-	memcg = folio_memcg(folio);
-	l = list_lru_lock_irqsave(&deferred_split_lru, nid, memcg, &flags);
+	l = folio_list_lru_lock_irqsave(folio, &deferred_split_lru, &flags);
	if (partially_mapped) {
		if (!folio_test_partially_mapped(folio)) {
			folio_set_partially_mapped(folio);
@@ -4337,9 +4327,9 @@ void deferred_split_folio(struct folio *folio, bool partially_mapped)
		/* partially mapped folios cannot become non-partially mapped */
		VM_WARN_ON_FOLIO(folio_test_partially_mapped(folio), folio);
	}
-	__list_lru_add(&deferred_split_lru, l, &folio->_deferred_list, nid, memcg);
-	list_lru_unlock_irqrestore(l, &flags);
-	rcu_read_unlock();
+	__list_lru_add(&deferred_split_lru, l, &folio->_deferred_list, nid,
+		       folio_memcg(folio));
+	folio_list_lru_unlock_irqrestore(folio, l, &flags);
 }
 
 static unsigned long deferred_split_count(struct shrinker *shrink,
@@ -4445,16 +4435,13 @@ static unsigned long deferred_split_scan(struct shrinker *shrink,
		 * don't add it back to split_queue.
		 */
		if (!did_split && folio_test_partially_mapped(folio)) {
-			rcu_read_lock();
-			l = list_lru_lock_irqsave(&deferred_split_lru,
-						  folio_nid(folio),
-						  folio_memcg(folio),
-						  &flags);
+			l = folio_list_lru_lock_irqsave(folio,
+							&deferred_split_lru,
+							&flags);
			__list_lru_add(&deferred_split_lru, l,
				       &folio->_deferred_list,
				       folio_nid(folio), folio_memcg(folio));
-			list_lru_unlock_irqrestore(l, &flags);
-			rcu_read_unlock();
+			folio_list_lru_unlock_irqrestore(folio, l, &flags);
		}
		folio_put(folio);
	}
diff --git a/mm/list_lru.c b/mm/list_lru.c
index 1ccdd45b1d14..8d50741ef18d 100644
--- a/mm/list_lru.c
+++ b/mm/list_lru.c
@@ -173,6 +173,35 @@ void list_lru_unlock_irqrestore(struct list_lru_one *l, unsigned long *flags)
	unlock_list_lru(l, /*irq_off=*/true, /*irq_flags=*/flags);
 }
 
+struct list_lru_one *folio_list_lru_lock(struct folio *folio, struct list_lru *lru)
+{
+	rcu_read_lock();
+	return list_lru_lock(lru, folio_nid(folio), folio_memcg(folio));
+}
+
+void folio_list_lru_unlock(struct folio *folio, struct list_lru_one *l)
+{
+	list_lru_unlock(l);
+	rcu_read_unlock();
+}
+
+struct list_lru_one *folio_list_lru_lock_irqsave(struct folio *folio,
+						 struct list_lru *lru,
+						 unsigned long *flags)
+{
+	rcu_read_lock();
+	return list_lru_lock_irqsave(lru, folio_nid(folio),
+				     folio_memcg(folio), flags);
+}
+
+void folio_list_lru_unlock_irqrestore(struct folio *folio,
+				      struct list_lru_one *l,
+				      unsigned long *flags)
+{
+	list_lru_unlock_irqrestore(l, flags);
+	rcu_read_unlock();
+}
+
 bool __list_lru_add(struct list_lru *lru, struct list_lru_one *l,
		    struct list_head *item, int nid,
		    struct mem_cgroup *memcg)