[PATCH] list_lru: split out __lock_list_lru_of_memcg() and drop skip_empty

Factor the irq flag and the skip_empty behaviour out of
lock_list_lru_of_memcg().  The walk path, which may race with memcg
reparenting and expects a NULL return on a lost race, now calls
__lock_list_lru_of_memcg() directly, while list_lru_add() and
list_lru_del() use lock_list_lru_of_memcg(), which always succeeds by
retrying against the parent memcg.  This lets the add/del call sites
drop their now-impossible NULL checks.  While at it, switch the
VM_WARN_ON() on the retry path to VM_WARN_ON_ONCE().  No functional
change intended.
From: Shakeel Butt
Date: Wed Mar 18 2026 - 13:43:53 EST
Signed-off-by: Shakeel Butt <shakeel.butt@xxxxxxxxx>
---
mm/list_lru.c | 53 ++++++++++++++++++++++++++++-----------------------
1 file changed, 29 insertions(+), 24 deletions(-)
diff --git a/mm/list_lru.c b/mm/list_lru.c
index 26463ae29c64..062394c598d4 100644
--- a/mm/list_lru.c
+++ b/mm/list_lru.c
@@ -77,27 +77,30 @@ static inline bool lock_list_lru(struct list_lru_one *l, bool irq)
}
static inline struct list_lru_one *
-lock_list_lru_of_memcg(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
- bool irq, bool skip_empty)
+__lock_list_lru_of_memcg(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
+ bool irq)
{
struct list_lru_one *l;
rcu_read_lock();
-again:
l = list_lru_from_memcg_idx(lru, nid, memcg_kmem_id(memcg));
- if (likely(l) && lock_list_lru(l, irq)) {
- rcu_read_unlock();
+ if (likely(l) && !lock_list_lru(l, irq))
+ l = NULL;
+ rcu_read_unlock();
+
+ return l;
+}
+
+static inline struct list_lru_one *
+lock_list_lru_of_memcg(struct list_lru *lru, int nid, struct mem_cgroup *memcg)
+{
+ struct list_lru_one *l;
+again:
+ l = __lock_list_lru_of_memcg(lru, nid, memcg, false);
+ if (likely(l))
return l;
- }
- /*
- * Caller may simply bail out if raced with reparenting or
- * may iterate through the list_lru and expect empty slots.
- */
- if (skip_empty) {
- rcu_read_unlock();
- return NULL;
- }
- VM_WARN_ON(!css_is_dying(&memcg->css));
+
+ VM_WARN_ON_ONCE(!css_is_dying(&memcg->css));
memcg = parent_mem_cgroup(memcg);
goto again;
}
@@ -135,8 +138,8 @@ list_lru_from_memcg_idx(struct list_lru *lru, int nid, int idx)
}
static inline struct list_lru_one *
-lock_list_lru_of_memcg(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
- bool irq, bool skip_empty)
+__lock_list_lru_of_memcg(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
+ bool irq)
{
struct list_lru_one *l = &lru->node[nid].lru;
@@ -148,6 +151,12 @@ lock_list_lru_of_memcg(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
return l;
}
+static inline struct list_lru_one *
+lock_list_lru_of_memcg(struct list_lru *lru, int nid, struct mem_cgroup *memcg)
+{
+ return __lock_list_lru_of_memcg(lru, nid, memcg, false);
+}
+
static inline void unlock_list_lru(struct list_lru_one *l, bool irq_off)
{
if (irq_off)
@@ -164,9 +173,7 @@ bool list_lru_add(struct list_lru *lru, struct list_head *item, int nid,
struct list_lru_node *nlru = &lru->node[nid];
struct list_lru_one *l;
- l = lock_list_lru_of_memcg(lru, nid, memcg, false, false);
- if (!l)
- return false;
+ l = lock_list_lru_of_memcg(lru, nid, memcg);
if (list_empty(item)) {
list_add_tail(item, &l->list);
/* Set shrinker bit if the first element was added */
@@ -203,9 +210,7 @@ bool list_lru_del(struct list_lru *lru, struct list_head *item, int nid,
{
struct list_lru_node *nlru = &lru->node[nid];
struct list_lru_one *l;
- l = lock_list_lru_of_memcg(lru, nid, memcg, false, false);
- if (!l)
- return false;
+ l = lock_list_lru_of_memcg(lru, nid, memcg);
if (!list_empty(item)) {
list_del_init(item);
l->nr_items--;
@@ -287,7 +292,7 @@ __list_lru_walk_one(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
unsigned long isolated = 0;
restart:
- l = lock_list_lru_of_memcg(lru, nid, memcg, irq_off, true);
+ l = __lock_list_lru_of_memcg(lru, nid, memcg, irq_off);
if (!l)
return isolated;
list_for_each_safe(item, n, &l->list) {
--
2.52.0