[PATCH 06/11] mm/zsmalloc, zswap: Redirect zswap_entry->objcg to zspage

From: Joshua Hahn

Date: Wed Mar 11 2026 - 15:53:19 EST


Now that obj_cgroups are tracked in the zspage, redirect the zswap layer
to use the pointer stored in the zspage and remove the pointer in
struct zswap_entry.

This offsets the temporary memory increase caused by the duplicate
storage of the obj_cgroup pointer and results in a net zero memory
footprint change (aside from the array pointer and flags in zspage).

The lifetime and charging of the obj_cgroup are still handled in the
zswap layer.

Remove mem_cgroup_from_entry(), which has no remaining callers.

Suggested-by: Johannes Weiner <hannes@xxxxxxxxxxx>
Signed-off-by: Joshua Hahn <joshua.hahnjy@xxxxxxxxx>
---
include/linux/memcontrol.h | 5 ++++
include/linux/zsmalloc.h | 1 +
mm/zsmalloc.c | 25 +++++++++++++++++++
mm/zswap.c | 50 +++++++++++++++++---------------------
4 files changed, 53 insertions(+), 28 deletions(-)

diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 70b685a85bf4..0652db4ff2d5 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -1072,6 +1072,11 @@ static inline struct mem_cgroup *get_mem_cgroup_from_objcg(struct obj_cgroup *ob
return NULL;
}

+static inline struct mem_cgroup *obj_cgroup_memcg(struct obj_cgroup *objcg)
+{
+ return NULL;
+}
+
static inline bool folio_memcg_kmem(struct folio *folio)
{
return false;
diff --git a/include/linux/zsmalloc.h b/include/linux/zsmalloc.h
index 645957a156c4..6010d8dac9ff 100644
--- a/include/linux/zsmalloc.h
+++ b/include/linux/zsmalloc.h
@@ -41,6 +41,7 @@ unsigned long zs_get_total_pages(struct zs_pool *pool);
unsigned long zs_compact(struct zs_pool *pool);

unsigned int zs_lookup_class_index(struct zs_pool *pool, unsigned int size);
+struct obj_cgroup *zs_lookup_objcg(struct zs_pool *pool, unsigned long handle);

void zs_pool_stats(struct zs_pool *pool, struct zs_pool_stats *stats);

diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index d4735451c273..a94ca8c26ad9 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -1049,6 +1049,31 @@ unsigned int zs_lookup_class_index(struct zs_pool *pool, unsigned int size)
}
EXPORT_SYMBOL_GPL(zs_lookup_class_index);

+struct obj_cgroup *zs_lookup_objcg(struct zs_pool *pool, unsigned long handle)
+{
+ unsigned long obj;
+ struct zpdesc *zpdesc;
+ struct zspage *zspage;
+ struct obj_cgroup *objcg;
+ unsigned int obj_idx;
+
+ if (!pool->memcg_aware)
+ return NULL;
+
+ read_lock(&pool->lock);
+ obj = handle_to_obj(handle);
+ obj_to_location(obj, &zpdesc, &obj_idx);
+
+ zspage = get_zspage(zpdesc);
+ zspage_read_lock(zspage);
+ read_unlock(&pool->lock);
+
+ objcg = zspage->objcgs[obj_idx];
+ zspage_read_unlock(zspage);
+
+ return objcg;
+}
+
unsigned long zs_get_total_pages(struct zs_pool *pool)
{
return atomic_long_read(&pool->pages_allocated);
diff --git a/mm/zswap.c b/mm/zswap.c
index 68b87c3cc326..436066965413 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -193,7 +193,6 @@ struct zswap_entry {
bool referenced;
struct zswap_pool *pool;
unsigned long handle;
- struct obj_cgroup *objcg;
struct list_head lru;
};

@@ -602,25 +601,13 @@ static int zswap_enabled_param_set(const char *val,
* lru functions
**********************************/

-/* should be called under RCU */
-#ifdef CONFIG_MEMCG
-static inline struct mem_cgroup *mem_cgroup_from_entry(struct zswap_entry *entry)
-{
- return entry->objcg ? obj_cgroup_memcg(entry->objcg) : NULL;
-}
-#else
-static inline struct mem_cgroup *mem_cgroup_from_entry(struct zswap_entry *entry)
-{
- return NULL;
-}
-#endif
-
static inline int entry_to_nid(struct zswap_entry *entry)
{
return page_to_nid(virt_to_page(entry));
}

-static void zswap_lru_add(struct list_lru *list_lru, struct zswap_entry *entry)
+static void zswap_lru_add(struct list_lru *list_lru, struct zswap_entry *entry,
+ struct obj_cgroup *objcg)
{
int nid = entry_to_nid(entry);
struct mem_cgroup *memcg;
@@ -637,19 +624,20 @@ static void zswap_lru_add(struct list_lru *list_lru, struct zswap_entry *entry)
* Similar reasoning holds for list_lru_del().
*/
rcu_read_lock();
- memcg = mem_cgroup_from_entry(entry);
+ memcg = objcg ? obj_cgroup_memcg(objcg) : NULL;
/* will always succeed */
list_lru_add(list_lru, &entry->lru, nid, memcg);
rcu_read_unlock();
}

-static void zswap_lru_del(struct list_lru *list_lru, struct zswap_entry *entry)
+static void zswap_lru_del(struct list_lru *list_lru, struct zswap_entry *entry,
+ struct obj_cgroup *objcg)
{
int nid = entry_to_nid(entry);
struct mem_cgroup *memcg;

rcu_read_lock();
- memcg = mem_cgroup_from_entry(entry);
+ memcg = objcg ? obj_cgroup_memcg(objcg) : NULL;
/* will always succeed */
list_lru_del(list_lru, &entry->lru, nid, memcg);
rcu_read_unlock();
@@ -717,12 +705,15 @@ static void zswap_entry_cache_free(struct zswap_entry *entry)
*/
static void zswap_entry_free(struct zswap_entry *entry)
{
- zswap_lru_del(&zswap_list_lru, entry);
+ struct obj_cgroup *objcg = zs_lookup_objcg(entry->pool->zs_pool,
+ entry->handle);
+
+ zswap_lru_del(&zswap_list_lru, entry, objcg);
zs_free(entry->pool->zs_pool, entry->handle);
zswap_pool_put(entry->pool);
- if (entry->objcg) {
- obj_cgroup_uncharge_zswap(entry->objcg, entry->length);
- obj_cgroup_put(entry->objcg);
+ if (objcg) {
+ obj_cgroup_uncharge_zswap(objcg, entry->length);
+ obj_cgroup_put(objcg);
}
if (entry->length == PAGE_SIZE)
atomic_long_dec(&zswap_stored_incompressible_pages);
@@ -995,6 +986,7 @@ static int zswap_writeback_entry(struct zswap_entry *entry,
struct mempolicy *mpol;
bool folio_was_allocated;
struct swap_info_struct *si;
+ struct obj_cgroup *objcg;
int ret = 0;

/* try to allocate swap cache folio */
@@ -1044,8 +1036,9 @@ static int zswap_writeback_entry(struct zswap_entry *entry,
xa_erase(tree, offset);

count_vm_event(ZSWPWB);
- if (entry->objcg)
- count_objcg_events(entry->objcg, ZSWPWB, 1);
+ objcg = zs_lookup_objcg(entry->pool->zs_pool, entry->handle);
+ if (objcg)
+ count_objcg_events(objcg, ZSWPWB, 1);

zswap_entry_free(entry);

@@ -1464,11 +1457,10 @@ static bool zswap_store_page(struct page *page,
*/
entry->pool = pool;
entry->swpentry = page_swpentry;
- entry->objcg = objcg;
entry->referenced = true;
if (entry->length) {
INIT_LIST_HEAD(&entry->lru);
- zswap_lru_add(&zswap_list_lru, entry);
+ zswap_lru_add(&zswap_list_lru, entry, objcg);
}

return true;
@@ -1593,6 +1585,7 @@ int zswap_load(struct folio *folio)
bool swapcache = folio_test_swapcache(folio);
struct xarray *tree = swap_zswap_tree(swp);
struct zswap_entry *entry;
+ struct obj_cgroup *objcg;

VM_WARN_ON_ONCE(!folio_test_locked(folio));

@@ -1621,8 +1614,9 @@ int zswap_load(struct folio *folio)
folio_mark_uptodate(folio);

count_vm_event(ZSWPIN);
- if (entry->objcg)
- count_objcg_events(entry->objcg, ZSWPIN, 1);
+ objcg = zs_lookup_objcg(entry->pool->zs_pool, entry->handle);
+ if (objcg)
+ count_objcg_events(objcg, ZSWPIN, 1);

/*
* When reading into the swapcache, invalidate our entry. The
--
2.52.0