[PATCH 08/11] mm/memcontrol: Track MEMCG_ZSWAPPED in bytes
From: Joshua Hahn
Date: Wed Mar 11 2026 - 15:54:02 EST
Zswap compresses and decompresses in PAGE_SIZE units, which simplifies
the accounting for how much memory it has compressed. However, when a
compressed object straddles the boundary between two zspages, accounting
at PAGE_SIZE granularity makes it difficult to fractionally charge each
backing zspage in proportion to the share of the compressed object that
it holds.
To make sub-PAGE_SIZE granularity charging possible for MEMCG_ZSWAPPED,
track the value in bytes and adjust its accounting accordingly.
No functional changes intended.
Signed-off-by: Joshua Hahn <joshua.hahnjy@xxxxxxxxx>
---
include/linux/memcontrol.h | 2 +-
mm/memcontrol.c | 5 +++--
mm/zsmalloc.c | 4 ++--
mm/zswap.c | 8 +++++---
4 files changed, 11 insertions(+), 8 deletions(-)
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 701d9ab6fef1..ce2e598b5963 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -38,7 +38,7 @@ enum memcg_stat_item {
MEMCG_VMALLOC,
MEMCG_KMEM,
MEMCG_ZSWAP_B,
- MEMCG_ZSWAPPED,
+ MEMCG_ZSWAPPED_B,
MEMCG_NR_STAT,
};
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 68139be66a4f..1cb02d2febe8 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -342,7 +342,7 @@ static const unsigned int memcg_stat_items[] = {
MEMCG_VMALLOC,
MEMCG_KMEM,
MEMCG_ZSWAP_B,
- MEMCG_ZSWAPPED,
+ MEMCG_ZSWAPPED_B,
};
#define NR_MEMCG_NODE_STAT_ITEMS ARRAY_SIZE(memcg_node_stat_items)
@@ -1364,7 +1364,7 @@ static const struct memory_stat memory_stats[] = {
{ "shmem", NR_SHMEM },
#ifdef CONFIG_ZSWAP
{ "zswap", MEMCG_ZSWAP_B },
- { "zswapped", MEMCG_ZSWAPPED },
+ { "zswapped", MEMCG_ZSWAPPED_B },
#endif
{ "file_mapped", NR_FILE_MAPPED },
{ "file_dirty", NR_FILE_DIRTY },
@@ -1412,6 +1412,7 @@ static int memcg_page_state_unit(int item)
switch (item) {
case MEMCG_PERCPU_B:
case MEMCG_ZSWAP_B:
+ case MEMCG_ZSWAPPED_B:
case NR_SLAB_RECLAIMABLE_B:
case NR_SLAB_UNRECLAIMABLE_B:
return 1;
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 291194572a09..24665d7cd4a9 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -1047,7 +1047,7 @@ static void zs_charge_objcg(struct zs_pool *pool, struct obj_cgroup *objcg,
rcu_read_lock();
memcg = obj_cgroup_memcg(objcg);
mod_memcg_state(memcg, pool->compressed_stat, size);
- mod_memcg_state(memcg, pool->uncompressed_stat, 1);
+ mod_memcg_state(memcg, pool->uncompressed_stat, PAGE_SIZE);
rcu_read_unlock();
}
@@ -1066,7 +1066,7 @@ static void zs_uncharge_objcg(struct zs_pool *pool, struct obj_cgroup *objcg,
rcu_read_lock();
memcg = obj_cgroup_memcg(objcg);
mod_memcg_state(memcg, pool->compressed_stat, -size);
- mod_memcg_state(memcg, pool->uncompressed_stat, -1);
+ mod_memcg_state(memcg, pool->uncompressed_stat, -(int)PAGE_SIZE);
rcu_read_unlock();
}
#else
diff --git a/mm/zswap.c b/mm/zswap.c
index bca29a6e18f3..d81e2db4490b 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -257,7 +257,7 @@ static struct zswap_pool *zswap_pool_create(char *compressor)
/* unique name for each pool specifically required by zsmalloc */
snprintf(name, 38, "zswap%x", atomic_inc_return(&zswap_pools_count));
pool->zs_pool = zs_create_pool(name, true, MEMCG_ZSWAP_B,
- MEMCG_ZSWAPPED);
+ MEMCG_ZSWAPPED_B);
if (!pool->zs_pool)
goto error;
@@ -1214,8 +1214,10 @@ static unsigned long zswap_shrinker_count(struct shrinker *shrinker,
*/
if (!mem_cgroup_disabled()) {
mem_cgroup_flush_stats(memcg);
- nr_backing = memcg_page_state(memcg, MEMCG_ZSWAP_B) >> PAGE_SHIFT;
- nr_stored = memcg_page_state(memcg, MEMCG_ZSWAPPED);
+ nr_backing = memcg_page_state(memcg, MEMCG_ZSWAP_B);
+ nr_backing >>= PAGE_SHIFT;
+ nr_stored = memcg_page_state(memcg, MEMCG_ZSWAPPED_B);
+ nr_stored >>= PAGE_SHIFT;
} else {
nr_backing = zswap_total_pages();
nr_stored = atomic_long_read(&zswap_stored_pages);
--
2.52.0