[PATCH 4/8 RFC] mm/page_counter: introduce stock drain APIs

From: Joshua Hahn

Date: Fri Apr 10 2026 - 17:11:53 EST


Introduce page_counter_drain_stock() and page_counter_drain_cpu()
to replace memcg stock draining functions.

page_counter_drain_stock() runs from drain_all_stock(), which is called
when the system is under memory pressure or a cgroup is dying. Because
it is a rare operation, it uses work_on_cpu() to synchronously drain
each online CPU's stock and synchronizes with concurrent charge/uncharge
via local_lock.

page_counter_drain_cpu() handles the CPU hotplug dead path, where the
per-CPU stock can be read and cleared directly without locking, since the
dead CPU can no longer access it.

Suggested-by: Johannes Weiner <hannes@xxxxxxxxxxx>
Signed-off-by: Joshua Hahn <joshua.hahnjy@xxxxxxxxx>
---
include/linux/page_counter.h | 2 ++
mm/page_counter.c | 51 ++++++++++++++++++++++++++++++++++++
2 files changed, 53 insertions(+)

diff --git a/include/linux/page_counter.h b/include/linux/page_counter.h
index c7e3ab3356d20..c6772531074b5 100644
--- a/include/linux/page_counter.h
+++ b/include/linux/page_counter.h
@@ -111,6 +111,8 @@ static inline void page_counter_reset_watermark(struct page_counter *counter)
int page_counter_enable_stock(struct page_counter *counter, unsigned int batch);
void page_counter_disable_stock(struct page_counter *counter);
void page_counter_free_stock(struct page_counter *counter);
+void page_counter_drain_stock(struct page_counter *counter);
+void page_counter_drain_cpu(struct page_counter *counter, unsigned int cpu);

#if IS_ENABLED(CONFIG_MEMCG) || IS_ENABLED(CONFIG_CGROUP_DMEM)
void page_counter_calculate_protection(struct page_counter *root,
diff --git a/mm/page_counter.c b/mm/page_counter.c
index 7be214034bfad..28c2e6442f7d3 100644
--- a/mm/page_counter.c
+++ b/mm/page_counter.c
@@ -12,6 +12,8 @@
#include <linux/string.h>
#include <linux/sched.h>
#include <linux/bug.h>
+#include <linux/cpu.h>
+#include <linux/workqueue.h>
#include <asm/page.h>

static bool track_protection(struct page_counter *c)
@@ -402,6 +404,55 @@ void page_counter_free_stock(struct page_counter *counter)
counter->stock = NULL;
}

+static long page_counter_drain_stock_cpu(void *arg)
+{
+ struct page_counter *counter = arg;
+ struct page_counter_stock *stock;
+ unsigned long nr_pages;
+
+ local_lock(&counter->stock->lock);
+ stock = this_cpu_ptr(counter->stock);
+ nr_pages = stock->nr_pages;
+ stock->nr_pages = 0;
+ local_unlock(&counter->stock->lock);
+
+ if (nr_pages)
+ page_counter_cancel_hierarchy(counter, nr_pages);
+
+ return 0;
+}
+/*
+ * Drain per-cpu stock across all online CPUs. Caller (drain_all_stock) is
+ * already protected by a mutex, all future callers must serialize as well.
+ */
+void page_counter_drain_stock(struct page_counter *counter)
+{
+ int cpu;
+
+ if (!counter->stock)
+ return;
+
+ cpus_read_lock();
+ for_each_online_cpu(cpu)
+ work_on_cpu(cpu, page_counter_drain_stock_cpu, counter);
+ cpus_read_unlock();
+}
+
+void page_counter_drain_cpu(struct page_counter *counter, unsigned int cpu)
+{
+ struct page_counter_stock *stock;
+ unsigned long nr_pages;
+
+ if (!counter->stock)
+ return;
+
+ stock = per_cpu_ptr(counter->stock, cpu);
+ nr_pages = stock->nr_pages;
+ if (nr_pages) {
+ stock->nr_pages = 0;
+ page_counter_cancel_hierarchy(counter, nr_pages);
+ }
+}

#if IS_ENABLED(CONFIG_MEMCG) || IS_ENABLED(CONFIG_CGROUP_DMEM)
/*
--
2.52.0