[PATCH] lock_page_memcg

From: Johannes Weiner
Date: Mon Nov 16 2020 - 10:48:06 EST


---
 include/linux/memcontrol.h | 61 ++++++++++++++++++++++++++--
 mm/memcontrol.c            | 82 +++++++-------------------------------
 2 files changed, 73 insertions(+), 70 deletions(-)
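
Why the fastpath can skip memcg->move_lock: the charge-moving side
raises memcg->moving_account and then waits out an RCU grace period
before it starts moving pages, so any RCU-protected section that
sampled moving_account == 0 is guaranteed to have finished by the time
pages actually move. Abbreviated sketch of the mover side for reference
(see mem_cgroup_move_charge() in mm/memcontrol.c; the page table walk
and retry logic are omitted here):

	static void mem_cgroup_move_charge(void)
	{
		/*
		 * Signal lock_page_memcg() to take the slowpath,
		 * then wait for already started RCU-only updates
		 * to finish before any pages move.
		 */
		atomic_inc(&mc.from->moving_account);
		synchronize_rcu();

		/* ... move pages, each under mc.from->move_lock ... */

		atomic_dec(&mc.from->moving_account);
	}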

diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 20108e426f84..b4b73e375948 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -842,9 +842,64 @@ void mem_cgroup_print_oom_group(struct mem_cgroup *memcg);
 extern bool cgroup_memory_noswap;
 #endif
 
-struct mem_cgroup *lock_page_memcg(struct page *page);
-void __unlock_page_memcg(struct mem_cgroup *memcg);
-void unlock_page_memcg(struct page *page);
+struct mem_cgroup *lock_page_memcg_slowpath(struct page *page,
+					    struct mem_cgroup *memcg);
+void unlock_page_memcg_slowpath(struct mem_cgroup *memcg);
+
+/**
+ * lock_page_memcg - lock a page and memcg binding
+ * @page: the page
+ *
+ * This function protects unlocked LRU pages from being moved to
+ * another cgroup.
+ *
+ * It ensures the lifetime of the returned memcg; the caller is
+ * responsible for the lifetime of the page. __unlock_page_memcg() is
+ * available when @page might get freed inside the locked section.
+ */
+static inline struct mem_cgroup *lock_page_memcg(struct page *page)
+{
+	struct page *head = compound_head(page); /* rmap on tail pages */
+	struct mem_cgroup *memcg;
+
+	/*
+	 * The RCU lock is held throughout the transaction. The fast
+	 * path can get away without acquiring the memcg->move_lock
+	 * because page moving starts with an RCU grace period.
+	 *
+	 * The RCU lock also protects the memcg from being freed when
+	 * the page state that is going to change is the only thing
+	 * preventing the page itself from being freed. E.g. writeback
+	 * doesn't hold a page reference and relies on PG_writeback to
+	 * keep off truncation, migration and so forth.
+	 */
+	rcu_read_lock();
+
+	if (mem_cgroup_disabled())
+		return NULL;
+
+	memcg = page_memcg(head);
+	if (unlikely(!memcg))
+		return NULL;
+
+	if (likely(!atomic_read(&memcg->moving_account)))
+		return memcg;
+
+	return lock_page_memcg_slowpath(head, memcg);
+}
+
+static inline void __unlock_page_memcg(struct mem_cgroup *memcg)
+{
+	if (unlikely(memcg && memcg->move_lock_task == current))
+		unlock_page_memcg_slowpath(memcg);
+
+	rcu_read_unlock();
+}
+
+static inline void unlock_page_memcg(struct page *page)
+{
+	__unlock_page_memcg(page_memcg(compound_head(page)));
+}

 /*
  * idx can be of type enum memcg_stat_item or node_stat_item.
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 69a2893a6455..9acc42388b86 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2084,49 +2084,19 @@ void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
 	pr_cont(" are going to be killed due to memory.oom.group set\n");
 }
 
-/**
- * lock_page_memcg - lock a page and memcg binding
- * @page: the page
- *
- * This function protects unlocked LRU pages from being moved to
- * another cgroup.
- *
- * It ensures lifetime of the returned memcg. Caller is responsible
- * for the lifetime of the page; __unlock_page_memcg() is available
- * when @page might get freed inside the locked section.
- */
-struct mem_cgroup *lock_page_memcg(struct page *page)
+struct mem_cgroup *lock_page_memcg_slowpath(struct page *page,
+					    struct mem_cgroup *memcg)
 {
-	struct page *head = compound_head(page); /* rmap on tail pages */
-	struct mem_cgroup *memcg;
 	unsigned long flags;
-
-	/*
-	 * The RCU lock is held throughout the transaction. The fast
-	 * path can get away without acquiring the memcg->move_lock
-	 * because page moving starts with an RCU grace period.
-	 *
-	 * The RCU lock also protects the memcg from being freed when
-	 * the page state that is going to change is the only thing
-	 * preventing the page itself from being freed. E.g. writeback
-	 * doesn't hold a page reference and relies on PG_writeback to
-	 * keep off truncation, migration and so forth.
-	 */
-	rcu_read_lock();
-
-	if (mem_cgroup_disabled())
-		return NULL;
 again:
-	memcg = page_memcg(head);
-	if (unlikely(!memcg))
-		return NULL;
-
-	if (atomic_read(&memcg->moving_account) <= 0)
-		return memcg;
-
 	spin_lock_irqsave(&memcg->move_lock, flags);
-	if (memcg != page_memcg(head)) {
+	if (memcg != page_memcg(page)) {
 		spin_unlock_irqrestore(&memcg->move_lock, flags);
+		memcg = page_memcg(page);
+		if (unlikely(!memcg))
+			return NULL;
+		if (!atomic_read(&memcg->moving_account))
+			return memcg;
 		goto again;
 	}
 
@@ -2140,39 +2110,17 @@ struct mem_cgroup *lock_page_memcg(struct page *page)

 	return memcg;
 }
-EXPORT_SYMBOL(lock_page_memcg);
-
-/**
- * __unlock_page_memcg - unlock and unpin a memcg
- * @memcg: the memcg
- *
- * Unlock and unpin a memcg returned by lock_page_memcg().
- */
-void __unlock_page_memcg(struct mem_cgroup *memcg)
-{
-	if (memcg && memcg->move_lock_task == current) {
-		unsigned long flags = memcg->move_lock_flags;
-
-		memcg->move_lock_task = NULL;
-		memcg->move_lock_flags = 0;
-
-		spin_unlock_irqrestore(&memcg->move_lock, flags);
-	}
-
-	rcu_read_unlock();
-}
+EXPORT_SYMBOL(lock_page_memcg_slowpath);

-/**
- * unlock_page_memcg - unlock a page and memcg binding
- * @page: the page
- */
-void unlock_page_memcg(struct page *page)
+void unlock_page_memcg_slowpath(struct mem_cgroup *memcg)
 {
-	struct page *head = compound_head(page);
+	unsigned long flags = memcg->move_lock_flags;
 
-	__unlock_page_memcg(page_memcg(head));
+	memcg->move_lock_task = NULL;
+	memcg->move_lock_flags = 0;
+	spin_unlock_irqrestore(&memcg->move_lock, flags);
 }
-EXPORT_SYMBOL(unlock_page_memcg);
+EXPORT_SYMBOL(unlock_page_memcg_slowpath);

 struct memcg_stock_pcp {
 	struct mem_cgroup *cached; /* this never be root cgroup */
--
2.29.1