[PATCH v8 02/10] mm/memcg: fold lock_page_lru into commit_charge
From: Alex Shi
Date: Wed Jan 15 2020 - 22:05:51 EST
As Konstantin Khlebnikov mentioned:
Also I don't like these functions:
- called lock/unlock but actually also isolates
- used just once
- pgdat evaluated twice
Clean up and fold these functions into commit_charge. This also reduces
the lock hold time in the lrucare && !PageLRU case.
Signed-off-by: Alex Shi <alex.shi@xxxxxxxxxxxxxxxxx>
Cc: Johannes Weiner <hannes@xxxxxxxxxxx>
Cc: Michal Hocko <mhocko@xxxxxxxxxx>
Cc: Konstantin Khlebnikov <khlebnikov@xxxxxxxxxxxxxx>
Cc: Vladimir Davydov <vdavydov.dev@xxxxxxxxx>
Cc: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
Cc: cgroups@xxxxxxxxxxxxxxx
Cc: linux-mm@xxxxxxxxx
Cc: linux-kernel@xxxxxxxxxxxxxxx
---
mm/memcontrol.c | 57 ++++++++++++++++++++-------------------------------------
1 file changed, 20 insertions(+), 37 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index c5b5f74cfd4d..d92538a9185c 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2570,41 +2570,11 @@ static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages)
css_put_many(&memcg->css, nr_pages);
}
-static void lock_page_lru(struct page *page, int *isolated)
-{
- pg_data_t *pgdat = page_pgdat(page);
-
- spin_lock_irq(&pgdat->lru_lock);
- if (PageLRU(page)) {
- struct lruvec *lruvec;
-
- lruvec = mem_cgroup_page_lruvec(page, pgdat);
- ClearPageLRU(page);
- del_page_from_lru_list(page, lruvec, page_lru(page));
- *isolated = 1;
- } else
- *isolated = 0;
-}
-
-static void unlock_page_lru(struct page *page, int isolated)
-{
- pg_data_t *pgdat = page_pgdat(page);
-
- if (isolated) {
- struct lruvec *lruvec;
-
- lruvec = mem_cgroup_page_lruvec(page, pgdat);
- VM_BUG_ON_PAGE(PageLRU(page), page);
- SetPageLRU(page);
- add_page_to_lru_list(page, lruvec, page_lru(page));
- }
- spin_unlock_irq(&pgdat->lru_lock);
-}
-
static void commit_charge(struct page *page, struct mem_cgroup *memcg,
bool lrucare)
{
- int isolated;
+ struct lruvec *lruvec = NULL;
+ pg_data_t *pgdat;
VM_BUG_ON_PAGE(page->mem_cgroup, page);
@@ -2612,9 +2582,17 @@ static void commit_charge(struct page *page, struct mem_cgroup *memcg,
* In some cases, SwapCache and FUSE(splice_buf->radixtree), the page
* may already be on some other mem_cgroup's LRU. Take care of it.
*/
- if (lrucare)
- lock_page_lru(page, &isolated);
-
+ if (lrucare) {
+ pgdat = page_pgdat(page);
+ spin_lock_irq(&pgdat->lru_lock);
+
+ if (PageLRU(page)) {
+ lruvec = mem_cgroup_page_lruvec(page, pgdat);
+ ClearPageLRU(page);
+ del_page_from_lru_list(page, lruvec, page_lru(page));
+ } else
+ spin_unlock_irq(&pgdat->lru_lock);
+ }
/*
* Nobody should be changing or seriously looking at
* page->mem_cgroup at this point:
@@ -2631,8 +2609,13 @@ static void commit_charge(struct page *page, struct mem_cgroup *memcg,
*/
page->mem_cgroup = memcg;
- if (lrucare)
- unlock_page_lru(page, isolated);
+ if (lrucare && lruvec) {
+ lruvec = mem_cgroup_page_lruvec(page, pgdat);
+ VM_BUG_ON_PAGE(PageLRU(page), page);
+ SetPageLRU(page);
+ add_page_to_lru_list(page, lruvec, page_lru(page));
+ spin_unlock_irq(&pgdat->lru_lock);
+ }
}
#ifdef CONFIG_MEMCG_KMEM
--
1.8.3.1