[PATCH v6 03/10] mm/lru: introduce the relock_page_lruvec function
From: Alex Shi
Date: Mon Dec 16 2019 - 04:27:27 EST
During lruvec locking, a new page's lruvec may be the same as
the previous one. In that case we can skip the re-locking and
only switch the lock when the lruvec changes.

The function is named relock_page_lruvec, following Hugh Dickins'
patch.
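
The expected caller pattern is sketched below (illustrative only:
it mirrors the check_move_unevictable_pages() conversion in this
patch, and the loop body stands in for whatever per-page work the
caller does under the lock):

	struct lruvec *lruvec = NULL;
	int i;

	for (i = 0; i < pvec->nr; i++) {
		struct page *page = pvec->pages[i];

		/* Drops/retakes the lock only when the lruvec changes */
		lruvec = relock_page_lruvec_irq(page, lruvec);

		/* ... per-page work under lruvec's lru_lock ... */
	}
	if (lruvec)
		unlock_page_lruvec_irq(lruvec);

The _irqsave variant follows the same pattern, passing a flags
pointer through to lock_page_lruvec_irqsave() and
unlock_page_lruvec_irqrestore().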
Signed-off-by: Alex Shi <alex.shi@xxxxxxxxxxxxxxxxx>
Cc: Johannes Weiner <hannes@xxxxxxxxxxx>
Cc: Michal Hocko <mhocko@xxxxxxxxxx>
Cc: Vladimir Davydov <vdavydov.dev@xxxxxxxxx>
Cc: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
Cc: Roman Gushchin <guro@xxxxxx>
Cc: Shakeel Butt <shakeelb@xxxxxxxxxx>
Cc: Chris Down <chris@xxxxxxxxxxxxxx>
Cc: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Cc: Vlastimil Babka <vbabka@xxxxxxx>
Cc: Andrey Ryabinin <aryabinin@xxxxxxxxxxxxx>
Cc: swkhack <swkhack@xxxxxxxxx>
Cc: "Potyra, Stefan" <Stefan.Potyra@xxxxxxxxxxxxxx>
Cc: Jason Gunthorpe <jgg@xxxxxxxx>
Cc: Matthew Wilcox <willy@xxxxxxxxxxxxx>
Cc: Mauro Carvalho Chehab <mchehab+samsung@xxxxxxxxxx>
Cc: Peng Fan <peng.fan@xxxxxxx>
Cc: Nikolay Borisov <nborisov@xxxxxxxx>
Cc: Ira Weiny <ira.weiny@xxxxxxxxx>
Cc: Kirill Tkhai <ktkhai@xxxxxxxxxxxxx>
Cc: Yang Shi <yang.shi@xxxxxxxxxxxxxxxxx>
Cc: Yafang Shao <laoar.shao@xxxxxxxxx>
Cc: Mel Gorman <mgorman@xxxxxxxxxxxxxxxxxxx>
Cc: Konstantin Khlebnikov <khlebnikov@xxxxxxxxxxxxxx>
Cc: Hugh Dickins <hughd@xxxxxxxxxx>
Cc: Tejun Heo <tj@xxxxxxxxxx>
Cc: linux-kernel@xxxxxxxxxxxxxxx
Cc: cgroups@xxxxxxxxxxxxxxx
Cc: linux-mm@xxxxxxxxx
---
 include/linux/memcontrol.h | 36 ++++++++++++++++++++++++++++++++++++
 mm/vmscan.c                |  8 ++------
 2 files changed, 38 insertions(+), 6 deletions(-)
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 8389b9b927ef..09e861df48e8 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -1299,6 +1299,42 @@ static inline void dec_lruvec_page_state(struct page *page,
 	mod_lruvec_page_state(page, idx, -1);
 }
 
+/* Don't lock again if the page's lruvec is already locked */
+static inline struct lruvec *relock_page_lruvec_irq(struct page *page,
+		struct lruvec *locked_lruvec)
+{
+	struct pglist_data *pgdat = page_pgdat(page);
+	struct lruvec *lruvec;
+
+	lruvec = mem_cgroup_page_lruvec(page, pgdat);
+
+	if (likely(locked_lruvec == lruvec))
+		return lruvec;
+
+	if (unlikely(locked_lruvec))
+		unlock_page_lruvec_irq(locked_lruvec);
+
+	return lock_page_lruvec_irq(page);
+}
+
+/* Don't lock again if the page's lruvec is already locked */
+static inline struct lruvec *relock_page_lruvec_irqsave(struct page *page,
+		struct lruvec *locked_lruvec, unsigned long *flags)
+{
+	struct pglist_data *pgdat = page_pgdat(page);
+	struct lruvec *lruvec;
+
+	lruvec = mem_cgroup_page_lruvec(page, pgdat);
+
+	if (likely(locked_lruvec == lruvec))
+		return lruvec;
+
+	if (unlikely(locked_lruvec))
+		unlock_page_lruvec_irqrestore(locked_lruvec, *flags);
+
+	return lock_page_lruvec_irqsave(page, flags);
+}
+
 #ifdef CONFIG_CGROUP_WRITEBACK
 
 struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb);
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 017901112789..be8bca45f7c6 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -4343,14 +4343,10 @@ void check_move_unevictable_pages(struct pagevec *pvec)
 
 	for (i = 0; i < pvec->nr; i++) {
 		struct page *page = pvec->pages[i];
-		struct lruvec *new_lruvec = mem_cgroup_page_lruvec(page, page_pgdat(page));
 
 		pgscanned++;
-		if (lruvec != new_lruvec) {
-			if (lruvec)
-				unlock_page_lruvec_irq(lruvec);
-			lruvec = lock_page_lruvec_irq(page);
-		}
+
+		lruvec = relock_page_lruvec_irq(page, lruvec);
 
 		if (!PageLRU(page) || !PageUnevictable(page))
 			continue;
--
1.8.3.1