[PATCH v4 6/9] mm/vmscan: only change the lru_lock iff page's lruvec is different

From: Alex Shi
Date: Tue Nov 19 2019 - 07:24:16 EST


Use relock_page_lruvec_irq() in more places, so the lru_lock is only
dropped and re-acquired when a page's lruvec actually differs from the
one already held. This removes the unconditional unlock/lock pairs in
move_pages_to_lru() and simplifies the per-page relocking in
check_move_unevictable_pages().
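
For reference, the helper added earlier in this series behaves roughly
like the sketch below (the exact body lives in the patch that
introduces relock_page_lruvec_irq(); it is reproduced here only to make
this patch easier to review):

	static inline struct lruvec *relock_page_lruvec_irq(struct page *page,
			struct lruvec *locked_lruvec)
	{
		struct pglist_data *pgdat = page_pgdat(page);
		struct lruvec *lruvec = mem_cgroup_page_lruvec(page, pgdat);

		/* Don't touch the lock if the page's lruvec is already held. */
		if (likely(locked_lruvec == lruvec))
			return lruvec;

		/* Accepts NULL, so a loop can start with no lock held. */
		if (locked_lruvec)
			spin_unlock_irq(&locked_lruvec->lru_lock);

		spin_lock_irq(&lruvec->lru_lock);

		return lruvec;
	}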

Signed-off-by: Alex Shi <alex.shi@xxxxxxxxxxxxxxxxx>
Cc: Johannes Weiner <hannes@xxxxxxxxxxx>
Cc: Michal Hocko <mhocko@xxxxxxxxxx>
Cc: Vladimir Davydov <vdavydov.dev@xxxxxxxxx>
Cc: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
Cc: Roman Gushchin <guro@xxxxxx>
Cc: Shakeel Butt <shakeelb@xxxxxxxxxx>
Cc: Chris Down <chris@xxxxxxxxxxxxxx>
Cc: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Cc: Vlastimil Babka <vbabka@xxxxxxx>
Cc: Andrey Ryabinin <aryabinin@xxxxxxxxxxxxx>
Cc: swkhack <swkhack@xxxxxxxxx>
Cc: "Potyra, Stefan" <Stefan.Potyra@xxxxxxxxxxxxxx>
Cc: Jason Gunthorpe <jgg@xxxxxxxx>
Cc: Matthew Wilcox <willy@xxxxxxxxxxxxx>
Cc: Mauro Carvalho Chehab <mchehab+samsung@xxxxxxxxxx>
Cc: Peng Fan <peng.fan@xxxxxxx>
Cc: Nikolay Borisov <nborisov@xxxxxxxx>
Cc: Ira Weiny <ira.weiny@xxxxxxxxx>
Cc: Kirill Tkhai <ktkhai@xxxxxxxxxxxxx>
Cc: Yang Shi <yang.shi@xxxxxxxxxxxxxxxxx>
Cc: Yafang Shao <laoar.shao@xxxxxxxxx>
Cc: Mel Gorman <mgorman@xxxxxxxxxxxxxxxxxxx>
Cc: Konstantin Khlebnikov <khlebnikov@xxxxxxxxxxxxxx>
Cc: Hugh Dickins <hughd@xxxxxxxxxx>
Cc: Tejun Heo <tj@xxxxxxxxxx>
Cc: linux-kernel@xxxxxxxxxxxxxxx
Cc: cgroups@xxxxxxxxxxxxxxx
Cc: linux-mm@xxxxxxxxx
---
mm/vmscan.c | 24 ++++++++++++------------
1 file changed, 12 insertions(+), 12 deletions(-)
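
A note for reviewers: move_pages_to_lru() must still return with the
caller's llvec->lru_lock held, since shrink_inactive_list() keeps using
that lock after the call; that is what the new block after the loop
restores. The conversion in check_move_unevictable_pages() follows the
usual pattern for this helper, roughly (sketch only; 'isolated' is a
stand-in list head, not code from this patch):

	struct lruvec *lruvec = NULL;	/* start with no lock held */
	struct page *page;

	list_for_each_entry(page, &isolated, lru) {
		/* switches lru_lock only when the page's lruvec changes */
		lruvec = relock_page_lruvec_irq(page, lruvec);
		/* ... work on page under lruvec->lru_lock ... */
	}
	if (lruvec)
		spin_unlock_irq(&lruvec->lru_lock);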

diff --git a/mm/vmscan.c b/mm/vmscan.c
index 3cdf343e7a27..ba57c55a6a41 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1825,22 +1825,25 @@ static int too_many_isolated(struct pglist_data *pgdat, int file,
  * Returns the number of pages moved to the given lruvec.
  */
 
-static unsigned noinline_for_stack move_pages_to_lru(struct lruvec *lruvec,
+static unsigned noinline_for_stack move_pages_to_lru(struct lruvec *llvec,
 						     struct list_head *list)
 {
 	int nr_pages, nr_moved = 0;
 	LIST_HEAD(pages_to_free);
 	struct page *page;
 	enum lru_list lru;
+	struct lruvec *lruvec = llvec;
 
 	while (!list_empty(list)) {
 		page = lru_to_page(list);
+		lruvec = relock_page_lruvec_irq(page, lruvec);
+
 		VM_BUG_ON_PAGE(PageLRU(page), page);
 		if (unlikely(!page_evictable(page))) {
 			list_del(&page->lru);
 			spin_unlock_irq(&lruvec->lru_lock);
+			lruvec = NULL;
 			putback_lru_page(page);
-			spin_lock_irq(&lruvec->lru_lock);
 			continue;
 		}
 
@@ -1858,8 +1861,8 @@ static unsigned noinline_for_stack move_pages_to_lru(struct lruvec *lruvec,
 
 			if (unlikely(PageCompound(page))) {
 				spin_unlock_irq(&lruvec->lru_lock);
+				lruvec = NULL;
 				(*get_compound_page_dtor(page))(page);
-				spin_lock_irq(&lruvec->lru_lock);
 			} else
 				list_add(&page->lru, &pages_to_free);
 		} else {
@@ -1867,6 +1870,11 @@ static unsigned noinline_for_stack move_pages_to_lru(struct lruvec *lruvec,
 		}
 	}
 
+	if (lruvec != llvec) {
+		if (lruvec)
+			spin_unlock_irq(&lruvec->lru_lock);
+		spin_lock_irq(&llvec->lru_lock);
+	}
 	/*
 	 * To save our caller's stack, now use input list for pages to free.
 	 */
@@ -4289,18 +4297,10 @@ void check_move_unevictable_pages(struct pagevec *pvec)
 
 	for (i = 0; i < pvec->nr; i++) {
 		struct page *page = pvec->pages[i];
-		struct pglist_data *pgdat = page_pgdat(page);
-		struct lruvec *new_lruvec = mem_cgroup_page_lruvec(page, pgdat);
-
 
 		pgscanned++;
 
-		if (lruvec != new_lruvec) {
-			if (lruvec)
-				spin_unlock_irq(&lruvec->lru_lock);
-			lruvec = new_lruvec;
-			spin_lock_irq(&lruvec->lru_lock);
-		}
+		lruvec = relock_page_lruvec_irq(page, lruvec);
 
 		if (!PageLRU(page) || !PageUnevictable(page))
 			continue;
--
1.8.3.1