[PATCH v1 04/12] mm: vmscan: rework move_pages_to_lru()
From: Muchun Song
Date: Sat Aug 14 2021 - 01:26:04 EST
A later patch in this series will reparent LRU pages, which means a page
being moved to its appropriate LRU list can be reparented while
move_pages_to_lru() is running, so the lruvec it belongs to can change
underneath us. Holding a single lruvec lock across the whole loop, as the
caller does today, is therefore wrong. Switch to the more general
folio_lruvec_relock_irq() interface so that each iteration acquires the
lock of the lruvec the folio currently belongs to (see the sketch after
the diffstat below).
Signed-off-by: Muchun Song <songmuchun@xxxxxxxxxxxxx>
---
 include/linux/mm.h |  1 +
 mm/vmscan.c        | 49 +++++++++++++++++++++++++------------------------
 2 files changed, 26 insertions(+), 24 deletions(-)
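For review context only: a minimal sketch of the per-folio relock pattern
the rework adopts. walk_example() is an invented name and this snippet is
not part of the patch; lru_to_folio(), folio_lruvec_relock_irq() and
unlock_page_lruvec_irq() are the real interfaces used in the diff below.

/* Illustrative sketch, not part of this patch. */
static void walk_example(struct list_head *list)
{
	struct lruvec *lruvec = NULL;

	while (!list_empty(list)) {
		struct folio *folio = lru_to_folio(list);

		/*
		 * No-op if @lruvec already matches @folio's lruvec;
		 * otherwise drop the old lock and take the right one.
		 */
		lruvec = folio_lruvec_relock_irq(folio, lruvec);
		list_del(&folio->lru);

		/* ... operate on @folio under the correct lru_lock ... */
	}

	if (lruvec)
		unlock_page_lruvec_irq(lruvec);
}

This is also why move_pages_to_lru() below starts with lruvec = NULL and
resets it to NULL whenever the lock has to be dropped (the existing code
already drops it around putback_lru_page() and destroy_compound_page());
a NULL lruvec simply forces a fresh relock on the next iteration.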
diff --git a/include/linux/mm.h b/include/linux/mm.h
index ce8fc0fd6d6e..1e7f06bc5f2d 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -227,6 +227,7 @@ int overcommit_policy_handler(struct ctl_table *, int, void *, size_t *,
 #define PAGE_ALIGNED(addr)	IS_ALIGNED((unsigned long)(addr), PAGE_SIZE)
 
 #define lru_to_page(head) (list_entry((head)->prev, struct page, lru))
+#define lru_to_folio(head) (list_entry((head)->prev, struct folio, lru))
 
 void setup_initial_init_mm(void *start_code, void *end_code,
			   void *end_data, void *brk);
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 403a175a720f..8ce42858ad5d 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2153,23 +2153,28 @@ static int too_many_isolated(struct pglist_data *pgdat, int file,
  * move_pages_to_lru() moves pages from private @list to appropriate LRU list.
  * On return, @list is reused as a list of pages to be freed by the caller.
  *
- * Returns the number of pages moved to the given lruvec.
+ * Returns the number of pages moved to the appropriate LRU list.
+ *
+ * Note: The caller must not hold any lruvec lock.
  */
-static unsigned int move_pages_to_lru(struct lruvec *lruvec,
-				      struct list_head *list)
+static unsigned int move_pages_to_lru(struct list_head *list)
 {
-	int nr_pages, nr_moved = 0;
+	int nr_moved = 0;
+	struct lruvec *lruvec = NULL;
 	LIST_HEAD(pages_to_free);
-	struct page *page;
 
 	while (!list_empty(list)) {
-		page = lru_to_page(list);
+		int nr_pages;
+		struct folio *folio = lru_to_folio(list);
+		struct page *page = &folio->page;
+
+		lruvec = folio_lruvec_relock_irq(folio, lruvec);
 		VM_BUG_ON_PAGE(PageLRU(page), page);
 		list_del(&page->lru);
 		if (unlikely(!page_evictable(page))) {
-			spin_unlock_irq(&lruvec->lru_lock);
+			unlock_page_lruvec_irq(lruvec);
 			putback_lru_page(page);
-			spin_lock_irq(&lruvec->lru_lock);
+			lruvec = NULL;
 			continue;
 		}
 
@@ -2190,20 +2195,16 @@ static unsigned int move_pages_to_lru(struct lruvec *lruvec,
 			__clear_page_lru_flags(page);
 
 			if (unlikely(PageCompound(page))) {
-				spin_unlock_irq(&lruvec->lru_lock);
+				unlock_page_lruvec_irq(lruvec);
 				destroy_compound_page(page);
-				spin_lock_irq(&lruvec->lru_lock);
+				lruvec = NULL;
 			} else
 				list_add(&page->lru, &pages_to_free);
 
 			continue;
 		}
 
-		/*
-		 * All pages were isolated from the same lruvec (and isolation
-		 * inhibits memcg migration).
-		 */
-		VM_BUG_ON_PAGE(!folio_matches_lruvec(page_folio(page), lruvec), page);
+		VM_BUG_ON_PAGE(!folio_matches_lruvec(folio, lruvec), page);
 		add_page_to_lru_list(page, lruvec);
 		nr_pages = thp_nr_pages(page);
 		nr_moved += nr_pages;
@@ -2211,6 +2212,8 @@ static unsigned int move_pages_to_lru(struct lruvec *lruvec,
 			workingset_age_nonresident(lruvec, nr_pages);
 	}
 
+	if (lruvec)
+		unlock_page_lruvec_irq(lruvec);
 	/*
 	 * To save our caller's stack, now use input list for pages to free.
 	 */
@@ -2284,16 +2287,16 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
 
 	nr_reclaimed = shrink_page_list(&page_list, pgdat, sc, &stat, false);
 
-	spin_lock_irq(&lruvec->lru_lock);
-	move_pages_to_lru(lruvec, &page_list);
+	move_pages_to_lru(&page_list);
 
+	local_irq_disable();
 	__mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken);
 	item = current_is_kswapd() ? PGSTEAL_KSWAPD : PGSTEAL_DIRECT;
 	if (!cgroup_reclaim(sc))
 		__count_vm_events(item, nr_reclaimed);
 	__count_memcg_events(lruvec_memcg(lruvec), item, nr_reclaimed);
 	__count_vm_events(PGSTEAL_ANON + file, nr_reclaimed);
-	spin_unlock_irq(&lruvec->lru_lock);
+	local_irq_enable();
 
 	lru_note_cost(lruvec, file, stat.nr_pageout);
 	mem_cgroup_uncharge_list(&page_list);
@@ -2420,18 +2423,16 @@ static void shrink_active_list(unsigned long nr_to_scan,
 	/*
 	 * Move pages back to the lru list.
 	 */
-	spin_lock_irq(&lruvec->lru_lock);
-
-	nr_activate = move_pages_to_lru(lruvec, &l_active);
-	nr_deactivate = move_pages_to_lru(lruvec, &l_inactive);
+	nr_activate = move_pages_to_lru(&l_active);
+	nr_deactivate = move_pages_to_lru(&l_inactive);
 	/* Keep all free pages in l_active list */
 	list_splice(&l_inactive, &l_active);
 
+	local_irq_disable();
 	__count_vm_events(PGDEACTIVATE, nr_deactivate);
 	__count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE, nr_deactivate);
-
 	__mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken);
-	spin_unlock_irq(&lruvec->lru_lock);
+	local_irq_enable();
 
 	mem_cgroup_uncharge_list(&l_active);
 	free_unref_page_list(&l_active);
--
2.11.0