[PATCH v2 39/46] mm/lru: Convert __pagevec_lru_add_fn to take a folio

From: Matthew Wilcox (Oracle)
Date: Tue Jun 22 2021 - 08:52:36 EST


This saves five calls to compound_head(), totalling 60 bytes of text.

Signed-off-by: Matthew Wilcox (Oracle) <willy@xxxxxxxxxxxxx>
---
mm/swap.c | 35 ++++++++++++++++++-----------------
1 file changed, 18 insertions(+), 17 deletions(-)
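
Note for readers new to the folio work: the text saving comes from hoisting
the compound_head() lookup into the caller. Every PageFoo()/SetPageFoo()
helper must resolve the head page before touching flags, so a function that
does five flag operations pays for five lookups; a function that takes a
struct folio * pays once, in the caller, via page_folio(). The following is
a minimal userspace sketch of that pattern only -- struct page, struct
folio, page_folio() and folio_set_lru_flag() below are simplified stand-ins
for the kernel definitions, not the real API:

	#include <stdio.h>

	struct page {
		unsigned long flags;
		struct page *head;	/* stand-in for the compound_head lookup */
	};

	struct folio { struct page page; };

	/* Every page-based flag helper repeats this lookup. */
	static struct page *compound_head(struct page *page)
	{
		return page->head;
	}

	static struct folio *page_folio(struct page *page)
	{
		return (struct folio *)compound_head(page);
	}

	/* A folio is by definition a head page, so no lookup here. */
	static void folio_set_lru_flag(struct folio *folio)
	{
		folio->page.flags |= 1UL;
	}

	int main(void)
	{
		struct folio f = { .page = { .flags = 0 } };
		struct page tail = { .flags = 0, .head = &f.page };

		f.page.head = &f.page;	/* a head page points at itself */

		struct folio *folio = page_folio(&tail);	/* one lookup... */
		folio_set_lru_flag(folio);	/* ...and none in any later call */
		printf("head flags after: %lu\n", f.page.flags);
		return 0;
	}

This mirrors the caller-side change in __pagevec_lru_add() below: the loop
calls page_folio() once per pagevec entry, and __pagevec_lru_add_fn() no
longer needs compound_head() at all.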

diff --git a/mm/swap.c b/mm/swap.c
index 2ed00cfd03ac..f3f1ee9f8616 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -998,17 +998,18 @@ void __pagevec_release(struct pagevec *pvec)
}
EXPORT_SYMBOL(__pagevec_release);

-static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec)
+static void __pagevec_lru_add_fn(struct folio *folio, struct lruvec *lruvec)
{
- int was_unevictable = TestClearPageUnevictable(page);
- int nr_pages = thp_nr_pages(page);
+ int was_unevictable = folio_test_clear_unevictable_flag(folio);
+ int nr_pages = folio_nr_pages(folio);

- VM_BUG_ON_PAGE(PageLRU(page), page);
+ VM_BUG_ON_FOLIO(folio_lru(folio), folio);

/*
- * Page becomes evictable in two ways:
+ * Folio becomes evictable in two ways:
* 1) Within LRU lock [munlock_vma_page() and __munlock_pagevec()].
- * 2) Before acquiring LRU lock to put the page to correct LRU and then
+ * 2) Before acquiring LRU lock to put the folio on the correct LRU
+ * and then
* a) do PageLRU check with lock [check_move_unevictable_pages]
* b) do PageLRU check before lock [clear_page_mlock]
*
@@ -1017,10 +1018,10 @@ static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec)
*
* #0: __pagevec_lru_add_fn #1: clear_page_mlock
*
- * SetPageLRU() TestClearPageMlocked()
+ * folio_set_lru_flag() folio_test_clear_mlocked_flag()
* smp_mb() // explicit ordering // above provides strict
* // ordering
- * PageMlocked() PageLRU()
+ * folio_mlocked() folio_lru()
*
*
* if '#1' does not observe setting of PG_lru by '#0' and fails
@@ -1031,21 +1032,21 @@ static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec)
* looking at the same page) and the evictable page will be stranded
* in an unevictable LRU.
*/
- SetPageLRU(page);
+ folio_set_lru_flag(folio);
smp_mb__after_atomic();

- if (page_evictable(page)) {
+ if (folio_evictable(folio)) {
if (was_unevictable)
__count_vm_events(UNEVICTABLE_PGRESCUED, nr_pages);
} else {
- ClearPageActive(page);
- SetPageUnevictable(page);
+ folio_clear_active_flag(folio);
+ folio_set_unevictable_flag(folio);
if (!was_unevictable)
__count_vm_events(UNEVICTABLE_PGCULLED, nr_pages);
}

- add_page_to_lru_list(page, lruvec);
- trace_mm_lru_insertion(page);
+ folio_add_to_lru_list(folio, lruvec);
+ trace_mm_lru_insertion(&folio->page);
}

/*
@@ -1059,10 +1060,10 @@ void __pagevec_lru_add(struct pagevec *pvec)
unsigned long flags = 0;

for (i = 0; i < pagevec_count(pvec); i++) {
- struct page *page = pvec->pages[i];
+ struct folio *folio = page_folio(pvec->pages[i]);

- lruvec = relock_page_lruvec_irqsave(page, lruvec, &flags);
- __pagevec_lru_add_fn(page, lruvec);
+ lruvec = folio_relock_lruvec_irqsave(folio, lruvec, &flags);
+ __pagevec_lru_add_fn(folio, lruvec);
}
if (lruvec)
unlock_page_lruvec_irqrestore(lruvec, flags);
--
2.30.2
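
P.S. The race diagram in the big comment above reduces to a classic
store/fence/load pattern on each side. Below is a hedged userspace
analogue using C11 atomics, where atomic_thread_fence(memory_order_seq_cst)
stands in for the kernel's smp_mb(); the function names and layout are
illustrative only, not kernel API. The property it models: the paired
barriers make it impossible for side #0 to still see "mlocked" (and file
the folio as unevictable) while side #1 simultaneously misses "lru" (and
so never rescues it), which is exactly the stranding the comment warns
about.

	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdio.h>

	static atomic_int lru;		/* models PG_lru, initially clear */
	static atomic_int mlocked = 1;	/* models PG_mlocked, initially set */
	static int saw_mlocked, saw_lru;

	static void *pagevec_lru_add_side(void *arg)	/* models #0 */
	{
		(void)arg;
		atomic_store_explicit(&lru, 1, memory_order_relaxed);
		/* smp_mb__after_atomic() in the kernel */
		atomic_thread_fence(memory_order_seq_cst);
		saw_mlocked = atomic_load_explicit(&mlocked,
						   memory_order_relaxed);
		return NULL;
	}

	static void *clear_page_mlock_side(void *arg)	/* models #1 */
	{
		(void)arg;
		/* TestClearPageMlocked() is a fully ordered RMW */
		atomic_exchange(&mlocked, 0);
		atomic_thread_fence(memory_order_seq_cst);
		saw_lru = atomic_load_explicit(&lru, memory_order_relaxed);
		return NULL;
	}

	int main(void)
	{
		pthread_t t0, t1;

		pthread_create(&t0, NULL, pagevec_lru_add_side, NULL);
		pthread_create(&t1, NULL, clear_page_mlock_side, NULL);
		pthread_join(t0, NULL);
		pthread_join(t1, NULL);

		/* The fences forbid saw_mlocked == 1 && saw_lru == 0. */
		printf("#0 saw mlocked=%d, #1 saw lru=%d\n",
		       saw_mlocked, saw_lru);
		return 0;
	}

(Build with -pthread.) Any other outcome is allowed -- both sides may even
observe each other -- but the disallowed combination is the one that would
leave an evictable folio stranded on the unevictable LRU.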