Re: [PATCH 03/11] mm: don't pass "enum lru_list" to lru list addition functions
From: Alex Shi
Date: Tue Dec 08 2020 - 03:24:54 EST
On 2020/12/8 6:09 AM, Yu Zhao wrote:
> The "enum lru_list" parameter to add_page_to_lru_list() and
> add_page_to_lru_list_tail() is redundant in the sense that it can
> be extracted from the "struct page" parameter by page_lru().
>
> A caveat is that we need to make sure PageActive() or
> PageUnevictable() is correctly set or cleared before calling
> these two functions. And indeed they are.
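For reference, this works because page_lru() derives the list from the page
flags alone. The helper in include/linux/mm_inline.h looks roughly like this
(trimmed here for illustration):

static __always_inline enum lru_list page_lru(struct page *page)
{
	enum lru_list lru;

	/* the two flags the changelog says must be settled first */
	if (PageUnevictable(page))
		lru = LRU_UNEVICTABLE;
	else {
		lru = page_lru_base_type(page);
		if (PageActive(page))
			lru += LRU_ACTIVE;
	}
	return lru;
}

So as long as both flags are settled before the call, the derived value
matches what callers used to pass in.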
>
> Signed-off-by: Yu Zhao <yuzhao@xxxxxxxxxx>
> ---
> include/linux/mm_inline.h | 8 ++++++--
> mm/swap.c | 15 +++++++--------
> mm/vmscan.c | 6 ++----
> 3 files changed, 15 insertions(+), 14 deletions(-)
>
> diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
> index 2889741f450a..130ba3201d3f 100644
> --- a/include/linux/mm_inline.h
> +++ b/include/linux/mm_inline.h
> @@ -106,15 +106,19 @@ static __always_inline enum lru_list page_lru(struct page *page)
> }
>
> static __always_inline void add_page_to_lru_list(struct page *page,
> - struct lruvec *lruvec, enum lru_list lru)
> + struct lruvec *lruvec)
> {
> + enum lru_list lru = page_lru(page);
> +
> update_lru_size(lruvec, lru, page_zonenum(page), thp_nr_pages(page));
> list_add(&page->lru, &lruvec->lists[lru]);
> }
>
> static __always_inline void add_page_to_lru_list_tail(struct page *page,
> - struct lruvec *lruvec, enum lru_list lru)
> + struct lruvec *lruvec)
> {
> + enum lru_list lru = page_lru(page);
> +
> update_lru_size(lruvec, lru, page_zonenum(page), thp_nr_pages(page));
> list_add_tail(&page->lru, &lruvec->lists[lru]);
> }
> diff --git a/mm/swap.c b/mm/swap.c
> index 5022dfe388ad..136acabbfab5 100644
> --- a/mm/swap.c
> +++ b/mm/swap.c
> @@ -231,7 +231,7 @@ static void pagevec_move_tail_fn(struct page *page, struct lruvec *lruvec)
> if (!PageUnevictable(page)) {
> del_page_from_lru_list(page, lruvec, page_lru(page));
> ClearPageActive(page);
> - add_page_to_lru_list_tail(page, lruvec, page_lru(page));
> + add_page_to_lru_list_tail(page, lruvec);
> __count_vm_events(PGROTATED, thp_nr_pages(page));
> }
> }
> @@ -313,8 +313,7 @@ static void __activate_page(struct page *page, struct lruvec *lruvec)
>
> del_page_from_lru_list(page, lruvec, lru);
> SetPageActive(page);
> - lru += LRU_ACTIVE;
Uh, actually, the page-to-lru helpers like page_lru(page) are __always_inline,
so in general this adds no extra instructions, except in a few places like here.
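For example, with page_lru() inlined at this call site, the new
add_page_to_lru_list(page, lruvec) expands to roughly the following
(hand-expanded here for illustration):

	enum lru_list lru;

	/*
	 * SetPageActive() was just called, so the PageActive() test
	 * always takes the +LRU_ACTIVE branch; the extra cost over
	 * the old "lru += LRU_ACTIVE" is the PageUnevictable() test
	 * and the flag re-reads.
	 */
	if (PageUnevictable(page))
		lru = LRU_UNEVICTABLE;
	else {
		lru = page_lru_base_type(page);
		if (PageActive(page))
			lru += LRU_ACTIVE;
	}
	update_lru_size(lruvec, lru, page_zonenum(page), thp_nr_pages(page));
	list_add(&page->lru, &lruvec->lists[lru]);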
> - add_page_to_lru_list(page, lruvec, lru);
> + add_page_to_lru_list(page, lruvec);
> trace_mm_lru_activate(page);
>
> __count_vm_events(PGACTIVATE, nr_pages);
> @@ -543,14 +542,14 @@ static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec)
> * It can make readahead confusing. But the race window
> * is _really_ small and it's a non-critical problem.
> */
> - add_page_to_lru_list(page, lruvec, lru);
> + add_page_to_lru_list(page, lruvec);
> SetPageReclaim(page);
> } else {
> /*
> * The page's writeback ended while it sat in the pagevec;
> * we move the page to the tail of the inactive list.
> */
> - add_page_to_lru_list_tail(page, lruvec, lru);
> + add_page_to_lru_list_tail(page, lruvec);
> __count_vm_events(PGROTATED, nr_pages);
> }
>
> @@ -570,7 +569,7 @@ static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec)
> del_page_from_lru_list(page, lruvec, lru + LRU_ACTIVE);
> ClearPageActive(page);
> ClearPageReferenced(page);
> - add_page_to_lru_list(page, lruvec, lru);
> + add_page_to_lru_list(page, lruvec);
>
> __count_vm_events(PGDEACTIVATE, nr_pages);
> __count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE,
> @@ -595,7 +594,7 @@ static void lru_lazyfree_fn(struct page *page, struct lruvec *lruvec)
> * anonymous pages
> */
> ClearPageSwapBacked(page);
> - add_page_to_lru_list(page, lruvec, LRU_INACTIVE_FILE);
> + add_page_to_lru_list(page, lruvec);
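This one still lands on LRU_INACTIVE_FILE: ClearPageSwapBacked() was just
called above, and page_lru_base_type() keys off exactly that bit. Roughly
(from include/linux/mm_inline.h, trimmed):

	static inline enum lru_list page_lru_base_type(struct page *page)
	{
		if (page_is_file_lru(page))	/* i.e. !PageSwapBacked(page) */
			return LRU_INACTIVE_FILE;
		return LRU_INACTIVE_ANON;
	}

With PageActive() and PageUnevictable() both clear here, page_lru() reduces
to this base type, so the hardcoded LRU_INACTIVE_FILE can go.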
>
> __count_vm_events(PGLAZYFREE, nr_pages);
> __count_memcg_events(lruvec_memcg(lruvec), PGLAZYFREE,
> @@ -1005,7 +1004,7 @@ static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec)
> __count_vm_events(UNEVICTABLE_PGCULLED, nr_pages);
> }
>
> - add_page_to_lru_list(page, lruvec, lru);
> + add_page_to_lru_list(page, lruvec);
> trace_mm_lru_insertion(page, lru);
> }
>
> diff --git a/mm/vmscan.c b/mm/vmscan.c
> index a174594e40f8..8fc8f2c9d7ec 100644
> --- a/mm/vmscan.c
> +++ b/mm/vmscan.c
> @@ -1865,7 +1865,7 @@ static unsigned noinline_for_stack move_pages_to_lru(struct lruvec *lruvec,
> * inhibits memcg migration).
> */
> VM_BUG_ON_PAGE(!lruvec_holds_page_lru_lock(page, lruvec), page);
> - add_page_to_lru_list(page, lruvec, page_lru(page));
> + add_page_to_lru_list(page, lruvec);
> nr_pages = thp_nr_pages(page);
> nr_moved += nr_pages;
> if (PageActive(page))
> @@ -4280,12 +4280,10 @@ void check_move_unevictable_pages(struct pagevec *pvec)
>
> lruvec = relock_page_lruvec_irq(page, lruvec);
> if (page_evictable(page) && PageUnevictable(page)) {
> - enum lru_list lru = page_lru_base_type(page);
> -
> VM_BUG_ON_PAGE(PageActive(page), page);
> ClearPageUnevictable(page);
> del_page_from_lru_list(page, lruvec, LRU_UNEVICTABLE);
> - add_page_to_lru_list(page, lruvec, lru);
And here: page_lru() redoes the PageUnevictable() and PageActive() tests even
though both are known to be clear at this point.
> + add_page_to_lru_list(page, lruvec);
> pgrescued += nr_pages;
> }
> SetPageLRU(page);
>