[PATCH resend 3/3] mm: fix LRU balancing effect of new transparent huge pages
From: Shakeel Butt
Date: Wed May 27 2020 - 14:48:31 EST
From: Johannes Weiner <hannes@xxxxxxxxxxx>
Currently, THPs are counted as single pages until they are split right
before being swapped out. However, at that point the VM is already in
the middle of reclaim, and adjusting the LRU balance then is useless.
Always account THPs by the number of base pages, and remove the fixup
from the splitting path.
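
To illustrate the intended effect, here is a minimal userspace sketch
(not kernel code: the struct fields and HPAGE_PMD_NR mirror the kernel
names, everything else is simplified for the example):

  #include <stdio.h>

  /* 512 assumes 2MB THPs over 4K base pages, as on x86-64 */
  #define HPAGE_PMD_NR 512

  struct zone_reclaim_stat {
          unsigned long recent_scanned[2];
          unsigned long recent_rotated[2];
  };

  /* After this patch, callers pass the number of base pages. */
  static void update_page_reclaim_stat(struct zone_reclaim_stat *stat,
                                       int file, int rotated, int nr_pages)
  {
          stat->recent_scanned[file] += nr_pages;
          if (rotated)
                  stat->recent_rotated[file] += nr_pages;
  }

  int main(void)
  {
          struct zone_reclaim_stat stat = { { 0 }, { 0 } };

          /*
           * Before: a freshly faulted anon THP bumped the counters by 1.
           * After: it bumps them by its 512 base pages, so the anon/file
           * LRU balance reflects the THP's real weight up front instead
           * of being fixed up when the page is split during reclaim.
           */
          update_page_reclaim_stat(&stat, 0, 1, HPAGE_PMD_NR);

          printf("scanned=%lu rotated=%lu\n",
                 stat.recent_scanned[0], stat.recent_rotated[0]);
          return 0;
  }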
Signed-off-by: Johannes Weiner <hannes@xxxxxxxxxxx>
Signed-off-by: Shakeel Butt <shakeelb@xxxxxxxxxx>
---
mm/swap.c | 23 +++++++++--------------
1 file changed, 9 insertions(+), 14 deletions(-)
diff --git a/mm/swap.c b/mm/swap.c
index 4eb179ee0b72..b75c0ce90418 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -262,14 +262,14 @@ void rotate_reclaimable_page(struct page *page)
}
}
-static void update_page_reclaim_stat(struct lruvec *lruvec,
- int file, int rotated)
+static void update_page_reclaim_stat(struct lruvec *lruvec, int file,
+ int rotated, int nr_pages)
{
struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
- reclaim_stat->recent_scanned[file]++;
+ reclaim_stat->recent_scanned[file] += nr_pages;
if (rotated)
- reclaim_stat->recent_rotated[file]++;
+ reclaim_stat->recent_rotated[file] += nr_pages;
}
static void __activate_page(struct page *page, struct lruvec *lruvec,
@@ -288,7 +288,7 @@ static void __activate_page(struct page *page, struct lruvec *lruvec,
__count_vm_events(PGACTIVATE, nr_pages);
__count_memcg_events(lruvec_memcg(lruvec), PGACTIVATE, nr_pages);
- update_page_reclaim_stat(lruvec, file, 1);
+ update_page_reclaim_stat(lruvec, file, 1, nr_pages);
}
}
@@ -546,7 +546,7 @@ static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec,
__count_vm_events(PGDEACTIVATE, nr_pages);
__count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE, nr_pages);
}
- update_page_reclaim_stat(lruvec, file, 0);
+ update_page_reclaim_stat(lruvec, file, 0, nr_pages);
}
static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec,
@@ -564,7 +564,7 @@ static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec,
__count_vm_events(PGDEACTIVATE, nr_pages);
__count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE, nr_pages);
- update_page_reclaim_stat(lruvec, file, 0);
+ update_page_reclaim_stat(lruvec, file, 0, nr_pages);
}
}
@@ -590,7 +590,7 @@ static void lru_lazyfree_fn(struct page *page, struct lruvec *lruvec,
__count_vm_events(PGLAZYFREE, nr_pages);
__count_memcg_events(lruvec_memcg(lruvec), PGLAZYFREE, nr_pages);
- update_page_reclaim_stat(lruvec, 1, 0);
+ update_page_reclaim_stat(lruvec, 1, 0, nr_pages);
}
}
@@ -899,8 +899,6 @@ EXPORT_SYMBOL(__pagevec_release);
void lru_add_page_tail(struct page *page, struct page *page_tail,
struct lruvec *lruvec, struct list_head *list)
{
- const int file = 0;
-
VM_BUG_ON_PAGE(!PageHead(page), page);
VM_BUG_ON_PAGE(PageCompound(page_tail), page);
VM_BUG_ON_PAGE(PageLRU(page_tail), page);
@@ -926,9 +924,6 @@ void lru_add_page_tail(struct page *page, struct page *page_tail,
add_page_to_lru_list_tail(page_tail, lruvec,
page_lru(page_tail));
}
-
- if (!PageUnevictable(page))
- update_page_reclaim_stat(lruvec, file, PageActive(page_tail));
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
@@ -973,7 +968,7 @@ static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec,
if (page_evictable(page)) {
lru = page_lru(page);
update_page_reclaim_stat(lruvec, page_is_file_lru(page),
- PageActive(page));
+ PageActive(page), nr_pages);
if (was_unevictable)
__count_vm_events(UNEVICTABLE_PGRESCUED, nr_pages);
} else {
--
2.27.0.rc0.183.gde8f92d652-goog