[RFC PATCH 10/16] mm: thp: split 1GB THPs at page reclaim.
From: Zi Yan
Date: Wed Sep 02 2020 - 14:07:33 EST
From: Zi Yan <ziy@xxxxxxxxxx>
We cannot swap out 1GB THPs, so split them before swapping them out.
Signed-off-by: Zi Yan <ziy@xxxxxxxxxx>
---
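Note for reviewers: below is a small standalone C model of the control flow this
patch adds to shrink_page_list(). It is not kernel code; the struct page, the
orders, and helpers such as split_to_pmd_order() and prepare_anon_thp_for_swap()
are toy stand-ins for compound_order(), compound_mapcount(),
can_split_huge_pud_page() and split_huge_pud_page_to_list(), and the error paths
are collapsed into a bool return. It only illustrates the intended ordering: an
unmapped PUD-order THP is split right away, and any PUD-order THP that survives
is split to PMD order before add_to_swap(), since swap entries are only allocated
for up to HPAGE_PMD_NR pages at a time.

/*
 * Toy model only -- not kernel code. "struct page" and the helpers
 * below are stand-ins for the real compound_order(),
 * compound_mapcount() and split_huge_pud_page_to_list().
 */
#include <stdbool.h>
#include <stdio.h>

#define HPAGE_PMD_ORDER	9	/* 2MB THP with 4KB base pages */
#define HPAGE_PUD_ORDER	18	/* 1GB THP with 4KB base pages */
#define HPAGE_PMD_NR	(1 << HPAGE_PMD_ORDER)

struct page {
	unsigned int order;
	int compound_mapcount;		/* 0: no PMD/PUD mapping left */
};

/* Stand-in for split_huge_pud_page_to_list(): 1GB -> 2MB pieces. */
static bool split_to_pmd_order(struct page *page)
{
	page->order = HPAGE_PMD_ORDER;
	return true;			/* pretend the split always succeeds */
}

/*
 * Ordering of the checks added to shrink_page_list():
 *  1. a PUD-order THP with no PUD mapping left is split right away,
 *     so tail pages may be freed without IO;
 *  2. any PUD-order THP that is still whole is split to PMD order
 *     before swap, because get_swap_page() never hands out more
 *     than HPAGE_PMD_NR entries at once.
 */
static bool prepare_anon_thp_for_swap(struct page *page)
{
	if (page->order == HPAGE_PUD_ORDER &&
	    !page->compound_mapcount && !split_to_pmd_order(page))
		return false;		/* activate_locked in the kernel */

	if (page->order == HPAGE_PUD_ORDER && !split_to_pmd_order(page))
		return false;		/* activate_locked in the kernel */

	return true;			/* proceed to add_to_swap() */
}

int main(void)
{
	struct page gb_thp = {
		.order = HPAGE_PUD_ORDER,
		.compound_mapcount = 1,	/* still PUD-mapped */
	};

	if (prepare_anon_thp_for_swap(&gb_thp))
		printf("now order %u, needs %d swap entries per 2MB piece\n",
		       gb_thp.order, HPAGE_PMD_NR);
	return 0;
}

Splitting only down to PMD order (rather than all the way to base pages) keeps
the existing CONFIG_THP_SWAP path in get_swap_page() usable for the resulting
2MB pieces, as the swap_slots.c hunk below shows.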
 mm/swap_slots.c |  2 ++
 mm/vmscan.c     | 58 +++++++++++++++++++++++++++++++++++++------------
 2 files changed, 46 insertions(+), 14 deletions(-)
diff --git a/mm/swap_slots.c b/mm/swap_slots.c
index 3e6453573a89..65b8742a0446 100644
--- a/mm/swap_slots.c
+++ b/mm/swap_slots.c
@@ -312,6 +312,8 @@ swp_entry_t get_swap_page(struct page *page)
entry.val = 0;
if (PageTransHuge(page)) {
+ if (compound_order(page) == HPAGE_PUD_ORDER)
+ return entry;
if (IS_ENABLED(CONFIG_THP_SWAP))
get_swap_pages(1, &entry, HPAGE_PMD_NR);
goto out;
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 99e1796eb833..617d15a041f8 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1240,23 +1240,49 @@ static unsigned int shrink_page_list(struct list_head *page_list,
if (!(sc->gfp_mask & __GFP_IO))
goto keep_locked;
if (PageTransHuge(page)) {
- /* cannot split THP, skip it */
- if (!can_split_huge_page(page, NULL))
- goto activate_locked;
- /*
- * Split pages without a PMD map right
- * away. Chances are some or all of the
- * tail pages can be freed without IO.
- */
- if (!compound_mapcount(page) &&
- split_huge_page_to_list(page,
- page_list))
+ if (compound_order(page) == HPAGE_PUD_ORDER) {
+ /* cannot split THP, skip it */
+ if (!can_split_huge_pud_page(page, NULL))
+ goto activate_locked;
+ /*
+ * Split pages without a PUD map right
+ * away. Chances are some or all of the
+ * tail pages can be freed without IO.
+ */
+ if (!compound_mapcount(page) &&
+ split_huge_pud_page_to_list(page,
+ page_list))
+ goto activate_locked;
+ }
+ if (compound_order(page) == HPAGE_PMD_ORDER) {
+ /* cannot split THP, skip it */
+ if (!can_split_huge_page(page, NULL))
+ goto activate_locked;
+ /*
+ * Split pages without a PMD map right
+ * away. Chances are some or all of the
+ * tail pages can be freed without IO.
+ */
+ if (!compound_mapcount(page) &&
+ split_huge_page_to_list(page,
+ page_list))
+ goto activate_locked;
+ }
+ }
+ /* Split PUD THPs before swapping */
+ if (compound_order(page) == HPAGE_PUD_ORDER) {
+ if (split_huge_pud_page_to_list(page, page_list))
goto activate_locked;
+ else {
+ sc->nr_scanned -= (nr_pages - HPAGE_PMD_NR);
+ nr_pages = HPAGE_PMD_NR;
+ }
}
if (!add_to_swap(page)) {
if (!PageTransHuge(page))
goto activate_locked_split;
/* Fallback to swap normal pages */
+ VM_BUG_ON_PAGE(compound_order(page) != HPAGE_PMD_ORDER, page);
if (split_huge_page_to_list(page,
page_list))
goto activate_locked;
@@ -1273,6 +1299,7 @@ static unsigned int shrink_page_list(struct list_head *page_list,
mapping = page_mapping(page);
}
} else if (unlikely(PageTransHuge(page))) {
+ VM_BUG_ON_PAGE(compound_order(page) != HPAGE_PMD_ORDER, page);
/* Split file THP */
if (split_huge_page_to_list(page, page_list))
goto keep_locked;
@@ -1298,9 +1325,12 @@ static unsigned int shrink_page_list(struct list_head *page_list,
enum ttu_flags flags = ttu_flags | TTU_BATCH_FLUSH;
bool was_swapbacked = PageSwapBacked(page);
- if (unlikely(PageTransHuge(page)))
- flags |= TTU_SPLIT_HUGE_PMD;
-
+ if (unlikely(PageTransHuge(page))) {
+ if (compound_order(page) == HPAGE_PMD_ORDER)
+ flags |= TTU_SPLIT_HUGE_PMD;
+ else if (compound_order(page) == HPAGE_PUD_ORDER)
+ flags |= TTU_SPLIT_HUGE_PUD;
+ }
if (!try_to_unmap(page, flags)) {
stat->nr_unmap_fail += nr_pages;
if (!was_swapbacked && PageSwapBacked(page))
--
2.28.0