[PATCHv4 5/8] khugepaged: Allow to collapse a page shared across fork
From: Kirill A. Shutemov
Date: Thu Apr 16 2020 - 12:01:10 EST
A page can be included in the collapse as long as it doesn't have extra
pins (from GUP or otherwise).
The logic that checks the refcount is moved to a separate function,
is_refcount_suitable().
For pages in swap cache, add compound_nr(page) to the expected refcount,
in order to handle the compound page case. This is in preparation for
the following patch.
The VM_BUG_ON_PAGE() in __collapse_huge_page_copy() is removed, as the
invariant it checks (page_mapcount(src_page) == 1) is no longer valid:
the source page can now be mapped multiple times.
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@xxxxxxxxxxxxxxx>
Reviewed-by: William Kucharski <william.kucharski@xxxxxxxxxx>
Reviewed-and-Tested-by: Zi Yan <ziy@xxxxxxxxxx>
Acked-by: Yang Shi <yang.shi@xxxxxxxxxxxxxxxxx>
Reviewed-by: John Hubbard <jhubbard@xxxxxxxxxx>
---
mm/khugepaged.c | 41 ++++++++++++++++++++++++++++++-----------
1 file changed, 30 insertions(+), 11 deletions(-)
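
For reviewers who want to poke at the accounting rule outside the
kernel, below is a minimal stand-alone model of the check. The struct,
names, and scenario counts are invented for illustration; only the
arithmetic mirrors is_refcount_suitable() in the patch:

/*
 * Not kernel code: a user-space model of the is_refcount_suitable()
 * accounting, using plain ints instead of struct page.
 */
#include <stdbool.h>
#include <stdio.h>

struct page_model {
	int refcount;      /* page_count(): all refs, incl. GUP pins */
	int mapcount;      /* total_mapcount(): PTE maps across mms */
	bool in_swapcache; /* PageSwapCache() */
	int nr_subpages;   /* compound_nr(): 1 base page, 512 PMD THP */
};

/*
 * A page is collapsible iff every reference is explained by its
 * mappings (plus the swap-cache reference, if any); anything left
 * over is an external pin such as GUP.
 */
static bool refcount_suitable(const struct page_model *p)
{
	int expected = p->mapcount;

	if (p->in_swapcache)
		expected += p->nr_subpages;

	return p->refcount == expected;
}

int main(void)
{
	/* Base page mapped by parent and child after fork(), also in
	 * swap cache: 2 (mappings) + 1 (swap cache) = 3 expected. */
	struct page_model shared = { .refcount = 3, .mapcount = 2,
				     .in_swapcache = true, .nr_subpages = 1 };
	/* Same page with one extra GUP pin: refcount 4 > expected 3. */
	struct page_model pinned = { .refcount = 4, .mapcount = 2,
				     .in_swapcache = true, .nr_subpages = 1 };

	printf("shared: %s\n", refcount_suitable(&shared) ? "collapse" : "skip");
	printf("pinned: %s\n", refcount_suitable(&pinned) ? "collapse" : "skip");
	return 0;
}

Note that the old page_count(page) != 1 + PageSwapCache(page) test would
have rejected the fork-shared case above even with no pins (refcount 3
vs. expected 2); the new rule accepts it because both mappings are
accounted for.
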
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index adc686f13447..c3eb9d0d53f8 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -526,6 +526,24 @@ static void release_pte_pages(pte_t *pte, pte_t *_pte)
 	}
 }
 
+static bool is_refcount_suitable(struct page *page)
+{
+	int expected_refcount, refcount;
+
+	refcount = page_count(page);
+	expected_refcount = total_mapcount(page);
+	if (PageSwapCache(page))
+		expected_refcount += compound_nr(page);
+
+	if (IS_ENABLED(CONFIG_DEBUG_VM) && expected_refcount > refcount) {
+		pr_err("expected_refcount (%d) > refcount (%d)\n",
+		       expected_refcount, refcount);
+		dump_page(page, "Unexpected refcount");
+	}
+
+	return refcount == expected_refcount;
+}
+
 static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
					unsigned long address,
					pte_t *pte)
@@ -578,11 +596,17 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
 		}
 
 		/*
-		 * cannot use mapcount: can't collapse if there's a gup pin.
-		 * The page must only be referenced by the scanned process
-		 * and page swap cache.
+		 * Check if the page has any GUP (or other external) pins.
+		 *
+		 * The page table that maps the page has already been unlinked
+		 * from the page table tree and this process cannot get
+		 * an additional pin on the page.
+		 *
+		 * New pins can come later if the page is shared across fork,
+		 * but not from this process. The other process cannot write to
+		 * the page, only trigger CoW.
 		 */
-		if (page_count(page) != 1 + PageSwapCache(page)) {
+		if (!is_refcount_suitable(page)) {
 			unlock_page(page);
 			result = SCAN_PAGE_COUNT;
 			goto out;
@@ -669,7 +693,6 @@ static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
 		} else {
 			src_page = pte_page(pteval);
 			copy_user_highpage(page, src_page, address, vma);
-			VM_BUG_ON_PAGE(page_mapcount(src_page) != 1, src_page);
 			release_pte_page(src_page);
 			/*
 			 * ptl mostly unnecessary, but preempt has to
@@ -1220,12 +1243,8 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
 			goto out_unmap;
 		}
 
-		/*
-		 * cannot use mapcount: can't collapse if there's a gup pin.
-		 * The page must only be referenced by the scanned process
-		 * and page swap cache.
-		 */
-		if (page_count(page) != 1 + PageSwapCache(page)) {
+		/* Check if the page has any GUP (or other external) pins */
+		if (!is_refcount_suitable(page)) {
 			result = SCAN_PAGE_COUNT;
 			goto out_unmap;
 		}
--
2.26.1