[PATCH v2 08/10] mm: rmap: make try_to_unmap() void function
From: Hugh Dickins
Date: Wed Jun 09 2021 - 00:26:41 EST
From: Yang Shi <shy828301@xxxxxxxxx>
Currently try_to_unmap() returns a bool by checking page_mapcount(); however,
this may return a false positive, since page_mapcount() does not check all
subpages of a compound page. total_mapcount() could be used instead, but it
is more costly since it traverses all subpages.

Actually, most callers of try_to_unmap() don't care about the return value at
all. So the callers that do care just need to check whether the page is still
mapped via page_mapped() when necessary. And page_mapped() does bail out
early when it finds a mapped subpage.
Suggested-by: Hugh Dickins <hughd@xxxxxxxxxx>
Signed-off-by: Yang Shi <shy828301@xxxxxxxxx>
Acked-by: Minchan Kim <minchan@xxxxxxxxxx>
Reviewed-by: Shakeel Butt <shakeelb@xxxxxxxxxx>
Acked-by: Kirill A. Shutemov <kirill.shutemov@xxxxxxxxxxxxxxx>
Signed-off-by: Hugh Dickins <hughd@xxxxxxxxxx>
---
Patch inserted since the v1 series was posted.
v5: Rediffed by Hugh to fit before 7/7 in mm/thp series; akpm fixed grammar.
v4: Updated the comment of try_to_unmap() per Minchan.
Minor fix and patch reorder per Hugh.
Collected ack tag from Hugh.
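
For illustration only (not part of the patch): a minimal sketch of the calling
convention this change moves to. The helper name unmap_and_check() is
hypothetical; the pattern matches what shrink_page_list() and
hwpoison_user_mappings() do in the hunks below.

	/*
	 * Hypothetical helper, for illustration only: try_to_unmap() is void
	 * after this patch, so a caller that needs the outcome checks
	 * page_mapped() afterwards.
	 */
	static bool unmap_and_check(struct page *page, enum ttu_flags flags)
	{
		try_to_unmap(page, flags);	/* no return value any more */
		/* page_mapped() bails out as soon as it finds a mapped subpage */
		return !page_mapped(page);
	}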
 include/linux/rmap.h |  2 +-
 mm/memory-failure.c  | 15 +++++++--------
 mm/rmap.c            | 15 ++++-----------
 mm/vmscan.c          |  3 ++-
 4 files changed, 14 insertions(+), 21 deletions(-)
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index 8d04e7deedc6..ed31a559e857 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -195,7 +195,7 @@ static inline void page_dup_rmap(struct page *page, bool compound)
int page_referenced(struct page *, int is_locked,
struct mem_cgroup *memcg, unsigned long *vm_flags);
-bool try_to_unmap(struct page *, enum ttu_flags flags);
+void try_to_unmap(struct page *, enum ttu_flags flags);
/* Avoid racy checks */
#define PVMW_SYNC (1 << 0)
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 85ad98c00fd9..b6806e446567 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1063,7 +1063,7 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
enum ttu_flags ttu = TTU_IGNORE_MLOCK;
struct address_space *mapping;
LIST_HEAD(tokill);
- bool unmap_success = true;
+ bool unmap_success;
int kill = 1, forcekill;
struct page *hpage = *hpagep;
bool mlocked = PageMlocked(hpage);
@@ -1126,7 +1126,7 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
collect_procs(hpage, &tokill, flags & MF_ACTION_REQUIRED);
if (!PageHuge(hpage)) {
- unmap_success = try_to_unmap(hpage, ttu);
+ try_to_unmap(hpage, ttu);
} else {
if (!PageAnon(hpage)) {
/*
@@ -1138,17 +1138,16 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
*/
mapping = hugetlb_page_mapping_lock_write(hpage);
if (mapping) {
- unmap_success = try_to_unmap(hpage,
- ttu|TTU_RMAP_LOCKED);
+ try_to_unmap(hpage, ttu|TTU_RMAP_LOCKED);
i_mmap_unlock_write(mapping);
- } else {
+ } else
pr_info("Memory failure: %#lx: could not lock mapping for mapped huge page\n", pfn);
- unmap_success = false;
- }
} else {
- unmap_success = try_to_unmap(hpage, ttu);
+ try_to_unmap(hpage, ttu);
}
}
+
+ unmap_success = !page_mapped(hpage);
if (!unmap_success)
pr_err("Memory failure: %#lx: failed to unmap page (mapcount=%d)\n",
pfn, page_mapcount(hpage));
diff --git a/mm/rmap.c b/mm/rmap.c
index e05c300048e6..f9fd5bc54f0a 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1405,7 +1405,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
/*
* When racing against e.g. zap_pte_range() on another cpu,
* in between its ptep_get_and_clear_full() and page_remove_rmap(),
- * try_to_unmap() may return false when it is about to become true,
+ * try_to_unmap() may return before page_mapped() has become false,
* if page table locking is skipped: use TTU_SYNC to wait for that.
*/
if (flags & TTU_SYNC)
@@ -1756,9 +1756,10 @@ static int page_not_mapped(struct page *page)
* Tries to remove all the page table entries which are mapping this
* page, used in the pageout path. Caller must hold the page lock.
*
- * If unmap is successful, return true. Otherwise, false.
+ * It is the caller's responsibility to check if the page is still
+ * mapped when needed (use TTU_SYNC to prevent accounting races).
*/
-bool try_to_unmap(struct page *page, enum ttu_flags flags)
+void try_to_unmap(struct page *page, enum ttu_flags flags)
{
struct rmap_walk_control rwc = {
.rmap_one = try_to_unmap_one,
@@ -1783,14 +1784,6 @@ bool try_to_unmap(struct page *page, enum ttu_flags flags)
rmap_walk_locked(page, &rwc);
else
rmap_walk(page, &rwc);
-
- /*
- * When racing against e.g. zap_pte_range() on another cpu,
- * in between its ptep_get_and_clear_full() and page_remove_rmap(),
- * try_to_unmap() may return false when it is about to become true,
- * if page table locking is skipped: use TTU_SYNC to wait for that.
- */
- return !page_mapcount(page);
}
/**
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 5199b9696bab..db49cb1dc052 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1499,7 +1499,8 @@ static unsigned int shrink_page_list(struct list_head *page_list,
if (unlikely(PageTransHuge(page)))
flags |= TTU_SPLIT_HUGE_PMD;
- if (!try_to_unmap(page, flags)) {
+ try_to_unmap(page, flags);
+ if (page_mapped(page)) {
stat->nr_unmap_fail += nr_pages;
if (!was_swapbacked && PageSwapBacked(page))
stat->nr_lazyfree_fail += nr_pages;
--
2.26.2