[PATCH v1 5/6] mm/rmap: simplify PageAnonExclusive sanity checks when adding anon rmap

From: David Hildenbrand
Date: Wed Sep 13 2023 - 08:52:29 EST


Let's sanity-check PageAnonExclusive vs. mapcount in page_add_anon_rmap()
and hugepage_add_anon_rmap() after setting PageAnonExclusive simply by
re-reading the mapcounts: an anon page that is mapped more than once must
never be PageAnonExclusive, so re-reading the mapcount after setting the
bit catches the same class of bugs as the old "!first" checks, without
having to track whether this was the first mapping.

We can stop initializing the "first" variable in page_add_anon_rmap()
and no longer need an atomic_inc_and_test() in hugepage_add_anon_rmap().

While at it, switch to VM_WARN_ON_FOLIO().
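For illustration, a minimal userspace model of the new check ordering
(this sketch is not kernel code: struct fake_page, fake_add_anon_rmap()
and its field names are made up here; like the kernel's _mapcount /
_entire_mapcount, the modeled mapcount is biased, i.e. -1 == unmapped):

	#include <assert.h>
	#include <stdatomic.h>
	#include <stdbool.h>

	struct fake_page {
		atomic_int mapcount;	/* biased: -1 == unmapped, 0 == mapped once */
		bool anon_exclusive;	/* models PageAnonExclusive() */
	};

	static void fake_add_anon_rmap(struct fake_page *page, bool exclusive)
	{
		/* Plain increment; we no longer track "first" for the check. */
		atomic_fetch_add(&page->mapcount, 1);
		if (exclusive)
			page->anon_exclusive = true;
		/* Re-read the mapcount: exclusive implies mapped exactly once. */
		assert(!(atomic_load(&page->mapcount) + 1 > 1 &&
			 page->anon_exclusive));
	}

	int main(void)
	{
		struct fake_page page = { .mapcount = -1, .anon_exclusive = false };

		fake_add_anon_rmap(&page, true);  /* first mapping, exclusive: OK */
		page.anon_exclusive = false;      /* e.g., sharing clears the bit */
		fake_add_anon_rmap(&page, false); /* second mapping: OK again */
		return 0;
	}

Mapping the page a second time without clearing the modeled bit would
trip the assert, mirroring what the new VM_WARN_ON_FOLIO() reports.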

Signed-off-by: David Hildenbrand <david@xxxxxxxxxx>
---
mm/rmap.c | 14 ++++++--------
1 file changed, 6 insertions(+), 8 deletions(-)

diff --git a/mm/rmap.c b/mm/rmap.c
index 489c142d073b..10d477a0991f 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1199,7 +1199,7 @@ void page_add_anon_rmap(struct page *page, struct vm_area_struct *vma,
atomic_t *mapped = &folio->_nr_pages_mapped;
int nr = 0, nr_pmdmapped = 0;
bool compound = flags & RMAP_COMPOUND;
- bool first = true;
+ bool first;

/* Is page being mapped by PTE? Is this its first map to be added? */
if (likely(!compound)) {
@@ -1228,9 +1228,6 @@ void page_add_anon_rmap(struct page *page, struct vm_area_struct *vma,
}
}

- VM_BUG_ON_PAGE(!first && (flags & RMAP_EXCLUSIVE), page);
- VM_BUG_ON_PAGE(!first && PageAnonExclusive(page), page);
-
if (nr_pmdmapped)
__lruvec_stat_mod_folio(folio, NR_ANON_THPS, nr_pmdmapped);
if (nr)
@@ -1252,6 +1249,8 @@ void page_add_anon_rmap(struct page *page, struct vm_area_struct *vma,
}
if (flags & RMAP_EXCLUSIVE)
SetPageAnonExclusive(page);
+ VM_WARN_ON_FOLIO(page_mapcount(page) > 1 && PageAnonExclusive(page),
+ folio);

mlock_vma_folio(folio, vma, compound);
}
@@ -2532,15 +2531,14 @@ void hugepage_add_anon_rmap(struct page *page, struct vm_area_struct *vma,
unsigned long address, rmap_t flags)
{
struct folio *folio = page_folio(page);
- int first;

VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);

- first = atomic_inc_and_test(&folio->_entire_mapcount);
- VM_BUG_ON_PAGE(!first && (flags & RMAP_EXCLUSIVE), page);
- VM_BUG_ON_PAGE(!first && PageAnonExclusive(page), page);
+ atomic_inc(&folio->_entire_mapcount);
if (flags & RMAP_EXCLUSIVE)
SetPageAnonExclusive(page);
+ VM_WARN_ON_FOLIO(folio_entire_mapcount(folio) > 1 &&
+ PageAnonExclusive(page), folio);
}

void hugepage_add_new_anon_rmap(struct folio *folio,
--
2.41.0