[PATCH] mm: unify folio_test_anon()/folio_anon_vma() and use them

From: Yajun Deng
Date: Thu Jul 11 2024 - 09:08:11 EST


folio_get_anon_vma() and folio_lock_anon_vma_read() open-code the
extraction of the anon_vma from folio->mapping; the code is not as
clean as it could be. folio_test_anon() and folio_anon_vma() already
provide this functionality.

The two helpers serve the same purpose, but their checks are not
identical: folio_test_anon() only tests the PAGE_MAPPING_ANON bit,
while folio_anon_vma() requires PAGE_MAPPING_FLAGS to equal
PAGE_MAPPING_ANON.

Unify the check used by folio_test_anon() and folio_anon_vma(), and
use the helpers instead of the open-coded variants.
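
For context, the encoding both helpers decode is sketched below. This
is only an illustration of the logic after this patch (the sketch_
name is not part of the patch; the real definitions live in
include/linux/page-flags.h and mm/util.c):

	/*
	 * An anonymous folio stores its anon_vma pointer in folio->mapping
	 * with the PAGE_MAPPING_ANON bit set; KSM folios additionally set
	 * PAGE_MAPPING_MOVABLE. Testing the full PAGE_MAPPING_FLAGS mask,
	 * as folio_anon_vma() does, yields NULL for KSM folios.
	 */
	static inline struct anon_vma *sketch_folio_anon_vma(struct folio *folio)
	{
		unsigned long mapping = (unsigned long)READ_ONCE(folio->mapping);

		if ((mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
			return NULL;	/* file-backed, movable or KSM folio */
		return (struct anon_vma *)(mapping - PAGE_MAPPING_ANON);
	}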

Signed-off-by: Yajun Deng <yajun.deng@xxxxxxxxx>
---
include/linux/page-flags.h | 3 ++-
mm/rmap.c | 20 +++++++++-----------
mm/util.c | 6 +++---
3 files changed, 14 insertions(+), 15 deletions(-)

diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 5769fe6e4950..6e2197b22f5f 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -691,7 +691,8 @@ static __always_inline bool PageMappingFlags(const struct page *page)

static __always_inline bool folio_test_anon(const struct folio *folio)
{
- return ((unsigned long)folio->mapping & PAGE_MAPPING_ANON) != 0;
+ return ((unsigned long)folio->mapping & PAGE_MAPPING_FLAGS) ==
+ PAGE_MAPPING_ANON;
}

static __always_inline bool PageAnon(const struct page *page)
diff --git a/mm/rmap.c b/mm/rmap.c
index 86787df6e212..3f5d9879591c 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -499,16 +499,14 @@ void __init anon_vma_init(void)
struct anon_vma *folio_get_anon_vma(struct folio *folio)
{
struct anon_vma *anon_vma = NULL;
- unsigned long anon_mapping;

rcu_read_lock();
- anon_mapping = (unsigned long)READ_ONCE(folio->mapping);
- if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
+ anon_vma = folio_anon_vma(folio);
+ if (!anon_vma)
goto out;
if (!folio_mapped(folio))
goto out;

- anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
if (!atomic_inc_not_zero(&anon_vma->refcount)) {
anon_vma = NULL;
goto out;
@@ -550,12 +548,12 @@ struct anon_vma *folio_lock_anon_vma_read(struct folio *folio,
retry:
rcu_read_lock();
anon_mapping = (unsigned long)READ_ONCE(folio->mapping);
- if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
+ anon_vma = folio_anon_vma(folio);
+ if (!anon_vma)
goto out;
if (!folio_mapped(folio))
goto out;

- anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
root_anon_vma = READ_ONCE(anon_vma->root);
if (down_read_trylock(&root_anon_vma->rwsem)) {
/*
@@ -774,16 +772,16 @@ static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
{
struct folio *folio = page_folio(page);
+ struct anon_vma *anon_vma;
pgoff_t pgoff;

- if (folio_test_anon(folio)) {
- struct anon_vma *page__anon_vma = folio_anon_vma(folio);
+ anon_vma = folio_anon_vma(folio);
+ if (anon_vma) {
/*
* Note: swapoff's unuse_vma() is more efficient with this
* check, and needs it to match anon_vma when KSM is active.
*/
- if (!vma->anon_vma || !page__anon_vma ||
- vma->anon_vma->root != page__anon_vma->root)
+ if (!vma->anon_vma || vma->anon_vma->root != anon_vma->root)
return -EFAULT;
} else if (!vma->vm_file) {
return -EFAULT;
@@ -791,7 +789,7 @@ unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
return -EFAULT;
}

- /* The !page__anon_vma above handles KSM folios */
+ /* The !anon_vma above handles KSM folios */
pgoff = folio->index + folio_page_idx(folio, page);
return vma_address(vma, pgoff, 1);
}
diff --git a/mm/util.c b/mm/util.c
index bc488f0121a7..668dab9e27e6 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -782,11 +782,11 @@ EXPORT_SYMBOL(vcalloc_noprof);

struct anon_vma *folio_anon_vma(struct folio *folio)
{
- unsigned long mapping = (unsigned long)folio->mapping;
+ unsigned long mapping = (unsigned long)READ_ONCE(folio->mapping);

- if ((mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
+ if (!folio_test_anon(folio))
return NULL;
- return (void *)(mapping - PAGE_MAPPING_ANON);
+ return (struct anon_vma *)(mapping - PAGE_MAPPING_ANON);
}

/**
--
2.25.1