[PATCH v10 33/33] mm: Add folio_mapped

From: Matthew Wilcox (Oracle)
Date: Tue May 11 2021 - 18:11:52 EST


folio_mapped() is the folio equivalent of page_mapped(). It is slightly
shorter, as we do not need to handle the PageTail() case. Reimplement
page_mapped() as a wrapper around folio_mapped().

Signed-off-by: Matthew Wilcox (Oracle) <willy@xxxxxxxxxxxxx>
---
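A minimal caller sketch, not part of this patch: the helper name
example_can_discard() is invented for illustration, and it assumes the
page_folio() conversion used elsewhere in this series. Code that still
holds a struct page can convert once and test the folio directly,
rather than going through the page_mapped() compatibility wrapper:

	/*
	 * Hypothetical helper: a folio that is not mapped into any
	 * page tables is a candidate for discard.  Callers that only
	 * have a struct page get the same answer from page_mapped().
	 */
	static bool example_can_discard(struct page *page)
	{
		struct folio *folio = page_folio(page);

		return !folio_mapped(folio);
	}
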
 include/linux/mm.h |  1 +
 mm/folio-compat.c  |  6 ++++++
 mm/util.c          | 29 ++++++++++++++++-------------
 3 files changed, 23 insertions(+), 13 deletions(-)

diff --git a/include/linux/mm.h b/include/linux/mm.h
index 6e3dde81ecc9..4686107a4f96 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1779,6 +1779,7 @@ static inline pgoff_t page_index(struct page *page)
 }
 
 bool page_mapped(struct page *page);
+bool folio_mapped(struct folio *folio);
 
 /*
  * Return true only if the page has been allocated with
diff --git a/mm/folio-compat.c b/mm/folio-compat.c
index 3c83f03b80d7..7044fcc8a8aa 100644
--- a/mm/folio-compat.c
+++ b/mm/folio-compat.c
@@ -35,3 +35,9 @@ void wait_for_stable_page(struct page *page)
 	return folio_wait_stable(page_folio(page));
 }
 EXPORT_SYMBOL_GPL(wait_for_stable_page);
+
+bool page_mapped(struct page *page)
+{
+	return folio_mapped(page_folio(page));
+}
+EXPORT_SYMBOL(page_mapped);
diff --git a/mm/util.c b/mm/util.c
index 245f5c7bedae..c2d22145ebae 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -652,28 +652,31 @@ void *page_rmapping(struct page *page)
 	return __page_rmapping(page);
 }
 
-/*
- * Return true if this page is mapped into pagetables.
- * For compound page it returns true if any subpage of compound page is mapped.
+/**
+ * folio_mapped - Is this folio mapped into userspace?
+ * @folio: The folio.
+ *
+ * Return: true if any page in this folio is mapped into pagetables.
  */
-bool page_mapped(struct page *page)
+bool folio_mapped(struct folio *folio)
 {
-	int i;
+	int i, nr;
 
-	if (likely(!PageCompound(page)))
-		return atomic_read(&page->_mapcount) >= 0;
-	page = compound_head(page);
-	if (atomic_read(compound_mapcount_ptr(page)) >= 0)
+	if (folio_single(folio))
+		return atomic_read(&folio->_mapcount) >= 0;
+	if (atomic_read(compound_mapcount_ptr(&folio->page)) >= 0)
 		return true;
-	if (PageHuge(page))
+	if (folio_hugetlb(folio))
 		return false;
-	for (i = 0; i < compound_nr(page); i++) {
-		if (atomic_read(&page[i]._mapcount) >= 0)
+
+	nr = folio_nr_pages(folio);
+	for (i = 0; i < nr; i++) {
+		if (atomic_read(&folio_page(folio, i)->_mapcount) >= 0)
 			return true;
 	}
 	return false;
 }
-EXPORT_SYMBOL(page_mapped);
+EXPORT_SYMBOL(folio_mapped);
 
 struct anon_vma *page_anon_vma(struct page *page)
 {
--
2.30.2