[PATCH v2 10/27] mm/util: Add folio_mapping and folio_file_mapping

From: Matthew Wilcox (Oracle)
Date: Mon Jan 18 2021 - 12:22:52 EST


These are the folio equivalents of page_mapping() and page_file_mapping().
Adjust page_file_mapping() and page_mapping_file() to use folios
internally.
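
For illustration only, a minimal sketch of how a caller might move from
the page API to the new folio API; the helper functions below are
hypothetical and not part of this patch:

	#include <linux/mm.h>

	/* Hypothetical caller using the existing page-based API. */
	static bool example_page_is_file_backed(struct page *page)
	{
		/* page_mapping() now does page_folio() internally. */
		return page_mapping(page) != NULL;
	}

	/*
	 * Hypothetical caller that already has a folio: calling
	 * folio_mapping() directly avoids the repeated compound_head()
	 * lookup hidden inside page_mapping().
	 */
	static bool example_folio_is_file_backed(struct folio *folio)
	{
		return folio_mapping(folio) != NULL;
	}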

Signed-off-by: Matthew Wilcox (Oracle) <willy@xxxxxxxxxxxxx>
---
include/linux/mm.h | 23 +++++++++++++++--------
mm/swapfile.c | 6 +++---
mm/util.c | 20 ++++++++++----------
3 files changed, 28 insertions(+), 21 deletions(-)

diff --git a/include/linux/mm.h b/include/linux/mm.h
index 380328930d6c..46cee44c0c68 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1586,17 +1586,25 @@ void page_address_init(void);

extern void *page_rmapping(struct page *page);
extern struct anon_vma *page_anon_vma(struct page *page);
-extern struct address_space *page_mapping(struct page *page);
+struct address_space *folio_mapping(struct folio *);
+struct address_space *__folio_file_mapping(struct folio *);

-extern struct address_space *__page_file_mapping(struct page *);
+static inline struct address_space *page_mapping(struct page *page)
+{
+ return folio_mapping(page_folio(page));
+}

-static inline
-struct address_space *page_file_mapping(struct page *page)
+static inline struct address_space *folio_file_mapping(struct folio *folio)
{
- if (unlikely(PageSwapCache(page)))
- return __page_file_mapping(page);
+ if (unlikely(FolioSwapCache(folio)))
+ return __folio_file_mapping(folio);

- return page->mapping;
+ return folio->page.mapping;
+}
+
+static inline struct address_space *page_file_mapping(struct page *page)
+{
+ return folio_file_mapping(page_folio(page));
}

extern pgoff_t __page_file_index(struct page *page);
@@ -1613,7 +1621,6 @@ static inline pgoff_t page_index(struct page *page)
}

bool page_mapped(struct page *page);
-struct address_space *page_mapping(struct page *page);
struct address_space *page_mapping_file(struct page *page);

/*
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 9fffc5af29d1..ddb734fccfc3 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -3551,11 +3551,11 @@ struct swap_info_struct *page_swap_info(struct page *page)
/*
* out-of-line __page_file_ methods to avoid include hell.
*/
-struct address_space *__page_file_mapping(struct page *page)
+struct address_space *__folio_file_mapping(struct folio *folio)
{
- return page_swap_info(page)->swap_file->f_mapping;
+ return page_swap_info(&folio->page)->swap_file->f_mapping;
}
-EXPORT_SYMBOL_GPL(__page_file_mapping);
+EXPORT_SYMBOL_GPL(__folio_file_mapping);

pgoff_t __page_file_index(struct page *page)
{
diff --git a/mm/util.c b/mm/util.c
index c37e24d5fa43..c052c39b9f1c 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -686,39 +686,39 @@ struct anon_vma *page_anon_vma(struct page *page)
return __page_rmapping(page);
}

-struct address_space *page_mapping(struct page *page)
+struct address_space *folio_mapping(struct folio *folio)
{
struct address_space *mapping;

- page = compound_head(page);
-
/* This happens if someone calls flush_dcache_page on slab page */
- if (unlikely(PageSlab(page)))
+ if (unlikely(FolioSlab(folio)))
return NULL;

- if (unlikely(PageSwapCache(page))) {
+ if (unlikely(FolioSwapCache(folio))) {
swp_entry_t entry;

- entry.val = page_private(page);
+ entry.val = folio_private(folio);
return swap_address_space(entry);
}

- mapping = page->mapping;
+ mapping = folio->page.mapping;
if ((unsigned long)mapping & PAGE_MAPPING_ANON)
return NULL;

return (void *)((unsigned long)mapping & ~PAGE_MAPPING_FLAGS);
}
-EXPORT_SYMBOL(page_mapping);
+EXPORT_SYMBOL(folio_mapping);

/*
* For file cache pages, return the address_space, otherwise return NULL
*/
struct address_space *page_mapping_file(struct page *page)
{
- if (unlikely(PageSwapCache(page)))
+ struct folio *folio = page_folio(page);
+
+ if (unlikely(FolioSwapCache(folio)))
return NULL;
- return page_mapping(page);
+ return folio_mapping(folio);
}

/* Slow path of page_mapcount() for compound pages */
--
2.29.2