On 6/30/23 5:13 AM, Peng Zhang wrote:
From: ZhangPeng <zhangpeng362@xxxxxxxxxx>
After converting the last user of page_rmapping() to folio_raw_mapping(), we can
safely remove page_rmapping().
Signed-off-by: ZhangPeng <zhangpeng362@xxxxxxxxxx>
---
include/linux/mm.h | 1 -
mm/memory.c        | 2 +-
mm/util.c          | 6 ------
3 files changed, 1 insertion(+), 8 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 9ecb8b9c07f6..8c7eba7370b2 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2129,7 +2129,6 @@ static inline void *folio_address(const struct folio *folio)
	return page_address(&folio->page);
}

-extern void *page_rmapping(struct page *page);
extern pgoff_t __page_file_index(struct page *page);
/*
diff --git a/mm/memory.c b/mm/memory.c
index 21fab2727209..6921df44a99f 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2979,7 +2979,7 @@ static vm_fault_t fault_dirty_shared_page(struct vm_fault *vmf)
	 * pinned by vma->vm_file's reference. We rely on unlock_page()'s
	 * release semantics to prevent the compiler from undoing this copying.
	 */
Earlier in the function we have:

	struct page *page = vmf->page;
	...
	dirtied = set_page_dirty(page);
	VM_BUG_ON_PAGE(PageAnon(page), page);

Can this earlier part of the function be converted to folios, so that a folio can be passed directly to folio_raw_mapping()? (Something like the sketch below, after this hunk.)
-	mapping = page_rmapping(page);
+	mapping = folio_raw_mapping(page_folio(page));
	unlock_page(page);

	if (!page_mkwrite)
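
Not part of this patch, just an untested sketch of what I mean, reusing the existing folio helpers (page_folio(), folio_mark_dirty(), folio_test_anon(), folio_raw_mapping(), folio_unlock()). The top of fault_dirty_shared_page() could then look roughly like this:

	struct vm_area_struct *vma = vmf->vma;
	struct address_space *mapping;
	struct folio *folio = page_folio(vmf->page);
	bool dirtied;
	bool page_mkwrite = vma->vm_ops->page_mkwrite;

	dirtied = folio_mark_dirty(folio);
	VM_BUG_ON_FOLIO(folio_test_anon(folio), folio);
	/*
	 * Take a local copy of the address_space; folio->mapping may be
	 * zeroed by truncate after folio_unlock().
	 */
	mapping = folio_raw_mapping(folio);
	folio_unlock(folio);

With that, this hunk would just be "mapping = folio_raw_mapping(folio);" and the page_folio() call disappears from the call site.
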
diff --git a/mm/util.c b/mm/util.c
index dd12b9531ac4..5e9305189c3f 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -734,12 +734,6 @@ void *vcalloc(size_t n, size_t size)
}
EXPORT_SYMBOL(vcalloc);
-/* Neutral page->mapping pointer to address_space or anon_vma or other */
-void *page_rmapping(struct page *page)
-{
-	return folio_raw_mapping(page_folio(page));
-}
-
struct anon_vma *folio_anon_vma(struct folio *folio)
{
	unsigned long mapping = (unsigned long)folio->mapping;
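
For reference, callers now use folio_raw_mapping() from mm/internal.h directly; it is roughly the following, so the removed wrapper was just page_folio() plus masking off the PAGE_MAPPING_FLAGS bits:

static inline void *folio_raw_mapping(struct folio *folio)
{
	unsigned long mapping = (unsigned long)folio->mapping;

	return (void *)(mapping & ~PAGE_MAPPING_FLAGS);
}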