[PATCH v9 44/96] mm/rmap: Add folio_mkclean

From: Matthew Wilcox (Oracle)
Date: Wed May 05 2021 - 12:03:13 EST


Transform page_mkclean() into folio_mkclean() and add a page_mkclean()
wrapper around folio_mkclean().

folio_mkclean() is 15 bytes smaller than page_mkclean(), but the kernel
is enlarged by 33 bytes due to inlining page_folio() into each caller.
This will go away once the callers are converted to use folio_mkclean().

Signed-off-by: Matthew Wilcox (Oracle) <willy@xxxxxxxxxxxxx>
---
include/linux/rmap.h | 10 ++++++----
mm/rmap.c | 12 ++++++------
2 files changed, 12 insertions(+), 10 deletions(-)

diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index def5c62c93b3..edb006bc4159 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -233,7 +233,7 @@ unsigned long page_address_in_vma(struct page *, struct vm_area_struct *);
*
* returns the number of cleaned PTEs.
*/
-int page_mkclean(struct page *);
+int folio_mkclean(struct folio *);

/*
* called in munlock()/munmap() path to check for other vmas holding
@@ -291,12 +291,14 @@ static inline int page_referenced(struct page *page, int is_locked,

#define try_to_unmap(page, refs) false

-static inline int page_mkclean(struct page *page)
+static inline int folio_mkclean(struct folio *folio)
{
return 0;
}
-
-
#endif /* CONFIG_MMU */

+static inline int page_mkclean(struct page *page)
+{
+ return folio_mkclean(page_folio(page));
+}
#endif /* _LINUX_RMAP_H */
diff --git a/mm/rmap.c b/mm/rmap.c
index 693a610e181d..e29dbbc880d7 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -983,7 +983,7 @@ static bool invalid_mkclean_vma(struct vm_area_struct *vma, void *arg)
return true;
}

-int page_mkclean(struct page *page)
+int folio_mkclean(struct folio *folio)
{
int cleaned = 0;
struct address_space *mapping;
@@ -993,20 +993,20 @@ int page_mkclean(struct page *page)
.invalid_vma = invalid_mkclean_vma,
};

- BUG_ON(!PageLocked(page));
+ BUG_ON(!folio_locked(folio));

- if (!page_mapped(page))
+ if (!folio_mapped(folio))
return 0;

- mapping = page_mapping(page);
+ mapping = folio_mapping(folio);
if (!mapping)
return 0;

- rmap_walk(page, &rwc);
+ rmap_walk(&folio->page, &rwc);

return cleaned;
}
-EXPORT_SYMBOL_GPL(page_mkclean);
+EXPORT_SYMBOL_GPL(folio_mkclean);

/**
* page_move_anon_rmap - move a page to our anon_vma
--
2.30.2