[PATCH v13 109/137] mm/filemap: Add filemap_remove_folio and __filemap_remove_folio

From: Matthew Wilcox (Oracle)
Date: Mon Jul 12 2021 - 00:05:15 EST


Reimplement __delete_from_page_cache() as a wrapper around
__filemap_remove_folio() and delete_from_page_cache() as a wrapper
around filemap_remove_folio(). Remove the EXPORT_SYMBOL as
delete_from_page_cache() was not used by any in-tree modules.
Convert page_cache_free_page() into filemap_free_folio().

Signed-off-by: Matthew Wilcox (Oracle) <willy@xxxxxxxxxxxxx>
---
include/linux/pagemap.h | 9 +++++++--
mm/filemap.c | 44 ++++++++++++++++++++---------------------
mm/folio-compat.c | 5 +++++
3 files changed, 33 insertions(+), 25 deletions(-)
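
For context, a caller that already has a locked folio can use the new
entry point directly; existing page-based callers keep working through
the delete_from_page_cache() wrapper added to folio-compat.c. A rough,
hypothetical sketch (assuming the folio_lock()/folio_put() helpers from
earlier in this series, and a caller that holds its own reference):

	/* Hypothetical caller, for illustration only. */
	folio_lock(folio);
	if (folio->mapping == mapping)	/* still in this file's cache? */
		filemap_remove_folio(folio);
	folio_unlock(folio);
	folio_put(folio);		/* drop the caller's reference */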

diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 4be3b6242d6b..58d86679b92f 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -877,8 +877,13 @@ int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
pgoff_t index, gfp_t gfp);
int filemap_add_folio(struct address_space *mapping, struct folio *folio,
pgoff_t index, gfp_t gfp);
-extern void delete_from_page_cache(struct page *page);
-extern void __delete_from_page_cache(struct page *page, void *shadow);
+void filemap_remove_folio(struct folio *folio);
+void delete_from_page_cache(struct page *page);
+void __filemap_remove_folio(struct folio *folio, void *shadow);
+static inline void __delete_from_page_cache(struct page *page, void *shadow)
+{
+ __filemap_remove_folio(page_folio(page), shadow);
+}
void replace_page_cache_page(struct page *old, struct page *new);
void delete_from_page_cache_batch(struct address_space *mapping,
struct pagevec *pvec);
diff --git a/mm/filemap.c b/mm/filemap.c
index bede1d754769..a24e26563e9f 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -219,55 +219,53 @@ static void filemap_unaccount_folio(struct address_space *mapping,
* sure the page is locked and that nobody else uses it - or that usage
* is safe. The caller must hold the i_pages lock.
*/
-void __delete_from_page_cache(struct page *page, void *shadow)
+void __filemap_remove_folio(struct folio *folio, void *shadow)
{
- struct folio *folio = page_folio(page);
- struct address_space *mapping = page->mapping;
+ struct address_space *mapping = folio->mapping;

- trace_mm_filemap_delete_from_page_cache(page);
+ trace_mm_filemap_delete_from_page_cache(&folio->page);

filemap_unaccount_folio(mapping, folio);
page_cache_delete(mapping, folio, shadow);
}

-static void page_cache_free_page(struct address_space *mapping,
- struct page *page)
+static void filemap_free_folio(struct address_space *mapping,
+ struct folio *folio)
{
void (*freepage)(struct page *);

freepage = mapping->a_ops->freepage;
if (freepage)
- freepage(page);
+ freepage(&folio->page);

- if (PageTransHuge(page) && !PageHuge(page)) {
- page_ref_sub(page, thp_nr_pages(page));
- VM_BUG_ON_PAGE(page_count(page) <= 0, page);
+ if (folio_multi(folio) && !folio_hugetlb(folio)) {
+ folio_ref_sub(folio, folio_nr_pages(folio));
+ VM_BUG_ON_FOLIO(folio_ref_count(folio) <= 0, folio);
} else {
- put_page(page);
+ folio_put(folio);
}
}

/**
- * delete_from_page_cache - delete page from page cache
- * @page: the page which the kernel is trying to remove from page cache
+ * filemap_remove_folio - Remove folio from page cache.
+ * @folio: The folio.
*
- * This must be called only on pages that have been verified to be in the page
- * cache and locked. It will never put the page into the free list, the caller
- * has a reference on the page.
+ * This must be called only on folios that are locked and have been
+ * verified to be in the page cache. It will never put the folio into
+ * the free list because the caller has a reference on the folio.
*/
-void delete_from_page_cache(struct page *page)
+void filemap_remove_folio(struct folio *folio)
{
- struct address_space *mapping = page_mapping(page);
+ struct address_space *mapping = folio->mapping;
unsigned long flags;

- BUG_ON(!PageLocked(page));
+ BUG_ON(!folio_locked(folio));
xa_lock_irqsave(&mapping->i_pages, flags);
- __delete_from_page_cache(page, NULL);
+ __filemap_remove_folio(folio, NULL);
xa_unlock_irqrestore(&mapping->i_pages, flags);

- page_cache_free_page(mapping, page);
+ filemap_free_folio(mapping, folio);
}
-EXPORT_SYMBOL(delete_from_page_cache);

/*
* page_cache_delete_batch - delete several pages from page cache
@@ -350,7 +348,7 @@ void delete_from_page_cache_batch(struct address_space *mapping,
xa_unlock_irqrestore(&mapping->i_pages, flags);

for (i = 0; i < pagevec_count(pvec); i++)
- page_cache_free_page(mapping, pvec->pages[i]);
+ filemap_free_folio(mapping, page_folio(pvec->pages[i]));
}

int filemap_check_errors(struct address_space *mapping)
diff --git a/mm/folio-compat.c b/mm/folio-compat.c
index 5b6ae1da314e..749a695b4217 100644
--- a/mm/folio-compat.c
+++ b/mm/folio-compat.c
@@ -140,3 +140,8 @@ struct page *grab_cache_page_write_begin(struct address_space *mapping,
mapping_gfp_mask(mapping));
}
EXPORT_SYMBOL(grab_cache_page_write_begin);
+
+void delete_from_page_cache(struct page *page)
+{
+ return filemap_remove_folio(page_folio(page));
+}
--
2.30.2