[PATCH v4 28/36] mm: Support storing shadow entries for large pages

From: Matthew Wilcox
Date: Fri May 15 2020 - 09:17:49 EST


From: "Matthew Wilcox (Oracle)" <willy@xxxxxxxxxxxxx>

If the page is being replaced with a NULL, we can do a single large store,
but for now we have to use a loop to store one shadow entry at each index.

Signed-off-by: Matthew Wilcox (Oracle) <willy@xxxxxxxxxxxxx>
---
mm/filemap.c | 17 +++++++++++------
1 file changed, 11 insertions(+), 6 deletions(-)

diff --git a/mm/filemap.c b/mm/filemap.c
index 9c760dd7208e..0ec7f25a07b2 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -120,22 +120,27 @@ static void page_cache_delete(struct address_space *mapping,
struct page *page, void *shadow)
{
XA_STATE(xas, &mapping->i_pages, page->index);
- unsigned int nr = 1;
+ unsigned int i, nr = 1, entries = 1;

mapping_set_update(&xas, mapping);

/* hugetlb pages are represented by a single entry in the xarray */
if (!PageHuge(page)) {
- xas_set_order(&xas, page->index, compound_order(page));
- nr = compound_nr(page);
+ entries = nr = hpage_nr_pages(page);
+ if (!shadow) {
+ xas_set_order(&xas, page->index, thp_order(page));
+ entries = 1;
+ }
}

VM_BUG_ON_PAGE(!PageLocked(page), page);
VM_BUG_ON_PAGE(PageTail(page), page);
- VM_BUG_ON_PAGE(nr != 1 && shadow, page);

- xas_store(&xas, shadow);
- xas_init_marks(&xas);
+ for (i = 0; i < entries; i++) {
+ xas_store(&xas, shadow);
+ xas_init_marks(&xas);
+ xas_next(&xas);
+ }

page->mapping = NULL;
/* Leave page->index set: truncation lookup relies upon it */
--
2.26.2