[PATCH] mm: Introduce a pageflag for partially mapped folios fix

From: Usama Arif
Date: Mon Aug 19 2024 - 16:07:16 EST


Test the partially_mapped flag before clearing it. This avoids
unnecessary writes and will be needed in the nr_split_deferred
series.
There is also no need to clear partially_mapped when prepping the
compound head, as it should already start cleared.
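
For illustration, here is a minimal userspace sketch of the
test-before-clear pattern this patch applies. The struct and helpers
below are simplified stand-ins for the kernel's folio flag helpers,
not the real implementations:

#include <stdbool.h>

#define PG_partially_mapped	0	/* hypothetical bit position */

struct folio {
	unsigned long flags;
};

static bool folio_test_partially_mapped(const struct folio *folio)
{
	/* Read-only check: does not dirty the flags word. */
	return folio->flags & (1UL << PG_partially_mapped);
}

static void __folio_clear_partially_mapped(struct folio *folio)
{
	/* Non-atomic clear: always writes folio->flags. */
	folio->flags &= ~(1UL << PG_partially_mapped);
}

static void folio_clear_partially_mapped_if_set(struct folio *folio)
{
	/*
	 * Only write when the flag is actually set, so the common
	 * already-clear case stays a read-only check.
	 */
	if (folio_test_partially_mapped(folio))
		__folio_clear_partially_mapped(folio);
}

folio_clear_partially_mapped_if_set() is hypothetical; the diff below
open-codes the same test-before-clear sequence at each call site.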

Signed-off-by: Usama Arif <usamaarif642@xxxxxxxxx>
---
 include/linux/page-flags.h | 2 +-
 mm/huge_memory.c           | 9 ++++++---
 mm/internal.h              | 4 +---
 3 files changed, 8 insertions(+), 7 deletions(-)

diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index c3bb0e0da581..f1602695daf2 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -1182,7 +1182,7 @@ static __always_inline void __ClearPageAnonExclusive(struct page *page)
  */
 #define PAGE_FLAGS_SECOND						\
 	(0xffUL /* order */ | 1UL << PG_has_hwpoisoned |		\
-	 1UL << PG_large_rmappable)
+	 1UL << PG_large_rmappable | 1UL << PG_partially_mapped)
 
 #define PAGE_FLAGS_PRIVATE				\
 	(1UL << PG_private | 1UL << PG_private_2)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 5d67d3b3c1b2..402b9d933de0 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -3422,7 +3422,8 @@ int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
 			 * page_deferred_list.
 			 */
 			list_del_init(&folio->_deferred_list);
-			__folio_clear_partially_mapped(folio);
+			if (folio_test_partially_mapped(folio))
+				__folio_clear_partially_mapped(folio);
 		}
 		spin_unlock(&ds_queue->split_queue_lock);
 		if (mapping) {
@@ -3479,7 +3480,8 @@ void __folio_undo_large_rmappable(struct folio *folio)
 	if (!list_empty(&folio->_deferred_list)) {
 		ds_queue->split_queue_len--;
 		list_del_init(&folio->_deferred_list);
-		__folio_clear_partially_mapped(folio);
+		if (folio_test_partially_mapped(folio))
+			__folio_clear_partially_mapped(folio);
 	}
 	spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
 }
@@ -3610,7 +3612,8 @@ static unsigned long deferred_split_scan(struct shrinker *shrink,
 		} else {
 			/* We lost race with folio_put() */
 			list_del_init(&folio->_deferred_list);
-			__folio_clear_partially_mapped(folio);
+			if (folio_test_partially_mapped(folio))
+				__folio_clear_partially_mapped(folio);
 			ds_queue->split_queue_len--;
 		}
 		if (!--sc->nr_to_scan)
diff --git a/mm/internal.h b/mm/internal.h
index 27cbb5365841..52f7fc4e8ac3 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -662,10 +662,8 @@ static inline void prep_compound_head(struct page *page, unsigned int order)
 	atomic_set(&folio->_entire_mapcount, -1);
 	atomic_set(&folio->_nr_pages_mapped, 0);
 	atomic_set(&folio->_pincount, 0);
-	if (order > 1) {
+	if (order > 1)
 		INIT_LIST_HEAD(&folio->_deferred_list);
-		__folio_clear_partially_mapped(folio);
-	}
 }
 
 static inline void prep_compound_tail(struct page *head, int tail_idx)
--
2.43.5