[PATCH] Revert "mm: skip CMA pages when they are not available"
From: Usama Arif
Date: Wed Aug 21 2024 - 15:26:07 EST
This reverts commit 5da226dbfce3a2f44978c2c7cf88166e69a6788b.
lruvec->lru_lock is highly contended and is held while isolate_lru_folios
runs. If the LRU contains a long consecutive run of CMA folios and the
requested allocation type is not MIGRATE_MOVABLE, isolate_lru_folios can
hold the lock for a very long time while it skips them. For an FIO
workload, ~150 million order-0 folios were skipped to isolate a few
ZONE_DMA folios [1].
This can cause lockups [1] and high memory pressure for extended periods
of time [2].
[1] https://lore.kernel.org/all/CAOUHufbkhMZYz20aM_3rHZ3OcK4m2puji2FGpUpn_-DevGk3Kg@xxxxxxxxxxxxxx/
[2] https://lore.kernel.org/all/ZrssOrcJIDy8hacI@xxxxxxxxx/
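A simplified sketch of the contended path (names taken from mm/vmscan.c;
this illustrates the control flow only, it is not the exact kernel code):
shrink_inactive_list() takes lruvec->lru_lock and isolate_lru_folios()
then walks the LRU tail under that lock, so every folio that skip_cma()
rejects is only shuffled onto a local list, without the lock ever being
dropped:

	spin_lock_irq(&lruvec->lru_lock);	/* taken in shrink_inactive_list() */

	while (scanned < nr_to_scan && !list_empty(src)) {
		struct folio *folio = lru_to_folio(src);

		/* check added by 5da226dbfce3, removed again by this revert */
		if (folio_zonenum(folio) > sc->reclaim_idx ||
		    skip_cma(folio, sc)) {
			/* skipped folio: lock stays held, loop keeps going */
			list_move(&folio->lru, &folios_skipped);
			continue;
		}
		/* ... try to isolate the folio onto @dst ... */
	}

	spin_unlock_irq(&lruvec->lru_lock);

With a long run of CMA folios at the LRU tail and a non-MIGRATE_MOVABLE
request, the loop degenerates into skip work done entirely under the
lock, which is what the numbers in [1] show.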
Signed-off-by: Usama Arif <usamaarif642@xxxxxxxxx>
---
mm/vmscan.c | 41 ++++++++++++++++++++---------------------
1 file changed, 20 insertions(+), 21 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 5dc96a843466..53372f726a1b 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1625,25 +1625,6 @@ static __always_inline void update_lru_sizes(struct lruvec *lruvec,
}
-#ifdef CONFIG_CMA
-/*
- * It is waste of effort to scan and reclaim CMA pages if it is not available
- * for current allocation context. Kswapd can not be enrolled as it can not
- * distinguish this scenario by using sc->gfp_mask = GFP_KERNEL
- */
-static bool skip_cma(struct folio *folio, struct scan_control *sc)
-{
- return !current_is_kswapd() &&
- gfp_migratetype(sc->gfp_mask) != MIGRATE_MOVABLE &&
- folio_migratetype(folio) == MIGRATE_CMA;
-}
-#else
-static bool skip_cma(struct folio *folio, struct scan_control *sc)
-{
- return false;
-}
-#endif
-
/*
* Isolating page from the lruvec to fill in @dst list by nr_to_scan times.
*
@@ -1690,8 +1671,7 @@ static unsigned long isolate_lru_folios(unsigned long nr_to_scan,
nr_pages = folio_nr_pages(folio);
total_scan += nr_pages;
- if (folio_zonenum(folio) > sc->reclaim_idx ||
- skip_cma(folio, sc)) {
+ if (folio_zonenum(folio) > sc->reclaim_idx) {
nr_skipped[folio_zonenum(folio)] += nr_pages;
move_to = &folios_skipped;
goto move;
@@ -4297,6 +4277,25 @@ void lru_gen_soft_reclaim(struct mem_cgroup *memcg, int nid)
#endif /* CONFIG_MEMCG */
+#ifdef CONFIG_CMA
+/*
+ * It is waste of effort to scan and reclaim CMA pages if it is not available
+ * for current allocation context. Kswapd can not be enrolled as it can not
+ * distinguish this scenario by using sc->gfp_mask = GFP_KERNEL
+ */
+static bool skip_cma(struct folio *folio, struct scan_control *sc)
+{
+ return !current_is_kswapd() &&
+ gfp_migratetype(sc->gfp_mask) != MIGRATE_MOVABLE &&
+ folio_migratetype(folio) == MIGRATE_CMA;
+}
+#else
+static bool skip_cma(struct folio *folio, struct scan_control *sc)
+{
+ return false;
+}
+#endif
+
/******************************************************************************
* the eviction
******************************************************************************/
--
2.43.5