[PATCH v4 3/5] mm: convert compaction to zone lock wrappers
From: Dmitry Ilvokhin
Date: Fri Feb 27 2026 - 11:13:58 EST
Compaction uses compact_lock_irqsave(), which currently operates
on a raw spinlock_t pointer so that a single helper can serve both
zone->lock and lruvec->lru_lock. Now that zone lock operations are
wrapped, compact_lock_irqsave() can no longer operate directly on a
spinlock_t when the lock belongs to a zone.
Split the helper into compact_zone_lock_irqsave() and
compact_lruvec_lock_irqsave(), duplicating the small amount of
shared logic. Since there are only two call sites and each one
statically knows its lock type, this avoids introducing additional
abstraction or runtime dispatch in the compaction path.
No functional change intended.
Signed-off-by: Dmitry Ilvokhin <d@xxxxxxxxxxxx>
Acked-by: Shakeel Butt <shakeel.butt@xxxxxxxxx>
---
mm/compaction.c | 33 ++++++++++++++++++++++++---------
1 file changed, 24 insertions(+), 9 deletions(-)
diff --git a/mm/compaction.c b/mm/compaction.c
index fa0e332a8a92..c68fcc416fc7 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -503,19 +503,36 @@ static bool test_and_set_skip(struct compact_control *cc, struct page *page)
*
* Always returns true which makes it easier to track lock state in callers.
*/
-static bool compact_lock_irqsave(spinlock_t *lock, unsigned long *flags,
- struct compact_control *cc)
- __acquires(lock)
+static bool compact_zone_lock_irqsave(struct zone *zone,
+ unsigned long *flags,
+ struct compact_control *cc)
+ __acquires(&zone->lock)
{
/* Track if the lock is contended in async mode */
if (cc->mode == MIGRATE_ASYNC && !cc->contended) {
- if (spin_trylock_irqsave(lock, *flags))
+ if (zone_trylock_irqsave(zone, *flags))
return true;
cc->contended = true;
}
- spin_lock_irqsave(lock, *flags);
+ zone_lock_irqsave(zone, *flags);
+ return true;
+}
+
+static bool compact_lruvec_lock_irqsave(struct lruvec *lruvec,
+ unsigned long *flags,
+ struct compact_control *cc)
+ __acquires(&lruvec->lru_lock)
+{
+ if (cc->mode == MIGRATE_ASYNC && !cc->contended) {
+ if (spin_trylock_irqsave(&lruvec->lru_lock, *flags))
+ return true;
+
+ cc->contended = true;
+ }
+
+ spin_lock_irqsave(&lruvec->lru_lock, *flags);
return true;
}
@@ -531,7 +548,6 @@ static bool compact_lock_irqsave(spinlock_t *lock, unsigned long *flags,
* Returns true if compaction should abort due to fatal signal pending.
* Returns false when compaction can continue.
*/
-
static bool compact_unlock_should_abort(struct zone *zone,
unsigned long flags,
bool *locked,
@@ -616,8 +632,7 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
/* If we already hold the lock, we can skip some rechecking. */
if (!locked) {
- locked = compact_lock_irqsave(&cc->zone->lock,
- &flags, cc);
+ locked = compact_zone_lock_irqsave(cc->zone, &flags, cc);
/* Recheck this is a buddy page under lock */
if (!PageBuddy(page))
@@ -1163,7 +1178,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
if (locked)
unlock_page_lruvec_irqrestore(locked, flags);
- compact_lock_irqsave(&lruvec->lru_lock, &flags, cc);
+ compact_lruvec_lock_irqsave(lruvec, &flags, cc);
locked = lruvec;
lruvec_memcg_debug(lruvec, folio);
--
2.47.3