[PATCH 3/4] mm: convert compaction to zone lock wrappers

From: Dmitry Ilvokhin

Date: Wed Feb 11 2026 - 10:24:26 EST


Compaction uses compact_lock_irqsave(), which currently operates
on a raw spinlock_t pointer so that it can be used for both
zone->lock and lru_lock. Since zone lock operations are now wrapped,
compact_lock_irqsave() can no longer operate directly on a spinlock_t
when the lock belongs to a zone.
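
For reference, the zone wrappers introduced earlier in the series take
the zone itself rather than its embedded lock, so the call shape
changes as follows (both forms appear in the diff below):

	spin_lock_irqsave(&cc->zone->lock, flags);	/* before */
	zone_lock_irqsave(cc->zone, flags);		/* after */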

Introduce struct compact_lock to abstract the underlying lock type. The
structure carries a lock type enum and a union holding either a zone
pointer or a raw spinlock_t pointer; the lock and trylock helpers
dispatch on the type to the matching zone wrapper or spinlock
primitive. The unlock side in compact_unlock_should_abort() only ever
deals with zone->lock, so it now takes a struct zone directly.
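
For example, a zone-lock caller builds the descriptor on the stack, as
isolate_freepages_block() does in the diff below:

	struct compact_lock zol = {
		.type = COMPACT_LOCK_ZONE,
		.zone = cc->zone,
	};

	locked = compact_lock_irqsave(zol, &flags, cc);

The lru_lock path is analogous, with COMPACT_LOCK_RAW_SPINLOCK and
.lock = &lruvec->lru_lock.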

No functional change intended.

Signed-off-by: Dmitry Ilvokhin <d@xxxxxxxxxxxx>
---
mm/compaction.c | 108 +++++++++++++++++++++++++++++++++++++++---------
1 file changed, 89 insertions(+), 19 deletions(-)

diff --git a/mm/compaction.c b/mm/compaction.c
index 1e8f8eca318c..1b000d2b95b2 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -24,6 +24,7 @@
 #include <linux/page_owner.h>
 #include <linux/psi.h>
 #include <linux/cpuset.h>
+#include <linux/zone_lock.h>
 #include "internal.h"
 
 #ifdef CONFIG_COMPACTION
@@ -493,6 +494,65 @@ static bool test_and_set_skip(struct compact_control *cc, struct page *page)
 }
 #endif /* CONFIG_COMPACTION */
 
+enum compact_lock_type {
+	COMPACT_LOCK_ZONE,
+	COMPACT_LOCK_RAW_SPINLOCK,
+};
+
+struct compact_lock {
+	enum compact_lock_type type;
+	union {
+		struct zone *zone;
+		spinlock_t *lock;	/* Reference to lru_lock */
+	};
+};
+
+static bool compact_do_zone_trylock_irqsave(struct zone *zone,
+					    unsigned long *flags)
+{
+	return zone_trylock_irqsave(zone, *flags);
+}
+
+static bool compact_do_raw_trylock_irqsave(spinlock_t *lock,
+					   unsigned long *flags)
+{
+	return spin_trylock_irqsave(lock, *flags);
+}
+
+static bool compact_do_trylock_irqsave(struct compact_lock lock,
+				       unsigned long *flags)
+{
+	if (lock.type == COMPACT_LOCK_ZONE)
+		return compact_do_zone_trylock_irqsave(lock.zone, flags);
+
+	return compact_do_raw_trylock_irqsave(lock.lock, flags);
+}
+
+static void compact_do_zone_lock_irqsave(struct zone *zone,
+					 unsigned long *flags)
+	__acquires(zone->lock)
+{
+	zone_lock_irqsave(zone, *flags);
+}
+
+static void compact_do_raw_lock_irqsave(spinlock_t *lock,
+					unsigned long *flags)
+	__acquires(lock)
+{
+	spin_lock_irqsave(lock, *flags);
+}
+
+static void compact_do_lock_irqsave(struct compact_lock lock,
+				    unsigned long *flags)
+{
+	if (lock.type == COMPACT_LOCK_ZONE) {
+		compact_do_zone_lock_irqsave(lock.zone, flags);
+		return;
+	}
+
+	compact_do_raw_lock_irqsave(lock.lock, flags);
+}
+
 /*
  * Compaction requires the taking of some coarse locks that are potentially
  * very heavily contended. For async compaction, trylock and record if the
@@ -502,19 +562,19 @@ static bool test_and_set_skip(struct compact_control *cc, struct page *page)
  *
  * Always returns true which makes it easier to track lock state in callers.
  */
-static bool compact_lock_irqsave(spinlock_t *lock, unsigned long *flags,
-						struct compact_control *cc)
-	__acquires(lock)
+static bool compact_lock_irqsave(struct compact_lock lock,
+				 unsigned long *flags,
+				 struct compact_control *cc)
 {
 	/* Track if the lock is contended in async mode */
 	if (cc->mode == MIGRATE_ASYNC && !cc->contended) {
-		if (spin_trylock_irqsave(lock, *flags))
+		if (compact_do_trylock_irqsave(lock, flags))
 			return true;
 
 		cc->contended = true;
 	}
 
-	spin_lock_irqsave(lock, *flags);
+	compact_do_lock_irqsave(lock, flags);
 	return true;
 }
 
@@ -530,11 +590,13 @@ static bool compact_lock_irqsave(spinlock_t *lock, unsigned long *flags,
  * Returns true if compaction should abort due to fatal signal pending.
  * Returns false when compaction can continue.
  */
-static bool compact_unlock_should_abort(spinlock_t *lock,
-		unsigned long flags, bool *locked, struct compact_control *cc)
+static bool compact_unlock_should_abort(struct zone *zone,
+					unsigned long flags,
+					bool *locked,
+					struct compact_control *cc)
 {
 	if (*locked) {
-		spin_unlock_irqrestore(lock, flags);
+		zone_unlock_irqrestore(zone, flags);
 		*locked = false;
 	}
 
@@ -582,9 +644,8 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
 		 * contention, to give chance to IRQs. Abort if fatal signal
 		 * pending.
 		 */
-		if (!(blockpfn % COMPACT_CLUSTER_MAX)
-		    && compact_unlock_should_abort(&cc->zone->lock, flags,
-								&locked, cc))
+		if (!(blockpfn % COMPACT_CLUSTER_MAX) &&
+		    compact_unlock_should_abort(cc->zone, flags, &locked, cc))
 			break;
 
 		nr_scanned++;
@@ -613,8 +674,12 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
 
 		/* If we already hold the lock, we can skip some rechecking. */
 		if (!locked) {
-			locked = compact_lock_irqsave(&cc->zone->lock,
-								&flags, cc);
+			struct compact_lock zol = {
+				.type = COMPACT_LOCK_ZONE,
+				.zone = cc->zone,
+			};
+
+			locked = compact_lock_irqsave(zol, &flags, cc);
 
 			/* Recheck this is a buddy page under lock */
 			if (!PageBuddy(page))
@@ -649,7 +714,7 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
 	}
 
 	if (locked)
-		spin_unlock_irqrestore(&cc->zone->lock, flags);
+		zone_unlock_irqrestore(cc->zone, flags);
 
 	/*
 	 * Be careful to not go outside of the pageblock.
@@ -1157,10 +1222,15 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 
 		/* If we already hold the lock, we can skip some rechecking */
 		if (lruvec != locked) {
+			struct compact_lock cl = {
+				.type = COMPACT_LOCK_RAW_SPINLOCK,
+				.lock = &lruvec->lru_lock,
+			};
+
 			if (locked)
 				unlock_page_lruvec_irqrestore(locked, flags);
 
-			compact_lock_irqsave(&lruvec->lru_lock, &flags, cc);
+			compact_lock_irqsave(cl, &flags, cc);
 			locked = lruvec;
 
 			lruvec_memcg_debug(lruvec, folio);
@@ -1555,7 +1625,7 @@ static void fast_isolate_freepages(struct compact_control *cc)
 		if (!area->nr_free)
 			continue;
 
-		spin_lock_irqsave(&cc->zone->lock, flags);
+		zone_lock_irqsave(cc->zone, flags);
 		freelist = &area->free_list[MIGRATE_MOVABLE];
 		list_for_each_entry_reverse(freepage, freelist, buddy_list) {
 			unsigned long pfn;
@@ -1614,7 +1684,7 @@ static void fast_isolate_freepages(struct compact_control *cc)
 			}
 		}
 
-		spin_unlock_irqrestore(&cc->zone->lock, flags);
+		zone_unlock_irqrestore(cc->zone, flags);
 
 		/* Skip fast search if enough freepages isolated */
 		if (cc->nr_freepages >= cc->nr_migratepages)
@@ -1988,7 +2058,7 @@ static unsigned long fast_find_migrateblock(struct compact_control *cc)
 		if (!area->nr_free)
 			continue;
 
-		spin_lock_irqsave(&cc->zone->lock, flags);
+		zone_lock_irqsave(cc->zone, flags);
 		freelist = &area->free_list[MIGRATE_MOVABLE];
 		list_for_each_entry(freepage, freelist, buddy_list) {
 			unsigned long free_pfn;
@@ -2021,7 +2091,7 @@ static unsigned long fast_find_migrateblock(struct compact_control *cc)
 				break;
 			}
 		}
-		spin_unlock_irqrestore(&cc->zone->lock, flags);
+		zone_unlock_irqrestore(cc->zone, flags);
 	}
 
 	cc->total_migrate_scanned += nr_scanned;
--
2.47.3