[PATCH] lib/list_sort: introduce list_sort_nonatomic() and remove dummy cmp() calls

From: Kuan-Wei Chiu

Date: Sun Mar 15 2026 - 15:39:23 EST


Historically, list_sort() implemented a hack in merge_final():

	if (unlikely(!++count))
		cmp(priv, b, b);

This was designed specifically so that callers could periodically
invoke cond_resched() within their comparison functions when merging
highly unbalanced lists.

However, an audit of the kernel tree reveals that only fs/ubifs/ relies
on this mechanism. For the vast majority of list_sort() users (such as
block layer IO schedulers and file systems), this results in completely
wasted function calls. In the worst-case scenario (merging an already
sorted list where 'a' is exhausted quickly), this results in
approximately (N/2)/256 unnecessary cmp() calls.

To clean up this API while ensuring behavioral compatibility:
1. Introduce list_sort_nonatomic(), which explicitly calls
cond_resched() internally whenever the internal u8 iteration counter
wraps (i.e. every 256 iterations of the final merge loop).
2. Remove the dummy cmp(priv, b, b) fallback for standard list_sort(),
saving unnecessary function calls and improving determinism.
3. Convert the sole user (fs/ubifs/) to the new API.

Note that ubifs still maintains cond_resched() inside its own
comparison functions. This patch does not alter the frequency or timing
of those scheduling points, guaranteeing no regressions for ubifs,
while benefiting all other kernel users.

Signed-off-by: Kuan-Wei Chiu <visitorckw@xxxxxxxxx>
---
fs/ubifs/gc.c | 4 +-
fs/ubifs/replay.c | 2 +-
include/linux/list_sort.h | 3 +
lib/list_sort.c | 166 +++++++++++++++++++++-----------------
4 files changed, 100 insertions(+), 75 deletions(-)

diff --git a/fs/ubifs/gc.c b/fs/ubifs/gc.c
index 0bf08b7755b8..dcc5742210b0 100644
--- a/fs/ubifs/gc.c
+++ b/fs/ubifs/gc.c
@@ -277,8 +277,8 @@ static int sort_nodes(struct ubifs_info *c, struct ubifs_scan_leb *sleb,
}

/* Sort data and non-data nodes */
- list_sort(c, &sleb->nodes, &data_nodes_cmp);
- list_sort(c, nondata, &nondata_nodes_cmp);
+ list_sort_nonatomic(c, &sleb->nodes, &data_nodes_cmp);
+ list_sort_nonatomic(c, nondata, &nondata_nodes_cmp);

err = dbg_check_data_nodes_order(c, &sleb->nodes);
if (err)
diff --git a/fs/ubifs/replay.c b/fs/ubifs/replay.c
index a9a568f4a868..ad03dd106a54 100644
--- a/fs/ubifs/replay.c
+++ b/fs/ubifs/replay.c
@@ -329,7 +329,7 @@ static int apply_replay_list(struct ubifs_info *c)
struct replay_entry *r;
int err;

- list_sort(c, &c->replay_list, &replay_entries_cmp);
+ list_sort_nonatomic(c, &c->replay_list, &replay_entries_cmp);

list_for_each_entry(r, &c->replay_list, list) {
cond_resched();
diff --git a/include/linux/list_sort.h b/include/linux/list_sort.h
index 453105f74e05..f7af29073d48 100644
--- a/include/linux/list_sort.h
+++ b/include/linux/list_sort.h
@@ -11,4 +11,7 @@ typedef int __attribute__((nonnull(2,3))) (*list_cmp_func_t)(void *,

__attribute__((nonnull(2,3)))
void list_sort(void *priv, struct list_head *head, list_cmp_func_t cmp);
+
+__attribute__((nonnull(2, 3)))
+void list_sort_nonatomic(void *priv, struct list_head *head, list_cmp_func_t cmp);
#endif
diff --git a/lib/list_sort.c b/lib/list_sort.c
index a310ecb7ccc0..788bfc26cf7b 100644
--- a/lib/list_sort.c
+++ b/lib/list_sort.c
@@ -3,6 +3,7 @@
#include <linux/export.h>
#include <linux/list_sort.h>
#include <linux/list.h>
+#include <linux/sched.h>

/*
* Returns a list organized in an intermediate format suited
@@ -47,7 +48,7 @@ static struct list_head *merge(void *priv, list_cmp_func_t cmp,
*/
__attribute__((nonnull(2,3,4,5)))
static void merge_final(void *priv, list_cmp_func_t cmp, struct list_head *head,
- struct list_head *a, struct list_head *b)
+ struct list_head *a, struct list_head *b, bool may_schedule)
{
struct list_head *tail = head;
u8 count = 0;
@@ -79,12 +80,11 @@ static void merge_final(void *priv, list_cmp_func_t cmp, struct list_head *head,
/*
* If the merge is highly unbalanced (e.g. the input is
* already sorted), this loop may run many iterations.
- * Continue callbacks to the client even though no
- * element comparison is needed, so the client's cmp()
- * routine can invoke cond_resched() periodically.
+ * If may_schedule is true, periodically invoke cond_resched()
+ * to avoid soft lockups.
*/
- if (unlikely(!++count))
- cmp(priv, b, b);
+ if (may_schedule && unlikely(!++count))
+ cond_resched();
b->prev = tail;
tail = b;
b = b->next;
@@ -95,6 +95,75 @@ static void merge_final(void *priv, list_cmp_func_t cmp, struct list_head *head,
head->prev = tail;
}

+static void __list_sort(void *priv, struct list_head *head, list_cmp_func_t cmp, bool may_schedule)
+{
+ struct list_head *list = head->next, *pending = NULL;
+ size_t count = 0; /* Count of pending */
+
+ if (list == head->prev) /* Zero or one elements */
+ return;
+
+ /* Convert to a null-terminated singly-linked list. */
+ head->prev->next = NULL;
+
+ /*
+ * Data structure invariants:
+ * - All lists are singly linked and null-terminated; prev
+ * pointers are not maintained.
+ * - pending is a prev-linked "list of lists" of sorted
+ * sublists awaiting further merging.
+ * - Each of the sorted sublists is power-of-two in size.
+ * - Sublists are sorted by size and age, smallest & newest at front.
+ * - There are zero to two sublists of each size.
+ * - A pair of pending sublists are merged as soon as the number
+ * of following pending elements equals their size (i.e.
+ * each time count reaches an odd multiple of that size).
+ * That ensures each later final merge will be at worst 2:1.
+ * - Each round consists of:
+ * - Merging the two sublists selected by the highest bit
+ * which flips when count is incremented, and
+ * - Adding an element from the input as a size-1 sublist.
+ */
+ do {
+ size_t bits;
+ struct list_head **tail = &pending;
+
+ /* Find the least-significant clear bit in count */
+ for (bits = count; bits & 1; bits >>= 1)
+ tail = &(*tail)->prev;
+ /* Do the indicated merge */
+ if (likely(bits)) {
+ struct list_head *a = *tail, *b = a->prev;
+
+ a = merge(priv, cmp, b, a);
+ /* Install the merged result in place of the inputs */
+ a->prev = b->prev;
+ *tail = a;
+ }
+
+ /* Move one element from input list to pending */
+ list->prev = pending;
+ pending = list;
+ list = list->next;
+ pending->next = NULL;
+ count++;
+ } while (list);
+
+ /* End of input; merge together all the pending lists. */
+ list = pending;
+ pending = pending->prev;
+ for (;;) {
+ struct list_head *next = pending->prev;
+
+ if (!next)
+ break;
+ list = merge(priv, cmp, pending, list);
+ pending = next;
+ }
+ /* The final merge, rebuilding prev links */
+ merge_final(priv, cmp, head, pending, list, may_schedule);
+}
+
/**
* list_sort - sort a list
* @priv: private data, opaque to list_sort(), passed to @cmp
@@ -185,73 +254,26 @@ static void merge_final(void *priv, list_cmp_func_t cmp, struct list_head *head,
* of size 2^k varies from 2^(k-1) (cases 3 and 5 when x == 0) to
* 2^(k+1) - 1 (second merge of case 5 when x == 2^(k-1) - 1).
*/
-__attribute__((nonnull(2,3)))
+__attribute__((nonnull(2, 3)))
void list_sort(void *priv, struct list_head *head, list_cmp_func_t cmp)
{
- struct list_head *list = head->next, *pending = NULL;
- size_t count = 0; /* Count of pending */
-
- if (list == head->prev) /* Zero or one elements */
- return;
-
- /* Convert to a null-terminated singly-linked list. */
- head->prev->next = NULL;
-
- /*
- * Data structure invariants:
- * - All lists are singly linked and null-terminated; prev
- * pointers are not maintained.
- * - pending is a prev-linked "list of lists" of sorted
- * sublists awaiting further merging.
- * - Each of the sorted sublists is power-of-two in size.
- * - Sublists are sorted by size and age, smallest & newest at front.
- * - There are zero to two sublists of each size.
- * - A pair of pending sublists are merged as soon as the number
- * of following pending elements equals their size (i.e.
- * each time count reaches an odd multiple of that size).
- * That ensures each later final merge will be at worst 2:1.
- * - Each round consists of:
- * - Merging the two sublists selected by the highest bit
- * which flips when count is incremented, and
- * - Adding an element from the input as a size-1 sublist.
- */
- do {
- size_t bits;
- struct list_head **tail = &pending;
-
- /* Find the least-significant clear bit in count */
- for (bits = count; bits & 1; bits >>= 1)
- tail = &(*tail)->prev;
- /* Do the indicated merge */
- if (likely(bits)) {
- struct list_head *a = *tail, *b = a->prev;
-
- a = merge(priv, cmp, b, a);
- /* Install the merged result in place of the inputs */
- a->prev = b->prev;
- *tail = a;
- }
-
- /* Move one element from input list to pending */
- list->prev = pending;
- pending = list;
- list = list->next;
- pending->next = NULL;
- count++;
- } while (list);
-
- /* End of input; merge together all the pending lists. */
- list = pending;
- pending = pending->prev;
- for (;;) {
- struct list_head *next = pending->prev;
-
- if (!next)
- break;
- list = merge(priv, cmp, pending, list);
- pending = next;
- }
- /* The final merge, rebuilding prev links */
- merge_final(priv, cmp, head, pending, list);
+ __list_sort(priv, head, cmp, false);
}
EXPORT_SYMBOL(list_sort);
+
+/**
+ * list_sort_nonatomic - sort a list with conditional rescheduling
+ * @priv: private data, opaque to list_sort(), passed to @cmp
+ * @head: the list to sort
+ * @cmp: the elements comparison function
+ *
+ * This variant of list_sort() periodically invokes cond_resched()
+ * during the sorting process. It should be used for sorting very
+ * long lists where the operation might otherwise cause soft lockups.
+ */
+__attribute__((nonnull(2, 3)))
+void list_sort_nonatomic(void *priv, struct list_head *head, list_cmp_func_t cmp)
+{
+ __list_sort(priv, head, cmp, true);
+}
+EXPORT_SYMBOL(list_sort_nonatomic);
--
2.53.0.851.ga537e3e6e9-goog