[PATCH] exit: combine work under lock in synchronize_group_exit() and coredump_task_exit()
From: Mateusz Guzik
Date: Wed Mar 19 2025 - 14:00:40 EST
This reduces single-threaded overhead as it avoids one lock+irq trip on
exit.
It also improves scalability of spawning and killing threads within one
process (just shy of 5% when doing it on 24 cores on my test jig).
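Roughly, the siglock handling on the exit path changes shape as follows
(simplified sketch, details elided; see the hunks below for the real thing):

	/* before: two separate lock+irq trips on the exit path */
	synchronize_group_exit(tsk, code);	/* lock, work, unlock */
	...
	coredump_task_exit(tsk);		/* lock again, check core_state, unlock */

	/* after: both pieces of work under one acquisition */
	spin_lock_irq(&sighand->siglock);
	synchronize_group_exit(tsk, code);
	core_state = coredump_task_exit_prep(tsk);
	spin_unlock_irq(&sighand->siglock);
	...
	coredump_task_exit_finish(tsk, core_state);	/* only sleeps if a dump is pending */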
As a side effect of the above, the check for a pending coredump now happens
before the kcov and kmsan exit handling, which afaics is harmless.
However, I was not comfortable lifting the actual dump handling above them
as well, so it still happens after.
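That is, the resulting order in do_exit() (as in the last hunk below) is:

	core_state = coredump_task_exit_prep(tsk);	/* core_state checked here, before... */
	spin_unlock_irq(&sighand->siglock);

	kcov_task_exit(tsk);
	kmsan_task_exit(tsk);

	coredump_task_exit_finish(tsk, core_state);	/* ...while the dump wait stays after */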
Signed-off-by: Mateusz Guzik <mjguzik@xxxxxxxxx>
---
kernel/exit.c | 67 +++++++++++++++++++++++++++++----------------------
1 file changed, 38 insertions(+), 29 deletions(-)
diff --git a/kernel/exit.c b/kernel/exit.c
index f97a2bbc9db9..f799c32dd8f5 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -412,9 +412,9 @@ kill_orphaned_pgrp(struct task_struct *tsk, struct task_struct *parent)
 	}
 }
-static void coredump_task_exit(struct task_struct *tsk)
+static struct core_state *coredump_task_exit_prep(struct task_struct *tsk)
 {
-	struct core_state *core_state;
+	lockdep_assert_held(&tsk->sighand->siglock);
 	/*
 	 * Serialize with any possible pending coredump.
@@ -423,33 +423,37 @@ static void coredump_task_exit(struct task_struct *tsk)
 	 * will increment ->nr_threads for each thread in the
 	 * group without PF_POSTCOREDUMP set.
 	 */
-	spin_lock_irq(&tsk->sighand->siglock);
 	tsk->flags |= PF_POSTCOREDUMP;
-	core_state = tsk->signal->core_state;
-	spin_unlock_irq(&tsk->sighand->siglock);
-	if (core_state) {
-		struct core_thread self;
-
-		self.task = current;
-		if (self.task->flags & PF_SIGNALED)
-			self.next = xchg(&core_state->dumper.next, &self);
-		else
-			self.task = NULL;
-		/*
-		 * Implies mb(), the result of xchg() must be visible
-		 * to core_state->dumper.
-		 */
-		if (atomic_dec_and_test(&core_state->nr_threads))
-			complete(&core_state->startup);
+	return tsk->signal->core_state;
+}
-		for (;;) {
-			set_current_state(TASK_IDLE|TASK_FREEZABLE);
-			if (!self.task)	/* see coredump_finish() */
-				break;
-			schedule();
-		}
-		__set_current_state(TASK_RUNNING);
+static void coredump_task_exit_finish(struct task_struct *tsk,
+		struct core_state *core_state)
+{
+	struct core_thread self;
+
+	if (likely(!core_state))
+		return;
+
+	self.task = current;
+	if (self.task->flags & PF_SIGNALED)
+		self.next = xchg(&core_state->dumper.next, &self);
+	else
+		self.task = NULL;
+	/*
+	 * Implies mb(), the result of xchg() must be visible
+	 * to core_state->dumper.
+	 */
+	if (atomic_dec_and_test(&core_state->nr_threads))
+		complete(&core_state->startup);
+
+	for (;;) {
+		set_current_state(TASK_IDLE|TASK_FREEZABLE);
+		if (!self.task)	/* see coredump_finish() */
+			break;
+		schedule();
 	}
+	__set_current_state(TASK_RUNNING);
 }
 #ifdef CONFIG_MEMCG
@@ -878,7 +882,8 @@ static void synchronize_group_exit(struct task_struct *tsk, long code)
 	struct sighand_struct *sighand = tsk->sighand;
 	struct signal_struct *signal = tsk->signal;
-	spin_lock_irq(&sighand->siglock);
+	lockdep_assert_held(&sighand->siglock);
+
 	signal->quick_threads--;
 	if ((signal->quick_threads == 0) &&
 	    !(signal->flags & SIGNAL_GROUP_EXIT)) {
@@ -886,24 +891,28 @@ static void synchronize_group_exit(struct task_struct *tsk, long code)
 		signal->group_exit_code = code;
 		signal->group_stop_count = 0;
 	}
-	spin_unlock_irq(&sighand->siglock);
 }
 void __noreturn do_exit(long code)
 {
 	struct task_struct *tsk = current;
+	struct sighand_struct *sighand = tsk->sighand;
+	struct core_state *core_state;
 	int group_dead;
 	WARN_ON(irqs_disabled());
+	spin_lock_irq(&sighand->siglock);
 	synchronize_group_exit(tsk, code);
+	core_state = coredump_task_exit_prep(tsk);
+	spin_unlock_irq(&sighand->siglock);
 	WARN_ON(tsk->plug);
 	kcov_task_exit(tsk);
 	kmsan_task_exit(tsk);
-	coredump_task_exit(tsk);
+	coredump_task_exit_finish(tsk, core_state);
 	ptrace_event(PTRACE_EVENT_EXIT, code);
 	user_events_exit(tsk);
--
2.43.0