[RFC][PATCH 1/2] sighand: Count each thread group once in sighand_struct

From: Eric W. Biederman
Date: Sat Apr 01 2017 - 01:19:42 EST

In practice a thread group is either using a sighand_struct or
it isn't. Therefore simplify things a bit and only increment the
count in sighand_struct when a new thread group is created that uses
the existing sighand_struct, and only decrement the count in
sighand_struct when a thread group exits.

As well as standing on its own merits, this has the potential to
simplify de_thread.
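
To make the intended rule concrete, here is a minimal user-space
sketch of the counting scheme (illustrative only, not kernel code;
every name below is made up).  The count tracks thread groups
sharing the signal handler table, not individual tasks:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct sighand { atomic_int count; };	/* stand-in for sighand_struct */

/* CLONE_SIGHAND without CLONE_THREAD: a new thread group starts
 * using an existing sighand, so it takes a reference. */
static void new_group_shares(struct sighand *sh)
{
	atomic_fetch_add(&sh->count, 1);
}

/* CLONE_THREAD: a new thread in an existing group.  The group
 * already holds a reference, so the count is left alone. */
static void new_thread_in_group(struct sighand *sh)
{
	(void)sh;
}

/* The last thread of a group exits (group_dead): drop the group's
 * reference, and free on the final put. */
static void group_dead_put(struct sighand *sh)
{
	if (atomic_fetch_sub(&sh->count, 1) == 1)
		free(sh);
}

int main(void)
{
	struct sighand *sh = malloc(sizeof(*sh));

	atomic_init(&sh->count, 1);	/* the creating thread group */

	new_thread_in_group(sh);	/* count stays 1 */
	new_group_shares(sh);		/* count becomes 2 */
	printf("groups using sighand: %d\n", atomic_load(&sh->count));

	group_dead_put(sh);		/* second group exits: count 1 */
	group_dead_put(sh);		/* first group exits: freed */
	return 0;
}

With this rule a process that spawns many threads but never shares
its handlers across a plain fork keeps the count at 1 for its whole
lifetime.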

Signed-off-by: "Eric W. Biederman" <ebiederm@xxxxxxxxxxxx>
---
kernel/exit.c | 2 +-
kernel/fork.c | 6 ++++--
2 files changed, 5 insertions(+), 3 deletions(-)

diff --git a/kernel/exit.c b/kernel/exit.c
index e126ebf2400c..8c5b3e106298 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -163,9 +163,9 @@ static void __exit_signal(struct task_struct *tsk)
 	tsk->sighand = NULL;
 	spin_unlock(&sighand->siglock);
 
-	__cleanup_sighand(sighand);
 	clear_tsk_thread_flag(tsk, TIF_SIGPENDING);
 	if (group_dead) {
+		__cleanup_sighand(sighand);
 		flush_sigqueue(&sig->shared_pending);
 		tty_kref_put(tty);
 	}
diff --git a/kernel/fork.c b/kernel/fork.c
index 6c463c80e93d..fe6f1bf32bb9 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1295,7 +1295,8 @@ static int copy_sighand(unsigned long clone_flags, struct task_struct *tsk)
 	struct sighand_struct *sig;
 
 	if (clone_flags & CLONE_SIGHAND) {
-		atomic_inc(&current->sighand->count);
+		if (!(clone_flags & CLONE_THREAD))
+			atomic_inc(&current->sighand->count);
 		return 0;
 	}
 	sig = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
@@ -1896,7 +1897,8 @@ static __latent_entropy struct task_struct *copy_process(
 	if (!(clone_flags & CLONE_THREAD))
 		free_signal_struct(p->signal);
 bad_fork_cleanup_sighand:
-	__cleanup_sighand(p->sighand);
+	if (!(clone_flags & CLONE_THREAD))
+		__cleanup_sighand(p->sighand);
 bad_fork_cleanup_fs:
 	exit_fs(p); /* blocking */
 bad_fork_cleanup_files:
--
2.10.1