[PATCH] exec: make de_thread alloc new signal struct earlier

From: Bernd Edlinger
Date: Sun Mar 08 2020 - 08:58:45 EST


It was pointed out that de_thread may return -ENOMEM
after it has already terminated other threads; at that
point, returning an error from execve is no longer an
option, except when a fatal signal is being delivered.

Allocate the memory for the signal table earlier,
and make sure that -ENOMEM is returned before the
unrecoverable actions are started.

Signed-off-by: Bernd Edlinger <bernd.edlinger@xxxxxxxxxx>
---
Eric, what do you think — might this help to move
the "point of no return" lower, and simplify
your patch?

fs/exec.c | 31 +++++++++++++++++++++++--------
1 file changed, 23 insertions(+), 8 deletions(-)

diff --git a/fs/exec.c b/fs/exec.c
index 74d88da..a0328dc 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1057,16 +1057,26 @@ static int exec_mmap(struct mm_struct *mm)
* disturbing other processes. (Other processes might share the signal
* table via the CLONE_SIGHAND option to clone().)
*/
-static int de_thread(struct task_struct *tsk)
+static int de_thread(void)
{
+ struct task_struct *tsk = current;
struct signal_struct *sig = tsk->signal;
struct sighand_struct *oldsighand = tsk->sighand;
spinlock_t *lock = &oldsighand->siglock;
+ struct sighand_struct *newsighand = NULL;

if (thread_group_empty(tsk))
goto no_thread_group;

/*
+ * This is the last chance to return an out of memory error.
+ * After this point only fatal signals are okay.
+ */
+ newsighand = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
+ if (!newsighand)
+ return -ENOMEM;
+
+ /*
* Kill all other threads in the thread group.
*/
spin_lock_irq(lock);
@@ -1076,7 +1086,7 @@ static int de_thread(struct task_struct *tsk)
* return so that the signal is processed.
*/
spin_unlock_irq(lock);
- return -EAGAIN;
+ goto err_free;
}

sig->group_exit_task = tsk;
@@ -1191,14 +1201,16 @@ static int de_thread(struct task_struct *tsk)
#endif

if (refcount_read(&oldsighand->count) != 1) {
- struct sighand_struct *newsighand;
/*
* This ->sighand is shared with the CLONE_SIGHAND
* but not CLONE_THREAD task, switch to the new one.
*/
- newsighand = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
- if (!newsighand)
- return -ENOMEM;
+ if (!newsighand) {
+ newsighand = kmem_cache_alloc(sighand_cachep,
+ GFP_KERNEL);
+ if (!newsighand)
+ return -ENOMEM;
+ }

refcount_set(&newsighand->count, 1);
memcpy(newsighand->action, oldsighand->action,
@@ -1211,7 +1223,8 @@ static int de_thread(struct task_struct *tsk)
write_unlock_irq(&tasklist_lock);

__cleanup_sighand(oldsighand);
- }
+ } else if (newsighand)
+ kmem_cache_free(sighand_cachep, newsighand);

BUG_ON(!thread_group_leader(tsk));
return 0;
@@ -1222,6 +1235,8 @@ static int de_thread(struct task_struct *tsk)
sig->group_exit_task = NULL;
sig->notify_count = 0;
read_unlock(&tasklist_lock);
+err_free:
+ kmem_cache_free(sighand_cachep, newsighand);
return -EAGAIN;
}

@@ -1262,7 +1277,7 @@ int flush_old_exec(struct linux_binprm * bprm)
* Make sure we have a private signal table and that
* we are unassociated from the previous thread group.
*/
- retval = de_thread(current);
+ retval = de_thread();
if (retval)
goto out;

--
1.9.1