[PATCH 1/1] Revert "kmod: handle UMH_WAIT_PROC from system unbound workqueue"

From: Oleg Nesterov
Date: Wed Oct 14 2015 - 14:55:38 EST


This reverts commit bb304a5c6fc63d8506cd9741a3a5f35b73605625.

The reverted commit leads to kthread zombies.

call_usermodehelper_exec_sync() does fork() + wait() with SIGCHLD
un-ignored (otherwise sys_wait4() won't populate the exit status).
What we missed is that this worker thread can have other children
previously forked by call_usermodehelper_exec_work() without
UMH_WAIT_PROC. If such a child exits in that window, it becomes a
zombie and nobody can reap it (unless/until this worker thread exits
too).

Signed-off-by: Oleg Nesterov <oleg@xxxxxxxxxx>
---
kernel/kmod.c | 44 ++++++++++++++++++++++++--------------------
1 file changed, 24 insertions(+), 20 deletions(-)

diff --git a/kernel/kmod.c b/kernel/kmod.c
index da98d05..d38b2da 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -265,9 +265,15 @@ out:
do_exit(0);
}

-/* Handles UMH_WAIT_PROC. */
-static void call_usermodehelper_exec_sync(struct subprocess_info *sub_info)
+/*
+ * Handles UMH_WAIT_PROC. Our parent (unbound workqueue) might not be able to
+ * run enough instances to handle usermodehelper completions without blocking
+ * some other pending requests. That's why we use a kernel thread dedicated for
+ * that purpose.
+ */
+static int call_usermodehelper_exec_sync(void *data)
{
+ struct subprocess_info *sub_info = data;
pid_t pid;

/* If SIGCLD is ignored sys_wait4 won't populate the status. */
@@ -281,9 +287,9 @@ static void call_usermodehelper_exec_sync(struct subprocess_info *sub_info)
* Normally it is bogus to call wait4() from in-kernel because
* wait4() wants to write the exit code to a userspace address.
* But call_usermodehelper_exec_sync() always runs as kernel
- * thread (workqueue) and put_user() to a kernel address works
- * OK for kernel threads, due to their having an mm_segment_t
- * which spans the entire address space.
+ * thread and put_user() to a kernel address works OK for kernel
+ * threads, due to their having an mm_segment_t which spans the
+ * entire address space.
*
* Thus the __user pointer cast is valid here.
*/
@@ -298,21 +304,19 @@ static void call_usermodehelper_exec_sync(struct subprocess_info *sub_info)
sub_info->retval = ret;
}

- /* Restore default kernel sig handler */
- kernel_sigaction(SIGCHLD, SIG_IGN);
-
umh_complete(sub_info);
+ do_exit(0);
}

/*
- * We need to create the usermodehelper kernel thread from a task that is affine
+ * This function doesn't strictly needs to be called asynchronously. But we
+ * need to create the usermodehelper kernel threads from a task that is affine
* to an optimized set of CPUs (or nohz housekeeping ones) such that they
* inherit a widest affinity irrespective of call_usermodehelper() callers with
* possibly reduced affinity (eg: per-cpu workqueues). We don't want
* usermodehelper targets to contend a busy CPU.
*
- * Unbound workqueues provide such wide affinity and allow to block on
- * UMH_WAIT_PROC requests without blocking pending request (up to some limit).
+ * Unbound workqueues provide such wide affinity.
*
* Besides, workqueues provide the privilege level that caller might not have
* to perform the usermodehelper request.
@@ -322,18 +326,18 @@ static void call_usermodehelper_exec_work(struct work_struct *work)
{
struct subprocess_info *sub_info =
container_of(work, struct subprocess_info, work);
+ pid_t pid;

- if (sub_info->wait & UMH_WAIT_PROC) {
- call_usermodehelper_exec_sync(sub_info);
- } else {
- pid_t pid;
-
+ if (sub_info->wait & UMH_WAIT_PROC)
+ pid = kernel_thread(call_usermodehelper_exec_sync, sub_info,
+ CLONE_FS | CLONE_FILES | SIGCHLD);
+ else
pid = kernel_thread(call_usermodehelper_exec_async, sub_info,
SIGCHLD);
- if (pid < 0) {
- sub_info->retval = pid;
- umh_complete(sub_info);
- }
+
+ if (pid < 0) {
+ sub_info->retval = pid;
+ umh_complete(sub_info);
}
}

--
2.4.3

