[PATCH 5/8] io_uring: switch to kernel_copy_process

From: Mike Christie
Date: Thu Sep 16 2021 - 17:21:34 EST


Convert io_uring and io-wq to create their worker threads with kernel_copy_process() instead of create_io_thread(), passing in the clone flags that create_io_thread() used to hard code, and remove create_io_thread() now that it has no remaining users.

Signed-off-by: Mike Christie <michael.christie@xxxxxxxxxx>
---
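For reference, a rough sketch of how a converted caller is expected to use
kernel_copy_process(). Only the function, its argument order (fn, arg, node,
clone_flags, io_thread, no_files), the clone flag set, and the fact that the
returned task is inactive and must be woken are taken from this patch and the
create_io_thread() comment removed below; the worker function and helper used
here are made-up placeholders:

#include <linux/err.h>
#include <linux/sched.h>
#include <linux/sched/task.h>

/* Placeholder worker; the real callers pass io_wqe_worker/io_sq_thread. */
static int example_worker_fn(void *data)
{
	/* Runs as an io thread; all signals are blocked in the new task. */
	return 0;
}

static struct task_struct *example_start_worker(void *data, int node)
{
	unsigned long flags = CLONE_FS|CLONE_FILES|CLONE_SIGHAND|
			      CLONE_THREAD|CLONE_IO;
	struct task_struct *tsk;

	/* io_thread = 1, no_files = 0, as in the io-wq and SQPOLL callers. */
	tsk = kernel_copy_process(example_worker_fn, data, node,
				  flags, 1, 0);
	if (IS_ERR(tsk))
		return tsk;

	/* The new task is returned inactive and must be started explicitly. */
	wake_up_new_task(tsk);
	return tsk;
}

The hunks below only swap the creation call; the existing wake-up paths in
io-wq and the SQPOLL setup code are left untouched.
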
 fs/io-wq.c                 |  9 +++++++--
 fs/io_uring.c              |  5 ++++-
 include/linux/sched/task.h |  1 -
 kernel/fork.c              | 22 ----------------------
 4 files changed, 11 insertions(+), 26 deletions(-)

diff --git a/fs/io-wq.c b/fs/io-wq.c
index 6c55362c1f99..6fccba5bdc65 100644
--- a/fs/io-wq.c
+++ b/fs/io-wq.c
@@ -68,6 +68,9 @@ struct io_worker {
 
 #define IO_WQ_NR_HASH_BUCKETS	(1u << IO_WQ_HASH_ORDER)
 
+#define IO_WQ_CLONE_FLAGS	(CLONE_FS|CLONE_FILES|CLONE_SIGHAND| \
+				 CLONE_THREAD|CLONE_IO)
+
 struct io_wqe_acct {
 	unsigned nr_workers;
 	unsigned max_workers;
@@ -687,7 +690,8 @@ static void create_worker_cont(struct callback_head *cb)
 	worker = container_of(cb, struct io_worker, create_work);
 	clear_bit_unlock(0, &worker->create_state);
 	wqe = worker->wqe;
-	tsk = create_io_thread(io_wqe_worker, worker, wqe->node);
+	tsk = kernel_copy_process(io_wqe_worker, worker, wqe->node,
+				  IO_WQ_CLONE_FLAGS, 1, 0);
 	if (!IS_ERR(tsk)) {
 		io_init_new_worker(wqe, worker, tsk);
 		io_worker_release(worker);
@@ -757,7 +761,8 @@ static bool create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index)
 	if (index == IO_WQ_ACCT_BOUND)
 		worker->flags |= IO_WORKER_F_BOUND;
 
-	tsk = create_io_thread(io_wqe_worker, worker, wqe->node);
+	tsk = kernel_copy_process(io_wqe_worker, worker, wqe->node,
+				  IO_WQ_CLONE_FLAGS, 1, 0);
 	if (!IS_ERR(tsk)) {
 		io_init_new_worker(wqe, worker, tsk);
 	} else if (!io_should_retry_thread(PTR_ERR(tsk))) {
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 16fb7436043c..2493a78ddd7d 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -8519,6 +8519,8 @@ static int io_sq_offload_create(struct io_ring_ctx *ctx,
 		fdput(f);
 	}
 	if (ctx->flags & IORING_SETUP_SQPOLL) {
+		unsigned long flags = CLONE_FS|CLONE_FILES|CLONE_SIGHAND|
+				      CLONE_THREAD|CLONE_IO;
 		struct task_struct *tsk;
 		struct io_sq_data *sqd;
 		bool attached;
@@ -8560,7 +8562,8 @@ static int io_sq_offload_create(struct io_ring_ctx *ctx,

 		sqd->task_pid = current->pid;
 		sqd->task_tgid = current->tgid;
-		tsk = create_io_thread(io_sq_thread, sqd, NUMA_NO_NODE);
+		tsk = kernel_copy_process(io_sq_thread, sqd, NUMA_NO_NODE,
+					  flags, 1, 0);
 		if (IS_ERR(tsk)) {
 			ret = PTR_ERR(tsk);
 			goto err_sqpoll;
diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h
index 4a6100a24894..f43e81c907e1 100644
--- a/include/linux/sched/task.h
+++ b/include/linux/sched/task.h
@@ -85,7 +85,6 @@ extern void exit_files(struct task_struct *);
 extern void exit_itimers(struct signal_struct *);
 
 extern pid_t kernel_clone(struct kernel_clone_args *kargs);
-struct task_struct *create_io_thread(int (*fn)(void *), void *arg, int node);
 struct task_struct *kernel_copy_process(int (*fn)(void *), void *arg, int node,
 					unsigned long clone_flags,
 					int io_thread, int no_files);
diff --git a/kernel/fork.c b/kernel/fork.c
index 1dda1d4ea77b..9011cbe83fe8 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -2518,28 +2518,6 @@ struct mm_struct *copy_init_mm(void)
 	return dup_mm(NULL, &init_mm);
 }
 
-/*
- * This is like kernel_clone(), but shaved down and tailored to just
- * creating io_uring workers. It returns a created task, or an error pointer.
- * The returned task is inactive, and the caller must fire it up through
- * wake_up_new_task(p). All signals are blocked in the created task.
- */
-struct task_struct *create_io_thread(int (*fn)(void *), void *arg, int node)
-{
-	unsigned long flags = CLONE_FS|CLONE_FILES|CLONE_SIGHAND|CLONE_THREAD|
-			      CLONE_IO;
-	struct kernel_clone_args args = {
-		.flags		= ((lower_32_bits(flags) | CLONE_VM |
-				    CLONE_UNTRACED) & ~CSIGNAL),
-		.exit_signal	= (lower_32_bits(flags) & CSIGNAL),
-		.stack		= (unsigned long)fn,
-		.stack_size	= (unsigned long)arg,
-		.io_thread	= 1,
-	};
-
-	return copy_process(NULL, 0, node, &args);
-}
-
 /**
  * kernel_copy_process - create a copy of a process to be used by the kernel
  * @fn: thread stack
--
2.25.1