Re: [PATCH 2/8] ntb_perf: Improve thread handling to increase robustness

From: Jiang, Dave
Date: Mon Jun 13 2016 - 14:17:06 EST


On Fri, 2016-06-10 at 16:54 -0600, Logan Gunthorpe wrote:
> This commit accomplishes a few things:
>
> 1) Properly prevent multiple sets of threads from running at once
> using a mutex. Lots of race conditions existed with the previous
> thread_cleanup logic.
>
> 2) The mutex allows us to ensure that the threads are finished before
> tearing down the device or module.
>
> 3) Don't use kthread_stop when the threads can exit by themselves, as
> this is discouraged by the kthread_create documentation. Threads now
> wait for kthread_stop to occur before exiting.
>
> 4) Writing to the run file now blocks until the threads are complete,
> so the test can be safely interrupted by a SIGINT.
>
> Also, while I was at it:
>
> 5) debugfs_run_write shouldn't return 0 in the early check cases, as
> returning 0 consumes no bytes and could cause the caller's write() to
> loop undesirably.
>
> Signed-off-by: Logan Gunthorpe <logang@xxxxxxxxxxxx>
Acked-by: Dave Jiang <dave.jiang@xxxxxxxxx>

> ---
>  drivers/ntb/test/ntb_perf.c | 124 +++++++++++++++++++++++++++-----------------
>  1 file changed, 76 insertions(+), 48 deletions(-)
>
> diff --git a/drivers/ntb/test/ntb_perf.c b/drivers/ntb/test/ntb_perf.c
> index 5008ccf..db4dc61 100644
> --- a/drivers/ntb/test/ntb_perf.c
> +++ b/drivers/ntb/test/ntb_perf.c
> @@ -58,6 +58,7 @@
>  #include <linux/delay.h>
>  #include <linux/sizes.h>
>  #include <linux/ntb.h>
> +#include <linux/mutex.h>
>  
>  #define DRIVER_NAME		"ntb_perf"
>  #define DRIVER_DESCRIPTION	"PCIe NTB Performance Measurement Tool"
> @@ -121,6 +122,7 @@ struct pthr_ctx {
>  	int			dma_prep_err;
>  	int			src_idx;
>  	void			*srcs[MAX_SRCS];
> +	wait_queue_head_t	*wq;
>  };
>  
>  struct perf_ctx {
> @@ -134,9 +136,11 @@ struct perf_ctx {
>  	struct dentry		*debugfs_run;
>  	struct dentry		*debugfs_threads;
>  	u8			perf_threads;
> -	bool			run;
> +	/* mutex ensures only one set of threads run at once */
> +	struct mutex		run_mutex;
>  	struct pthr_ctx		pthr_ctx[MAX_THREADS];
>  	atomic_t		tsync;
> +	atomic_t		tdone;
>  };
>  
>  enum {
> @@ -295,12 +299,18 @@ static int perf_move_data(struct pthr_ctx *pctx, char __iomem *dst, char *src,
>  			set_current_state(TASK_INTERRUPTIBLE);
>  			schedule_timeout(1);
>  		}
> +
> +		if (unlikely(kthread_should_stop()))
> +			break;
>  	}
>  
>  	if (use_dma) {
>  		pr_info("%s: All DMA descriptors submitted\n", current->comm);
> -		while (atomic_read(&pctx->dma_sync) != 0)
> +		while (atomic_read(&pctx->dma_sync) != 0) {
> +			if (kthread_should_stop())
> +				break;
>  			msleep(20);
> +		}
>  	}
>  
>  	kstop = ktime_get();
> @@ -393,7 +403,10 @@ static int ntb_perf_thread(void *data)
>  		pctx->srcs[i] = NULL;
>  	}
>  
> -	return 0;
> +	atomic_inc(&perf->tdone);
> +	wake_up(pctx->wq);
> +	rc = 0;
> +	goto done;
>  
>  err:
>  	for (i = 0; i < MAX_SRCS; i++) {
> @@ -406,6 +419,16 @@ err:
>  		pctx->dma_chan = NULL;
>  	}
>  
> +done:
> +	/* Wait until we are told to stop */
> +	for (;;) {
> +		set_current_state(TASK_INTERRUPTIBLE);
> +		if (kthread_should_stop())
> +			break;
> +		schedule();
> +	}
> +	__set_current_state(TASK_RUNNING);
> +
>  	return rc;
>  }
>  
> @@ -553,6 +576,7 @@ static ssize_t debugfs_run_read(struct file *filp, char __user *ubuf,
>  	struct perf_ctx *perf = filp->private_data;
>  	char *buf;
>  	ssize_t ret, out_offset;
> +	int running;
>  
>  	if (!perf)
>  		return 0;
> @@ -560,7 +584,9 @@ static ssize_t debugfs_run_read(struct file *filp, char __user *ubuf,
>  	buf = kmalloc(64, GFP_KERNEL);
>  	if (!buf)
>  		return -ENOMEM;
> -	out_offset = snprintf(buf, 64, "%d\n", perf->run);
> +
> +	running = mutex_is_locked(&perf->run_mutex);
> +	out_offset = snprintf(buf, 64, "%d\n", running);
>  	ret = simple_read_from_buffer(ubuf, count, offp, buf, out_offset);
>  	kfree(buf);
>  
> @@ -572,7 +598,6 @@ static void threads_cleanup(struct perf_ctx *perf)
>  	struct pthr_ctx *pctx;
>  	int i;
>  
> -	perf->run = false;
>  	for (i = 0; i < MAX_THREADS; i++) {
>  		pctx = &perf->pthr_ctx[i];
>  		if (pctx->thread) {
> @@ -587,65 +612,66 @@ static ssize_t debugfs_run_write(struct file *filp, const char __user *ubuf,
>  {
>  	struct perf_ctx *perf = filp->private_data;
>  	int node, i;
> +	DECLARE_WAIT_QUEUE_HEAD(wq);
>  
>  	if (!perf->link_is_up)
> -		return 0;
> +		return -ENOLINK;
>  
>  	if (perf->perf_threads == 0)
> -		return 0;
> +		return -EINVAL;
>  
> -	if (atomic_read(&perf->tsync) == 0)
> -		perf->run = false;
> +	if (!mutex_trylock(&perf->run_mutex))
> +		return -EBUSY;
>  
> -	if (perf->run)
> -		threads_cleanup(perf);
> -	else {
> -		perf->run = true;
> +	if (perf->perf_threads > MAX_THREADS) {
> +		perf->perf_threads = MAX_THREADS;
> +		pr_info("Reset total threads to: %u\n", MAX_THREADS);
> +	}
>  
> -		if (perf->perf_threads > MAX_THREADS) {
> -			perf->perf_threads = MAX_THREADS;
> -			pr_info("Reset total threads to: %u\n", MAX_THREADS);
> -		}
> +	/* no greater than 1M */
> +	if (seg_order > MAX_SEG_ORDER) {
> +		seg_order = MAX_SEG_ORDER;
> +		pr_info("Fix seg_order to %u\n", seg_order);
> +	}
>  
> -		/* no greater than 1M */
> -		if (seg_order > MAX_SEG_ORDER) {
> -			seg_order = MAX_SEG_ORDER;
> -			pr_info("Fix seg_order to %u\n", seg_order);
> -		}
> +	if (run_order < seg_order) {
> +		run_order = seg_order;
> +		pr_info("Fix run_order to %u\n", run_order);
> +	}
>  
> -		if (run_order < seg_order) {
> -			run_order = seg_order;
> -			pr_info("Fix run_order to %u\n", run_order);
> -		}
> +	node = dev_to_node(&perf->ntb->pdev->dev);
> +	atomic_set(&perf->tdone, 0);
>  
> -		node = dev_to_node(&perf->ntb->pdev->dev);
> -		/* launch kernel thread */
> -		for (i = 0; i < perf->perf_threads; i++) {
> -			struct pthr_ctx *pctx;
> -
> -			pctx = &perf->pthr_ctx[i];
> -			atomic_set(&pctx->dma_sync, 0);
> -			pctx->perf = perf;
> -			pctx->thread =
> -				kthread_create_on_node(ntb_perf_thread,
> -						       (void *)pctx,
> -						       node, "ntb_perf %d", i);
> -			if (IS_ERR(pctx->thread)) {
> -				pctx->thread = NULL;
> -				goto err;
> -			} else
> -				wake_up_process(pctx->thread);
> -
> -			if (perf->run == false)
> -				return -ENXIO;
> -		}
> +	/* launch kernel thread */
> +	for (i = 0; i < perf->perf_threads; i++) {
> +		struct pthr_ctx *pctx;
>  
> +		pctx = &perf->pthr_ctx[i];
> +		atomic_set(&pctx->dma_sync, 0);
> +		pctx->perf = perf;
> +		pctx->wq = &wq;
> +		pctx->thread =
> +			kthread_create_on_node(ntb_perf_thread,
> +					       (void *)pctx,
> +					       node, "ntb_perf %d", i);
> +		if (IS_ERR(pctx->thread)) {
> +			pctx->thread = NULL;
> +			goto err;
> +		} else {
> +			wake_up_process(pctx->thread);
> +		}
>  	}
>  
> +	wait_event_interruptible(wq,
> +		atomic_read(&perf->tdone) == perf->perf_threads);
> +
> +	threads_cleanup(perf);
> +	mutex_unlock(&perf->run_mutex);
>  	return count;
>  
>  err:
>  	threads_cleanup(perf);
> +	mutex_unlock(&perf->run_mutex);
>  	return -ENXIO;
>  }
>  
> @@ -713,7 +739,7 @@ static int perf_probe(struct ntb_client *client, struct ntb_dev *ntb)
>  	perf->ntb = ntb;
>  	perf->perf_threads = 1;
>  	atomic_set(&perf->tsync, 0);
> -	perf->run = false;
> +	mutex_init(&perf->run_mutex);
>  	spin_lock_init(&perf->db_lock);
>  	perf_setup_mw(ntb, perf);
>  	INIT_DELAYED_WORK(&perf->link_work, perf_link_work);
> @@ -748,6 +774,8 @@ static void perf_remove(struct ntb_client *client, struct ntb_dev *ntb)
>  
>  	dev_dbg(&perf->ntb->dev, "%s called\n", __func__);
>  
> +	mutex_lock(&perf->run_mutex);
> +
>  	cancel_delayed_work_sync(&perf->link_work);
>  	cancel_work_sync(&perf->link_cleanup);
> Â
> -- 
> 2.1.4
>