[PATCH 4/7] workqueue: single pass rebind_workers

From: Lai Jiangshan
Date: Mon Aug 27 2012 - 13:57:54 EST


busy_worker_rebind_fn() can't return until all idle workers are rebound;
the code of busy_worker_rebind_fn() ensures this.

So we can change the order of the code in rebind_workers()
and make it rebind the workers in a single pass.

This makes the code much cleaner and more readable.

Signed-off-by: Lai Jiangshan <laijs@xxxxxxxxxxxxxx>
---
kernel/workqueue.c | 19 +++++++------------
1 files changed, 7 insertions(+), 12 deletions(-)

diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index f6e4394..96485c0 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1394,16 +1394,12 @@ static void rebind_workers(struct global_cwq *gcwq)
* us to finish up by competing on pool->manager_mutex.
*/
init_completion(&idle_rebind.done);
-retry:
idle_rebind.cnt = 1;
INIT_COMPLETION(idle_rebind.done);

/* set REBIND and kick idle ones, we'll wait for these later */
for_each_worker_pool(pool, gcwq) {
list_for_each_entry(worker, &pool->idle_list, entry) {
- if (!(worker->flags & WORKER_UNBOUND))
- continue;
-
/* morph UNBOUND to REBIND */
worker->flags &= ~WORKER_UNBOUND;
worker->flags |= WORKER_REBIND;
@@ -1416,14 +1412,6 @@ retry:
}
}

- if (--idle_rebind.cnt) {
- spin_unlock_irq(&gcwq->lock);
- wait_for_completion(&idle_rebind.done);
- spin_lock_irq(&gcwq->lock);
- /* busy ones might have become idle while waiting, retry */
- goto retry;
- }
-
/* rebind busy workers */
for_each_busy_worker(worker, i, pos, gcwq) {
struct work_struct *rebind_work = &worker->rebind_work;
@@ -1442,6 +1430,13 @@ retry:
worker->scheduled.next,
work_color_to_flags(WORK_NO_COLOR));
}
+
+ /* waiting for all idle workers to be rebound */
+ if (--idle_rebind.cnt) {
+ spin_unlock_irq(&gcwq->lock);
+ wait_for_completion(&idle_rebind.done);
+ spin_lock_irq(&gcwq->lock);
+ }
}

static struct worker *alloc_worker(void)
--
1.7.4.4

--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@xxxxxxxxxxxxxxx
More majordomo info at http://vger.kernel.org/majordomo-info.html
Please read the FAQ at http://www.tux.org/lkml/