On 12/14/2014 12:35 AM, Kamezawa Hiroyuki wrote:
Remove node-aware unbound pools if a node goes offline:
scan the unbound workqueues and remove the NUMA-affine pool when
its node goes offline.
Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@xxxxxxxxxxxxxx>
---
kernel/workqueue.c | 29 +++++++++++++++++++++++++++++
1 file changed, 29 insertions(+)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 35f4f00..07b4eb5 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -4921,11 +4921,40 @@ early_initcall(init_workqueues);
* cached pools per cpu should be freed at node unplug
*/
+/*
+ * Replace per-node pwq with dfl_pwq because this node disappears.
+ * The new pool will be set at CPU_ONLINE by wq_update_unbound_numa.
+ */
+static void wq_release_unbound_numa(struct workqueue_struct *wq, int nid)
+{
+ struct pool_workqueue *old_pwq = NULL;
+
+ if (!wq_numa_enabled || !(wq->flags & WQ_UNBOUND))
+ return;
+ mutex_lock(&wq->mutex);
+ if (wq->unbound_attrs->no_numa)
+ goto out_unlock;
+ spin_lock_irq(&wq->dfl_pwq->pool->lock);
+ get_pwq(wq->dfl_pwq);
+ spin_unlock_irq(&wq->dfl_pwq->pool->lock);
+ old_pwq = numa_pwq_tbl_install(wq, nid, wq->dfl_pwq);
+out_unlock:
+ mutex_unlock(&wq->mutex);
+ put_pwq_unlocked(old_pwq);
+ return;
+}
We have already done this in wq_update_unbound_numa().
+
void workqueue_register_numanode(int nid)
{
}
void workqueue_unregister_numanode(int nid)
{
+ struct workqueue_struct *wq;
+
+ mutex_lock(&wq_pool_mutex);
+ list_for_each_entry(wq, &workqueues, list)
+ wq_release_unbound_numa(wq, nid);
+ mutex_unlock(&wq_pool_mutex);
}
#endif