[tip:smp/hotplug] padata: Avoid nested calls to cpus_read_lock() in pcrypt_init_padata()

From: tip-bot for Sebastian Andrzej Siewior
Date: Fri May 26 2017 - 04:39:52 EST


Commit-ID: c5a81c8ff816d89941fe86961b286765d6ca2f5f
Gitweb: http://git.kernel.org/tip/c5a81c8ff816d89941fe86961b286765d6ca2f5f
Author: Sebastian Andrzej Siewior <bigeasy@xxxxxxxxxxxxx>
AuthorDate: Wed, 24 May 2017 10:15:18 +0200
Committer: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
CommitDate: Fri, 26 May 2017 10:10:37 +0200

padata: Avoid nested calls to cpus_read_lock() in pcrypt_init_padata()

pcrypt_init_padata()
   cpus_read_lock()
   padata_alloc_possible()
     padata_alloc()
       cpus_read_lock()

The nested call to cpus_read_lock() works with the current implementation,
but prevents the conversion of the CPU hotplug lock to a percpu rwsem.
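
For background (my reading of the problem, not wording from the commit): a
percpu rwsem read side is not recursion-safe once a writer is queued, so the
nesting above could deadlock after the conversion. A sketch of the hazardous
interleaving, assuming cpu_hotplug_lock has already been converted:

#include <linux/cpu.h>

/* Sketch only, not meant to be executed: shows why the inner read lock has
 * to go away before cpu_hotplug_lock can become a percpu rwsem. */
static void nested_read_lock_hazard(void)
{
	cpus_read_lock();	/* outer section, e.g. in pcrypt_init_padata() */

	/*
	 * If a hotplug writer calls cpus_write_lock() at this point, it
	 * waits for the outer read section to finish ...
	 */

	cpus_read_lock();	/* ... while this inner acquisition can block
				 * behind the pending writer: deadlock. */
	cpus_read_unlock();
	cpus_read_unlock();
}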

The only caller of padata_alloc_possible() is pcrypt_init_padata(), which
already invokes it from a cpus_read_lock() protected region.

Remove the cpus_read_lock() call in padata_alloc() and document the
calling convention.
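
As an illustration of that calling convention, here is a minimal, hedged
sketch of a caller in the style of pcrypt_init_padata(); the function name
example_alloc_pinst() is hypothetical and the error handling is reduced to
the bare minimum, so this is not the actual crypto/pcrypt.c code:

#include <linux/cpu.h>
#include <linux/padata.h>
#include <linux/workqueue.h>

/* Hypothetical caller: the CPU hotplug lock is now taken here, not inside
 * padata_alloc(), so the read lock is acquired exactly once. */
static struct padata_instance *example_alloc_pinst(struct workqueue_struct *wq)
{
	struct padata_instance *pinst;

	cpus_read_lock();
	pinst = padata_alloc_possible(wq);	/* asserts the lock via lockdep */
	cpus_read_unlock();

	return pinst;				/* NULL on allocation failure */
}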

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@xxxxxxxxxxxxx>
Signed-off-by: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Tested-by: Paul E. McKenney <paulmck@xxxxxxxxxxxxxxxxxx>
Acked-by: Ingo Molnar <mingo@xxxxxxxxxx>
Cc: Steffen Klassert <steffen.klassert@xxxxxxxxxxx>
Cc: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
Cc: Steven Rostedt <rostedt@xxxxxxxxxxx>
Cc: linux-crypto@xxxxxxxxxxxxxxx
Link: http://lkml.kernel.org/r/20170524081547.571278910@xxxxxxxxxxxxx

---
kernel/padata.c | 11 ++++++-----
1 file changed, 6 insertions(+), 5 deletions(-)

diff --git a/kernel/padata.c b/kernel/padata.c
index 0c708f6..868f947 100644
--- a/kernel/padata.c
+++ b/kernel/padata.c
@@ -940,6 +940,8 @@ static struct kobj_type padata_attr_type = {
  * @wq: workqueue to use for the allocated padata instance
  * @pcpumask: cpumask that will be used for padata parallelization
  * @cbcpumask: cpumask that will be used for padata serialization
+ *
+ * Must be called from a cpus_read_lock() protected region
  */
 static struct padata_instance *padata_alloc(struct workqueue_struct *wq,
 					    const struct cpumask *pcpumask,
@@ -952,7 +954,6 @@ static struct padata_instance *padata_alloc(struct workqueue_struct *wq,
 	if (!pinst)
 		goto err;
 
-	get_online_cpus();
 	if (!alloc_cpumask_var(&pinst->cpumask.pcpu, GFP_KERNEL))
 		goto err_free_inst;
 	if (!alloc_cpumask_var(&pinst->cpumask.cbcpu, GFP_KERNEL)) {
@@ -976,14 +977,12 @@ static struct padata_instance *padata_alloc(struct workqueue_struct *wq,
 
 	pinst->flags = 0;
 
-	put_online_cpus();
-
 	BLOCKING_INIT_NOTIFIER_HEAD(&pinst->cpumask_change_notifier);
 	kobject_init(&pinst->kobj, &padata_attr_type);
 	mutex_init(&pinst->lock);
 
 #ifdef CONFIG_HOTPLUG_CPU
-	cpuhp_state_add_instance_nocalls(hp_online, &pinst->node);
+	cpuhp_state_add_instance_nocalls_cpuslocked(hp_online, &pinst->node);
 #endif
 	return pinst;
 
@@ -992,7 +991,6 @@ err_free_masks:
 	free_cpumask_var(pinst->cpumask.cbcpu);
 err_free_inst:
 	kfree(pinst);
-	put_online_cpus();
 err:
 	return NULL;
 }
@@ -1003,9 +1001,12 @@ err:
  * parallel workers.
  *
  * @wq: workqueue to use for the allocated padata instance
+ *
+ * Must be called from a cpus_read_lock() protected region
  */
 struct padata_instance *padata_alloc_possible(struct workqueue_struct *wq)
 {
+	lockdep_assert_cpus_held();
 	return padata_alloc(wq, cpu_possible_mask, cpu_possible_mask);
 }
 EXPORT_SYMBOL(padata_alloc_possible);