[PATCH v4] perf/arm-dmc620: Fix dmc620_pmu_irqs_lock/cpu_hotplug_lock circular lock dependency
From: Waiman Long
Date: Mon Aug 07 2023 - 11:46:31 EST
The following circular locking dependency was reported when running a
CPU online/offline test on an arm64 system.

[   84.195923] Chain exists of:
                 dmc620_pmu_irqs_lock --> cpu_hotplug_lock --> cpuhp_state-down

[   84.207305]  Possible unsafe locking scenario:

[   84.213212]        CPU0                    CPU1
[   84.217729]        ----                    ----
[   84.222247]   lock(cpuhp_state-down);
[   84.225899]                               lock(cpu_hotplug_lock);
[   84.232068]                               lock(cpuhp_state-down);
[   84.238237]   lock(dmc620_pmu_irqs_lock);
[   84.242236]
                *** DEADLOCK ***

The problematic locking order seems to be

	lock(dmc620_pmu_irqs_lock) --> lock(cpu_hotplug_lock)

This locking order happens when dmc620_pmu_get_irq() calls
cpuhp_state_add_instance_nocalls(). Since dmc620_pmu_irqs_lock only
protects the dmc620_pmu_irqs list, we don't actually need to hold the
lock when adding a new instance to the CPU hotplug subsystem.

Fix this possible deadlock scenario by adding a new
dmc620_pmu_get_irq_lock for protecting the call to __dmc620_pmu_get_irq()
and taking dmc620_pmu_irqs_lock inside __dmc620_pmu_get_irq()
only when dmc620_pmu_irqs is being searched or modified. As a
result, cpuhp_state_add_instance_nocalls() won't be called with
dmc620_pmu_irqs_lock held and cpu_hotplug_lock won't be acquired after
dmc620_pmu_irqs_lock.
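
In other words, after this patch __dmc620_pmu_get_irq() follows roughly
the pattern sketched below (condensed from the diff that follows;
allocation, error handling and the IRQ request/CPU-hotplug registration
details are elided):

	/* Condensed sketch only -- see the diff below for the real code. */
	mutex_lock(&dmc620_pmu_irqs_lock);
	/* search dmc620_pmu_irqs for an existing entry for irq_num */
	mutex_unlock(&dmc620_pmu_irqs_lock);

	/*
	 * Allocate and set up a new entry, including the call to
	 * cpuhp_state_add_instance_nocalls(), without holding
	 * dmc620_pmu_irqs_lock, so cpu_hotplug_lock is never taken
	 * while dmc620_pmu_irqs_lock is held.
	 */

	mutex_lock(&dmc620_pmu_irqs_lock);
	list_add(&irq->irqs_node, &dmc620_pmu_irqs);
	mutex_unlock(&dmc620_pmu_irqs_lock);

The whole lookup-or-create sequence stays serialized by the new
dmc620_pmu_get_irq_lock taken in dmc620_pmu_get_irq(), so concurrent
callers cannot race to register the same interrupt twice.
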
Suggested-by: Robin Murphy <robin.murphy@xxxxxxx>
Signed-off-by: Waiman Long <longman@xxxxxxxxxx>
---
drivers/perf/arm_dmc620_pmu.c | 18 ++++++++++++++----
1 file changed, 14 insertions(+), 4 deletions(-)
diff --git a/drivers/perf/arm_dmc620_pmu.c b/drivers/perf/arm_dmc620_pmu.c
index 9d0f01c4455a..895971915f2d 100644
--- a/drivers/perf/arm_dmc620_pmu.c
+++ b/drivers/perf/arm_dmc620_pmu.c
@@ -68,6 +68,7 @@
static LIST_HEAD(dmc620_pmu_irqs);
static DEFINE_MUTEX(dmc620_pmu_irqs_lock);
+static DEFINE_MUTEX(dmc620_pmu_get_irq_lock);
struct dmc620_pmu_irq {
struct hlist_node node;
@@ -421,11 +422,18 @@ static irqreturn_t dmc620_pmu_handle_irq(int irq_num, void *data)
static struct dmc620_pmu_irq *__dmc620_pmu_get_irq(int irq_num)
{
struct dmc620_pmu_irq *irq;
+ bool found = false;
int ret;
+ mutex_lock(&dmc620_pmu_irqs_lock);
list_for_each_entry(irq, &dmc620_pmu_irqs, irqs_node)
- if (irq->irq_num == irq_num && refcount_inc_not_zero(&irq->refcount))
- return irq;
+ if (irq->irq_num == irq_num && refcount_inc_not_zero(&irq->refcount)) {
+ found = true;
+ break;
+ }
+ mutex_unlock(&dmc620_pmu_irqs_lock);
+ if (found)
+ return irq;
irq = kzalloc(sizeof(*irq), GFP_KERNEL);
if (!irq)
@@ -452,7 +460,9 @@ static struct dmc620_pmu_irq *__dmc620_pmu_get_irq(int irq_num)
goto out_free_irq;
irq->irq_num = irq_num;
+ mutex_lock(&dmc620_pmu_irqs_lock);
list_add(&irq->irqs_node, &dmc620_pmu_irqs);
+ mutex_unlock(&dmc620_pmu_irqs_lock);
return irq;
@@ -467,9 +477,9 @@ static int dmc620_pmu_get_irq(struct dmc620_pmu *dmc620_pmu, int irq_num)
{
struct dmc620_pmu_irq *irq;
- mutex_lock(&dmc620_pmu_irqs_lock);
+ mutex_lock(&dmc620_pmu_get_irq_lock);
irq = __dmc620_pmu_get_irq(irq_num);
- mutex_unlock(&dmc620_pmu_irqs_lock);
+ mutex_unlock(&dmc620_pmu_get_irq_lock);
if (IS_ERR(irq))
return PTR_ERR(irq);
--
2.31.1