[PATCH 1/1] iommu/vt-d: Fix possible recursive locking in intel_iommu_init()

From: Lu Baolu
Date: Mon Jul 18 2022 - 19:58:23 EST


The global rwsem dmar_global_lock was introduced by commit 3a5670e8ac932
("iommu/vt-d: Introduce a rwsem to protect global data structures"). It
is used to protect DMAR-related global data from DMAR and PCI device
hotplug operations.

The dmar_global_lock taken in intel_iommu_init() can cause a recursive
locking issue: intel_iommu_get_resv_regions() takes dmar_global_lock
from within a section where intel_iommu_init() already holds it via
probe_acpi_namespace_devices().

Using dmar_global_lock in intel_iommu_init() is unnecessary, since it is
unlikely that any I/O board needs to be hot-added during the early boot
stage. Fix the possible recursive locking issue by deferring DMAR and PCI
device hotplug support until after the IOMMU subsystem is initialized,
and by removing the uses of dmar_global_lock from intel_iommu_init().

Fixes: d5692d4af08cd ("iommu/vt-d: Fix suspicious RCU usage in probe_acpi_namespace_devices()")
Reported-by: Robin Murphy <robin.murphy@xxxxxxx>
Link: https://lore.kernel.org/linux-iommu/894db0ccae854b35c73814485569b634237b5538.1657034828.git.robin.murphy@xxxxxxx/
Signed-off-by: Lu Baolu <baolu.lu@xxxxxxxxxxxxxxx>
---
include/linux/dmar.h | 4 +++-
drivers/iommu/intel/dmar.c | 7 +++++++
drivers/iommu/intel/iommu.c | 27 ++-------------------------
3 files changed, 12 insertions(+), 26 deletions(-)

diff --git a/include/linux/dmar.h b/include/linux/dmar.h
index d81a51978d01..8917a32173c4 100644
--- a/include/linux/dmar.h
+++ b/include/linux/dmar.h
@@ -65,6 +65,7 @@ struct dmar_pci_notify_info {

extern struct rw_semaphore dmar_global_lock;
extern struct list_head dmar_drhd_units;
+extern int intel_iommu_enabled;

#define for_each_drhd_unit(drhd) \
list_for_each_entry_rcu(drhd, &dmar_drhd_units, list, \
@@ -88,7 +89,8 @@ extern struct list_head dmar_drhd_units;
static inline bool dmar_rcu_check(void)
{
return rwsem_is_locked(&dmar_global_lock) ||
- system_state == SYSTEM_BOOTING;
+ system_state == SYSTEM_BOOTING ||
+ (IS_ENABLED(CONFIG_INTEL_IOMMU) && !intel_iommu_enabled);
}

#define dmar_rcu_dereference(p) rcu_dereference_check((p), dmar_rcu_check())
diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c
index 6327b34f5aa7..63f32dfece82 100644
--- a/drivers/iommu/intel/dmar.c
+++ b/drivers/iommu/intel/dmar.c
@@ -2349,6 +2349,13 @@ static int dmar_device_hotplug(acpi_handle handle, bool insert)
if (!dmar_in_use())
return 0;

+ /*
+ * It's unlikely that any I/O board is hot added before the IOMMU
+ * subsystem is initialized.
+ */
+ if (IS_ENABLED(CONFIG_INTEL_IOMMU) && !intel_iommu_enabled)
+ return -EOPNOTSUPP;
+
if (dmar_detect_dsm(handle, DMAR_DSM_FUNC_DRHD)) {
tmp = handle;
} else {
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index 7cca030a508e..59064cf92e7b 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -3013,13 +3013,7 @@ static int __init init_dmars(void)

#ifdef CONFIG_INTEL_IOMMU_SVM
if (pasid_supported(iommu) && ecap_prs(iommu->ecap)) {
- /*
- * Call dmar_alloc_hwirq() with dmar_global_lock held,
- * could cause possible lock race condition.
- */
- up_write(&dmar_global_lock);
ret = intel_svm_enable_prq(iommu);
- down_write(&dmar_global_lock);
if (ret)
goto free_iommu;
}
@@ -3932,7 +3926,6 @@ int __init intel_iommu_init(void)
force_on = (!intel_iommu_tboot_noforce && tboot_force_iommu()) ||
platform_optin_force_iommu();

- down_write(&dmar_global_lock);
if (dmar_table_init()) {
if (force_on)
panic("tboot: Failed to initialize DMAR table\n");
@@ -3945,16 +3938,6 @@ int __init intel_iommu_init(void)
goto out_free_dmar;
}

- up_write(&dmar_global_lock);
-
- /*
- * The bus notifier takes the dmar_global_lock, so lockdep will
- * complain later when we register it under the lock.
- */
- dmar_register_bus_notifier();
-
- down_write(&dmar_global_lock);
-
if (!no_iommu)
intel_iommu_debugfs_init();

@@ -3999,11 +3982,9 @@ int __init intel_iommu_init(void)
pr_err("Initialization failed\n");
goto out_free_dmar;
}
- up_write(&dmar_global_lock);

init_iommu_pm_ops();

- down_read(&dmar_global_lock);
for_each_active_iommu(iommu, drhd) {
/*
* The flush queue implementation does not perform
@@ -4021,13 +4002,11 @@ int __init intel_iommu_init(void)
"%s", iommu->name);
iommu_device_register(&iommu->iommu, &intel_iommu_ops, NULL);
}
- up_read(&dmar_global_lock);

bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
if (si_domain && !hw_pass_through)
register_memory_notifier(&intel_iommu_memory_nb);

- down_read(&dmar_global_lock);
if (probe_acpi_namespace_devices())
pr_warn("ACPI name space devices didn't probe correctly\n");

@@ -4038,17 +4017,15 @@ int __init intel_iommu_init(void)

iommu_disable_protect_mem_regions(iommu);
}
- up_read(&dmar_global_lock);
-
- pr_info("Intel(R) Virtualization Technology for Directed I/O\n");

intel_iommu_enabled = 1;
+ dmar_register_bus_notifier();
+ pr_info("Intel(R) Virtualization Technology for Directed I/O\n");

return 0;

out_free_dmar:
intel_iommu_free_dmars();
- up_write(&dmar_global_lock);
return ret;
}

--
2.25.1