[PATCH v3 4/6] iommu: Move IOMMU pagesize check to attach_device

From: Sven Peter
Date: Tue Oct 19 2021 - 12:39:05 EST


The iova allocator is capable of handling any granularity that is a power
of two. Remove the much stronger condition that the granularity must be
smaller than or equal to the CPU page size from a BUG_ON there.
Instead, check this condition during __iommu_attach_device and fail
gracefully.
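
To illustrate why the bitmap test used in this patch is sufficient (a
sketch for this cover text only, not part of the patch;
can_represent_cpu_pages is a made-up helper): PAGE_SIZE | (PAGE_SIZE - 1)
has every bit up to and including PAGE_SIZE set, so ANDing it with
pgsize_bitmap is zero exactly when all supported IOMMU page sizes are
larger than a CPU page.

#include <stdbool.h>

/* True when some supported IOMMU page size is <= the CPU page size. */
static bool can_represent_cpu_pages(unsigned long pgsize_bitmap,
				    unsigned long cpu_page_size)
{
	/* every bit up to and including cpu_page_size */
	unsigned long mask = cpu_page_size | (cpu_page_size - 1);

	return pgsize_bitmap & mask;
}

/*
 * can_represent_cpu_pages(0x1000, 0x1000) -> true  (4K IOMMU, 4K CPU)
 * can_represent_cpu_pages(0x4000, 0x1000) -> false (16K-only IOMMU, 4K CPU)
 * can_represent_cpu_pages(0x4000, 0x4000) -> true  (16K IOMMU, 16K CPU)
 */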

Signed-off-by: Sven Peter <sven@xxxxxxxxxxxxx>
---
 drivers/iommu/iommu.c | 35 ++++++++++++++++++++++++++++++++---
 drivers/iommu/iova.c  |  7 ++++---
 include/linux/iommu.h |  5 +++++
 3 files changed, 41 insertions(+), 6 deletions(-)

diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index dd7863e453a5..28896739964b 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -80,6 +80,8 @@ static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
 						 unsigned type);
 static int __iommu_attach_device(struct iommu_domain *domain,
 				 struct device *dev);
+static void __iommu_detach_device(struct iommu_domain *domain,
+				  struct device *dev);
 static int __iommu_attach_group(struct iommu_domain *domain,
 				struct iommu_group *group);
 static void __iommu_detach_group(struct iommu_domain *domain,
@@ -1974,6 +1976,19 @@ void iommu_domain_free(struct iommu_domain *domain)
 }
 EXPORT_SYMBOL_GPL(iommu_domain_free);
 
+static int iommu_check_page_size(struct iommu_domain *domain)
+{
+	if (!iommu_is_paging_domain(domain))
+		return 0;
+
+	if (!(domain->pgsize_bitmap & (PAGE_SIZE | (PAGE_SIZE - 1)))) {
+		pr_warn("IOMMU pages cannot exactly represent CPU pages.\n");
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
 static int __iommu_attach_device(struct iommu_domain *domain,
 				 struct device *dev)
 {
@@ -1983,9 +1998,23 @@ static int __iommu_attach_device(struct iommu_domain *domain,
 		return -ENODEV;
 
 	ret = domain->ops->attach_dev(domain, dev);
-	if (!ret)
-		trace_attach_device_to_domain(dev);
-	return ret;
+	if (ret)
+		return ret;
+
+	/*
+	 * Check that CPU pages can be represented by the IOVA granularity.
+	 * This has to be done after ops->attach_dev since many IOMMU drivers
+	 * only limit domain->pgsize_bitmap after having attached the first
+	 * device.
+	 */
+	ret = iommu_check_page_size(domain);
+	if (ret) {
+		__iommu_detach_device(domain, dev);
+		return ret;
+	}
+
+	trace_attach_device_to_domain(dev);
+	return 0;
 }
 
 int iommu_attach_device(struct iommu_domain *domain, struct device *dev)
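
The comment added in __iommu_attach_device above notes that many drivers
only narrow domain->pgsize_bitmap once a device is attached. As a rough
sketch of that pattern (hypothetical driver, example_* names invented for
illustration):

struct example_iommu {
	unsigned long pgsize;	/* hardware translation granule, e.g. SZ_16K */
};

static int example_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct example_iommu *iommu = dev_iommu_priv_get(dev);

	/*
	 * The granule is a property of the IOMMU instance, so it is only
	 * known here; restrict the domain to the sizes it can map.
	 */
	domain->pgsize_bitmap = iommu->pgsize;

	/* ... program the translation tables ... */
	return 0;
}

This is why iommu_check_page_size() runs only after ops->attach_dev() has
succeeded, and why a failure has to undo the attach.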
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index 9e8bc802ac05..707eb0ceb29f 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -50,10 +50,11 @@ init_iova_domain(struct iova_domain *iovad, unsigned long granule,
 {
 	/*
 	 * IOVA granularity will normally be equal to the smallest
-	 * supported IOMMU page size; both *must* be capable of
-	 * representing individual CPU pages exactly.
+	 * supported IOMMU page size; while both usually are capable of
+	 * representing individual CPU pages exactly, the IOVA allocator
+	 * supports any granularity that is a power of two.
 	 */
-	BUG_ON((granule > PAGE_SIZE) || !is_power_of_2(granule));
+	BUG_ON(!is_power_of_2(granule));
 
 	spin_lock_init(&iovad->iova_rbtree_lock);
 	iovad->rbroot = RB_ROOT;
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index d2f3435e7d17..cabd25879613 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -101,6 +101,11 @@ static inline bool iommu_is_dma_domain(struct iommu_domain *domain)
 	return domain->type & __IOMMU_DOMAIN_DMA_API;
 }
 
+static inline bool iommu_is_paging_domain(struct iommu_domain *domain)
+{
+	return domain->type & __IOMMU_DOMAIN_PAGING;
+}
+
 enum iommu_cap {
 	IOMMU_CAP_CACHE_COHERENCY,	/* IOMMU can enforce cache coherent DMA
 					   transactions */
--
2.25.1
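
Caller-side sketch (illustration only, not part of the patch; the
example_* name is invented): with this change an incompatible
device/domain pairing now fails the attach with -EFAULT instead of
hitting the BUG_ON() in init_iova_domain() much later.

static int example_use_domain(struct iommu_domain *domain, struct device *dev)
{
	int ret;

	ret = iommu_attach_device(domain, dev);
	if (ret) {
		/* e.g. -EFAULT: 16K-only IOMMU on a 4K PAGE_SIZE kernel */
		dev_err(dev, "IOMMU attach failed: %d\n", ret);
		return ret;
	}

	/* ... iommu_map()/iommu_unmap() as usual ... */
	iommu_detach_device(domain, dev);
	return 0;
}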