[PATCH 5.10 337/663] iommu: Move iotlb_sync_map out from __iommu_map

From: Greg Kroah-Hartman
Date: Mon Mar 01 2021 - 19:36:57 EST


From: Yong Wu <yong.wu@xxxxxxxxxxxx>

[ Upstream commit d8c1df02ac7f2c802a9b2afc0f5c888c4217f1d5 ]

At the end of __iommu_map(), iotlb_sync_map() is always called.

This patch moves iotlb_sync_map() out of __iommu_map(), since it is
unnecessary to call it for each sg segment, especially while
iotlb_sync_map() currently flushes the whole TLB. Add a small helper,
_iommu_map(), for this.
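
For illustration, a minimal sketch of the driver side of this interface.
It is hypothetical (example_iotlb_sync_map, example_flush_iotlb_all and
example_iommu_ops are made-up names, not code from any real driver): a
callback like this used to run once per mapped sg segment via
__iommu_map(), and after this patch runs once per whole
iommu_map()/iommu_map_sg() call.

#include <linux/iommu.h>

/* Hypothetical stand-in for a driver's full-IOTLB invalidation. */
static void example_flush_iotlb_all(struct iommu_domain *domain)
{
	/* hardware-specific "flush everything" sequence would go here */
}

/*
 * .iotlb_sync_map callback: with this patch the core invokes it once
 * after the whole map operation, so a full-TLB flush here is no longer
 * multiplied by the number of scatterlist segments.
 */
static void example_iotlb_sync_map(struct iommu_domain *domain)
{
	example_flush_iotlb_all(domain);
}

static const struct iommu_ops example_iommu_ops = {
	.iotlb_sync_map = example_iotlb_sync_map,
};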

Signed-off-by: Yong Wu <yong.wu@xxxxxxxxxxxx>
Reviewed-by: Robin Murphy <robin.murphy@xxxxxxx>
Acked-by: Will Deacon <will@xxxxxxxxxx>
Link: https://lore.kernel.org/r/20210107122909.16317-2-yong.wu@xxxxxxxxxxxx
Signed-off-by: Will Deacon <will@xxxxxxxxxx>
Signed-off-by: Sasha Levin <sashal@xxxxxxxxxx>
---
drivers/iommu/iommu.c | 23 ++++++++++++++++++-----
1 file changed, 18 insertions(+), 5 deletions(-)

diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 0f4dc25d46c92..a25a85a0bba5b 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -2409,9 +2409,6 @@ static int __iommu_map(struct iommu_domain *domain, unsigned long iova,
 		size -= pgsize;
 	}
 
-	if (ops->iotlb_sync_map)
-		ops->iotlb_sync_map(domain);
-
 	/* unroll mapping in case something went wrong */
 	if (ret)
 		iommu_unmap(domain, orig_iova, orig_size - size);
@@ -2421,18 +2418,31 @@ static int __iommu_map(struct iommu_domain *domain, unsigned long iova,
 	return ret;
 }

+static int _iommu_map(struct iommu_domain *domain, unsigned long iova,
+		      phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
+{
+	const struct iommu_ops *ops = domain->ops;
+	int ret;
+
+	ret = __iommu_map(domain, iova, paddr, size, prot, gfp);
+	if (ret == 0 && ops->iotlb_sync_map)
+		ops->iotlb_sync_map(domain);
+
+	return ret;
+}
+
 int iommu_map(struct iommu_domain *domain, unsigned long iova,
 	      phys_addr_t paddr, size_t size, int prot)
 {
 	might_sleep();
-	return __iommu_map(domain, iova, paddr, size, prot, GFP_KERNEL);
+	return _iommu_map(domain, iova, paddr, size, prot, GFP_KERNEL);
 }
 EXPORT_SYMBOL_GPL(iommu_map);
 
 int iommu_map_atomic(struct iommu_domain *domain, unsigned long iova,
 		     phys_addr_t paddr, size_t size, int prot)
 {
-	return __iommu_map(domain, iova, paddr, size, prot, GFP_ATOMIC);
+	return _iommu_map(domain, iova, paddr, size, prot, GFP_ATOMIC);
 }
 EXPORT_SYMBOL_GPL(iommu_map_atomic);

@@ -2516,6 +2526,7 @@ static size_t __iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
 			     struct scatterlist *sg, unsigned int nents, int prot,
 			     gfp_t gfp)
 {
+	const struct iommu_ops *ops = domain->ops;
 	size_t len = 0, mapped = 0;
 	phys_addr_t start;
 	unsigned int i = 0;
@@ -2546,6 +2557,8 @@ static size_t __iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
 		sg = sg_next(sg);
 	}
 
+	if (ops->iotlb_sync_map)
+		ops->iotlb_sync_map(domain);
 	return mapped;
 
 out_err:
--
2.27.0