Introduce an iotlb_sync_map() callback that is invoked at the end of
iommu_map(). The new callback allows IOMMU drivers to avoid syncing
after mapping each contiguous chunk and instead sync only once the
whole mapping is complete, optimizing performance of the mapping
operation.
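
For illustration only (not part of this patch), a driver that today
flushes its TLB from ->map() for every chunk could instead perform a
single flush from the new callback; the foo_* names below are made up:

/*
 * Hypothetical driver-side sketch: flush once after the whole mapping
 * has been written, rather than once per mapped chunk.
 */
static void foo_iommu_iotlb_sync_map(struct iommu_domain *domain)
{
	struct foo_domain *fd = to_foo_domain(domain);	/* assumed helper */

	foo_flush_tlb_all(fd->iommu);			/* single flush */
}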
Signed-off-by: Dmitry Osipenko <digetx@xxxxxxxxx>
---
drivers/iommu/iommu.c | 8 ++++++--
include/linux/iommu.h | 1 +
2 files changed, 7 insertions(+), 2 deletions(-)
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index d2aa23202bb9..39b2ee66aa96 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -1508,13 +1508,14 @@ static size_t iommu_pgsize(struct iommu_domain *domain,
 int iommu_map(struct iommu_domain *domain, unsigned long iova,
 	      phys_addr_t paddr, size_t size, int prot)
 {
+	const struct iommu_ops *ops = domain->ops;
 	unsigned long orig_iova = iova;
 	unsigned int min_pagesz;
 	size_t orig_size = size;
 	phys_addr_t orig_paddr = paddr;
 	int ret = 0;
 
-	if (unlikely(domain->ops->map == NULL ||
+	if (unlikely(ops->map == NULL ||
 		     domain->pgsize_bitmap == 0UL))
 		return -ENODEV;
 
@@ -1543,7 +1544,7 @@ int iommu_map(struct iommu_domain *domain, unsigned long iova,
pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx\n",
iova, &paddr, pgsize);
- ret = domain->ops->map(domain, iova, paddr, pgsize, prot);
+ ret = ops->map(domain, iova, paddr, pgsize, prot);
if (ret)
break;
@@ -1552,6 +1553,9 @@ int iommu_map(struct iommu_domain *domain, unsigned long iova,
 		size -= pgsize;
 	}
 
+	if (ops->iotlb_sync_map)
+		ops->iotlb_sync_map(domain);
+
 	/* unroll mapping in case something went wrong */
 	if (ret)
 		iommu_unmap(domain, orig_iova, orig_size - size);
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 19938ee6eb31..5224aa376377 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -206,6 +206,7 @@ struct iommu_ops {
 	void (*flush_iotlb_all)(struct iommu_domain *domain);
 	void (*iotlb_range_add)(struct iommu_domain *domain,
 				unsigned long iova, size_t size);
+	void (*iotlb_sync_map)(struct iommu_domain *domain);
 	void (*iotlb_sync)(struct iommu_domain *domain);
 	phys_addr_t (*iova_to_phys)(struct iommu_domain *domain, dma_addr_t iova);
 	int (*add_device)(struct device *dev);
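
Not part of the patch itself, but for reference: a driver opts in by
setting the new, optional member in its iommu_ops. A sketch, with the
foo_* names being purely illustrative:

static const struct iommu_ops foo_iommu_ops = {
	.domain_alloc	= foo_iommu_domain_alloc,
	.domain_free	= foo_iommu_domain_free,
	.map		= foo_iommu_map,
	.unmap		= foo_iommu_unmap,
	.iotlb_sync_map	= foo_iommu_iotlb_sync_map,	/* new optional callback */
	.iova_to_phys	= foo_iommu_iova_to_phys,
};

Drivers that do not set .iotlb_sync_map are unaffected, since
iommu_map() only invokes the callback when it is non-NULL.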