[PATCH 05/25] iommu/dma: Use for_each_sg in iommu_dma_alloc

From: Christoph Hellwig
Date: Tue Apr 30 2019 - 06:54:51 EST


arch_dma_prep_coherent can handle physically contiguous ranges larger
than PAGE_SIZE just fine, which means we don't need a page-based
iterator.
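For illustration, the per-segment walk amounts to the sketch below (not part of the patch; flush_sg_coherent() is a made-up helper name, the scatterlist accessors and arch_dma_prep_coherent() are the existing kernel interfaces). Each scatterlist entry describes one physically contiguous run, so sg->length can span several pages and a single call covers the whole run:

#include <linux/scatterlist.h>
#include <linux/dma-noncoherent.h>	/* arch_dma_prep_coherent() */

/* Sketch only: flush every physically contiguous segment in one go. */
static void flush_sg_coherent(struct sg_table *sgt)
{
	struct scatterlist *sg;
	int i;

	/* One iteration per segment, not per page. */
	for_each_sg(sgt->sgl, sg, sgt->orig_nents, i)
		arch_dma_prep_coherent(sg_page(sg), sg->length);
}

Because the pages within one segment are physically contiguous, this touches the same bytes as the old per-page sg_miter loop, just with fewer calls.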

Signed-off-by: Christoph Hellwig <hch@xxxxxx>
Reviewed-by: Robin Murphy <robin.murphy@xxxxxxx>
---
 drivers/iommu/dma-iommu.c | 14 +++++---------
 1 file changed, 5 insertions(+), 9 deletions(-)

diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 77d704c8f565..f915cb7c46e6 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -577,15 +577,11 @@ struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp,
 		goto out_free_iova;
 
 	if (!(prot & IOMMU_CACHE)) {
-		struct sg_mapping_iter miter;
-		/*
-		 * The CPU-centric flushing implied by SG_MITER_TO_SG isn't
-		 * sufficient here, so skip it by using the "wrong" direction.
-		 */
-		sg_miter_start(&miter, sgt.sgl, sgt.orig_nents, SG_MITER_FROM_SG);
-		while (sg_miter_next(&miter))
-			arch_dma_prep_coherent(miter.page, PAGE_SIZE);
-		sg_miter_stop(&miter);
+		struct scatterlist *sg;
+		int i;
+
+		for_each_sg(sgt.sgl, sg, sgt.orig_nents, i)
+			arch_dma_prep_coherent(sg_page(sg), sg->length);
 	}
 
 	if (iommu_map_sg(domain, iova, sgt.sgl, sgt.orig_nents, prot)
--
2.20.1