[PATCH 3/4] dma-iommu: expose a few helper functions to the module

From: David Stevens
Date: Wed Jul 07 2021 - 03:55:46 EST


From: David Stevens <stevensd@xxxxxxxxxxxx>

Expose a few of dma-iommu's helper functions to the rest of the module by
making them non-static and declaring them in dma-iommu.h.
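
For illustration only, and not part of this patch: a minimal sketch of how a
caller elsewhere in the IOMMU module could use the newly exposed IOVA helpers
once the prototypes are visible. The function example_map_page() is
hypothetical, as is the assumption that PAGE_SIZE matches the IOVA granule;
the page-array helpers __iommu_dma_alloc_pages()/__iommu_dma_free_pages()
become callable in the same way.

	/*
	 * Hypothetical caller (not in this patch): reserve an IOVA range for
	 * one page, map it, and release the reservation on failure.  Assumes
	 * <linux/dma-iommu.h>, <linux/dma-mapping.h> and <linux/iommu.h> are
	 * included and that the device is attached to a DMA API domain.
	 */
	static dma_addr_t example_map_page(struct device *dev, struct page *page)
	{
		struct iommu_domain *domain = iommu_get_dma_domain(dev);
		struct iommu_dma_cookie *cookie = domain->iova_cookie;
		size_t size = PAGE_SIZE;	/* assumes granule == PAGE_SIZE */
		dma_addr_t iova;

		iova = __iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
		if (!iova)
			return DMA_MAPPING_ERROR;

		if (iommu_map_atomic(domain, iova, page_to_phys(page), size,
				     IOMMU_READ | IOMMU_WRITE)) {
			__iommu_dma_free_iova(cookie, iova, size, NULL);
			return DMA_MAPPING_ERROR;
		}

		return iova;
	}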

Signed-off-by: David Stevens <stevensd@xxxxxxxxxxxx>
---
drivers/iommu/dma-iommu.c | 27 ++++++++++++++-------------
include/linux/dma-iommu.h | 12 ++++++++++++
2 files changed, 26 insertions(+), 13 deletions(-)

diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 98a5c566a303..48267d9f5152 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -413,7 +413,7 @@ static int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
}
}

-static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
+dma_addr_t __iommu_dma_alloc_iova(struct iommu_domain *domain,
size_t size, u64 dma_limit, struct device *dev)
{
struct iommu_dma_cookie *cookie = domain->iova_cookie;
@@ -453,7 +453,7 @@ static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
return (dma_addr_t)iova << shift;
}

-static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
+void __iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
dma_addr_t iova, size_t size, struct page *freelist)
{
struct iova_domain *iovad = &cookie->iovad;
@@ -489,7 +489,7 @@ static void __iommu_dma_unmap(struct device *dev, dma_addr_t dma_addr,

if (!cookie->fq_domain)
iommu_iotlb_sync(domain, &iotlb_gather);
- iommu_dma_free_iova(cookie, dma_addr, size, iotlb_gather.freelist);
+ __iommu_dma_free_iova(cookie, dma_addr, size, iotlb_gather.freelist);
}

static void __iommu_dma_unmap_swiotlb(struct device *dev, dma_addr_t dma_addr,
@@ -525,12 +525,12 @@ static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,

size = iova_align(iovad, size + iova_off);

- iova = iommu_dma_alloc_iova(domain, size, dma_mask, dev);
+ iova = __iommu_dma_alloc_iova(domain, size, dma_mask, dev);
if (!iova)
return DMA_MAPPING_ERROR;

if (iommu_map_atomic(domain, iova, phys - iova_off, size, prot)) {
- iommu_dma_free_iova(cookie, iova, size, NULL);
+ __iommu_dma_free_iova(cookie, iova, size, NULL);
return DMA_MAPPING_ERROR;
}
return iova + iova_off;
@@ -585,14 +585,14 @@ static dma_addr_t __iommu_dma_map_swiotlb(struct device *dev, phys_addr_t phys,
return iova;
}

-static void __iommu_dma_free_pages(struct page **pages, int count)
+void __iommu_dma_free_pages(struct page **pages, int count)
{
while (count--)
__free_page(pages[count]);
kvfree(pages);
}

-static struct page **__iommu_dma_alloc_pages(
+struct page **__iommu_dma_alloc_pages(
unsigned int count, unsigned long order_mask,
unsigned int nid, gfp_t page_gfp, gfp_t kalloc_gfp)
{
@@ -686,7 +686,8 @@ static struct page **__iommu_dma_alloc_noncontiguous(struct device *dev,
return NULL;

size = iova_align(iovad, size);
- iova = iommu_dma_alloc_iova(domain, size, dev->coherent_dma_mask, dev);
+ iova = __iommu_dma_alloc_iova(domain, size,
+ dev->coherent_dma_mask, dev);
if (!iova)
goto out_free_pages;

@@ -712,7 +713,7 @@ static struct page **__iommu_dma_alloc_noncontiguous(struct device *dev,
out_free_sg:
sg_free_table(sgt);
out_free_iova:
- iommu_dma_free_iova(cookie, iova, size, NULL);
+ __iommu_dma_free_iova(cookie, iova, size, NULL);
out_free_pages:
__iommu_dma_free_pages(pages, count);
return NULL;
@@ -1063,7 +1064,7 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
prev = s;
}

- iova = iommu_dma_alloc_iova(domain, iova_len, dma_get_mask(dev), dev);
+ iova = __iommu_dma_alloc_iova(domain, iova_len, dma_get_mask(dev), dev);
if (!iova)
goto out_restore_sg;

@@ -1077,7 +1078,7 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
return __finalise_sg(dev, sg, nents, iova);

out_free_iova:
- iommu_dma_free_iova(cookie, iova, iova_len, NULL);
+ __iommu_dma_free_iova(cookie, iova, iova_len, NULL);
out_restore_sg:
__invalidate_sg(sg, nents);
return 0;
@@ -1370,7 +1371,7 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
if (!msi_page)
return NULL;

- iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
+ iova = __iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
if (!iova)
goto out_free_page;

@@ -1384,7 +1385,7 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
return msi_page;

out_free_iova:
- iommu_dma_free_iova(cookie, iova, size, NULL);
+ __iommu_dma_free_iova(cookie, iova, size, NULL);
out_free_page:
kfree(msi_page);
return NULL;
diff --git a/include/linux/dma-iommu.h b/include/linux/dma-iommu.h
index 6e75a2d689b4..fc9acc581db0 100644
--- a/include/linux/dma-iommu.h
+++ b/include/linux/dma-iommu.h
@@ -42,6 +42,18 @@ void iommu_dma_free_cpu_cached_iovas(unsigned int cpu,

extern bool iommu_dma_forcedac;

+struct iommu_dma_cookie;
+
+struct page **__iommu_dma_alloc_pages(
+ unsigned int count, unsigned long order_mask,
+ unsigned int nid, gfp_t page_gfp, gfp_t kalloc_gfp);
+void __iommu_dma_free_pages(struct page **pages, int count);
+dma_addr_t __iommu_dma_alloc_iova(struct iommu_domain *domain,
+ size_t size, u64 dma_limit,
+ struct device *dev);
+void __iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
+ dma_addr_t iova, size_t size, struct page *freelist);
+
#else /* CONFIG_IOMMU_DMA */

struct iommu_domain;
--
2.32.0.93.g670b81a890-goog