[PATCH v1 06/16] ARM/dma-mapping: return error code from .map_sg() ops
From: Logan Gunthorpe
Date: Thu Jul 15 2021 - 12:46:22 EST
From: Martin Oliveira <martin.oliveira@xxxxxxxxxxxxx>
The .map_sg() op now expects an error code instead of zero on failure,
so propagate any error that occurs all the way back up to the caller.
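Under this convention a .map_sg() implementation returns the number of
mapped entries on success and a negative errno on failure; zero no
longer doubles as the error value. Purely as an illustration of what
that buys the core (the example_* name is hypothetical and nothing
below is part of this patch), an sg_table-based wrapper can then hand
the errno straight to its caller instead of collapsing every failure
to zero:

	/*
	 * Sketch only: ignores the !ops (dma-direct) case; it just shows
	 * the negative-errno contract of .map_sg().
	 */
	static int example_map_sgtable(struct device *dev, struct sg_table *sgt,
				       enum dma_data_direction dir,
				       unsigned long attrs)
	{
		const struct dma_map_ops *ops = get_dma_ops(dev);
		int nents;

		nents = ops->map_sg(dev, sgt->sgl, sgt->orig_nents, dir, attrs);
		if (nents < 0)
			return nents;	/* e.g. -ENOMEM or -EIO */
		sgt->nents = nents;
		return 0;
	}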
Signed-off-by: Martin Oliveira <martin.oliveira@xxxxxxxxxxxxx>
Signed-off-by: Logan Gunthorpe <logang@xxxxxxxxxxxx>
Cc: Russell King <linux@xxxxxxxxxxxxxxx>
Cc: Thomas Bogendoerfer <tsbogend@xxxxxxxxxxxxxxxx>
---
arch/arm/mm/dma-mapping.c | 22 +++++++++++++---------
1 file changed, 13 insertions(+), 9 deletions(-)
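Both functions below follow the same shape: arm_dma_map_sg() now
returns whatever dma_mapping_error() reported for the failing segment,
and __iommu_map_sg() returns the negative value it got from
__map_sg_chunk(). A condensed sketch of that pattern, with hypothetical
example_* helpers, assuming the per-page mapping helper flags failure
through dma_mapping_error() returning a negative errno (which is what
the first hunk relies on):

	static int example_map_sg(struct device *dev, struct scatterlist *sgl,
				  int nents, enum dma_data_direction dir,
				  unsigned long attrs)
	{
		struct scatterlist *s;
		int i, j, ret;

		for_each_sg(sgl, s, nents, i) {
			s->dma_address = example_map_page(dev, sg_page(s),
							  s->offset, s->length,
							  dir, attrs);
			ret = dma_mapping_error(dev, s->dma_address);
			if (ret)
				goto unwind;	/* keep the errno */
			sg_dma_len(s) = s->length;
		}
		return nents;			/* mapped entries */

	unwind:
		/* Undo the i segments mapped before the failure. */
		for_each_sg(sgl, s, i, j)
			example_unmap_page(dev, sg_dma_address(s),
					   sg_dma_len(s), dir, attrs);
		return ret;			/* negative errno, not 0 */
	}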
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index c4b8df2ad328..8c286e690756 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -980,7 +980,7 @@ int arm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
{
const struct dma_map_ops *ops = get_dma_ops(dev);
struct scatterlist *s;
- int i, j;
+ int i, j, ret;
for_each_sg(sg, s, nents, i) {
#ifdef CONFIG_NEED_SG_DMA_LENGTH
@@ -988,7 +988,8 @@ int arm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
#endif
s->dma_address = ops->map_page(dev, sg_page(s), s->offset,
s->length, dir, attrs);
- if (dma_mapping_error(dev, s->dma_address))
+ ret = dma_mapping_error(dev, s->dma_address);
+ if (ret)
goto bad_mapping;
}
return nents;
@@ -996,7 +997,7 @@ int arm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
bad_mapping:
for_each_sg(sg, s, i, j)
ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs);
- return 0;
+ return ret;
}
/**
@@ -1622,7 +1623,7 @@ static int __iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
bool is_coherent)
{
struct scatterlist *s = sg, *dma = sg, *start = sg;
- int i, count = 0;
+ int i, count = 0, ret;
unsigned int offset = s->offset;
unsigned int size = s->offset + s->length;
unsigned int max = dma_get_max_seg_size(dev);
@@ -1634,8 +1635,10 @@ static int __iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
s->dma_length = 0;
if (s->offset || (size & ~PAGE_MASK) || size + s->length > max) {
- if (__map_sg_chunk(dev, start, size, &dma->dma_address,
- dir, attrs, is_coherent) < 0)
+ ret = __map_sg_chunk(dev, start, size,
+ &dma->dma_address, dir, attrs,
+ is_coherent);
+ if (ret < 0)
goto bad_mapping;
dma->dma_address += offset;
@@ -1648,8 +1651,9 @@ static int __iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
}
size += s->length;
}
- if (__map_sg_chunk(dev, start, size, &dma->dma_address, dir, attrs,
- is_coherent) < 0)
+ ret = __map_sg_chunk(dev, start, size, &dma->dma_address, dir, attrs,
+ is_coherent);
+ if (ret < 0)
goto bad_mapping;
dma->dma_address += offset;
@@ -1660,7 +1664,7 @@ static int __iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
bad_mapping:
for_each_sg(sg, s, count, i)
__iommu_remove_mapping(dev, sg_dma_address(s), sg_dma_len(s));
- return 0;
+ return ret;
}
/**
--
2.20.1