[PATCH 13/14] crypto: inside-secure - fix use of the SG list

From: Antoine Tenart
Date: Mon May 27 2019 - 10:55:10 EST


Replace sg_nents_for_len by sg_nents when DMA mapping/unmapping buffers
and when looping over the SG entries. This fixes cases where the SG
entries aren't fully used, which would otherwise lead to using fewer SG
entries than needed (and thus the engine wouldn't have access to the
full input data and would produce a wrong result).

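For context, a minimal sketch (not part of this patch, with illustrative
buffer names and sizes) of how the two counting helpers differ when the
request length doesn't span the whole list:

#include <linux/printk.h>
#include <linux/scatterlist.h>

static void sg_count_demo(void)
{
	struct scatterlist sgl[3];
	/* Hypothetical buffers, 256 bytes each. */
	static u8 buf[3][256];

	sg_init_table(sgl, 3);
	sg_set_buf(&sgl[0], buf[0], 256);
	sg_set_buf(&sgl[1], buf[1], 256);
	sg_set_buf(&sgl[2], buf[2], 256);

	/*
	 * sg_nents() walks the whole chain: 3 entries.
	 * sg_nents_for_len(sgl, 300) stops once 300 bytes are covered: 2 entries.
	 * dma_map_sg()/dma_unmap_sg() operate on whole entries, so using the
	 * shorter count while the SG entries aren't fully consumed by the
	 * request length leaves part of the data out of the mapping.
	 */
	pr_info("all entries: %d, entries for 300 bytes: %d\n",
		sg_nents(sgl), sg_nents_for_len(sgl, 300));
}
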
Signed-off-by: Antoine Tenart <antoine.tenart@xxxxxxxxxxx>
---
.../crypto/inside-secure/safexcel_cipher.c | 39 ++++++-------------
drivers/crypto/inside-secure/safexcel_hash.c | 3 +-
2 files changed, 12 insertions(+), 30 deletions(-)

diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c
index cedfb121c278..6e193baccec7 100644
--- a/drivers/crypto/inside-secure/safexcel_cipher.c
+++ b/drivers/crypto/inside-secure/safexcel_cipher.c
@@ -369,16 +369,10 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin
safexcel_complete(priv, ring);

if (src == dst) {
- dma_unmap_sg(priv->dev, src,
- sg_nents_for_len(src, cryptlen),
- DMA_BIDIRECTIONAL);
+ dma_unmap_sg(priv->dev, src, sg_nents(src), DMA_BIDIRECTIONAL);
} else {
- dma_unmap_sg(priv->dev, src,
- sg_nents_for_len(src, cryptlen),
- DMA_TO_DEVICE);
- dma_unmap_sg(priv->dev, dst,
- sg_nents_for_len(dst, cryptlen),
- DMA_FROM_DEVICE);
+ dma_unmap_sg(priv->dev, src, sg_nents(src), DMA_TO_DEVICE);
+ dma_unmap_sg(priv->dev, dst, sg_nents(dst), DMA_FROM_DEVICE);
}

*should_complete = true;
@@ -403,26 +397,21 @@ static int safexcel_send_req(struct crypto_async_request *base, int ring,
int i, ret = 0;

if (src == dst) {
- nr_src = dma_map_sg(priv->dev, src,
- sg_nents_for_len(src, totlen),
+ nr_src = dma_map_sg(priv->dev, src, sg_nents(src),
DMA_BIDIRECTIONAL);
nr_dst = nr_src;
if (!nr_src)
return -EINVAL;
} else {
- nr_src = dma_map_sg(priv->dev, src,
- sg_nents_for_len(src, totlen),
+ nr_src = dma_map_sg(priv->dev, src, sg_nents(src),
DMA_TO_DEVICE);
if (!nr_src)
return -EINVAL;

- nr_dst = dma_map_sg(priv->dev, dst,
- sg_nents_for_len(dst, totlen),
+ nr_dst = dma_map_sg(priv->dev, dst, sg_nents(dst),
DMA_FROM_DEVICE);
if (!nr_dst) {
- dma_unmap_sg(priv->dev, src,
- sg_nents_for_len(src, totlen),
- DMA_TO_DEVICE);
+ dma_unmap_sg(priv->dev, src, nr_src, DMA_TO_DEVICE);
return -EINVAL;
}
}
@@ -472,7 +461,7 @@ static int safexcel_send_req(struct crypto_async_request *base, int ring,

/* result descriptors */
for_each_sg(dst, sg, nr_dst, i) {
- bool first = !i, last = (i == nr_dst - 1);
+ bool first = !i, last = sg_is_last(sg);
u32 len = sg_dma_len(sg);

rdesc = safexcel_add_rdesc(priv, ring, first, last,
@@ -501,16 +490,10 @@ static int safexcel_send_req(struct crypto_async_request *base, int ring,
safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);

if (src == dst) {
- dma_unmap_sg(priv->dev, src,
- sg_nents_for_len(src, totlen),
- DMA_BIDIRECTIONAL);
+ dma_unmap_sg(priv->dev, src, nr_src, DMA_BIDIRECTIONAL);
} else {
- dma_unmap_sg(priv->dev, src,
- sg_nents_for_len(src, totlen),
- DMA_TO_DEVICE);
- dma_unmap_sg(priv->dev, dst,
- sg_nents_for_len(dst, totlen),
- DMA_FROM_DEVICE);
+ dma_unmap_sg(priv->dev, src, nr_src, DMA_TO_DEVICE);
+ dma_unmap_sg(priv->dev, dst, nr_dst, DMA_FROM_DEVICE);
}

return ret;
diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c
index 20950744ea4e..a80a5e757b1f 100644
--- a/drivers/crypto/inside-secure/safexcel_hash.c
+++ b/drivers/crypto/inside-secure/safexcel_hash.c
@@ -273,8 +273,7 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
}

/* Now handle the current ahash request buffer(s) */
- req->nents = dma_map_sg(priv->dev, areq->src,
- sg_nents_for_len(areq->src, areq->nbytes),
+ req->nents = dma_map_sg(priv->dev, areq->src, sg_nents(areq->src),
DMA_TO_DEVICE);
if (!req->nents) {
ret = -ENOMEM;
--
2.21.0