[PATCH] scsi: ibmvscsi: Use dma_alloc_coherent() instead of get_zeroed_page/dma_map_single()

From: Cai Huoqing
Date: Sun Oct 10 2021 - 12:01:50 EST


Replace the get_zeroed_page()/free_page() plus
dma_map_single()/dma_unmap_single() pattern with
dma_alloc_coherent()/dma_free_coherent(). This shrinks and simplifies
the code (the dma_mapping_error() unwind path goes away), and a
coherent DMA buffer does not need per-access cache maintenance.

Signed-off-by: Cai Huoqing <caihuoqing@xxxxxxxxx>
---
drivers/scsi/ibmvscsi/ibmvfc.c | 15 +++------------
drivers/scsi/ibmvscsi/ibmvscsi.c | 26 ++++++--------------------
2 files changed, 9 insertions(+), 32 deletions(-)
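
Not part of the patch, just a minimal sketch of the conversion pattern
for reviewers; dev/buf/token here are illustrative names, not taken
from either driver:

	void *buf;
	dma_addr_t token;

	/* Before: zeroed page plus a streaming mapping. */
	buf = (void *)get_zeroed_page(GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	token = dma_map_single(dev, buf, PAGE_SIZE, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, token)) {
		free_page((unsigned long)buf);
		return -ENOMEM;
	}
	/* ... use buf/token ... */
	dma_unmap_single(dev, token, PAGE_SIZE, DMA_BIDIRECTIONAL);
	free_page((unsigned long)buf);

	/* After: one zeroed, coherent allocation; no mapping-error path. */
	buf = dma_alloc_coherent(dev, PAGE_SIZE, &token, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	/* ... use buf/token ... */
	dma_free_coherent(dev, PAGE_SIZE, buf, token);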

diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index 1f1586ad48fe..f65d1a78b272 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -869,8 +869,7 @@ static void ibmvfc_free_queue(struct ibmvfc_host *vhost,
{
struct device *dev = vhost->dev;

- dma_unmap_single(dev, queue->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
- free_page((unsigned long)queue->msgs.handle);
+ dma_free_coherent(dev, PAGE_SIZE, queue->msgs.handle, queue->msg_token);
queue->msgs.handle = NULL;

ibmvfc_free_event_pool(vhost, queue);
@@ -5663,19 +5662,11 @@ static int ibmvfc_alloc_queue(struct ibmvfc_host *vhost,
return -ENOMEM;
}

- queue->msgs.handle = (void *)get_zeroed_page(GFP_KERNEL);
+ queue->msgs.handle = dma_alloc_coherent(dev, PAGE_SIZE,
+ &queue->msg_token, GFP_KERNEL);
if (!queue->msgs.handle)
return -ENOMEM;

- queue->msg_token = dma_map_single(dev, queue->msgs.handle, PAGE_SIZE,
- DMA_BIDIRECTIONAL);
-
- if (dma_mapping_error(dev, queue->msg_token)) {
- free_page((unsigned long)queue->msgs.handle);
- queue->msgs.handle = NULL;
- return -ENOMEM;
- }
-
queue->cur = 0;
queue->fmt = fmt;
queue->size = PAGE_SIZE / fmt_size;
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index ea8e01f49cba..61b315d1edbc 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -151,10 +151,7 @@ static void ibmvscsi_release_crq_queue(struct crq_queue *queue,
msleep(100);
rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
} while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
- dma_unmap_single(hostdata->dev,
- queue->msg_token,
- queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);
- free_page((unsigned long)queue->msgs);
+ dma_free_coherent(hostdata->dev, PAGE_SIZE, queue->msgs, queue->msg_token);
}

/**
@@ -331,18 +328,11 @@ static int ibmvscsi_init_crq_queue(struct crq_queue *queue,
int retrc;
struct vio_dev *vdev = to_vio_dev(hostdata->dev);

- queue->msgs = (struct viosrp_crq *)get_zeroed_page(GFP_KERNEL);
-
- if (!queue->msgs)
- goto malloc_failed;
queue->size = PAGE_SIZE / sizeof(*queue->msgs);
-
- queue->msg_token = dma_map_single(hostdata->dev, queue->msgs,
- queue->size * sizeof(*queue->msgs),
- DMA_BIDIRECTIONAL);
-
- if (dma_mapping_error(hostdata->dev, queue->msg_token))
- goto map_failed;
+ queue->msgs = dma_alloc_coherent(hostdata->dev, PAGE_SIZE,
+ &queue->msg_token, GFP_KERNEL);
+ if (!queue->msgs)
+ goto malloc_failed;

gather_partition_info();
set_adapter_info(hostdata);
@@ -395,11 +385,7 @@ static int ibmvscsi_init_crq_queue(struct crq_queue *queue,
rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
} while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
reg_crq_failed:
- dma_unmap_single(hostdata->dev,
- queue->msg_token,
- queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);
- map_failed:
- free_page((unsigned long)queue->msgs);
+ dma_free_coherent(hostdata->dev, PAGE_SIZE, queue->msgs, queue->msg_token);
malloc_failed:
return -1;
}
--
2.25.1