[PATCH V2 1/4] misc: vop: change the way of allocating vring

From: Sherry Sun
Date: Tue Sep 29 2020 - 04:45:46 EST


Allocating vrings with dma_alloc_coherent is the common way in the
kernel, since memory shared between two systems should be coherent to
avoid caching effects.

The original code used __get_free_pages and dma_map_single to allocate
and map the vrings, but never called the dma_sync_single_for_cpu/device
APIs to synchronize vring changes between the EP and RC. This causes
memory synchronization problems on devices that do not support hardware
DMA coherency.
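
For context, a minimal sketch of the two patterns (illustrative only;
dev, va, da and vr_size are placeholder names, not the driver's
variables):

	/* old: streaming DMA mapping, coherency needs explicit syncs */
	va = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
				      get_order(vr_size));
	da = dma_map_single(dev, va, vr_size, DMA_BIDIRECTIONAL);
	/* every CPU/device handover would need dma_sync_single_for_cpu()
	 * / dma_sync_single_for_device(), which the driver never did */

	/* new: coherent allocation, no explicit syncs needed */
	va = dma_alloc_coherent(dev, vr_size, &da, GFP_KERNEL);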

Signed-off-by: Joakim Zhang <qiangqing.zhang@xxxxxxx>
Signed-off-by: Sherry Sun <sherry.sun@xxxxxxx>
---
drivers/misc/mic/vop/vop_vringh.c | 39 +++++++------------------------
1 file changed, 9 insertions(+), 30 deletions(-)

diff --git a/drivers/misc/mic/vop/vop_vringh.c b/drivers/misc/mic/vop/vop_vringh.c
index 7014ffe88632..08e63c81c5b2 100644
--- a/drivers/misc/mic/vop/vop_vringh.c
+++ b/drivers/misc/mic/vop/vop_vringh.c
@@ -298,9 +298,7 @@ static int vop_virtio_add_device(struct vop_vdev *vdev,
mutex_init(&vvr->vr_mutex);
vr_size = PAGE_ALIGN(round_up(vring_size(num, MIC_VIRTIO_RING_ALIGN), 4) +
sizeof(struct _mic_vring_info));
- vr->va = (void *)
- __get_free_pages(GFP_KERNEL | __GFP_ZERO,
- get_order(vr_size));
+ vr->va = dma_alloc_coherent(vop_dev(vdev), vr_size, &vr_addr, GFP_KERNEL);
if (!vr->va) {
ret = -ENOMEM;
dev_err(vop_dev(vdev), "%s %d err %d\n",
@@ -310,15 +308,6 @@ static int vop_virtio_add_device(struct vop_vdev *vdev,
vr->len = vr_size;
vr->info = vr->va + round_up(vring_size(num, MIC_VIRTIO_RING_ALIGN), 4);
vr->info->magic = cpu_to_le32(MIC_MAGIC + vdev->virtio_id + i);
- vr_addr = dma_map_single(&vpdev->dev, vr->va, vr_size,
- DMA_BIDIRECTIONAL);
- if (dma_mapping_error(&vpdev->dev, vr_addr)) {
- free_pages((unsigned long)vr->va, get_order(vr_size));
- ret = -ENOMEM;
- dev_err(vop_dev(vdev), "%s %d err %d\n",
- __func__, __LINE__, ret);
- goto err;
- }
vqconfig[i].address = cpu_to_le64(vr_addr);

vring_init(&vr->vr, num, vr->va, MIC_VIRTIO_RING_ALIGN);
@@ -339,11 +328,8 @@ static int vop_virtio_add_device(struct vop_vdev *vdev,
dev_dbg(&vpdev->dev,
"%s %d index %d va %p info %p vr_size 0x%x\n",
__func__, __LINE__, i, vr->va, vr->info, vr_size);
- vvr->buf = (void *)__get_free_pages(GFP_KERNEL,
- get_order(VOP_INT_DMA_BUF_SIZE));
- vvr->buf_da = dma_map_single(&vpdev->dev,
- vvr->buf, VOP_INT_DMA_BUF_SIZE,
- DMA_BIDIRECTIONAL);
+ vvr->buf = dma_alloc_coherent(vop_dev(vdev), VOP_INT_DMA_BUF_SIZE,
+ &vvr->buf_da, GFP_KERNEL);
}

snprintf(irqname, sizeof(irqname), "vop%dvirtio%d", vpdev->index,
@@ -382,10 +368,8 @@ static int vop_virtio_add_device(struct vop_vdev *vdev,
for (j = 0; j < i; j++) {
struct vop_vringh *vvr = &vdev->vvr[j];

- dma_unmap_single(&vpdev->dev, le64_to_cpu(vqconfig[j].address),
- vvr->vring.len, DMA_BIDIRECTIONAL);
- free_pages((unsigned long)vvr->vring.va,
- get_order(vvr->vring.len));
+ dma_free_coherent(vop_dev(vdev), vvr->vring.len, vvr->vring.va,
+ le64_to_cpu(vqconfig[j].address));
}
return ret;
}
@@ -433,17 +417,12 @@ static void vop_virtio_del_device(struct vop_vdev *vdev)
for (i = 0; i < vdev->dd->num_vq; i++) {
struct vop_vringh *vvr = &vdev->vvr[i];

- dma_unmap_single(&vpdev->dev,
- vvr->buf_da, VOP_INT_DMA_BUF_SIZE,
- DMA_BIDIRECTIONAL);
- free_pages((unsigned long)vvr->buf,
- get_order(VOP_INT_DMA_BUF_SIZE));
+ dma_free_coherent(vop_dev(vdev), VOP_INT_DMA_BUF_SIZE,
+ vvr->buf, vvr->buf_da);
vringh_kiov_cleanup(&vvr->riov);
vringh_kiov_cleanup(&vvr->wiov);
- dma_unmap_single(&vpdev->dev, le64_to_cpu(vqconfig[i].address),
- vvr->vring.len, DMA_BIDIRECTIONAL);
- free_pages((unsigned long)vvr->vring.va,
- get_order(vvr->vring.len));
+ dma_free_coherent(vop_dev(vdev), vvr->vring.len, vvr->vring.va,
+ le64_to_cpu(vqconfig[i].address));
}
/*
* Order the type update with previous stores. This write barrier
--
2.17.1