[PATCH v2 6/8] VMCI: dma dg: allocate send and receive buffers for DMA datagrams

From: Jorgen Hansen
Date: Thu Feb 03 2022 - 08:18:46 EST


If DMA datagrams are used, allocate send and receive buffers
in coherent DMA memory.

This is done in preparation for the send and receive datagram
operations, where these buffers are used to exchange data between
the driver and the device.

Reviewed-by: Vishnu Dasa <vdasa@xxxxxxxxxx>
Signed-off-by: Jorgen Hansen <jhansen@xxxxxxxxxx>
---
 drivers/misc/vmw_vmci/vmci_guest.c | 66 ++++++++++++++++++++++++++----
 include/linux/vmw_vmci_defs.h      |  4 ++
 2 files changed, 63 insertions(+), 7 deletions(-)
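
Note for readers less familiar with the coherent DMA API: the buffer setup and
teardown that this patch wires into probe/remove follows the pattern sketched
below. This is an illustrative sketch only, not driver code; the dg_bufs struct
and the helper names are placeholders, and error unwinding is kept local
instead of going through the driver's err_free_data_buffers label. When the
device exposes MMIO register access, both buffers are allocated with
dma_alloc_coherent(), sized (like VMCI_DMA_DG_BUFFER_SIZE) for a maximum sized
datagram plus the header, and must be released with dma_free_coherent();
otherwise the driver keeps the existing vmalloc()'d receive buffer.

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/vmw_vmci_defs.h>

/* Same sizing rule as the patch: a maximum sized datagram plus the header. */
#define DG_BUFFER_SIZE (VMCI_MAX_DG_SIZE + PAGE_SIZE)

/* Placeholder container for the two datagram buffers (illustrative only). */
struct dg_bufs {
	void *rx;		/* receive (data in) buffer */
	dma_addr_t rx_base;
	void *tx;		/* send (data out) buffer */
	dma_addr_t tx_base;
};

static int dg_bufs_alloc(struct device *dev, struct dg_bufs *b, bool has_mmio)
{
	if (!has_mmio) {
		/* Legacy device: keep using a vmalloc()'d receive buffer. */
		b->rx = vmalloc(VMCI_MAX_DG_SIZE);
		return b->rx ? 0 : -ENOMEM;
	}

	/* DMA datagram mode: both buffers must live in coherent DMA memory. */
	b->tx = dma_alloc_coherent(dev, DG_BUFFER_SIZE, &b->tx_base, GFP_KERNEL);
	if (!b->tx)
		return -ENOMEM;

	b->rx = dma_alloc_coherent(dev, DG_BUFFER_SIZE, &b->rx_base, GFP_KERNEL);
	if (!b->rx) {
		dma_free_coherent(dev, DG_BUFFER_SIZE, b->tx, b->tx_base);
		return -ENOMEM;
	}
	return 0;
}

static void dg_bufs_free(struct device *dev, struct dg_bufs *b, bool has_mmio)
{
	if (has_mmio) {
		dma_free_coherent(dev, DG_BUFFER_SIZE, b->rx, b->rx_base);
		dma_free_coherent(dev, DG_BUFFER_SIZE, b->tx, b->tx_base);
	} else {
		vfree(b->rx);
	}
}

Also note that probe only programs the upper 32 bits of each buffer address
(VMCI_DATA_IN_HIGH_ADDR/VMCI_DATA_OUT_HIGH_ADDR) here; the low parts are
written by the actual send and receive operations added later in this series.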

diff --git a/drivers/misc/vmw_vmci/vmci_guest.c b/drivers/misc/vmw_vmci/vmci_guest.c
index 0920fbc6b64f..c207ca2ca42e 100644
--- a/drivers/misc/vmw_vmci/vmci_guest.c
+++ b/drivers/misc/vmw_vmci/vmci_guest.c
@@ -31,6 +31,12 @@

 #define VMCI_UTIL_NUM_RESOURCES 1
 
+/*
+ * Datagram buffers for DMA send/receive must accommodate at least
+ * a maximum sized datagram and the header.
+ */
+#define VMCI_DMA_DG_BUFFER_SIZE (VMCI_MAX_DG_SIZE + PAGE_SIZE)
+
 static bool vmci_disable_msi;
 module_param_named(disable_msi, vmci_disable_msi, bool, 0);
 MODULE_PARM_DESC(disable_msi, "Disable MSI use in driver - (default=0)");
@@ -53,6 +59,9 @@ struct vmci_guest_device {
 	struct tasklet_struct bm_tasklet;
 
 	void *data_buffer;
+	dma_addr_t data_buffer_base;
+	void *tx_buffer;
+	dma_addr_t tx_buffer_base;
 	void *notification_bitmap;
 	dma_addr_t notification_base;
 };
@@ -451,6 +460,24 @@ static irqreturn_t vmci_interrupt_dma_datagram(int irq, void *_dev)
 	return IRQ_HANDLED;
 }
 
+static void vmci_free_dg_buffers(struct vmci_guest_device *vmci_dev)
+{
+	if (vmci_dev->mmio_base != NULL) {
+		if (vmci_dev->tx_buffer != NULL)
+			dma_free_coherent(vmci_dev->dev,
+					  VMCI_DMA_DG_BUFFER_SIZE,
+					  vmci_dev->tx_buffer,
+					  vmci_dev->tx_buffer_base);
+		if (vmci_dev->data_buffer != NULL)
+			dma_free_coherent(vmci_dev->dev,
+					  VMCI_DMA_DG_BUFFER_SIZE,
+					  vmci_dev->data_buffer,
+					  vmci_dev->data_buffer_base);
+	} else {
+		vfree(vmci_dev->data_buffer);
+	}
+}
+
 /*
  * Most of the initialization at module load time is done here.
  */
@@ -520,11 +547,27 @@ static int vmci_guest_probe_device(struct pci_dev *pdev,
 	tasklet_init(&vmci_dev->bm_tasklet,
 		     vmci_process_bitmap, (unsigned long)vmci_dev);
 
-	vmci_dev->data_buffer = vmalloc(VMCI_MAX_DG_SIZE);
+	if (mmio_base != NULL) {
+		vmci_dev->tx_buffer = dma_alloc_coherent(&pdev->dev, VMCI_DMA_DG_BUFFER_SIZE,
+							 &vmci_dev->tx_buffer_base,
+							 GFP_KERNEL);
+		if (!vmci_dev->tx_buffer) {
+			dev_err(&pdev->dev,
+				"Can't allocate memory for datagram tx buffer\n");
+			return -ENOMEM;
+		}
+
+		vmci_dev->data_buffer = dma_alloc_coherent(&pdev->dev, VMCI_DMA_DG_BUFFER_SIZE,
+							   &vmci_dev->data_buffer_base,
+							   GFP_KERNEL);
+	} else {
+		vmci_dev->data_buffer = vmalloc(VMCI_MAX_DG_SIZE);
+	}
 	if (!vmci_dev->data_buffer) {
 		dev_err(&pdev->dev,
 			"Can't allocate memory for datagram buffer\n");
-		return -ENOMEM;
+		error = -ENOMEM;
+		goto err_free_data_buffers;
 	}
 
 	pci_set_master(pdev); /* To enable queue_pair functionality. */
@@ -542,7 +585,7 @@ static int vmci_guest_probe_device(struct pci_dev *pdev,
 	if (!(capabilities & VMCI_CAPS_DATAGRAM)) {
 		dev_err(&pdev->dev, "Device does not support datagrams\n");
 		error = -ENXIO;
-		goto err_free_data_buffer;
+		goto err_free_data_buffers;
 	}
 	caps_in_use = VMCI_CAPS_DATAGRAM;

@@ -586,7 +629,7 @@ static int vmci_guest_probe_device(struct pci_dev *pdev,
 			dev_err(&pdev->dev,
 				"Missing capability: VMCI_CAPS_DMA_DATAGRAM\n");
 			error = -ENXIO;
-			goto err_free_data_buffer;
+			goto err_free_data_buffers;
 		}
 	}

@@ -602,6 +645,12 @@ static int vmci_guest_probe_device(struct pci_dev *pdev,
 		vmci_write_reg(vmci_dev, PAGE_SHIFT, VMCI_GUEST_PAGE_SHIFT);
 		page_shift = vmci_read_reg(vmci_dev, VMCI_GUEST_PAGE_SHIFT);
 		dev_info(&pdev->dev, "Using page shift %d\n", page_shift);
+
+		/* Configure the high order parts of the data in/out buffers. */
+		vmci_write_reg(vmci_dev, upper_32_bits(vmci_dev->data_buffer_base),
+			       VMCI_DATA_IN_HIGH_ADDR);
+		vmci_write_reg(vmci_dev, upper_32_bits(vmci_dev->tx_buffer_base),
+			       VMCI_DATA_OUT_HIGH_ADDR);
 	}
 
 	/* Set up global device so that we can start sending datagrams */
@@ -755,8 +804,8 @@ static int vmci_guest_probe_device(struct pci_dev *pdev,
 	vmci_dev_g = NULL;
 	spin_unlock_irq(&vmci_dev_spinlock);
 
-err_free_data_buffer:
-	vfree(vmci_dev->data_buffer);
+err_free_data_buffers:
+	vmci_free_dg_buffers(vmci_dev);
 
 	/* The rest are managed resources and will be freed by PCI core */
 	return error;
@@ -814,7 +863,10 @@ static void vmci_guest_remove_device(struct pci_dev *pdev)
 				  vmci_dev->notification_base);
 	}
 
-	vfree(vmci_dev->data_buffer);
+	vmci_free_dg_buffers(vmci_dev);
+
+	if (vmci_dev->mmio_base != NULL)
+		pci_iounmap(pdev, vmci_dev->mmio_base);
 
 	/* The rest are managed resources and will be freed by PCI core */
 }
diff --git a/include/linux/vmw_vmci_defs.h b/include/linux/vmw_vmci_defs.h
index 2b70c024dacb..8bc37d8244a8 100644
--- a/include/linux/vmw_vmci_defs.h
+++ b/include/linux/vmw_vmci_defs.h
@@ -21,6 +21,10 @@
 #define VMCI_CAPS_ADDR          0x18
 #define VMCI_RESULT_LOW_ADDR    0x1c
 #define VMCI_RESULT_HIGH_ADDR   0x20
+#define VMCI_DATA_OUT_LOW_ADDR  0x24
+#define VMCI_DATA_OUT_HIGH_ADDR 0x28
+#define VMCI_DATA_IN_LOW_ADDR   0x2c
+#define VMCI_DATA_IN_HIGH_ADDR  0x30
 #define VMCI_GUEST_PAGE_SHIFT   0x34

/* Max number of devices. */
--
2.25.1