[PATCH 6/6] staging: vme_user: provide DMA functionality

From: Dmitry Kalinkin
Date: Mon May 18 2015 - 14:57:58 EST


This introduces a new dma device that provides a single ioctl call
exposing DMA read and write functionality to user space.
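
A minimal sketch of how user space might drive the new interface follows
(the device node name matches the "bus/vme/dma0" entry created below; the
VME_A32/VME_SCT/VME_USER/VME_DATA/VME_D32 constants are taken from the
kernel's vme.h and are assumed to be visible to user space; the VME address
is only an example):

/* Hypothetical user-space example: DMA-read 4 KiB from the VME bus. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

#include "vme_user.h"   /* struct vme_dma_op, VME_DO_DMA */

int main(void)
{
        uint8_t buf[4096];
        struct vme_dma_op op;
        int fd, ret;

        fd = open("/dev/bus/vme/dma0", O_RDWR);
        if (fd < 0) {
                perror("open");
                return 1;
        }

        memset(&op, 0, sizeof(op));
        op.aspace = VME_A32;                      /* A32 address space */
        op.cycle = VME_SCT | VME_USER | VME_DATA; /* single cycle transfers */
        op.dwidth = VME_D32;                      /* 32-bit data width */
        op.vme_addr = 0x10000000;                 /* example VME address */
        op.buf_vaddr = (uintptr_t)buf;            /* user buffer to fill */
        op.count = sizeof(buf);
        op.write = 0;                             /* 0: VME -> memory */

        ret = ioctl(fd, VME_DO_DMA, &op);
        if (ret < 0)
                perror("VME_DO_DMA");
        else
                printf("transferred %d bytes\n", ret);

        close(fd);
        return ret < 0 ? 1 : 0;
}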

Signed-off-by: Dmitry Kalinkin <dmitry.kalinkin@xxxxxxxxx>
Cc: Igor Alekseev <igor.alekseev@xxxxxxx>
---
drivers/staging/vme/devices/vme_user.c | 174 ++++++++++++++++++++++++++++++++-
drivers/staging/vme/devices/vme_user.h | 11 +++
2 files changed, 183 insertions(+), 2 deletions(-)

diff --git a/drivers/staging/vme/devices/vme_user.c b/drivers/staging/vme/devices/vme_user.c
index da828f4..aca1e0b 100644
--- a/drivers/staging/vme/devices/vme_user.c
+++ b/drivers/staging/vme/devices/vme_user.c
@@ -79,13 +79,14 @@ static unsigned int bus_num;
* We shall support 4 masters and 4 slaves with this driver.
*/
#define VME_MAJOR 221 /* VME Major Device Number */
-#define VME_DEVS 9 /* Number of dev entries */
+#define VME_DEVS 10 /* Number of dev entries */

#define MASTER_MINOR 0
#define MASTER_MAX 3
#define SLAVE_MINOR 4
#define SLAVE_MAX 7
#define CONTROL_MINOR 8
+#define DMA_MINOR 9

#define PCI_BUF_SIZE 0x20000 /* Size of one slave image buffer */

@@ -125,7 +126,7 @@ static const int type[VME_DEVS] = { MASTER_MINOR, MASTER_MINOR,
MASTER_MINOR, MASTER_MINOR,
SLAVE_MINOR, SLAVE_MINOR,
SLAVE_MINOR, SLAVE_MINOR,
- CONTROL_MINOR
+ CONTROL_MINOR, DMA_MINOR
};


@@ -443,6 +444,145 @@ static loff_t vme_user_llseek(struct file *file, loff_t off, int whence)
return -EINVAL;
}

+static int vme_user_sg_to_dma_list(const struct vme_dma_op *dma_op,
+ struct sg_table *sgt, int sg_count, struct vme_dma_list *dma_list)
+{
+ ssize_t pos = 0;
+ struct scatterlist *sg;
+ int i, ret;
+
+ for_each_sg(sgt->sgl, sg, sg_count, i) {
+ struct vme_dma_attr *pci_attr, *vme_attr, *dest, *src;
+ dma_addr_t hw_address = sg_dma_address(sg);
+ unsigned int hw_len = sg_dma_len(sg);
+
+ vme_attr = vme_dma_vme_attribute(dma_op->vme_addr + pos,
+ dma_op->aspace, dma_op->cycle, dma_op->dwidth);
+ if (!vme_attr)
+ return -ENOMEM;
+
+ pci_attr = vme_dma_pci_attribute(hw_address);
+ if (!pci_attr) {
+ vme_dma_free_attribute(vme_attr);
+ return -ENOMEM;
+ }
+
+ if (dma_op->write) {
+ dest = vme_attr;
+ src = pci_attr;
+ } else {
+ dest = pci_attr;
+ src = vme_attr;
+ }
+
+ ret = vme_dma_list_add(dma_list, src, dest, hw_len);
+
+ /*
+ * XXX VME API doesn't mention whether we should keep
+ * attributes around
+ */
+ vme_dma_free_attribute(vme_attr);
+ vme_dma_free_attribute(pci_attr);
+
+ if (ret)
+ return ret;
+
+ pos += hw_len;
+ }
+
+ WARN_ON(pos != dma_op->count);
+
+ return 0;
+}
+
+static ssize_t vme_user_dma_ioctl(unsigned int minor,
+ const struct vme_dma_op *dma_op)
+{
+ unsigned int offset = offset_in_page(dma_op->buf_vaddr);
+ unsigned long nr_pages;
+ enum dma_data_direction dir;
+ struct vme_dma_list *dma_list;
+ struct sg_table *sgt = NULL;
+ struct page **pages = NULL;
+ long got_pages;
+ int ret, sg_count;
+
+ /* Overflow check for nr_pages */
+ if (dma_op->count > U32_MAX - 2 * PAGE_SIZE)
+ return -EINVAL;
+
+ /* Prevent WARN from dma_map_sg */
+ if (dma_op->count == 0)
+ return 0;
+
+ nr_pages = (offset + dma_op->count + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ dir = dma_op->write ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
+
+ pages = kmalloc_array(nr_pages, sizeof(pages[0]), GFP_KERNEL);
+ if (!pages) {
+ ret = -ENOMEM;
+ goto free;
+ }
+
+ sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
+ if (!sgt) {
+ ret = -ENOMEM;
+ goto free;
+ }
+
+ dma_list = vme_new_dma_list(image[minor].resource);
+ if (!dma_list) {
+ ret = -ENOMEM;
+ goto free;
+ }
+
+ got_pages = get_user_pages_fast(dma_op->buf_vaddr, nr_pages,
+ !dma_op->write, pages);
+ if (got_pages != nr_pages) {
+ pr_debug("Not all pages were pinned\n");
+ ret = (got_pages < 0) ? got_pages : -EFAULT;
+ goto release_pages;
+ }
+
+ ret = sg_alloc_table_from_pages(sgt, pages, nr_pages,
+ offset, dma_op->count, GFP_KERNEL);
+ if (ret)
+ goto release_pages;
+
+ sg_count = dma_map_sg(vme_user_bridge->dev.parent,
+ sgt->sgl, sgt->nents, dir);
+ if (!sg_count) {
+ pr_debug("DMA mapping error\n");
+ ret = -EFAULT;
+ goto free_sgt;
+ }
+
+ ret = vme_user_sg_to_dma_list(dma_op, sgt, sg_count, dma_list);
+ if (ret)
+ goto dma_unmap;
+
+ ret = vme_dma_list_exec(dma_list);
+
+dma_unmap:
+ dma_unmap_sg(vme_user_bridge->dev.parent, sgt->sgl, sgt->nents, dir);
+
+free_sgt:
+ sg_free_table(sgt);
+
+release_pages:
+ if (got_pages > 0)
+ release_pages(pages, got_pages, 0);
+
+ vme_dma_list_free(dma_list);
+
+free:
+ kfree(sgt);
+ kfree(pages);
+ if (ret)
+ return ret;
+ return dma_op->count;
+}
+
/*
* The ioctls provided by the old VME access method (the one at vmelinux.org)
* are most certainly wrong as they effectively push the register layout
@@ -459,6 +599,7 @@ static int vme_user_ioctl(struct inode *inode, struct file *file,
struct vme_master master;
struct vme_slave slave;
struct vme_irq_id irq_req;
+ struct vme_dma_op dma_op;
unsigned long copied;
unsigned int minor = MINOR(inode->i_rdev);
int retval;
@@ -569,6 +710,19 @@ static int vme_user_ioctl(struct inode *inode, struct file *file,
break;
}
break;
+ case DMA_MINOR:
+ switch (cmd) {
+ case VME_DO_DMA:
+ copied = copy_from_user(&dma_op, argp,
+ sizeof(dma_op));
+ if (copied != 0) {
+ pr_warn("Partial copy from userspace\n");
+ return -EFAULT;
+ }
+
+ return vme_user_dma_ioctl(minor, &dma_op);
+ }
+ break;
}

return -EINVAL;
@@ -842,6 +996,15 @@ static int vme_user_probe(struct vme_dev *vdev)
}
}

+ image[DMA_MINOR].resource = vme_dma_request(vme_user_bridge,
+ VME_DMA_VME_TO_MEM | VME_DMA_MEM_TO_VME);
+ if (image[DMA_MINOR].resource == NULL) {
+ dev_warn(&vdev->dev,
+ "Unable to allocate dma resource\n");
+ err = -ENOMEM;
+ goto err_master;
+ }
+
/* Create sysfs entries - on udev systems this creates the dev files */
vme_user_sysfs_class = class_create(THIS_MODULE, driver_name);
if (IS_ERR(vme_user_sysfs_class)) {
@@ -864,6 +1027,9 @@ static int vme_user_probe(struct vme_dev *vdev)
case SLAVE_MINOR:
name = "bus/vme/s%d";
break;
+ case DMA_MINOR:
+ name = "bus/vme/dma0";
+ break;
default:
err = -EINVAL;
goto err_sysfs;
@@ -888,6 +1054,8 @@ err_sysfs:
}
class_destroy(vme_user_sysfs_class);

+ vme_dma_free(image[DMA_MINOR].resource);
+
/* Ensure counter set correctly to unalloc all master windows */
i = MASTER_MAX + 1;
err_master:
@@ -927,6 +1095,8 @@ static int vme_user_remove(struct vme_dev *dev)
}
class_destroy(vme_user_sysfs_class);

+ vme_dma_free(image[DMA_MINOR].resource);
+
for (i = MASTER_MINOR; i < (MASTER_MAX + 1); i++) {
kfree(image[i].kern_buf);
vme_master_free(image[i].resource);
diff --git a/drivers/staging/vme/devices/vme_user.h b/drivers/staging/vme/devices/vme_user.h
index b8cc7bc..2ae5d99 100644
--- a/drivers/staging/vme/devices/vme_user.h
+++ b/drivers/staging/vme/devices/vme_user.h
@@ -48,11 +48,22 @@ struct vme_irq_id {
__u8 statid;
};

+struct vme_dma_op {
+ __u32 aspace; /* Address Space */
+ __u32 cycle; /* Cycle properties */
+ __u32 dwidth; /* Data transfer width */
+ __u64 vme_addr; /* Starting Address on the VMEbus */
+ __u64 buf_vaddr; /* Pointer to userspace memory */
+ __u32 count; /* Count of bytes to copy */
+ __u32 write; /* Write flag */
+} __packed;
+
#define VME_GET_SLAVE _IOR(VME_IOC_MAGIC, 1, struct vme_slave)
#define VME_SET_SLAVE _IOW(VME_IOC_MAGIC, 2, struct vme_slave)
#define VME_GET_MASTER _IOR(VME_IOC_MAGIC, 3, struct vme_master)
#define VME_SET_MASTER _IOW(VME_IOC_MAGIC, 4, struct vme_master)
#define VME_IRQ_GEN _IOW(VME_IOC_MAGIC, 5, struct vme_irq_id)
+#define VME_DO_DMA _IOW(VME_IOC_MAGIC, 7, struct vme_dma_op)

#endif /* _VME_USER_H_ */

--
1.8.3.1
