On 05/31/2018 02:10 AM, Dongwon Kim wrote:
On Fri, May 25, 2018 at 06:33:29PM +0300, Oleksandr Andrushchenko wrote:
From: Oleksandr Andrushchenko <oleksandr_andrushchenko@xxxxxxxx>

1. Create a dma-buf from grant references provided by the foreign
   domain. By default dma-buf is backed by system memory pages, but
   by providing GNTDEV_DMA_FLAG_XXX flags it can also be created
   as a DMA write-combine/coherent buffer, e.g. allocated with
   corresponding dma_alloc_xxx API.
   Export the resulting buffer as a new dma-buf.

2. Implement waiting for the dma-buf to be released: block until the
   dma-buf with the file descriptor provided is released.
   If within the time-out provided the buffer is not released then
   -ETIMEDOUT error is returned. If the buffer with the file descriptor
   does not exist or has already been released, then -ENOENT is returned.
   For valid file descriptors this must not be treated as an error.
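For illustration, the intended user-space flow could look as follows. This is
a sketch only: the ioctl numbers and argument structs are assumed from this
series' uapi header (named here as in the eventually merged
include/uapi/xen/gntdev.h) and may differ in this revision.

#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <xen/gntdev.h>	/* uapi header added by this series (assumed) */

/* Export @count grant references of @domid as a dma-buf; returns its fd. */
static int exp_from_refs(int gntdev, uint32_t domid,
			 const uint32_t *refs, uint32_t count)
{
	struct ioctl_gntdev_dmabuf_exp_from_refs *op;
	int fd = -1;

	/* The uapi struct ends in a flexible refs[1] array, hence count - 1. */
	op = malloc(sizeof(*op) + (count - 1) * sizeof(op->refs[0]));
	if (!op)
		return -1;

	op->flags = 0;	/* or GNTDEV_DMA_FLAG_WC / GNTDEV_DMA_FLAG_COHERENT */
	op->count = count;
	op->domid = domid;
	memcpy(op->refs, refs, count * sizeof(op->refs[0]));

	if (ioctl(gntdev, IOCTL_GNTDEV_DMABUF_EXP_FROM_REFS, op) == 0)
		fd = op->fd;	/* OUT: file descriptor of the new dma-buf */
	free(op);
	return fd;
}

/* Block until the dma-buf behind @fd is released, at most @wait_to_ms ms. */
static int exp_wait_released(int gntdev, int fd, uint32_t wait_to_ms)
{
	struct ioctl_gntdev_dmabuf_exp_wait_released wait = {
		.fd = fd,
		.wait_to_ms = wait_to_ms,
	};

	/* ENOENT means "already released", which is not an error here. */
	return ioctl(gntdev, IOCTL_GNTDEV_DMABUF_EXP_WAIT_RELEASED, &wait);
}

Here gntdev is an open file descriptor of /dev/xen/gntdev.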
Signed-off-by: Oleksandr Andrushchenko <oleksandr_andrushchenko@xxxxxxxx>
---
 drivers/xen/gntdev.c | 478 ++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 476 insertions(+), 2 deletions(-)
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index 9e450622af1a..52abc6cd5846 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -4,6 +4,8 @@
  * Device for accessing (in user-space) pages that have been granted by other
  * domains.
  *
+ * DMA buffer implementation is based on drivers/gpu/drm/drm_prime.c.
+ *
  * Copyright (c) 2006-2007, D G Murray.
  *           (c) 2009 Gerd Hoffmann <kraxel@xxxxxxxxxx>
  *           (c) 2018 Oleksandr Andrushchenko, EPAM Systems Inc.
@@ -41,6 +43,9 @@
 #ifdef CONFIG_XEN_GRANT_DMA_ALLOC
 #include <linux/of_device.h>
 #endif
+#ifdef CONFIG_XEN_GNTDEV_DMABUF
+#include <linux/dma-buf.h>
+#endif
 
 #include <xen/xen.h>
 #include <xen/grant_table.h>
@@ -81,6 +86,17 @@ struct gntdev_priv {
 	/* Device for which DMA memory is allocated. */
 	struct device *dma_dev;
 #endif
+
+#ifdef CONFIG_XEN_GNTDEV_DMABUF
+	/* Private data of the hyper DMA buffers. */
+
+	/* List of exported DMA buffers. */
+	struct list_head dmabuf_exp_list;
+	/* List of wait objects. */
+	struct list_head dmabuf_exp_wait_list;
+	/* This is the lock which protects dma_buf_xxx lists. */
+	struct mutex dmabuf_lock;
+#endif
 };
 
 struct unmap_notify {
@@ -125,12 +141,38 @@ struct grant_map {
 
 #ifdef CONFIG_XEN_GNTDEV_DMABUF
 struct xen_dmabuf {
+	struct gntdev_priv *priv;
+	struct dma_buf *dmabuf;
+	struct list_head next;
+	int fd;
+
 	union {
+		struct {
+			/* Exported buffers are reference counted. */
+			struct kref refcount;
+			struct grant_map *map;
+		} exp;
 		struct {
 			/* Granted references of the imported buffer. */
 			grant_ref_t *refs;
 		} imp;
 	} u;
+
+	/* Number of pages this buffer has. */
+	int nr_pages;
+	/* Pages of this buffer. */
+	struct page **pages;
+};
+
+struct xen_dmabuf_wait_obj {
+	struct list_head next;
+	struct xen_dmabuf *xen_dmabuf;
+	struct completion completion;
+};
+
+struct xen_dmabuf_attachment {
+	struct sg_table *sgt;
+	enum dma_data_direction dir;
 };
 #endif
 
@@ -320,6 +362,16 @@ static void gntdev_put_map(struct gntdev_priv *priv, struct grant_map *map)
 	gntdev_free_map(map);
 }
 
+#ifdef CONFIG_XEN_GNTDEV_DMABUF
+static void gntdev_remove_map(struct gntdev_priv *priv, struct grant_map *map)
+{
+	mutex_lock(&priv->lock);
+	list_del(&map->next);
+	gntdev_put_map(NULL /* already removed */, map);
+	mutex_unlock(&priv->lock);
+}
+#endif
+
 /* ------------------------------------------------------------------ */
 
 static int find_grant_ptes(pte_t *pte, pgtable_t token,
@@ -628,6 +680,12 @@ static int gntdev_open(struct inode *inode, struct file *flip)
 	INIT_LIST_HEAD(&priv->freeable_maps);
 	mutex_init(&priv->lock);
 
+#ifdef CONFIG_XEN_GNTDEV_DMABUF
+	mutex_init(&priv->dmabuf_lock);
+	INIT_LIST_HEAD(&priv->dmabuf_exp_list);
+	INIT_LIST_HEAD(&priv->dmabuf_exp_wait_list);
+#endif
+
 	if (use_ptemod) {
 		priv->mm = get_task_mm(current);
 		if (!priv->mm) {
@@ -1053,17 +1111,433 @@ static long gntdev_ioctl_grant_copy(struct gntdev_priv *priv, void __user *u)
 /* DMA buffer export support.                                         */
 /* ------------------------------------------------------------------ */
 
+/* ------------------------------------------------------------------ */
+/* Implementation of wait for exported DMA buffer to be released.     */
+/* ------------------------------------------------------------------ */
+
+static void dmabuf_exp_release(struct kref *kref);
+
+static struct xen_dmabuf_wait_obj *
+dmabuf_exp_wait_obj_new(struct gntdev_priv *priv,
+			struct xen_dmabuf *xen_dmabuf)
+{
+	struct xen_dmabuf_wait_obj *obj;
+
+	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+	if (!obj)
+		return ERR_PTR(-ENOMEM);
+
+	init_completion(&obj->completion);
+	obj->xen_dmabuf = xen_dmabuf;
+
+	mutex_lock(&priv->dmabuf_lock);
+	list_add(&obj->next, &priv->dmabuf_exp_wait_list);
+	/* Put our reference and wait for xen_dmabuf's release to fire. */
+	kref_put(&xen_dmabuf->u.exp.refcount, dmabuf_exp_release);
+	mutex_unlock(&priv->dmabuf_lock);
+	return obj;
+}
+
+static void dmabuf_exp_wait_obj_free(struct gntdev_priv *priv,
+				     struct xen_dmabuf_wait_obj *obj)
+{
+	struct xen_dmabuf_wait_obj *cur_obj, *q;
+
+	mutex_lock(&priv->dmabuf_lock);
+	list_for_each_entry_safe(cur_obj, q, &priv->dmabuf_exp_wait_list, next)
+		if (cur_obj == obj) {
+			list_del(&obj->next);
+			kfree(obj);
+			break;
+		}
+	mutex_unlock(&priv->dmabuf_lock);
+}
+
+static int dmabuf_exp_wait_obj_wait(struct xen_dmabuf_wait_obj *obj,
+				    u32 wait_to_ms)
+{
+	if (wait_for_completion_timeout(&obj->completion,
+			msecs_to_jiffies(wait_to_ms)) <= 0)
+		return -ETIMEDOUT;
+
+	return 0;
+}
+
+static void dmabuf_exp_wait_obj_signal(struct gntdev_priv *priv,
+				       struct xen_dmabuf *xen_dmabuf)
+{
+	struct xen_dmabuf_wait_obj *obj, *q;
+
+	list_for_each_entry_safe(obj, q, &priv->dmabuf_exp_wait_list, next)
+		if (obj->xen_dmabuf == xen_dmabuf) {
+			pr_debug("Found xen_dmabuf in the wait list, wake\n");
+			complete_all(&obj->completion);
+		}
+}
+
+static struct xen_dmabuf *
+dmabuf_exp_wait_obj_get_by_fd(struct gntdev_priv *priv, int fd)
+{
+	struct xen_dmabuf *q, *xen_dmabuf, *ret = ERR_PTR(-ENOENT);
+
+	mutex_lock(&priv->dmabuf_lock);
+	list_for_each_entry_safe(xen_dmabuf, q, &priv->dmabuf_exp_list, next)
+		if (xen_dmabuf->fd == fd) {
+			pr_debug("Found xen_dmabuf in the wait list\n");
+			kref_get(&xen_dmabuf->u.exp.refcount);
+			ret = xen_dmabuf;
+			break;
+		}
+	mutex_unlock(&priv->dmabuf_lock);
+	return ret;
+}
+
 static int dmabuf_exp_wait_released(struct gntdev_priv *priv, int fd,
 				    int wait_to_ms)
 {
-	return -ETIMEDOUT;
+	struct xen_dmabuf *xen_dmabuf;
+	struct xen_dmabuf_wait_obj *obj;
+	int ret;
+
+	pr_debug("Will wait for dma-buf with fd %d\n", fd);
+	/*
+	 * Try to find the DMA buffer: if it is not found, then either the
+	 * buffer has already been released or the file descriptor provided
+	 * is wrong.
+	 */
+	xen_dmabuf = dmabuf_exp_wait_obj_get_by_fd(priv, fd);
+	if (IS_ERR(xen_dmabuf))
+		return PTR_ERR(xen_dmabuf);
+
+	/*
+	 * xen_dmabuf still exists and is reference count locked by us now,
+	 * so prepare to wait: allocate a wait object and add it to the wait
+	 * list, so we can find it on release.
+	 */
+	obj = dmabuf_exp_wait_obj_new(priv, xen_dmabuf);
+	if (IS_ERR(obj)) {
+		pr_err("Failed to setup wait object, ret %ld\n", PTR_ERR(obj));
+		return PTR_ERR(obj);
+	}
+
+	ret = dmabuf_exp_wait_obj_wait(obj, wait_to_ms);
+	dmabuf_exp_wait_obj_free(priv, obj);
+	return ret;
+}
+
+/* ------------------------------------------------------------------ */
+/* DMA buffer export support.                                         */
+/* ------------------------------------------------------------------ */
+
+static struct sg_table *
+dmabuf_pages_to_sgt(struct page **pages, unsigned int nr_pages)
+{
+	struct sg_table *sgt;
+	int ret;
+
+	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
+	if (!sgt) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	ret = sg_alloc_table_from_pages(sgt, pages, nr_pages, 0,
+					nr_pages << PAGE_SHIFT,
+					GFP_KERNEL);
+	if (ret)
+		goto out;
+
+	return sgt;
+
+out:
+	kfree(sgt);
+	return ERR_PTR(ret);
+}
+
+static int dmabuf_exp_ops_attach(struct dma_buf *dma_buf,
+				 struct device *target_dev,
+				 struct dma_buf_attachment *attach)
+{
+	struct xen_dmabuf_attachment *xen_dmabuf_attach;
+
+	xen_dmabuf_attach = kzalloc(sizeof(*xen_dmabuf_attach), GFP_KERNEL);
+	if (!xen_dmabuf_attach)
+		return -ENOMEM;
+
+	xen_dmabuf_attach->dir = DMA_NONE;
+	attach->priv = xen_dmabuf_attach;
+	/* Might need to pin the pages of the buffer now. */
+	return 0;
+}
+
+static void dmabuf_exp_ops_detach(struct dma_buf *dma_buf,
+				  struct dma_buf_attachment *attach)
+{
+	struct xen_dmabuf_attachment *xen_dmabuf_attach = attach->priv;
+
+	if (xen_dmabuf_attach) {
+		struct sg_table *sgt = xen_dmabuf_attach->sgt;
+
+		if (sgt) {
+			if (xen_dmabuf_attach->dir != DMA_NONE)
+				dma_unmap_sg_attrs(attach->dev, sgt->sgl,
+						   sgt->nents,
+						   xen_dmabuf_attach->dir,
+						   DMA_ATTR_SKIP_CPU_SYNC);
+			sg_free_table(sgt);
+		}
+
+		kfree(sgt);
+		kfree(xen_dmabuf_attach);
+		attach->priv = NULL;
+	}
+	/* Might need to unpin the pages of the buffer now. */
+}
+
+static struct sg_table *
+dmabuf_exp_ops_map_dma_buf(struct dma_buf_attachment *attach,
+			   enum dma_data_direction dir)
+{
+	struct xen_dmabuf_attachment *xen_dmabuf_attach = attach->priv;
+	struct xen_dmabuf *xen_dmabuf = attach->dmabuf->priv;
+	struct sg_table *sgt;
+
+	pr_debug("Mapping %d pages for dev %p\n", xen_dmabuf->nr_pages,
+		 attach->dev);
+
+	if (WARN_ON(dir == DMA_NONE || !xen_dmabuf_attach))
+		return ERR_PTR(-EINVAL);
+
+	/* Return the cached mapping when possible. */
+	if (xen_dmabuf_attach->dir == dir)
+		return xen_dmabuf_attach->sgt;

may need to check xen_dmabuf_attach->sgt == NULL (i.e. first time mapping)?
Also, I am not sure if this mechanism of reusing the previously generated sgt
for other mappings is universally ok for any use-case... I don't know if it
is acceptable as per the specification.

The sgt is returned for the same attachment, so it is ok to return this
cached one. Well, I was not sure about this piece of code as well, so I'll
probably allocate a new sgt each time and not reuse it as now: I see other
drivers in the kernel do the same. I think that *should* be ok.
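For reference, a minimal sketch of the suggested first-time check
(illustrative only, on top of the attachment bookkeeping introduced by this
patch):

	/* Return the cached mapping only if one has been created... */
	if (xen_dmabuf_attach->sgt) {
		/*
		 * ...and the direction matches: two mappings with
		 * different directions for the same attachment are
		 * not allowed.
		 */
		if (xen_dmabuf_attach->dir != dir)
			return ERR_PTR(-EBUSY);
		return xen_dmabuf_attach->sgt;
	}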
+
+	/*
+	 * Two mappings with different directions for the same attachment
+	 * are not allowed.
+	 */
+	if (WARN_ON(xen_dmabuf_attach->dir != DMA_NONE))
+		return ERR_PTR(-EBUSY);
+
+	sgt = dmabuf_pages_to_sgt(xen_dmabuf->pages, xen_dmabuf->nr_pages);
+	if (!IS_ERR(sgt)) {
+		if (!dma_map_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir,
+				      DMA_ATTR_SKIP_CPU_SYNC)) {
+			sg_free_table(sgt);
+			kfree(sgt);
+			sgt = ERR_PTR(-ENOMEM);
+		} else {
+			xen_dmabuf_attach->sgt = sgt;
+			xen_dmabuf_attach->dir = dir;
+		}
+	}
+	if (IS_ERR(sgt))
+		pr_err("Failed to map sg table for dev %p\n", attach->dev);
+	return sgt;
+}
+
+static void dmabuf_exp_ops_unmap_dma_buf(struct dma_buf_attachment *attach,
+					 struct sg_table *sgt,
+					 enum dma_data_direction dir)
+{
+	/* Not implemented. The unmap is done at dmabuf_exp_ops_detach(). */
+}

Not sure if it's ok to do nothing here, because the spec says this function
is mandatory and it should unmap and "release" the &sg_table associated with
it:

	/**
	 * @unmap_dma_buf:
	 *
	 * This is called by dma_buf_unmap_attachment() and should unmap and
	 * release the &sg_table allocated in @map_dma_buf, and it is mandatory.
	 * It should also unpin the backing storage if this is the last mapping
	 * of the DMA buffer, if the exporter supports backing storage
	 * migration.
	 */

Yes, as I say at the top of the file, dma-buf handling is DRM PRIME based,
so I have the workflow just like in there.
Do you think we have to be more strict and rework this?
Daniel, what do you think?
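Should we decide to be strict here, a sketch of a spec-literal variant
(untested) would do the unmap and release in the callback itself and reset
the cached state, so that dmabuf_exp_ops_detach() does not free the sgt a
second time:

static void dmabuf_exp_ops_unmap_dma_buf(struct dma_buf_attachment *attach,
					 struct sg_table *sgt,
					 enum dma_data_direction dir)
{
	struct xen_dmabuf_attachment *xen_dmabuf_attach = attach->priv;

	dma_unmap_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir,
			   DMA_ATTR_SKIP_CPU_SYNC);
	sg_free_table(sgt);
	kfree(sgt);

	/* Make sure detach does not unmap/free this sgt again. */
	if (xen_dmabuf_attach) {
		xen_dmabuf_attach->sgt = NULL;
		xen_dmabuf_attach->dir = DMA_NONE;
	}
}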
+
+static void dmabuf_exp_release(struct kref *kref)
+{
+	struct xen_dmabuf *xen_dmabuf =
+		container_of(kref, struct xen_dmabuf, u.exp.refcount);
+
+	dmabuf_exp_wait_obj_signal(xen_dmabuf->priv, xen_dmabuf);
+	list_del(&xen_dmabuf->next);
+	kfree(xen_dmabuf);
+}
+
+static void dmabuf_exp_ops_release(struct dma_buf *dma_buf)
+{
+	struct xen_dmabuf *xen_dmabuf = dma_buf->priv;
+	struct gntdev_priv *priv = xen_dmabuf->priv;
+
+	gntdev_remove_map(priv, xen_dmabuf->u.exp.map);
+	mutex_lock(&priv->dmabuf_lock);
+	kref_put(&xen_dmabuf->u.exp.refcount, dmabuf_exp_release);
+	mutex_unlock(&priv->dmabuf_lock);
+}
+
+static void *dmabuf_exp_ops_kmap_atomic(struct dma_buf *dma_buf,
+					unsigned long page_num)
+{
+	/* Not implemented. */
+	return NULL;
+}
+
+static void dmabuf_exp_ops_kunmap_atomic(struct dma_buf *dma_buf,
+					 unsigned long page_num, void *addr)
+{
+	/* Not implemented. */
+}
+
+static void *dmabuf_exp_ops_kmap(struct dma_buf *dma_buf,
+				 unsigned long page_num)
+{
+	/* Not implemented. */
+	return NULL;
+}
+
+static void dmabuf_exp_ops_kunmap(struct dma_buf *dma_buf,
+				  unsigned long page_num, void *addr)
+{
+	/* Not implemented. */
+}
+
+static int dmabuf_exp_ops_mmap(struct dma_buf *dma_buf,
+			       struct vm_area_struct *vma)
+{
+	/* Not implemented. */
+	return 0;
+}
+
+static const struct dma_buf_ops dmabuf_exp_ops = {
+	.attach = dmabuf_exp_ops_attach,
+	.detach = dmabuf_exp_ops_detach,
+	.map_dma_buf = dmabuf_exp_ops_map_dma_buf,
+	.unmap_dma_buf = dmabuf_exp_ops_unmap_dma_buf,
+	.release = dmabuf_exp_ops_release,
+	.map = dmabuf_exp_ops_kmap,
+	.map_atomic = dmabuf_exp_ops_kmap_atomic,
+	.unmap = dmabuf_exp_ops_kunmap,
+	.unmap_atomic = dmabuf_exp_ops_kunmap_atomic,
+	.mmap = dmabuf_exp_ops_mmap,
+};
+
+static int dmabuf_export(struct gntdev_priv *priv, struct grant_map *map,
+			 int *fd)
+{
+	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+	struct xen_dmabuf *xen_dmabuf;
+	int ret = 0;
+
+	xen_dmabuf = kzalloc(sizeof(*xen_dmabuf), GFP_KERNEL);
+	if (!xen_dmabuf)
+		return -ENOMEM;
+
+	kref_init(&xen_dmabuf->u.exp.refcount);
+
+	xen_dmabuf->priv = priv;
+	xen_dmabuf->nr_pages = map->count;
+	xen_dmabuf->pages = map->pages;
+	xen_dmabuf->u.exp.map = map;
+
+	exp_info.exp_name = KBUILD_MODNAME;
+	if (map->dma_dev->driver && map->dma_dev->driver->owner)
+		exp_info.owner = map->dma_dev->driver->owner;
+	else
+		exp_info.owner = THIS_MODULE;
+	exp_info.ops = &dmabuf_exp_ops;
+	exp_info.size = map->count << PAGE_SHIFT;
+	exp_info.flags = O_RDWR;
+	exp_info.priv = xen_dmabuf;
+
+	xen_dmabuf->dmabuf = dma_buf_export(&exp_info);
+	if (IS_ERR(xen_dmabuf->dmabuf)) {
+		ret = PTR_ERR(xen_dmabuf->dmabuf);
+		xen_dmabuf->dmabuf = NULL;
+		goto fail;
+	}
+
+	ret = dma_buf_fd(xen_dmabuf->dmabuf, O_CLOEXEC);
+	if (ret < 0)
+		goto fail;
+
+	xen_dmabuf->fd = ret;
+	*fd = ret;
+
+	pr_debug("Exporting DMA buffer with fd %d\n", ret);
+
+	mutex_lock(&priv->dmabuf_lock);
+	list_add(&xen_dmabuf->next, &priv->dmabuf_exp_list);
+	mutex_unlock(&priv->dmabuf_lock);
+	return 0;
+
+fail:
+	if (xen_dmabuf->dmabuf)
+		dma_buf_put(xen_dmabuf->dmabuf);
+	kfree(xen_dmabuf);
+	return ret;
+}
+
+static struct grant_map *
+dmabuf_exp_alloc_backing_storage(struct gntdev_priv *priv, int dmabuf_flags,
+				 int count)
+{
+	struct grant_map *map;
+
+	if (unlikely(count <= 0))
+		return ERR_PTR(-EINVAL);
+
+	if ((dmabuf_flags & GNTDEV_DMA_FLAG_WC) &&
+	    (dmabuf_flags & GNTDEV_DMA_FLAG_COHERENT)) {
+		pr_err("Wrong dma-buf flags: either WC or coherent, not both\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	map = gntdev_alloc_map(priv, count, dmabuf_flags);
+	if (!map)
+		return ERR_PTR(-ENOMEM);
+
+	if (unlikely(atomic_add_return(count, &pages_mapped) > limit)) {
+		pr_err("can't map: over limit\n");
+		gntdev_put_map(NULL, map);
+		return ERR_PTR(-ENOMEM);
+	}
+	return map;
 }

When and how would this allocation be freed? I don't see any ioctl for
freeing up shared pages.

On xen_dmabuf.release callback, which is refcounted.
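To spell out the release path of this patch (no dedicated ioctl is involved;
function names as in the code above):

/*
 * Teardown happens when user space closes the last fd of the dma-buf:
 *
 *   close(fd)                            drops the last file reference
 *     -> dmabuf_exp_ops_release()        dma_buf_ops.release
 *          -> gntdev_remove_map()        unmaps grants, frees the pages
 *          -> kref_put(&xen_dmabuf->u.exp.refcount, dmabuf_exp_release)
 *               -> dmabuf_exp_wait_obj_signal()  wakes wait-ioctl callers
 *               -> kfree(xen_dmabuf)
 */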
 
 static int dmabuf_exp_from_refs(struct gntdev_priv *priv, int flags,
 				int count, u32 domid, u32 *refs, u32 *fd)
 {
+	struct grant_map *map;
+	int i, ret;
+
 	*fd = -1;
-	return -EINVAL;
+
+	if (use_ptemod) {
+		pr_err("Cannot provide dma-buf: use_ptemod %d\n",
+		       use_ptemod);
+		return -EINVAL;
+	}
+
+	map = dmabuf_exp_alloc_backing_storage(priv, flags, count);
+	if (IS_ERR(map))
+		return PTR_ERR(map);
+
+	for (i = 0; i < count; i++) {
+		map->grants[i].domid = domid;
+		map->grants[i].ref = refs[i];
+	}
+
+	mutex_lock(&priv->lock);
+	gntdev_add_map(priv, map);
+	mutex_unlock(&priv->lock);
+
+	map->flags |= GNTMAP_host_map;
+#if defined(CONFIG_X86)
+	map->flags |= GNTMAP_device_map;
+#endif
+
+	ret = map_grant_pages(map);
+	if (ret < 0)
+		goto out;
+
+	ret = dmabuf_export(priv, map, fd);
+	if (ret < 0)
+		goto out;
+
+	return 0;
+
+out:
+	gntdev_remove_map(priv, map);
+	return ret;
 }
 
 /* ------------------------------------------------------------------ */
--
2.17.0