It can be deleted without walking the list, and there is no reason to do that walk.
On 06/12/2018 09:41 AM, Oleksandr Andrushchenko wrote:
From: Oleksandr Andrushchenko <oleksandr_andrushchenko@xxxxxxxx>
1. Create a dma-buf from grant references provided by the foreign
   domain. By default dma-buf is backed by system memory pages, but
   by providing GNTDEV_DMA_FLAG_XXX flags it can also be created
   as a DMA write-combine/coherent buffer, e.g. allocated with
   corresponding dma_alloc_xxx API.
   Export the resulting buffer as a new dma-buf.
2. Implement waiting for the dma-buf to be released: block until the
   dma-buf with the file descriptor provided is released.
   If the buffer is not released within the provided time-out, then
   -ETIMEDOUT is returned. If the buffer with the given file descriptor
   does not exist or has already been released, then -ENOENT is
   returned. For valid file descriptors this must not be treated as
   an error. (A rough userspace usage sketch follows after this list.)
3. Make gntdev's common code and structures available to dma-buf.
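For context, a rough userspace sketch of the export-and-wait flow described above; the ioctl names and struct layouts are assumed from the uapi header added elsewhere in this series, not from this patch:

	/* Hypothetical usage; error handling trimmed. */
	#include <stdint.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <xen/gntdev.h>

	int export_one_ref_and_wait(int gntdev_fd, uint32_t domid, uint32_t ref)
	{
		struct ioctl_gntdev_dmabuf_exp_from_refs exp = {
			.flags = 0,	/* or GNTDEV_DMA_FLAG_WC / GNTDEV_DMA_FLAG_COHERENT */
			.count = 1,
			.domid = domid,
			.refs = { ref },
		};
		struct ioctl_gntdev_dmabuf_exp_wait_released wait;

		if (ioctl(gntdev_fd, IOCTL_GNTDEV_DMABUF_EXP_FROM_REFS, &exp))
			return -1;

		/* ... hand exp.fd to the consumer, e.g. a DRM/KMS driver ... */

		/* Block until the consumer drops the buffer, up to 1s. */
		wait.fd = exp.fd;
		wait.wait_to_ms = 1000;
		if (ioctl(gntdev_fd, IOCTL_GNTDEV_DMABUF_EXP_WAIT_RELEASED, &wait))
			perror("wait_released");	/* errno ETIMEDOUT, or ENOENT if already released */

		return exp.fd;
	}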
Signed-off-by: Oleksandr Andrushchenko <oleksandr_andrushchenko@xxxxxxxx>
---
 drivers/xen/gntdev-common.h | 4 +
 drivers/xen/gntdev-dmabuf.c | 470 +++++++++++++++++++++++++++++++++++-
 drivers/xen/gntdev.c | 10 +
 3 files changed, 482 insertions(+), 2 deletions(-)
diff --git a/drivers/xen/gntdev-common.h b/drivers/xen/gntdev-common.h
index a3408fd39b07..72f80dbce861 100644
--- a/drivers/xen/gntdev-common.h
+++ b/drivers/xen/gntdev-common.h
@@ -89,4 +89,8 @@ bool gntdev_account_mapped_pages(int count);
  int gntdev_map_grant_pages(struct gntdev_grant_map *map);
 +#ifdef CONFIG_XEN_GNTDEV_DMABUF
+void gntdev_remove_map(struct gntdev_priv *priv, struct gntdev_grant_map *map);
+#endif
+
 #endif
diff --git a/drivers/xen/gntdev-dmabuf.c b/drivers/xen/gntdev-dmabuf.c
index dc57c6a25525..84cba67c6ad7 100644
--- a/drivers/xen/gntdev-dmabuf.c
+++ b/drivers/xen/gntdev-dmabuf.c
@@ -3,13 +3,53 @@
 /*
  * Xen dma-buf functionality for gntdev.
  *
+ * DMA buffer implementation is based on drivers/gpu/drm/drm_prime.c.
+ *
  * Copyright (c) 2018 Oleksandr Andrushchenko, EPAM Systems Inc.
  */
 +#include <linux/dma-buf.h>
 #include <linux/slab.h>
 +#include <xen/grant_table.h>
+#include <xen/gntdev.h>
+
+#include "gntdev-common.h"
 #include "gntdev-dmabuf.h"
 +struct gntdev_dmabuf {
+	struct gntdev_dmabuf_priv *priv;
+	struct dma_buf *dmabuf;
+	struct list_head next;
+	int fd;
+
+	union {
+		struct {
+			/* Exported buffers are reference counted. */
+			struct kref refcount;
+
+			struct gntdev_priv *priv;
+			struct gntdev_grant_map *map;
+		} exp;
+	} u;
+
+	/* Number of pages this buffer has. */
+	int nr_pages;
+	/* Pages of this buffer. */
+	struct page **pages;
+};
+
+struct gntdev_dmabuf_wait_obj {
+	struct list_head next;
+	struct gntdev_dmabuf *gntdev_dmabuf;
+	struct completion completion;
+};
+
+struct gntdev_dmabuf_attachment {
+	struct sg_table *sgt;
+	enum dma_data_direction dir;
+};
+
 struct gntdev_dmabuf_priv {
 	/* List of exported DMA buffers. */
 	struct list_head exp_list;
@@ -23,17 +63,439 @@ struct gntdev_dmabuf_priv {
  /* Implementation of wait for exported DMA buffer to be released. */
 +static void dmabuf_exp_release(struct kref *kref);
+
+static struct gntdev_dmabuf_wait_obj *
+dmabuf_exp_wait_obj_new(struct gntdev_dmabuf_priv *priv,
+			struct gntdev_dmabuf *gntdev_dmabuf)
+{
+ÂÂÂ struct gntdev_dmabuf_wait_obj *obj;
+
+	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+	if (!obj)
+		return ERR_PTR(-ENOMEM);
+
+	init_completion(&obj->completion);
+	obj->gntdev_dmabuf = gntdev_dmabuf;
+
+	mutex_lock(&priv->lock);
+	list_add(&obj->next, &priv->exp_wait_list);
+	/* Put our reference and wait for gntdev_dmabuf's release to fire. */
+	kref_put(&gntdev_dmabuf->u.exp.refcount, dmabuf_exp_release);
+	mutex_unlock(&priv->lock);
+	return obj;
+}
+
+static void dmabuf_exp_wait_obj_free(struct gntdev_dmabuf_priv *priv,
+				     struct gntdev_dmabuf_wait_obj *obj)
+{
+	struct gntdev_dmabuf_wait_obj *cur_obj, *q;
+
+	mutex_lock(&priv->lock);
+	list_for_each_entry_safe(cur_obj, q, &priv->exp_wait_list, next)
+		if (cur_obj == obj) {
+			list_del(&obj->next);
+			kfree(obj);
+			break;
+		}
+	mutex_unlock(&priv->lock);
Do we really need to walk the list?
And if we do, do we need the safe variant of the walk? We are holding the lock. Here and elsewhere.
You are perfectly right. I will not use the safe variant of the walk; there is no need for that.
How about dmabuf_exp_wait_obj_get_dmabuf_by_fd?
+}
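For reference, a minimal sketch of this helper without the walk, assuming (as noted above) the wait object is always still on the wait list at this point:

	static void dmabuf_exp_wait_obj_free(struct gntdev_dmabuf_priv *priv,
					     struct gntdev_dmabuf_wait_obj *obj)
	{
		mutex_lock(&priv->lock);
		list_del(&obj->next);
		kfree(obj);
		mutex_unlock(&priv->lock);
	}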
+
+static int dmabuf_exp_wait_obj_wait(struct gntdev_dmabuf_wait_obj *obj,
+				    u32 wait_to_ms)
+{
+	if (wait_for_completion_timeout(&obj->completion,
+			msecs_to_jiffies(wait_to_ms)) <= 0)
+		return -ETIMEDOUT;
+
+	return 0;
+}
+
+static void dmabuf_exp_wait_obj_signal(struct gntdev_dmabuf_priv *priv,
+				       struct gntdev_dmabuf *gntdev_dmabuf)
+{
+	struct gntdev_dmabuf_wait_obj *obj, *q;
+
+	list_for_each_entry_safe(obj, q, &priv->exp_wait_list, next)
+		if (obj->gntdev_dmabuf == gntdev_dmabuf) {
+			pr_debug("Found gntdev_dmabuf in the wait list, wake\n");
+			complete_all(&obj->completion);
+			break;
+		}
+}
+
+static struct gntdev_dmabuf *
+dmabuf_exp_wait_obj_get_by_fd(struct gntdev_dmabuf_priv *priv, int fd)
The name of this routine implies (to me) that we are getting a wait object, but IIUC we are actually getting a gntdev_dmabuf that we will later associate with a wait object.
Will remove the "= 0".
+{
+	struct gntdev_dmabuf *q, *gntdev_dmabuf, *ret = ERR_PTR(-ENOENT);
+
+	mutex_lock(&priv->lock);
+	list_for_each_entry_safe(gntdev_dmabuf, q, &priv->exp_list, next)
+		if (gntdev_dmabuf->fd == fd) {
+			pr_debug("Found gntdev_dmabuf in the wait list\n");
+			kref_get(&gntdev_dmabuf->u.exp.refcount);
+			ret = gntdev_dmabuf;
+			break;
+		}
+	mutex_unlock(&priv->lock);
+	return ret;
+}
+
 int gntdev_dmabuf_exp_wait_released(struct gntdev_dmabuf_priv *priv, int fd,
 				    int wait_to_ms)
 {
-	return -EINVAL;
+	struct gntdev_dmabuf *gntdev_dmabuf;
+	struct gntdev_dmabuf_wait_obj *obj;
+	int ret;
+
+	pr_debug("Will wait for dma-buf with fd %d\n", fd);
+	/*
+	 * Try to find the DMA buffer: if not found means that
+	 * either the buffer has already been released or file descriptor
+	 * provided is wrong.
+	 */
+	gntdev_dmabuf = dmabuf_exp_wait_obj_get_by_fd(priv, fd);
+	if (IS_ERR(gntdev_dmabuf))
+		return PTR_ERR(gntdev_dmabuf);
+
+	/*
+	 * gntdev_dmabuf still exists and is reference count locked by us now,
+	 * so prepare to wait: allocate wait object and add it to the wait list,
+	 * so we can find it on release.
+	 */
+	obj = dmabuf_exp_wait_obj_new(priv, gntdev_dmabuf);
+	if (IS_ERR(obj))
+		return PTR_ERR(obj);
+
+	ret = dmabuf_exp_wait_obj_wait(obj, wait_to_ms);
+	dmabuf_exp_wait_obj_free(priv, obj);
+	return ret;
+}
+
+/* DMA buffer export support. */
+
+static struct sg_table *
+dmabuf_pages_to_sgt(struct page **pages, unsigned int nr_pages)
+{
+	struct sg_table *sgt;
+	int ret;
+
+	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
+	if (!sgt) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	ret = sg_alloc_table_from_pages(sgt, pages, nr_pages, 0,
+					nr_pages << PAGE_SHIFT,
+					GFP_KERNEL);
+	if (ret)
+		goto out;
+
+	return sgt;
+
+out:
+	kfree(sgt);
+	return ERR_PTR(ret);
+}
+
+static int dmabuf_exp_ops_attach(struct dma_buf *dma_buf,
+				 struct device *target_dev,
+				 struct dma_buf_attachment *attach)
+{
+	struct gntdev_dmabuf_attachment *gntdev_dmabuf_attach;
+
+	gntdev_dmabuf_attach = kzalloc(sizeof(*gntdev_dmabuf_attach),
+				       GFP_KERNEL);
+	if (!gntdev_dmabuf_attach)
+		return -ENOMEM;
+
+	gntdev_dmabuf_attach->dir = DMA_NONE;
+	attach->priv = gntdev_dmabuf_attach;
+	return 0;
+}
+
+static void dmabuf_exp_ops_detach(struct dma_buf *dma_buf,
+				  struct dma_buf_attachment *attach)
+{
+	struct gntdev_dmabuf_attachment *gntdev_dmabuf_attach = attach->priv;
+
+	if (gntdev_dmabuf_attach) {
+		struct sg_table *sgt = gntdev_dmabuf_attach->sgt;
+
+		if (sgt) {
+			if (gntdev_dmabuf_attach->dir != DMA_NONE)
+				dma_unmap_sg_attrs(attach->dev, sgt->sgl,
+						   sgt->nents,
+						   gntdev_dmabuf_attach->dir,
+						   DMA_ATTR_SKIP_CPU_SYNC);
+			sg_free_table(sgt);
+		}
+
+		kfree(sgt);
+		kfree(gntdev_dmabuf_attach);
+		attach->priv = NULL;
+	}
+}
+
+static struct sg_table *
+dmabuf_exp_ops_map_dma_buf(struct dma_buf_attachment *attach,
+			   enum dma_data_direction dir)
+{
+	struct gntdev_dmabuf_attachment *gntdev_dmabuf_attach = attach->priv;
+	struct gntdev_dmabuf *gntdev_dmabuf = attach->dmabuf->priv;
+	struct sg_table *sgt;
+
+	pr_debug("Mapping %d pages for dev %p\n", gntdev_dmabuf->nr_pages,
+		 attach->dev);
+
+	if (dir == DMA_NONE || !gntdev_dmabuf_attach)
+		return ERR_PTR(-EINVAL);
+
+	/* Return the cached mapping when possible. */
+	if (gntdev_dmabuf_attach->dir == dir)
+		return gntdev_dmabuf_attach->sgt;
+
+	/*
+	 * Two mappings with different directions for the same attachment are
+	 * not allowed.
+	 */
+	if (gntdev_dmabuf_attach->dir != DMA_NONE)
+		return ERR_PTR(-EBUSY);
+
+	sgt = dmabuf_pages_to_sgt(gntdev_dmabuf->pages,
+				  gntdev_dmabuf->nr_pages);
+	if (!IS_ERR(sgt)) {
+		if (!dma_map_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir,
+				      DMA_ATTR_SKIP_CPU_SYNC)) {
+			sg_free_table(sgt);
+			kfree(sgt);
+			sgt = ERR_PTR(-ENOMEM);
+		} else {
+			gntdev_dmabuf_attach->sgt = sgt;
+			gntdev_dmabuf_attach->dir = dir;
+		}
+	}
+	if (IS_ERR(sgt))
+		pr_debug("Failed to map sg table for dev %p\n", attach->dev);
+	return sgt;
+}
+
+static void dmabuf_exp_ops_unmap_dma_buf(struct dma_buf_attachment *attach,
+					  struct sg_table *sgt,
+					  enum dma_data_direction dir)
+{
+	/* Not implemented. The unmap is done at dmabuf_exp_ops_detach(). */
+}
+
+static void dmabuf_exp_release(struct kref *kref)
+{
+	struct gntdev_dmabuf *gntdev_dmabuf =
+		container_of(kref, struct gntdev_dmabuf, u.exp.refcount);
+
+	dmabuf_exp_wait_obj_signal(gntdev_dmabuf->priv, gntdev_dmabuf);
+	list_del(&gntdev_dmabuf->next);
+	kfree(gntdev_dmabuf);
+}
+
+static void dmabuf_exp_ops_release(struct dma_buf *dma_buf)
+{
+	struct gntdev_dmabuf *gntdev_dmabuf = dma_buf->priv;
+	struct gntdev_dmabuf_priv *priv = gntdev_dmabuf->priv;
+
+	gntdev_remove_map(gntdev_dmabuf->u.exp.priv, gntdev_dmabuf->u.exp.map);
+	mutex_lock(&priv->lock);
+	kref_put(&gntdev_dmabuf->u.exp.refcount, dmabuf_exp_release);
+	mutex_unlock(&priv->lock);
+}
+
+static void *dmabuf_exp_ops_kmap_atomic(struct dma_buf *dma_buf,
+					unsigned long page_num)
+{
+	/* Not implemented. */
+	return NULL;
+}
+
+static void dmabuf_exp_ops_kunmap_atomic(struct dma_buf *dma_buf,
+					 unsigned long page_num, void *addr)
+{
+	/* Not implemented. */
+}
+
+static void *dmabuf_exp_ops_kmap(struct dma_buf *dma_buf,
+				 unsigned long page_num)
+{
+	/* Not implemented. */
+	return NULL;
+}
+
+static void dmabuf_exp_ops_kunmap(struct dma_buf *dma_buf,
+				  unsigned long page_num, void *addr)
+{
+	/* Not implemented. */
+}
+
+static int dmabuf_exp_ops_mmap(struct dma_buf *dma_buf,
+			       struct vm_area_struct *vma)
+{
+	/* Not implemented. */
+	return 0;
+}
+
+static const struct dma_buf_ops dmabuf_exp_ops = {
+	.attach = dmabuf_exp_ops_attach,
+	.detach = dmabuf_exp_ops_detach,
+	.map_dma_buf = dmabuf_exp_ops_map_dma_buf,
+	.unmap_dma_buf = dmabuf_exp_ops_unmap_dma_buf,
+	.release = dmabuf_exp_ops_release,
+	.map = dmabuf_exp_ops_kmap,
+	.map_atomic = dmabuf_exp_ops_kmap_atomic,
+	.unmap = dmabuf_exp_ops_kunmap,
+	.unmap_atomic = dmabuf_exp_ops_kunmap_atomic,
+	.mmap = dmabuf_exp_ops_mmap,
+};
+
+struct gntdev_dmabuf_export_args {
+	struct gntdev_priv *priv;
+	struct gntdev_grant_map *map;
+	struct gntdev_dmabuf_priv *dmabuf_priv;
+	struct device *dev;
+	int count;
+	struct page **pages;
+	u32 fd;
+};
+
+static int dmabuf_exp_from_pages(struct gntdev_dmabuf_export_args *args)
+{
+	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+	struct gntdev_dmabuf *gntdev_dmabuf;
+	int ret = 0;
Not necessary.
Will print hex value of the flags.
+
+	gntdev_dmabuf = kzalloc(sizeof(*gntdev_dmabuf), GFP_KERNEL);
+	if (!gntdev_dmabuf)
+		return -ENOMEM;
+
+	kref_init(&gntdev_dmabuf->u.exp.refcount);
+
+	gntdev_dmabuf->priv = args->dmabuf_priv;
+	gntdev_dmabuf->nr_pages = args->count;
+	gntdev_dmabuf->pages = args->pages;
+	gntdev_dmabuf->u.exp.priv = args->priv;
+	gntdev_dmabuf->u.exp.map = args->map;
+
+	exp_info.exp_name = KBUILD_MODNAME;
+	if (args->dev->driver && args->dev->driver->owner)
+		exp_info.owner = args->dev->driver->owner;
+	else
+		exp_info.owner = THIS_MODULE;
+	exp_info.ops = &dmabuf_exp_ops;
+	exp_info.size = args->count << PAGE_SHIFT;
+	exp_info.flags = O_RDWR;
+	exp_info.priv = gntdev_dmabuf;
+
+	gntdev_dmabuf->dmabuf = dma_buf_export(&exp_info);
+	if (IS_ERR(gntdev_dmabuf->dmabuf)) {
+		ret = PTR_ERR(gntdev_dmabuf->dmabuf);
+		gntdev_dmabuf->dmabuf = NULL;
+		goto fail;
+	}
+
+	ret = dma_buf_fd(gntdev_dmabuf->dmabuf, O_CLOEXEC);
+	if (ret < 0)
+		goto fail;
+
+	gntdev_dmabuf->fd = ret;
+	args->fd = ret;
+
+	pr_debug("Exporting DMA buffer with fd %d\n", ret);
+
+	mutex_lock(&args->dmabuf_priv->lock);
+	list_add(&gntdev_dmabuf->next, &args->dmabuf_priv->exp_list);
+	mutex_unlock(&args->dmabuf_priv->lock);
+	return 0;
+
+fail:
+	if (gntdev_dmabuf->dmabuf)
+		dma_buf_put(gntdev_dmabuf->dmabuf);
+	kfree(gntdev_dmabuf);
+	return ret;
+}
+
+static struct gntdev_grant_map *
+dmabuf_exp_alloc_backing_storage(struct gntdev_priv *priv, int dmabuf_flags,
+				 int count)
+{
+	struct gntdev_grant_map *map;
+
+	if (unlikely(count <= 0))
+		return ERR_PTR(-EINVAL);
+
+	if ((dmabuf_flags & GNTDEV_DMA_FLAG_WC) &&
+	    (dmabuf_flags & GNTDEV_DMA_FLAG_COHERENT)) {
+		pr_debug("Wrong dma-buf flags: either WC or coherent, not both\n");
Why not just print the value of the flags?
Will add
+		return ERR_PTR(-EINVAL);
+	}
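A minimal sketch of what that message could look like once the flags value is printed in hex (the wording is only an example):

		pr_debug("Wrong dma-buf flags 0x%x: either WC or coherent, not both\n",
			 dmabuf_flags);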
+
+	map = gntdev_alloc_map(priv, count, dmabuf_flags);
+	if (!map)
+		return ERR_PTR(-ENOMEM);
+
+	if (unlikely(gntdev_account_mapped_pages(count))) {
+		pr_debug("can't map: over limit\n");
I think printing @count value here would be useful.
No, will remove. I was thinking here about if user-space ignores
+		gntdev_put_map(NULL, map);
+		return ERR_PTR(-ENOMEM);
+	}
+	return map;
 }
  int gntdev_dmabuf_exp_from_refs(struct gntdev_priv *priv, int flags,
 				int count, u32 domid, u32 *refs, u32 *fd)
 {
+	struct gntdev_grant_map *map;
+	struct gntdev_dmabuf_export_args args;
+	int i, ret;
+
 	*fd = -1;
Is this still needed?
Well, I wish I could, but the main difference when calling gntdev_put_map(priv, map)
-	return -EINVAL;
+
+	map = dmabuf_exp_alloc_backing_storage(priv, flags, count);
+	if (IS_ERR(map))
+		return PTR_ERR(map);
+
+	for (i = 0; i < count; i++) {
+		map->grants[i].domid = domid;
+		map->grants[i].ref = refs[i];
+	}
+
+	mutex_lock(&priv->lock);
+	gntdev_add_map(priv, map);
+	mutex_unlock(&priv->lock);
+
+	map->flags |= GNTMAP_host_map;
+#if defined(CONFIG_X86)
+	map->flags |= GNTMAP_device_map;
+#endif
+
+	ret = gntdev_map_grant_pages(map);
+	if (ret < 0)
+		goto out;
+
+	args.priv = priv;
+	args.map = map;
+	args.dev = priv->dma_dev;
+	args.dmabuf_priv = priv->dmabuf_priv;
+	args.count = map->count;
+	args.pages = map->pages;
+
+	ret = dmabuf_exp_from_pages(&args);
+	if (ret < 0)
+		goto out;
+
+	*fd = args.fd;
+	return 0;
+
+out:
+	gntdev_remove_map(priv, map);
+	return ret;
 }
  /* DMA buffer import support. */
@@ -63,6 +525,10 @@ struct gntdev_dmabuf_priv *gntdev_dmabuf_init(void)
 	if (!priv)
 		return ERR_PTR(-ENOMEM);
 
+	mutex_init(&priv->lock);
+	INIT_LIST_HEAD(&priv->exp_list);
+	INIT_LIST_HEAD(&priv->exp_wait_list);
+
 	return priv;
 }
 diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index e82660d81d7e..5f93cd534840 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -262,6 +262,16 @@ void gntdev_put_map(struct gntdev_priv *priv, struct gntdev_grant_map *map)
 	gntdev_free_map(map);
 }
 +#ifdef CONFIG_XEN_GNTDEV_DMABUF
+void gntdev_remove_map(struct gntdev_priv *priv, struct gntdev_grant_map *map)
+{
+	mutex_lock(&priv->lock);
+	list_del(&map->next);
+	gntdev_put_map(NULL /* already removed */, map);
Why not just call gntdev_put_map(priv, map) and then not have this routine at all?
I really dislike the fact that we are taking a lock here that gntdev_put_map() takes as well, although not with a NULL argument. (And yes, I see that gntdev_release() does it too.)
This can be re-factored later, I guess?
Thank you,
-boris
+	mutex_unlock(&priv->lock);
+}
+#endif
+
 /* ------------------------------------------------------------------ */
  static int find_grant_ptes(pte_t *pte, pgtable_t token,