Re: [PATCH] drm/xen-front: Make shmem backed display buffer coherent

From: Oleksandr Andrushchenko
Date: Wed Dec 19 2018 - 03:18:12 EST

On 12/18/18 9:20 PM, Noralf Trønnes wrote:

Den 27.11.2018 11.32, skrev Oleksandr Andrushchenko:
From: Oleksandr Andrushchenko <oleksandr_andrushchenko@xxxxxxxx>

When GEM backing storage is allocated with drm_gem_get_pages
the backing pages may be cached, thus making it possible that
the backend sees only partial content of the buffer which may
lead to screen artifacts. Make sure that the frontend's
memory is coherent and the backend always sees correct display
buffer content.

Fixes: c575b7eeb89f ("drm/xen-front: Add support for Xen PV display frontend")

Signed-off-by: Oleksandr Andrushchenko <oleksandr_andrushchenko@xxxxxxxx>
 drivers/gpu/drm/xen/xen_drm_front_gem.c | 62 +++++++++++++++++++------
 1 file changed, 48 insertions(+), 14 deletions(-)

diff --git a/drivers/gpu/drm/xen/xen_drm_front_gem.c b/drivers/gpu/drm/xen/xen_drm_front_gem.c
index 47ff019d3aef..c592735e49d2 100644
--- a/drivers/gpu/drm/xen/xen_drm_front_gem.c
+++ b/drivers/gpu/drm/xen/xen_drm_front_gem.c
@@ -33,8 +33,11 @@ struct xen_gem_object {
     /* set for buffers allocated by the backend */
     bool be_alloc;

-    /* this is for imported PRIME buffer */
-    struct sg_table *sgt_imported;
+    /*
+     * this is for imported PRIME buffer or the one allocated via
+     * drm_gem_get_pages.
+     */
+    struct sg_table *sgt;
  static inline struct xen_gem_object *
@@ -77,10 +80,21 @@ static struct xen_gem_object *gem_create_obj(struct drm_device *dev,
     return xen_obj;

+struct sg_table *xen_drm_front_gem_get_sg_table(struct drm_gem_object *gem_obj)
+    struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);
+    if (!xen_obj->pages)
+    return drm_prime_pages_to_sg(xen_obj->pages, xen_obj->num_pages);
 static struct xen_gem_object *gem_create(struct drm_device *dev, size_t size)
     struct xen_drm_front_drm_info *drm_info = dev->dev_private;
     struct xen_gem_object *xen_obj;
+    struct address_space *mapping;
     int ret;

     size = round_up(size, PAGE_SIZE);
@@ -113,10 +127,14 @@ static struct xen_gem_object *gem_create(struct drm_device *dev, size_t size)
         xen_obj->be_alloc = true;
         return xen_obj;
      * need to allocate backing pages now, so we can share those
      * with the backend

Let's see if I understand what you're doing:

Here you say that the pages should be DMA accessible for devices that can
only see 4GB.

Yes, your understanding is correct. As we are a para-virtualized device we

do not have strict requirements for 32-bit DMA. But, via dma-buf export,

the buffer we create can be used by real HW, e.g. one can pass-through

real HW devices into a guest domain and they can import our buffer (yes,

they can be IOMMU backed and other conditions may apply).

So, this is why we are limiting to DMA32 here, just to allow more possible


+    mapping = xen_obj->base.filp->f_mapping;
+    mapping_set_gfp_mask(mapping, GFP_USER | __GFP_DMA32);
     xen_obj->num_pages = DIV_ROUND_UP(size, PAGE_SIZE);
     xen_obj->pages = drm_gem_get_pages(&xen_obj->base);
     if (IS_ERR_OR_NULL(xen_obj->pages)) {
@@ -125,8 +143,27 @@ static struct xen_gem_object *gem_create(struct drm_device *dev, size_t size)
         goto fail;

+    xen_obj->sgt = xen_drm_front_gem_get_sg_table(&xen_obj->base);
+    if (IS_ERR_OR_NULL(xen_obj->sgt)){
+        ret = PTR_ERR(xen_obj->sgt);
+        xen_obj->sgt = NULL;
+        goto fail_put_pages;
+    }
+    if (!dma_map_sg(dev->dev, xen_obj->sgt->sgl, xen_obj->sgt->nents,

Are you using the DMA streaming API as a way to flush the caches?
Does this mean that GFP_USER isn't making the buffer coherent?

No, it didn't help. I had a question [1] if there are any other better way

to achieve the same, but didn't have any response yet. So, I implemented

it via DMA API which helped.


+        goto fail_free_sgt;
+    }
     return xen_obj;

+    sg_free_table(xen_obj->sgt);
+    xen_obj->sgt = NULL;
+    drm_gem_put_pages(&xen_obj->base, xen_obj->pages, true, false);
+    xen_obj->pages = NULL;
     DRM_ERROR("Failed to allocate buffer with size %zu\n", size);
     return ERR_PTR(ret);
@@ -149,7 +186,7 @@ void xen_drm_front_gem_free_object_unlocked(struct drm_gem_object *gem_obj)
     struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);

     if (xen_obj->base.import_attach) {
-        drm_prime_gem_destroy(&xen_obj->base, xen_obj->sgt_imported);
+        drm_prime_gem_destroy(&xen_obj->base, xen_obj->sgt);
         gem_free_pages_array(xen_obj);
     } else {
         if (xen_obj->pages) {
@@ -158,6 +195,13 @@ void xen_drm_front_gem_free_object_unlocked(struct drm_gem_object *gem_obj)
                 gem_free_pages_array(xen_obj);
+                if (xen_obj->sgt) {
+                    dma_unmap_sg(xen_obj->base.dev->dev,
+                    sg_free_table(xen_obj->sgt);
             drm_gem_put_pages(&xen_obj->base,
                       xen_obj->pages, true, false);
@@ -174,16 +218,6 @@ struct page **xen_drm_front_gem_get_pages(struct drm_gem_object *gem_obj)
     return xen_obj->pages;

-struct sg_table *xen_drm_front_gem_get_sg_table(struct drm_gem_object *gem_obj)
-    struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);
-    if (!xen_obj->pages)
-    return drm_prime_pages_to_sg(xen_obj->pages, xen_obj->num_pages);
 struct drm_gem_object *
 xen_drm_front_gem_import_sg_table(struct drm_device *dev,
                   struct dma_buf_attachment *attach,
@@ -203,7 +237,7 @@ xen_drm_front_gem_import_sg_table(struct drm_device *dev,
     if (ret < 0)
         return ERR_PTR(ret);

-    xen_obj->sgt_imported = sgt;
+    xen_obj->sgt = sgt;

     ret = drm_prime_sg_to_page_addr_arrays(sgt, xen_obj->pages,
dri-devel mailing list

Thank you,