On Tue, Aug 16, 2016 at 02:58:38PM +0200, Noralf Trønnes wrote:
On 15.08.2016 08:59, Daniel Vetter wrote:
On Sun, Aug 14, 2016 at 06:52:04PM +0200, Noralf Trønnes wrote:
The SimpleDRM driver binds to simple-framebuffer devices and provides a
DRM/KMS API. It provides only a single CRTC+encoder+connector combination
plus one initial mode.
Userspace can create dumb-buffers which can be blitted into the real
framebuffer, similar to UDL. No access to the real framebuffer is allowed
(compared to earlier versions of this driver) to avoid security issues.
Furthermore, this way we can support arbitrary modes as long as we have a
conversion-helper.
The driver was originally written by David Herrmann in 2014.
My main contribution is to make use of drm_simple_kms_helper and
rework the probe path to avoid use of the deprecated drm_platform_init()
and drm_driver.{load,unload}().
Additions have also been made for later changes to the Device Tree binding
document, like support for clocks, regulators and having the node under
/chosen. This code was lifted verbatim from simplefb.c.
Cc: dh.herrmann@xxxxxxxxx
Cc: libv@xxxxxxxxx
Signed-off-by: Noralf Trønnes <noralf@xxxxxxxxxxx>
<snip>

diff --git a/drivers/gpu/drm/simpledrm/simpledrm_gem.c b/drivers/gpu/drm/simpledrm/simpledrm_gem.c
new file mode 100644
index 0000000..81bd7c5
--- /dev/null
+++ b/drivers/gpu/drm/simpledrm/simpledrm_gem.c
+struct sdrm_gem_object *sdrm_gem_alloc_object(struct drm_device *ddev,
+ size_t size)
+{
+ struct sdrm_gem_object *obj;
+
+ WARN_ON(!size || (size & ~PAGE_MASK) != 0);
+
+ obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+ if (!obj)
+ return NULL;
+
+ drm_gem_private_object_init(ddev, &obj->base, size);
+ return obj;
+}
+
+void sdrm_gem_free_object(struct drm_gem_object *gobj)
+{
+ struct sdrm_gem_object *obj = to_sdrm_bo(gobj);
+ struct drm_device *ddev = gobj->dev;
+
+ if (obj->pages) {
+ /* kill all user-space mappings */
+ drm_vma_node_unmap(&gobj->vma_node,
+ ddev->anon_inode->i_mapping);
+ sdrm_gem_put_pages(obj);
+ }
+
+ if (gobj->import_attach)
+ drm_prime_gem_destroy(gobj, obj->sg);
+
+ drm_gem_free_mmap_offset(gobj);
+ drm_gem_object_release(gobj);
+ kfree(obj);
+}
+
+int sdrm_dumb_create(struct drm_file *dfile, struct drm_device *ddev,
+ struct drm_mode_create_dumb *args)
+{
+ struct sdrm_gem_object *obj;
+ int r;
+
+ if (args->flags)
+ return -EINVAL;
+
+ /* overflow checks are done by DRM core */
+ args->pitch = (args->bpp + 7) / 8 * args->width;
+ args->size = PAGE_ALIGN(args->pitch * args->height);
+
+ obj = sdrm_gem_alloc_object(ddev, args->size);
+ if (!obj)
+ return -ENOMEM;
+
+ r = drm_gem_handle_create(dfile, &obj->base, &args->handle);
+ if (r) {
+ drm_gem_object_unreference_unlocked(&obj->base);
+ return r;
+ }
+
+ /* handle owns a reference */
+ drm_gem_object_unreference_unlocked(&obj->base);
+ return 0;
+}
+
+int sdrm_dumb_destroy(struct drm_file *dfile, struct drm_device *ddev,
+ uint32_t handle)
+{
+ return drm_gem_handle_delete(dfile, handle);
+}
I wonder whether some dumb+gem helpers wouldn't be useful to roll out. At
least drm_dumb_gem_destroy.c would be pretty simple.

There's already a drm_gem_dumb_destroy() in drm_gem.c.

The drm_driver->gem_create_object callback makes it possible to do a
generic dumb create:
int drm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
			struct drm_mode_create_dumb *args)
{
	struct drm_gem_object *obj;
	int ret;

	if (!dev->driver->gem_create_object)
		return -EINVAL;

	/* overflow checks are done by the dumb_create ioctl */
	args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	args->size = PAGE_ALIGN(args->pitch * args->height);

	obj = dev->driver->gem_create_object(dev, args->size);
	if (IS_ERR_OR_NULL(obj))
		return obj ? PTR_ERR(obj) : -ENOMEM;

	ret = drm_gem_handle_create(file, obj, &args->handle);
	drm_gem_object_unreference_unlocked(obj);

	return ret;
}
EXPORT_SYMBOL(drm_gem_dumb_create);
struct drm_gem_object *sdrm_gem_create_object(struct drm_device *ddev,
					      size_t size)
{
	struct sdrm_gem_object *sobj;

	sobj = sdrm_gem_alloc_object(ddev, size);
	if (!sobj)
		return ERR_PTR(-ENOMEM);

	return &sobj->base;
}
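
With those two in place, the driver side would presumably boil down to
something like this (untested sketch; the helper names are the ones
proposed above):

static struct drm_driver sdrm_drm_driver = {
	/* .fops, .name, .ioctls etc. omitted here */
	.driver_features	= DRIVER_GEM | DRIVER_MODESET,
	.gem_create_object	= sdrm_gem_create_object,
	.gem_free_object	= sdrm_gem_free_object,
	.dumb_create		= drm_gem_dumb_create,	/* proposed generic helper */
	.dumb_destroy		= drm_gem_dumb_destroy,	/* already in drm_gem.c */
};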
+int sdrm_dumb_map_offset(struct drm_file *dfile, struct drm_device *ddev,
+ uint32_t handle, uint64_t *offset)
+{
+ struct drm_gem_object *gobj;
+ int r;
+
+ mutex_lock(&ddev->struct_mutex);
+
+ gobj = drm_gem_object_lookup(dfile, handle);
+ if (!gobj) {
+ r = -ENOENT;
+ goto out_unlock;
+ }
+
+ r = drm_gem_create_mmap_offset(gobj);
+ if (r)
+ goto out_unref;
+
+ *offset = drm_vma_node_offset_addr(&gobj->vma_node);
+
+out_unref:
+ drm_gem_object_unreference(gobj);
+out_unlock:
+ mutex_unlock(&ddev->struct_mutex);
+ return r;
+}
+

There's still some struct mutex here.

Maybe this addition to drm_gem.c as well:
int drm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
			    uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret;

	obj = drm_gem_object_lookup(file, handle);
	if (!obj)
		return -ENOENT;

	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto out_unref;

	*offset = drm_vma_node_offset_addr(&obj->vma_node);

out_unref:
	drm_gem_object_unreference_unlocked(obj);

	return ret;
}
Yeah, sounds good for adding the above two. Feel free to roll them out to
drivers (or not).
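
For reference, the userspace side these callbacks serve is the standard
dumb-buffer flow; a rough example, error handling omitted and the device
node assumed to be /dev/dri/card0:

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <drm/drm.h>

int main(void)
{
	struct drm_mode_create_dumb create = {
		.width = 800, .height = 600, .bpp = 32,
	};
	struct drm_mode_map_dumb map = { 0 };
	int fd = open("/dev/dri/card0", O_RDWR | O_CLOEXEC);
	void *fb;

	/* DRM_IOCTL_MODE_CREATE_DUMB ends up in drm_driver.dumb_create() */
	ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create);

	/* DRM_IOCTL_MODE_MAP_DUMB ends up in drm_driver.dumb_map_offset() */
	map.handle = create.handle;
	ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map);

	/* the fake offset then goes through the driver's ->mmap path below */
	fb = mmap(NULL, create.size, PROT_READ | PROT_WRITE, MAP_SHARED,
		  fd, map.offset);
	memset(fb, 0xff, create.size);

	return 0;
}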
+int sdrm_drm_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct drm_file *priv = filp->private_data;
+ struct drm_device *dev = priv->minor->dev;
+ struct drm_vma_offset_node *node;
+ struct drm_gem_object *gobj;
+ struct sdrm_gem_object *obj;
+ size_t size, i, num;
+ int r;
+
+ if (drm_device_is_unplugged(dev))
+ return -ENODEV;
+
+ drm_vma_offset_lock_lookup(dev->vma_offset_manager);
+ node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
+ vma->vm_pgoff,
+ vma_pages(vma));
+ drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
+
+ if (!drm_vma_node_is_allowed(node, filp))
+ return -EACCES;
+
+ gobj = container_of(node, struct drm_gem_object, vma_node);
+ obj = to_sdrm_bo(gobj);
+ size = drm_vma_node_size(node) << PAGE_SHIFT;
+ if (size < vma->vm_end - vma->vm_start)
+ return -EINVAL;
+
+ r = sdrm_gem_get_pages(obj);
+ if (r < 0)
+ return r;
+
+ /* prevent dmabuf-imported mmap to user-space */
+ if (!obj->pages)
+ return -EACCES;
+
+ vma->vm_flags |= VM_DONTEXPAND;
+ vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
+
+ num = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+ for (i = 0; i < num; ++i) {
+ r = vm_insert_page(vma, vma->vm_start + i * PAGE_SIZE,
+ obj->pages[i]);
+ if (r < 0) {
+ if (i > 0)
+ zap_vma_ptes(vma, vma->vm_start, i * PAGE_SIZE);
+ return r;
+ }
+ }
+
+ return 0;
+}
Why can't we just redirect to the underlying shmem mmap here (after
argument checking)?

I don't know what that means, but looking at vc4 it seems I can use
drm_gem_mmap() to remove some boilerplate:
int sdrm_drm_mmap(struct file *filp, struct vm_area_struct *vma)
{
	unsigned long vm_flags = vma->vm_flags;
	struct sdrm_gem_object *sobj;
	struct drm_gem_object *obj;
	size_t i, num;
	int r;

	r = drm_gem_mmap(filp, vma);
	if (r)
		return r;

	obj = vma->vm_private_data;
	sobj = to_sdrm_bo(obj);

	r = sdrm_gem_get_pages(sobj);
	if (r < 0)
		return r;

	/* prevent dmabuf-imported mmap to user-space */
	if (!sobj->pages)
		return -EACCES;

	/* drm_gem_mmap() sets flags that we don't want */
	vma->vm_flags = vm_flags | VM_DONTEXPAND;
	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));

	num = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
	for (i = 0; i < num; ++i) {
		r = vm_insert_page(vma, vma->vm_start + i * PAGE_SIZE,
				   sobj->pages[i]);
		if (r < 0) {
			if (i > 0)
				zap_vma_ptes(vma, vma->vm_start, i * PAGE_SIZE);
			return r;
		}
	}

	return 0;
}
drm_gem_mmap() requires drm_driver->gem_vm_ops to be set:
const struct vm_operations_struct sdrm_gem_vm_ops = {
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};
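
The rest of the hookup would then presumably be the usual DRM fops, with
drm_driver.gem_vm_ops pointing at the struct above (untested sketch):

static const struct file_operations sdrm_drm_fops = {
	.owner		= THIS_MODULE,
	.open		= drm_open,
	.release	= drm_release,
	.unlocked_ioctl	= drm_ioctl,
	.poll		= drm_poll,
	.read		= drm_read,
	.llseek		= noop_llseek,
	.mmap		= sdrm_drm_mmap,	/* the drm_gem_mmap() wrapper above */
};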
And browsing the code a bit more shows that tegra, udl, etnaviv and vgem
do the vm_insert_page() thing in the vm_operations_struct->fault()
handler.
So maybe this makes sense for simpledrm as well:
static int sdrm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct sdrm_gem_object *sobj = to_sdrm_bo(obj);
	loff_t num_pages;
	pgoff_t offset;
	int r;

	if (!sobj->pages)
		return VM_FAULT_SIGBUS;

	offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
		 PAGE_SHIFT;
	num_pages = DIV_ROUND_UP(obj->size, PAGE_SIZE);

	if (offset >= num_pages)
		return VM_FAULT_SIGBUS;

	r = vm_insert_page(vma, (unsigned long)vmf->virtual_address,
			   sobj->pages[offset]);
	switch (r) {
	case -EAGAIN:
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	}

	return VM_FAULT_SIGBUS;
}
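
If simpledrm goes that route, the vm_operations_struct above presumably
just grows a .fault entry, and the vm_insert_page() loop in
sdrm_drm_mmap() can be dropped:

const struct vm_operations_struct sdrm_gem_vm_ops = {
	.fault = sdrm_gem_fault,
	.open  = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};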
That's still a lot for what amounts to reimplementing mmap on shmem, but
badly. What I mean with redirecting is pointing the entire ->mmap
operation to the mmap implementation of the underlying shmem file. Roughly:
/* normal gem mmap checks first */
/* redirect to shmem mmap */
vma->vm_file = obj->filp;
vma->vm_pgoff = 0;
return obj->filp->f_op->mmap(obj->filp, vma);
Much less code ;-)
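
A rough, untested sketch of what that redirect could look like, reusing
the lookup from the patch hunk above and the same vm_file swapping that
dma_buf_mmap() does:

static int sdrm_drm_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_vma_offset_node *node;
	struct drm_gem_object *gobj;
	struct file *oldfile;
	int ret;

	if (drm_device_is_unplugged(dev))
		return -ENODEV;

	/* normal gem mmap checks first */
	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
						  vma->vm_pgoff,
						  vma_pages(vma));
	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
	if (!node)
		return -EINVAL;
	if (!drm_vma_node_is_allowed(node, filp))
		return -EACCES;

	gobj = container_of(node, struct drm_gem_object, vma_node);
	if (gobj->import_attach)
		return -EACCES;		/* dma-buf imports have no shmem file */
	if ((drm_vma_node_size(node) << PAGE_SHIFT) <
	    vma->vm_end - vma->vm_start)
		return -EINVAL;

	/* redirect to shmem mmap, dropping the DRM file from the vma */
	get_file(gobj->filp);
	oldfile = vma->vm_file;
	vma->vm_file = gobj->filp;
	vma->vm_pgoff = 0;

	ret = gobj->filp->f_op->mmap(gobj->filp, vma);
	if (ret) {
		/* restore on failure, like dma_buf_mmap() does */
		vma->vm_file = oldfile;
		fput(gobj->filp);
	} else if (oldfile) {
		fput(oldfile);
	}

	return ret;
}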