Hi
On 08.07.21 23:37, Daniel Vetter wrote:
> On Thu, Jun 24, 2021 at 11:52 AM Thomas Zimmermann <tzimmermann@suse.de> wrote:
>> Moving the driver-specific mmap code into a GEM object function allows
>> for using DRM helpers for various mmap callbacks.
>>
>> The respective vgem functions are being removed. The file_operations
>> structure vgem_driver_fops is now being created by the helper macro
>> DEFINE_DRM_GEM_FOPS().
>>
>> Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
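
For reference, the conversion described above boils down to roughly the
following sketch. DEFINE_DRM_GEM_FOPS(), struct drm_gem_object_funcs and
drm_gem_prime_mmap() are the DRM helpers the patch relies on; everything
named foo_* is only illustrative:

#include <drm/drm_drv.h>
#include <drm/drm_gem.h>
#include <drm/drm_prime.h>
#include <linux/mm.h>

/*
 * The file_operations, including .mmap = drm_gem_mmap, now come from the
 * DRM core instead of a hand-rolled mmap callback and fops structure.
 */
DEFINE_DRM_GEM_FOPS(foo_driver_fops);

/*
 * Driver-specific mmap setup lives in the GEM object function; actual
 * pages can still be provided by the object's .vm_ops fault handler,
 * as vgem does.
 */
static int foo_gem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
        vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
        vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
        vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
        return 0;
}

static const struct drm_gem_object_funcs foo_gem_object_funcs = {
        .mmap = foo_gem_mmap,
        /* .free, .vm_ops, ... as before */
};

static const struct drm_driver foo_driver = {
        .fops = &foo_driver_fops,
        /*
         * dma-buf mmap goes through the generic helper, which calls the
         * same .mmap object function as drm_gem_mmap() does.
         */
        .gem_prime_mmap = drm_gem_prime_mmap,
        /* ... */
};

The idea is that both an mmap() on the DRM device file and an mmap() on an
exported dma-buf funnel into the same GEM object callback.
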
> Might be this one, might be a different one (there's also a bunch of
> core kernel patches that got into drm-tip together with this patch),
> but vgem goes boom after this landed in CI:
>
> https://intel-gfx-ci.01.org/tree/drm-tip/igt@vgem_basic@unload.html
>
> Can you pls take a quick look? It's in dma-buf fault stuff, so not
> entirely unlikely. Ville pointed this out on irc.

The patch at
https://lore.kernel.org/dri-devel/20210709114731.31467-1-tzimmermann@suse.de...
fixes the issue for me.
Best regards
Thomas

> -Daniel
>
>> ---
>>  drivers/gpu/drm/vgem/vgem_drv.c | 46 ++++-----------------------------
>>  1 file changed, 5 insertions(+), 41 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
>> index bf38a7e319d1..df634aa52638 100644
>> --- a/drivers/gpu/drm/vgem/vgem_drv.c
>> +++ b/drivers/gpu/drm/vgem/vgem_drv.c
>> @@ -239,32 +239,7 @@ static struct drm_ioctl_desc vgem_ioctls[] = {
>>          DRM_IOCTL_DEF_DRV(VGEM_FENCE_SIGNAL, vgem_fence_signal_ioctl, DRM_RENDER_ALLOW),
>>  };
>>
>> -static int vgem_mmap(struct file *filp, struct vm_area_struct *vma)
>> -{
>> -        unsigned long flags = vma->vm_flags;
>> -        int ret;
>> -
>> -        ret = drm_gem_mmap(filp, vma);
>> -        if (ret)
>> -                return ret;
>> -
>> -        /* Keep the WC mmaping set by drm_gem_mmap() but our pages
>> -         * are ordinary and not special.
>> -         */
>> -        vma->vm_flags = flags | VM_DONTEXPAND | VM_DONTDUMP;
>> -        return 0;
>> -}
>> -
>> -static const struct file_operations vgem_driver_fops = {
>> -        .owner = THIS_MODULE,
>> -        .open = drm_open,
>> -        .mmap = vgem_mmap,
>> -        .poll = drm_poll,
>> -        .read = drm_read,
>> -        .unlocked_ioctl = drm_ioctl,
>> -        .compat_ioctl = drm_compat_ioctl,
>> -        .release = drm_release,
>> -};
>> +DEFINE_DRM_GEM_FOPS(vgem_driver_fops);
>>
>>  static struct page **vgem_pin_pages(struct drm_vgem_gem_object *bo)
>>  {
>> @@ -387,24 +362,12 @@ static void vgem_prime_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map)
>>          vgem_unpin_pages(bo);
>>  }
>>
>> -static int vgem_prime_mmap(struct drm_gem_object *obj,
>> -                           struct vm_area_struct *vma)
>> +static int vgem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
>>  {
>> -        int ret;
>> -
>> -        if (obj->size < vma->vm_end - vma->vm_start)
>> -                return -EINVAL;
>> -
>> -        if (!obj->filp)
>> -                return -ENODEV;
>> -
>> -        ret = call_mmap(obj->filp, vma);
>> -        if (ret)
>> -                return ret;
>> -
>>          vma_set_file(vma, obj->filp);
>>          vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
>>          vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
>> +        vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
>>
>>          return 0;
>>  }
>> @@ -416,6 +379,7 @@ static const struct drm_gem_object_funcs vgem_gem_object_funcs = {
>>          .get_sg_table = vgem_prime_get_sg_table,
>>          .vmap = vgem_prime_vmap,
>>          .vunmap = vgem_prime_vunmap,
>> +        .mmap = vgem_prime_mmap,
>>          .vm_ops = &vgem_gem_vm_ops,
>>  };
>>
>> @@ -433,7 +397,7 @@ static const struct drm_driver vgem_driver = {
>>          .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
>>          .gem_prime_import = vgem_prime_import,
>>          .gem_prime_import_sg_table = vgem_prime_import_sg_table,
>> -        .gem_prime_mmap = vgem_prime_mmap,
>> +        .gem_prime_mmap = drm_gem_prime_mmap,
>>          .name = DRIVER_NAME,
>>          .desc = DRIVER_DESC,
>> --
>> 2.32.0