Hi Herrmann
-----Original Message----- From: David Herrmann [mailto:dh.herrmann@gmail.com] Sent: Monday, October 13, 2014 10:27 PM To: Cheng, Yao Cc: Intel Graphics Development; Jiang, Fei; dri-devel@lists.freedesktop.org; Vetter, Daniel Subject: Re: [RFC PATCH 2/3] drm/ipvr: drm driver for vxd392
Hi
static struct drm_ioctl_desc ipvr_gem_ioctls[] = {
        DRM_IOCTL_DEF_DRV(IPVR_CONTEXT_CREATE,
                        ipvr_context_create_ioctl, DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(IPVR_CONTEXT_DESTROY,
                        ipvr_context_destroy_ioctl, DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(IPVR_MISC,
                        ipvr_misc_ioctl, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(IPVR_GEM_EXECBUFFER,
                        ipvr_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(IPVR_GEM_BUSY,
                        ipvr_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(IPVR_GEM_CREATE,
                        ipvr_gem_create_ioctl, DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(IPVR_GEM_MMAP,
                        ipvr_gem_mmap_ioctl, DRM_UNLOCKED),
Why do you need this ioctl? mmap() should work perfectly fine. I don't see why you require people to use an ipvr-specific ioctl to map buffers.
Many thanks for your comments. In our existing libdrm helper and userspace drivers, the mmap ioctl was the interface for mapping objects, and we kept the ioctl path for compatibility. Is it mandatory for GEM drivers to implement mmap() instead of a mmap ioctl?
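For reference, the conventional GEM path needs no driver-specific mapping ioctl: the driver sets .mmap = drm_gem_mmap in its file_operations and supplies per-driver vm_ops, and userspace mmap()s a fake offset on the DRM fd (set up kernel-side with drm_gem_create_mmap_offset()). A minimal sketch, where ipvr_gem_fault is a hypothetical fault handler and not part of this patch:

static const struct vm_operations_struct ipvr_gem_vm_ops = {
        .fault = ipvr_gem_fault,        /* hypothetical: map pages on first access */
        .open = drm_gem_vm_open,
        .close = drm_gem_vm_close,
};

/* hooked up via .gem_vm_ops = &ipvr_gem_vm_ops in struct drm_driver,
 * and .mmap = drm_gem_mmap in the file_operations */

With that in place, DRM_IPVR_GEM_MMAP could shrink to a small query returning the fake offset, which would keep the libdrm change minimal.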
        DRM_IOCTL_DEF_DRV(IPVR_SYNC_CPU,
                        ipvr_sync_cpu_ioctl, DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(IPVR_GEM_WAIT,
                        ipvr_gem_wait_ioctl, DRM_AUTH|DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(IPVR_GEM_USERPTR,
                        ipvr_gem_userptr_ioctl, DRM_UNLOCKED),
};
static void ipvr_gem_init(struct drm_device *dev)
{
        struct drm_ipvr_private *dev_priv = dev->dev_private;

        dev_priv->ipvr_bo_slab = kmem_cache_create("ipvr_gem_object",
                        sizeof(union drm_ipvr_gem_objects), 0,
                        SLAB_HWCACHE_ALIGN, NULL);

        INIT_LIST_HEAD(&dev_priv->ipvr_mm.unbound_list);
        INIT_LIST_HEAD(&dev_priv->ipvr_mm.bound_list);
        spin_lock_init(&dev_priv->ipvr_mm.object_stat_lock);
        dev_priv->ipvr_mm.interruptible = true;
}
static void ipvr_gem_setup_mmu(struct drm_device *dev,
                        unsigned long linear_start,
                        unsigned long linear_end,
                        unsigned long tiling_start,
                        unsigned long tiling_end)
{
        /*
         * Let GEM Manage all of the aperture.
         *
         * However, leave one page at the end still bound to the scratch
         * page. There are a number of places where hardware apparently
         * prefetches past the end of the object, and we've seen multiple
         * hangs with the GPU head pointer stuck in a batchbuffer bound at
         * the last page of the aperture. One page should be enough to keep
         * any prefetching inside of the aperture.
         */
        struct drm_ipvr_private *dev_priv = dev->dev_private;
        struct ipvr_address_space *addr_space = &dev_priv->addr_space;

        /* todo: add sanity check */
        addr_space->dev = dev_priv->dev;
        INIT_LIST_HEAD(&addr_space->active_list);
        INIT_LIST_HEAD(&addr_space->inactive_list);

        /* Subtract the guard page ... */
        drm_mm_init(&addr_space->linear_mm, linear_start,
                        linear_end - linear_start - PAGE_SIZE);
        dev_priv->addr_space.linear_start = linear_start;
        dev_priv->addr_space.linear_total = linear_end - linear_start;
        drm_mm_init(&addr_space->tiling_mm, tiling_start,
                        tiling_end - tiling_start - PAGE_SIZE);
        dev_priv->addr_space.tiling_start = tiling_start;
        dev_priv->addr_space.tiling_total = tiling_end - tiling_start;
}
static void ipvr_do_takedown(struct drm_device *dev)
{
        /* todo: check whether mm cleanup is needed here */
        ipvr_ved_uninit(dev);
}
static int32_t ipvr_drm_unload(struct drm_device *dev)
{
        struct drm_ipvr_private *dev_priv = dev->dev_private;

        BUG_ON(!dev->platformdev);
        BUG_ON(atomic_read(&dev_priv->ved_power_usage));
        IPVR_DEBUG_ENTRY("entered.");
        if (dev_priv) {
                WARN_ON(pm_runtime_get_sync(&dev->platformdev->dev) < 0);
                if (dev_priv->ipvr_bo_slab)
                        kmem_cache_destroy(dev_priv->ipvr_bo_slab);
                ipvr_fence_driver_fini(dev_priv);
                ipvr_do_takedown(dev);
                WARN_ON(pm_runtime_put_sync_suspend(&dev->platformdev->dev) < 0);
                if (dev_priv->validate_ctx.buffers)
                        vfree(dev_priv->validate_ctx.buffers);
                if (dev_priv->pf_pd) {
                        ipvr_mmu_free_pagedir(dev_priv->pf_pd);
                        dev_priv->pf_pd = NULL;
                }
                if (dev_priv->mmu) {
                        ipvr_mmu_driver_takedown(dev_priv->mmu);
                        dev_priv->mmu = NULL;
                }
                if (dev_priv->ved_reg_base) {
                        iounmap(dev_priv->ved_reg_base - dev_priv->ved_reg_offset);
                        dev_priv->ved_reg_base = NULL;
                        dev_priv->ved_reg_offset = 0;
                }
                list_del(&dev_priv->default_ctx.head);
                idr_remove(&dev_priv->ipvr_ctx_idr, dev_priv->default_ctx.ctx_id);
                kfree(dev_priv);
        }
        pm_runtime_disable(&dev->platformdev->dev);
        return 0;
}
static int32_t ipvr_drm_load(struct drm_device *dev, unsigned long flags)
{
        struct drm_ipvr_private *dev_priv;
        int32_t ctx_id, ret = 0;
        struct platform_device *platdev;
        struct resource *res_mmio, *res_reg;
        void __iomem *mmio_addr;

        dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
        if (dev_priv == NULL)
                return -ENOMEM;
        dev->dev_private = dev_priv;
        dev_priv->dev = dev;

        BUG_ON(!dev->platformdev);
        platdev = dev->platformdev;
        mutex_init(&dev_priv->cmdbuf_mutex);
        INIT_LIST_HEAD(&dev_priv->validate_ctx.validate_list);

        dev_priv->pci_root = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
        if (!dev_priv->pci_root) {
                kfree(dev_priv);
                return -ENODEV;
        }

        dev->irq = platform_get_irq(platdev, 0);
        if (dev->irq < 0) {
                ret = -ENODEV;
                goto out_err;
        }

        res_mmio = platform_get_resource(platdev, IORESOURCE_MEM, 0);
        res_reg = platform_get_resource(platdev, IORESOURCE_REG, 0);
        if (!res_mmio || !res_reg) {
                ret = -ENXIO;
                goto out_err;
        }

        /* ioremap_wc() returns NULL on failure, so don't test with IS_ERR();
         * use resource_size() so the last byte of the region is included. */
        mmio_addr = ioremap_wc(res_mmio->start, resource_size(res_mmio));
        if (!mmio_addr) {
                ret = -EACCES;
                goto out_err;
        }
        dev_priv->ved_reg_base = mmio_addr + res_reg->start;
        dev_priv->ved_reg_offset = res_reg->start;
        IPVR_DEBUG_VED("ved_reg_base is %p, range is 0x%llx - 0x%llx.\n",
                        dev_priv->ved_reg_base,
                        res_reg->start, res_reg->end);

        platform_set_drvdata(dev->platformdev, dev);
        pm_runtime_enable(&dev->platformdev->dev);
        if (pm_runtime_get_sync(&dev->platformdev->dev) < 0) {
                ret = -EBUSY;
                goto out_err;
        }

        IPVR_DEBUG_INIT("MSVDX_CORE_REV_OFFSET by readl is 0x%x.\n",
                        readl(dev_priv->ved_reg_base + 0x640));
        IPVR_DEBUG_INIT("MSVDX_CORE_REV_OFFSET by VED_REG_READ32 is 0x%x.\n",
                        VED_REG_READ32(MSVDX_CORE_REV_OFFSET));

        /* mmu init */
        dev_priv->mmu = ipvr_mmu_driver_init((void *)0,
                        drm_ipvr_trap_pagefaults,
                        0, dev_priv);
        if (!dev_priv->mmu) {
                ret = -EBUSY;
                goto out_err;
        }

        dev_priv->pf_pd = ipvr_mmu_alloc_pd(dev_priv->mmu, 1, 0);
        if (!dev_priv->pf_pd) {
                ret = -ENOMEM;
                goto out_err;
        }

        ipvr_mmu_set_pd_context(ipvr_mmu_get_default_pd(dev_priv->mmu), 0);
        ipvr_mmu_set_pd_context(dev_priv->pf_pd, 1);

        /*
         * Initialize sequence numbers for the different command
         * submission mechanisms.
         */
        dev_priv->last_seq = 1;

        ipvr_gem_init(dev);
        ipvr_gem_setup_mmu(dev,
                        IPVR_MEM_MMU_LINEAR_START,
                        IPVR_MEM_MMU_LINEAR_END,
                        IPVR_MEM_MMU_TILING_START,
                        IPVR_MEM_MMU_TILING_END);
        ipvr_ved_init(dev);
        WARN_ON(pm_runtime_put_sync_suspend(&dev->platformdev->dev) < 0);

        dev_priv->ved_private->ved_needs_reset = 1;
        mutex_init(&dev_priv->ved_pm_mutex);
        atomic_set(&dev_priv->ved_power_usage, 0);
        ipvr_fence_driver_init(dev_priv);

        dev_priv->validate_ctx.buffers =
                vmalloc(IPVR_NUM_VALIDATE_BUFFERS *
                        sizeof(struct ipvr_validate_buffer));
        if (!dev_priv->validate_ctx.buffers) {
                ret = -ENOMEM;
                goto out_err;
        }

        /* ipvr context initialization */
        INIT_LIST_HEAD(&dev_priv->ipvr_ctx_list);
        spin_lock_init(&dev_priv->ipvr_ctx_lock);
        idr_init(&dev_priv->ipvr_ctx_idr);

        /* default ipvr context is used for scaling, rotation case */
        ctx_id = idr_alloc(&dev_priv->ipvr_ctx_idr, &dev_priv->default_ctx,
                        IPVR_MIN_CONTEXT_ID, IPVR_MAX_CONTEXT_ID,
                        GFP_KERNEL);
        if (ctx_id < 0) {
                ret = -ENOMEM;
                goto out_err;
        }
        dev_priv->default_ctx.ctx_id = ctx_id;
        INIT_LIST_HEAD(&dev_priv->default_ctx.head);
        dev_priv->default_ctx.ctx_type = 0;
        dev_priv->default_ctx.ipvr_fpriv = NULL;

        /* no need to protect with the spinlock during the module load stage */
        list_add(&dev_priv->default_ctx.head, &dev_priv->ipvr_ctx_list);
        dev_priv->default_ctx.tiling_scheme = 0;
        dev_priv->default_ctx.tiling_stride = 0;
        return 0;

out_err:
        ipvr_drm_unload(dev);
        return ret;
}
/*
 * The .open() method is called every time the device is opened by an
 * application. Drivers can allocate per-file private data in this method
 * and store them in the struct drm_file::driver_priv field. Note that
 * the .open() method is called before .firstopen().
 */
static int32_t
ipvr_drm_open(struct drm_device *dev, struct drm_file *file_priv)
{
        struct drm_ipvr_file_private *ipvr_fp;

        IPVR_DEBUG_ENTRY("enter\n");
        ipvr_fp = kzalloc(sizeof(*ipvr_fp), GFP_KERNEL);
        if (unlikely(ipvr_fp == NULL))
                return -ENOMEM;
        file_priv->driver_priv = ipvr_fp;
        return 0;
}
/*
 * The close operation is split into .preclose() and .postclose() methods.
 * Drivers must stop and cleanup all per-file operations in the .preclose()
 * method. For instance pending vertical blanking and page flip events must
 * be cancelled. No per-file operation is allowed on the file handle after
 * returning from the .preclose() method.
 */
static void
ipvr_drm_preclose(struct drm_device *dev, struct drm_file *file_priv)
{
        /* if user didn't destroy ctx explicitly, remove ctx here */
        struct drm_ipvr_private *dev_priv;
        struct drm_ipvr_file_private *ipvr_fpriv;
        struct ved_private *ved_priv;
        struct ipvr_context *ipvr_ctx = NULL;
        unsigned long irq_flags;

        IPVR_DEBUG_ENTRY("enter\n");
        dev_priv = dev->dev_private;
        ipvr_fpriv = file_priv->driver_priv;
        ved_priv = dev_priv->ved_private;
        if (ipvr_fpriv->ctx_id == IPVR_CONTEXT_INVALID_ID)
                return;
        ipvr_ctx = (struct ipvr_context *)
                idr_find(&dev_priv->ipvr_ctx_idr, ipvr_fpriv->ctx_id);
        if (!ipvr_ctx || (ipvr_ctx->ipvr_fpriv != ipvr_fpriv)) {
                IPVR_DEBUG_GENERAL("ctx for id %d has already been destroyed\n",
                                ipvr_fpriv->ctx_id);
                return;
        }

        IPVR_DEBUG_PM("Video: remove context profile %d, entrypoint %d\n",
                        (ipvr_ctx->ctx_type >> 8) & 0xff,
                        (ipvr_ctx->ctx_type & 0xff));
        mutex_lock(&ved_priv->ved_mutex);
        if (ved_priv->ipvr_ctx == ipvr_ctx)
                ved_priv->ipvr_ctx = NULL;
        mutex_unlock(&ved_priv->ved_mutex);

        spin_lock_irqsave(&dev_priv->ipvr_ctx_lock, irq_flags);
        list_del(&ipvr_ctx->head);
        ipvr_fpriv->ctx_id = IPVR_CONTEXT_INVALID_ID;
        spin_unlock_irqrestore(&dev_priv->ipvr_ctx_lock, irq_flags);

        idr_remove(&dev_priv->ipvr_ctx_idr, ipvr_ctx->ctx_id);
        kfree(ipvr_ctx);
}
/*
 * Finally the .postclose() method is called as the last step of the close
 * operation, right before calling the .lastclose() method if no other open
 * file handle exists for the device. Drivers that have allocated per-file
 * private data in the .open() method should free it here.
 */
static void
ipvr_drm_postclose(struct drm_device *dev, struct drm_file *file_priv)
{
        struct drm_ipvr_file_private *ipvr_fpriv = file_priv->driver_priv;

        IPVR_DEBUG_ENTRY("enter\n");
        kfree(ipvr_fpriv);
}
postclose() is deprecated. You can safely move this into preclose().
Thx, will update it in the next version.
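For the record, a rough sketch of folding the cleanup into .preclose(), assuming no other per-file state has to outlive it:

static void
ipvr_drm_preclose(struct drm_device *dev, struct drm_file *file_priv)
{
        struct drm_ipvr_file_private *ipvr_fpriv = file_priv->driver_priv;

        /* ... existing context teardown as above ... */

        kfree(ipvr_fpriv);
        file_priv->driver_priv = NULL;
}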
static irqreturn_t ipvr_irq_handler(int32_t irq, void *arg)
{
        struct drm_device *dev = (struct drm_device *) arg;

        WARN_ON(ved_irq_handler(dev));
        return IRQ_HANDLED;
}
static const struct file_operations ipvr_fops = {
        .owner = THIS_MODULE,
        .open = drm_open,
        .release = drm_release,
        .unlocked_ioctl = drm_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl = drm_ioctl,
#endif
        /* no need to define mmap. User space maps bo with DRM_IPVR_GEM_MMAP */
};
static int32_t ipvr_drm_freeze(struct drm_device *dev)
{
        int32_t ret;
        int32_t power_usage;
        struct drm_ipvr_private *dev_priv = dev->dev_private;

        IPVR_DEBUG_ENTRY("enter\n");
        power_usage = atomic_read(&dev_priv->ved_power_usage);
        BUG_ON(power_usage < 0);
        if (power_usage > 0) {
                IPVR_DEBUG_PM("VED power usage is %d, skip freezing\n",
                                power_usage);
                return 0;
        }

        ret = ved_check_idle(dev);
        if (ret) {
                IPVR_DEBUG_PM("VED check idle fail: %d, skip freezing\n", ret);
                return 0;
        }

        if (dev->irq_enabled) {
                ret = drm_irq_uninstall(dev);
                if (unlikely(ret)) {
                        IPVR_ERROR("Error uninstalling IRQ handler: %d\n", ret);
                        return -EFAULT;
                }
                IPVR_DEBUG_PM("Successfully uninstalled IRQ\n");
        } else {
                IPVR_DEBUG_PM("irq_enabled is %d\n", dev->irq_enabled);
        }

        if (is_ved_on(dev)) {
                if (!ved_power_off(dev)) {
                        IPVR_ERROR("Failed to power off VED\n");
                        return -EFAULT;
                }
                IPVR_DEBUG_PM("Successfully powered off\n");
        } else {
                IPVR_DEBUG_PM("Skipped power-off since already powered off\n");
        }
        return 0;
}
static int32_t ipvr_drm_thaw(struct drm_device *dev)
{
        int ret;

        IPVR_DEBUG_ENTRY("enter\n");
        if (!is_ved_on(dev)) {
                if (!ved_power_on(dev)) {
                        IPVR_ERROR("Failed to power on VED\n");
                        return -EFAULT;
                }
                IPVR_DEBUG_PM("Successfully powered on\n");
        } else {
                IPVR_DEBUG_PM("Skipped power-on since already powered on\n");
        }

        if (!dev->irq_enabled) {
                ret = drm_irq_install(dev, dev->irq);
                if (ret) {
                        IPVR_ERROR("Error installing IRQ handler: %d\n", ret);
                        return -EFAULT;
                }
                IPVR_DEBUG_PM("Successfully installed IRQ\n");
        } else {
                IPVR_DEBUG_PM("irq_enabled is %d\n", dev->irq_enabled);
        }
        return 0;
}
static int32_t ipvr_pm_suspend(struct device *dev)
{
        struct platform_device *platformdev = to_platform_device(dev);
        struct drm_device *drm_dev = platform_get_drvdata(platformdev);

        IPVR_DEBUG_PM("PM suspend called\n");
        BUG_ON(!drm_dev);
        return ipvr_drm_freeze(drm_dev);
}

static int32_t ipvr_pm_resume(struct device *dev)
{
        struct platform_device *platformdev = to_platform_device(dev);
        struct drm_device *drm_dev = platform_get_drvdata(platformdev);

        IPVR_DEBUG_PM("PM resume called\n");
        BUG_ON(!drm_dev);
        return ipvr_drm_thaw(drm_dev);
}
/*
 * The dumb GEM API is mainly for dumb buffers suitable for scanout;
 * it is not needed for the ipvr driver.
 * gem_vm_ops is used for the mmap case, also not needed for ipvr.
 * todo: prime support can be enabled later
 */
static struct drm_driver ipvr_drm_driver = {
        .driver_features = DRIVER_HAVE_IRQ | DRIVER_GEM,
        .load = ipvr_drm_load,
        .unload = ipvr_drm_unload,
        .open = ipvr_drm_open,
        .preclose = ipvr_drm_preclose,
        .postclose = ipvr_drm_postclose,
        .irq_handler = ipvr_irq_handler,
        .set_busid = drm_platform_set_busid,
This should be NULL for new drivers.
        .gem_free_object = ipvr_gem_free_object,
#ifdef CONFIG_DEBUG_FS
        .debugfs_init = ipvr_debugfs_init,
        .debugfs_cleanup = ipvr_debugfs_cleanup,
#endif
        .ioctls = ipvr_gem_ioctls,
        .num_ioctls = ARRAY_SIZE(ipvr_gem_ioctls),
        .fops = &ipvr_fops,
        .name = IPVR_DRIVER_NAME,
        .desc = IPVR_DRIVER_DESC,
        .date = IPVR_DRIVER_DATE,
        .major = IPVR_DRIVER_MAJOR,
        .minor = IPVR_DRIVER_MINOR,
        .patchlevel = IPVR_DRIVER_PATCHLEVEL,
};
static int32_t ipvr_plat_probe(struct platform_device *device)
{
        return drm_platform_init(&ipvr_drm_driver, device);
drm_platform_init() should not be used for new drivers. Please use drm_dev_alloc() and drm_dev_register() directly; udl and tegra should serve as examples.
Thx, will update it in the next version (roughly as sketched below).
}
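For reference, a rough sketch of the suggested pattern (cf. udl and tegra), not the actual next revision:

static int32_t ipvr_plat_probe(struct platform_device *pdev)
{
        struct drm_device *drm;
        int ret;

        drm = drm_dev_alloc(&ipvr_drm_driver, &pdev->dev);
        if (!drm)
                return -ENOMEM;

        ret = drm_dev_register(drm, 0); /* invokes .load() */
        if (ret) {
                drm_dev_unref(drm);
                return ret;
        }
        return 0;
}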
static int32_t ipvr_plat_remove(struct platform_device *device)
{
        drm_put_dev(platform_get_drvdata(device));
Again, please use drm_dev_unregister() + drm_dev_unref() directly.
Thx, will update it in the next version (see the matching sketch below).

Thanks, David
        return 0;
}
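And the matching remove path, with the same caveat:

static int32_t ipvr_plat_remove(struct platform_device *pdev)
{
        struct drm_device *drm = platform_get_drvdata(pdev);

        drm_dev_unregister(drm);        /* invokes .unload() */
        drm_dev_unref(drm);
        return 0;
}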
static struct dev_pm_ops ipvr_pm_ops = {
        .suspend = ipvr_pm_suspend,
        .resume = ipvr_pm_resume,
        .freeze = ipvr_pm_suspend,
        .thaw = ipvr_pm_resume,
        .poweroff = ipvr_pm_suspend,
        .restore = ipvr_pm_resume,
#ifdef CONFIG_PM_RUNTIME
        .runtime_suspend = ipvr_pm_suspend,
        .runtime_resume = ipvr_pm_resume,
#endif
};
static struct platform_driver ipvr_plat_driver = {
        .driver = {
                .name = "ipvr-ved",
                .owner = THIS_MODULE,
#ifdef CONFIG_PM
                .pm = &ipvr_pm_ops,
#endif
        },
        .probe = ipvr_plat_probe,
        .remove = ipvr_plat_remove,
};
module_platform_driver(ipvr_plat_driver);

MODULE_LICENSE("GPL");