Hi Noralf.
Only nitpicks, I do not have the background to review the actual implementation. So no tags from me to put on the commit.
Sam
+/**
- drm_gem_shmem_create - Allocate an object with the given size
- @dev: DRM device
- @size: Size of the object to allocate
- This function creates a shmem GEM object. The default cache mode is
- DRM_GEM_SHMEM_BO_CACHED. The &drm_driver->gem_create_object callback can be
- used override this.
used to override this. ^^
- Returns:
- A struct drm_gem_shmem_object * on success or an ERR_PTR()-encoded negative
- error code on failure.
- */
+struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size) +{
- struct drm_gem_shmem_object *shmem;
- struct drm_gem_object *obj;
- int ret;
- size = PAGE_ALIGN(size);
- if (dev->driver->gem_create_object)
obj = dev->driver->gem_create_object(dev, size);
- else
obj = kzalloc(sizeof(*shmem), GFP_KERNEL);
- if (!obj)
return ERR_PTR(-ENOMEM);
- shmem = to_drm_gem_shmem_obj(obj);
- if (!dev->driver->gem_create_object)
shmem->cache_mode = DRM_GEM_SHMEM_BO_CACHED;
- ret = drm_gem_object_init(dev, obj, size);
- if (ret)
goto err_free;
Some users of drm_gem_object_init() call drm_gem_object_put_unlocked(obj) when there is an error. Others call kfree() like in this case.
- ret = drm_gem_create_mmap_offset(obj);
- if (ret)
goto err_release;
- mutex_init(&shmem->pages_lock);
- mutex_init(&shmem->vmap_lock);
- return shmem;
+err_release:
- drm_gem_object_release(obj);
+err_free:
- kfree(shmem);
- return ERR_PTR(ret);
+} +EXPORT_SYMBOL_GPL(drm_gem_shmem_create);
+static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem) +{
- struct drm_gem_object *obj = &shmem->base;
- struct page **pages;
- if (shmem->pages_use_count++ > 0)
return 0;
- pages = drm_gem_get_pages(obj);
- if (IS_ERR(pages)) {
DRM_DEBUG_KMS("Failed to get pages (%ld)\n", PTR_ERR(pages));
shmem->pages_use_count = 0;
return PTR_ERR(pages);
- }
- shmem->pages = pages;
- return 0;
+}
+/*
- drm_gem_shmem_get_pages - Allocate backing pages for a shmem GEM object
- @shmem: shmem GEM object
- This function makes sure that backing pages exists for the shmem GEM object
- and increases the use count.
- Returns:
- 0 on success or a negative error code on failure.
- */
+int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem) +{
- int ret;
- ret = mutex_lock_interruptible(&shmem->pages_lock);
- if (ret)
return ret;
- ret = drm_gem_shmem_get_pages_locked(shmem);
- mutex_unlock(&shmem->pages_lock);
- return ret;
+} +EXPORT_SYMBOL(drm_gem_shmem_get_pages);
The function is named *_unlocked, but called with a lock held. Inconsistent?
+static void drm_gem_shmem_put_pages_unlocked(struct drm_gem_shmem_object *shmem) +{
- struct drm_gem_object *obj = &shmem->base;
- if (WARN_ON_ONCE(!shmem->pages_use_count))
return;
- if (--shmem->pages_use_count > 0)
return;
- drm_gem_put_pages(obj, shmem->pages,
shmem->pages_mark_dirty_on_put,
shmem->pages_mark_accessed_on_put);
- shmem->pages = NULL;
+}
+/*
- drm_gem_shmem_put_pages - Decrease use count on the backing pages for a shmem GEM object
- @shmem: shmem GEM object
- This function decreases the use count and puts the backing pages when use drops to zero.
- */
+void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem) +{
- mutex_lock(&shmem->pages_lock);
- drm_gem_shmem_put_pages_unlocked(shmem);
- mutex_unlock(&shmem->pages_lock);
+} +EXPORT_SYMBOL(drm_gem_shmem_put_pages);
+static int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem) +{
- struct drm_gem_object *obj = &shmem->base;
- int ret;
- if (shmem->vmap_use_count++ > 0)
return 0;
- ret = drm_gem_shmem_get_pages(shmem);
- if (ret)
goto err_zero_use;
- if (obj->import_attach) {
shmem->vaddr = dma_buf_vmap(obj->import_attach->dmabuf);
- } else {
pgprot_t prot;
switch (shmem->cache_mode) {
case DRM_GEM_SHMEM_BO_UNKNOWN:
No printout to help the coder who did not set this?
ret = -EINVAL;
goto err_put_pages;
case DRM_GEM_SHMEM_BO_WRITECOMBINED:
prot = pgprot_writecombine(PAGE_KERNEL);
break;
case DRM_GEM_SHMEM_BO_UNCACHED:
prot = pgprot_noncached(PAGE_KERNEL);
break;
case DRM_GEM_SHMEM_BO_CACHED:
prot = PAGE_KERNEL;
break;
}
shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT, VM_MAP, prot);
- }
- if (!shmem->vaddr) {
DRM_DEBUG_KMS("Failed to vmap pages\n");
ret = -ENOMEM;
goto err_put_pages;
- }
- return 0;
+err_put_pages:
- drm_gem_shmem_put_pages(shmem);
+err_zero_use:
- shmem->vmap_use_count = 0;
- return ret;
+}
+/*
- drm_gem_shmem_vmap - Create a virtual mapping for a shmem GEM object
- @shmem: shmem GEM object
- This function makes sure that a virtual address exists for the buffer backing
- the shmem GEM object.
- Returns:
- 0 on success or a negative error code on failure.
- */
+int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem) +{
- int ret;
- ret = mutex_lock_interruptible(&shmem->vmap_lock);
- if (ret)
return ret;
- ret = drm_gem_shmem_vmap_locked(shmem);
- mutex_unlock(&shmem->vmap_lock);
- return ret;
+} +EXPORT_SYMBOL(drm_gem_shmem_vmap);
+static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem) +{
- struct drm_gem_object *obj = &shmem->base;
- if (WARN_ON_ONCE(!shmem->vmap_use_count))
return;
- if (--shmem->vmap_use_count > 0)
return;
- if (obj->import_attach)
dma_buf_vunmap(obj->import_attach->dmabuf, shmem->vaddr);
- else
vunmap(shmem->vaddr);
- shmem->vaddr = NULL;
- drm_gem_shmem_put_pages(shmem);
+}
+/*
- drm_gem_shmem_vunmap - Unmap a virtual mapping fo a shmem GEM object
- @shmem: shmem GEM object
- This function removes the virtual address when use count drops to zero.
- */
+void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem) +{
- mutex_lock(&shmem->vmap_lock);
- drm_gem_shmem_vunmap_locked(shmem);
- mutex_unlock(&shmem->vmap_lock);
+} +EXPORT_SYMBOL(drm_gem_shmem_vunmap);
+static struct drm_gem_shmem_object * +drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
struct drm_device *dev, size_t size,
uint32_t *handle)
+{
- struct drm_gem_shmem_object *shmem;
- int ret;
- shmem = drm_gem_shmem_create(dev, size);
- if (IS_ERR(shmem))
return shmem;
- /*
* Allocate an id of idr table where the obj is registered
* and handle has the id what user can see.
*/
- ret = drm_gem_handle_create(file_priv, &shmem->base, handle);
- /* drop reference from allocate - handle holds it now. */
- drm_gem_object_put_unlocked(&shmem->base);
- if (ret)
return ERR_PTR(ret);
- return shmem;
+}
+/**
- drm_gem_shmem_dumb_create - Create a dumb shmem buffer object
- @file: DRM file structure to create the dumb buffer for
- @dev: DRM device
- @args: IOCTL data
- This function computes the pitch of the dumb buffer and rounds it up to an
- integer number of bytes per pixel. Drivers for hardware that doesn't have
- any additional restrictions on the pitch can directly use this function as
- their &drm_driver.dumb_create callback.
- For hardware with additional restrictions, drivers can adjust the fields
- set up by userspace before calling into this function.
- Returns:
- 0 on success or a negative error code on failure.
- */
+int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
struct drm_mode_create_dumb *args)
+{
- u32 min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
- struct drm_gem_shmem_object *shmem;
- if (!args->pitch || !args->size) {
args->pitch = min_pitch;
args->size = args->pitch * args->height;
- } else {
/* ensure sane minimum values */
if (args->pitch < min_pitch)
args->pitch = min_pitch;
if (args->size < args->pitch * args->height)
args->size = args->pitch * args->height;
- }
- shmem = drm_gem_shmem_create_with_handle(file, dev, args->size, &args->handle);
- return PTR_ERR_OR_ZERO(shmem);
+} +EXPORT_SYMBOL_GPL(drm_gem_shmem_dumb_create);
+static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf) +{
- struct vm_area_struct *vma = vmf->vma;
- struct drm_gem_object *obj = vma->vm_private_data;
- struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
- /* We don't use vmf->pgoff since that has the fake offset: */
- pgoff_t pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
- loff_t num_pages = obj->size >> PAGE_SHIFT;
- struct page *page;
- if (pgoff > num_pages || WARN_ON_ONCE(!shmem->pages))
return VM_FAULT_SIGBUS;
- page = shmem->pages[pgoff];
- return vmf_insert_page(vma, vmf->address, page);
+}
+static void drm_gem_shmem_vm_close(struct vm_area_struct *vma) +{
- struct drm_gem_object *obj = vma->vm_private_data;
- struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
- drm_gem_shmem_put_pages(shmem);
- drm_gem_vm_close(vma);
+}
+const struct vm_operations_struct drm_gem_shmem_vm_ops = {
- .fault = drm_gem_shmem_fault,
- .open = drm_gem_vm_open,
- .close = drm_gem_shmem_vm_close,
+}; +EXPORT_SYMBOL_GPL(drm_gem_shmem_vm_ops);
+static int drm_gem_shmem_mmap_obj(struct drm_gem_shmem_object *shmem,
struct vm_area_struct *vma)
+{
- int ret;
- ret = drm_gem_shmem_get_pages(shmem);
- if (ret)
goto err_close;
- /* VM_PFNMAP was set by drm_gem_mmap() */
- vma->vm_flags &= ~VM_PFNMAP;
- vma->vm_flags |= VM_MIXEDMAP;
- switch (shmem->cache_mode) {
- case DRM_GEM_SHMEM_BO_UNKNOWN:
ret = -EINVAL;
Print to help the programmer?
goto err_put_pages;
- case DRM_GEM_SHMEM_BO_WRITECOMBINED:
vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
break;
- case DRM_GEM_SHMEM_BO_UNCACHED:
vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
break;
- case DRM_GEM_SHMEM_BO_CACHED:
/*
* Shunt off cached objs to shmem file so they have their own
* address_space (so unmap_mapping_range does what we want,
* in particular in the case of mmap'd dmabufs)
*/
fput(vma->vm_file);
get_file(shmem->base.filp);
vma->vm_pgoff = 0;
vma->vm_file = shmem->base.filp;
vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
break;
- }
- return 0;
+err_put_pages:
- drm_gem_shmem_put_pages(shmem);
+err_close:
- drm_gem_vm_close(vma);
- return ret;
+}