Hello,
At Rob Clark's request, here's a repost of the two patches that implement import and export of GEM CMA objects as dma-buf file handles.
The code is based on the Exynos DRM DMA-BUF implementation. The exporter role has been successfully tested with the Renesas R-Car DU driver.
Laurent Pinchart (2):
  drm: GEM CMA: Split object creation into object alloc and DMA memory alloc
  drm: GEM CMA: Add DRM PRIME support

 drivers/gpu/drm/drm_gem_cma_helper.c | 376 ++++++++++++++++++++++++++++++++---
 include/drm/drm_gem_cma_helper.h     |   9 +
 2 files changed, 356 insertions(+), 29 deletions(-)
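For drivers adopting these helpers, the PRIME wiring would look roughly as follows. This is an illustrative sketch only (the foo_ names are placeholders, not part of this series); the new export and import helpers hook into the standard .gem_prime_* driver operations, next to the generic PRIME ioctl plumbing from the DRM core.

#include <drm/drmP.h>
#include <drm/drm_gem_cma_helper.h>

/* Hypothetical driver glue for the helpers added by this series. */
static struct drm_driver foo_driver = {
	.driver_features	= DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME,
	.gem_free_object	= drm_gem_cma_free_object,
	.gem_vm_ops		= &drm_gem_cma_vm_ops,
	/* Generic PRIME ioctl plumbing provided by the DRM core. */
	.prime_handle_to_fd	= drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle	= drm_gem_prime_fd_to_handle,
	/* The new GEM CMA dma-buf helpers. */
	.gem_prime_export	= drm_gem_cma_dmabuf_export,
	.gem_prime_import	= drm_gem_cma_dmabuf_import,
};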
This allows creating a GEM CMA object without an associated DMA memory buffer, and will be used to implement DRM PRIME support.
Signed-off-by: Laurent Pinchart <laurent.pinchart+renesas@ideasonboard.com>
---
 drivers/gpu/drm/drm_gem_cma_helper.c | 83 +++++++++++++++++++++---------------
 1 file changed, 48 insertions(+), 35 deletions(-)
diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c
index 0a7e011..8cce330 100644
--- a/drivers/gpu/drm/drm_gem_cma_helper.c
+++ b/drivers/gpu/drm/drm_gem_cma_helper.c
@@ -32,62 +32,73 @@ static unsigned int get_gem_mmap_offset(struct drm_gem_object *obj)
 	return (unsigned int)obj->map_list.hash.key << PAGE_SHIFT;
 }
 
-static void drm_gem_cma_buf_destroy(struct drm_device *drm,
-		struct drm_gem_cma_object *cma_obj)
-{
-	dma_free_writecombine(drm->dev, cma_obj->base.size, cma_obj->vaddr,
-			cma_obj->paddr);
-}
-
 /*
- * drm_gem_cma_create - allocate an object with the given size
+ * __drm_gem_cma_create - Create a GEM CMA object without allocating memory
+ * @drm: The drm device
+ * @size: The GEM object size
  *
- * returns a struct drm_gem_cma_object* on success or ERR_PTR values
- * on failure.
+ * This function creates and initializes a GEM CMA object of the given size, but
+ * doesn't allocate any memory to back the object.
+ *
+ * Return a struct drm_gem_cma_object* on success or ERR_PTR values on failure.
  */
-struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm,
-		unsigned int size)
+static struct drm_gem_cma_object *
+__drm_gem_cma_create(struct drm_device *drm, unsigned int size)
 {
 	struct drm_gem_cma_object *cma_obj;
 	struct drm_gem_object *gem_obj;
 	int ret;
 
-	size = round_up(size, PAGE_SIZE);
-
 	cma_obj = kzalloc(sizeof(*cma_obj), GFP_KERNEL);
 	if (!cma_obj)
 		return ERR_PTR(-ENOMEM);
 
-	cma_obj->vaddr = dma_alloc_writecombine(drm->dev, size,
-			&cma_obj->paddr, GFP_KERNEL | __GFP_NOWARN);
-	if (!cma_obj->vaddr) {
-		dev_err(drm->dev, "failed to allocate buffer with size %d\n", size);
-		ret = -ENOMEM;
-		goto err_dma_alloc;
-	}
-
 	gem_obj = &cma_obj->base;
 
 	ret = drm_gem_object_init(drm, gem_obj, size);
 	if (ret)
-		goto err_obj_init;
+		goto error;
 
 	ret = drm_gem_create_mmap_offset(gem_obj);
-	if (ret)
-		goto err_create_mmap_offset;
+	if (ret) {
+		drm_gem_object_release(gem_obj);
+		goto error;
+	}
 
 	return cma_obj;
 
-err_create_mmap_offset:
-	drm_gem_object_release(gem_obj);
+error:
+	kfree(cma_obj);
+	return ERR_PTR(ret);
+}
 
-err_obj_init:
-	drm_gem_cma_buf_destroy(drm, cma_obj);
+/*
+ * drm_gem_cma_create - allocate an object with the given size
+ *
+ * returns a struct drm_gem_cma_object* on success or ERR_PTR values
+ * on failure.
+ */
+struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm,
+		unsigned int size)
+{
+	struct drm_gem_cma_object *cma_obj;
 
-err_dma_alloc:
-	kfree(cma_obj);
+	size = round_up(size, PAGE_SIZE);
 
-	return ERR_PTR(ret);
+	cma_obj = __drm_gem_cma_create(drm, size);
+	if (IS_ERR(cma_obj))
+		return cma_obj;
+
+	cma_obj->vaddr = dma_alloc_writecombine(drm->dev, size,
+			&cma_obj->paddr, GFP_KERNEL | __GFP_NOWARN);
+	if (!cma_obj->vaddr) {
+		dev_err(drm->dev, "failed to allocate buffer with size %d\n",
+			size);
+		drm_gem_cma_free_object(&cma_obj->base);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	return cma_obj;
 }
 EXPORT_SYMBOL_GPL(drm_gem_cma_create);
 
@@ -143,11 +154,13 @@ void drm_gem_cma_free_object(struct drm_gem_object *gem_obj)
 	if (gem_obj->map_list.map)
 		drm_gem_free_mmap_offset(gem_obj);
 
-	drm_gem_object_release(gem_obj);
-
 	cma_obj = to_drm_gem_cma_obj(gem_obj);
 
-	drm_gem_cma_buf_destroy(gem_obj->dev, cma_obj);
+	if (cma_obj->vaddr)
+		dma_free_writecombine(gem_obj->dev->dev, cma_obj->base.size,
+				      cma_obj->vaddr, cma_obj->paddr);
+
+	drm_gem_object_release(gem_obj);
 
 	kfree(cma_obj);
 }
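The resulting split, seen from inside the helper: drm_gem_cma_create() behaves as before, while __drm_gem_cma_create() produces an object with a NULL vaddr for internal callers that bring their own backing memory. Purely for illustration:

/* Normal path: GEM object plus CMA backing memory, as before. */
struct drm_gem_cma_object *cma_obj = drm_gem_cma_create(drm, size);
/* On success, cma_obj->vaddr and cma_obj->paddr are both valid. */

/* Allocation-less path (used by the PRIME import code in the next
 * patch): GEM object only, vaddr left NULL.  drm_gem_cma_free_object()
 * now checks vaddr and skips dma_free_writecombine() in that case.
 */
struct drm_gem_cma_object *import_obj = __drm_gem_cma_create(drm, size);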
Signed-off-by: Laurent Pinchart <laurent.pinchart+renesas@ideasonboard.com>
---
 drivers/gpu/drm/drm_gem_cma_helper.c | 311 ++++++++++++++++++++++++++++++++++-
 include/drm/drm_gem_cma_helper.h     |   9 +
 2 files changed, 317 insertions(+), 3 deletions(-)
diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c
index 8cce330..c428a51 100644
--- a/drivers/gpu/drm/drm_gem_cma_helper.c
+++ b/drivers/gpu/drm/drm_gem_cma_helper.c
@@ -21,6 +21,9 @@
 #include <linux/slab.h>
 #include <linux/mutex.h>
 #include <linux/export.h>
+#if CONFIG_DMA_SHARED_BUFFER
+#include <linux/dma-buf.h>
+#endif
 #include <linux/dma-mapping.h>
 
 #include <drm/drmP.h>
@@ -82,6 +85,8 @@ struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm,
 		unsigned int size)
 {
 	struct drm_gem_cma_object *cma_obj;
+	struct sg_table *sgt = NULL;
+	int ret;
 
 	size = round_up(size, PAGE_SIZE);
 
@@ -94,11 +99,29 @@ struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm,
 	if (!cma_obj->vaddr) {
 		dev_err(drm->dev, "failed to allocate buffer with size %d\n",
 			size);
-		drm_gem_cma_free_object(&cma_obj->base);
-		return ERR_PTR(-ENOMEM);
+		ret = -ENOMEM;
+		goto error;
 	}
 
+	sgt = kzalloc(sizeof(*cma_obj->sgt), GFP_KERNEL);
+	if (sgt == NULL) {
+		ret = -ENOMEM;
+		goto error;
+	}
+
+	ret = dma_get_sgtable(drm->dev, sgt, cma_obj->vaddr,
+			      cma_obj->paddr, size);
+	if (ret < 0)
+		goto error;
+
+	cma_obj->sgt = sgt;
+
 	return cma_obj;
+
+error:
+	kfree(sgt);
+	drm_gem_cma_free_object(&cma_obj->base);
+	return ERR_PTR(ret);
 }
 EXPORT_SYMBOL_GPL(drm_gem_cma_create);
 
@@ -156,9 +179,16 @@ void drm_gem_cma_free_object(struct drm_gem_object *gem_obj)
 
 	cma_obj = to_drm_gem_cma_obj(gem_obj);
 
-	if (cma_obj->vaddr)
+	if (cma_obj->vaddr) {
 		dma_free_writecombine(gem_obj->dev->dev, cma_obj->base.size,
 				      cma_obj->vaddr, cma_obj->paddr);
+		if (cma_obj->sgt) {
+			sg_free_table(cma_obj->sgt);
+			kfree(cma_obj->sgt);
+		}
+	} else if (gem_obj->import_attach) {
+		drm_prime_gem_destroy(gem_obj, cma_obj->sgt);
+	}
 
 	drm_gem_object_release(gem_obj);
 
@@ -282,4 +312,279 @@ void drm_gem_cma_describe(struct drm_gem_cma_object *cma_obj, struct seq_file *m)
 	seq_printf(m, "\n");
 }
 EXPORT_SYMBOL_GPL(drm_gem_cma_describe);
+
+/* -----------------------------------------------------------------------------
+ * DMA-BUF
+ */
+
+#if CONFIG_DMA_SHARED_BUFFER
+struct drm_gem_cma_dmabuf_attachment {
+	struct sg_table sgt;
+	enum dma_data_direction dir;
+};
+
+static int drm_gem_cma_dmabuf_attach(struct dma_buf *dmabuf, struct device *dev,
+				     struct dma_buf_attachment *attach)
+{
+	struct drm_gem_cma_dmabuf_attachment *cma_attach;
+
+	cma_attach = kzalloc(sizeof(*cma_attach), GFP_KERNEL);
+	if (!cma_attach)
+		return -ENOMEM;
+
+	cma_attach->dir = DMA_NONE;
+	attach->priv = cma_attach;
+
+	return 0;
+}
+
+static void drm_gem_cma_dmabuf_detach(struct dma_buf *dmabuf,
+				      struct dma_buf_attachment *attach)
+{
+	struct drm_gem_cma_dmabuf_attachment *cma_attach = attach->priv;
+	struct sg_table *sgt;
+
+	if (cma_attach == NULL)
+		return;
+
+	sgt = &cma_attach->sgt;
+
+	if (cma_attach->dir != DMA_NONE)
+		dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents,
+			     cma_attach->dir);
+
+	sg_free_table(sgt);
+	kfree(cma_attach);
+	attach->priv = NULL;
+}
+
+static struct sg_table *
+drm_gem_cma_dmabuf_map(struct dma_buf_attachment *attach,
+		       enum dma_data_direction dir)
+{
+	struct drm_gem_cma_dmabuf_attachment *cma_attach = attach->priv;
+	struct drm_gem_cma_object *cma_obj = attach->dmabuf->priv;
+	struct drm_device *drm = cma_obj->base.dev;
+	struct scatterlist *rd, *wr;
+	struct sg_table *sgt;
+	unsigned int i;
+	int nents, ret;
+
+	DRM_DEBUG_PRIME("\n");
+
+	if (WARN_ON(dir == DMA_NONE))
+		return ERR_PTR(-EINVAL);
+
+	/* Return the cached mapping when possible. */
+	if (cma_attach->dir == dir)
+		return &cma_attach->sgt;
+
+	/* Two mappings with different directions for the same attachment are
+	 * not allowed.
+	 */
+	if (WARN_ON(cma_attach->dir != DMA_NONE))
+		return ERR_PTR(-EBUSY);
+
+	sgt = &cma_attach->sgt;
+
+	ret = sg_alloc_table(sgt, cma_obj->sgt->orig_nents, GFP_KERNEL);
+	if (ret) {
+		DRM_ERROR("failed to alloc sgt.\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	mutex_lock(&drm->struct_mutex);
+
+	rd = cma_obj->sgt->sgl;
+	wr = sgt->sgl;
+	for (i = 0; i < sgt->orig_nents; ++i) {
+		sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
+		rd = sg_next(rd);
+		wr = sg_next(wr);
+	}
+
+	nents = dma_map_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir);
+	if (!nents) {
+		DRM_ERROR("failed to map sgl with iommu.\n");
+		sgt = ERR_PTR(-EIO);
+		goto err_unlock;
+	}
+
+	cma_attach->dir = dir;
+	attach->priv = cma_attach;
+
+	DRM_DEBUG_PRIME("buffer size = %zu\n", cma_obj->base.size);
+
+err_unlock:
+	mutex_unlock(&drm->struct_mutex);
+	return sgt;
+}
+
+static void drm_gem_cma_dmabuf_unmap(struct dma_buf_attachment *attach,
+				     struct sg_table *sgt,
+				     enum dma_data_direction dir)
+{
+	/* Nothing to do. */
+}
+
+static void drm_gem_cma_dmabuf_release(struct dma_buf *dmabuf)
+{
+	struct drm_gem_cma_object *cma_obj = dmabuf->priv;
+
+	DRM_DEBUG_PRIME("%s\n", __FILE__);
+
+	/*
+	 * drm_gem_cma_dmabuf_release() call means that file object's
+	 * f_count is 0 and it calls drm_gem_object_handle_unreference()
+	 * to drop the references that these values had been increased
+	 * at drm_prime_handle_to_fd()
+	 */
+	if (cma_obj->base.export_dma_buf == dmabuf) {
+		cma_obj->base.export_dma_buf = NULL;
+
+		/*
+		 * drop this gem object refcount to release allocated buffer
+		 * and resources.
+		 */
+		drm_gem_object_unreference_unlocked(&cma_obj->base);
+	}
+}
+
+static void *drm_gem_cma_dmabuf_kmap_atomic(struct dma_buf *dmabuf,
+					    unsigned long page_num)
+{
+	/* TODO */
+
+	return NULL;
+}
+
+static void drm_gem_cma_dmabuf_kunmap_atomic(struct dma_buf *dmabuf,
+					     unsigned long page_num, void *addr)
+{
+	/* TODO */
+}
+
+static void *drm_gem_cma_dmabuf_kmap(struct dma_buf *dmabuf,
+				     unsigned long page_num)
+{
+	/* TODO */
+
+	return NULL;
+}
+
+static void drm_gem_cma_dmabuf_kunmap(struct dma_buf *dmabuf,
+				      unsigned long page_num, void *addr)
+{
+	/* TODO */
+}
+
+static int drm_gem_cma_dmabuf_mmap(struct dma_buf *dmabuf,
+				   struct vm_area_struct *vma)
+{
+	return -ENOTTY;
+}
+
+static void *drm_gem_cma_dmabuf_vmap(struct dma_buf *dmabuf)
+{
+	struct drm_gem_cma_object *cma_obj = dmabuf->priv;
+
+	return cma_obj->vaddr;
+}
+
+static struct dma_buf_ops drm_gem_cma_dmabuf_ops = {
+	.attach		= drm_gem_cma_dmabuf_attach,
+	.detach		= drm_gem_cma_dmabuf_detach,
+	.map_dma_buf	= drm_gem_cma_dmabuf_map,
+	.unmap_dma_buf	= drm_gem_cma_dmabuf_unmap,
+	.kmap		= drm_gem_cma_dmabuf_kmap,
+	.kmap_atomic	= drm_gem_cma_dmabuf_kmap_atomic,
+	.kunmap		= drm_gem_cma_dmabuf_kunmap,
+	.kunmap_atomic	= drm_gem_cma_dmabuf_kunmap_atomic,
+	.mmap		= drm_gem_cma_dmabuf_mmap,
+	.vmap		= drm_gem_cma_dmabuf_vmap,
+	.release	= drm_gem_cma_dmabuf_release,
+};
+
+struct dma_buf *drm_gem_cma_dmabuf_export(struct drm_device *drm,
+					  struct drm_gem_object *obj, int flags)
+{
+	struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);
+
+	return dma_buf_export(cma_obj, &drm_gem_cma_dmabuf_ops,
+			      cma_obj->base.size, flags);
+}
+EXPORT_SYMBOL_GPL(drm_gem_cma_dmabuf_export);
+
+struct drm_gem_object *drm_gem_cma_dmabuf_import(struct drm_device *drm,
+						 struct dma_buf *dma_buf)
+{
+	struct drm_gem_cma_object *cma_obj;
+	struct dma_buf_attachment *attach;
+	struct sg_table *sgt;
+	int ret;
+
+	DRM_DEBUG_PRIME("%s\n", __FILE__);
+
+	/* is this one of own objects? */
+	if (dma_buf->ops == &drm_gem_cma_dmabuf_ops) {
+		struct drm_gem_object *obj;
+
+		cma_obj = dma_buf->priv;
+		obj = &cma_obj->base;
+
+		/* is it from our device? */
+		if (obj->dev == drm) {
+			/*
+			 * Importing dmabuf exported from our own gem increases
+			 * refcount on gem itself instead of f_count of dmabuf.
+			 */
+			drm_gem_object_reference(obj);
+			dma_buf_put(dma_buf);
+			return obj;
+		}
+	}
+
+	/* Create a CMA GEM buffer. */
+	cma_obj = __drm_gem_cma_create(drm, dma_buf->size);
+	if (IS_ERR(cma_obj))
+		return ERR_PTR(PTR_ERR(cma_obj));
+
+	/* Attach to the buffer and map it. Make sure the mapping is contiguous
+	 * on the device memory bus, as that's all we support.
+	 */
+	attach = dma_buf_attach(dma_buf, drm->dev);
+	if (IS_ERR(attach)) {
+		ret = -EINVAL;
+		goto error_gem_free;
+	}
+
+	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
+	if (IS_ERR_OR_NULL(sgt)) {
+		ret = sgt ? PTR_ERR(sgt) : -ENOMEM;
+		goto error_buf_detach;
+	}
+
+	if (sgt->nents != 1) {
+		ret = -EINVAL;
+		goto error_buf_unmap;
+	}
+
+	cma_obj->base.import_attach = attach;
+	cma_obj->paddr = sg_dma_address(sgt->sgl);
+	cma_obj->sgt = sgt;
+
+	DRM_DEBUG_PRIME("dma_addr = 0x%x, size = %zu\n", cma_obj->paddr,
+			dma_buf->size);
+
+	return &cma_obj->base;
+
+error_buf_unmap:
+	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
+error_buf_detach:
+	dma_buf_detach(dma_buf, attach);
+error_gem_free:
+	drm_gem_cma_free_object(&cma_obj->base);
+	return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(drm_gem_cma_dmabuf_import);
+#endif
diff --git a/include/drm/drm_gem_cma_helper.h b/include/drm/drm_gem_cma_helper.h
index 63397ce..6e17251 100644
--- a/include/drm/drm_gem_cma_helper.h
+++ b/include/drm/drm_gem_cma_helper.h
@@ -4,6 +4,9 @@
 struct drm_gem_cma_object {
 	struct drm_gem_object base;
 	dma_addr_t paddr;
+	struct sg_table *sgt;
+
+	/* For objects with DMA memory allocated by GEM CMA */
 	void *vaddr;
 };
 
@@ -45,4 +48,10 @@ extern const struct vm_operations_struct drm_gem_cma_vm_ops;
 void drm_gem_cma_describe(struct drm_gem_cma_object *obj, struct seq_file *m);
 #endif
 
+struct dma_buf *drm_gem_cma_dmabuf_export(struct drm_device *drm_dev,
+					  struct drm_gem_object *obj,
+					  int flags);
+struct drm_gem_object *drm_gem_cma_dmabuf_import(struct drm_device *drm_dev,
+						 struct dma_buf *dma_buf);
+
 #endif /* __DRM_GEM_CMA_HELPER_H__ */
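For completeness, here is what this enables on the userspace side once a driver is wired up: buffers can be passed between devices with the standard PRIME ioctls. A sketch using the existing libdrm wrappers (error handling mostly elided; fds and handles assumed valid):

#include <stdint.h>
#include <xf86drm.h>

/* Export a GEM handle from one DRM device as a dma-buf fd and import
 * it into another.
 */
int share_buffer(int export_fd, uint32_t handle, int import_fd,
		 uint32_t *imported_handle)
{
	int prime_fd;
	int ret;

	ret = drmPrimeHandleToFD(export_fd, handle, DRM_CLOEXEC, &prime_fd);
	if (ret < 0)
		return ret;

	return drmPrimeFDToHandle(import_fd, prime_fd, imported_handle);
}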
Hi Laurent,

A few mostly-minor comments, although from a quick look the sg_alloc_table()/sg_free_table() handling doesn't look quite right in all cases. The other comments could just be the subject of a later patch, if it is something that somebody needs some day.
On Mon, Apr 15, 2013 at 9:57 AM, Laurent Pinchart <laurent.pinchart+renesas@ideasonboard.com> wrote:
Don't we miss an sg_free_table() in this error path? Or, well, I guess you still call it in _detach(), but if _map() is called again I think we'll sg_alloc_table() the same table a second time, which doesn't seem quite right.
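For reference, the fix under discussion would presumably amount to freeing the table in the dma_map_sg() error path of drm_gem_cma_dmabuf_map(), something like:

	nents = dma_map_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir);
	if (!nents) {
		DRM_ERROR("failed to map sgl with iommu.\n");
		/* Free the cloned table so that a subsequent _map() call
		 * can sg_alloc_table() it again without leaking entries.
		 */
		sg_free_table(sgt);
		sgt = ERR_PTR(-EIO);
		goto err_unlock;
	}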
Hmm, I wonder if it makes sense to support _unmap() and then _map() again with a different direction? I'm not entirely sure when that would be needed, and I suppose it is OK to add later.
Again, I'm not really sure it is required, but it should be pretty trivial to support kmap and friends.
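Since the buffer is contiguous and permanently mapped, kmap would presumably reduce to pointer arithmetic; a sketch, only valid for objects that actually have a kernel mapping (i.e. not for imported buffers):

static void *drm_gem_cma_dmabuf_kmap(struct dma_buf *dmabuf,
				     unsigned long page_num)
{
	struct drm_gem_cma_object *cma_obj = dmabuf->priv;

	/* CMA buffers are physically contiguous and already mapped by
	 * dma_alloc_writecombine(), so an offset into vaddr is enough.
	 * Imported objects have no vaddr and would need real work here.
	 */
	if (!cma_obj->vaddr)
		return NULL;

	return cma_obj->vaddr + (page_num << PAGE_SHIFT);
}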
It should also be pretty trivial to redirect _dmabuf_mmap() into drm_gem_cma_mmap().
Hi Rob,
Thank you for the review.
On Monday 15 April 2013 15:00:58 Rob Clark wrote:
[snip]
Indeed, good catch. I'll fix it.
I don't have a use case for that right now, so I'd vote for adding it later if needed :-)
[snip]
It will require a bit of code shuffling, but I'll give it a try.
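One possible shape for that shuffling, assuming an object-level helper is split out of drm_gem_cma_mmap() (the drm_gem_cma_mmap_obj() name is hypothetical, and the details of the existing mapping code are elided):

/* Hypothetical helper factored out of drm_gem_cma_mmap(), which today
 * resolves the GEM object from the fake mmap offset before setting up
 * the mapping.
 */
static int drm_gem_cma_mmap_obj(struct drm_gem_cma_object *cma_obj,
				struct vm_area_struct *vma)
{
	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	return remap_pfn_range(vma, vma->vm_start,
			       cma_obj->paddr >> PAGE_SHIFT,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}

static int drm_gem_cma_dmabuf_mmap(struct dma_buf *dmabuf,
				   struct vm_area_struct *vma)
{
	struct drm_gem_cma_object *cma_obj = dmabuf->priv;

	return drm_gem_cma_mmap_obj(cma_obj, vma);
}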