A resource will be a shmem based resource or a (planned) vram based resource, so it makes sense to factor out common fields (resource handle, dumb, mapped).
Signed-off-by: Gurchetan Singh gurchetansingh@chromium.org --- drivers/gpu/drm/virtio/virtgpu_drv.h | 11 ++++++++--- drivers/gpu/drm/virtio/virtgpu_object.c | 20 +++++++++++--------- drivers/gpu/drm/virtio/virtgpu_vq.c | 6 ++++-- 3 files changed, 23 insertions(+), 14 deletions(-)
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h index ce73895cf74b..595b5f3dc105 100644 --- a/drivers/gpu/drm/virtio/virtgpu_drv.h +++ b/drivers/gpu/drm/virtio/virtgpu_drv.h @@ -69,16 +69,21 @@ struct virtio_gpu_object_params { struct virtio_gpu_object { struct drm_gem_shmem_object base; uint32_t hw_res_handle; - - struct sg_table *pages; uint32_t mapped; - bool dumb; bool created; }; #define gem_to_virtio_gpu_obj(gobj) \ container_of((gobj), struct virtio_gpu_object, base.base)
+struct virtio_gpu_object_shmem { + struct virtio_gpu_object base; + struct sg_table *pages; +}; + +#define to_virtio_gpu_shmem(virtio_gpu_object) \ + container_of((virtio_gpu_object), struct virtio_gpu_object_shmem, base) + struct virtio_gpu_object_array { struct ww_acquire_ctx ticket; struct list_head next; diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c index c5cad949eb8d..a7d4d871431e 100644 --- a/drivers/gpu/drm/virtio/virtgpu_object.c +++ b/drivers/gpu/drm/virtio/virtgpu_object.c @@ -65,16 +65,17 @@ static void virtio_gpu_resource_id_put(struct virtio_gpu_device *vgdev, uint32_t void virtio_gpu_cleanup_object(struct virtio_gpu_object *bo) { struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private; + struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
- if (bo->pages) { + if (shmem->pages) { if (bo->mapped) { dma_unmap_sg(vgdev->vdev->dev.parent, - bo->pages->sgl, bo->mapped, + shmem->pages->sgl, bo->mapped, DMA_TO_DEVICE); bo->mapped = 0; } - sg_free_table(bo->pages); - bo->pages = NULL; + sg_free_table(shmem->pages); + shmem->pages = NULL; drm_gem_shmem_unpin(&bo->base.base); } virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle); @@ -133,6 +134,7 @@ static int virtio_gpu_object_shmem_init(struct virtio_gpu_device *vgdev, unsigned int *nents) { bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev); + struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo); struct scatterlist *sg; int si, ret;
@@ -140,19 +142,19 @@ static int virtio_gpu_object_shmem_init(struct virtio_gpu_device *vgdev, if (ret < 0) return -EINVAL;
- bo->pages = drm_gem_shmem_get_sg_table(&bo->base.base); - if (!bo->pages) { + shmem->pages = drm_gem_shmem_get_sg_table(&bo->base.base); + if (!shmem->pages) { drm_gem_shmem_unpin(&bo->base.base); return -EINVAL; }
if (use_dma_api) { bo->mapped = dma_map_sg(vgdev->vdev->dev.parent, - bo->pages->sgl, bo->pages->nents, + shmem->pages->sgl, shmem->pages->nents, DMA_TO_DEVICE); *nents = bo->mapped; } else { - *nents = bo->pages->nents; + *nents = shmem->pages->nents; }
*ents = kmalloc_array(*nents, sizeof(struct virtio_gpu_mem_entry), @@ -162,7 +164,7 @@ static int virtio_gpu_object_shmem_init(struct virtio_gpu_device *vgdev, return -ENOMEM; }
- for_each_sg(bo->pages->sgl, sg, *nents, si) { + for_each_sg(shmem->pages->sgl, sg, *nents, si) { (*ents)[si].addr = cpu_to_le64(use_dma_api ? sg_dma_address(sg) : sg_phys(sg)); diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c index 5e2375e0f7bb..73854915ec34 100644 --- a/drivers/gpu/drm/virtio/virtgpu_vq.c +++ b/drivers/gpu/drm/virtio/virtgpu_vq.c @@ -600,10 +600,11 @@ void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev, struct virtio_gpu_transfer_to_host_2d *cmd_p; struct virtio_gpu_vbuffer *vbuf; bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev); + struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
if (use_dma_api) dma_sync_sg_for_device(vgdev->vdev->dev.parent, - bo->pages->sgl, bo->pages->nents, + shmem->pages->sgl, shmem->pages->nents, DMA_TO_DEVICE);
cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p)); @@ -1015,10 +1016,11 @@ void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev, struct virtio_gpu_transfer_host_3d *cmd_p; struct virtio_gpu_vbuffer *vbuf; bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev); + struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
if (use_dma_api) dma_sync_sg_for_device(vgdev->vdev->dev.parent, - bo->pages->sgl, bo->pages->nents, + shmem->pages->sgl, shmem->pages->nents, DMA_TO_DEVICE);
cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
This function won't be usable for hostmem objects.
Signed-off-by: Gurchetan Singh gurchetansingh@chromium.org --- drivers/gpu/drm/virtio/virtgpu_drv.h | 2 +- drivers/gpu/drm/virtio/virtgpu_object.c | 4 ++-- drivers/gpu/drm/virtio/virtgpu_vq.c | 3 ++- 3 files changed, 5 insertions(+), 4 deletions(-)
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h index 595b5f3dc105..014a0c1f21b1 100644 --- a/drivers/gpu/drm/virtio/virtgpu_drv.h +++ b/drivers/gpu/drm/virtio/virtgpu_drv.h @@ -371,7 +371,7 @@ int virtio_gpu_object_create(struct virtio_gpu_device *vgdev, struct virtio_gpu_object **bo_ptr, struct virtio_gpu_fence *fence);
-bool virtio_gpu_is_shmem(struct drm_gem_object *obj); +bool virtio_gpu_is_shmem(struct virtio_gpu_object *bo);
/* virtgpu_prime.c */ struct drm_gem_object *virtgpu_gem_prime_import_sg_table( diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c index a7d4d871431e..11f0d4548613 100644 --- a/drivers/gpu/drm/virtio/virtgpu_object.c +++ b/drivers/gpu/drm/virtio/virtgpu_object.c @@ -110,9 +110,9 @@ static const struct drm_gem_object_funcs virtio_gpu_shmem_funcs = { .mmap = drm_gem_shmem_mmap, };
-bool virtio_gpu_is_shmem(struct drm_gem_object *obj) +bool virtio_gpu_is_shmem(struct virtio_gpu_object *bo) { - return obj->funcs == &virtio_gpu_shmem_funcs; + return bo->base.base.funcs == &virtio_gpu_shmem_funcs; }
struct drm_gem_object *virtio_gpu_create_object(struct drm_device *dev, diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c index 73854915ec34..a44261ba1c5d 100644 --- a/drivers/gpu/drm/virtio/virtgpu_vq.c +++ b/drivers/gpu/drm/virtio/virtgpu_vq.c @@ -526,7 +526,8 @@ static void virtio_gpu_cmd_unref_cb(struct virtio_gpu_device *vgdev, bo = vbuf->resp_cb_data; vbuf->resp_cb_data = NULL;
- virtio_gpu_cleanup_object(bo); + if (bo && virtio_gpu_is_shmem(bo)) + virtio_gpu_cleanup_object(bo); }
void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
This function won't be usable for hostmem objects.
@@ -526,7 +526,8 @@ static void virtio_gpu_cmd_unref_cb(struct virtio_gpu_device *vgdev, bo = vbuf->resp_cb_data; vbuf->resp_cb_data = NULL;
- virtio_gpu_cleanup_object(bo);
- if (bo && virtio_gpu_is_shmem(bo))
virtio_gpu_cleanup_object(bo);
It's not that simple: the virtio_gpu_resource_id_put() call in virtio_gpu_cleanup_object() is needed for all objects. We also must free all objects.
I'd suggest to move the virtio_gpu_is_shmem() check to virtio_gpu_cleanup_object().
cheers, Gerd
Hi,
struct virtio_gpu_object { struct drm_gem_shmem_object base; uint32_t hw_res_handle;
- struct sg_table *pages; uint32_t mapped;
- bool dumb; bool created;
}; #define gem_to_virtio_gpu_obj(gobj) \ container_of((gobj), struct virtio_gpu_object, base.base)
+struct virtio_gpu_object_shmem {
- struct virtio_gpu_object base;
- struct sg_table *pages;
+};
mapped can be moved too.
@@ -600,10 +600,11 @@ void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
- struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
Should we pass struct virtio_gpu_object_shmem to virtio_gpu_cmd_transfer_to_host_2d (+friends) instead?
hostmem will not need transfers ...
cheers, Gerd
On Tue, Mar 3, 2020 at 1:56 AM Gerd Hoffmann kraxel@redhat.com wrote:
Hi,
struct virtio_gpu_object { struct drm_gem_shmem_object base; uint32_t hw_res_handle;
struct sg_table *pages; uint32_t mapped;
bool dumb; bool created;
}; #define gem_to_virtio_gpu_obj(gobj) \ container_of((gobj), struct virtio_gpu_object, base.base)
+struct virtio_gpu_object_shmem {
struct virtio_gpu_object base;
struct sg_table *pages;
+};
mapped can be moved too.
@@ -600,10 +600,11 @@ void virtio_gpu_cmd_transfer_to_host_2d(struct
virtio_gpu_device *vgdev,
struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
Should we pass struct virtio_gpu_object_shmem to virtio_gpu_cmd_transfer_to_host_2d (+friends) instead?
It ends up being a little more complicated, due to casting to virtio_gpu_object and then virtio_gpu_object_shmem in virtio_gpu_transfer_to_host_ioctl, so I omitted it in v2...
hostmem will not need transfers ...
cheers, Gerd
dri-devel@lists.freedesktop.org