Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>

Gerd Hoffmann (4):
  drm/virtio: simplify virtio_gpu_alloc_cmd
  drm/virtio: resource teardown tweaks
  drm/virtio: move mapping teardown to virtio_gpu_cleanup_object()
  drm/virtio: move virtio_gpu_mem_entry initialization to new function

 drivers/gpu/drm/virtio/virtgpu_drv.h    |   9 +-
 drivers/gpu/drm/virtio/virtgpu_object.c |  82 +++++++++++--
 drivers/gpu/drm/virtio/virtgpu_vq.c     | 155 +++++++-----------------
 3 files changed, 124 insertions(+), 122 deletions(-)
[PATCH 1/4] drm/virtio: simplify virtio_gpu_alloc_cmd

Just call virtio_gpu_alloc_cmd_resp with some fixed args instead of
duplicating most of the function body.
Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
---
 drivers/gpu/drm/virtio/virtgpu_vq.c | 25 ++++++++-----------------
 1 file changed, 8 insertions(+), 17 deletions(-)
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index 5914e79d3429..6d6d55dc384e 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -109,23 +109,6 @@ virtio_gpu_get_vbuf(struct virtio_gpu_device *vgdev,
 	return vbuf;
 }
 
-static void *virtio_gpu_alloc_cmd(struct virtio_gpu_device *vgdev,
-				  struct virtio_gpu_vbuffer **vbuffer_p,
-				  int size)
-{
-	struct virtio_gpu_vbuffer *vbuf;
-
-	vbuf = virtio_gpu_get_vbuf(vgdev, size,
-				   sizeof(struct virtio_gpu_ctrl_hdr),
-				   NULL, NULL);
-	if (IS_ERR(vbuf)) {
-		*vbuffer_p = NULL;
-		return ERR_CAST(vbuf);
-	}
-	*vbuffer_p = vbuf;
-	return vbuf->buf;
-}
-
 static struct virtio_gpu_update_cursor*
 virtio_gpu_alloc_cursor(struct virtio_gpu_device *vgdev,
 			struct virtio_gpu_vbuffer **vbuffer_p)
@@ -161,6 +144,14 @@ static void *virtio_gpu_alloc_cmd_resp(struct virtio_gpu_device *vgdev,
 	return (struct virtio_gpu_command *)vbuf->buf;
 }
 
+static void *virtio_gpu_alloc_cmd(struct virtio_gpu_device *vgdev,
+				  struct virtio_gpu_vbuffer **vbuffer_p,
+				  int size)
+{
+	return virtio_gpu_alloc_cmd_resp(vgdev, NULL, vbuffer_p, size,
+					 sizeof(struct virtio_gpu_ctrl_hdr), NULL);
+}
+
 static void free_vbuf(struct virtio_gpu_device *vgdev,
 		      struct virtio_gpu_vbuffer *vbuf)
 {
[PATCH 2/4] drm/virtio: resource teardown tweaks

Add new virtio_gpu_cleanup_object() helper function for object cleanup.
Wire up callback function for resource unref, do cleanup from callback
when we know the host stopped using the resource.
Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
---
 drivers/gpu/drm/virtio/virtgpu_drv.h    |  3 ++-
 drivers/gpu/drm/virtio/virtgpu_object.c | 19 ++++++++++----
 drivers/gpu/drm/virtio/virtgpu_vq.c     | 35 ++++++++++++++++++++++---
 3 files changed, 48 insertions(+), 9 deletions(-)
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index 7e69c06e168e..372dd248cf02 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -262,7 +262,7 @@ void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
 				    struct virtio_gpu_object_array *objs,
 				    struct virtio_gpu_fence *fence);
 void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
-				   uint32_t resource_id);
+				   struct virtio_gpu_object *bo);
 void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
 					uint64_t offset,
 					uint32_t width, uint32_t height,
@@ -355,6 +355,7 @@ void virtio_gpu_fence_event_process(struct virtio_gpu_device *vdev,
 				    u64 last_seq);
 
 /* virtio_gpu_object */
+void virtio_gpu_cleanup_object(struct virtio_gpu_object *bo);
 struct drm_gem_object *virtio_gpu_create_object(struct drm_device *dev,
 						size_t size);
 int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c
index 017a9e0fc3bb..28a161af7503 100644
--- a/drivers/gpu/drm/virtio/virtgpu_object.c
+++ b/drivers/gpu/drm/virtio/virtgpu_object.c
@@ -61,6 +61,14 @@ static void virtio_gpu_resource_id_put(struct virtio_gpu_device *vgdev, uint32_t
 	}
 }
 
+void virtio_gpu_cleanup_object(struct virtio_gpu_object *bo)
+{
+	struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;
+
+	virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
+	drm_gem_shmem_free_object(&bo->base.base);
+}
+
 static void virtio_gpu_free_object(struct drm_gem_object *obj)
 {
 	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
@@ -68,11 +76,12 @@ static void virtio_gpu_free_object(struct drm_gem_object *obj)
 
 	if (bo->pages)
 		virtio_gpu_object_detach(vgdev, bo);
-	if (bo->created)
-		virtio_gpu_cmd_unref_resource(vgdev, bo->hw_res_handle);
-	virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
-
-	drm_gem_shmem_free_object(obj);
+	if (bo->created) {
+		virtio_gpu_cmd_unref_resource(vgdev, bo);
+		/* completion handler calls virtio_gpu_cleanup_object() */
+		return;
+	}
+	virtio_gpu_cleanup_object(bo);
 }
 
 static const struct drm_gem_object_funcs virtio_gpu_gem_funcs = {
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index 6d6d55dc384e..6e8097e4c214 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -152,6 +152,15 @@ static void *virtio_gpu_alloc_cmd(struct virtio_gpu_device *vgdev,
 					 sizeof(struct virtio_gpu_ctrl_hdr), NULL);
 }
 
+static void *virtio_gpu_alloc_cmd_cb(struct virtio_gpu_device *vgdev,
+				     struct virtio_gpu_vbuffer **vbuffer_p,
+				     int size,
+				     virtio_gpu_resp_cb cb)
+{
+	return virtio_gpu_alloc_cmd_resp(vgdev, cb, vbuffer_p, size,
+					 sizeof(struct virtio_gpu_ctrl_hdr), NULL);
+}
+
 static void free_vbuf(struct virtio_gpu_device *vgdev,
 		      struct virtio_gpu_vbuffer *vbuf)
 {
@@ -494,17 +503,37 @@ void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
 	bo->created = true;
 }
 
+static void virtio_gpu_cmd_unref_cb(struct virtio_gpu_device *vgdev,
+				    struct virtio_gpu_vbuffer *vbuf)
+{
+	struct virtio_gpu_object *bo;
+
+	bo = gem_to_virtio_gpu_obj(vbuf->objs->objs[0]);
+	kfree(vbuf->objs);
+	vbuf->objs = NULL;
+
+	virtio_gpu_cleanup_object(bo);
+}
+
 void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
-				   uint32_t resource_id)
+				   struct virtio_gpu_object *bo)
 {
 	struct virtio_gpu_resource_unref *cmd_p;
 	struct virtio_gpu_vbuffer *vbuf;
 
-	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
+	cmd_p = virtio_gpu_alloc_cmd_cb(vgdev, &vbuf, sizeof(*cmd_p),
+					virtio_gpu_cmd_unref_cb);
 	memset(cmd_p, 0, sizeof(*cmd_p));
 	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNREF);
-	cmd_p->resource_id = cpu_to_le32(resource_id);
+	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
+
+	/*
+	 * We are in the release callback and do NOT want to refcount
+	 * bo, so do NOT use virtio_gpu_array_add_obj().
+	 */
+	vbuf->objs = virtio_gpu_array_alloc(1);
+	vbuf->objs->objs[0] = &bo->base.base;
 	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
 }
On Wed, Feb 5, 2020 at 3:00 AM Gerd Hoffmann <kraxel@redhat.com> wrote:
> Add new virtio_gpu_cleanup_object() helper function for object cleanup.
> Wire up callback function for resource unref, do cleanup from callback
> when we know the host stopped using the resource.
>
> [...]
>
> +	if (bo->created) {
> +		virtio_gpu_cmd_unref_resource(vgdev, bo);
> +		/* completion handler calls virtio_gpu_cleanup_object() */
nitpick: we don't need this comment when virtio_gpu_cmd_unref_cb is defined by this file and passed to virtio_gpu_cmd_unref_resource.
I happen to be looking at our error handling paths. I think we want virtio_gpu_queue_fenced_ctrl_buffer to call vbuf->resp_cb on errors.
> +		return;
> +	}
> +	virtio_gpu_cleanup_object(bo);
>
> [...]
>
> +	/*
> +	 * We are in the release callback and do NOT want to refcount
> +	 * bo, so do NOT use virtio_gpu_array_add_obj().
> +	 */
> +	vbuf->objs = virtio_gpu_array_alloc(1);
> +	vbuf->objs->objs[0] = &bo->base.base;
This is an abuse of obj array. Add "void *private_data;" to virtio_gpu_vbuffer and use that maybe?
Otherwise, simply
  // abuse objs field to pass our private data; must reset in the resp_cb
  vbuf->objs = (struct virtio_gpu_object_array *)bo;
makes it easier to see what is going on.
> > -	drm_gem_shmem_free_object(obj);
> > +	if (bo->created) {
> > +		virtio_gpu_cmd_unref_resource(vgdev, bo);
> > +		/* completion handler calls virtio_gpu_cleanup_object() */
>
> nitpick: we don't need this comment when virtio_gpu_cmd_unref_cb is
> defined by this file and passed to virtio_gpu_cmd_unref_resource.
I want virtio_gpu_cmd_unref_cb + virtio_gpu_cmd_unref_resource to be
placed next to each other so it is easier to see how they work hand in
hand.
> I happen to be looking at our error handling paths.  I think we want
> virtio_gpu_queue_fenced_ctrl_buffer to call vbuf->resp_cb on errors.
/me was thinking about that too. Yes, we will need either that, or a separate vbuf->error_cb callback. That'll be another patch though.
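A minimal sketch of the resp_cb-on-error idea (hypothetical code, not
part of this series; it assumes the driver's existing vqs_ready flag
and the static free_vbuf() helper):

static void virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
						struct virtio_gpu_vbuffer *vbuf,
						struct virtio_gpu_ctrl_hdr *hdr,
						struct virtio_gpu_fence *fence)
{
	if (!vgdev->vqs_ready) {
		/* error path: run the callback so vbuf-owned state (like
		 * the bo waiting for virtio_gpu_cleanup_object()) is still
		 * cleaned up, then release the vbuf itself. */
		if (vbuf->resp_cb)
			vbuf->resp_cb(vgdev, vbuf);
		free_vbuf(vgdev, vbuf);
		return;
	}

	/* ... normal fenced queueing path, unchanged ... */
}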
> > +	/*
> > +	 * We are in the release callback and do NOT want to refcount
> > +	 * bo, so do NOT use virtio_gpu_array_add_obj().
> > +	 */
> > +	vbuf->objs = virtio_gpu_array_alloc(1);
> > +	vbuf->objs->objs[0] = &bo->base.base;
>
> This is an abuse of obj array.  Add "void *private_data;" to
> virtio_gpu_vbuffer and use that maybe?
I'd name that *cb_data, but yes, that makes sense.
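A minimal sketch of that cb_data variant (hypothetical, not part of
this series):

/* in struct virtio_gpu_vbuffer (virtgpu_drv.h): */
	virtio_gpu_resp_cb resp_cb;
	void *cb_data;		/* private data for resp_cb, no reference held */

/* the unref path then needs no one-entry objs array: */
static void virtio_gpu_cmd_unref_cb(struct virtio_gpu_device *vgdev,
				    struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_object *bo = vbuf->cb_data;

	vbuf->cb_data = NULL;
	virtio_gpu_cleanup_object(bo);
}

/* ... and virtio_gpu_cmd_unref_resource() simply sets: */
	vbuf->cb_data = bo;	/* deliberately unreferenced, see above */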
cheers, Gerd
On Wed, Feb 5, 2020 at 10:43 PM Gerd Hoffmann <kraxel@redhat.com> wrote:
> > I happen to be looking at our error handling paths.  I think we want
> > virtio_gpu_queue_fenced_ctrl_buffer to call vbuf->resp_cb on errors.
>
> /me was thinking about that too.  Yes, we will need either that, or a
> separate vbuf->error_cb callback.  That'll be another patch though.
Or the new virtio_gpu_queue_ctrl_sgs can return errors rather than eating errors.
Yeah, that should be another patch.
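A minimal sketch of that variant (hypothetical; it assumes a low-level
helper of roughly this shape, the real virtio_gpu_queue_ctrl_sgs may
differ):

static int virtio_gpu_queue_ctrl_sgs(struct virtio_gpu_device *vgdev,
				     struct virtio_gpu_vbuffer *vbuf,
				     struct scatterlist **sgs,
				     int outcnt, int incnt)
{
	struct virtqueue *vq = vgdev->ctrlq.vq;
	int ret;

	ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
	if (ret < 0)
		return ret;	/* propagate instead of eating the error */

	virtqueue_kick(vq);
	return 0;
}

Callers could then run vbuf->resp_cb (or a future error_cb) and free
the vbuf when this fails.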
> > > +	vbuf->objs = virtio_gpu_array_alloc(1);
> > > +	vbuf->objs->objs[0] = &bo->base.base;
> >
> > This is an abuse of obj array.  Add "void *private_data;" to
> > virtio_gpu_vbuffer and use that maybe?
>
> I'd name that *cb_data, but yes, that makes sense.
Sounds great.
[PATCH 3/4] drm/virtio: move mapping teardown to virtio_gpu_cleanup_object()

Stop sending DETACH_BACKING commands, that will happen anyway when
releasing resources via UNREF.  Handle guest-side cleanup in
virtio_gpu_cleanup_object(), called when the host has finished
processing the UNREF command.
Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
---
 drivers/gpu/drm/virtio/virtgpu_drv.h    |  2 --
 drivers/gpu/drm/virtio/virtgpu_object.c | 14 ++++++--
 drivers/gpu/drm/virtio/virtgpu_vq.c     | 46 -------------------------
 3 files changed, 12 insertions(+), 50 deletions(-)
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index 372dd248cf02..15fb3c12f22f 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -280,8 +280,6 @@ void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
 int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
 			     struct virtio_gpu_object *obj,
 			     struct virtio_gpu_fence *fence);
-void virtio_gpu_object_detach(struct virtio_gpu_device *vgdev,
-			      struct virtio_gpu_object *obj);
 int virtio_gpu_attach_status_page(struct virtio_gpu_device *vgdev);
 int virtio_gpu_detach_status_page(struct virtio_gpu_device *vgdev);
 void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c
index 28a161af7503..bce2b3d843fe 100644
--- a/drivers/gpu/drm/virtio/virtgpu_object.c
+++ b/drivers/gpu/drm/virtio/virtgpu_object.c
@@ -23,6 +23,7 @@
  * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  */
 
+#include <linux/dma-mapping.h>
 #include <linux/moduleparam.h>
 
 #include "virtgpu_drv.h"
@@ -65,6 +66,17 @@ void virtio_gpu_cleanup_object(struct virtio_gpu_object *bo)
 {
 	struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;
 
+	if (bo->pages) {
+		if (bo->mapped) {
+			dma_unmap_sg(vgdev->vdev->dev.parent,
+				     bo->pages->sgl, bo->mapped,
+				     DMA_TO_DEVICE);
+			bo->mapped = 0;
+		}
+		sg_free_table(bo->pages);
+		bo->pages = NULL;
+		drm_gem_shmem_unpin(&bo->base.base);
+	}
 	virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
 	drm_gem_shmem_free_object(&bo->base.base);
 }
@@ -74,8 +86,6 @@ static void virtio_gpu_free_object(struct drm_gem_object *obj)
 	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
 	struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;
 
-	if (bo->pages)
-		virtio_gpu_object_detach(vgdev, bo);
 	if (bo->created) {
 		virtio_gpu_cmd_unref_resource(vgdev, bo);
 		/* completion handler calls virtio_gpu_cleanup_object() */
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index 6e8097e4c214..e258186bedb2 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -538,22 +538,6 @@ void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
 	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
 }
 
-static void virtio_gpu_cmd_resource_inval_backing(struct virtio_gpu_device *vgdev,
-						  uint32_t resource_id,
-						  struct virtio_gpu_fence *fence)
-{
-	struct virtio_gpu_resource_detach_backing *cmd_p;
-	struct virtio_gpu_vbuffer *vbuf;
-
-	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
-	memset(cmd_p, 0, sizeof(*cmd_p));
-
-	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING);
-	cmd_p->resource_id = cpu_to_le32(resource_id);
-
-	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
-}
-
 void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
 				uint32_t scanout_id, uint32_t resource_id,
 				uint32_t width, uint32_t height,
@@ -1148,36 +1132,6 @@ int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
 	return 0;
 }
 
-void virtio_gpu_object_detach(struct virtio_gpu_device *vgdev,
-			      struct virtio_gpu_object *obj)
-{
-	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
-
-	if (WARN_ON_ONCE(!obj->pages))
-		return;
-
-	if (use_dma_api && obj->mapped) {
-		struct virtio_gpu_fence *fence = virtio_gpu_fence_alloc(vgdev);
-
-		/* detach backing and wait for the host process it ... */
-		virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, fence);
-		dma_fence_wait(&fence->f, true);
-		dma_fence_put(&fence->f);
-
-		/* ... then tear down iommu mappings */
-		dma_unmap_sg(vgdev->vdev->dev.parent,
-			     obj->pages->sgl, obj->mapped,
-			     DMA_TO_DEVICE);
-		obj->mapped = 0;
-	} else {
-		virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, NULL);
-	}
-
-	sg_free_table(obj->pages);
-	obj->pages = NULL;
-
-	drm_gem_shmem_unpin(&obj->base.base);
-}
-
 void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
 			    struct virtio_gpu_output *output)
 {
[PATCH 4/4] drm/virtio: move virtio_gpu_mem_entry initialization to new function

Introduce new virtio_gpu_object_shmem_init() helper function which will
create the virtio_gpu_mem_entry array, containing the backing storage
information for the host.  For the most part this just moves code from
virtio_gpu_object_attach().
Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
---
 drivers/gpu/drm/virtio/virtgpu_drv.h    |  4 ++
 drivers/gpu/drm/virtio/virtgpu_object.c | 49 +++++++++++++++++++++++++
 drivers/gpu/drm/virtio/virtgpu_vq.c     | 49 ++-----------------------
 3 files changed, 56 insertions(+), 46 deletions(-)
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index 15fb3c12f22f..be62a7469b04 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -71,6 +71,10 @@ struct virtio_gpu_object {
 
 	struct sg_table *pages;
 	uint32_t mapped;
+
+	struct virtio_gpu_mem_entry *ents;
+	unsigned int nents;
+
 	bool dumb;
 	bool created;
 };
diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c
index bce2b3d843fe..4e82e269a1f4 100644
--- a/drivers/gpu/drm/virtio/virtgpu_object.c
+++ b/drivers/gpu/drm/virtio/virtgpu_object.c
@@ -121,6 +121,49 @@ struct drm_gem_object *virtio_gpu_create_object(struct drm_device *dev,
 	return &bo->base.base;
 }
 
+static int virtio_gpu_object_shmem_init(struct virtio_gpu_device *vgdev,
+					struct virtio_gpu_object *bo)
+{
+	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
+	struct scatterlist *sg;
+	int si, ret;
+
+	ret = drm_gem_shmem_pin(&bo->base.base);
+	if (ret < 0)
+		return -EINVAL;
+
+	bo->pages = drm_gem_shmem_get_sg_table(&bo->base.base);
+	if (bo->pages == NULL) {
+		drm_gem_shmem_unpin(&bo->base.base);
+		return -EINVAL;
+	}
+
+	if (use_dma_api) {
+		bo->mapped = dma_map_sg(vgdev->vdev->dev.parent,
+					bo->pages->sgl, bo->pages->nents,
+					DMA_TO_DEVICE);
+		bo->nents = bo->mapped;
+	} else {
+		bo->nents = bo->pages->nents;
+	}
+
+	bo->ents = kmalloc_array(bo->nents, sizeof(struct virtio_gpu_mem_entry),
+				 GFP_KERNEL);
+	if (!bo->ents) {
+		DRM_ERROR("failed to allocate ent list\n");
+		return -ENOMEM;
+	}
+
+	for_each_sg(bo->pages->sgl, sg, bo->nents, si) {
+		bo->ents[si].addr = cpu_to_le64(use_dma_api
+						? sg_dma_address(sg)
+						: sg_phys(sg));
+		bo->ents[si].length = cpu_to_le32(sg->length);
+		bo->ents[si].padding = 0;
+	}
+	return 0;
+}
+
 int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
 			     struct virtio_gpu_object_params *params,
 			     struct virtio_gpu_object **bo_ptr,
@@ -165,6 +208,12 @@ int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
 					  objs, fence);
 	}
 
+	ret = virtio_gpu_object_shmem_init(vgdev, bo);
+	if (ret != 0) {
+		virtio_gpu_free_object(&shmem_obj->base);
+		return ret;
+	}
+
 	ret = virtio_gpu_object_attach(vgdev, bo, NULL);
 	if (ret != 0) {
 		virtio_gpu_free_object(&shmem_obj->base);
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index e258186bedb2..7db91376f2f2 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -1081,54 +1081,11 @@ int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
 			     struct virtio_gpu_object *obj,
 			     struct virtio_gpu_fence *fence)
 {
-	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
-	struct virtio_gpu_mem_entry *ents;
-	struct scatterlist *sg;
-	int si, nents, ret;
-
-	if (WARN_ON_ONCE(!obj->created))
-		return -EINVAL;
-	if (WARN_ON_ONCE(obj->pages))
-		return -EINVAL;
-
-	ret = drm_gem_shmem_pin(&obj->base.base);
-	if (ret < 0)
-		return -EINVAL;
-
-	obj->pages = drm_gem_shmem_get_sg_table(&obj->base.base);
-	if (obj->pages == NULL) {
-		drm_gem_shmem_unpin(&obj->base.base);
-		return -EINVAL;
-	}
-
-	if (use_dma_api) {
-		obj->mapped = dma_map_sg(vgdev->vdev->dev.parent,
-					 obj->pages->sgl, obj->pages->nents,
-					 DMA_TO_DEVICE);
-		nents = obj->mapped;
-	} else {
-		nents = obj->pages->nents;
-	}
-
-	/* gets freed when the ring has consumed it */
-	ents = kmalloc_array(nents, sizeof(struct virtio_gpu_mem_entry),
-			     GFP_KERNEL);
-	if (!ents) {
-		DRM_ERROR("failed to allocate ent list\n");
-		return -ENOMEM;
-	}
-
-	for_each_sg(obj->pages->sgl, sg, nents, si) {
-		ents[si].addr = cpu_to_le64(use_dma_api
-					    ? sg_dma_address(sg)
-					    : sg_phys(sg));
-		ents[si].length = cpu_to_le32(sg->length);
-		ents[si].padding = 0;
-	}
-
 	virtio_gpu_cmd_resource_attach_backing(vgdev, obj->hw_res_handle,
-					       ents, nents,
+					       obj->ents, obj->nents,
 					       fence);
+	obj->ents = NULL;
+	obj->nents = 0;
 	return 0;
 }
On Wed, Feb 5, 2020 at 3:00 AM Gerd Hoffmann <kraxel@redhat.com> wrote:
> Introduce new virtio_gpu_object_shmem_init() helper function which will
> create the virtio_gpu_mem_entry array, containing the backing storage
> information for the host.  For the most part this just moves code from
> virtio_gpu_object_attach().
>
> [...]
>
> 	virtio_gpu_cmd_resource_attach_backing(vgdev, obj->hw_res_handle,
> -					       ents, nents,
> +					       obj->ents, obj->nents,
> 					       fence);
> +	obj->ents = NULL;
> +	obj->nents = 0;
Hm, if the entries are temporary, can we allocate and initialize them in this function?
virtio_gpu_object_shmem_init will just pin and map pages. Maybe rename it to virtio_gpu_object_pin_pages (and add a helper virtio_gpu_object_unpin_pages for use by virtio_gpu_cleanup_object).
Because we pin pages on object creation, virtio_gpu_gem_funcs does not need to provide the optional pin/unpin hooks.
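A minimal sketch of that split (the names are the reviewer's
suggestion; the bodies are adapted from virtio_gpu_object_shmem_init()
and virtio_gpu_cleanup_object() in this series, so treat it as
illustrative, not as the final patch):

static int virtio_gpu_object_pin_pages(struct virtio_gpu_device *vgdev,
				       struct virtio_gpu_object *bo)
{
	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
	int ret;

	ret = drm_gem_shmem_pin(&bo->base.base);
	if (ret < 0)
		return -EINVAL;

	bo->pages = drm_gem_shmem_get_sg_table(&bo->base.base);
	if (bo->pages == NULL) {
		drm_gem_shmem_unpin(&bo->base.base);
		return -EINVAL;
	}

	if (use_dma_api)
		bo->mapped = dma_map_sg(vgdev->vdev->dev.parent,
					bo->pages->sgl, bo->pages->nents,
					DMA_TO_DEVICE);
	return 0;
}

static void virtio_gpu_object_unpin_pages(struct virtio_gpu_device *vgdev,
					  struct virtio_gpu_object *bo)
{
	if (bo->mapped) {
		dma_unmap_sg(vgdev->vdev->dev.parent,
			     bo->pages->sgl, bo->mapped,
			     DMA_TO_DEVICE);
		bo->mapped = 0;
	}
	sg_free_table(bo->pages);
	bo->pages = NULL;
	drm_gem_shmem_unpin(&bo->base.base);
}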
Hi,
> > 	virtio_gpu_cmd_resource_attach_backing(vgdev, obj->hw_res_handle,
> > -					       ents, nents,
> > +					       obj->ents, obj->nents,
> > 					       fence);
> > +	obj->ents = NULL;
> > +	obj->nents = 0;
>
> Hm, if the entries are temporary, can we allocate and initialize them
> in this function?
Well, the plan for CREATE_RESOURCE_BLOB is to use obj->ents too ...
cheers, Gerd
On Thu, Feb 6, 2020 at 12:55 AM Gerd Hoffmann <kraxel@redhat.com> wrote:
> > Hm, if the entries are temporary, can we allocate and initialize them
> > in this function?
>
> Well, the plan for CREATE_RESOURCE_BLOB is to use obj->ents too ...
Is obj->ents needed after CREATE_RESOURCE_BLOB? If not, having yet another helper
  ents = virtio_gpu_object_alloc_mem_entries(..., &count);
seems cleaner. We would also be able to get rid of virtio_gpu_object_attach.
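A minimal sketch of such a helper (hypothetical; it reuses the
entry-filling loop from this patch and returns the entry count through
a pointer, assuming pages are already pinned and, if needed, mapped):

static struct virtio_gpu_mem_entry *
virtio_gpu_object_alloc_mem_entries(struct virtio_gpu_device *vgdev,
				    struct virtio_gpu_object *bo,
				    unsigned int *count)
{
	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
	struct virtio_gpu_mem_entry *ents;
	struct scatterlist *sg;
	int si;

	*count = use_dma_api ? bo->mapped : bo->pages->nents;
	ents = kmalloc_array(*count, sizeof(*ents), GFP_KERNEL);
	if (!ents)
		return NULL;

	for_each_sg(bo->pages->sgl, sg, *count, si) {
		ents[si].addr = cpu_to_le64(use_dma_api
					    ? sg_dma_address(sg)
					    : sg_phys(sg));
		ents[si].length = cpu_to_le32(sg->length);
		ents[si].padding = 0;
	}
	return ents;
}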