On Tue, Sep 08, 2020 at 08:47:41AM +0200, Gerd Hoffmann wrote:
> These days dma ops can be overridden per device, and the virtio core
"can be overridden" or "are"? The comment above vring_use_dma_api() suggests that's not yet done. If that's wrong then I think updating the comment would be really good. -Daniel
> uses that to handle the dma quirks transparently for the rest of the
> kernel.  So we can drop the virtio_has_dma_quirk() checks, just use the
> dma api unconditionally and depend on the virtio core having setup
> dma_ops as needed.
> Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
> ---
>  drivers/gpu/drm/virtio/virtgpu_object.c | 19 ++++++-------------
>  drivers/gpu/drm/virtio/virtgpu_vq.c     | 16 ++++++----------
>  2 files changed, 12 insertions(+), 23 deletions(-)
> diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c
> index 729f98ad7c02..9c35ce64ff9e 100644
> --- a/drivers/gpu/drm/virtio/virtgpu_object.c
> +++ b/drivers/gpu/drm/virtio/virtgpu_object.c
> @@ -141,7 +141,6 @@ static int virtio_gpu_object_shmem_init(struct virtio_gpu_device *vgdev,
>  					struct virtio_gpu_mem_entry **ents,
>  					unsigned int *nents)
>  {
> -	bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
>  	struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
>  	struct scatterlist *sg;
>  	int si, ret;
> @@ -162,15 +161,11 @@ static int virtio_gpu_object_shmem_init(struct virtio_gpu_device *vgdev,
>  		return -EINVAL;
>  	}
> 
> -	if (use_dma_api) {
> -		shmem->mapped = dma_map_sg(vgdev->vdev->dev.parent,
> -					   shmem->pages->sgl,
> -					   shmem->pages->nents,
> -					   DMA_TO_DEVICE);
> -		*nents = shmem->mapped;
> -	} else {
> -		*nents = shmem->pages->nents;
> -	}
> +	shmem->mapped = dma_map_sg(vgdev->vdev->dev.parent,
> +				   shmem->pages->sgl,
> +				   shmem->pages->nents,
> +				   DMA_TO_DEVICE);
> +	*nents = shmem->mapped;
> 
>  	*ents = kmalloc_array(*nents, sizeof(struct virtio_gpu_mem_entry),
>  			      GFP_KERNEL);
> @@ -180,9 +175,7 @@ static int virtio_gpu_object_shmem_init(struct virtio_gpu_device *vgdev,
>  	}
> 
>  	for_each_sg(shmem->pages->sgl, sg, *nents, si) {
> -		(*ents)[si].addr = cpu_to_le64(use_dma_api
> -					       ? sg_dma_address(sg)
> -					       : sg_phys(sg));
> +		(*ents)[si].addr = cpu_to_le64(sg_dma_address(sg));
>  		(*ents)[si].length = cpu_to_le32(sg->length);
>  		(*ents)[si].padding = 0;
>  	}
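As an aside for anyone who doesn't have the DMA API paged in: the new,
unconditional path above relies on the standard streaming-mapping pattern.
Illustrative sketch only, with made-up names, not code from the patch:

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/printk.h>
#include <linux/scatterlist.h>

static int map_pages_for_device(struct device *dev, struct sg_table *sgt)
{
	struct scatterlist *sg;
	int mapped, i;

	/* dma_map_sg() returns the number of mapped entries, 0 on failure. */
	mapped = dma_map_sg(dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE);
	if (!mapped)
		return -ENOMEM;

	/* Only the first 'mapped' entries are valid after mapping. */
	for_each_sg(sgt->sgl, sg, mapped, i) {
		dma_addr_t addr = sg_dma_address(sg);
		unsigned int len = sg_dma_len(sg);

		/* hand addr/len to the device here */
		pr_debug("ent %d: dma addr %pad, len %u\n", i, &addr, len);
	}
	return mapped;
}

The point being that sg_dma_address() hands back whatever address the device
is supposed to use, whether that is a plain physical address, an IOMMU-mapped
one, or a bounce buffer, so the driver no longer needs the sg_phys() special
case.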
> diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
> index c93c2db35aaf..1c1d2834547d 100644
> --- a/drivers/gpu/drm/virtio/virtgpu_vq.c
> +++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
> @@ -599,13 +599,11 @@ void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
>  	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
>  	struct virtio_gpu_transfer_to_host_2d *cmd_p;
>  	struct virtio_gpu_vbuffer *vbuf;
> -	bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
>  	struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
> 
> -	if (use_dma_api)
> -		dma_sync_sg_for_device(vgdev->vdev->dev.parent,
> -				       shmem->pages->sgl, shmem->pages->nents,
> -				       DMA_TO_DEVICE);
> +	dma_sync_sg_for_device(vgdev->vdev->dev.parent,
> +			       shmem->pages->sgl, shmem->pages->nents,
> +			       DMA_TO_DEVICE);
> 
>  	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
>  	memset(cmd_p, 0, sizeof(*cmd_p));
> @@ -1015,13 +1013,11 @@ void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
>  	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
>  	struct virtio_gpu_transfer_host_3d *cmd_p;
>  	struct virtio_gpu_vbuffer *vbuf;
> -	bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
>  	struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
> 
> -	if (use_dma_api)
> -		dma_sync_sg_for_device(vgdev->vdev->dev.parent,
> -				       shmem->pages->sgl, shmem->pages->nents,
> -				       DMA_TO_DEVICE);
> +	dma_sync_sg_for_device(vgdev->vdev->dev.parent,
> +			       shmem->pages->sgl, shmem->pages->nents,
> +			       DMA_TO_DEVICE);
> 
>  	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
>  	memset(cmd_p, 0, sizeof(*cmd_p));
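And the matching sketch for the sync side, since the 2d/3d transfer paths now
always call it: for a long-lived streaming mapping the CPU keeps writing into,
you sync before each device access. Again only an illustration under the same
made-up names, not code from the patch:

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static void flush_pages_for_device(struct device *dev, struct sg_table *sgt)
{
	/*
	 * Make CPU writes since the last sync visible to the device.
	 * On cache-coherent systems this is (nearly) a no-op; otherwise
	 * it does the required cache maintenance or bounce-buffer copy.
	 */
	dma_sync_sg_for_device(dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE);
}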
> --
> 2.27.0