2016-12-13 Gerd Hoffmann kraxel@redhat.com:
Hi,
+struct virtio_gpu_fence *virtio_gpu_fence_alloc(struct virtio_gpu_device *vgdev)
+{
- struct virtio_gpu_fence_driver *drv = &vgdev->fence_drv;
- struct virtio_gpu_fence *fence;
- unsigned long irq_flags;
- fence = kmalloc(sizeof(struct virtio_gpu_fence), GFP_ATOMIC);
- if (!fence)
return NULL;
- spin_lock_irqsave(&drv->lock, irq_flags);
- fence->drv = drv;
- fence->seq = ++drv->sync_seq;
- dma_fence_init(&fence->f, &virtio_fence_ops, &drv->lock,
drv->context, fence->seq);
- spin_unlock_irqrestore(&drv->lock, irq_flags);
seq assignment ...
- return fence;
+}
int virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev, struct virtio_gpu_ctrl_hdr *cmd_hdr,
-                          struct virtio_gpu_fence **fence)
+                          struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_fence_driver *drv = &vgdev->fence_drv;
	unsigned long irq_flags;
- *fence = kmalloc(sizeof(struct virtio_gpu_fence), GFP_ATOMIC);
- if ((*fence) == NULL)
return -ENOMEM;
- spin_lock_irqsave(&drv->lock, irq_flags);
- (*fence)->drv = drv;
- (*fence)->seq = ++drv->sync_seq;
- dma_fence_init(&(*fence)->f, &virtio_fence_ops, &drv->lock,
drv->context, (*fence)->seq);
... must stay here. Otherwise requests can be submitted to the virt queue with fence sequence numbers out of order.
Yes, makes sense. So I'll just leave the kmalloc in there.
Gustavo