On Sun, 2018-01-07 at 15:51 +0100, Lucas Stach wrote:
This moves away from using the internal seqno as the userspace fence reference. By moving to a generic ID, we can later replace the internal fence by something different than the etnaviv seqno fence.
Signed-off-by: Lucas Stach <l.stach@pengutronix.de>
Reviewed-by: Philipp Zabel <p.zabel@pengutronix.de>
regards Philipp
drivers/gpu/drm/etnaviv/etnaviv_gem.h | 1 + drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c | 2 +- drivers/gpu/drm/etnaviv/etnaviv_gpu.c | 56 +++++++++++++++++++--------- drivers/gpu/drm/etnaviv/etnaviv_gpu.h | 1 + 4 files changed, 42 insertions(+), 18 deletions(-)
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.h b/drivers/gpu/drm/etnaviv/etnaviv_gem.h index be72a9833f2b..c30964152381 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem.h +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.h @@ -104,6 +104,7 @@ struct etnaviv_gem_submit { struct kref refcount; struct etnaviv_gpu *gpu; struct dma_fence *out_fence, *in_fence;
+	int out_fence_id;
 	struct list_head node; /* GPU active submit list */
 	struct etnaviv_cmdbuf cmdbuf;
 	bool runtime_resumed;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c index 1f8202bca061..919c8dc39f32 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c @@ -563,7 +563,7 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data, }
args->fence_fd = out_fence_fd;
-	args->fence = submit->out_fence->seqno;
+	args->fence = submit->out_fence_id;
err_submit_objects: etnaviv_submit_put(submit); diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c index 21d0d22f1168..935d99be748e 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c @@ -1010,6 +1010,7 @@ static void hangcheck_disable(struct etnaviv_gpu *gpu) /* fence object management */ struct etnaviv_fence { struct etnaviv_gpu *gpu;
+	int id;
 	struct dma_fence base;
};
@@ -1046,6 +1047,11 @@ static void etnaviv_fence_release(struct dma_fence *fence) { struct etnaviv_fence *f = to_etnaviv_fence(fence);
+	/* first remove from IDR, so fence can not be looked up anymore */
+	mutex_lock(&f->gpu->lock);
+	idr_remove(&f->gpu->fence_idr, f->id);
+	mutex_unlock(&f->gpu->lock);
+
 	kfree_rcu(f, base.rcu);
}
@@ -1072,6 +1078,11 @@ static struct dma_fence *etnaviv_gpu_fence_alloc(struct etnaviv_gpu *gpu) if (!f) return NULL;
+	f->id = idr_alloc_cyclic(&gpu->fence_idr, &f->base, 0, INT_MAX,
+				 GFP_KERNEL);
+	if (f->id < 0) {
+		kfree(f);
+		return NULL;
+	}
+
 	f->gpu = gpu;
dma_fence_init(&f->base, &etnaviv_fence_ops, &gpu->fence_spinlock,
@@ -1220,35 +1231,43 @@ static void retire_worker(struct work_struct *work) }
 int etnaviv_gpu_wait_fence_interruptible(struct etnaviv_gpu *gpu,
-	u32 fence, struct timespec *timeout)
+	u32 id, struct timespec *timeout)
 {
+	struct dma_fence *fence;
 	int ret;
 
-	if (fence_after(fence, gpu->next_fence)) {
-		DRM_ERROR("waiting on invalid fence: %u (of %u)\n",
-				fence, gpu->next_fence);
-		return -EINVAL;
-	}
+	/*
+	 * Look up the fence and take a reference. The mutex only synchronizes
+	 * the IDR lookup with the fence release. We might still find a fence
+	 * whose refcount has already dropped to zero. dma_fence_get_rcu
+	 * pretends we didn't find a fence in that case.
+	 */
+	ret = mutex_lock_interruptible(&gpu->lock);
+	if (ret)
+		return ret;
+	fence = idr_find(&gpu->fence_idr, id);
+	if (fence)
+		fence = dma_fence_get_rcu(fence);
+	mutex_unlock(&gpu->lock);
+
+	if (!fence)
+		return 0;
 
 	if (!timeout) {
 		/* No timeout was requested: just test for completion */
-		ret = fence_completed(gpu, fence) ? 0 : -EBUSY;
+		ret = dma_fence_is_signaled(fence) ? 0 : -EBUSY;
 	} else {
 		unsigned long remaining = etnaviv_timeout_to_jiffies(timeout);
 
-		ret = wait_event_interruptible_timeout(gpu->fence_event,
-			fence_completed(gpu, fence),
-			remaining);
-		if (ret == 0) {
-			DBG("timeout waiting for fence: %u (retired: %u completed: %u)",
-				fence, gpu->retired_fence,
-				gpu->completed_fence);
+		ret = dma_fence_wait_timeout(fence, true, remaining);
+		if (ret == 0)
 			ret = -ETIMEDOUT;
-		} else if (ret != -ERESTARTSYS) {
+		else if (ret != -ERESTARTSYS)
 			ret = 0;
-		}
 	}
 
+	dma_fence_put(fence);
 	return ret;
 }
@@ -1380,6 +1399,7 @@ int etnaviv_gpu_submit(struct etnaviv_gpu *gpu, ret = -ENOMEM; goto out_unlock; }
+	submit->out_fence_id = to_etnaviv_fence(submit->out_fence)->id;
gpu->active_fence = submit->out_fence->seqno;
@@ -1484,7 +1504,6 @@ static irqreturn_t irq_handler(int irq, void *data) continue;
gpu->event[event].fence = NULL;
-		dma_fence_signal(fence);
 
 		/*
 		 * Events can be processed out of order.  Eg,
@@ -1497,6 +1516,7 @@ static irqreturn_t irq_handler(int irq, void *data) */ if (fence_after(fence->seqno, gpu->completed_fence)) gpu->completed_fence = fence->seqno;
+		dma_fence_signal(fence);
+
 		event_free(gpu, event);
}
@@ -1694,6 +1714,7 @@ static int etnaviv_gpu_bind(struct device *dev, struct device *master,
gpu->drm = drm; gpu->fence_context = dma_fence_context_alloc(1);
+	idr_init(&gpu->fence_idr);
 	spin_lock_init(&gpu->fence_spinlock);
INIT_LIST_HEAD(&gpu->active_submit_list);
@@ -1745,6 +1766,7 @@ static void etnaviv_gpu_unbind(struct device *dev, struct device *master, }
gpu->drm = NULL;
+	idr_destroy(&gpu->fence_idr);
if (IS_ENABLED(CONFIG_DRM_ETNAVIV_THERMAL)) thermal_cooling_device_unregister(gpu->cooling);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h index 7623905210dc..0170eb0a0923 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h +++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h @@ -128,6 +128,7 @@ struct etnaviv_gpu { u32 idle_mask;
/* Fencing support */
+	struct idr fence_idr;
 	u32 next_fence;
 	u32 active_fence;
 	u32 completed_fence;