Looks like for correct debugging we need to know the scheduler even earlier. So move picking a rq for an entity into job creation.
Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/scheduler/gpu_scheduler.c | 50 ++++++++++++++++++++-----------
 drivers/gpu/drm/scheduler/sched_fence.c   |  2 +-
 2 files changed, 33 insertions(+), 19 deletions(-)
diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c
index bd7883d1b964..bb2bd4c07e85 100644
--- a/drivers/gpu/drm/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c
@@ -552,6 +552,34 @@ drm_sched_entity_pop_job(struct drm_sched_entity *entity)
         return sched_job;
 }
 
+/**
+ * drm_sched_entity_select_rq - select a new rq for the entity
+ *
+ * @entity: scheduler entity
+ *
+ * Check all prerequisites and select a new rq for the entity for load
+ * balancing.
+ */
+static void drm_sched_entity_select_rq(struct drm_sched_entity *entity)
+{
+        struct dma_fence *fence;
+        struct drm_sched_rq *rq;
+
+        if (!spsc_queue_count(&entity->job_queue) == 0 ||
+            entity->num_rq_list <= 1)
+                return;
+
+        fence = READ_ONCE(entity->last_scheduled);
+        if (fence && !dma_fence_is_signaled(fence))
+                return;
+
+        rq = drm_sched_entity_get_free_sched(entity);
+        spin_lock(&entity->rq_lock);
+        drm_sched_rq_remove_entity(entity->rq, entity);
+        entity->rq = rq;
+        spin_unlock(&entity->rq_lock);
+}
+
 /**
  * drm_sched_entity_push_job - Submit a job to the entity's job queue
  *
@@ -567,25 +595,8 @@ drm_sched_entity_pop_job(struct drm_sched_entity *entity)
 void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
                                struct drm_sched_entity *entity)
 {
-        struct drm_sched_rq *rq = entity->rq;
         bool first;
 
-        first = spsc_queue_count(&entity->job_queue) == 0;
-        if (first && (entity->num_rq_list > 1)) {
-                struct dma_fence *fence;
-
-                fence = READ_ONCE(entity->last_scheduled);
-                if (fence == NULL || dma_fence_is_signaled(fence)) {
-                        rq = drm_sched_entity_get_free_sched(entity);
-                        spin_lock(&entity->rq_lock);
-                        drm_sched_rq_remove_entity(entity->rq, entity);
-                        entity->rq = rq;
-                        spin_unlock(&entity->rq_lock);
-                }
-        }
-
-        sched_job->sched = entity->rq->sched;
-        sched_job->s_fence->sched = entity->rq->sched;
         trace_drm_sched_job(sched_job, entity);
         atomic_inc(&entity->rq->sched->num_jobs);
         WRITE_ONCE(entity->last_user, current->group_leader);
@@ -790,7 +801,10 @@ int drm_sched_job_init(struct drm_sched_job *job,
                        struct drm_sched_entity *entity,
                        void *owner)
 {
-        struct drm_gpu_scheduler *sched = entity->rq->sched;
+        struct drm_gpu_scheduler *sched;
+
+        drm_sched_entity_select_rq(entity);
+        sched = entity->rq->sched;
 
         job->sched = sched;
         job->entity = entity;
diff --git a/drivers/gpu/drm/scheduler/sched_fence.c b/drivers/gpu/drm/scheduler/sched_fence.c
index 6dab18d288d7..4029312fdd81 100644
--- a/drivers/gpu/drm/scheduler/sched_fence.c
+++ b/drivers/gpu/drm/scheduler/sched_fence.c
@@ -172,7 +172,7 @@ struct drm_sched_fence *drm_sched_fence_create(struct drm_sched_entity *entity,
                 return NULL;
 
         fence->owner = owner;
-        fence->sched = NULL;
+        fence->sched = entity->rq->sched;
         spin_lock_init(&fence->lock);
 
         seq = atomic_inc_return(&entity->fence_seq);
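
For illustration only, not part of the patch: a minimal sketch of a driver-side submission path under this change. example_submit() is a hypothetical driver function; drm_sched_job_init() and drm_sched_entity_push_job() are the scheduler entry points touched above, and the sketch assumes <drm/gpu_scheduler.h>.

    #include <drm/gpu_scheduler.h>

    static int example_submit(struct drm_sched_job *job,
                              struct drm_sched_entity *entity, void *owner)
    {
            int r;

            /* With this patch the rq (and thus the scheduler) is picked
             * here, so job->sched and the fence's sched are valid already. */
            r = drm_sched_job_init(job, entity, owner);
            if (r)
                    return r;

            /* ... driver-specific setup of the job goes here ... */

            /* push_job no longer re-selects the rq when the queue was empty. */
            drm_sched_entity_push_job(job, entity);
            return 0;
    }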
On Wed, Aug 8, 2018 at 4:44 PM Christian König <ckoenig.leichtzumerken@gmail.com> wrote:
> Looks like for correct debugging we need to know the scheduler even
> earlier. So move picking a rq for an entity into job creation.
>
> Signed-off-by: Christian König <christian.koenig@amd.com>
> ---
>  drivers/gpu/drm/scheduler/gpu_scheduler.c | 50 ++++++++++++++++++++-----------
>  drivers/gpu/drm/scheduler/sched_fence.c   |  2 +-
>  2 files changed, 33 insertions(+), 19 deletions(-)
>
> diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c
> index bd7883d1b964..bb2bd4c07e85 100644
> --- a/drivers/gpu/drm/scheduler/gpu_scheduler.c
> +++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c
> @@ -552,6 +552,34 @@ drm_sched_entity_pop_job(struct drm_sched_entity *entity)
>          return sched_job;
>  }
>
> +/**
> + * drm_sched_entity_select_rq - select a new rq for the entity
> + *
> + * @entity: scheduler entity
> + *
> + * Check all prerequisites and select a new rq for the entity for load
> + * balancing.
> + */
> +static void drm_sched_entity_select_rq(struct drm_sched_entity *entity)
> +{
> +        struct dma_fence *fence;
> +        struct drm_sched_rq *rq;
> +
> +        if (!spsc_queue_count(&entity->job_queue) == 0 ||
> +            entity->num_rq_list <= 1)
> +                return;
> +
> +        fence = READ_ONCE(entity->last_scheduled);
> +        if (fence && !dma_fence_is_signaled(fence))
> +                return;
> +
> +        rq = drm_sched_entity_get_free_sched(entity);
We can add something like this here to avoid redundant reschedules:

        if (rq == entity->rq)
                return;

But then again, that is only a slight improvement over the original code, so we might as well move it to a separate patch.
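
A rough sketch only (untested), i.e. the drm_sched_entity_select_rq() from this patch with that early-out slotted in after picking the least loaded sched:

    static void drm_sched_entity_select_rq(struct drm_sched_entity *entity)
    {
            struct dma_fence *fence;
            struct drm_sched_rq *rq;

            /* Only rebalance when the job queue is empty and there is a choice. */
            if (!spsc_queue_count(&entity->job_queue) == 0 ||
                entity->num_rq_list <= 1)
                    return;

            fence = READ_ONCE(entity->last_scheduled);
            if (fence && !dma_fence_is_signaled(fence))
                    return;

            rq = drm_sched_entity_get_free_sched(entity);

            /* Suggested early-out: nothing to do if we already sit on that rq. */
            if (rq == entity->rq)
                    return;

            spin_lock(&entity->rq_lock);
            drm_sched_rq_remove_entity(entity->rq, entity);
            entity->rq = rq;
            spin_unlock(&entity->rq_lock);
    }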
With or without this change the patch is Reviewed-by: Nayan Deshmukh <nayan26deshmukh@gmail.com>
> +        spin_lock(&entity->rq_lock);
> +        drm_sched_rq_remove_entity(entity->rq, entity);
> +        entity->rq = rq;
> +        spin_unlock(&entity->rq_lock);
> +}
> +
>  /**
>   * drm_sched_entity_push_job - Submit a job to the entity's job queue
>   *
> @@ -567,25 +595,8 @@ drm_sched_entity_pop_job(struct drm_sched_entity *entity)
>  void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
>                                 struct drm_sched_entity *entity)
>  {
> -        struct drm_sched_rq *rq = entity->rq;
>          bool first;
>
> -        first = spsc_queue_count(&entity->job_queue) == 0;
> -        if (first && (entity->num_rq_list > 1)) {
> -                struct dma_fence *fence;
> -
> -                fence = READ_ONCE(entity->last_scheduled);
> -                if (fence == NULL || dma_fence_is_signaled(fence)) {
> -                        rq = drm_sched_entity_get_free_sched(entity);
> -                        spin_lock(&entity->rq_lock);
> -                        drm_sched_rq_remove_entity(entity->rq, entity);
> -                        entity->rq = rq;
> -                        spin_unlock(&entity->rq_lock);
> -                }
> -        }
> -
> -        sched_job->sched = entity->rq->sched;
> -        sched_job->s_fence->sched = entity->rq->sched;
>          trace_drm_sched_job(sched_job, entity);
>          atomic_inc(&entity->rq->sched->num_jobs);
>          WRITE_ONCE(entity->last_user, current->group_leader);
> @@ -790,7 +801,10 @@ int drm_sched_job_init(struct drm_sched_job *job,
>                         struct drm_sched_entity *entity,
>                         void *owner)
>  {
> -        struct drm_gpu_scheduler *sched = entity->rq->sched;
> +        struct drm_gpu_scheduler *sched;
> +
> +        drm_sched_entity_select_rq(entity);
> +        sched = entity->rq->sched;
>
>          job->sched = sched;
>          job->entity = entity;
> diff --git a/drivers/gpu/drm/scheduler/sched_fence.c b/drivers/gpu/drm/scheduler/sched_fence.c
> index 6dab18d288d7..4029312fdd81 100644
> --- a/drivers/gpu/drm/scheduler/sched_fence.c
> +++ b/drivers/gpu/drm/scheduler/sched_fence.c
> @@ -172,7 +172,7 @@ struct drm_sched_fence *drm_sched_fence_create(struct drm_sched_entity *entity,
>                  return NULL;
>
>          fence->owner = owner;
> -        fence->sched = NULL;
> +        fence->sched = entity->rq->sched;
>          spin_lock_init(&fence->lock);
>
>          seq = atomic_inc_return(&entity->fence_seq);
> --
> 2.14.1