Make sure we access last_scheduled only after checking that there are no more jobs on the entity.
Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/scheduler/gpu_scheduler.c | 21 +++++++++++----------
 1 file changed, 11 insertions(+), 10 deletions(-)

diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c
index 8ee249886473..bd7883d1b964 100644
--- a/drivers/gpu/drm/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c
@@ -568,19 +568,20 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
 			       struct drm_sched_entity *entity)
 {
 	struct drm_sched_rq *rq = entity->rq;
-	bool first, reschedule, idle;
+	bool first;
 
-	idle = entity->last_scheduled == NULL ||
-	       dma_fence_is_signaled(entity->last_scheduled);
 	first = spsc_queue_count(&entity->job_queue) == 0;
-	reschedule = idle && first && (entity->num_rq_list > 1);
+	if (first && (entity->num_rq_list > 1)) {
+		struct dma_fence *fence;
 
-	if (reschedule) {
-		rq = drm_sched_entity_get_free_sched(entity);
-		spin_lock(&entity->rq_lock);
-		drm_sched_rq_remove_entity(entity->rq, entity);
-		entity->rq = rq;
-		spin_unlock(&entity->rq_lock);
+		fence = READ_ONCE(entity->last_scheduled);
+		if (fence == NULL || dma_fence_is_signaled(fence)) {
+			rq = drm_sched_entity_get_free_sched(entity);
+			spin_lock(&entity->rq_lock);
+			drm_sched_rq_remove_entity(entity->rq, entity);
+			entity->rq = rq;
+			spin_unlock(&entity->rq_lock);
+		}
 	}
 
 	sched_job->sched = entity->rq->sched;
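For readers skimming the hunk, here is roughly how the start of drm_sched_entity_push_job() reads with the change applied. It is reconstructed from the diff above (the rest of the function is elided), and the comments are my reading of the intent rather than part of the patch:

	struct drm_sched_rq *rq = entity->rq;
	bool first;

	/* Only look at last_scheduled once we know the job queue is empty. */
	first = spsc_queue_count(&entity->job_queue) == 0;
	if (first && (entity->num_rq_list > 1)) {
		struct dma_fence *fence;

		/* Read the pointer exactly once so it cannot change under us
		 * between the NULL check and the signaled check. */
		fence = READ_ONCE(entity->last_scheduled);
		if (fence == NULL || dma_fence_is_signaled(fence)) {
			/* The entity is idle, so it is safe to move it to
			 * another run queue. */
			rq = drm_sched_entity_get_free_sched(entity);
			spin_lock(&entity->rq_lock);
			drm_sched_rq_remove_entity(entity->rq, entity);
			entity->rq = rq;
			spin_unlock(&entity->rq_lock);
		}
	}

	sched_job->sched = entity->rq->sched;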
Ping, Nayan, any comments on that or can I commit it?
This is just a stripped down version of my original last_scheduled improvement patch.
Christian.
On 07.08.2018 14:54, Christian König wrote:
> Make sure we access last_scheduled only after checking that there are no more jobs on the entity.
>
> Signed-off-by: Christian König <christian.koenig@amd.com>
Hi Christian,
On Wed, Aug 8, 2018 at 4:20 PM Christian König <ckoenig.leichtzumerken@gmail.com> wrote:
> Ping, Nayan, any comments on that or can I commit it?
> This is just a stripped down version of my original last_scheduled improvement patch.
I had missed the mail yesterday. I was just looking at the code right now.
The code looks fine but I wanted to run it once before giving it a Rb.
I was wondering why you stripped it down. Was there some problem with the previous one?
Cheers, Nayan
On 08.08.2018 12:55, Nayan Deshmukh wrote:
> Hi Christian,
> On Wed, Aug 8, 2018 at 4:20 PM Christian König <ckoenig.leichtzumerken@gmail.com> wrote:
>> Ping, Nayan, any comments on that or can I commit it?
>> This is just a stripped down version of my original last_scheduled improvement patch.
> I had missed the mail yesterday. I was just looking at the code right now. The code looks fine but I wanted to run it once before giving it a Rb.
> I was wondering why you stripped it down. Was there some problem with the previous one?
No, not really. But this is a bug fix and bug fixes should be as simple as possible.
Improvements should come in separate patches. I didn't initially think that this is actually a fix.
BTW: Another small fix is required; it looks like we need the scheduler set correctly for debugging even earlier.
Christian.
On Wed, Aug 8, 2018 at 4:36 PM Christian König <ckoenig.leichtzumerken@gmail.com> wrote:
> On 08.08.2018 12:55, Nayan Deshmukh wrote:
>> Hi Christian,
>> On Wed, Aug 8, 2018 at 4:20 PM Christian König <ckoenig.leichtzumerken@gmail.com> wrote:
>>> Ping, Nayan, any comments on that or can I commit it?
>>> This is just a stripped down version of my original last_scheduled improvement patch.
>> I had missed the mail yesterday. I was just looking at the code right now. The code looks fine but I wanted to run it once before giving it a Rb.
>> I was wondering why you stripped it down. Was there some problem with the previous one?
> No, not really. But this is a bug fix and bug fixes should be as simple as possible.
> Improvements should come in separate patches. I didn't initially think that this is actually a fix.
> BTW: Another small fix is required; it looks like we need the scheduler set correctly for debugging even earlier.
Yes, I saw the mail earlier. I can work on the patch if you have not started working on it already.
Nayan
On Wed, Aug 8, 2018 at 4:42 PM Nayan Deshmukh <nayan26deshmukh@gmail.com> wrote:
> On Wed, Aug 8, 2018 at 4:36 PM Christian König <ckoenig.leichtzumerken@gmail.com> wrote:
>> On 08.08.2018 12:55, Nayan Deshmukh wrote:
>>> Hi Christian,
>>> On Wed, Aug 8, 2018 at 4:20 PM Christian König <ckoenig.leichtzumerken@gmail.com> wrote:
>>>> Ping, Nayan, any comments on that or can I commit it?
>>>> This is just a stripped down version of my original last_scheduled improvement patch.
>>> I had missed the mail yesterday. I was just looking at the code right now. The code looks fine but I wanted to run it once before giving it a Rb.
>>> I was wondering why you stripped it down. Was there some problem with the previous one?
>> No, not really. But this is a bug fix and bug fixes should be as simple as possible.
>> Improvements should come in separate patches. I didn't initially think that this is actually a fix.
>> BTW: Another small fix is required; it looks like we need the scheduler set correctly for debugging even earlier.
> Yes, I saw the mail earlier. I can work on the patch if you have not started working on it already.
I guess I was late for this. I will look at your patch now.
Nayan
On 07.08.2018 14:54, Christian König wrote:
> Make sure we access last_scheduled only after checking that there are no more jobs on the entity.
>
> Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Nayan Deshmukh <nayan26deshmukh@gmail.com>