These are the potential run queues on which the jobs from this entity can be scheduled. We will use this to do load balancing.
Signed-off-by: Nayan Deshmukh <nayan26deshmukh@gmail.com>
---
 drivers/gpu/drm/scheduler/gpu_scheduler.c | 8 ++++++++
 include/drm/gpu_scheduler.h               | 7 ++++++-
 2 files changed, 14 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c
index 3f2fc5e8242a..a3eacc35cf98 100644
--- a/drivers/gpu/drm/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c
@@ -179,6 +179,8 @@ int drm_sched_entity_init(struct drm_sched_entity *entity,
 			  unsigned int num_rq_list,
 			  atomic_t *guilty)
 {
+	int i;
+
 	if (!(entity && rq_list && num_rq_list > 0 && rq_list[0]))
 		return -EINVAL;
 
@@ -186,6 +188,11 @@ int drm_sched_entity_init(struct drm_sched_entity *entity,
 	INIT_LIST_HEAD(&entity->list);
 	entity->rq = rq_list[0];
 	entity->guilty = guilty;
+	entity->num_rq_list = num_rq_list;
+	entity->rq_list = kcalloc(num_rq_list, sizeof(struct drm_sched_rq *),
+				  GFP_KERNEL);
+	for (i = 0; i < num_rq_list; ++i)
+		entity->rq_list[i] = rq_list[i];
 	entity->last_scheduled = NULL;
 
 	spin_lock_init(&entity->rq_lock);
@@ -363,6 +370,7 @@ void drm_sched_entity_fini(struct drm_sched_entity *entity)
 
 	dma_fence_put(entity->last_scheduled);
 	entity->last_scheduled = NULL;
+	kfree(entity->rq_list);
 }
 EXPORT_SYMBOL(drm_sched_entity_fini);
 
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index 091b9afcd184..a60896222a3e 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -50,7 +50,10 @@ enum drm_sched_priority {
  *
  * @list: used to append this struct to the list of entities in the
  *        runqueue.
- * @rq: runqueue to which this entity belongs.
+ * @rq: runqueue on which this entity is currently scheduled.
+ * @rq_list: a list of run queues on which jobs from this entity can
+ *           be scheduled
+ * @num_rq_list: number of run queues in the rq_list
  * @rq_lock: lock to modify the runqueue to which this entity belongs.
  * @job_queue: the list of jobs of this entity.
  * @fence_seq: a linearly increasing seqno incremented with each
@@ -74,6 +77,8 @@ enum drm_sched_priority {
 struct drm_sched_entity {
 	struct list_head		list;
 	struct drm_sched_rq		*rq;
+	struct drm_sched_rq		**rq_list;
+	unsigned int			num_rq_list;
 	spinlock_t			rq_lock;
 
 	struct spsc_queue		job_queue;
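For illustration, a hypothetical driver-side sketch of the new contract (not part of this series): rq_list[0] becomes the entity's initial run queue and the remaining entries are candidates for later balancing. The sched0/sched1 names and the sched_rq/priority indexing are assumptions made for the example.

/* Hypothetical call site: give one entity a choice of two rings of the
 * same type so the scheduler can later balance between them. sched0 and
 * sched1 are assumed to be driver-owned drm_gpu_scheduler instances,
 * one per hardware ring.
 */
struct drm_sched_rq *rq_list[2];
int r;

rq_list[0] = &sched0->sched_rq[DRM_SCHED_PRIORITY_NORMAL];
rq_list[1] = &sched1->sched_rq[DRM_SCHED_PRIORITY_NORMAL];

r = drm_sched_entity_init(&entity, rq_list, ARRAY_SIZE(rq_list), NULL);
if (r)
	return r;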
Signed-off-by: Nayan Deshmukh <nayan26deshmukh@gmail.com>
---
 drivers/gpu/drm/scheduler/gpu_scheduler.c | 3 +++
 include/drm/gpu_scheduler.h               | 2 ++
 2 files changed, 5 insertions(+)

diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c
index a3eacc35cf98..375f6f7f6a93 100644
--- a/drivers/gpu/drm/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c
@@ -549,6 +549,7 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
 
 	trace_drm_sched_job(sched_job, entity);
 
+	atomic_inc(&entity->rq->sched->num_jobs);
 	first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);
 
 	/* first job wakes up scheduler */
@@ -836,6 +837,7 @@ static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
 
 	dma_fence_get(&s_fence->finished);
 	atomic_dec(&sched->hw_rq_count);
+	atomic_dec(&sched->num_jobs);
 	drm_sched_fence_finished(s_fence);
 
 	trace_drm_sched_process_job(s_fence);
@@ -953,6 +955,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
 	INIT_LIST_HEAD(&sched->ring_mirror_list);
 	spin_lock_init(&sched->job_list_lock);
 	atomic_set(&sched->hw_rq_count, 0);
+	atomic_set(&sched->num_jobs, 0);
 	atomic64_set(&sched->job_id_count, 0);
 
 	/* Each scheduler will run on a seperate kernel thread */
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index a60896222a3e..89881ce974a5 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -260,6 +260,7 @@ struct drm_sched_backend_ops {
  * @job_list_lock: lock to protect the ring_mirror_list.
  * @hang_limit: once the hangs by a job crosses this limit then it is marked
  *              guilty and it will be considered for scheduling further.
+ * @num_jobs: the number of jobs in queue in the scheduler
  *
  * One scheduler is implemented for each hardware ring.
  */
@@ -277,6 +278,7 @@ struct drm_gpu_scheduler {
 	struct list_head		ring_mirror_list;
 	spinlock_t			job_list_lock;
 	int				hang_limit;
+	atomic_t			num_jobs;
 };
 
 int drm_sched_init(struct drm_gpu_scheduler *sched,
On Wed, Aug 01, 2018 at 01:50:00PM +0530, Nayan Deshmukh wrote:
This needs a commit message.
Thanks, Ray
Yeah, I've actually added one before pushing it to amd-staging-drm-next.
But thanks for the reminder, wanted to note that to Nayan as well :)
Christian.
On 01.08.2018 15:15, Huang Rui wrote:
On Wed, Aug 01, 2018 at 01:50:00PM +0530, Nayan Deshmukh wrote:
This needs a commit message.
Thanks, Ray
On Wed, Aug 01, 2018 at 09:06:29PM +0800, Christian König wrote:
Yeah, I've actually added one before pushing it to amd-staging-drm-next.
But thanks for the reminder, wanted to note that to Nayan as well :)
Yes, a soft reminder to Nayan. Thanks Nayan for the contribution. :-)
Thanks, Ray
Thanks for the reminders. I felt that the commit header was sufficient, but I guess it didn't cover the motivation for the change.
Thanks Christian for adding the commit message.
Regards, Nayan
The function selects the run queue from the rq_list with the least load. The load is decided by the number of jobs in a scheduler.
v2: avoid using atomic read twice consecutively, instead store it locally
Signed-off-by: Nayan Deshmukh <nayan26deshmukh@gmail.com>
---
 drivers/gpu/drm/scheduler/gpu_scheduler.c | 25 +++++++++++++++++++++++++
 1 file changed, 25 insertions(+)

diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c
index 375f6f7f6a93..fb4e542660b0 100644
--- a/drivers/gpu/drm/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c
@@ -255,6 +255,31 @@ static bool drm_sched_entity_is_ready(struct drm_sched_entity *entity)
 	return true;
 }
 
+/**
+ * drm_sched_entity_get_free_sched - Get the rq from rq_list with least load
+ *
+ * @entity: scheduler entity
+ *
+ * Return the pointer to the rq with least load.
+ */
+static struct drm_sched_rq *
+drm_sched_entity_get_free_sched(struct drm_sched_entity *entity)
+{
+	struct drm_sched_rq *rq = NULL;
+	unsigned int min_jobs = UINT_MAX, num_jobs;
+	int i;
+
+	for (i = 0; i < entity->num_rq_list; ++i) {
+		num_jobs = atomic_read(&entity->rq_list[i]->sched->num_jobs);
+		if (num_jobs < min_jobs) {
+			min_jobs = num_jobs;
+			rq = entity->rq_list[i];
+		}
+	}
+
+	return rq;
+}
+
 static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
 					  struct dma_fence_cb *cb)
 {
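The v2 changelog line above refers to the atomic_read() in the selection loop: reading num_jobs twice for the same comparison could observe two different values. A minimal sketch of the difference, for illustration only (not from the series):

/* Racy pattern: the two atomic_read()s may return different values. */
if (atomic_read(&sched->num_jobs) < min_jobs)
	min_jobs = atomic_read(&sched->num_jobs);

/* v2 pattern: take one snapshot, then compare and store that snapshot. */
num_jobs = atomic_read(&sched->num_jobs);
if (num_jobs < min_jobs)
	min_jobs = num_jobs;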
Clarification question - if the run queues belong to different schedulers, they effectively point to different rings, which means we allow a drm_sched_entity to be moved (rescheduled) from one ring to another. I assume that was the idea in the first place: you have a set of HW rings and you can utilize any of them for your jobs (like compute rings). Correct?
Andrey
Yes, that is correct.
Nayan
Series is Acked-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
Andrey
Another big question: I agree the general idea is good, to balance scheduler load within the same ring family. But when jobs from the same entity run on different schedulers, that means a later job could complete ahead of an earlier one, right? That will break the fence design: a later fence must be signaled after the earlier fence in the same fence context.
Anything I missed?
Regards, David Zhou
Hi David,
Really good question. To avoid this scenario we do not move an entity which already has a job in the hardware queue. We only move entities whose last_scheduled fence has been signalled, which means that the last submitted job of this entity has finished executing.
Moving an entity which already has a job in the hardware queue would also hinder the dependency optimization that we are using, and hence would not lead to better performance anyway. I have talked about the issue in more detail here [1]. Please let me know if you have any more doubts regarding this.
Cheers, Nayan
[1] http://ndesh26.github.io/gsoc/2018/06/14/GSoC-Update-A-Curious-Case-of-Depen...
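For reference, the guard described here is the reschedule condition that patch 4 (below) adds to drm_sched_entity_push_job(): the entity is only moved when its last job has finished and nothing else is queued, so fences in the same context still signal in order.

	/* Excerpt from patch 4: pick a new rq only when nothing from
	 * this entity is queued or still running on the old scheduler.
	 */
	idle = entity->last_scheduled == NULL ||
	       dma_fence_is_signaled(entity->last_scheduled);
	first = spsc_queue_count(&entity->job_queue) == 0;
	reschedule = idle && first && (entity->num_rq_list > 1);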
On 2018-08-02 14:01, Nayan Deshmukh wrote:
Good handling, which I missed when reviewing them.
Cheers, David Zhou
This is the first attempt to move entities between schedulers to have dynamic load balancing. We only move entities with no jobs for now, as moving ones with jobs will lead to other complications, like ensuring that the other scheduler does not remove a job from the current entity while we are moving.
v2: remove unused variable and an unnecessary check
Signed-off-by: Nayan Deshmukh <nayan26deshmukh@gmail.com>
---
 drivers/gpu/drm/scheduler/gpu_scheduler.c | 22 ++++++++++++++++++----
 1 file changed, 18 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c
index fb4e542660b0..087fa479f7e0 100644
--- a/drivers/gpu/drm/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c
@@ -539,6 +539,8 @@ drm_sched_entity_pop_job(struct drm_sched_entity *entity)
 	if (!sched_job)
 		return NULL;
 
+	sched_job->sched = sched;
+	sched_job->s_fence->sched = sched;
 	while ((entity->dependency = sched->ops->dependency(sched_job, entity)))
 		if (drm_sched_entity_add_dependency_cb(entity))
 			return NULL;
@@ -569,11 +571,23 @@ drm_sched_entity_pop_job(struct drm_sched_entity *entity)
 void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
 			       struct drm_sched_entity *entity)
 {
-	struct drm_gpu_scheduler *sched = sched_job->sched;
-	bool first = false;
+	struct drm_sched_rq *rq = entity->rq;
+	bool first = false, reschedule, idle;
 
-	trace_drm_sched_job(sched_job, entity);
+	idle = entity->last_scheduled == NULL ||
+	       dma_fence_is_signaled(entity->last_scheduled);
+	first = spsc_queue_count(&entity->job_queue) == 0;
+	reschedule = idle && first && (entity->num_rq_list > 1);
 
+	if (reschedule) {
+		rq = drm_sched_entity_get_free_sched(entity);
+		spin_lock(&entity->rq_lock);
+		drm_sched_rq_remove_entity(entity->rq, entity);
+		entity->rq = rq;
+		spin_unlock(&entity->rq_lock);
+	}
+
+	trace_drm_sched_job(sched_job, entity);
 	atomic_inc(&entity->rq->sched->num_jobs);
 	first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);
 
@@ -588,7 +602,7 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
 		}
 		drm_sched_rq_add_entity(entity->rq, entity);
 		spin_unlock(&entity->rq_lock);
-		drm_sched_wakeup(sched);
+		drm_sched_wakeup(entity->rq->sched);
 	}
 }
 EXPORT_SYMBOL(drm_sched_entity_push_job);
On 01.08.2018 10:19, Nayan Deshmukh wrote:
These are the potential run queues on which the jobs from this entity can be scheduled. We will use this to do load balancing.
Signed-off-by: Nayan Deshmukh <nayan26deshmukh@gmail.com>
Reviewed-by: Christian König <christian.koenig@amd.com> for the whole series.
I also just pushed them into our internal branch for upstreaming.
Thanks for all the work, Christian.