+ dri-devel since GPU scheduler is a shared component.
On Wed, Apr 7, 2021 at 9:31 AM Roy Sun Roy.Sun@amd.com wrote:
Update the timestamp of the scheduled fence on HW completion of the previous fences.
This allows more accurate tracking of the fence execution in HW.
Signed-off-by: David M Nieto david.nieto@amd.com
Signed-off-by: Roy Sun Roy.Sun@amd.com
drivers/gpu/drm/scheduler/sched_main.c | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-)
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c index 92d8de24d0a1..4e5d8d4af010 100644 --- a/drivers/gpu/drm/scheduler/sched_main.c +++ b/drivers/gpu/drm/scheduler/sched_main.c @@ -515,7 +515,7 @@ void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched) EXPORT_SYMBOL(drm_sched_resubmit_jobs);
/**
- drm_sched_resubmit_jobs_ext - helper to relunch certain number of jobs from mirror ring list
- drm_sched_resubmit_jobs_ext - helper to relunch certain number of jobs from pending list
- @sched: scheduler instance
- @max: job numbers to relaunch
@@ -671,7 +671,7 @@ drm_sched_select_entity(struct drm_gpu_scheduler *sched) static struct drm_sched_job * drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched) {
struct drm_sched_job *job;
struct drm_sched_job *job, *next; /* * Don't destroy jobs while the timeout worker is running OR thread
@@ -690,6 +690,13 @@ drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched) if (job && dma_fence_is_signaled(&job->s_fence->finished)) { /* remove job from pending_list */ list_del_init(&job->list);
/* account for the next fence in the queue */
next = list_first_entry_or_null(&sched->pending_list,
struct drm_sched_job, list);
if (next) {
next->s_fence->scheduled.timestamp =
job->s_fence->finished.timestamp;
} } else { job = NULL; /* queue timeout for next job */
-- 2.31.1
amd-gfx mailing list amd-gfx@lists.freedesktop.org https://lists.freedesktop.org/mailman/listinfo/amd-gfx
dri-devel@lists.freedesktop.org