In some scenarios, the get_timeline_name callback uses the fence flags to decide which timeline string name to return. Once the trace_dma_fence_init event is enabled, the get_timeline_name callback is invoked from dma_fence_init() to dump the fence structure. However, at that point the flags are always 0, which can trigger issues in the get_timeline_name implementations of various GPU drivers. So add a member that lets drivers initialize the flags in dma_fence_init().
Signed-off-by: Huang Rui <ray.huang@amd.com>
---
 drivers/dma-buf/dma-fence.c | 2 +-
 include/linux/dma-fence.h   | 7 +++++++
 2 files changed, 8 insertions(+), 1 deletion(-)
diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c
index 066400ed8841..3e0622bf385f 100644
--- a/drivers/dma-buf/dma-fence.c
+++ b/drivers/dma-buf/dma-fence.c
@@ -952,7 +952,7 @@ dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops,
 	fence->lock = lock;
 	fence->context = context;
 	fence->seqno = seqno;
-	fence->flags = 0UL;
+	fence->flags = ops->init_flags;
 	fence->error = 0;
 
 	trace_dma_fence_init(fence);
diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h
index 1ea691753bd3..f9270c5bc07a 100644
--- a/include/linux/dma-fence.h
+++ b/include/linux/dma-fence.h
@@ -131,6 +131,13 @@ struct dma_fence_ops {
 	 */
 	bool use_64bit_seqno;
 
+	/**
+	 * @init_flags:
+	 *
+	 * The initial value of fence flags (a mask of DMA_FENCE_FLAG_* defined).
+	 */
+	unsigned long init_flags;
+
 	/**
 	 * @get_driver_name:
 	 *
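For context, the dma_fence trace events invoke both name callbacks while the fence is still being initialized; roughly, from include/trace/events/dma_fence.h (abridged from memory, shown here for illustration):

	DECLARE_EVENT_CLASS(dma_fence,
		TP_PROTO(struct dma_fence *fence),
		TP_ARGS(fence),
		TP_STRUCT__entry(
			/* both callbacks run inside trace_dma_fence_init() */
			__string(driver, fence->ops->get_driver_name(fence))
			__string(timeline, fence->ops->get_timeline_name(fence))
			__field(unsigned int, context)
			__field(unsigned int, seqno)
		),
		...

A driver would preset any flag bits its callbacks rely on through the new member. A minimal sketch, assuming a hypothetical "foo" driver with one driver-private bit (all foo_* names are made up):

	#define FOO_FENCE_FLAG_EMBEDDED_BIT	DMA_FENCE_FLAG_USER_BITS

	static const struct dma_fence_ops foo_fence_ops = {
		/* every fence initialized with these ops starts with the bit
		 * set, so get_timeline_name() can already test it when it is
		 * called from trace_dma_fence_init() */
		.init_flags        = BIT(FOO_FENCE_FLAG_EMBEDDED_BIT),
		.get_driver_name   = foo_fence_get_driver_name,
		.get_timeline_name = foo_fence_get_timeline_name,
	};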
Initialize the flags for the fence embedded in the job at dma_fence_init() time. Otherwise, the amdgpu_fence_get_timeline_name callback takes the wrong path and triggers a NULL pointer panic once the trace event is enabled:
[ 156.131790] BUG: kernel NULL pointer dereference, address: 00000000000002a0
[ 156.131804] #PF: supervisor read access in kernel mode
[ 156.131811] #PF: error_code(0x0000) - not-present page
[ 156.131817] PGD 0 P4D 0
[ 156.131824] Oops: 0000 [#1] PREEMPT SMP PTI
[ 156.131832] CPU: 6 PID: 1404 Comm: sdma0 Tainted: G OE 5.16.0-rc1-custom #1
[ 156.131842] Hardware name: Gigabyte Technology Co., Ltd. Z170XP-SLI/Z170XP-SLI-CF, BIOS F20 11/04/2016
[ 156.131848] RIP: 0010:strlen+0x0/0x20
[ 156.131859] Code: 89 c0 c3 0f 1f 80 00 00 00 00 48 01 fe eb 0f 0f b6 07 38 d0 74 10 48 83 c7 01 84 c0 74 05 48 39 f7 75 ec 31 c0 c3 48 89 f8 c3 <80> 3f 00 74 10 48 89 f8 48 83 c0 01 80 38 00 75 f7 48 29 f8 c3 31
[ 156.131872] RSP: 0018:ffff9bd0018dbcf8 EFLAGS: 00010206
[ 156.131880] RAX: 00000000000002a0 RBX: ffff8d0305ef01b0 RCX: 000000000000000b
[ 156.131888] RDX: ffff8d03772ab924 RSI: ffff8d0305ef01b0 RDI: 00000000000002a0
[ 156.131895] RBP: ffff9bd0018dbd60 R08: ffff8d03002094d0 R09: 0000000000000000
[ 156.131901] R10: 000000000000005e R11: 0000000000000065 R12: ffff8d03002094d0
[ 156.131907] R13: 000000000000001f R14: 0000000000070018 R15: 0000000000000007
[ 156.131914] FS: 0000000000000000(0000) GS:ffff8d062ed80000(0000) knlGS:0000000000000000
[ 156.131923] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
[ 156.131929] CR2: 00000000000002a0 CR3: 000000001120a005 CR4: 00000000003706e0
[ 156.131937] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
[ 156.131942] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
[ 156.131949] Call Trace:
[ 156.131953]  <TASK>
[ 156.131957]  ? trace_event_raw_event_dma_fence+0xcc/0x200
[ 156.131973]  ? ring_buffer_unlock_commit+0x23/0x130
[ 156.131982]  dma_fence_init+0x92/0xb0
[ 156.131993]  amdgpu_fence_emit+0x10d/0x2b0 [amdgpu]
[ 156.132302]  amdgpu_ib_schedule+0x2f9/0x580 [amdgpu]
[ 156.132586]  amdgpu_job_run+0xed/0x220 [amdgpu]
Signed-off-by: Huang Rui <ray.huang@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 17 +++++++++--------
 1 file changed, 9 insertions(+), 8 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 3b7e86ea7167..e2aa71904278 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -76,7 +76,7 @@ void amdgpu_fence_slab_fini(void)
 /*
  * Cast helper
  */
-static const struct dma_fence_ops amdgpu_fence_ops;
+static struct dma_fence_ops amdgpu_fence_ops;
 static inline struct amdgpu_fence *to_amdgpu_fence(struct dma_fence *f)
 {
 	struct amdgpu_fence *__f = container_of(f, struct amdgpu_fence, base);
@@ -158,21 +158,22 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f, struct amd
 	}
 
 	seq = ++ring->fence_drv.sync_seq;
-	if (job != NULL && job->job_run_counter) {
+	if (job && job->job_run_counter) {
 		/* reinit seq for resubmitted jobs */
 		fence->seqno = seq;
+		set_bit(AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT, &fence->flags);
 	} else {
+		amdgpu_fence_ops.init_flags = 0;
+		if (job)
+			set_bit(AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT,
+				&amdgpu_fence_ops.init_flags);
+
 		dma_fence_init(fence, &amdgpu_fence_ops,
 			       &ring->fence_drv.lock,
 			       adev->fence_context + ring->idx, seq);
 	}
 
-	if (job != NULL) {
-		/* mark this fence has a parent job */
-		set_bit(AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT, &fence->flags);
-	}
-
 	amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr, seq,
 			       flags | AMDGPU_FENCE_FLAG_INT);
 
 	pm_runtime_get_noresume(adev_to_drm(adev)->dev);
@@ -720,7 +721,7 @@ static void amdgpu_fence_release(struct dma_fence *f)
 	call_rcu(&f->rcu, amdgpu_fence_free);
 }
 
-static const struct dma_fence_ops amdgpu_fence_ops = {
+static struct dma_fence_ops amdgpu_fence_ops = {
 	.get_driver_name = amdgpu_fence_get_driver_name,
 	.get_timeline_name = amdgpu_fence_get_timeline_name,
 	.enable_signaling = amdgpu_fence_enable_signaling,
On 13.12.21 at 07:34, Huang Rui wrote:
Initialize the flags for the fence embedded in the job at dma_fence_init() time. Otherwise, the amdgpu_fence_get_timeline_name callback takes the wrong path and triggers a NULL pointer panic once the trace event is enabled.

[snip]

 	} else {
+		amdgpu_fence_ops.init_flags = 0;
+		if (job)
+			set_bit(AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT,
+				&amdgpu_fence_ops.init_flags);
That is utterly nonsense. The amdgpu_fence_ops are global and can't be modified like that.
Christian.
[AMD Official Use Only]
-----Original Message-----
From: Koenig, Christian <Christian.Koenig@amd.com>
Sent: Tuesday, December 14, 2021 4:01 PM
To: Huang, Ray <Ray.Huang@amd.com>; dri-devel@lists.freedesktop.org; Daniel Vetter <daniel.vetter@ffwll.ch>; Sumit Semwal <sumit.semwal@linaro.org>
Cc: amd-gfx@lists.freedesktop.org; linux-media@vger.kernel.org; Deucher, Alexander <Alexander.Deucher@amd.com>; Liu, Monk <Monk.Liu@amd.com>
Subject: Re: [PATCH 2/3] drm/amdgpu: fix the fence timeline null pointer
On 13.12.21 at 07:34, Huang Rui wrote:
Initialize the flags for the fence embedded in the job at dma_fence_init() time. Otherwise, the amdgpu_fence_get_timeline_name callback takes the wrong path and triggers a NULL pointer panic once the trace event is enabled.

[snip]

 	} else {
+		amdgpu_fence_ops.init_flags = 0;
+		if (job)
+			set_bit(AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT,
+				&amdgpu_fence_ops.init_flags);
That is utterly nonsense. The amdgpu_fence_ops are global and can't be modified like that.
Please check the reply to patch 1: we need to initialize the fence and assign the flags together, otherwise it will trigger the panic once the trace event is enabled.
Thanks, Ray
Use the __string(), __assign_str() and __get_str() helpers in TRACE_EVENT() instead of raw string pointers in the gpu scheduler trace events. A bare const char * stored in the ring buffer is only dereferenced when the buffer is read, and trace_check_vprintf() warns because it cannot verify that the pointer is still valid at that point:
[ 158.890890] ------------[ cut here ]------------
[ 158.890899] fmt: 'entity=%p, id=%llu, fence=%p, ring=%s, job count:%u, hw job count:%d' current_buffer: ' Xorg-1588 [001] ..... 149.391136: drm_sched_job: entity=0000000076f0d517, id=1, fence=000000008dd56028, ring='
[ 158.890910] WARNING: CPU: 6 PID: 1617 at kernel/trace/trace.c:3830 trace_check_vprintf+0x481/0x4a0
Signed-off-by: Huang Rui <ray.huang@amd.com>
---
 drivers/gpu/drm/scheduler/gpu_scheduler_trace.h | 17 +++++++++--------
 1 file changed, 9 insertions(+), 8 deletions(-)
diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h b/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h
index 877ce9b127f1..4e397790c195 100644
--- a/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h
+++ b/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h
@@ -38,6 +38,7 @@ TRACE_EVENT(drm_sched_job,
 	    TP_STRUCT__entry(
 			     __field(struct drm_sched_entity *, entity)
 			     __field(struct dma_fence *, fence)
+			     __string(name, sched_job->sched->name)
 			     __field(const char *, name)
 			     __field(uint64_t, id)
 			     __field(u32, job_count)
@@ -48,14 +49,14 @@ TRACE_EVENT(drm_sched_job,
 			   __entry->entity = entity;
 			   __entry->id = sched_job->id;
 			   __entry->fence = &sched_job->s_fence->finished;
-			   __entry->name = sched_job->sched->name;
+			   __assign_str(name, sched_job->sched->name);
 			   __entry->job_count = spsc_queue_count(&entity->job_queue);
 			   __entry->hw_job_count = atomic_read(
 				   &sched_job->sched->hw_rq_count);
 			   ),
 	    TP_printk("entity=%p, id=%llu, fence=%p, ring=%s, job count:%u, hw job count:%d",
 		      __entry->entity, __entry->id,
-		      __entry->fence, __entry->name,
+		      __entry->fence, __get_str(name),
 		      __entry->job_count, __entry->hw_job_count)
 );
 
@@ -65,7 +66,7 @@ TRACE_EVENT(drm_run_job,
 	    TP_STRUCT__entry(
 			     __field(struct drm_sched_entity *, entity)
 			     __field(struct dma_fence *, fence)
-			     __field(const char *, name)
+			     __string(name, sched_job->sched->name)
 			     __field(uint64_t, id)
 			     __field(u32, job_count)
 			     __field(int, hw_job_count)
@@ -75,14 +76,14 @@ TRACE_EVENT(drm_run_job,
 			   __entry->entity = entity;
 			   __entry->id = sched_job->id;
 			   __entry->fence = &sched_job->s_fence->finished;
-			   __entry->name = sched_job->sched->name;
+			   __assign_str(name, sched_job->sched->name);
 			   __entry->job_count = spsc_queue_count(&entity->job_queue);
 			   __entry->hw_job_count = atomic_read(
 				   &sched_job->sched->hw_rq_count);
 			   ),
 	    TP_printk("entity=%p, id=%llu, fence=%p, ring=%s, job count:%u, hw job count:%d",
 		      __entry->entity, __entry->id,
-		      __entry->fence, __entry->name,
+		      __entry->fence, __get_str(name),
 		      __entry->job_count, __entry->hw_job_count)
 );
 
@@ -103,7 +104,7 @@ TRACE_EVENT(drm_sched_job_wait_dep,
 	    TP_PROTO(struct drm_sched_job *sched_job, struct dma_fence *fence),
 	    TP_ARGS(sched_job, fence),
 	    TP_STRUCT__entry(
-			     __field(const char *,name)
+			     __string(name, sched_job->sched->name)
 			     __field(uint64_t, id)
 			     __field(struct dma_fence *, fence)
 			     __field(uint64_t, ctx)
@@ -111,14 +112,14 @@ TRACE_EVENT(drm_sched_job_wait_dep,
 			     ),
 
 	    TP_fast_assign(
-			   __entry->name = sched_job->sched->name;
+			   __assign_str(name, sched_job->sched->name);
 			   __entry->id = sched_job->id;
 			   __entry->fence = fence;
 			   __entry->ctx = fence->context;
 			   __entry->seqno = fence->seqno;
 			   ),
 	    TP_printk("job ring=%s, id=%llu, depends fence=%p, context=%llu, seq=%u",
-		      __entry->name, __entry->id,
+		      __get_str(name), __entry->id,
 		      __entry->fence, __entry->ctx,
 		      __entry->seqno)
 );
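For illustration, the pattern these helpers implement: __string() reserves room for a copy of the string inside the trace entry, __assign_str() copies it in at event time, and __get_str() reads that copy at print time, so TP_printk() never chases a pointer that may have gone stale. A minimal hypothetical event (foo_event and its argument are made up):

	TRACE_EVENT(foo_event,
		TP_PROTO(const char *name),
		TP_ARGS(name),
		TP_STRUCT__entry(
			/* dynamically sized string stored in the entry itself */
			__string(name, name)
		),
		TP_fast_assign(
			__assign_str(name, name);
		),
		/* safe: formats the stored copy, not the original pointer */
		TP_printk("name=%s", __get_str(name))
	);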
A gentle reminder: may I know whether there are any comments on this patch? It is just a minor warning fix.
Thanks, Ray
On Mon, Dec 13, 2021 at 02:34:22PM +0800, Huang, Ray wrote:
Use the __string(), __assign_str() and __get_str() helpers in TRACE_EVENT() instead of raw string pointers in the gpu scheduler trace events.
[snip]
Ping~
It seems this patch has been missed.
Thanks, Ray
On Mon, Dec 13, 2021 at 02:34:22PM +0800, Huang, Ray wrote:
Use the __string(), __assign_str() and __get_str() helpers in TRACE_EVENT() instead of raw string pointers in the gpu scheduler trace events.
[snip]
On 13.12.21 at 07:34, Huang Rui wrote:
In some scenarios, the get_timeline_name callback uses the fence flags to decide which timeline string name to return. Once the trace_dma_fence_init event is enabled, the get_timeline_name callback is invoked from dma_fence_init() to dump the fence structure. However, at that point the flags are always 0, which can trigger issues in the get_timeline_name implementations of various GPU drivers. So add a member that lets drivers initialize the flags in dma_fence_init().
Well that doesn't make much sense to me.
None of the dma_fence callbacks is called from the dma_fence_init function (or at least shouldn't be). So drivers always have the opportunity to adjust the flags.
So please explain the rationale again?
Christian.
[AMD Official Use Only]
-----Original Message-----
From: Koenig, Christian <Christian.Koenig@amd.com>
Sent: Tuesday, December 14, 2021 4:00 PM
To: Huang, Ray <Ray.Huang@amd.com>; dri-devel@lists.freedesktop.org; Daniel Vetter <daniel.vetter@ffwll.ch>; Sumit Semwal <sumit.semwal@linaro.org>
Cc: amd-gfx@lists.freedesktop.org; linux-media@vger.kernel.org; Deucher, Alexander <Alexander.Deucher@amd.com>; Liu, Monk <Monk.Liu@amd.com>
Subject: Re: [PATCH 1/3] dma-buf: make the flags can be configured during dma fence init
On 13.12.21 at 07:34, Huang Rui wrote:
In some scenarios, the get_timeline_name callback uses the fence flags to decide which timeline string name to return. Once the trace_dma_fence_init event is enabled, the get_timeline_name callback is invoked from dma_fence_init() to dump the fence structure. However, at that point the flags are always 0, which can trigger issues in the get_timeline_name implementations of various GPU drivers. So add a member that lets drivers initialize the flags in dma_fence_init().
Well that doesn't make much sense to me.
None of the dma_fence callbacks is called from the dma_fence_init function (or at least shouldn't be). So drivers always have the opportunity to adjust the flags.
So please explain the rationale again?
Once we enable the trace_dma_fence_init event, dma_fence_init() calls the get_driver_name and get_timeline_name callbacks to dump the names. At that time, the flags are still 0. However, amdgpu_fence_get_timeline_name() checks the flags (AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT) to select which way to get the ring. If the fence is actually embedded in a job (the bit is only set after initialization), it takes the wrong path and triggers a kernel panic (please see patch 2). We cannot set the flags before dma_fence_init() runs; that is the problem.
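For reference, the callback in question looks roughly like this (a sketch of the current driver code, abridged; see amdgpu_fence.c):

	static const char *amdgpu_fence_get_timeline_name(struct dma_fence *f)
	{
		struct amdgpu_ring *ring;

		if (test_bit(AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT, &f->flags)) {
			struct amdgpu_job *job = container_of(f, struct amdgpu_job,
							      hw_fence);

			ring = to_amdgpu_ring(job->base.sched);
		} else {
			/* with flags still 0, a job-embedded fence also lands
			 * here; to_amdgpu_fence() then misreads the layout and
			 * ring->name is a wild pointer, hence the strlen() oops */
			ring = to_amdgpu_fence(f)->ring;
		}

		return (const char *)ring->name;
	}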
Thanks, Ray
On 14.12.21 at 10:19, Huang, Ray wrote:
[snip]
Once we enable the trace_dma_fence_init event, dma_fence_init() calls the get_driver_name and get_timeline_name callbacks to dump the names. At that time, the flags are still 0. However, amdgpu_fence_get_timeline_name() checks the flags (AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT) to select which way to get the ring. If the fence is actually embedded in a job (the bit is only set after initialization), it takes the wrong path and triggers a kernel panic (please see patch 2). We cannot set the flags before dma_fence_init() runs; that is the problem.
Well then I think we should fix the whole approach instead because what you try to do here is utterly nonsense. You can't modify the ops structure on the fly because that is used by all the fences.
Instead, please just duplicate amdgpu_fence_ops and separate it into two structures, one for each case.
This way we should also be able to completely drop the AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT flag.
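A sketch of what that split could look like (the amdgpu_job_fence_* names here are assumptions, not existing functions):

	static const struct dma_fence_ops amdgpu_fence_ops = {
		.get_driver_name = amdgpu_fence_get_driver_name,
		.get_timeline_name = amdgpu_fence_get_timeline_name,
		.enable_signaling = amdgpu_fence_enable_signaling,
		.release = amdgpu_fence_release,
	};

	/* second ops table for fences embedded in an amdgpu_job */
	static const struct dma_fence_ops amdgpu_job_fence_ops = {
		.get_driver_name = amdgpu_fence_get_driver_name,
		.get_timeline_name = amdgpu_job_fence_get_timeline_name,
		.enable_signaling = amdgpu_job_fence_enable_signaling,
		.release = amdgpu_job_fence_release,
	};

amdgpu_fence_emit() can then pick the right (still const) ops table up front, and no flag test is needed in the callbacks:

	dma_fence_init(fence,
		       job ? &amdgpu_job_fence_ops : &amdgpu_fence_ops,
		       &ring->fence_drv.lock,
		       adev->fence_context + ring->idx, seq);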
Regards, Christian.
[AMD Official Use Only]
-----Original Message-----
From: Koenig, Christian <Christian.Koenig@amd.com>
Sent: Tuesday, December 14, 2021 5:24 PM
To: Huang, Ray <Ray.Huang@amd.com>; dri-devel@lists.freedesktop.org; Daniel Vetter <daniel.vetter@ffwll.ch>; Sumit Semwal <sumit.semwal@linaro.org>
Cc: amd-gfx@lists.freedesktop.org; linux-media@vger.kernel.org; Deucher, Alexander <Alexander.Deucher@amd.com>; Liu, Monk <Monk.Liu@amd.com>
Subject: Re: [PATCH 1/3] dma-buf: make the flags can be configured during dma fence init
On 14.12.21 at 10:19, Huang, Ray wrote:
[snip]
Well then I think we should fix the whole approach instead because what you try to do here is utterly nonsense. You can't modify the ops structure on the fly because that is used by all the fences.
Instead, please just duplicate amdgpu_fence_ops and separate it into two structures, one for each case.
This way we should also be able to completely drop the AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT flag.
OK, you mean this flag is not one of the standard dma_fence_flag_bits and is AMD-specific, so we had better drop it to align with the dma_fence structure design?
Thanks, Ray
On 14.12.21 at 10:44, Huang, Ray wrote:
[snip]
OK, you mean this flag is not one of the standard dma_fence_flag_bits and is AMD-specific, so we had better drop it to align with the dma_fence structure design?
Well yes and no. We can use driver private flags, it's just that for this use case it doesn't make much sense.
See what the functions do, for example amdgpu_fence_enable_signaling():
	if (test_bit(AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT, &f->flags)) {
		struct amdgpu_job *job = container_of(f, struct amdgpu_job,
						      hw_fence);

		ring = to_amdgpu_ring(job->base.sched);
	} else {
		ring = to_amdgpu_fence(f)->ring;
	}

	if (!timer_pending(&ring->fence_drv.fallback_timer))
		amdgpu_fence_schedule_fallback(ring);

	return true;
That can be done much more cleanly as:
	static bool amdgpu_fence_enable_signaling(struct dma_fence *f)
	{
		struct amdgpu_ring *ring = to_amdgpu_fence(f)->ring;

		if (!timer_pending(&ring->fence_drv.fallback_timer))
			amdgpu_fence_schedule_fallback(ring);
		return true;
	}

	static bool amdgpu_job_fence_enable_signaling(struct dma_fence *f)
	{
		....
	}
If we want to avoid the duplication of logic we should just move the timer_pending() check into a separate function.
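A minimal sketch of such a helper (the name is an assumption):

	/* schedule the fallback timer for @ring unless one is already pending */
	static void amdgpu_fence_schedule_fallback_if_needed(struct amdgpu_ring *ring)
	{
		if (!timer_pending(&ring->fence_drv.fallback_timer))
			amdgpu_fence_schedule_fallback(ring);
	}

Both enable_signaling variants would then just derive their ring from the fence and call this.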
Regards, Christian.