Tested this patch set more extensively in the last two weeks and fixed tons of additional bugs.
Still only testing with hand made DRM patches, but those are now rather reliable at least on amdgpu. Setting up igt is the next thing on the TODO list.
UAPI seems to be pretty solid already except for two changes: 1. Dropping an extra flag in the wait interface which was default behavior anyway. 2. Dropped the extra indirection in the query interface.
In addition to that, I'm wondering whether we shouldn't replace the flags parameter to find_fence() with a timeout value instead, to limit how long we want to wait for a fence to appear.
Please test and comment, Christian.
For a lot of use cases we need 64bit sequence numbers. Currently drivers overload the dma_fence structure to store the additional bits.
Stop doing that and make the sequence number in the dma_fence always 64bit.
For compatibility with hardware which can only do 32bit sequences, the comparisons in __dma_fence_is_later still only take the lower 32 bits as significant.
Signed-off-by: Christian König christian.koenig@amd.com --- drivers/dma-buf/dma-fence.c | 2 +- drivers/dma-buf/sw_sync.c | 2 +- drivers/dma-buf/sync_file.c | 4 ++-- drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c | 2 +- drivers/gpu/drm/i915/i915_sw_fence.c | 2 +- drivers/gpu/drm/i915/intel_engine_cs.c | 2 +- drivers/gpu/drm/vgem/vgem_fence.c | 4 ++-- include/linux/dma-fence.h | 14 +++++++------- 8 files changed, 16 insertions(+), 16 deletions(-)
diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c index 1551ca7df394..37e24b69e94b 100644 --- a/drivers/dma-buf/dma-fence.c +++ b/drivers/dma-buf/dma-fence.c @@ -615,7 +615,7 @@ EXPORT_SYMBOL(dma_fence_wait_any_timeout); */ void dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops, - spinlock_t *lock, u64 context, unsigned seqno) + spinlock_t *lock, u64 context, u64 seqno) { BUG_ON(!lock); BUG_ON(!ops || !ops->get_driver_name || !ops->get_timeline_name); diff --git a/drivers/dma-buf/sw_sync.c b/drivers/dma-buf/sw_sync.c index 53c1d6d36a64..32dcf7b4c935 100644 --- a/drivers/dma-buf/sw_sync.c +++ b/drivers/dma-buf/sw_sync.c @@ -172,7 +172,7 @@ static bool timeline_fence_enable_signaling(struct dma_fence *fence) static void timeline_fence_value_str(struct dma_fence *fence, char *str, int size) { - snprintf(str, size, "%d", fence->seqno); + snprintf(str, size, "%lld", fence->seqno); }
static void timeline_fence_timeline_value_str(struct dma_fence *fence, diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c index 35dd06479867..4f6305ca52c8 100644 --- a/drivers/dma-buf/sync_file.c +++ b/drivers/dma-buf/sync_file.c @@ -144,7 +144,7 @@ char *sync_file_get_name(struct sync_file *sync_file, char *buf, int len) } else { struct dma_fence *fence = sync_file->fence;
- snprintf(buf, len, "%s-%s%llu-%d", + snprintf(buf, len, "%s-%s%llu-%lld", fence->ops->get_driver_name(fence), fence->ops->get_timeline_name(fence), fence->context, @@ -258,7 +258,7 @@ static struct sync_file *sync_file_merge(const char *name, struct sync_file *a,
i_b++; } else { - if (pt_a->seqno - pt_b->seqno <= INT_MAX) + if (__dma_fence_is_later(pt_a->seqno, pt_b->seqno)) add_fence(fences, &i, pt_a); else add_fence(fences, &i, pt_b); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c index 12f2bf97611f..bfaf5c6323be 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c @@ -388,7 +388,7 @@ void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager, soffset, eoffset, eoffset - soffset);
if (i->fence) - seq_printf(m, " protected by 0x%08x on context %llu", + seq_printf(m, " protected by 0x%016llx on context %llu", i->fence->seqno, i->fence->context);
seq_printf(m, "\n"); diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c index 6dbeed079ae5..11bcdabd5177 100644 --- a/drivers/gpu/drm/i915/i915_sw_fence.c +++ b/drivers/gpu/drm/i915/i915_sw_fence.c @@ -393,7 +393,7 @@ static void timer_i915_sw_fence_wake(struct timer_list *t) if (!fence) return;
- pr_notice("Asynchronous wait on fence %s:%s:%x timed out (hint:%pS)\n", + pr_notice("Asynchronous wait on fence %s:%s:%llx timed out (hint:%pS)\n", cb->dma->ops->get_driver_name(cb->dma), cb->dma->ops->get_timeline_name(cb->dma), cb->dma->seqno, diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c index 217ed3ee1cab..f28a66c67d34 100644 --- a/drivers/gpu/drm/i915/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/intel_engine_cs.c @@ -1236,7 +1236,7 @@ static void print_request(struct drm_printer *m,
x = print_sched_attr(rq->i915, &rq->sched.attr, buf, x, sizeof(buf));
- drm_printf(m, "%s%x%s [%llx:%x]%s @ %dms: %s\n", + drm_printf(m, "%s%x%s [%llx:%llx]%s @ %dms: %s\n", prefix, rq->global_seqno, i915_request_completed(rq) ? "!" : "", diff --git a/drivers/gpu/drm/vgem/vgem_fence.c b/drivers/gpu/drm/vgem/vgem_fence.c index c1c420afe2dd..eb17c0cd3727 100644 --- a/drivers/gpu/drm/vgem/vgem_fence.c +++ b/drivers/gpu/drm/vgem/vgem_fence.c @@ -53,13 +53,13 @@ static void vgem_fence_release(struct dma_fence *base)
static void vgem_fence_value_str(struct dma_fence *fence, char *str, int size) { - snprintf(str, size, "%u", fence->seqno); + snprintf(str, size, "%llu", fence->seqno); }
static void vgem_fence_timeline_value_str(struct dma_fence *fence, char *str, int size) { - snprintf(str, size, "%u", + snprintf(str, size, "%llu", dma_fence_is_signaled(fence) ? fence->seqno : 0); }
diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h index 02dba8cd033d..1393529c0c1f 100644 --- a/include/linux/dma-fence.h +++ b/include/linux/dma-fence.h @@ -77,7 +77,7 @@ struct dma_fence { struct list_head cb_list; spinlock_t *lock; u64 context; - unsigned seqno; + u64 seqno; unsigned long flags; ktime_t timestamp; int error; @@ -244,7 +244,7 @@ struct dma_fence_ops { };
void dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops, - spinlock_t *lock, u64 context, unsigned seqno); + spinlock_t *lock, u64 context, u64 seqno);
void dma_fence_release(struct kref *kref); void dma_fence_free(struct dma_fence *fence); @@ -414,9 +414,9 @@ dma_fence_is_signaled(struct dma_fence *fence) * Returns true if f1 is chronologically later than f2. Both fences must be * from the same context, since a seqno is not common across contexts. */ -static inline bool __dma_fence_is_later(u32 f1, u32 f2) +static inline bool __dma_fence_is_later(u64 f1, u64 f2) { - return (int)(f1 - f2) > 0; + return (int)(lower_32_bits(f1) - lower_32_bits(f2)) > 0; }
/** @@ -547,21 +547,21 @@ u64 dma_fence_context_alloc(unsigned num); do { \ struct dma_fence *__ff = (f); \ if (IS_ENABLED(CONFIG_DMA_FENCE_TRACE)) \ - pr_info("f %llu#%u: " fmt, \ + pr_info("f %llu#%llu: " fmt, \ __ff->context, __ff->seqno, ##args); \ } while (0)
#define DMA_FENCE_WARN(f, fmt, args...) \ do { \ struct dma_fence *__ff = (f); \ - pr_warn("f %llu#%u: " fmt, __ff->context, __ff->seqno, \ + pr_warn("f %llu#%llu: " fmt, __ff->context, __ff->seqno,\ ##args); \ } while (0)
#define DMA_FENCE_ERR(f, fmt, args...) \ do { \ struct dma_fence *__ff = (f); \ - pr_err("f %llu#%u: " fmt, __ff->context, __ff->seqno, \ + pr_err("f %llu#%llu: " fmt, __ff->context, __ff->seqno, \ ##args); \ } while (0)
On 2018年11月28日 22:50, Christian König wrote:
For a lot of use cases we need 64bit sequence numbers. Currently drivers overload the dma_fence structure to store the additional bits.
Stop doing that and make the sequence number in the dma_fence always 64bit.
For compatibility with hardware which can only do 32bit sequences, the comparisons in __dma_fence_is_later still only take the lower 32 bits as significant.
Can't we compare 64bit variables directly? Can we do it as below?
-static inline bool __dma_fence_is_later(u32 f1, u32 f2) +static inline bool __dma_fence_is_later(u64 f1, u64 f2) { - return (int)(f1 - f2) > 0; + return (f1 > f2) ? true : false;
}
-David
Signed-off-by: Christian König christian.koenig@amd.com
drivers/dma-buf/dma-fence.c | 2 +- drivers/dma-buf/sw_sync.c | 2 +- drivers/dma-buf/sync_file.c | 4 ++-- drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c | 2 +- drivers/gpu/drm/i915/i915_sw_fence.c | 2 +- drivers/gpu/drm/i915/intel_engine_cs.c | 2 +- drivers/gpu/drm/vgem/vgem_fence.c | 4 ++-- include/linux/dma-fence.h | 14 +++++++------- 8 files changed, 16 insertions(+), 16 deletions(-)
diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c index 1551ca7df394..37e24b69e94b 100644 --- a/drivers/dma-buf/dma-fence.c +++ b/drivers/dma-buf/dma-fence.c @@ -615,7 +615,7 @@ EXPORT_SYMBOL(dma_fence_wait_any_timeout); */ void dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops,
spinlock_t *lock, u64 context, unsigned seqno)
+ spinlock_t *lock, u64 context, u64 seqno) { BUG_ON(!lock); BUG_ON(!ops || !ops->get_driver_name || !ops->get_timeline_name);
diff --git a/drivers/dma-buf/sw_sync.c b/drivers/dma-buf/sw_sync.c index 53c1d6d36a64..32dcf7b4c935 100644 --- a/drivers/dma-buf/sw_sync.c +++ b/drivers/dma-buf/sw_sync.c @@ -172,7 +172,7 @@ static bool timeline_fence_enable_signaling(struct dma_fence *fence) static void timeline_fence_value_str(struct dma_fence *fence, char *str, int size) {
- snprintf(str, size, "%d", fence->seqno);
snprintf(str, size, "%lld", fence->seqno); }
static void timeline_fence_timeline_value_str(struct dma_fence *fence,
diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c index 35dd06479867..4f6305ca52c8 100644 --- a/drivers/dma-buf/sync_file.c +++ b/drivers/dma-buf/sync_file.c @@ -144,7 +144,7 @@ char *sync_file_get_name(struct sync_file *sync_file, char *buf, int len) } else { struct dma_fence *fence = sync_file->fence;
snprintf(buf, len, "%s-%s%llu-%d",
snprintf(buf, len, "%s-%s%llu-%lld", fence->ops->get_driver_name(fence), fence->ops->get_timeline_name(fence), fence->context,
@@ -258,7 +258,7 @@ static struct sync_file *sync_file_merge(const char *name, struct sync_file *a,
i_b++; } else {
if (pt_a->seqno - pt_b->seqno <= INT_MAX)
if (__dma_fence_is_later(pt_a->seqno, pt_b->seqno)) add_fence(fences, &i, pt_a); else add_fence(fences, &i, pt_b);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c index 12f2bf97611f..bfaf5c6323be 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c @@ -388,7 +388,7 @@ void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager, soffset, eoffset, eoffset - soffset);
if (i->fence)
seq_printf(m, " protected by 0x%08x on context %llu",
seq_printf(m, " protected by 0x%016llx on context %llu", i->fence->seqno, i->fence->context);
seq_printf(m, "\n");
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c index 6dbeed079ae5..11bcdabd5177 100644 --- a/drivers/gpu/drm/i915/i915_sw_fence.c +++ b/drivers/gpu/drm/i915/i915_sw_fence.c @@ -393,7 +393,7 @@ static void timer_i915_sw_fence_wake(struct timer_list *t) if (!fence) return;
- pr_notice("Asynchronous wait on fence %s:%s:%x timed out (hint:%pS)\n",
+ pr_notice("Asynchronous wait on fence %s:%s:%llx timed out (hint:%pS)\n", cb->dma->ops->get_driver_name(cb->dma), cb->dma->ops->get_timeline_name(cb->dma), cb->dma->seqno,
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c index 217ed3ee1cab..f28a66c67d34 100644 --- a/drivers/gpu/drm/i915/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/intel_engine_cs.c @@ -1236,7 +1236,7 @@ static void print_request(struct drm_printer *m,
x = print_sched_attr(rq->i915, &rq->sched.attr, buf, x, sizeof(buf));
- drm_printf(m, "%s%x%s [%llx:%x]%s @ %dms: %s\n",
+ drm_printf(m, "%s%x%s [%llx:%llx]%s @ %dms: %s\n", prefix, rq->global_seqno, i915_request_completed(rq) ? "!" : "",
diff --git a/drivers/gpu/drm/vgem/vgem_fence.c b/drivers/gpu/drm/vgem/vgem_fence.c index c1c420afe2dd..eb17c0cd3727 100644 --- a/drivers/gpu/drm/vgem/vgem_fence.c +++ b/drivers/gpu/drm/vgem/vgem_fence.c @@ -53,13 +53,13 @@ static void vgem_fence_release(struct dma_fence *base)
static void vgem_fence_value_str(struct dma_fence *fence, char *str, int size) {
- snprintf(str, size, "%u", fence->seqno);
snprintf(str, size, "%llu", fence->seqno); }
static void vgem_fence_timeline_value_str(struct dma_fence *fence, char *str, int size) {
- snprintf(str, size, "%u",
+ snprintf(str, size, "%llu", dma_fence_is_signaled(fence) ? fence->seqno : 0); }
diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h index 02dba8cd033d..1393529c0c1f 100644 --- a/include/linux/dma-fence.h +++ b/include/linux/dma-fence.h @@ -77,7 +77,7 @@ struct dma_fence { struct list_head cb_list; spinlock_t *lock; u64 context;
- unsigned seqno;
+ u64 seqno; unsigned long flags; ktime_t timestamp; int error;
@@ -244,7 +244,7 @@ struct dma_fence_ops { };
void dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops,
spinlock_t *lock, u64 context, unsigned seqno);
spinlock_t *lock, u64 context, u64 seqno);
void dma_fence_release(struct kref *kref); void dma_fence_free(struct dma_fence *fence);
@@ -414,9 +414,9 @@ dma_fence_is_signaled(struct dma_fence *fence)
- Returns true if f1 is chronologically later than f2. Both fences must be
- from the same context, since a seqno is not common across contexts.
*/ -static inline bool __dma_fence_is_later(u32 f1, u32 f2) +static inline bool __dma_fence_is_later(u64 f1, u64 f2) {
- return (int)(f1 - f2) > 0;
return (int)(lower_32_bits(f1) - lower_32_bits(f2)) > 0; }
/**
@@ -547,21 +547,21 @@ u64 dma_fence_context_alloc(unsigned num); do { \ struct dma_fence *__ff = (f); \ if (IS_ENABLED(CONFIG_DMA_FENCE_TRACE)) \
pr_info("f %llu#%u: " fmt, \
pr_info("f %llu#%llu: " fmt, \ __ff->context, __ff->seqno, ##args); \
} while (0)
#define DMA_FENCE_WARN(f, fmt, args...) \ do { \ struct dma_fence *__ff = (f); \
pr_warn("f %llu#%u: " fmt, __ff->context, __ff->seqno, \
pr_warn("f %llu#%llu: " fmt, __ff->context, __ff->seqno,\ ##args); \
} while (0)
#define DMA_FENCE_ERR(f, fmt, args...) \ do { \ struct dma_fence *__ff = (f); \
pr_err("f %llu#%u: " fmt, __ff->context, __ff->seqno, \
pr_err("f %llu#%llu: " fmt, __ff->context, __ff->seqno, \ ##args); \ } while (0)
Am 29.11.18 um 09:08 schrieb zhoucm1:
On 2018年11月28日 22:50, Christian König wrote:
For a lot of use cases we need 64bit sequence numbers. Currently drivers overload the dma_fence structure to store the additional bits.
Stop doing that and make the sequence number in the dma_fence always 64bit.
For compatibility with hardware which can only do 32bit sequences, the comparisons in __dma_fence_is_later still only take the lower 32 bits as significant.
Can't we compare 64bit variables directly? Can we do it as below?
As I wrote we don't want to do this because we have tons of hardware (including AMDs UVD/VCE engines) which can only do 32bit fences.
So only the lower 32bits are used as significant here.
Christian.
-static inline bool __dma_fence_is_later(u32 f1, u32 f2) +static inline bool __dma_fence_is_later(u64 f1, u64 f2) { - return (int)(f1 - f2) > 0; + return (f1 > f2) ? true : false;
}
-David
Signed-off-by: Christian König christian.koenig@amd.com
drivers/dma-buf/dma-fence.c | 2 +- drivers/dma-buf/sw_sync.c | 2 +- drivers/dma-buf/sync_file.c | 4 ++-- drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c | 2 +- drivers/gpu/drm/i915/i915_sw_fence.c | 2 +- drivers/gpu/drm/i915/intel_engine_cs.c | 2 +- drivers/gpu/drm/vgem/vgem_fence.c | 4 ++-- include/linux/dma-fence.h | 14 +++++++------- 8 files changed, 16 insertions(+), 16 deletions(-)
diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c index 1551ca7df394..37e24b69e94b 100644 --- a/drivers/dma-buf/dma-fence.c +++ b/drivers/dma-buf/dma-fence.c @@ -615,7 +615,7 @@ EXPORT_SYMBOL(dma_fence_wait_any_timeout); */ void dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops, - spinlock_t *lock, u64 context, unsigned seqno) + spinlock_t *lock, u64 context, u64 seqno) { BUG_ON(!lock); BUG_ON(!ops || !ops->get_driver_name || !ops->get_timeline_name); diff --git a/drivers/dma-buf/sw_sync.c b/drivers/dma-buf/sw_sync.c index 53c1d6d36a64..32dcf7b4c935 100644 --- a/drivers/dma-buf/sw_sync.c +++ b/drivers/dma-buf/sw_sync.c @@ -172,7 +172,7 @@ static bool timeline_fence_enable_signaling(struct dma_fence *fence) static void timeline_fence_value_str(struct dma_fence *fence, char *str, int size) { - snprintf(str, size, "%d", fence->seqno); + snprintf(str, size, "%lld", fence->seqno); } static void timeline_fence_timeline_value_str(struct dma_fence *fence, diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c index 35dd06479867..4f6305ca52c8 100644 --- a/drivers/dma-buf/sync_file.c +++ b/drivers/dma-buf/sync_file.c @@ -144,7 +144,7 @@ char *sync_file_get_name(struct sync_file *sync_file, char *buf, int len) } else { struct dma_fence *fence = sync_file->fence; - snprintf(buf, len, "%s-%s%llu-%d", + snprintf(buf, len, "%s-%s%llu-%lld", fence->ops->get_driver_name(fence), fence->ops->get_timeline_name(fence), fence->context, @@ -258,7 +258,7 @@ static struct sync_file *sync_file_merge(const char *name, struct sync_file *a, i_b++; } else { - if (pt_a->seqno - pt_b->seqno <= INT_MAX) + if (__dma_fence_is_later(pt_a->seqno, pt_b->seqno)) add_fence(fences, &i, pt_a); else add_fence(fences, &i, pt_b); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c index 12f2bf97611f..bfaf5c6323be 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c @@ 
-388,7 +388,7 @@ void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager, soffset, eoffset, eoffset - soffset); if (i->fence) - seq_printf(m, " protected by 0x%08x on context %llu", + seq_printf(m, " protected by 0x%016llx on context %llu", i->fence->seqno, i->fence->context); seq_printf(m, "\n"); diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c index 6dbeed079ae5..11bcdabd5177 100644 --- a/drivers/gpu/drm/i915/i915_sw_fence.c +++ b/drivers/gpu/drm/i915/i915_sw_fence.c @@ -393,7 +393,7 @@ static void timer_i915_sw_fence_wake(struct timer_list *t) if (!fence) return; - pr_notice("Asynchronous wait on fence %s:%s:%x timed out (hint:%pS)\n", + pr_notice("Asynchronous wait on fence %s:%s:%llx timed out (hint:%pS)\n", cb->dma->ops->get_driver_name(cb->dma), cb->dma->ops->get_timeline_name(cb->dma), cb->dma->seqno, diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c index 217ed3ee1cab..f28a66c67d34 100644 --- a/drivers/gpu/drm/i915/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/intel_engine_cs.c @@ -1236,7 +1236,7 @@ static void print_request(struct drm_printer *m, x = print_sched_attr(rq->i915, &rq->sched.attr, buf, x, sizeof(buf)); - drm_printf(m, "%s%x%s [%llx:%x]%s @ %dms: %s\n", + drm_printf(m, "%s%x%s [%llx:%llx]%s @ %dms: %s\n", prefix, rq->global_seqno, i915_request_completed(rq) ? "!" 
: "", diff --git a/drivers/gpu/drm/vgem/vgem_fence.c b/drivers/gpu/drm/vgem/vgem_fence.c index c1c420afe2dd..eb17c0cd3727 100644 --- a/drivers/gpu/drm/vgem/vgem_fence.c +++ b/drivers/gpu/drm/vgem/vgem_fence.c @@ -53,13 +53,13 @@ static void vgem_fence_release(struct dma_fence *base) static void vgem_fence_value_str(struct dma_fence *fence, char *str, int size) { - snprintf(str, size, "%u", fence->seqno); + snprintf(str, size, "%llu", fence->seqno); } static void vgem_fence_timeline_value_str(struct dma_fence *fence, char *str, int size) { - snprintf(str, size, "%u", + snprintf(str, size, "%llu", dma_fence_is_signaled(fence) ? fence->seqno : 0); } diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h index 02dba8cd033d..1393529c0c1f 100644 --- a/include/linux/dma-fence.h +++ b/include/linux/dma-fence.h @@ -77,7 +77,7 @@ struct dma_fence { struct list_head cb_list; spinlock_t *lock; u64 context; - unsigned seqno; + u64 seqno; unsigned long flags; ktime_t timestamp; int error; @@ -244,7 +244,7 @@ struct dma_fence_ops { }; void dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops, - spinlock_t *lock, u64 context, unsigned seqno); + spinlock_t *lock, u64 context, u64 seqno); void dma_fence_release(struct kref *kref); void dma_fence_free(struct dma_fence *fence); @@ -414,9 +414,9 @@ dma_fence_is_signaled(struct dma_fence *fence) * Returns true if f1 is chronologically later than f2. Both fences must be * from the same context, since a seqno is not common across contexts. 
*/ -static inline bool __dma_fence_is_later(u32 f1, u32 f2) +static inline bool __dma_fence_is_later(u64 f1, u64 f2) { - return (int)(f1 - f2) > 0; + return (int)(lower_32_bits(f1) - lower_32_bits(f2)) > 0; } /** @@ -547,21 +547,21 @@ u64 dma_fence_context_alloc(unsigned num); do { \ struct dma_fence *__ff = (f); \ if (IS_ENABLED(CONFIG_DMA_FENCE_TRACE)) \ - pr_info("f %llu#%u: " fmt, \ + pr_info("f %llu#%llu: " fmt, \ __ff->context, __ff->seqno, ##args); \ } while (0) #define DMA_FENCE_WARN(f, fmt, args...) \ do { \ struct dma_fence *__ff = (f); \ - pr_warn("f %llu#%u: " fmt, __ff->context, __ff->seqno, \ + pr_warn("f %llu#%llu: " fmt, __ff->context, __ff->seqno,\ ##args); \ } while (0) #define DMA_FENCE_ERR(f, fmt, args...) \ do { \ struct dma_fence *__ff = (f); \ - pr_err("f %llu#%u: " fmt, __ff->context, __ff->seqno, \ + pr_err("f %llu#%llu: " fmt, __ff->context, __ff->seqno, \ ##args); \ } while (0)
Lockless container implementation similar to a dma_fence_array, but with only two elements per node and automatic garbage collection.
v2: properly document dma_fence_chain_for_each, add dma_fence_chain_find_seqno, drop prev reference during garbage collection if it's not a chain fence.
Signed-off-by: Christian König christian.koenig@amd.com --- drivers/dma-buf/Makefile | 3 +- drivers/dma-buf/dma-fence-chain.c | 235 ++++++++++++++++++++++++++++++++++++++ include/linux/dma-fence-chain.h | 79 +++++++++++++ 3 files changed, 316 insertions(+), 1 deletion(-) create mode 100644 drivers/dma-buf/dma-fence-chain.c create mode 100644 include/linux/dma-fence-chain.h
diff --git a/drivers/dma-buf/Makefile b/drivers/dma-buf/Makefile index 0913a6ccab5a..1f006e083eb9 100644 --- a/drivers/dma-buf/Makefile +++ b/drivers/dma-buf/Makefile @@ -1,4 +1,5 @@ -obj-y := dma-buf.o dma-fence.o dma-fence-array.o reservation.o seqno-fence.o +obj-y := dma-buf.o dma-fence.o dma-fence-array.o dma-fence-chain.o \ + reservation.o seqno-fence.o obj-$(CONFIG_SYNC_FILE) += sync_file.o obj-$(CONFIG_SW_SYNC) += sw_sync.o sync_debug.o obj-$(CONFIG_UDMABUF) += udmabuf.o diff --git a/drivers/dma-buf/dma-fence-chain.c b/drivers/dma-buf/dma-fence-chain.c new file mode 100644 index 000000000000..de05101fc48d --- /dev/null +++ b/drivers/dma-buf/dma-fence-chain.c @@ -0,0 +1,235 @@ +/* + * fence-chain: chain fences together in a timeline + * + * Copyright (C) 2018 Advanced Micro Devices, Inc. + * Authors: + * Christian König christian.koenig@amd.com + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#include <linux/dma-fence-chain.h> + +static bool dma_fence_chain_enable_signaling(struct dma_fence *fence); + +/** + * dma_fence_chain_get_prev - use RCU to get a reference to the previous fence + * @chain: chain node to get the previous node from + * + * Use dma_fence_get_rcu_safe to get a reference to the previous fence of the + * chain node. 
+ */ +static struct dma_fence *dma_fence_chain_get_prev(struct dma_fence_chain *chain) +{ + struct dma_fence *prev; + + rcu_read_lock(); + prev = dma_fence_get_rcu_safe(&chain->prev); + rcu_read_unlock(); + return prev; +} + +/** + * dma_fence_chain_walk - chain walking function + * @fence: current chain node + * + * Walk the chain to the next node. Returns the next fence or NULL if we are at + * the end of the chain. Garbage collects chain nodes which are already + * signaled. + */ +struct dma_fence *dma_fence_chain_walk(struct dma_fence *fence) +{ + struct dma_fence_chain *chain, *prev_chain; + struct dma_fence *prev, *replacement, *tmp; + + chain = to_dma_fence_chain(fence); + if (!chain) { + dma_fence_put(fence); + return NULL; + } + + while ((prev = dma_fence_chain_get_prev(chain))) { + + prev_chain = to_dma_fence_chain(prev); + if (prev_chain) { + if (!dma_fence_is_signaled(prev_chain->fence)) + break; + + replacement = dma_fence_chain_get_prev(prev_chain); + } else { + if (!dma_fence_is_signaled(prev)) + break; + + replacement = NULL; + } + + tmp = cmpxchg(&chain->prev, prev, replacement); + if (tmp == prev) + dma_fence_put(tmp); + else + dma_fence_put(replacement); + dma_fence_put(prev); + } + + dma_fence_put(fence); + return prev; +} +EXPORT_SYMBOL(dma_fence_chain_walk); + +/** + * dma_fence_chain_find_seqno - find fence chain node by seqno + * @pfence: pointer to the chain node where to start + * @seqno: the sequence number to search for + * + * Advance the fence pointer to the chain node which will signal this sequence + * number. If no sequence number is provided then this is a no-op. + * + * Returns EINVAL if the fence is not a chain node or the sequence number has + * not yet advanced far enough. 
+ */ +int dma_fence_chain_find_seqno(struct dma_fence **pfence, uint64_t seqno) +{ + struct dma_fence_chain *chain; + + if (!seqno) + return 0; + + chain = to_dma_fence_chain(*pfence); + if (!chain || chain->base.seqno < seqno) + return -EINVAL; + + dma_fence_chain_for_each(*pfence) { + if ((*pfence)->context != chain->base.context || + to_dma_fence_chain(*pfence)->prev_seqno < seqno) + break; + } + dma_fence_put(&chain->base); + + return 0; +} +EXPORT_SYMBOL(dma_fence_chain_find_seqno); + +static const char *dma_fence_chain_get_driver_name(struct dma_fence *fence) +{ + return "dma_fence_chain"; +} + +static const char *dma_fence_chain_get_timeline_name(struct dma_fence *fence) +{ + return "unbound"; +} + +static void dma_fence_chain_irq_work(struct irq_work *work) +{ + struct dma_fence_chain *chain; + + chain = container_of(work, typeof(*chain), work); + + /* Try to rearm the callback */ + if (!dma_fence_chain_enable_signaling(&chain->base)) + /* Ok, we are done. No more unsignaled fences left */ + dma_fence_signal(&chain->base); +} + +static void dma_fence_chain_cb(struct dma_fence *f, struct dma_fence_cb *cb) +{ + struct dma_fence_chain *chain; + + chain = container_of(cb, typeof(*chain), cb); + irq_work_queue(&chain->work); + dma_fence_put(f); +} + +static bool dma_fence_chain_enable_signaling(struct dma_fence *fence) +{ + struct dma_fence_chain *head = to_dma_fence_chain(fence); + + dma_fence_chain_for_each(fence) { + struct dma_fence_chain *chain = to_dma_fence_chain(fence); + struct dma_fence *f = chain ? chain->fence : fence; + + if (!dma_fence_add_callback(f, &head->cb, dma_fence_chain_cb)) + return true; + } + return false; +} + +static bool dma_fence_chain_signaled(struct dma_fence *fence) +{ + dma_fence_chain_for_each(fence) { + struct dma_fence_chain *chain = to_dma_fence_chain(fence); + struct dma_fence *f = chain ? 
chain->fence : fence; + + if (!dma_fence_is_signaled(f)) { + dma_fence_put(fence); + return false; + } + } + + return true; +} + +static void dma_fence_chain_release(struct dma_fence *fence) +{ + struct dma_fence_chain *chain = to_dma_fence_chain(fence); + + dma_fence_put(chain->prev); + dma_fence_put(chain->fence); + dma_fence_free(fence); +} + +const struct dma_fence_ops dma_fence_chain_ops = { + .get_driver_name = dma_fence_chain_get_driver_name, + .get_timeline_name = dma_fence_chain_get_timeline_name, + .enable_signaling = dma_fence_chain_enable_signaling, + .signaled = dma_fence_chain_signaled, + .release = dma_fence_chain_release, +}; +EXPORT_SYMBOL(dma_fence_chain_ops); + +/** + * dma_fence_chain_init - initialize a fence chain + * @chain: the chain node to initialize + * @prev: the previous fence + * @fence: the current fence + * + * Initialize a new chain node and either start a new chain or add the node to + * the existing chain of the previous fence. + */ +void dma_fence_chain_init(struct dma_fence_chain *chain, + struct dma_fence *prev, + struct dma_fence *fence, + uint64_t seqno) +{ + struct dma_fence_chain *prev_chain = to_dma_fence_chain(prev); + uint64_t context; + + spin_lock_init(&chain->lock); + chain->prev = prev; + chain->fence = fence; + chain->prev_seqno = 0; + init_irq_work(&chain->work, dma_fence_chain_irq_work); + + /* Try to reuse the context of the previous chain node. */ + if (prev_chain && seqno > prev->seqno && + __dma_fence_is_later(seqno, prev->seqno)) { + context = prev->context; + chain->prev_seqno = prev->seqno; + } else { + context = dma_fence_context_alloc(1); + /* Make sure that we always have a valid sequence number. 
*/ + if (prev_chain) + seqno = max(prev->seqno, seqno); + } + + dma_fence_init(&chain->base, &dma_fence_chain_ops, + &chain->lock, context, seqno); +} +EXPORT_SYMBOL(dma_fence_chain_init); diff --git a/include/linux/dma-fence-chain.h b/include/linux/dma-fence-chain.h new file mode 100644 index 000000000000..3bb0efd6bc65 --- /dev/null +++ b/include/linux/dma-fence-chain.h @@ -0,0 +1,79 @@ +/* + * fence-chain: chain fences together in a timeline + * + * Copyright (C) 2018 Advanced Micro Devices, Inc. + * Authors: + * Christian König christian.koenig@amd.com + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#ifndef __LINUX_DMA_FENCE_CHAIN_H +#define __LINUX_DMA_FENCE_CHAIN_H + +#include <linux/dma-fence.h> +#include <linux/irq_work.h> + +/** + * struct dma_fence_chain - fence to represent an node of a fence chain + * @base: fence base class + * @lock: spinlock for fence handling + * @prev: previous fence of the chain + * @prev_seqno: original previous seqno before garbage collection + * @fence: encapsulated fence + * @cb: callback structure for signaling + * @work: irq work item for signaling + */ +struct dma_fence_chain { + struct dma_fence base; + spinlock_t lock; + struct dma_fence *prev; + u64 prev_seqno; + struct dma_fence *fence; + struct dma_fence_cb cb; + struct irq_work work; +}; + +extern const struct dma_fence_ops dma_fence_chain_ops; + +/** + * to_dma_fence_chain - cast a fence to a dma_fence_chain + * @fence: fence to cast to a dma_fence_array + * + * Returns NULL if the fence is not a dma_fence_chain, + * or the dma_fence_chain otherwise. 
+ */ +static inline struct dma_fence_chain * +to_dma_fence_chain(struct dma_fence *fence) +{ + if (!fence || fence->ops != &dma_fence_chain_ops) + return NULL; + + return container_of(fence, struct dma_fence_chain, base); +} + +/** + * dma_fence_chain_for_each - iterate over all fences in chain + * @fence: starting point as well as current fence + * + * Iterate over all fences in the chain. We keep a reference to the current + * fence while inside the loop which must be dropped when breaking out. + */ +#define dma_fence_chain_for_each(fence) \ + for (dma_fence_get(fence);fence;fence=dma_fence_chain_walk(fence)) + +struct dma_fence *dma_fence_chain_walk(struct dma_fence *fence); +int dma_fence_chain_find_seqno(struct dma_fence **pfence, uint64_t seqno); +void dma_fence_chain_init(struct dma_fence_chain *chain, + struct dma_fence *prev, + struct dma_fence *fence, + uint64_t seqno); + +#endif /* __LINUX_DMA_FENCE_CHAIN_H */
Quoting Christian König (2018-11-28 14:50:12)
+/**
- dma_fence_chain_for_each - iterate over all fences in chain
- @fence: starting point as well as current fence
- Iterate over all fences in the chain. We keep a reference to the current
- fence while inside the loop which must be dropped when breaking out.
- */
+#define dma_fence_chain_for_each(fence) \
for (dma_fence_get(fence);fence;fence=dma_fence_chain_walk(fence))
That's a nasty macro. Can we have separate vars for iter and head?
Reading,
+static bool dma_fence_chain_signaled(struct dma_fence *fence) +{
dma_fence_chain_for_each(fence) {
struct dma_fence_chain *chain = to_dma_fence_chain(fence);
struct dma_fence *f = chain ? chain->fence : fence;
if (!dma_fence_is_signaled(f)) {
dma_fence_put(fence);
return false;
}
}
return true;
+}
it's not clear whether the intent there is to use the in parameter fence or an iter.
for (it = dma_fence_get(fence); it; it = dma_fence_chain_walk(it))
dma_fence_chain_for_each(it, fence) { struct dma_fence_chain *chain = to_dma_fence_chain(it); struct dma_fence *f = chain ? chain->fence : it;
if (!dma_fence_is_signaled(f)) { dma_fence_put(it); return false; } } -Chris
Am 29.11.18 um 11:32 schrieb Chris Wilson:
Quoting Christian König (2018-11-28 14:50:12)
+/**
- dma_fence_chain_for_each - iterate over all fences in chain
- @fence: starting point as well as current fence
- Iterate over all fences in the chain. We keep a reference to the current
- fence while inside the loop which must be dropped when breaking out.
- */
+#define dma_fence_chain_for_each(fence) \
for (dma_fence_get(fence);fence;fence=dma_fence_chain_walk(fence))
That's a nasty macro. Can we have separate vars for iter and head?
Good idea, I was also running into some issues where making this distinct could have made it more easier to understand.
Going to change that, Christian.
Reading,
+static bool dma_fence_chain_signaled(struct dma_fence *fence) +{
dma_fence_chain_for_each(fence) {
struct dma_fence_chain *chain = to_dma_fence_chain(fence);
struct dma_fence *f = chain ? chain->fence : fence;
if (!dma_fence_is_signaled(f)) {
dma_fence_put(fence);
return false;
}
}
return true;
+}
it's not clear whether the intent there is to use the in parameter fence or an iter.
for (it = dma_fence_get(fence); it; it = dma_fence_chain_walk(it))
dma_fence_chain_for_each(it, fence) { struct dma_fence_chain *chain = to_dma_fence_chain(it); struct dma_fence *f = chain ? chain->fence : it;
if (!dma_fence_is_signaled(f)) { dma_fence_put(it); return false; } } -Chris
On 2018年11月28日 22:50, Christian König wrote:
Lockless container implementation similar to a dma_fence_array, but with only two elements per node and automatic garbage collection.
v2: properly document dma_fence_chain_for_each, add dma_fence_chain_find_seqno, drop prev reference during garbage collection if it's not a chain fence.
Signed-off-by: Christian König christian.koenig@amd.com
drivers/dma-buf/Makefile | 3 +- drivers/dma-buf/dma-fence-chain.c | 235 ++++++++++++++++++++++++++++++++++++++ include/linux/dma-fence-chain.h | 79 +++++++++++++ 3 files changed, 316 insertions(+), 1 deletion(-) create mode 100644 drivers/dma-buf/dma-fence-chain.c create mode 100644 include/linux/dma-fence-chain.h
diff --git a/drivers/dma-buf/Makefile b/drivers/dma-buf/Makefile index 0913a6ccab5a..1f006e083eb9 100644 --- a/drivers/dma-buf/Makefile +++ b/drivers/dma-buf/Makefile @@ -1,4 +1,5 @@ -obj-y := dma-buf.o dma-fence.o dma-fence-array.o reservation.o seqno-fence.o +obj-y := dma-buf.o dma-fence.o dma-fence-array.o dma-fence-chain.o \
obj-$(CONFIG_SYNC_FILE) += sync_file.o obj-$(CONFIG_SW_SYNC) += sw_sync.o sync_debug.o obj-$(CONFIG_UDMABUF) += udmabuf.oreservation.o seqno-fence.o
diff --git a/drivers/dma-buf/dma-fence-chain.c b/drivers/dma-buf/dma-fence-chain.c new file mode 100644 index 000000000000..de05101fc48d --- /dev/null +++ b/drivers/dma-buf/dma-fence-chain.c @@ -0,0 +1,235 @@ +/*
- fence-chain: chain fences together in a timeline
- Copyright (C) 2018 Advanced Micro Devices, Inc.
- Authors:
- Christian König christian.koenig@amd.com
- This program is free software; you can redistribute it and/or modify it
- under the terms of the GNU General Public License version 2 as published by
- the Free Software Foundation.
- This program is distributed in the hope that it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
- */
+#include <linux/dma-fence-chain.h>
+static bool dma_fence_chain_enable_signaling(struct dma_fence *fence);
+/**
- dma_fence_chain_get_prev - use RCU to get a reference to the previous fence
- @chain: chain node to get the previous node from
- Use dma_fence_get_rcu_safe to get a reference to the previous fence of the
- chain node.
- */
+static struct dma_fence *dma_fence_chain_get_prev(struct dma_fence_chain *chain) +{
- struct dma_fence *prev;
- rcu_read_lock();
- prev = dma_fence_get_rcu_safe(&chain->prev);
- rcu_read_unlock();
- return prev;
+}
+/**
- dma_fence_chain_walk - chain walking function
- @fence: current chain node
- Walk the chain to the next node. Returns the next fence or NULL if we are at
- the end of the chain. Garbage collects chain nodes which are already
- signaled.
- */
+struct dma_fence *dma_fence_chain_walk(struct dma_fence *fence) +{
- struct dma_fence_chain *chain, *prev_chain;
- struct dma_fence *prev, *replacement, *tmp;
- chain = to_dma_fence_chain(fence);
- if (!chain) {
dma_fence_put(fence);
return NULL;
- }
- while ((prev = dma_fence_chain_get_prev(chain))) {
prev_chain = to_dma_fence_chain(prev);
if (prev_chain) {
if (!dma_fence_is_signaled(prev_chain->fence))
break;
replacement = dma_fence_chain_get_prev(prev_chain);
} else {
if (!dma_fence_is_signaled(prev))
break;
replacement = NULL;
}
tmp = cmpxchg(&chain->prev, prev, replacement);
if (tmp == prev)
dma_fence_put(tmp);
else
dma_fence_put(replacement);
dma_fence_put(prev);
- }
- dma_fence_put(fence);
- return prev;
+} +EXPORT_SYMBOL(dma_fence_chain_walk);
+/**
- dma_fence_chain_find_seqno - find fence chain node by seqno
- @pfence: pointer to the chain node where to start
- @seqno: the sequence number to search for
- Advance the fence pointer to the chain node which will signal this sequence
- number. If no sequence number is provided then this is a no-op.
- Returns EINVAL if the fence is not a chain node or the sequence number has
- not yet advanced far enough.
- */
+int dma_fence_chain_find_seqno(struct dma_fence **pfence, uint64_t seqno) +{
- struct dma_fence_chain *chain;
- if (!seqno)
return 0;
- chain = to_dma_fence_chain(*pfence);
- if (!chain || chain->base.seqno < seqno)
return -EINVAL;
- dma_fence_chain_for_each(*pfence) {
if ((*pfence)->context != chain->base.context ||
to_dma_fence_chain(*pfence)->prev_seqno < seqno)
break;
- }
- dma_fence_put(&chain->base);
- return 0;
+} +EXPORT_SYMBOL(dma_fence_chain_find_seqno);
+static const char *dma_fence_chain_get_driver_name(struct dma_fence *fence) +{
return "dma_fence_chain";
+}
+static const char *dma_fence_chain_get_timeline_name(struct dma_fence *fence) +{
return "unbound";
+}
+static void dma_fence_chain_irq_work(struct irq_work *work) +{
- struct dma_fence_chain *chain;
- chain = container_of(work, typeof(*chain), work);
- /* Try to rearm the callback */
- if (!dma_fence_chain_enable_signaling(&chain->base))
/* Ok, we are done. No more unsignaled fences left */
dma_fence_signal(&chain->base);
+}
+static void dma_fence_chain_cb(struct dma_fence *f, struct dma_fence_cb *cb) +{
- struct dma_fence_chain *chain;
- chain = container_of(cb, typeof(*chain), cb);
- irq_work_queue(&chain->work);
- dma_fence_put(f);
+}
+static bool dma_fence_chain_enable_signaling(struct dma_fence *fence) +{
- struct dma_fence_chain *head = to_dma_fence_chain(fence);
- dma_fence_chain_for_each(fence) {
struct dma_fence_chain *chain = to_dma_fence_chain(fence);
struct dma_fence *f = chain ? chain->fence : fence;
if (!dma_fence_add_callback(f, &head->cb, dma_fence_chain_cb))
return true;
- }
- return false;
+}
+static bool dma_fence_chain_signaled(struct dma_fence *fence) +{
- dma_fence_chain_for_each(fence) {
struct dma_fence_chain *chain = to_dma_fence_chain(fence);
struct dma_fence *f = chain ? chain->fence : fence;
if (!dma_fence_is_signaled(f)) {
dma_fence_put(fence);
return false;
}
- }
- return true;
+}
+static void dma_fence_chain_release(struct dma_fence *fence) +{
- struct dma_fence_chain *chain = to_dma_fence_chain(fence);
- dma_fence_put(chain->prev);
- dma_fence_put(chain->fence);
- dma_fence_free(fence);
+}
+const struct dma_fence_ops dma_fence_chain_ops = {
- .get_driver_name = dma_fence_chain_get_driver_name,
- .get_timeline_name = dma_fence_chain_get_timeline_name,
- .enable_signaling = dma_fence_chain_enable_signaling,
- .signaled = dma_fence_chain_signaled,
- .release = dma_fence_chain_release,
+}; +EXPORT_SYMBOL(dma_fence_chain_ops);
+/**
- dma_fence_chain_init - initialize a fence chain
- @chain: the chain node to initialize
- @prev: the previous fence
- @fence: the current fence
- Initialize a new chain node and either start a new chain or add the node to
- the existing chain of the previous fence.
- */
+void dma_fence_chain_init(struct dma_fence_chain *chain,
struct dma_fence *prev,
struct dma_fence *fence,
uint64_t seqno)
+{
- struct dma_fence_chain *prev_chain = to_dma_fence_chain(prev);
- uint64_t context;
- spin_lock_init(&chain->lock);
- chain->prev = prev;
- chain->fence = fence;
- chain->prev_seqno = 0;
- init_irq_work(&chain->work, dma_fence_chain_irq_work);
- /* Try to reuse the context of the previous chain node. */
- if (prev_chain && seqno > prev->seqno &&
__dma_fence_is_later(seqno, prev->seqno)) {
As your patch#1 makes __dma_fence_is_later only be valid for 32bit, we cannot use it for 64bit here, we should remove it from here, just compare seqno directly.
context = prev->context;
chain->prev_seqno = prev->seqno;
- } else {
context = dma_fence_context_alloc(1);
/* Make sure that we always have a valid sequence number. */
if (prev_chain)
seqno = max(prev->seqno, seqno);
I still cannot judge if this case is proper, but I prefer we just drop it when seqno is <= last seqno. We can imagine that when we enable export/import, we could mess them up. So we shouldn't change an existing timeline signal point at any time.
-David
- }
- dma_fence_init(&chain->base, &dma_fence_chain_ops,
&chain->lock, context, seqno);
+} +EXPORT_SYMBOL(dma_fence_chain_init); diff --git a/include/linux/dma-fence-chain.h b/include/linux/dma-fence-chain.h new file mode 100644 index 000000000000..3bb0efd6bc65 --- /dev/null +++ b/include/linux/dma-fence-chain.h @@ -0,0 +1,79 @@ +/*
- fence-chain: chain fences together in a timeline
- Copyright (C) 2018 Advanced Micro Devices, Inc.
- Authors:
- Christian König christian.koenig@amd.com
- This program is free software; you can redistribute it and/or modify it
- under the terms of the GNU General Public License version 2 as published by
- the Free Software Foundation.
- This program is distributed in the hope that it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
- */
+#ifndef __LINUX_DMA_FENCE_CHAIN_H +#define __LINUX_DMA_FENCE_CHAIN_H
+#include <linux/dma-fence.h> +#include <linux/irq_work.h>
+/**
- struct dma_fence_chain - fence to represent an node of a fence chain
- @base: fence base class
- @lock: spinlock for fence handling
- @prev: previous fence of the chain
- @prev_seqno: original previous seqno before garbage collection
- @fence: encapsulated fence
- @cb: callback structure for signaling
- @work: irq work item for signaling
- */
+struct dma_fence_chain {
- struct dma_fence base;
- spinlock_t lock;
- struct dma_fence *prev;
- u64 prev_seqno;
- struct dma_fence *fence;
- struct dma_fence_cb cb;
- struct irq_work work;
+};
+extern const struct dma_fence_ops dma_fence_chain_ops;
+/**
- to_dma_fence_chain - cast a fence to a dma_fence_chain
- @fence: fence to cast to a dma_fence_array
- Returns NULL if the fence is not a dma_fence_chain,
- or the dma_fence_chain otherwise.
- */
+static inline struct dma_fence_chain * +to_dma_fence_chain(struct dma_fence *fence) +{
- if (!fence || fence->ops != &dma_fence_chain_ops)
return NULL;
- return container_of(fence, struct dma_fence_chain, base);
+}
+/**
- dma_fence_chain_for_each - iterate over all fences in chain
- @fence: starting point as well as current fence
- Iterate over all fences in the chain. We keep a reference to the current
- fence while inside the loop which must be dropped when breaking out.
- */
+#define dma_fence_chain_for_each(fence) \
- for (dma_fence_get(fence);fence;fence=dma_fence_chain_walk(fence))
+struct dma_fence *dma_fence_chain_walk(struct dma_fence *fence); +int dma_fence_chain_find_seqno(struct dma_fence **pfence, uint64_t seqno); +void dma_fence_chain_init(struct dma_fence_chain *chain,
struct dma_fence *prev,
struct dma_fence *fence,
uint64_t seqno);
+#endif /* __LINUX_DMA_FENCE_CHAIN_H */
Am 03.12.18 um 06:25 schrieb zhoucm1:
On 2018年11月28日 22:50, Christian König wrote:
Lockless container implementation similar to a dma_fence_array, but with only two elements per node and automatic garbage collection.
v2: properly document dma_fence_chain_for_each, add dma_fence_chain_find_seqno, drop prev reference during garbage collection if it's not a chain fence.
Signed-off-by: Christian König christian.koenig@amd.com
drivers/dma-buf/Makefile | 3 +- drivers/dma-buf/dma-fence-chain.c | 235 ++++++++++++++++++++++++++++++++++++++ include/linux/dma-fence-chain.h | 79 +++++++++++++ 3 files changed, 316 insertions(+), 1 deletion(-) create mode 100644 drivers/dma-buf/dma-fence-chain.c create mode 100644 include/linux/dma-fence-chain.h
diff --git a/drivers/dma-buf/Makefile b/drivers/dma-buf/Makefile index 0913a6ccab5a..1f006e083eb9 100644 --- a/drivers/dma-buf/Makefile +++ b/drivers/dma-buf/Makefile @@ -1,4 +1,5 @@ -obj-y := dma-buf.o dma-fence.o dma-fence-array.o reservation.o seqno-fence.o +obj-y := dma-buf.o dma-fence.o dma-fence-array.o dma-fence-chain.o \ + reservation.o seqno-fence.o obj-$(CONFIG_SYNC_FILE) += sync_file.o obj-$(CONFIG_SW_SYNC) += sw_sync.o sync_debug.o obj-$(CONFIG_UDMABUF) += udmabuf.o diff --git a/drivers/dma-buf/dma-fence-chain.c b/drivers/dma-buf/dma-fence-chain.c new file mode 100644 index 000000000000..de05101fc48d --- /dev/null +++ b/drivers/dma-buf/dma-fence-chain.c @@ -0,0 +1,235 @@ +/*
- fence-chain: chain fences together in a timeline
- Copyright (C) 2018 Advanced Micro Devices, Inc.
- Authors:
- * Christian König christian.koenig@amd.com
- This program is free software; you can redistribute it and/or
modify it
- under the terms of the GNU General Public License version 2 as
published by
- the Free Software Foundation.
- This program is distributed in the hope that it will be useful,
but WITHOUT
- ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
License for
- more details.
- */
+#include <linux/dma-fence-chain.h>
+static bool dma_fence_chain_enable_signaling(struct dma_fence *fence);
+/**
- dma_fence_chain_get_prev - use RCU to get a reference to the
previous fence
- @chain: chain node to get the previous node from
- Use dma_fence_get_rcu_safe to get a reference to the previous
fence of the
- chain node.
- */
+static struct dma_fence *dma_fence_chain_get_prev(struct dma_fence_chain *chain) +{ + struct dma_fence *prev;
+ rcu_read_lock(); + prev = dma_fence_get_rcu_safe(&chain->prev); + rcu_read_unlock(); + return prev; +}
+/**
- dma_fence_chain_walk - chain walking function
- @fence: current chain node
- Walk the chain to the next node. Returns the next fence or NULL
if we are at
- the end of the chain. Garbage collects chain nodes which are already
- signaled.
- */
+struct dma_fence *dma_fence_chain_walk(struct dma_fence *fence) +{ + struct dma_fence_chain *chain, *prev_chain; + struct dma_fence *prev, *replacement, *tmp;
+ chain = to_dma_fence_chain(fence); + if (!chain) { + dma_fence_put(fence); + return NULL; + }
+ while ((prev = dma_fence_chain_get_prev(chain))) {
+ prev_chain = to_dma_fence_chain(prev); + if (prev_chain) { + if (!dma_fence_is_signaled(prev_chain->fence)) + break;
+ replacement = dma_fence_chain_get_prev(prev_chain); + } else { + if (!dma_fence_is_signaled(prev)) + break;
+ replacement = NULL; + }
+ tmp = cmpxchg(&chain->prev, prev, replacement); + if (tmp == prev) + dma_fence_put(tmp); + else + dma_fence_put(replacement); + dma_fence_put(prev); + }
+ dma_fence_put(fence); + return prev; +} +EXPORT_SYMBOL(dma_fence_chain_walk);
+/**
- dma_fence_chain_find_seqno - find fence chain node by seqno
- @pfence: pointer to the chain node where to start
- @seqno: the sequence number to search for
- Advance the fence pointer to the chain node which will signal
this sequence
- number. If no sequence number is provided then this is a no-op.
- Returns EINVAL if the fence is not a chain node or the sequence
number has
- not yet advanced far enough.
- */
+int dma_fence_chain_find_seqno(struct dma_fence **pfence, uint64_t seqno) +{ + struct dma_fence_chain *chain;
+ if (!seqno) + return 0;
+ chain = to_dma_fence_chain(*pfence); + if (!chain || chain->base.seqno < seqno) + return -EINVAL;
+ dma_fence_chain_for_each(*pfence) { + if ((*pfence)->context != chain->base.context || + to_dma_fence_chain(*pfence)->prev_seqno < seqno) + break; + } + dma_fence_put(&chain->base);
+ return 0; +} +EXPORT_SYMBOL(dma_fence_chain_find_seqno);
+static const char *dma_fence_chain_get_driver_name(struct dma_fence *fence) +{ + return "dma_fence_chain"; +}
+static const char *dma_fence_chain_get_timeline_name(struct dma_fence *fence) +{ + return "unbound"; +}
+static void dma_fence_chain_irq_work(struct irq_work *work) +{ + struct dma_fence_chain *chain;
+ chain = container_of(work, typeof(*chain), work);
+ /* Try to rearm the callback */ + if (!dma_fence_chain_enable_signaling(&chain->base)) + /* Ok, we are done. No more unsignaled fences left */ + dma_fence_signal(&chain->base); +}
+static void dma_fence_chain_cb(struct dma_fence *f, struct dma_fence_cb *cb) +{ + struct dma_fence_chain *chain;
+ chain = container_of(cb, typeof(*chain), cb); + irq_work_queue(&chain->work); + dma_fence_put(f); +}
+static bool dma_fence_chain_enable_signaling(struct dma_fence *fence) +{ + struct dma_fence_chain *head = to_dma_fence_chain(fence);
+ dma_fence_chain_for_each(fence) { + struct dma_fence_chain *chain = to_dma_fence_chain(fence); + struct dma_fence *f = chain ? chain->fence : fence;
+ if (!dma_fence_add_callback(f, &head->cb, dma_fence_chain_cb)) + return true; + } + return false; +}
+static bool dma_fence_chain_signaled(struct dma_fence *fence) +{ + dma_fence_chain_for_each(fence) { + struct dma_fence_chain *chain = to_dma_fence_chain(fence); + struct dma_fence *f = chain ? chain->fence : fence;
+ if (!dma_fence_is_signaled(f)) { + dma_fence_put(fence); + return false; + } + }
+ return true; +}
+static void dma_fence_chain_release(struct dma_fence *fence) +{ + struct dma_fence_chain *chain = to_dma_fence_chain(fence);
+ dma_fence_put(chain->prev); + dma_fence_put(chain->fence); + dma_fence_free(fence); +}
+const struct dma_fence_ops dma_fence_chain_ops = { + .get_driver_name = dma_fence_chain_get_driver_name, + .get_timeline_name = dma_fence_chain_get_timeline_name, + .enable_signaling = dma_fence_chain_enable_signaling, + .signaled = dma_fence_chain_signaled, + .release = dma_fence_chain_release, +}; +EXPORT_SYMBOL(dma_fence_chain_ops);
+/**
- dma_fence_chain_init - initialize a fence chain
- @chain: the chain node to initialize
- @prev: the previous fence
- @fence: the current fence
- Initialize a new chain node and either start a new chain or add
the node to
- the existing chain of the previous fence.
- */
+void dma_fence_chain_init(struct dma_fence_chain *chain, + struct dma_fence *prev, + struct dma_fence *fence, + uint64_t seqno) +{ + struct dma_fence_chain *prev_chain = to_dma_fence_chain(prev); + uint64_t context;
+ spin_lock_init(&chain->lock); + chain->prev = prev; + chain->fence = fence; + chain->prev_seqno = 0; + init_irq_work(&chain->work, dma_fence_chain_irq_work);
+ /* Try to reuse the context of the previous chain node. */ + if (prev_chain && seqno > prev->seqno && + __dma_fence_is_later(seqno, prev->seqno)) {
As your patch#1 makes __dma_fence_is_later only be valid for 32bit, we cannot use it for 64bit here, we should remove it from here, just compare seqno directly.
That is intentional. We must make sure that the number both increments as a 64bit number and does not wrap around as a 32bit number.
In other words the largest difference between two sequence numbers userspace is allowed to submit is 1<<31.
+ context = prev->context; + chain->prev_seqno = prev->seqno; + } else { + context = dma_fence_context_alloc(1); + /* Make sure that we always have a valid sequence number. */ + if (prev_chain) + seqno = max(prev->seqno, seqno);
I still cannot judge if this case is proper, but I prefer we just drop it when seqno is <= last seqno. We can imagine that when we enable export/import, we could mess them up. So we shouldn't change an existing timeline signal point at any time.
Yeah, but we can't lose fences either. This is the most defensive approach I can think of.
E.g. we still add the fence, but start a new timeline so queries for all previous sequence numbers will always wait for everything.
Regards, Christian.
-David
+ }
+ dma_fence_init(&chain->base, &dma_fence_chain_ops, + &chain->lock, context, seqno); +} +EXPORT_SYMBOL(dma_fence_chain_init); diff --git a/include/linux/dma-fence-chain.h b/include/linux/dma-fence-chain.h new file mode 100644 index 000000000000..3bb0efd6bc65 --- /dev/null +++ b/include/linux/dma-fence-chain.h @@ -0,0 +1,79 @@ +/*
- fence-chain: chain fences together in a timeline
- Copyright (C) 2018 Advanced Micro Devices, Inc.
- Authors:
- * Christian König christian.koenig@amd.com
- This program is free software; you can redistribute it and/or
modify it
- under the terms of the GNU General Public License version 2 as
published by
- the Free Software Foundation.
- This program is distributed in the hope that it will be useful,
but WITHOUT
- ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
License for
- more details.
- */
+#ifndef __LINUX_DMA_FENCE_CHAIN_H +#define __LINUX_DMA_FENCE_CHAIN_H
+#include <linux/dma-fence.h> +#include <linux/irq_work.h>
+/**
- struct dma_fence_chain - fence to represent an node of a fence chain
- @base: fence base class
- @lock: spinlock for fence handling
- @prev: previous fence of the chain
- @prev_seqno: original previous seqno before garbage collection
- @fence: encapsulated fence
- @cb: callback structure for signaling
- @work: irq work item for signaling
- */
+struct dma_fence_chain { + struct dma_fence base; + spinlock_t lock; + struct dma_fence *prev; + u64 prev_seqno; + struct dma_fence *fence; + struct dma_fence_cb cb; + struct irq_work work; +};
+extern const struct dma_fence_ops dma_fence_chain_ops;
+/**
- to_dma_fence_chain - cast a fence to a dma_fence_chain
- @fence: fence to cast to a dma_fence_array
- Returns NULL if the fence is not a dma_fence_chain,
- or the dma_fence_chain otherwise.
- */
+static inline struct dma_fence_chain * +to_dma_fence_chain(struct dma_fence *fence) +{ + if (!fence || fence->ops != &dma_fence_chain_ops) + return NULL;
+ return container_of(fence, struct dma_fence_chain, base); +}
+/**
- dma_fence_chain_for_each - iterate over all fences in chain
- @fence: starting point as well as current fence
- Iterate over all fences in the chain. We keep a reference to the
current
- fence while inside the loop which must be dropped when breaking out.
- */
+#define dma_fence_chain_for_each(fence) \ + for (dma_fence_get(fence);fence;fence=dma_fence_chain_walk(fence))
+struct dma_fence *dma_fence_chain_walk(struct dma_fence *fence); +int dma_fence_chain_find_seqno(struct dma_fence **pfence, uint64_t seqno); +void dma_fence_chain_init(struct dma_fence_chain *chain, + struct dma_fence *prev, + struct dma_fence *fence, + uint64_t seqno);
+#endif /* __LINUX_DMA_FENCE_CHAIN_H */
在 2018/12/3 19:00, Christian König 写道:
Am 03.12.18 um 06:25 schrieb zhoucm1:
On 2018年11月28日 22:50, Christian König wrote:
Lockless container implementation similar to a dma_fence_array, but with only two elements per node and automatic garbage collection.
v2: properly document dma_fence_chain_for_each, add dma_fence_chain_find_seqno, drop prev reference during garbage collection if it's not a chain fence.
Signed-off-by: Christian König christian.koenig@amd.com
drivers/dma-buf/Makefile | 3 +- drivers/dma-buf/dma-fence-chain.c | 235 ++++++++++++++++++++++++++++++++++++++ include/linux/dma-fence-chain.h | 79 +++++++++++++ 3 files changed, 316 insertions(+), 1 deletion(-) create mode 100644 drivers/dma-buf/dma-fence-chain.c create mode 100644 include/linux/dma-fence-chain.h
diff --git a/drivers/dma-buf/Makefile b/drivers/dma-buf/Makefile index 0913a6ccab5a..1f006e083eb9 100644 --- a/drivers/dma-buf/Makefile +++ b/drivers/dma-buf/Makefile @@ -1,4 +1,5 @@ -obj-y := dma-buf.o dma-fence.o dma-fence-array.o reservation.o seqno-fence.o +obj-y := dma-buf.o dma-fence.o dma-fence-array.o dma-fence-chain.o \ + reservation.o seqno-fence.o obj-$(CONFIG_SYNC_FILE) += sync_file.o obj-$(CONFIG_SW_SYNC) += sw_sync.o sync_debug.o obj-$(CONFIG_UDMABUF) += udmabuf.o diff --git a/drivers/dma-buf/dma-fence-chain.c b/drivers/dma-buf/dma-fence-chain.c new file mode 100644 index 000000000000..de05101fc48d --- /dev/null +++ b/drivers/dma-buf/dma-fence-chain.c @@ -0,0 +1,235 @@ +/*
- fence-chain: chain fences together in a timeline
- Copyright (C) 2018 Advanced Micro Devices, Inc.
- Authors:
- * Christian König christian.koenig@amd.com
- This program is free software; you can redistribute it and/or
modify it
- under the terms of the GNU General Public License version 2 as
published by
- the Free Software Foundation.
- This program is distributed in the hope that it will be useful,
but WITHOUT
- ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
License for
- more details.
- */
+#include <linux/dma-fence-chain.h>
+static bool dma_fence_chain_enable_signaling(struct dma_fence *fence);
+/**
- dma_fence_chain_get_prev - use RCU to get a reference to the
previous fence
- @chain: chain node to get the previous node from
- Use dma_fence_get_rcu_safe to get a reference to the previous
fence of the
- chain node.
- */
+static struct dma_fence *dma_fence_chain_get_prev(struct dma_fence_chain *chain) +{ + struct dma_fence *prev;
+ rcu_read_lock(); + prev = dma_fence_get_rcu_safe(&chain->prev); + rcu_read_unlock(); + return prev; +}
+/**
- dma_fence_chain_walk - chain walking function
- @fence: current chain node
- Walk the chain to the next node. Returns the next fence or NULL
if we are at
- the end of the chain. Garbage collects chain nodes which are
already
- signaled.
- */
+struct dma_fence *dma_fence_chain_walk(struct dma_fence *fence) +{ + struct dma_fence_chain *chain, *prev_chain; + struct dma_fence *prev, *replacement, *tmp;
+ chain = to_dma_fence_chain(fence); + if (!chain) { + dma_fence_put(fence); + return NULL; + }
+ while ((prev = dma_fence_chain_get_prev(chain))) {
+ prev_chain = to_dma_fence_chain(prev); + if (prev_chain) { + if (!dma_fence_is_signaled(prev_chain->fence)) + break;
+ replacement = dma_fence_chain_get_prev(prev_chain); + } else { + if (!dma_fence_is_signaled(prev)) + break;
+ replacement = NULL; + }
+ tmp = cmpxchg(&chain->prev, prev, replacement); + if (tmp == prev) + dma_fence_put(tmp); + else + dma_fence_put(replacement); + dma_fence_put(prev); + }
+ dma_fence_put(fence); + return prev; +} +EXPORT_SYMBOL(dma_fence_chain_walk);
+/**
- dma_fence_chain_find_seqno - find fence chain node by seqno
- @pfence: pointer to the chain node where to start
- @seqno: the sequence number to search for
- Advance the fence pointer to the chain node which will signal
this sequence
- number. If no sequence number is provided then this is a no-op.
- Returns EINVAL if the fence is not a chain node or the sequence
number has
- not yet advanced far enough.
- */
+int dma_fence_chain_find_seqno(struct dma_fence **pfence, uint64_t seqno) +{ + struct dma_fence_chain *chain;
+ if (!seqno) + return 0;
+ chain = to_dma_fence_chain(*pfence); + if (!chain || chain->base.seqno < seqno) + return -EINVAL;
+ dma_fence_chain_for_each(*pfence) { + if ((*pfence)->context != chain->base.context || + to_dma_fence_chain(*pfence)->prev_seqno < seqno) + break; + } + dma_fence_put(&chain->base);
+ return 0; +} +EXPORT_SYMBOL(dma_fence_chain_find_seqno);
+static const char *dma_fence_chain_get_driver_name(struct dma_fence *fence) +{ + return "dma_fence_chain"; +}
+static const char *dma_fence_chain_get_timeline_name(struct dma_fence *fence) +{ + return "unbound"; +}
+static void dma_fence_chain_irq_work(struct irq_work *work) +{ + struct dma_fence_chain *chain;
+ chain = container_of(work, typeof(*chain), work);
+ /* Try to rearm the callback */ + if (!dma_fence_chain_enable_signaling(&chain->base)) + /* Ok, we are done. No more unsignaled fences left */ + dma_fence_signal(&chain->base); +}
+static void dma_fence_chain_cb(struct dma_fence *f, struct dma_fence_cb *cb) +{ + struct dma_fence_chain *chain;
+ chain = container_of(cb, typeof(*chain), cb); + irq_work_queue(&chain->work); + dma_fence_put(f); +}
+static bool dma_fence_chain_enable_signaling(struct dma_fence *fence) +{ + struct dma_fence_chain *head = to_dma_fence_chain(fence);
+ dma_fence_chain_for_each(fence) { + struct dma_fence_chain *chain = to_dma_fence_chain(fence); + struct dma_fence *f = chain ? chain->fence : fence;
+ if (!dma_fence_add_callback(f, &head->cb, dma_fence_chain_cb)) + return true; + } + return false; +}
+static bool dma_fence_chain_signaled(struct dma_fence *fence) +{ + dma_fence_chain_for_each(fence) { + struct dma_fence_chain *chain = to_dma_fence_chain(fence); + struct dma_fence *f = chain ? chain->fence : fence;
+ if (!dma_fence_is_signaled(f)) { + dma_fence_put(fence); + return false; + } + }
+ return true; +}
+static void dma_fence_chain_release(struct dma_fence *fence) +{ + struct dma_fence_chain *chain = to_dma_fence_chain(fence);
+ dma_fence_put(chain->prev); + dma_fence_put(chain->fence); + dma_fence_free(fence); +}
+const struct dma_fence_ops dma_fence_chain_ops = { + .get_driver_name = dma_fence_chain_get_driver_name, + .get_timeline_name = dma_fence_chain_get_timeline_name, + .enable_signaling = dma_fence_chain_enable_signaling, + .signaled = dma_fence_chain_signaled, + .release = dma_fence_chain_release, +}; +EXPORT_SYMBOL(dma_fence_chain_ops);
+/**
- dma_fence_chain_init - initialize a fence chain
- @chain: the chain node to initialize
- @prev: the previous fence
- @fence: the current fence
- Initialize a new chain node and either start a new chain or add
the node to
- the existing chain of the previous fence.
- */
+void dma_fence_chain_init(struct dma_fence_chain *chain, + struct dma_fence *prev, + struct dma_fence *fence, + uint64_t seqno) +{ + struct dma_fence_chain *prev_chain = to_dma_fence_chain(prev); + uint64_t context;
+ spin_lock_init(&chain->lock); + chain->prev = prev; + chain->fence = fence; + chain->prev_seqno = 0; + init_irq_work(&chain->work, dma_fence_chain_irq_work);
+ /* Try to reuse the context of the previous chain node. */ + if (prev_chain && seqno > prev->seqno && + __dma_fence_is_later(seqno, prev->seqno)) {
As your patch#1 makes __dma_fence_is_later only be valid for 32bit, we cannot use it for 64bit here, we should remove it from here, just compare seqno directly.
That is intentional. We must make sure that the number both increments as a 64bit number and does not wrap around as a 32bit number.
In other words the largest difference between two sequence numbers userspace is allowed to submit is 1<<31.
Why? No one can make sure of that; application users would only think it is a uint64 sequence number, and they can signal any advanced point. I already see umd guys writing a timeline test that uses max_uint64-1 as a final signal. We shouldn't add this limitation here.
-David
+ context = prev->context; + chain->prev_seqno = prev->seqno; + } else { + context = dma_fence_context_alloc(1); + /* Make sure that we always have a valid sequence number. */ + if (prev_chain) + seqno = max(prev->seqno, seqno);
I still cannot judge if this case is proper, but I prefer we just drop it when seqno is <= last seqno. We can imagine that when we enable export/import, we could mess them up. So we shouldn't change a timeline's existing signal point at any time.
Yeah, but we can't lose fences either. This is the most defensive approach I can think of.
E.g. we still add the fence, but start a new timeline so all queries for all previous sequence numbers will always wait for everything.
Regards, Christian.
-David
+ }
+ dma_fence_init(&chain->base, &dma_fence_chain_ops, + &chain->lock, context, seqno); +} +EXPORT_SYMBOL(dma_fence_chain_init); diff --git a/include/linux/dma-fence-chain.h b/include/linux/dma-fence-chain.h new file mode 100644 index 000000000000..3bb0efd6bc65 --- /dev/null +++ b/include/linux/dma-fence-chain.h @@ -0,0 +1,79 @@ +/*
- fence-chain: chain fences together in a timeline
- Copyright (C) 2018 Advanced Micro Devices, Inc.
- Authors:
- * Christian König christian.koenig@amd.com
- This program is free software; you can redistribute it and/or
modify it
- under the terms of the GNU General Public License version 2 as
published by
- the Free Software Foundation.
- This program is distributed in the hope that it will be useful,
but WITHOUT
- ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
License for
- more details.
- */
+#ifndef __LINUX_DMA_FENCE_CHAIN_H +#define __LINUX_DMA_FENCE_CHAIN_H
+#include <linux/dma-fence.h> +#include <linux/irq_work.h>
+/**
- struct dma_fence_chain - fence to represent a node of a fence
chain
- @base: fence base class
- @lock: spinlock for fence handling
- @prev: previous fence of the chain
- @prev_seqno: original previous seqno before garbage collection
- @fence: encapsulated fence
- @cb: callback structure for signaling
- @work: irq work item for signaling
- */
+struct dma_fence_chain { + struct dma_fence base; + spinlock_t lock; + struct dma_fence *prev; + u64 prev_seqno; + struct dma_fence *fence; + struct dma_fence_cb cb; + struct irq_work work; +};
+extern const struct dma_fence_ops dma_fence_chain_ops;
+/**
- to_dma_fence_chain - cast a fence to a dma_fence_chain
- @fence: fence to cast to a dma_fence_array
- Returns NULL if the fence is not a dma_fence_chain,
- or the dma_fence_chain otherwise.
- */
+static inline struct dma_fence_chain * +to_dma_fence_chain(struct dma_fence *fence) +{ + if (!fence || fence->ops != &dma_fence_chain_ops) + return NULL;
+ return container_of(fence, struct dma_fence_chain, base); +}
+/**
- dma_fence_chain_for_each - iterate over all fences in chain
- @fence: starting point as well as current fence
- Iterate over all fences in the chain. We keep a reference to the
current
- fence while inside the loop which must be dropped when breaking
out.
- */
+#define dma_fence_chain_for_each(fence) \ + for (dma_fence_get(fence);fence;fence=dma_fence_chain_walk(fence))
+struct dma_fence *dma_fence_chain_walk(struct dma_fence *fence); +int dma_fence_chain_find_seqno(struct dma_fence **pfence, uint64_t seqno); +void dma_fence_chain_init(struct dma_fence_chain *chain, + struct dma_fence *prev, + struct dma_fence *fence, + uint64_t seqno);
+#endif /* __LINUX_DMA_FENCE_CHAIN_H */
Am 03.12.18 um 14:18 schrieb Chunming Zhou:
在 2018/12/3 19:00, Christian König 写道:
Am 03.12.18 um 06:25 schrieb zhoucm1:
On 2018年11月28日 22:50, Christian König wrote:
Lockless container implementation similar to a dma_fence_array, but with only two elements per node and automatic garbage collection.
v2: properly document dma_fence_chain_for_each, add dma_fence_chain_find_seqno, drop prev reference during garbage collection if it's not a chain fence.
Signed-off-by: Christian König christian.koenig@amd.com
drivers/dma-buf/Makefile | 3 +- drivers/dma-buf/dma-fence-chain.c | 235 ++++++++++++++++++++++++++++++++++++++ include/linux/dma-fence-chain.h | 79 +++++++++++++ 3 files changed, 316 insertions(+), 1 deletion(-) create mode 100644 drivers/dma-buf/dma-fence-chain.c create mode 100644 include/linux/dma-fence-chain.h
diff --git a/drivers/dma-buf/Makefile b/drivers/dma-buf/Makefile index 0913a6ccab5a..1f006e083eb9 100644 --- a/drivers/dma-buf/Makefile +++ b/drivers/dma-buf/Makefile @@ -1,4 +1,5 @@ -obj-y := dma-buf.o dma-fence.o dma-fence-array.o reservation.o seqno-fence.o +obj-y := dma-buf.o dma-fence.o dma-fence-array.o dma-fence-chain.o \ + reservation.o seqno-fence.o obj-$(CONFIG_SYNC_FILE) += sync_file.o obj-$(CONFIG_SW_SYNC) += sw_sync.o sync_debug.o obj-$(CONFIG_UDMABUF) += udmabuf.o diff --git a/drivers/dma-buf/dma-fence-chain.c b/drivers/dma-buf/dma-fence-chain.c new file mode 100644 index 000000000000..de05101fc48d --- /dev/null +++ b/drivers/dma-buf/dma-fence-chain.c @@ -0,0 +1,235 @@ +/*
- fence-chain: chain fences together in a timeline
- Copyright (C) 2018 Advanced Micro Devices, Inc.
- Authors:
- * Christian König christian.koenig@amd.com
- This program is free software; you can redistribute it and/or
modify it
- under the terms of the GNU General Public License version 2 as
published by
- the Free Software Foundation.
- This program is distributed in the hope that it will be useful,
but WITHOUT
- ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
License for
- more details.
- */
+#include <linux/dma-fence-chain.h>
+static bool dma_fence_chain_enable_signaling(struct dma_fence *fence);
+/**
- dma_fence_chain_get_prev - use RCU to get a reference to the
previous fence
- @chain: chain node to get the previous node from
- Use dma_fence_get_rcu_safe to get a reference to the previous
fence of the
- chain node.
- */
+static struct dma_fence *dma_fence_chain_get_prev(struct dma_fence_chain *chain) +{ + struct dma_fence *prev;
+ rcu_read_lock(); + prev = dma_fence_get_rcu_safe(&chain->prev); + rcu_read_unlock(); + return prev; +}
+/**
- dma_fence_chain_walk - chain walking function
- @fence: current chain node
- Walk the chain to the next node. Returns the next fence or NULL
if we are at
- the end of the chain. Garbage collects chain nodes which are
already
- signaled.
- */
+struct dma_fence *dma_fence_chain_walk(struct dma_fence *fence) +{ + struct dma_fence_chain *chain, *prev_chain; + struct dma_fence *prev, *replacement, *tmp;
+ chain = to_dma_fence_chain(fence); + if (!chain) { + dma_fence_put(fence); + return NULL; + }
+ while ((prev = dma_fence_chain_get_prev(chain))) {
+ prev_chain = to_dma_fence_chain(prev); + if (prev_chain) { + if (!dma_fence_is_signaled(prev_chain->fence)) + break;
+ replacement = dma_fence_chain_get_prev(prev_chain); + } else { + if (!dma_fence_is_signaled(prev)) + break;
+ replacement = NULL; + }
+ tmp = cmpxchg(&chain->prev, prev, replacement); + if (tmp == prev) + dma_fence_put(tmp); + else + dma_fence_put(replacement); + dma_fence_put(prev); + }
+ dma_fence_put(fence); + return prev; +} +EXPORT_SYMBOL(dma_fence_chain_walk);
+/**
- dma_fence_chain_find_seqno - find fence chain node by seqno
- @pfence: pointer to the chain node where to start
- @seqno: the sequence number to search for
- Advance the fence pointer to the chain node which will signal
this sequence
- number. If no sequence number is provided then this is a no-op.
- Returns EINVAL if the fence is not a chain node or the sequence
number has
- not yet advanced far enough.
- */
+int dma_fence_chain_find_seqno(struct dma_fence **pfence, uint64_t seqno) +{ + struct dma_fence_chain *chain;
+ if (!seqno) + return 0;
+ chain = to_dma_fence_chain(*pfence); + if (!chain || chain->base.seqno < seqno) + return -EINVAL;
+ dma_fence_chain_for_each(*pfence) { + if ((*pfence)->context != chain->base.context || + to_dma_fence_chain(*pfence)->prev_seqno < seqno) + break; + } + dma_fence_put(&chain->base);
+ return 0; +} +EXPORT_SYMBOL(dma_fence_chain_find_seqno);
+static const char *dma_fence_chain_get_driver_name(struct dma_fence *fence) +{ + return "dma_fence_chain"; +}
+static const char *dma_fence_chain_get_timeline_name(struct dma_fence *fence) +{ + return "unbound"; +}
+static void dma_fence_chain_irq_work(struct irq_work *work) +{ + struct dma_fence_chain *chain;
+ chain = container_of(work, typeof(*chain), work);
+ /* Try to rearm the callback */ + if (!dma_fence_chain_enable_signaling(&chain->base)) + /* Ok, we are done. No more unsignaled fences left */ + dma_fence_signal(&chain->base); +}
+static void dma_fence_chain_cb(struct dma_fence *f, struct dma_fence_cb *cb) +{ + struct dma_fence_chain *chain;
+ chain = container_of(cb, typeof(*chain), cb); + irq_work_queue(&chain->work); + dma_fence_put(f); +}
+static bool dma_fence_chain_enable_signaling(struct dma_fence *fence) +{ + struct dma_fence_chain *head = to_dma_fence_chain(fence);
+ dma_fence_chain_for_each(fence) { + struct dma_fence_chain *chain = to_dma_fence_chain(fence); + struct dma_fence *f = chain ? chain->fence : fence;
+ if (!dma_fence_add_callback(f, &head->cb, dma_fence_chain_cb)) + return true; + } + return false; +}
+static bool dma_fence_chain_signaled(struct dma_fence *fence) +{ + dma_fence_chain_for_each(fence) { + struct dma_fence_chain *chain = to_dma_fence_chain(fence); + struct dma_fence *f = chain ? chain->fence : fence;
+ if (!dma_fence_is_signaled(f)) { + dma_fence_put(fence); + return false; + } + }
+ return true; +}
+static void dma_fence_chain_release(struct dma_fence *fence) +{ + struct dma_fence_chain *chain = to_dma_fence_chain(fence);
+ dma_fence_put(chain->prev); + dma_fence_put(chain->fence); + dma_fence_free(fence); +}
+const struct dma_fence_ops dma_fence_chain_ops = { + .get_driver_name = dma_fence_chain_get_driver_name, + .get_timeline_name = dma_fence_chain_get_timeline_name, + .enable_signaling = dma_fence_chain_enable_signaling, + .signaled = dma_fence_chain_signaled, + .release = dma_fence_chain_release, +}; +EXPORT_SYMBOL(dma_fence_chain_ops);
+/**
- dma_fence_chain_init - initialize a fence chain
- @chain: the chain node to initialize
- @prev: the previous fence
- @fence: the current fence
- Initialize a new chain node and either start a new chain or add
the node to
- the existing chain of the previous fence.
- */
+void dma_fence_chain_init(struct dma_fence_chain *chain, + struct dma_fence *prev, + struct dma_fence *fence, + uint64_t seqno) +{ + struct dma_fence_chain *prev_chain = to_dma_fence_chain(prev); + uint64_t context;
+ spin_lock_init(&chain->lock); + chain->prev = prev; + chain->fence = fence; + chain->prev_seqno = 0; + init_irq_work(&chain->work, dma_fence_chain_irq_work);
+ /* Try to reuse the context of the previous chain node. */ + if (prev_chain && seqno > prev->seqno && + __dma_fence_is_later(seqno, prev->seqno)) {
As your patch#1 makes __dma_fence_is_later only be valid for 32bit, we cannot use it for 64bit here, we should remove it from here, just compare seqno directly.
That is intentional. We must make sure that the number both increments as a 64bit number and does not wrap around as a 32bit number.
In other words the largest difference between two sequence numbers userspace is allowed to submit is 1<<31.
Why? No one can make sure of that; application users would only think it is a uint64 sequence number, and they can signal any advanced point. I already see umd guys writing timeline tests that use max_uint64-1 as a final signal. We shouldn't add this limitation here.
We need to be backward compatible to hardware which can only do 32bit signaling with the dma-fence implementation.
Otherwise dma_fence_later() could return an inconsistent result and break at other places.
So if userspace wants to use more than 1<<31 difference between sequence numbers we need to push back on this.
Christian.
-David
+ context = prev->context; + chain->prev_seqno = prev->seqno; + } else { + context = dma_fence_context_alloc(1); + /* Make sure that we always have a valid sequence number. */ + if (prev_chain) + seqno = max(prev->seqno, seqno);
I still cannot judge if this case is proper, but I prefer we just drop it when seqno is <= last seqno. We can imagine that when we enable export/import, we could mess them up. So we shouldn't change a timeline's existing signal point at any time.
Yeah, but we can't lose fences either. This is the most defensive approach I can think of.
E.g. we still add the fence, but start a new timeline so all queries for all previous sequence numbers will always wait for everything.
Regards, Christian.
-David
+ }
+ dma_fence_init(&chain->base, &dma_fence_chain_ops, + &chain->lock, context, seqno); +} +EXPORT_SYMBOL(dma_fence_chain_init); diff --git a/include/linux/dma-fence-chain.h b/include/linux/dma-fence-chain.h new file mode 100644 index 000000000000..3bb0efd6bc65 --- /dev/null +++ b/include/linux/dma-fence-chain.h @@ -0,0 +1,79 @@ +/*
- fence-chain: chain fences together in a timeline
- Copyright (C) 2018 Advanced Micro Devices, Inc.
- Authors:
- * Christian König christian.koenig@amd.com
- This program is free software; you can redistribute it and/or
modify it
- under the terms of the GNU General Public License version 2 as
published by
- the Free Software Foundation.
- This program is distributed in the hope that it will be useful,
but WITHOUT
- ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
License for
- more details.
- */
+#ifndef __LINUX_DMA_FENCE_CHAIN_H +#define __LINUX_DMA_FENCE_CHAIN_H
+#include <linux/dma-fence.h> +#include <linux/irq_work.h>
+/**
- struct dma_fence_chain - fence to represent a node of a fence
chain
- @base: fence base class
- @lock: spinlock for fence handling
- @prev: previous fence of the chain
- @prev_seqno: original previous seqno before garbage collection
- @fence: encapsulated fence
- @cb: callback structure for signaling
- @work: irq work item for signaling
- */
+struct dma_fence_chain { + struct dma_fence base; + spinlock_t lock; + struct dma_fence *prev; + u64 prev_seqno; + struct dma_fence *fence; + struct dma_fence_cb cb; + struct irq_work work; +};
+extern const struct dma_fence_ops dma_fence_chain_ops;
+/**
- to_dma_fence_chain - cast a fence to a dma_fence_chain
- @fence: fence to cast to a dma_fence_array
- Returns NULL if the fence is not a dma_fence_chain,
- or the dma_fence_chain otherwise.
- */
+static inline struct dma_fence_chain * +to_dma_fence_chain(struct dma_fence *fence) +{ + if (!fence || fence->ops != &dma_fence_chain_ops) + return NULL;
+ return container_of(fence, struct dma_fence_chain, base); +}
+/**
- dma_fence_chain_for_each - iterate over all fences in chain
- @fence: starting point as well as current fence
- Iterate over all fences in the chain. We keep a reference to the
current
- fence while inside the loop which must be dropped when breaking
out.
- */
+#define dma_fence_chain_for_each(fence) \ + for (dma_fence_get(fence);fence;fence=dma_fence_chain_walk(fence))
+struct dma_fence *dma_fence_chain_walk(struct dma_fence *fence); +int dma_fence_chain_find_seqno(struct dma_fence **pfence, uint64_t seqno); +void dma_fence_chain_init(struct dma_fence_chain *chain, + struct dma_fence *prev, + struct dma_fence *fence, + uint64_t seqno);
+#endif /* __LINUX_DMA_FENCE_CHAIN_H */
amd-gfx mailing list amd-gfx@lists.freedesktop.org https://lists.freedesktop.org/mailman/listinfo/amd-gfx
在 2018/12/3 21:28, Christian König 写道:
Am 03.12.18 um 14:18 schrieb Chunming Zhou:
在 2018/12/3 19:00, Christian König 写道:
Am 03.12.18 um 06:25 schrieb zhoucm1:
On 2018年11月28日 22:50, Christian König wrote:
Lockless container implementation similar to a dma_fence_array, but with only two elements per node and automatic garbage collection.
v2: properly document dma_fence_chain_for_each, add dma_fence_chain_find_seqno, drop prev reference during garbage collection if it's not a chain fence.
Signed-off-by: Christian König christian.koenig@amd.com
drivers/dma-buf/Makefile | 3 +- drivers/dma-buf/dma-fence-chain.c | 235 ++++++++++++++++++++++++++++++++++++++ include/linux/dma-fence-chain.h | 79 +++++++++++++ 3 files changed, 316 insertions(+), 1 deletion(-) create mode 100644 drivers/dma-buf/dma-fence-chain.c create mode 100644 include/linux/dma-fence-chain.h
diff --git a/drivers/dma-buf/Makefile b/drivers/dma-buf/Makefile index 0913a6ccab5a..1f006e083eb9 100644 --- a/drivers/dma-buf/Makefile +++ b/drivers/dma-buf/Makefile @@ -1,4 +1,5 @@ -obj-y := dma-buf.o dma-fence.o dma-fence-array.o reservation.o seqno-fence.o +obj-y := dma-buf.o dma-fence.o dma-fence-array.o dma-fence-chain.o \ + reservation.o seqno-fence.o obj-$(CONFIG_SYNC_FILE) += sync_file.o obj-$(CONFIG_SW_SYNC) += sw_sync.o sync_debug.o obj-$(CONFIG_UDMABUF) += udmabuf.o diff --git a/drivers/dma-buf/dma-fence-chain.c b/drivers/dma-buf/dma-fence-chain.c new file mode 100644 index 000000000000..de05101fc48d --- /dev/null +++ b/drivers/dma-buf/dma-fence-chain.c @@ -0,0 +1,235 @@ +/*
- fence-chain: chain fences together in a timeline
- Copyright (C) 2018 Advanced Micro Devices, Inc.
- Authors:
- * Christian König christian.koenig@amd.com
- This program is free software; you can redistribute it and/or
modify it
- under the terms of the GNU General Public License version 2 as
published by
- the Free Software Foundation.
- This program is distributed in the hope that it will be useful,
but WITHOUT
- ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
License for
- more details.
- */
+#include <linux/dma-fence-chain.h>
+static bool dma_fence_chain_enable_signaling(struct dma_fence *fence);
+/**
- dma_fence_chain_get_prev - use RCU to get a reference to the
previous fence
- @chain: chain node to get the previous node from
- Use dma_fence_get_rcu_safe to get a reference to the previous
fence of the
- chain node.
- */
+static struct dma_fence *dma_fence_chain_get_prev(struct dma_fence_chain *chain) +{ + struct dma_fence *prev;
+ rcu_read_lock(); + prev = dma_fence_get_rcu_safe(&chain->prev); + rcu_read_unlock(); + return prev; +}
+/**
- dma_fence_chain_walk - chain walking function
- @fence: current chain node
- Walk the chain to the next node. Returns the next fence or NULL
if we are at
- the end of the chain. Garbage collects chain nodes which are
already
- signaled.
- */
+struct dma_fence *dma_fence_chain_walk(struct dma_fence *fence) +{ + struct dma_fence_chain *chain, *prev_chain; + struct dma_fence *prev, *replacement, *tmp;
+ chain = to_dma_fence_chain(fence); + if (!chain) { + dma_fence_put(fence); + return NULL; + }
+ while ((prev = dma_fence_chain_get_prev(chain))) {
+ prev_chain = to_dma_fence_chain(prev); + if (prev_chain) { + if (!dma_fence_is_signaled(prev_chain->fence)) + break;
+ replacement = dma_fence_chain_get_prev(prev_chain); + } else { + if (!dma_fence_is_signaled(prev)) + break;
+ replacement = NULL; + }
+ tmp = cmpxchg(&chain->prev, prev, replacement); + if (tmp == prev) + dma_fence_put(tmp); + else + dma_fence_put(replacement); + dma_fence_put(prev); + }
+ dma_fence_put(fence); + return prev; +} +EXPORT_SYMBOL(dma_fence_chain_walk);
+/**
- dma_fence_chain_find_seqno - find fence chain node by seqno
- @pfence: pointer to the chain node where to start
- @seqno: the sequence number to search for
- Advance the fence pointer to the chain node which will signal
this sequence
- number. If no sequence number is provided then this is a no-op.
- Returns EINVAL if the fence is not a chain node or the sequence
number has
- not yet advanced far enough.
- */
+int dma_fence_chain_find_seqno(struct dma_fence **pfence, uint64_t seqno) +{ + struct dma_fence_chain *chain;
+ if (!seqno) + return 0;
+ chain = to_dma_fence_chain(*pfence); + if (!chain || chain->base.seqno < seqno) + return -EINVAL;
+ dma_fence_chain_for_each(*pfence) { + if ((*pfence)->context != chain->base.context || + to_dma_fence_chain(*pfence)->prev_seqno < seqno) + break; + } + dma_fence_put(&chain->base);
+ return 0; +} +EXPORT_SYMBOL(dma_fence_chain_find_seqno);
+static const char *dma_fence_chain_get_driver_name(struct dma_fence *fence) +{ + return "dma_fence_chain"; +}
+static const char *dma_fence_chain_get_timeline_name(struct dma_fence *fence) +{ + return "unbound"; +}
+static void dma_fence_chain_irq_work(struct irq_work *work) +{ + struct dma_fence_chain *chain;
+ chain = container_of(work, typeof(*chain), work);
+ /* Try to rearm the callback */ + if (!dma_fence_chain_enable_signaling(&chain->base)) + /* Ok, we are done. No more unsignaled fences left */ + dma_fence_signal(&chain->base); +}
+static void dma_fence_chain_cb(struct dma_fence *f, struct dma_fence_cb *cb) +{ + struct dma_fence_chain *chain;
+ chain = container_of(cb, typeof(*chain), cb); + irq_work_queue(&chain->work); + dma_fence_put(f); +}
+static bool dma_fence_chain_enable_signaling(struct dma_fence *fence) +{ + struct dma_fence_chain *head = to_dma_fence_chain(fence);
+ dma_fence_chain_for_each(fence) { + struct dma_fence_chain *chain = to_dma_fence_chain(fence); + struct dma_fence *f = chain ? chain->fence : fence;
+ if (!dma_fence_add_callback(f, &head->cb, dma_fence_chain_cb)) + return true; + } + return false; +}
+static bool dma_fence_chain_signaled(struct dma_fence *fence) +{ + dma_fence_chain_for_each(fence) { + struct dma_fence_chain *chain = to_dma_fence_chain(fence); + struct dma_fence *f = chain ? chain->fence : fence;
+ if (!dma_fence_is_signaled(f)) { + dma_fence_put(fence); + return false; + } + }
+ return true; +}
+static void dma_fence_chain_release(struct dma_fence *fence) +{ + struct dma_fence_chain *chain = to_dma_fence_chain(fence);
+ dma_fence_put(chain->prev); + dma_fence_put(chain->fence); + dma_fence_free(fence); +}
+const struct dma_fence_ops dma_fence_chain_ops = { + .get_driver_name = dma_fence_chain_get_driver_name, + .get_timeline_name = dma_fence_chain_get_timeline_name, + .enable_signaling = dma_fence_chain_enable_signaling, + .signaled = dma_fence_chain_signaled, + .release = dma_fence_chain_release, +}; +EXPORT_SYMBOL(dma_fence_chain_ops);
+/**
- dma_fence_chain_init - initialize a fence chain
- @chain: the chain node to initialize
- @prev: the previous fence
- @fence: the current fence
- Initialize a new chain node and either start a new chain or add
the node to
- the existing chain of the previous fence.
- */
+void dma_fence_chain_init(struct dma_fence_chain *chain, + struct dma_fence *prev, + struct dma_fence *fence, + uint64_t seqno) +{ + struct dma_fence_chain *prev_chain = to_dma_fence_chain(prev); + uint64_t context;
+ spin_lock_init(&chain->lock); + chain->prev = prev; + chain->fence = fence; + chain->prev_seqno = 0; + init_irq_work(&chain->work, dma_fence_chain_irq_work);
+ /* Try to reuse the context of the previous chain node. */ + if (prev_chain && seqno > prev->seqno && + __dma_fence_is_later(seqno, prev->seqno)) {
As your patch#1 makes __dma_fence_is_later only be valid for 32bit, we cannot use it for 64bit here, we should remove it from here, just compare seqno directly.
That is intentional. We must make sure that the number both increments as a 64bit number and does not wrap around as a 32bit number.
In other words the largest difference between two sequence numbers userspace is allowed to submit is 1<<31.
Why? No one can make sure of that; application users would only think it is a uint64 sequence number, and they can signal any advanced point. I already see umd guys writing timeline tests that use max_uint64-1 as a final signal. We shouldn't add this limitation here.
We need to be backward compatible to hardware which can only do 32bit signaling with the dma-fence implementation.
I see that, you already explained that before. But can't we just grab the low 32 bits of the seqno only when 32bit hardware tries to use it?
then we can make dma_fence_later use a 64bit comparison.
Otherwise dma_fence_later() could return an inconsistent result and break at other places.
So if userspace wants to use more than 1<<31 difference between sequence numbers we need to push back on this.
It's rare case, but I don't think we can convince them add this limitation. So we cannot add this limitation here.
-David
Christian.
-David
+ context = prev->context; + chain->prev_seqno = prev->seqno; + } else { + context = dma_fence_context_alloc(1); + /* Make sure that we always have a valid sequence number. */ + if (prev_chain) + seqno = max(prev->seqno, seqno);
I still cannot judge if this case is proper, but I prefer we just drop it when seqno is <= last seqno. We can imagine that when we enable export/import, we could mess them up. So we shouldn't change a timeline's existing signal point at any time.
Yeah, but we can't lose fences either. This is the most defensive approach I can think of.
E.g. we still add the fence, but start a new timeline so all queries for all previous sequence numbers will always wait for everything.
Regards, Christian.
-David
+ }
+ dma_fence_init(&chain->base, &dma_fence_chain_ops, + &chain->lock, context, seqno); +} +EXPORT_SYMBOL(dma_fence_chain_init); diff --git a/include/linux/dma-fence-chain.h b/include/linux/dma-fence-chain.h new file mode 100644 index 000000000000..3bb0efd6bc65 --- /dev/null +++ b/include/linux/dma-fence-chain.h @@ -0,0 +1,79 @@ +/*
- fence-chain: chain fences together in a timeline
- Copyright (C) 2018 Advanced Micro Devices, Inc.
- Authors:
- * Christian König christian.koenig@amd.com
- This program is free software; you can redistribute it and/or
modify it
- under the terms of the GNU General Public License version 2 as
published by
- the Free Software Foundation.
- This program is distributed in the hope that it will be useful,
but WITHOUT
- ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
License for
- more details.
- */
+#ifndef __LINUX_DMA_FENCE_CHAIN_H +#define __LINUX_DMA_FENCE_CHAIN_H
+#include <linux/dma-fence.h> +#include <linux/irq_work.h>
+/**
- struct dma_fence_chain - fence to represent a node of a fence
chain
- @base: fence base class
- @lock: spinlock for fence handling
- @prev: previous fence of the chain
- @prev_seqno: original previous seqno before garbage collection
- @fence: encapsulated fence
- @cb: callback structure for signaling
- @work: irq work item for signaling
- */
+struct dma_fence_chain { + struct dma_fence base; + spinlock_t lock; + struct dma_fence *prev; + u64 prev_seqno; + struct dma_fence *fence; + struct dma_fence_cb cb; + struct irq_work work; +};
+extern const struct dma_fence_ops dma_fence_chain_ops;
+/**
- to_dma_fence_chain - cast a fence to a dma_fence_chain
- @fence: fence to cast to a dma_fence_array
- Returns NULL if the fence is not a dma_fence_chain,
- or the dma_fence_chain otherwise.
- */
+static inline struct dma_fence_chain * +to_dma_fence_chain(struct dma_fence *fence) +{ + if (!fence || fence->ops != &dma_fence_chain_ops) + return NULL;
+ return container_of(fence, struct dma_fence_chain, base); +}
+/**
- dma_fence_chain_for_each - iterate over all fences in chain
- @fence: starting point as well as current fence
- Iterate over all fences in the chain. We keep a reference to the
current
- fence while inside the loop which must be dropped when breaking
out.
- */
+#define dma_fence_chain_for_each(fence) \ + for (dma_fence_get(fence);fence;fence=dma_fence_chain_walk(fence))
+struct dma_fence *dma_fence_chain_walk(struct dma_fence *fence); +int dma_fence_chain_find_seqno(struct dma_fence **pfence, uint64_t seqno); +void dma_fence_chain_init(struct dma_fence_chain *chain, + struct dma_fence *prev, + struct dma_fence *fence, + uint64_t seqno);
+#endif /* __LINUX_DMA_FENCE_CHAIN_H */
amd-gfx mailing list amd-gfx@lists.freedesktop.org https://lists.freedesktop.org/mailman/listinfo/amd-gfx
Am 03.12.18 um 14:44 schrieb Chunming Zhou:
在 2018/12/3 21:28, Christian König 写道:
Am 03.12.18 um 14:18 schrieb Chunming Zhou:
在 2018/12/3 19:00, Christian König 写道:
Am 03.12.18 um 06:25 schrieb zhoucm1:
On 2018年11月28日 22:50, Christian König wrote:
Lockless container implementation similar to a dma_fence_array, but with only two elements per node and automatic garbage collection.
v2: properly document dma_fence_chain_for_each, add dma_fence_chain_find_seqno, drop prev reference during garbage collection if it's not a chain fence.
Signed-off-by: Christian König christian.koenig@amd.com
drivers/dma-buf/Makefile | 3 +- drivers/dma-buf/dma-fence-chain.c | 235 ++++++++++++++++++++++++++++++++++++++ include/linux/dma-fence-chain.h | 79 +++++++++++++ 3 files changed, 316 insertions(+), 1 deletion(-) create mode 100644 drivers/dma-buf/dma-fence-chain.c create mode 100644 include/linux/dma-fence-chain.h
diff --git a/drivers/dma-buf/Makefile b/drivers/dma-buf/Makefile index 0913a6ccab5a..1f006e083eb9 100644 --- a/drivers/dma-buf/Makefile +++ b/drivers/dma-buf/Makefile @@ -1,4 +1,5 @@ -obj-y := dma-buf.o dma-fence.o dma-fence-array.o reservation.o seqno-fence.o +obj-y := dma-buf.o dma-fence.o dma-fence-array.o dma-fence-chain.o \ + reservation.o seqno-fence.o obj-$(CONFIG_SYNC_FILE) += sync_file.o obj-$(CONFIG_SW_SYNC) += sw_sync.o sync_debug.o obj-$(CONFIG_UDMABUF) += udmabuf.o diff --git a/drivers/dma-buf/dma-fence-chain.c b/drivers/dma-buf/dma-fence-chain.c new file mode 100644 index 000000000000..de05101fc48d --- /dev/null +++ b/drivers/dma-buf/dma-fence-chain.c @@ -0,0 +1,235 @@ +/*
- fence-chain: chain fences together in a timeline
- Copyright (C) 2018 Advanced Micro Devices, Inc.
- Authors:
- * Christian König christian.koenig@amd.com
- This program is free software; you can redistribute it and/or
modify it
- under the terms of the GNU General Public License version 2 as
published by
- the Free Software Foundation.
- This program is distributed in the hope that it will be useful,
but WITHOUT
- ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
License for
- more details.
- */
+#include <linux/dma-fence-chain.h>
+static bool dma_fence_chain_enable_signaling(struct dma_fence *fence);
+/**
- dma_fence_chain_get_prev - use RCU to get a reference to the
previous fence
- @chain: chain node to get the previous node from
- Use dma_fence_get_rcu_safe to get a reference to the previous
fence of the
- chain node.
- */
+static struct dma_fence *dma_fence_chain_get_prev(struct dma_fence_chain *chain) +{ + struct dma_fence *prev;
+ rcu_read_lock(); + prev = dma_fence_get_rcu_safe(&chain->prev); + rcu_read_unlock(); + return prev; +}
+/**
- dma_fence_chain_walk - chain walking function
- @fence: current chain node
- Walk the chain to the next node. Returns the next fence or NULL
if we are at
- the end of the chain. Garbage collects chain nodes which are
already
- signaled.
- */
+struct dma_fence *dma_fence_chain_walk(struct dma_fence *fence) +{ + struct dma_fence_chain *chain, *prev_chain; + struct dma_fence *prev, *replacement, *tmp;
+ chain = to_dma_fence_chain(fence); + if (!chain) { + dma_fence_put(fence); + return NULL; + }
+ while ((prev = dma_fence_chain_get_prev(chain))) {
+ prev_chain = to_dma_fence_chain(prev); + if (prev_chain) { + if (!dma_fence_is_signaled(prev_chain->fence)) + break;
+ replacement = dma_fence_chain_get_prev(prev_chain); + } else { + if (!dma_fence_is_signaled(prev)) + break;
+ replacement = NULL; + }
+ tmp = cmpxchg(&chain->prev, prev, replacement); + if (tmp == prev) + dma_fence_put(tmp); + else + dma_fence_put(replacement); + dma_fence_put(prev); + }
+ dma_fence_put(fence); + return prev; +} +EXPORT_SYMBOL(dma_fence_chain_walk);
+/**
- dma_fence_chain_find_seqno - find fence chain node by seqno
- @pfence: pointer to the chain node where to start
- @seqno: the sequence number to search for
- Advance the fence pointer to the chain node which will signal
this sequence
- number. If no sequence number is provided then this is a no-op.
- Returns EINVAL if the fence is not a chain node or the sequence
number has
- not yet advanced far enough.
- */
+int dma_fence_chain_find_seqno(struct dma_fence **pfence, uint64_t seqno) +{ + struct dma_fence_chain *chain;
+ if (!seqno) + return 0;
+ chain = to_dma_fence_chain(*pfence); + if (!chain || chain->base.seqno < seqno) + return -EINVAL;
+ dma_fence_chain_for_each(*pfence) { + if ((*pfence)->context != chain->base.context || + to_dma_fence_chain(*pfence)->prev_seqno < seqno) + break; + } + dma_fence_put(&chain->base);
+ return 0; +} +EXPORT_SYMBOL(dma_fence_chain_find_seqno);
+static const char *dma_fence_chain_get_driver_name(struct dma_fence *fence) +{ + return "dma_fence_chain"; +}
+static const char *dma_fence_chain_get_timeline_name(struct dma_fence *fence) +{ + return "unbound"; +}
+static void dma_fence_chain_irq_work(struct irq_work *work) +{ + struct dma_fence_chain *chain;
+ chain = container_of(work, typeof(*chain), work);
+ /* Try to rearm the callback */ + if (!dma_fence_chain_enable_signaling(&chain->base)) + /* Ok, we are done. No more unsignaled fences left */ + dma_fence_signal(&chain->base); +}
+static void dma_fence_chain_cb(struct dma_fence *f, struct dma_fence_cb *cb) +{ + struct dma_fence_chain *chain;
+ chain = container_of(cb, typeof(*chain), cb); + irq_work_queue(&chain->work); + dma_fence_put(f); +}
+static bool dma_fence_chain_enable_signaling(struct dma_fence *fence) +{ + struct dma_fence_chain *head = to_dma_fence_chain(fence);
+ dma_fence_chain_for_each(fence) { + struct dma_fence_chain *chain = to_dma_fence_chain(fence); + struct dma_fence *f = chain ? chain->fence : fence;
+ if (!dma_fence_add_callback(f, &head->cb, dma_fence_chain_cb)) + return true; + } + return false; +}
+static bool dma_fence_chain_signaled(struct dma_fence *fence) +{ + dma_fence_chain_for_each(fence) { + struct dma_fence_chain *chain = to_dma_fence_chain(fence); + struct dma_fence *f = chain ? chain->fence : fence;
+ if (!dma_fence_is_signaled(f)) { + dma_fence_put(fence); + return false; + } + }
+ return true; +}
+static void dma_fence_chain_release(struct dma_fence *fence) +{ + struct dma_fence_chain *chain = to_dma_fence_chain(fence);
+ dma_fence_put(chain->prev); + dma_fence_put(chain->fence); + dma_fence_free(fence); +}
+const struct dma_fence_ops dma_fence_chain_ops = { + .get_driver_name = dma_fence_chain_get_driver_name, + .get_timeline_name = dma_fence_chain_get_timeline_name, + .enable_signaling = dma_fence_chain_enable_signaling, + .signaled = dma_fence_chain_signaled, + .release = dma_fence_chain_release, +}; +EXPORT_SYMBOL(dma_fence_chain_ops);
+/**
- dma_fence_chain_init - initialize a fence chain
- @chain: the chain node to initialize
- @prev: the previous fence
- @fence: the current fence
- Initialize a new chain node and either start a new chain or add
the node to
- the existing chain of the previous fence.
- */
+void dma_fence_chain_init(struct dma_fence_chain *chain, + struct dma_fence *prev, + struct dma_fence *fence, + uint64_t seqno) +{ + struct dma_fence_chain *prev_chain = to_dma_fence_chain(prev); + uint64_t context;
+ spin_lock_init(&chain->lock); + chain->prev = prev; + chain->fence = fence; + chain->prev_seqno = 0; + init_irq_work(&chain->work, dma_fence_chain_irq_work);
+ /* Try to reuse the context of the previous chain node. */ + if (prev_chain && seqno > prev->seqno && + __dma_fence_is_later(seqno, prev->seqno)) {
As your patch#1 makes __dma_fence_is_later only be valid for 32bit, we cannot use it for 64bit here, we should remove it from here, just compare seqno directly.
That is intentional. We must make sure that the number both increments as 64bit number as well as not wraps around as 32bit number.
In other words the largest difference between two sequence numbers userspace is allowed to submit is 1<<31.
Why? No one can make sure of that; application users would only think it is a uint64 sequence number, and they can signal any advanced point. I already see umd guys writing timeline tests that use max_uint64-1 as a final signal. We shouldn't add this limitation here.
We need to be backward compatible to hardware which can only do 32bit signaling with the dma-fence implementation.
I see that, you already explained that before. But can't we just grab the low 32 bits of the seqno only when 32bit hardware tries to use it?
Then we can make dma_fence_later use a 64bit comparison.
The problem is that we don't know at all times when to use a 32bit compare and when to use a 64bit compare.
What we could do is test if any of the upper 32bits of a sequence number is not 0 and if that is the case do a 64bit compare. This way max_uint64_t would still be handled correctly.
Christian.
Otherwise dma_fence_later() could return an inconsistent result and break at other places.
So if userspace wants to use more than 1<<31 difference between sequence numbers we need to push back on this.
It's a rare case, but I don't think we can convince them to accept this limitation. So we cannot add this limitation here.
-David
-----Original Message----- From: Christian König ckoenig.leichtzumerken@gmail.com Sent: Monday, December 03, 2018 9:56 PM To: Zhou, David(ChunMing) David1.Zhou@amd.com; Koenig, Christian Christian.Koenig@amd.com; dri-devel@lists.freedesktop.org; amd- gfx@lists.freedesktop.org Subject: Re: [PATCH 02/11] dma-buf: add new dma_fence_chain container v2
Am 03.12.18 um 14:44 schrieb Chunming Zhou:
在 2018/12/3 21:28, Christian König 写道:
Am 03.12.18 um 14:18 schrieb Chunming Zhou:
在 2018/12/3 19:00, Christian König 写道:
Am 03.12.18 um 06:25 schrieb zhoucm1:
On 2018年11月28日 22:50, Christian König wrote: > Lockless container implementation similar to a dma_fence_array, > but with only two elements per node and automatic garbage > collection. > > v2: properly document dma_fence_chain_for_each, add > dma_fence_chain_find_seqno, > drop prev reference during garbage collection if it's not > a chain fence. > > Signed-off-by: Christian König christian.koenig@amd.com > ---
[snip]
> + > +/** > + * dma_fence_chain_init - initialize a fence chain > + * @chain: the chain node to initialize > + * @prev: the previous fence > + * @fence: the current fence > + * > + * Initialize a new chain node and either start a new chain or > +add > the node to > + * the existing chain of the previous fence. > + */ > +void dma_fence_chain_init(struct dma_fence_chain *chain, > + struct dma_fence *prev, > + struct dma_fence *fence, > + uint64_t seqno) > +{ > + struct dma_fence_chain *prev_chain = > +to_dma_fence_chain(prev); > + uint64_t context; > + > + spin_lock_init(&chain->lock); > + chain->prev = prev; > + chain->fence = fence; > + chain->prev_seqno = 0; > + init_irq_work(&chain->work, dma_fence_chain_irq_work); > + > + /* Try to reuse the context of the previous chain node. */ > + if (prev_chain && seqno > prev->seqno && > + __dma_fence_is_later(seqno, prev->seqno)) { As your patch#1 makes __dma_fence_is_later only be valid for 32bit, we cannot use it for 64bit here, we should remove it from here, just compare seqno directly.
That is intentional. We must make sure that the number both increments as 64bit number as well as not wraps around as 32bit
number.
In other words the largest difference between two sequence numbers userspace is allowed to submit is 1<<31.
Why? No one can make sure of that; application users would only think it is a uint64 sequence number, and they can signal any advanced point. I already see umd guys writing timeline tests that use max_uint64-1 as a final signal. We shouldn't add this limitation here.
We need to be backward compatible to hardware which can only do 32bit signaling with the dma-fence implementation.
I see that, you already explained that before. But can't we just grab the low 32 bits of the seqno only when 32bit hardware tries to use it?
Then we can make dma_fence_later use a 64bit comparison.
The problem is that we don't know at all times when to use a 32bit compare and when to use a 64bit compare.
What we could do is test if any of the upper 32bits of a sequence number is not 0 and if that is the case do a 64bit compare. This way max_uint64_t would still be handled correctly.
Sounds like we can give that a try, and we need to mask the upper 32 bits for the 32bit hardware case in the meanwhile, right?
-David
Christian.
Otherwise dma_fence_later() could return an inconsistent result and break at other places.
So if userspace wants to use more than 1<<31 difference between sequence numbers we need to push back on this.
It's a rare case, but I don't think we can convince them to accept this limitation. So we cannot add this limitation here.
-David
This reverts commit 9a09a42369a4a37a959c051d8e1a1f948c1529a4.
The whole interface isn't thought through. Since this function can't fail we actually can't allocate an object to store the sync point.
Sorry, I should have taken the lead on this from the very beginning and reviewed it more thoughtfully. Going to propose a new interface as a follow up change.
Signed-off-by: Christian König christian.koenig@amd.com --- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 2 +- drivers/gpu/drm/drm_syncobj.c | 14 ++++++-------- drivers/gpu/drm/i915/i915_gem_execbuffer.c | 2 +- drivers/gpu/drm/v3d/v3d_gem.c | 3 +-- drivers/gpu/drm/vc4/vc4_gem.c | 2 +- include/drm/drm_syncobj.h | 2 +- 6 files changed, 11 insertions(+), 14 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 024dfbd87f11..dc54e9efd910 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -1193,7 +1193,7 @@ static void amdgpu_cs_post_dependencies(struct amdgpu_cs_parser *p) int i;
for (i = 0; i < p->num_post_dep_syncobjs; ++i) - drm_syncobj_replace_fence(p->post_dep_syncobjs[i], 0, p->fence); + drm_syncobj_replace_fence(p->post_dep_syncobjs[i], p->fence); }
static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c index e2c5b3ca4824..b92e3c726229 100644 --- a/drivers/gpu/drm/drm_syncobj.c +++ b/drivers/gpu/drm/drm_syncobj.c @@ -156,13 +156,11 @@ void drm_syncobj_remove_callback(struct drm_syncobj *syncobj, /** * drm_syncobj_replace_fence - replace fence in a sync object. * @syncobj: Sync object to replace fence in - * @point: timeline point * @fence: fence to install in sync file. * - * This replaces the fence on a sync object, or a timeline point fence. + * This replaces the fence on a sync object. */ void drm_syncobj_replace_fence(struct drm_syncobj *syncobj, - u64 point, struct dma_fence *fence) { struct dma_fence *old_fence; @@ -202,7 +200,7 @@ static int drm_syncobj_assign_null_handle(struct drm_syncobj *syncobj) &fence->lock, 0, 0); dma_fence_signal(&fence->base);
- drm_syncobj_replace_fence(syncobj, 0, &fence->base); + drm_syncobj_replace_fence(syncobj, &fence->base);
dma_fence_put(&fence->base);
@@ -254,7 +252,7 @@ void drm_syncobj_free(struct kref *kref) struct drm_syncobj *syncobj = container_of(kref, struct drm_syncobj, refcount); - drm_syncobj_replace_fence(syncobj, 0, NULL); + drm_syncobj_replace_fence(syncobj, NULL); kfree(syncobj); } EXPORT_SYMBOL(drm_syncobj_free); @@ -294,7 +292,7 @@ int drm_syncobj_create(struct drm_syncobj **out_syncobj, uint32_t flags, }
if (fence) - drm_syncobj_replace_fence(syncobj, 0, fence); + drm_syncobj_replace_fence(syncobj, fence);
*out_syncobj = syncobj; return 0; @@ -479,7 +477,7 @@ static int drm_syncobj_import_sync_file_fence(struct drm_file *file_private, return -ENOENT; }
- drm_syncobj_replace_fence(syncobj, 0, fence); + drm_syncobj_replace_fence(syncobj, fence); dma_fence_put(fence); drm_syncobj_put(syncobj); return 0; @@ -950,7 +948,7 @@ drm_syncobj_reset_ioctl(struct drm_device *dev, void *data, return ret;
for (i = 0; i < args->count_handles; i++) - drm_syncobj_replace_fence(syncobjs[i], 0, NULL); + drm_syncobj_replace_fence(syncobjs[i], NULL);
drm_syncobj_array_free(syncobjs, args->count_handles);
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 1aaccbe7e1de..786d719e652d 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -2186,7 +2186,7 @@ signal_fence_array(struct i915_execbuffer *eb, if (!(flags & I915_EXEC_FENCE_SIGNAL)) continue;
- drm_syncobj_replace_fence(syncobj, 0, fence); + drm_syncobj_replace_fence(syncobj, fence); } }
diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c index b88c96911453..655be1a865fa 100644 --- a/drivers/gpu/drm/v3d/v3d_gem.c +++ b/drivers/gpu/drm/v3d/v3d_gem.c @@ -588,8 +588,7 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data, /* Update the return sync object for the */ sync_out = drm_syncobj_find(file_priv, args->out_sync); if (sync_out) { - drm_syncobj_replace_fence(sync_out, 0, - exec->render_done_fence); + drm_syncobj_replace_fence(sync_out, exec->render_done_fence); drm_syncobj_put(sync_out); }
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c index 41881ce4132d..aea2b8dfec17 100644 --- a/drivers/gpu/drm/vc4/vc4_gem.c +++ b/drivers/gpu/drm/vc4/vc4_gem.c @@ -681,7 +681,7 @@ vc4_queue_submit(struct drm_device *dev, struct vc4_exec_info *exec, exec->fence = &fence->base;
if (out_sync) - drm_syncobj_replace_fence(out_sync, 0, exec->fence); + drm_syncobj_replace_fence(out_sync, exec->fence);
vc4_update_bo_seqnos(exec, seqno);
diff --git a/include/drm/drm_syncobj.h b/include/drm/drm_syncobj.h index 2eda44def639..b1fe921f8e8f 100644 --- a/include/drm/drm_syncobj.h +++ b/include/drm/drm_syncobj.h @@ -131,7 +131,7 @@ drm_syncobj_fence_get(struct drm_syncobj *syncobj)
struct drm_syncobj *drm_syncobj_find(struct drm_file *file_private, u32 handle); -void drm_syncobj_replace_fence(struct drm_syncobj *syncobj, u64 point, +void drm_syncobj_replace_fence(struct drm_syncobj *syncobj, struct dma_fence *fence); int drm_syncobj_find_fence(struct drm_file *file_private, u32 handle, u64 point, u64 flags,
Extract of useful code from the timeline work. Let's use just a single stub fence instance instead of allocating a new one all the time.
Signed-off-by: Chunming Zhou david1.zhou@amd.com Signed-off-by: Christian König christian.koenig@amd.com --- drivers/gpu/drm/drm_syncobj.c | 67 ++++++++++++++++++++++--------------------- 1 file changed, 35 insertions(+), 32 deletions(-)
diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c index b92e3c726229..f78321338c1f 100644 --- a/drivers/gpu/drm/drm_syncobj.c +++ b/drivers/gpu/drm/drm_syncobj.c @@ -56,10 +56,8 @@ #include "drm_internal.h" #include <drm/drm_syncobj.h>
-struct drm_syncobj_stub_fence { - struct dma_fence base; - spinlock_t lock; -}; +static DEFINE_SPINLOCK(stub_fence_lock); +static struct dma_fence stub_fence;
static const char *drm_syncobj_stub_fence_get_name(struct dma_fence *fence) { @@ -71,6 +69,25 @@ static const struct dma_fence_ops drm_syncobj_stub_fence_ops = { .get_timeline_name = drm_syncobj_stub_fence_get_name, };
+/** + * drm_syncobj_get_stub_fence - return a signaled fence + * + * Return a stub fence which is already signaled. + */ +static struct dma_fence *drm_syncobj_get_stub_fence(void) +{ + spin_lock(&stub_fence_lock); + if (!stub_fence.ops) { + dma_fence_init(&stub_fence, + &drm_syncobj_stub_fence_ops, + &stub_fence_lock, + 0, 0); + dma_fence_signal_locked(&stub_fence); + } + spin_unlock(&stub_fence_lock); + + return dma_fence_get(&stub_fence); +}
/** * drm_syncobj_find - lookup and reference a sync object. @@ -188,23 +205,18 @@ void drm_syncobj_replace_fence(struct drm_syncobj *syncobj, } EXPORT_SYMBOL(drm_syncobj_replace_fence);
-static int drm_syncobj_assign_null_handle(struct drm_syncobj *syncobj) +/** + * drm_syncobj_assign_null_handle - assign a stub fence to the sync object + * @syncobj: sync object to assign the fence on + * + * Assign a already signaled stub fence to the sync object. + */ +static void drm_syncobj_assign_null_handle(struct drm_syncobj *syncobj) { - struct drm_syncobj_stub_fence *fence; - fence = kzalloc(sizeof(*fence), GFP_KERNEL); - if (fence == NULL) - return -ENOMEM; + struct dma_fence *fence = drm_syncobj_get_stub_fence();
- spin_lock_init(&fence->lock); - dma_fence_init(&fence->base, &drm_syncobj_stub_fence_ops, - &fence->lock, 0, 0); - dma_fence_signal(&fence->base); - - drm_syncobj_replace_fence(syncobj, &fence->base); - - dma_fence_put(&fence->base); - - return 0; + drm_syncobj_replace_fence(syncobj, fence); + dma_fence_put(fence); }
/** @@ -272,7 +284,6 @@ EXPORT_SYMBOL(drm_syncobj_free); int drm_syncobj_create(struct drm_syncobj **out_syncobj, uint32_t flags, struct dma_fence *fence) { - int ret; struct drm_syncobj *syncobj;
syncobj = kzalloc(sizeof(struct drm_syncobj), GFP_KERNEL); @@ -283,13 +294,8 @@ int drm_syncobj_create(struct drm_syncobj **out_syncobj, uint32_t flags, INIT_LIST_HEAD(&syncobj->cb_list); spin_lock_init(&syncobj->lock);
- if (flags & DRM_SYNCOBJ_CREATE_SIGNALED) { - ret = drm_syncobj_assign_null_handle(syncobj); - if (ret < 0) { - drm_syncobj_put(syncobj); - return ret; - } - } + if (flags & DRM_SYNCOBJ_CREATE_SIGNALED) + drm_syncobj_assign_null_handle(syncobj);
if (fence) drm_syncobj_replace_fence(syncobj, fence); @@ -980,11 +986,8 @@ drm_syncobj_signal_ioctl(struct drm_device *dev, void *data, if (ret < 0) return ret;
- for (i = 0; i < args->count_handles; i++) { - ret = drm_syncobj_assign_null_handle(syncobjs[i]); - if (ret < 0) - break; - } + for (i = 0; i < args->count_handles; i++) + drm_syncobj_assign_null_handle(syncobjs[i]);
drm_syncobj_array_free(syncobjs, args->count_handles);
Could you move this one to dma-fence as you said? Which will be used in other place as well.
-David
On 2018年11月28日 22:50, Christian König wrote:
Extract of useful code from the timeline work. Let's use just a single stub fence instance instead of allocating a new one all the time.
Signed-off-by: Chunming Zhou david1.zhou@amd.com Signed-off-by: Christian König christian.koenig@amd.com
drivers/gpu/drm/drm_syncobj.c | 67 ++++++++++++++++++++++--------------------- 1 file changed, 35 insertions(+), 32 deletions(-)
diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c index b92e3c726229..f78321338c1f 100644 --- a/drivers/gpu/drm/drm_syncobj.c +++ b/drivers/gpu/drm/drm_syncobj.c @@ -56,10 +56,8 @@ #include "drm_internal.h" #include <drm/drm_syncobj.h>
-struct drm_syncobj_stub_fence {
- struct dma_fence base;
- spinlock_t lock;
-}; +static DEFINE_SPINLOCK(stub_fence_lock); +static struct dma_fence stub_fence;
static const char *drm_syncobj_stub_fence_get_name(struct dma_fence *fence) { @@ -71,6 +69,25 @@ static const struct dma_fence_ops drm_syncobj_stub_fence_ops = { .get_timeline_name = drm_syncobj_stub_fence_get_name, };
+/**
- drm_syncobj_get_stub_fence - return a signaled fence
- Return a stub fence which is already signaled.
- */
+static struct dma_fence *drm_syncobj_get_stub_fence(void) +{
- spin_lock(&stub_fence_lock);
- if (!stub_fence.ops) {
dma_fence_init(&stub_fence,
&drm_syncobj_stub_fence_ops,
&stub_fence_lock,
0, 0);
dma_fence_signal_locked(&stub_fence);
- }
- spin_unlock(&stub_fence_lock);
- return dma_fence_get(&stub_fence);
+}
/**
- drm_syncobj_find - lookup and reference a sync object.
@@ -188,23 +205,18 @@ void drm_syncobj_replace_fence(struct drm_syncobj *syncobj, } EXPORT_SYMBOL(drm_syncobj_replace_fence);
-static int drm_syncobj_assign_null_handle(struct drm_syncobj *syncobj) +/**
- drm_syncobj_assign_null_handle - assign a stub fence to the sync object
- @syncobj: sync object to assign the fence on
- Assign a already signaled stub fence to the sync object.
- */
+static void drm_syncobj_assign_null_handle(struct drm_syncobj *syncobj) {
- struct drm_syncobj_stub_fence *fence;
- fence = kzalloc(sizeof(*fence), GFP_KERNEL);
- if (fence == NULL)
return -ENOMEM;
- struct dma_fence *fence = drm_syncobj_get_stub_fence();
- spin_lock_init(&fence->lock);
- dma_fence_init(&fence->base, &drm_syncobj_stub_fence_ops,
&fence->lock, 0, 0);
- dma_fence_signal(&fence->base);
- drm_syncobj_replace_fence(syncobj, &fence->base);
- dma_fence_put(&fence->base);
- return 0;
drm_syncobj_replace_fence(syncobj, fence);
dma_fence_put(fence); }
/**
@@ -272,7 +284,6 @@ EXPORT_SYMBOL(drm_syncobj_free); int drm_syncobj_create(struct drm_syncobj **out_syncobj, uint32_t flags, struct dma_fence *fence) {
int ret; struct drm_syncobj *syncobj;
syncobj = kzalloc(sizeof(struct drm_syncobj), GFP_KERNEL);
@@ -283,13 +294,8 @@ int drm_syncobj_create(struct drm_syncobj **out_syncobj, uint32_t flags, INIT_LIST_HEAD(&syncobj->cb_list); spin_lock_init(&syncobj->lock);
- if (flags & DRM_SYNCOBJ_CREATE_SIGNALED) {
ret = drm_syncobj_assign_null_handle(syncobj);
if (ret < 0) {
drm_syncobj_put(syncobj);
return ret;
}
- }
if (flags & DRM_SYNCOBJ_CREATE_SIGNALED)
drm_syncobj_assign_null_handle(syncobj);
if (fence) drm_syncobj_replace_fence(syncobj, fence);
@@ -980,11 +986,8 @@ drm_syncobj_signal_ioctl(struct drm_device *dev, void *data, if (ret < 0) return ret;
- for (i = 0; i < args->count_handles; i++) {
ret = drm_syncobj_assign_null_handle(syncobjs[i]);
if (ret < 0)
break;
- }
for (i = 0; i < args->count_handles; i++)
drm_syncobj_assign_null_handle(syncobjs[i]);
drm_syncobj_array_free(syncobjs, args->count_handles);
This completes "drm/syncobj: Drop add/remove_callback from driver interface" and cleans up the implementation a bit.
Signed-off-by: Christian König christian.koenig@amd.com --- drivers/gpu/drm/drm_syncobj.c | 91 ++++++++++++++----------------------------- include/drm/drm_syncobj.h | 21 ---------- 2 files changed, 30 insertions(+), 82 deletions(-)
diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c index f78321338c1f..16e43ed1ec0b 100644 --- a/drivers/gpu/drm/drm_syncobj.c +++ b/drivers/gpu/drm/drm_syncobj.c @@ -56,6 +56,16 @@ #include "drm_internal.h" #include <drm/drm_syncobj.h>
+struct syncobj_wait_entry { + struct list_head node; + struct task_struct *task; + struct dma_fence *fence; + struct dma_fence_cb fence_cb; +}; + +static void syncobj_wait_syncobj_func(struct drm_syncobj *syncobj, + struct syncobj_wait_entry *wait); + static DEFINE_SPINLOCK(stub_fence_lock); static struct dma_fence stub_fence;
@@ -115,58 +125,33 @@ struct drm_syncobj *drm_syncobj_find(struct drm_file *file_private, } EXPORT_SYMBOL(drm_syncobj_find);
-static void drm_syncobj_add_callback_locked(struct drm_syncobj *syncobj, - struct drm_syncobj_cb *cb, - drm_syncobj_func_t func) +static void drm_syncobj_fence_add_wait(struct drm_syncobj *syncobj, + struct syncobj_wait_entry *wait) { - cb->func = func; - list_add_tail(&cb->node, &syncobj->cb_list); -} - -static int drm_syncobj_fence_get_or_add_callback(struct drm_syncobj *syncobj, - struct dma_fence **fence, - struct drm_syncobj_cb *cb, - drm_syncobj_func_t func) -{ - int ret; - - *fence = drm_syncobj_fence_get(syncobj); - if (*fence) - return 1; + if (wait->fence) + return;
spin_lock(&syncobj->lock); /* We've already tried once to get a fence and failed. Now that we * have the lock, try one more time just to be sure we don't add a * callback when a fence has already been set. */ - if (syncobj->fence) { - *fence = dma_fence_get(rcu_dereference_protected(syncobj->fence, - lockdep_is_held(&syncobj->lock))); - ret = 1; - } else { - *fence = NULL; - drm_syncobj_add_callback_locked(syncobj, cb, func); - ret = 0; - } + if (syncobj->fence) + wait->fence = dma_fence_get( + rcu_dereference_protected(syncobj->fence, 1)); + else + list_add_tail(&wait->node, &syncobj->cb_list); spin_unlock(&syncobj->lock); - - return ret; }
-void drm_syncobj_add_callback(struct drm_syncobj *syncobj, - struct drm_syncobj_cb *cb, - drm_syncobj_func_t func) +static void drm_syncobj_remove_wait(struct drm_syncobj *syncobj, + struct syncobj_wait_entry *wait) { - spin_lock(&syncobj->lock); - drm_syncobj_add_callback_locked(syncobj, cb, func); - spin_unlock(&syncobj->lock); -} + if (!wait->node.next) + return;
-void drm_syncobj_remove_callback(struct drm_syncobj *syncobj, - struct drm_syncobj_cb *cb) -{ spin_lock(&syncobj->lock); - list_del_init(&cb->node); + list_del_init(&wait->node); spin_unlock(&syncobj->lock); }
@@ -181,7 +166,7 @@ void drm_syncobj_replace_fence(struct drm_syncobj *syncobj, struct dma_fence *fence) { struct dma_fence *old_fence; - struct drm_syncobj_cb *cur, *tmp; + struct syncobj_wait_entry *cur, *tmp;
if (fence) dma_fence_get(fence); @@ -195,7 +180,7 @@ void drm_syncobj_replace_fence(struct drm_syncobj *syncobj, if (fence != old_fence) { list_for_each_entry_safe(cur, tmp, &syncobj->cb_list, node) { list_del_init(&cur->node); - cur->func(syncobj, cur); + syncobj_wait_syncobj_func(syncobj, cur); } }
@@ -641,13 +626,6 @@ drm_syncobj_fd_to_handle_ioctl(struct drm_device *dev, void *data, &args->handle); }
-struct syncobj_wait_entry { - struct task_struct *task; - struct dma_fence *fence; - struct dma_fence_cb fence_cb; - struct drm_syncobj_cb syncobj_cb; -}; - static void syncobj_wait_fence_func(struct dma_fence *fence, struct dma_fence_cb *cb) { @@ -658,11 +636,8 @@ static void syncobj_wait_fence_func(struct dma_fence *fence, }
static void syncobj_wait_syncobj_func(struct drm_syncobj *syncobj, - struct drm_syncobj_cb *cb) + struct syncobj_wait_entry *wait) { - struct syncobj_wait_entry *wait = - container_of(cb, struct syncobj_wait_entry, syncobj_cb); - /* This happens inside the syncobj lock */ wait->fence = dma_fence_get(rcu_dereference_protected(syncobj->fence, lockdep_is_held(&syncobj->lock))); @@ -721,12 +696,8 @@ static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs, */
if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) { - for (i = 0; i < count; ++i) { - drm_syncobj_fence_get_or_add_callback(syncobjs[i], - &entries[i].fence, - &entries[i].syncobj_cb, - syncobj_wait_syncobj_func); - } + for (i = 0; i < count; ++i) + drm_syncobj_fence_add_wait(syncobjs[i], &entries[i]); }
do { @@ -775,9 +746,7 @@ static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
cleanup_entries: for (i = 0; i < count; ++i) { - if (entries[i].syncobj_cb.func) - drm_syncobj_remove_callback(syncobjs[i], - &entries[i].syncobj_cb); + drm_syncobj_remove_wait(syncobjs[i], &entries[i]); if (entries[i].fence_cb.func) dma_fence_remove_callback(entries[i].fence, &entries[i].fence_cb); diff --git a/include/drm/drm_syncobj.h b/include/drm/drm_syncobj.h index b1fe921f8e8f..7c6ed845c70d 100644 --- a/include/drm/drm_syncobj.h +++ b/include/drm/drm_syncobj.h @@ -28,8 +28,6 @@
#include "linux/dma-fence.h"
-struct drm_syncobj_cb; - /** * struct drm_syncobj - sync object. * @@ -62,25 +60,6 @@ struct drm_syncobj { struct file *file; };
-typedef void (*drm_syncobj_func_t)(struct drm_syncobj *syncobj, - struct drm_syncobj_cb *cb); - -/** - * struct drm_syncobj_cb - callback for drm_syncobj_add_callback - * @node: used by drm_syncob_add_callback to append this struct to - * &drm_syncobj.cb_list - * @func: drm_syncobj_func_t to call - * - * This struct will be initialized by drm_syncobj_add_callback, additional - * data can be passed along by embedding drm_syncobj_cb in another struct. - * The callback will get called the next time drm_syncobj_replace_fence is - * called. - */ -struct drm_syncobj_cb { - struct list_head node; - drm_syncobj_func_t func; -}; - void drm_syncobj_free(struct kref *kref);
/**
Use the dma_fence_chain object to create a timeline of fence objects instead of just replacing the existing fence.
v2: rebase and cleanup
Signed-off-by: Christian König christian.koenig@amd.com --- drivers/gpu/drm/drm_syncobj.c | 37 +++++++++++++++++++++++++++++++++++++ include/drm/drm_syncobj.h | 5 +++++ 2 files changed, 42 insertions(+)
diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c index 16e43ed1ec0b..5e0ae059a34f 100644 --- a/drivers/gpu/drm/drm_syncobj.c +++ b/drivers/gpu/drm/drm_syncobj.c @@ -155,6 +155,43 @@ static void drm_syncobj_remove_wait(struct drm_syncobj *syncobj, spin_unlock(&syncobj->lock); }
+/** + * drm_syncobj_add_point - add new timeline point to the syncobj + * @syncobj: sync object to add timeline point do + * @chain: chain node to use to add the point + * @fence: fence to encapsulate in the chain node + * @point: sequence number to use for the point + * + * Add the chain node as new timeline point to the syncobj. + */ +void drm_syncobj_add_point(struct drm_syncobj *syncobj, + struct dma_fence_chain *chain, + struct dma_fence *fence, + uint64_t point) +{ + struct syncobj_wait_entry *cur, *tmp; + struct dma_fence *prev; + + dma_fence_get(fence); + + spin_lock(&syncobj->lock); + + prev = rcu_dereference_protected(syncobj->fence, + lockdep_is_held(&syncobj->lock)); + dma_fence_chain_init(chain, prev, fence, point); + rcu_assign_pointer(syncobj->fence, &chain->base); + + list_for_each_entry_safe(cur, tmp, &syncobj->cb_list, node) { + list_del_init(&cur->node); + syncobj_wait_syncobj_func(syncobj, cur); + } + spin_unlock(&syncobj->lock); + + /* Walk the chain once to trigger garbage collection */ + dma_fence_chain_for_each(fence); +} +EXPORT_SYMBOL(drm_syncobj_add_point); + /** * drm_syncobj_replace_fence - replace fence in a sync object. * @syncobj: Sync object to replace fence in diff --git a/include/drm/drm_syncobj.h b/include/drm/drm_syncobj.h index 7c6ed845c70d..8acb4ae4f311 100644 --- a/include/drm/drm_syncobj.h +++ b/include/drm/drm_syncobj.h @@ -27,6 +27,7 @@ #define __DRM_SYNCOBJ_H__
#include "linux/dma-fence.h" +#include "linux/dma-fence-chain.h"
/** * struct drm_syncobj - sync object. @@ -110,6 +111,10 @@ drm_syncobj_fence_get(struct drm_syncobj *syncobj)
struct drm_syncobj *drm_syncobj_find(struct drm_file *file_private, u32 handle); +void drm_syncobj_add_point(struct drm_syncobj *syncobj, + struct dma_fence_chain *chain, + struct dma_fence *fence, + uint64_t point); void drm_syncobj_replace_fence(struct drm_syncobj *syncobj, struct dma_fence *fence); int drm_syncobj_find_fence(struct drm_file *file_private,
From: Chunming Zhou david1.zhou@amd.com
points array is a one-to-one match with the syncobjs array. v2: add separate ioctl for timeline point wait, otherwise break uapi. v3: userspace can specify two kinds of waits:: a. Wait for time point to be completed. b. and wait for time point to become available v4: rebase v5: add comment for xxx_WAIT_AVAILABLE v6: rebase and rework on new container v7: drop _WAIT_COMPLETED, it is the default anyway
Signed-off-by: Chunming Zhou david1.zhou@amd.com Signed-off-by: Christian König christian.koenig@amd.com Cc: Daniel Rakos Daniel.Rakos@amd.com Cc: Jason Ekstrand jason@jlekstrand.net Cc: Bas Nieuwenhuizen bas@basnieuwenhuizen.nl Cc: Dave Airlie airlied@redhat.com Cc: Chris Wilson chris@chris-wilson.co.uk --- drivers/gpu/drm/drm_internal.h | 2 + drivers/gpu/drm/drm_ioctl.c | 2 + drivers/gpu/drm/drm_syncobj.c | 151 +++++++++++++++++++++++++++++++++-------- include/uapi/drm/drm.h | 15 ++++ 4 files changed, 140 insertions(+), 30 deletions(-)
diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h index 0c4eb4a9ab31..566d44e3c782 100644 --- a/drivers/gpu/drm/drm_internal.h +++ b/drivers/gpu/drm/drm_internal.h @@ -183,6 +183,8 @@ int drm_syncobj_fd_to_handle_ioctl(struct drm_device *dev, void *data, struct drm_file *file_private); int drm_syncobj_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file_private); +int drm_syncobj_timeline_wait_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_private); int drm_syncobj_reset_ioctl(struct drm_device *dev, void *data, struct drm_file *file_private); int drm_syncobj_signal_ioctl(struct drm_device *dev, void *data, diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c index 94bd872d56c4..a9a17ed35cc4 100644 --- a/drivers/gpu/drm/drm_ioctl.c +++ b/drivers/gpu/drm/drm_ioctl.c @@ -675,6 +675,8 @@ static const struct drm_ioctl_desc drm_ioctls[] = { DRM_UNLOCKED|DRM_RENDER_ALLOW), DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_WAIT, drm_syncobj_wait_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_TIMELINE_WAIT, drm_syncobj_timeline_wait_ioctl, + DRM_UNLOCKED|DRM_RENDER_ALLOW), DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_RESET, drm_syncobj_reset_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_SIGNAL, drm_syncobj_signal_ioctl, diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c index 5e0ae059a34f..afeb3b8931c7 100644 --- a/drivers/gpu/drm/drm_syncobj.c +++ b/drivers/gpu/drm/drm_syncobj.c @@ -61,6 +61,7 @@ struct syncobj_wait_entry { struct task_struct *task; struct dma_fence *fence; struct dma_fence_cb fence_cb; + u64 point; };
static void syncobj_wait_syncobj_func(struct drm_syncobj *syncobj, @@ -128,6 +129,9 @@ EXPORT_SYMBOL(drm_syncobj_find); static void drm_syncobj_fence_add_wait(struct drm_syncobj *syncobj, struct syncobj_wait_entry *wait) { + struct dma_fence *fence; + int ret; + if (wait->fence) return;
@@ -136,11 +140,14 @@ static void drm_syncobj_fence_add_wait(struct drm_syncobj *syncobj, * have the lock, try one more time just to be sure we don't add a * callback when a fence has already been set. */ - if (syncobj->fence) - wait->fence = dma_fence_get( - rcu_dereference_protected(syncobj->fence, 1)); - else + fence = dma_fence_get(rcu_dereference_protected(syncobj->fence, 1)); + ret = dma_fence_chain_find_seqno(&fence, wait->point); + if (ret) { + dma_fence_put(fence); list_add_tail(&wait->node, &syncobj->cb_list); + } else { + wait->fence = fence; + } spin_unlock(&syncobj->lock); }
@@ -181,10 +188,8 @@ void drm_syncobj_add_point(struct drm_syncobj *syncobj, dma_fence_chain_init(chain, prev, fence, point); rcu_assign_pointer(syncobj->fence, &chain->base);
- list_for_each_entry_safe(cur, tmp, &syncobj->cb_list, node) { - list_del_init(&cur->node); + list_for_each_entry_safe(cur, tmp, &syncobj->cb_list, node) syncobj_wait_syncobj_func(syncobj, cur); - } spin_unlock(&syncobj->lock);
/* Walk the chain once to trigger garbage collection */ @@ -215,10 +220,8 @@ void drm_syncobj_replace_fence(struct drm_syncobj *syncobj, rcu_assign_pointer(syncobj->fence, fence);
if (fence != old_fence) { - list_for_each_entry_safe(cur, tmp, &syncobj->cb_list, node) { - list_del_init(&cur->node); + list_for_each_entry_safe(cur, tmp, &syncobj->cb_list, node) syncobj_wait_syncobj_func(syncobj, cur); - } }
spin_unlock(&syncobj->lock); @@ -675,13 +678,25 @@ static void syncobj_wait_fence_func(struct dma_fence *fence, static void syncobj_wait_syncobj_func(struct drm_syncobj *syncobj, struct syncobj_wait_entry *wait) { + struct dma_fence *fence; + int ret; + /* This happens inside the syncobj lock */ - wait->fence = dma_fence_get(rcu_dereference_protected(syncobj->fence, - lockdep_is_held(&syncobj->lock))); - wake_up_process(wait->task); + fence = rcu_dereference_protected(syncobj->fence, + lockdep_is_held(&syncobj->lock)); + dma_fence_get(fence); + ret = dma_fence_chain_find_seqno(&fence, wait->point); + if (ret) { + dma_fence_put(fence); + } else { + wait->fence = fence; + wake_up_process(wait->task); + list_del_init(&wait->node); + } }
static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs, + void __user *user_points, uint32_t count, uint32_t flags, signed long timeout, @@ -689,12 +704,27 @@ static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs, { struct syncobj_wait_entry *entries; struct dma_fence *fence; + uint64_t *points; uint32_t signaled_count, i;
- entries = kcalloc(count, sizeof(*entries), GFP_KERNEL); - if (!entries) + points = kmalloc_array(count, sizeof(*points), GFP_KERNEL); + if (points == NULL) return -ENOMEM;
+ if (!user_points) { + memset(points, 0, count * sizeof(uint64_t)); + + } else if (copy_from_user(points, user_points, + sizeof(uint64_t) * count)) { + timeout = -EFAULT; + goto err_free_points; + } + + entries = kcalloc(count, sizeof(*entries), GFP_KERNEL); + if (!entries) { + timeout = -ENOMEM; + goto err_free_points; + } /* Walk the list of sync objects and initialize entries. We do * this up-front so that we can properly return -EINVAL if there is * a syncobj with a missing fence and then never have the chance of @@ -702,9 +732,15 @@ static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs, */ signaled_count = 0; for (i = 0; i < count; ++i) { + struct dma_fence *fence; + int ret; + entries[i].task = current; - entries[i].fence = drm_syncobj_fence_get(syncobjs[i]); - if (!entries[i].fence) { + entries[i].point = points[i]; + fence = drm_syncobj_fence_get(syncobjs[i]); + ret = dma_fence_chain_find_seqno(&fence, points[i]); + if (ret) { + dma_fence_put(fence); if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) { continue; } else { @@ -712,8 +748,10 @@ static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs, goto cleanup_entries; } } + entries[i].fence = fence;
- if (dma_fence_is_signaled(entries[i].fence)) { + if ((flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE) || + dma_fence_is_signaled(entries[i].fence)) { if (signaled_count == 0 && idx) *idx = i; signaled_count++; @@ -746,7 +784,8 @@ static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs, if (!fence) continue;
- if (dma_fence_is_signaled(fence) || + if ((flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE) || + dma_fence_is_signaled(fence) || (!entries[i].fence_cb.func && dma_fence_add_callback(fence, &entries[i].fence_cb, @@ -791,6 +830,9 @@ static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs, } kfree(entries);
+err_free_points: + kfree(points); + return timeout; }
@@ -829,19 +871,33 @@ static signed long drm_timeout_abs_to_jiffies(int64_t timeout_nsec) static int drm_syncobj_array_wait(struct drm_device *dev, struct drm_file *file_private, struct drm_syncobj_wait *wait, - struct drm_syncobj **syncobjs) + struct drm_syncobj_timeline_wait *timeline_wait, + struct drm_syncobj **syncobjs, bool timeline) { - signed long timeout = drm_timeout_abs_to_jiffies(wait->timeout_nsec); + signed long timeout = 0; uint32_t first = ~0;
- timeout = drm_syncobj_array_wait_timeout(syncobjs, - wait->count_handles, - wait->flags, - timeout, &first); - if (timeout < 0) - return timeout; - - wait->first_signaled = first; + if (!timeline) { + timeout = drm_timeout_abs_to_jiffies(wait->timeout_nsec); + timeout = drm_syncobj_array_wait_timeout(syncobjs, + NULL, + wait->count_handles, + wait->flags, + timeout, &first); + if (timeout < 0) + return timeout; + wait->first_signaled = first; + } else { + timeout = drm_timeout_abs_to_jiffies(timeline_wait->timeout_nsec); + timeout = drm_syncobj_array_wait_timeout(syncobjs, + u64_to_user_ptr(timeline_wait->points), + timeline_wait->count_handles, + timeline_wait->flags, + timeout, &first); + if (timeout < 0) + return timeout; + timeline_wait->first_signaled = first; + } return 0; }
@@ -927,13 +983,48 @@ drm_syncobj_wait_ioctl(struct drm_device *dev, void *data, return ret;
ret = drm_syncobj_array_wait(dev, file_private, - args, syncobjs); + args, NULL, syncobjs, false); + + drm_syncobj_array_free(syncobjs, args->count_handles); + + return ret; +} + +int +drm_syncobj_timeline_wait_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_private) +{ + struct drm_syncobj_timeline_wait *args = data; + struct drm_syncobj **syncobjs; + int ret = 0; + + if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ)) + return -ENODEV; + + if (args->flags & ~(DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL | + DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT | + DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE)) + return -EINVAL; + + if (args->count_handles == 0) + return -EINVAL; + + ret = drm_syncobj_array_find(file_private, + u64_to_user_ptr(args->handles), + args->count_handles, + &syncobjs); + if (ret < 0) + return ret; + + ret = drm_syncobj_array_wait(dev, file_private, + NULL, args, syncobjs, true);
drm_syncobj_array_free(syncobjs, args->count_handles);
return ret; }
+ int drm_syncobj_reset_ioctl(struct drm_device *dev, void *data, struct drm_file *file_private) diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h index cebdb2541eb7..627032df23e6 100644 --- a/include/uapi/drm/drm.h +++ b/include/uapi/drm/drm.h @@ -738,6 +738,7 @@ struct drm_syncobj_handle {
#define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL (1 << 0) #define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT (1 << 1) +#define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE (1 << 2) struct drm_syncobj_wait { __u64 handles; /* absolute timeout */ @@ -748,6 +749,19 @@ struct drm_syncobj_wait { __u32 pad; };
+struct drm_syncobj_timeline_wait { + __u64 handles; + /* wait on specific timeline point for every handles*/ + __u64 points; + /* absolute timeout */ + __s64 timeout_nsec; + __u32 count_handles; + __u32 flags; + __u32 first_signaled; /* only valid when not waiting all */ + __u32 pad; +}; + + struct drm_syncobj_array { __u64 handles; __u32 count_handles; @@ -910,6 +924,7 @@ extern "C" { #define DRM_IOCTL_MODE_GET_LEASE DRM_IOWR(0xC8, struct drm_mode_get_lease) #define DRM_IOCTL_MODE_REVOKE_LEASE DRM_IOWR(0xC9, struct drm_mode_revoke_lease)
+#define DRM_IOCTL_SYNCOBJ_TIMELINE_WAIT DRM_IOWR(0xCA, struct drm_syncobj_timeline_wait) /** * Device specific ioctls should only be in their respective headers * The device specific ioctl range is from 0x40 to 0x9f.
From: Chunming Zhou david1.zhou@amd.com
user mode can query timeline payload. v2: check return value of copy_to_user v3: handle querying entry by entry v4: rebase on new chain container, simplify interface
Signed-off-by: Chunming Zhou david1.zhou@amd.com Cc: Daniel Rakos Daniel.Rakos@amd.com Cc: Jason Ekstrand jason@jlekstrand.net Cc: Bas Nieuwenhuizen bas@basnieuwenhuizen.nl Cc: Dave Airlie airlied@redhat.com Cc: Christian König christian.koenig@amd.com Cc: Chris Wilson chris@chris-wilson.co.uk --- drivers/gpu/drm/drm_internal.h | 2 ++ drivers/gpu/drm/drm_ioctl.c | 2 ++ drivers/gpu/drm/drm_syncobj.c | 40 ++++++++++++++++++++++++++++++++++++++++ include/uapi/drm/drm.h | 11 +++++++++++ 4 files changed, 55 insertions(+)
diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h index 566d44e3c782..9c4826411a3c 100644 --- a/drivers/gpu/drm/drm_internal.h +++ b/drivers/gpu/drm/drm_internal.h @@ -189,6 +189,8 @@ int drm_syncobj_reset_ioctl(struct drm_device *dev, void *data, struct drm_file *file_private); int drm_syncobj_signal_ioctl(struct drm_device *dev, void *data, struct drm_file *file_private); +int drm_syncobj_query_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_private);
/* drm_framebuffer.c */ void drm_framebuffer_print_info(struct drm_printer *p, unsigned int indent, diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c index a9a17ed35cc4..7578ef6dc1d1 100644 --- a/drivers/gpu/drm/drm_ioctl.c +++ b/drivers/gpu/drm/drm_ioctl.c @@ -681,6 +681,8 @@ static const struct drm_ioctl_desc drm_ioctls[] = { DRM_UNLOCKED|DRM_RENDER_ALLOW), DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_SIGNAL, drm_syncobj_signal_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_QUERY, drm_syncobj_query_ioctl, + DRM_UNLOCKED|DRM_RENDER_ALLOW), DRM_IOCTL_DEF(DRM_IOCTL_CRTC_GET_SEQUENCE, drm_crtc_get_sequence_ioctl, DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_CRTC_QUEUE_SEQUENCE, drm_crtc_queue_sequence_ioctl, DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_LEASE, drm_mode_create_lease_ioctl, DRM_MASTER|DRM_UNLOCKED), diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c index afeb3b8931c7..a4964f0ad01d 100644 --- a/drivers/gpu/drm/drm_syncobj.c +++ b/drivers/gpu/drm/drm_syncobj.c @@ -1090,3 +1090,43 @@ drm_syncobj_signal_ioctl(struct drm_device *dev, void *data,
return ret; } + +int drm_syncobj_query_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_private) +{ + struct drm_syncobj_timeline_query *args = data; + struct drm_syncobj **syncobjs; + uint64_t __user *points = u64_to_user_ptr(args->points); + uint32_t i; + int ret; + + if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ)) + return -ENODEV; + + if (args->count_handles == 0) + return -EINVAL; + + ret = drm_syncobj_array_find(file_private, + u64_to_user_ptr(args->handles), + args->count_handles, + &syncobjs); + if (ret < 0) + return ret; + + for (i = 0; i < args->count_handles; i++) { + struct dma_fence_chain *chain; + struct dma_fence *fence; + uint64_t point; + + fence = drm_syncobj_fence_get(syncobjs[i]); + chain = to_dma_fence_chain(fence); + point = chain ? fence->seqno : 0; + ret = copy_to_user(&points[i], &point, sizeof(uint64_t)); + ret = ret ? -EFAULT : 0; + if (ret) + break; + } + drm_syncobj_array_free(syncobjs, args->count_handles); + + return ret; +} diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h index 627032df23e6..14ca02a3d5f2 100644 --- a/include/uapi/drm/drm.h +++ b/include/uapi/drm/drm.h @@ -768,6 +768,15 @@ struct drm_syncobj_array { __u32 pad; };
+struct drm_syncobj_timeline_query { + __u64 handles; + /* points are timeline syncobjs payloads returned by query ioctl */ + __u64 points; + __u32 count_handles; + __u32 pad; +}; + + /* Query current scanout sequence number */ struct drm_crtc_get_sequence { __u32 crtc_id; /* requested crtc_id */ @@ -925,6 +934,8 @@ extern "C" { #define DRM_IOCTL_MODE_REVOKE_LEASE DRM_IOWR(0xC9, struct drm_mode_revoke_lease)
#define DRM_IOCTL_SYNCOBJ_TIMELINE_WAIT DRM_IOWR(0xCA, struct drm_syncobj_timeline_wait) +#define DRM_IOCTL_SYNCOBJ_QUERY DRM_IOWR(0xCB, struct drm_syncobj_timeline_query) + /** * Device specific ioctls should only be in their respective headers * The device specific ioctl range is from 0x40 to 0x9f.
Implement finding the right timeline point in drm_syncobj_find_fence.
v2: return -EINVAL when the point is not submitted yet. v3: fix reference counting bug, add flags handling as well
Signed-off-by: Christian König christian.koenig@amd.com --- drivers/gpu/drm/drm_syncobj.c | 43 ++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 40 insertions(+), 3 deletions(-)
diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c index a4964f0ad01d..9a4b8f221dac 100644 --- a/drivers/gpu/drm/drm_syncobj.c +++ b/drivers/gpu/drm/drm_syncobj.c @@ -264,16 +264,53 @@ int drm_syncobj_find_fence(struct drm_file *file_private, struct dma_fence **fence) { struct drm_syncobj *syncobj = drm_syncobj_find(file_private, handle); - int ret = 0; + struct syncobj_wait_entry wait; + int ret;
if (!syncobj) return -ENOENT;
*fence = drm_syncobj_fence_get(syncobj); - if (!*fence) { + drm_syncobj_put(syncobj); + + if (*fence) { + ret = dma_fence_chain_find_seqno(fence, point); + if (!ret) + return 0; + dma_fence_put(*fence); + } else { ret = -EINVAL; } - drm_syncobj_put(syncobj); + + if (!(flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT)) + return ret; + + memset(&wait, 0, sizeof(wait)); + wait.task = current; + wait.point = point; + drm_syncobj_fence_add_wait(syncobj, &wait); + + do { + set_current_state(TASK_INTERRUPTIBLE); + if (wait.fence) { + ret = 0; + break; + } + + if (signal_pending(current)) { + ret = -ERESTARTSYS; + break; + } + + schedule(); + } while (1); + + __set_current_state(TASK_RUNNING); + *fence = wait.fence; + + if (wait.node.next) + drm_syncobj_remove_wait(syncobj, &wait); + return ret; } EXPORT_SYMBOL(drm_syncobj_find_fence);
From: Chunming Zhou david1.zhou@amd.com
syncobj wait/signal operations are appended during command submission. v2: separate into two kinds of in/out_deps functions
Signed-off-by: Chunming Zhou david1.zhou@amd.com Cc: Daniel Rakos Daniel.Rakos@amd.com Cc: Jason Ekstrand jason@jlekstrand.net Cc: Bas Nieuwenhuizen bas@basnieuwenhuizen.nl Cc: Dave Airlie airlied@redhat.com Cc: Christian König christian.koenig@amd.com Cc: Chris Wilson chris@chris-wilson.co.uk --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 10 ++- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 147 +++++++++++++++++++++++++++------ include/uapi/drm/amdgpu_drm.h | 8 ++ 3 files changed, 140 insertions(+), 25 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index adbad0e2d4ea..e3a2cba518f2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -545,6 +545,12 @@ struct amdgpu_cs_chunk { void *kdata; };
+struct amdgpu_cs_post_dep { + struct drm_syncobj *syncobj; + struct dma_fence_chain *chain; + u64 point; +}; + struct amdgpu_cs_parser { struct amdgpu_device *adev; struct drm_file *filp; @@ -574,8 +580,8 @@ struct amdgpu_cs_parser { /* user fence */ struct amdgpu_bo_list_entry uf_entry;
- unsigned num_post_dep_syncobjs; - struct drm_syncobj **post_dep_syncobjs; + unsigned num_post_deps; + struct amdgpu_cs_post_dep *post_deps; };
static inline u32 amdgpu_get_ib_value(struct amdgpu_cs_parser *p, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index dc54e9efd910..580f1ea27157 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -213,6 +213,8 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs case AMDGPU_CHUNK_ID_DEPENDENCIES: case AMDGPU_CHUNK_ID_SYNCOBJ_IN: case AMDGPU_CHUNK_ID_SYNCOBJ_OUT: + case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT: + case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL: break;
default: @@ -792,9 +794,11 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, ttm_eu_backoff_reservation(&parser->ticket, &parser->validated);
- for (i = 0; i < parser->num_post_dep_syncobjs; i++) - drm_syncobj_put(parser->post_dep_syncobjs[i]); - kfree(parser->post_dep_syncobjs); + for (i = 0; i < parser->num_post_deps; i++) { + drm_syncobj_put(parser->post_deps[i].syncobj); + kfree(parser->post_deps[i].chain); + } + kfree(parser->post_deps);
dma_fence_put(parser->fence);
@@ -1100,13 +1104,18 @@ static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p, }
static int amdgpu_syncobj_lookup_and_add_to_sync(struct amdgpu_cs_parser *p, - uint32_t handle) + uint32_t handle, u64 point, + u64 flags) { - int r; struct dma_fence *fence; - r = drm_syncobj_find_fence(p->filp, handle, 0, 0, &fence); - if (r) + int r; + + r = drm_syncobj_find_fence(p->filp, handle, point, flags, &fence); + if (r) { + DRM_ERROR("syncobj %u failed to find fence @ %llu (%d)!\n", + handle, point, r); return r; + }
r = amdgpu_sync_fence(p->adev, &p->job->sync, fence, true); dma_fence_put(fence); @@ -1117,46 +1126,115 @@ static int amdgpu_syncobj_lookup_and_add_to_sync(struct amdgpu_cs_parser *p, static int amdgpu_cs_process_syncobj_in_dep(struct amdgpu_cs_parser *p, struct amdgpu_cs_chunk *chunk) { + struct drm_amdgpu_cs_chunk_sem *deps; unsigned num_deps; int i, r; - struct drm_amdgpu_cs_chunk_sem *deps;
deps = (struct drm_amdgpu_cs_chunk_sem *)chunk->kdata; num_deps = chunk->length_dw * 4 / sizeof(struct drm_amdgpu_cs_chunk_sem); + for (i = 0; i < num_deps; ++i) { + r = amdgpu_syncobj_lookup_and_add_to_sync(p, deps[i].handle, + 0, 0); + if (r) + return r; + } + + return 0; +}
+ +static int amdgpu_cs_process_syncobj_timeline_in_dep(struct amdgpu_cs_parser *p, + struct amdgpu_cs_chunk *chunk) +{ + struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps; + unsigned num_deps; + int i, r; + + syncobj_deps = (struct drm_amdgpu_cs_chunk_syncobj *)chunk->kdata; + num_deps = chunk->length_dw * 4 / + sizeof(struct drm_amdgpu_cs_chunk_syncobj); for (i = 0; i < num_deps; ++i) { - r = amdgpu_syncobj_lookup_and_add_to_sync(p, deps[i].handle); + r = amdgpu_syncobj_lookup_and_add_to_sync(p, + syncobj_deps[i].handle, + syncobj_deps[i].point, + syncobj_deps[i].flags); if (r) return r; } + return 0; }
static int amdgpu_cs_process_syncobj_out_dep(struct amdgpu_cs_parser *p, struct amdgpu_cs_chunk *chunk) { + struct drm_amdgpu_cs_chunk_sem *deps; unsigned num_deps; int i; - struct drm_amdgpu_cs_chunk_sem *deps; + deps = (struct drm_amdgpu_cs_chunk_sem *)chunk->kdata; num_deps = chunk->length_dw * 4 / sizeof(struct drm_amdgpu_cs_chunk_sem);
- p->post_dep_syncobjs = kmalloc_array(num_deps, - sizeof(struct drm_syncobj *), - GFP_KERNEL); - p->num_post_dep_syncobjs = 0; + p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps), + GFP_KERNEL); + p->num_post_deps = 0; + + if (!p->post_deps) + return -ENOMEM; + + + for (i = 0; i < num_deps; ++i) { + p->post_deps[i].syncobj = + drm_syncobj_find(p->filp, deps[i].handle); + if (!p->post_deps[i].syncobj) + return -EINVAL; + p->post_deps[i].chain = NULL; + p->post_deps[i].point = 0; + p->num_post_deps++; + } + + return 0; +} + + +static int amdgpu_cs_process_syncobj_timeline_out_dep(struct amdgpu_cs_parser *p, + struct amdgpu_cs_chunk + *chunk) +{ + struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps; + unsigned num_deps; + int i; + + syncobj_deps = (struct drm_amdgpu_cs_chunk_syncobj *)chunk->kdata; + num_deps = chunk->length_dw * 4 / + sizeof(struct drm_amdgpu_cs_chunk_syncobj); + + p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps), + GFP_KERNEL); + p->num_post_deps = 0;
- if (!p->post_dep_syncobjs) + if (!p->post_deps) return -ENOMEM;
for (i = 0; i < num_deps; ++i) { - p->post_dep_syncobjs[i] = drm_syncobj_find(p->filp, deps[i].handle); - if (!p->post_dep_syncobjs[i]) + struct amdgpu_cs_post_dep *dep = &p->post_deps[i]; + + dep->chain = kmalloc(sizeof(*dep->chain), GFP_KERNEL); + if (!dep->chain) + return -ENOMEM; + + dep->syncobj = drm_syncobj_find(p->filp, + syncobj_deps[i].handle); + if (!dep->syncobj) { + kfree(dep->chain); return -EINVAL; - p->num_post_dep_syncobjs++; + } + dep->point = syncobj_deps[i].point; + p->num_post_deps++; } + return 0; }
@@ -1170,18 +1248,32 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
chunk = &p->chunks[i];
- if (chunk->chunk_id == AMDGPU_CHUNK_ID_DEPENDENCIES) { + switch (chunk->chunk_id) { + case AMDGPU_CHUNK_ID_DEPENDENCIES: r = amdgpu_cs_process_fence_dep(p, chunk); if (r) return r; - } else if (chunk->chunk_id == AMDGPU_CHUNK_ID_SYNCOBJ_IN) { + break; + case AMDGPU_CHUNK_ID_SYNCOBJ_IN: r = amdgpu_cs_process_syncobj_in_dep(p, chunk); if (r) return r; - } else if (chunk->chunk_id == AMDGPU_CHUNK_ID_SYNCOBJ_OUT) { + break; + case AMDGPU_CHUNK_ID_SYNCOBJ_OUT: r = amdgpu_cs_process_syncobj_out_dep(p, chunk); if (r) return r; + break; + case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT: + r = amdgpu_cs_process_syncobj_timeline_in_dep(p, chunk); + if (r) + return r; + break; + case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL: + r = amdgpu_cs_process_syncobj_timeline_out_dep(p, chunk); + if (r) + return r; + break; } }
@@ -1192,8 +1284,17 @@ static void amdgpu_cs_post_dependencies(struct amdgpu_cs_parser *p) { int i;
- for (i = 0; i < p->num_post_dep_syncobjs; ++i) - drm_syncobj_replace_fence(p->post_dep_syncobjs[i], p->fence); + for (i = 0; i < p->num_post_deps; ++i) { + if (p->post_deps[i].chain) { + drm_syncobj_add_point(p->post_deps[i].syncobj, + p->post_deps[i].chain, + p->fence, p->post_deps[i].point); + p->post_deps[i].chain = NULL; + } else { + drm_syncobj_replace_fence(p->post_deps[i].syncobj, + p->fence); + } + } }
static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h index faaad04814e4..fcb8b601f7b5 100644 --- a/include/uapi/drm/amdgpu_drm.h +++ b/include/uapi/drm/amdgpu_drm.h @@ -526,6 +526,8 @@ struct drm_amdgpu_gem_va { #define AMDGPU_CHUNK_ID_SYNCOBJ_IN 0x04 #define AMDGPU_CHUNK_ID_SYNCOBJ_OUT 0x05 #define AMDGPU_CHUNK_ID_BO_HANDLES 0x06 +#define AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT 0x07 +#define AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL 0x08
struct drm_amdgpu_cs_chunk { __u32 chunk_id; @@ -601,6 +603,12 @@ struct drm_amdgpu_cs_chunk_sem { __u32 handle; };
+struct drm_amdgpu_cs_chunk_syncobj { + __u32 handle; + __u32 flags; + __u64 point; +}; + #define AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ 0 #define AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ_FD 1 #define AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD 2
From: Chunming Zhou david1.zhou@amd.com
Signed-off-by: Chunming Zhou david1.zhou@amd.com --- drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 90f474f98b6e..316bfc1a6a75 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -71,9 +71,10 @@ * - 3.25.0 - Add support for sensor query info (stable pstate sclk/mclk). * - 3.26.0 - GFX9: Process AMDGPU_IB_FLAG_TC_WB_NOT_INVALIDATE. * - 3.27.0 - Add new chunk to to AMDGPU_CS to enable BO_LIST creation. + * - 3.28.0 - Add syncobj timeline support to AMDGPU_CS. */ #define KMS_DRIVER_MAJOR 3 -#define KMS_DRIVER_MINOR 27 +#define KMS_DRIVER_MINOR 28 #define KMS_DRIVER_PATCHLEVEL 0
int amdgpu_vram_limit = 0;
Looks very good. I applied the patches locally and tested them today with ./amdgpu_test -s 9 and the syncobj_basic/wait tests of IGT.
+Daniel, Chris, Eric, could you have a look as well?
-David
On 2018年11月28日 22:50, Christian König wrote:
Tested this patch set more extensively in the last two weeks and fixed tons of additional bugs.
Still only testing with hand made DRM patches, but those are now rather reliable at least on amdgpu. Setting up igt is the next thing on the TODO list.
UAPI seems to be pretty solid already except for two changes:
- Dropping an extra flag in the wait interface which was default behavior anyway.
- Dropped the extra indirection in the query interface.
Additional to that I'm thinking if we shouldn't replace the flags parameter to find_fence() with a timeout value instead to limit how long we want to wait for a fence to appear.
Please test and comment, Christian.
amd-gfx mailing list amd-gfx@lists.freedesktop.org https://lists.freedesktop.org/mailman/listinfo/amd-gfx
dri-devel@lists.freedesktop.org