Quoting Sean Paul (2017-06-29 18:22:10)
On Thu, Jun 29, 2017 at 01:59:29PM +0100, Chris Wilson wrote:
The sync_pt were not adding themselves atomically to the timeline lists; corruption imminent. Only a single list is required to track the unsignaled sync_pt, so reduce the two lists down to one, rename the lock more appropriately, and use idiomatic names to distinguish a list from the links along it.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Sumit Semwal <sumit.semwal@linaro.org>
Cc: Sean Paul <seanpaul@chromium.org>
Cc: Gustavo Padovan <gustavo@padovan.org>
---
 drivers/dma-buf/sw_sync.c    | 39 ++++++++++++++-------------------------
 drivers/dma-buf/sync_debug.c |  9 ++++-----
 drivers/dma-buf/sync_debug.h | 21 ++++++++-------------
 3 files changed, 26 insertions(+), 43 deletions(-)
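The net effect on the structures in sync_debug.h is that the two list heads and child_list_lock collapse into a single list plus one lock. Roughly, as a sketch of where the hunks below end up (field comments mine, not the verbatim header):

struct sync_timeline {
	struct kref		kref;
	char			name[32];

	/* protected by lock */
	u64			context;
	int			value;

	struct list_head	pt_list;	/* every unsignaled sync_pt on this timeline */
	spinlock_t		lock;		/* also handed to dma_fence_init() as fence->lock */

	struct list_head	sync_timeline_list;
};

struct sync_pt {
	struct dma_fence	base;
	struct list_head	link;		/* this point's link along parent->pt_list */
};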
diff --git a/drivers/dma-buf/sw_sync.c b/drivers/dma-buf/sw_sync.c
index 6effa1ce010e..e51fe11bbbea 100644
--- a/drivers/dma-buf/sw_sync.c
+++ b/drivers/dma-buf/sw_sync.c
@@ -96,9 +96,8 @@ static struct sync_timeline *sync_timeline_create(const char *name)
 	obj->context = dma_fence_context_alloc(1);
 	strlcpy(obj->name, name, sizeof(obj->name));
 
-	INIT_LIST_HEAD(&obj->child_list_head);
-	INIT_LIST_HEAD(&obj->active_list_head);
-	spin_lock_init(&obj->child_list_lock);
+	INIT_LIST_HEAD(&obj->pt_list);
+	spin_lock_init(&obj->lock);
 
 	sync_timeline_debug_add(obj);
@@ -139,17 +138,15 @@ static void sync_timeline_signal(struct sync_timeline *obj, unsigned int inc)
 
 	trace_sync_timeline(obj);
 
-	spin_lock_irq(&obj->child_list_lock);
+	spin_lock_irq(&obj->lock);
 
 	obj->value += inc;
 
-	list_for_each_entry_safe(pt, next, &obj->active_list_head,
-				 active_list) {
+	list_for_each_entry_safe(pt, next, &obj->pt_list, link)
 		if (dma_fence_is_signaled_locked(&pt->base))
-			list_del_init(&pt->active_list);
-	}
+			list_del_init(&pt->link);
 
-	spin_unlock_irq(&obj->child_list_lock);
+	spin_unlock_irq(&obj->lock);
 }
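Putting that hunk back together, the signal path after this patch reads roughly as follows (a reconstruction from the diff rather than a paste from the tree):

static void sync_timeline_signal(struct sync_timeline *obj, unsigned int inc)
{
	struct sync_pt *pt, *next;

	trace_sync_timeline(obj);

	spin_lock_irq(&obj->lock);

	/* advance the timeline and unlink every newly signaled point in the
	 * same critical section, so pt_list and obj->value never disagree */
	obj->value += inc;

	list_for_each_entry_safe(pt, next, &obj->pt_list, link)
		if (dma_fence_is_signaled_locked(&pt->base))
			list_del_init(&pt->link);

	spin_unlock_irq(&obj->lock);
}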
 
 /**
@@ -171,15 +168,15 @@ static struct sync_pt *sync_pt_create(struct sync_timeline *obj,
 	if (!pt)
 		return NULL;
 
-	spin_lock_irq(&obj->child_list_lock);
 	sync_timeline_get(obj);
-	dma_fence_init(&pt->base, &timeline_fence_ops, &obj->child_list_lock,
+	dma_fence_init(&pt->base, &timeline_fence_ops, &obj->lock,
 		       obj->context, value);
-	list_add_tail(&pt->child_list, &obj->child_list_head);
-	INIT_LIST_HEAD(&pt->active_list);
-	spin_unlock_irq(&obj->child_list_lock);
+	INIT_LIST_HEAD(&pt->link);
+
+	spin_lock_irq(&obj->lock);
+	if (!dma_fence_is_signaled_locked(&pt->base))
+		list_add_tail(&pt->link, &obj->pt_list);
+	spin_unlock_irq(&obj->lock);
 
 	return pt;
 }
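The creation side now initialises the fence first and only then publishes it on the timeline, in one critical section; again roughly, reconstructed from the hunk with commentary added:

	sync_timeline_get(obj);
	dma_fence_init(&pt->base, &timeline_fence_ops, &obj->lock,
		       obj->context, value);
	INIT_LIST_HEAD(&pt->link);

	/* publish the new point on the timeline under obj->lock, and only if
	 * it is not already signaled against the current obj->value */
	spin_lock_irq(&obj->lock);
	if (!dma_fence_is_signaled_locked(&pt->base))
		list_add_tail(&pt->link, &obj->pt_list);
	spin_unlock_irq(&obj->lock);

	return pt;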
@@ -204,9 +201,8 @@ static void timeline_fence_release(struct dma_fence *fence)
 
 	spin_lock_irqsave(fence->lock, flags);
-	list_del(&pt->child_list);
-	if (!list_empty(&pt->active_list))
-		list_del(&pt->active_list);
+	if (!list_empty(&pt->link))
+		list_del(&pt->link);
 	spin_unlock_irqrestore(fence->lock, flags);
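The bare list_empty() test in the release path works because the other two paths keep the link self-consistent; annotating the new lines above (commentary mine, not part of the patch):

	/* pt->link is non-empty only while the point sits on parent->pt_list:
	 * sync_pt_create() leaves it empty when the fence is born signaled,
	 * and sync_timeline_signal() uses list_del_init(), so a signaled
	 * point also reads as empty here and is never unlinked twice. */
	if (!list_empty(&pt->link))
		list_del(&pt->link);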
@@ -223,13 +219,6 @@ static bool timeline_fence_signaled(struct dma_fence *fence)
 
 static bool timeline_fence_enable_signaling(struct dma_fence *fence)
 {
-	struct sync_pt *pt = dma_fence_to_sync_pt(fence);
-	struct sync_timeline *parent = dma_fence_parent(fence);
-
-	if (timeline_fence_signaled(fence))
-		return false;
-
-	list_add_tail(&pt->active_list, &parent->active_list_head);
 	return true;
Shouldn't you still return false if the fence is already signaled?
Yes/no :)
In this case it is immaterial, as the only way the timeline can advance is underneath its big lock, by signaling all the fences. So by the time dma_fence calls fence->ops->enable_signaling under that same lock, we already know that the fence isn't signaled, and it can't suddenly become signaled in the middle of the function call.
-Chris
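To spell out the ordering: dma_fence_init() above hands &obj->lock to the fence as fence->lock, and the dma-fence core only calls ->enable_signaling() with that lock held. Very roughly, paraphrasing what dma_fence_add_callback() does rather than quoting it:

	spin_lock_irqsave(fence->lock, flags);	/* for sw_sync this is &obj->lock */

	/* sync_timeline_signal() advances obj->value and signals points under
	 * this same lock, so the fence cannot become signaled while
	 * ->enable_signaling() runs below */
	if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) &&
	    !test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags) &&
	    !fence->ops->enable_signaling(fence))
		dma_fence_signal_locked(fence);

	spin_unlock_irqrestore(fence->lock, flags);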