On 11.08.19 18:25, Chris Wilson wrote:
When one of the fences in the array is signaled, propagate its error to the parent fence-array (keeping the first error reported).
v2: Opencode cmpxchg_local to avoid compiler freakout.
v3: Be careful not to flag an error if we race against signal-on-any.
v4: Same applies to installing the signal cb.
v5: Use cmpxchg to only set the error once, before using a nifty idea by
    Christian to avoid changing the status after emitting the signal.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Sumit Semwal <sumit.semwal@linaro.org>
Cc: Gustavo Padovan <gustavo@padovan.org>
Cc: Christian König <christian.koenig@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
---
 drivers/dma-buf/dma-fence-array.c | 32 ++++++++++++++++++++++++++++++-
 1 file changed, 31 insertions(+), 1 deletion(-)

diff --git a/drivers/dma-buf/dma-fence-array.c b/drivers/dma-buf/dma-fence-array.c
index 12c6f64c0bc2..d3fbd950be94 100644
--- a/drivers/dma-buf/dma-fence-array.c
+++ b/drivers/dma-buf/dma-fence-array.c
@@ -13,6 +13,8 @@
 #include <linux/slab.h>
 #include <linux/dma-fence-array.h>
 
+#define PENDING_ERROR 1
+
 static const char *dma_fence_array_get_driver_name(struct dma_fence *fence)
 {
 	return "dma_fence_array";
@@ -23,10 +25,29 @@ static const char *dma_fence_array_get_timeline_name(struct dma_fence *fence)
 	return "unbound";
 }
 
+static void dma_fence_array_set_pending_error(struct dma_fence_array *array,
+					      int error)
+{
+	/*
+	 * Propagate the first error reported by any of our fences, but only
+	 * before we ourselves are signaled.
+	 */
+	if (error)
+		cmpxchg(&array->base.error, PENDING_ERROR, error);
+}
+
+static void dma_fence_array_clear_pending_error(struct dma_fence_array *array)
+{
+	/* Clear the error flag if not actually set. */
+	cmpxchg(&array->base.error, PENDING_ERROR, 0);
+}
+
 static void irq_dma_fence_array_work(struct irq_work *wrk)
 {
 	struct dma_fence_array *array = container_of(wrk, typeof(*array), work);
 
+	dma_fence_array_clear_pending_error(array);
+
 	dma_fence_signal(&array->base);
 	dma_fence_put(&array->base);
 }
@@ -38,6 +59,8 @@ static void dma_fence_array_cb_func(struct dma_fence *f,
 		container_of(cb, struct dma_fence_array_cb, cb);
 	struct dma_fence_array *array = array_cb->array;
 
+	dma_fence_array_set_pending_error(array, f->error);
+
 	if (atomic_dec_and_test(&array->num_pending))
 		irq_work_queue(&array->work);
 	else
@@ -63,9 +86,14 @@ static bool dma_fence_array_enable_signaling(struct dma_fence *fence)
 		dma_fence_get(&array->base);
 		if (dma_fence_add_callback(array->fences[i], &cb[i].cb,
 					   dma_fence_array_cb_func)) {
+			int error = array->fences[i]->error;
+
+			dma_fence_array_set_pending_error(array, error);
 			dma_fence_put(&array->base);
-			if (atomic_dec_and_test(&array->num_pending))
+			if (atomic_dec_and_test(&array->num_pending)) {
+				dma_fence_array_clear_pending_error(array);
 				return false;
+			}
 		}
 	}
 
@@ -142,6 +170,8 @@ struct dma_fence_array *dma_fence_array_create(int num_fences,
 	atomic_set(&array->num_pending, signal_on_any ? 1 : num_fences);
 	array->fences = fences;
 
+	array->base.error = PENDING_ERROR;
+
 	return array;
 }
 EXPORT_SYMBOL(dma_fence_array_create);