On Mon, Aug 16, 2021 at 06:51:33AM -0700, Matthew Brost wrote:
A subsequent patch will flip the locking hierarchy from ce->guc_state.lock -> sched_engine->lock to sched_engine->lock -> ce->guc_state.lock. As such we need to release the submit fence for a request from an irq work to break a lock inversion - i.e. the fence must be released while holding ce->guc_state.lock, and the releasing of the fence can acquire sched_engine->lock.
Signed-off-by: Matthew Brost matthew.brost@intel.com
Title should be "irq work", otherwise it reads a bit strange. Also these kind of nestings would be good to document in the kerneldoc too (maybe as you go even). -Daniel
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c | 15 ++++++++++++++- drivers/gpu/drm/i915/i915_request.h | 5 +++++ 2 files changed, 19 insertions(+), 1 deletion(-)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c index 8c560ed14976..9ae4633aa7cb 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c @@ -2017,6 +2017,14 @@ static const struct intel_context_ops guc_context_ops = { .create_virtual = guc_create_virtual, };
+static void submit_work_cb(struct irq_work *wrk)
+{
+	struct i915_request *rq = container_of(wrk, typeof(*rq), submit_work);
+
+	might_lock(&rq->engine->sched_engine->lock);
+	i915_sw_fence_complete(&rq->submit);
+}
static void __guc_signal_context_fence(struct intel_context *ce) { struct i915_request *rq; @@ -2026,8 +2034,12 @@ static void __guc_signal_context_fence(struct intel_context *ce) if (!list_empty(&ce->guc_state.fences)) trace_intel_context_fence_release(ce);
+	/*
+	 * Use an IRQ to ensure locking order of sched_engine->lock ->
+	 * ce->guc_state.lock is preserved.
+	 */
 	list_for_each_entry(rq, &ce->guc_state.fences, guc_fence_link)
-		i915_sw_fence_complete(&rq->submit);
+		irq_work_queue(&rq->submit_work);
INIT_LIST_HEAD(&ce->guc_state.fences);
} @@ -2137,6 +2149,7 @@ static int guc_request_alloc(struct i915_request *rq) spin_lock_irqsave(&ce->guc_state.lock, flags); if (context_wait_for_deregister_to_register(ce) || context_pending_disable(ce)) {
+		init_irq_work(&rq->submit_work, submit_work_cb);
 		i915_sw_fence_await(&rq->submit);
 		list_add_tail(&rq->guc_fence_link, &ce->guc_state.fences);
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h index 1bc1349ba3c2..d818cfbfc41d 100644 --- a/drivers/gpu/drm/i915/i915_request.h +++ b/drivers/gpu/drm/i915/i915_request.h @@ -218,6 +218,11 @@ struct i915_request { }; struct llist_head execute_cb; struct i915_sw_fence semaphore;
+	/**
+	 * @submit_work: complete submit fence from an irq_work if needed for
+	 * locking hierarchy reasons.
+	 */
+	struct irq_work submit_work;
+
 	/*
 	 * A list of everyone we wait upon, and everyone who waits upon us.
-- 2.32.0