Daniel Vetter pointed out that locking in the GuC submission code was overly complicated; let's clean this up a bit before introducing more features in the GuC submission backend.
Also fix some CI failures, port fixes from our internal tree, and add a few more selftests for coverage.
Lastly, add some kernel DOC explaining how the GuC submission backend works.
v2: Fix logic error in 'Workaround reset G2H is received after schedule done G2H', don't propagate errors to dependent fences in execlists submission, resolve checkpatch issues, resend to correct lists
v3: Fix issue kicking tasklet, drop guc_active, fix ref counting in xarray, add guc_id sub structure, drop inline functions, and various other cleanup suggested by Daniel
v4: Address Daniele's feedback, rebase to tip, resend for CI
Signed-off-by: Matthew Brost matthew.brost@intel.com
Matthew Brost (27):
  drm/i915/guc: Fix blocked context accounting
  drm/i915/guc: Fix outstanding G2H accounting
  drm/i915/guc: Unwind context requests in reverse order
  drm/i915/guc: Don't drop ce->guc_active.lock when unwinding context
  drm/i915/guc: Process all G2H message at once in work queue
  drm/i915/guc: Workaround reset G2H is received after schedule done G2H
  Revert "drm/i915/gt: Propagate change in error status to children on unhold"
  drm/i915/selftests: Add a cancel request selftest that triggers a reset
  drm/i915/guc: Kick tasklet after queuing a request
  drm/i915/guc: Don't enable scheduling on a banned context, guc_id invalid, not registered
  drm/i915/guc: Copy whole golden context, set engine state size of subset
  drm/i915/selftests: Add initial GuC selftest for scrubbing lost G2H
  drm/i915/guc: Take context ref when cancelling request
  drm/i915/guc: Don't touch guc_state.sched_state without a lock
  drm/i915/guc: Reset LRC descriptor if register returns -ENODEV
  drm/i915: Allocate error capture in nowait context
  drm/i915/guc: Flush G2H work queue during reset
  drm/i915/guc: Release submit fence from an irq_work
  drm/i915/guc: Move guc_blocked fence to struct guc_state
  drm/i915/guc: Rework and simplify locking
  drm/i915/guc: Proper xarray usage for contexts_lookup
  drm/i915/guc: Drop pin count check trick between sched_disable and re-pin
  drm/i915/guc: Move GuC priority fields in context under guc_active
  drm/i915/guc: Move fields protected by guc->contexts_lock into sub structure
  drm/i915/guc: Drop guc_active move everything into guc_state
  drm/i915/guc: Add GuC kernel doc
  drm/i915/guc: Drop static inline functions intel_guc_submission.c
 drivers/gpu/drm/i915/gt/intel_context.c            |  19 +-
 drivers/gpu/drm/i915/gt/intel_context_types.h      |  81 +-
 .../drm/i915/gt/intel_execlists_submission.c       |   4 -
 drivers/gpu/drm/i915/gt/selftest_hangcheck.c       |   6 +-
 drivers/gpu/drm/i915/gt/uc/intel_guc.h             |  19 +-
 drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c         |  28 +-
 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c          |   6 +-
 .../gpu/drm/i915/gt/uc/intel_guc_submission.c      | 996 +++++++++++-------
 drivers/gpu/drm/i915/gt/uc/selftest_guc.c          | 127 +++
 drivers/gpu/drm/i915/i915_gpu_error.c              |  39 +-
 drivers/gpu/drm/i915/i915_request.h                |  23 +-
 drivers/gpu/drm/i915/i915_trace.h                  |  12 +-
 .../drm/i915/selftests/i915_live_selftests.h       |   1 +
 drivers/gpu/drm/i915/selftests/i915_request.c      | 100 ++
 .../i915/selftests/intel_scheduler_helpers.c       |  12 +
 .../i915/selftests/intel_scheduler_helpers.h       |   2 +
 16 files changed, 983 insertions(+), 492 deletions(-)
 create mode 100644 drivers/gpu/drm/i915/gt/uc/selftest_guc.c
Prior to this patch the blocked context counter was cleared on init_sched_state (used during context registration and resets), which is incorrect. This state needs to be persistent or the counter can read an incorrect value, resulting in scheduling never getting enabled again.
Fixes: 62eaf0ae217d ("drm/i915/guc: Support request cancellation") Signed-off-by: Matthew Brost matthew.brost@intel.com Reviewed-by: Daniel Vetter daniel.vetter@ffwll.ch Cc: stable@vger.kernel.org --- drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c index 87d8dc8f51b9..69faa39da178 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c @@ -152,7 +152,7 @@ static inline void init_sched_state(struct intel_context *ce) { /* Only should be called from guc_lrc_desc_pin() */ atomic_set(&ce->guc_sched_state_no_lock, 0); - ce->guc_state.sched_state = 0; + ce->guc_state.sched_state &= SCHED_STATE_BLOCKED_MASK; }
static inline bool
Fix a small race that could result in incorrect accounting of the number of outstanding G2H. Basically, prior to this patch we did not increment the number of outstanding G2H if we encountered a GT reset while sending a H2G. This was incorrect as the context state had already been updated to anticipate a G2H response, so the counter should be incremented.
Also always use helper when decrementing this value.
Fixes: f4eb1f3fe946 ("drm/i915/guc: Ensure G2H response has space in buffer") Signed-off-by: Matthew Brost matthew.brost@intel.com Cc: stable@vger.kernel.org --- .../gpu/drm/i915/gt/uc/intel_guc_submission.c | 23 ++++++++++--------- 1 file changed, 12 insertions(+), 11 deletions(-)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c index 69faa39da178..03a86da6011e 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c @@ -352,6 +352,12 @@ static inline void set_lrc_desc_registered(struct intel_guc *guc, u32 id, xa_unlock_irqrestore(&guc->context_lookup, flags); }
+static void decr_outstanding_submission_g2h(struct intel_guc *guc) +{ + if (atomic_dec_and_test(&guc->outstanding_submission_g2h)) + wake_up_all(&guc->ct.wq); +} + static int guc_submission_send_busy_loop(struct intel_guc *guc, const u32 *action, u32 len, @@ -360,11 +366,12 @@ static int guc_submission_send_busy_loop(struct intel_guc *guc, { int err;
- err = intel_guc_send_busy_loop(guc, action, len, g2h_len_dw, loop); - - if (!err && g2h_len_dw) + if (g2h_len_dw) atomic_inc(&guc->outstanding_submission_g2h);
+ err = intel_guc_send_busy_loop(guc, action, len, g2h_len_dw, loop); + GEM_BUG_ON(g2h_len_dw && err == -EBUSY); + return err; }
@@ -616,7 +623,7 @@ static void scrub_guc_desc_for_outstanding_g2h(struct intel_guc *guc) init_sched_state(ce);
if (pending_enable || destroyed || deregister) { - atomic_dec(&guc->outstanding_submission_g2h); + decr_outstanding_submission_g2h(guc); if (deregister) guc_signal_context_fence(ce); if (destroyed) { @@ -635,7 +642,7 @@ static void scrub_guc_desc_for_outstanding_g2h(struct intel_guc *guc) intel_engine_signal_breadcrumbs(ce->engine); } intel_context_sched_disable_unpin(ce); - atomic_dec(&guc->outstanding_submission_g2h); + decr_outstanding_submission_g2h(guc); spin_lock_irqsave(&ce->guc_state.lock, flags); guc_blocked_fence_complete(ce); spin_unlock_irqrestore(&ce->guc_state.lock, flags); @@ -2583,12 +2590,6 @@ g2h_context_lookup(struct intel_guc *guc, u32 desc_idx) return ce; }
-static void decr_outstanding_submission_g2h(struct intel_guc *guc) -{ - if (atomic_dec_and_test(&guc->outstanding_submission_g2h)) - wake_up_all(&guc->ct.wq); -} - int intel_guc_deregister_done_process_msg(struct intel_guc *guc, const u32 *msg, u32 len)
On 8/25/2021 8:23 PM, Matthew Brost wrote:
Fix a small race that could result in incorrect accounting of the number of outstanding G2H. Basically, prior to this patch we did not increment the number of outstanding G2H if we encountered a GT reset while sending a H2G. This was incorrect as the context state had already been updated to anticipate a G2H response, so the counter should be incremented.
Also always use helper when decrementing this value.
Fixes: f4eb1f3fe946 ("drm/i915/guc: Ensure G2H response has space in buffer") Signed-off-by: Matthew Brost matthew.brost@intel.com Cc: stable@vger.kernel.org
.../gpu/drm/i915/gt/uc/intel_guc_submission.c | 23 ++++++++++--------- 1 file changed, 12 insertions(+), 11 deletions(-)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c index 69faa39da178..03a86da6011e 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c @@ -352,6 +352,12 @@ static inline void set_lrc_desc_registered(struct intel_guc *guc, u32 id, xa_unlock_irqrestore(&guc->context_lookup, flags); }
+static void decr_outstanding_submission_g2h(struct intel_guc *guc)
+{
+	if (atomic_dec_and_test(&guc->outstanding_submission_g2h))
+		wake_up_all(&guc->ct.wq);
+}
+
 static int guc_submission_send_busy_loop(struct intel_guc *guc,
 					 const u32 *action,
 					 u32 len,
@@ -360,11 +366,12 @@ static int guc_submission_send_busy_loop(struct intel_guc *guc,
 {
 	int err;
 
-	err = intel_guc_send_busy_loop(guc, action, len, g2h_len_dw, loop);
-
-	if (!err && g2h_len_dw)
+	if (g2h_len_dw)
 		atomic_inc(&guc->outstanding_submission_g2h);
 
+	err = intel_guc_send_busy_loop(guc, action, len, g2h_len_dw, loop);
+	GEM_BUG_ON(g2h_len_dw && err == -EBUSY);
AFAICS having a return G2H is not tied to not returning -EBUSY; the only way to avoid -EBUSY seems to be for loop to be true. Maybe have instead:
GEM_BUG_ON(g2h_len_dw && !loop);
earlier on?
Daniele
On Thu, Aug 26, 2021 at 04:09:59PM -0700, Daniele Ceraolo Spurio wrote:
On 8/25/2021 8:23 PM, Matthew Brost wrote:
Fix a small race that could result in incorrect accounting of the number of outstanding G2H. Basically, prior to this patch we did not increment the number of outstanding G2H if we encountered a GT reset while sending a H2G. This was incorrect as the context state had already been updated to anticipate a G2H response, so the counter should be incremented.
Also always use helper when decrementing this value.
Fixes: f4eb1f3fe946 ("drm/i915/guc: Ensure G2H response has space in buffer") Signed-off-by: Matthew Brost matthew.brost@intel.com Cc: stable@vger.kernel.org
.../gpu/drm/i915/gt/uc/intel_guc_submission.c | 23 ++++++++++--------- 1 file changed, 12 insertions(+), 11 deletions(-)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c index 69faa39da178..03a86da6011e 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c @@ -352,6 +352,12 @@ static inline void set_lrc_desc_registered(struct intel_guc *guc, u32 id, xa_unlock_irqrestore(&guc->context_lookup, flags); } +static void decr_outstanding_submission_g2h(struct intel_guc *guc) +{
- if (atomic_dec_and_test(&guc->outstanding_submission_g2h))
wake_up_all(&guc->ct.wq);
+}
- static int guc_submission_send_busy_loop(struct intel_guc *guc, const u32 *action, u32 len,
@@ -360,11 +366,12 @@ static int guc_submission_send_busy_loop(struct intel_guc *guc, { int err;
- err = intel_guc_send_busy_loop(guc, action, len, g2h_len_dw, loop);
- if (!err && g2h_len_dw)
- if (g2h_len_dw) atomic_inc(&guc->outstanding_submission_g2h);
- err = intel_guc_send_busy_loop(guc, action, len, g2h_len_dw, loop);
- GEM_BUG_ON(g2h_len_dw && err == -EBUSY);
AFAICS having a return g2h is not tied to not returning EBUSY, the only way to avoid EBUSY seems to be for loop to be true. maybe have instead:
GEM_BUG_ON(g2h_len_dw && !loop);
earlier on?
Yep, that is better. Can you respin this for me while I'm out?
Matt
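For reference, a minimal sketch of what the respun helper could look like with the suggested assert folded in, assuming only the names from the hunk above (this is not the actual follow-up revision):

static int guc_submission_send_busy_loop(struct intel_guc *guc,
					 const u32 *action, u32 len,
					 u32 g2h_len_dw, bool loop)
{
	/*
	 * A non-looping send can fail with -EBUSY, which would leak the G2H
	 * credit reserved below, so a send that expects a G2H reply must be
	 * allowed to loop.
	 */
	GEM_BUG_ON(g2h_len_dw && !loop);

	if (g2h_len_dw)
		atomic_inc(&guc->outstanding_submission_g2h);

	return intel_guc_send_busy_loop(guc, action, len, g2h_len_dw, loop);
}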
When unwinding requests on a reset context, if other requests in the context are in the priority list the requests could be resubmitted out of seqno order. Traverse the list of active requests in reverse and append to the head of the priority list to fix this.
Fixes: eb5e7da736f3 ("drm/i915/guc: Reset implementation for new GuC interface") Signed-off-by: Matthew Brost matthew.brost@intel.com Reviewed-by: Daniele Ceraolo Spurio daniele.ceraolospurio@intel.com Cc: stable@vger.kernel.org --- drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c index 03a86da6011e..8b1a82cfb52d 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c @@ -804,9 +804,9 @@ __unwind_incomplete_requests(struct intel_context *ce)
spin_lock_irqsave(&sched_engine->lock, flags); spin_lock(&ce->guc_active.lock); - list_for_each_entry_safe(rq, rn, - &ce->guc_active.requests, - sched.link) { + list_for_each_entry_safe_reverse(rq, rn, + &ce->guc_active.requests, + sched.link) { if (i915_request_completed(rq)) continue;
@@ -823,7 +823,7 @@ __unwind_incomplete_requests(struct intel_context *ce) } GEM_BUG_ON(i915_sched_engine_is_empty(sched_engine));
- list_add_tail(&rq->sched.link, pl); + list_add(&rq->sched.link, pl); set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
spin_lock(&ce->guc_active.lock);
Don't drop ce->guc_active.lock when unwinding a context after reset. At one point we had to drop this because of a lock inversion but that is no longer the case. It is much safer to hold the lock so let's do that.
Fixes: eb5e7da736f3 ("drm/i915/guc: Reset implementation for new GuC interface") Reviewed-by: Daniele Ceraolo Spurio daniele.ceraolospurio@intel.com Signed-off-by: Matthew Brost matthew.brost@intel.com Cc: stable@vger.kernel.org --- drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c | 4 ---- 1 file changed, 4 deletions(-)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c index 8b1a82cfb52d..d94e7e1a876f 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c @@ -811,8 +811,6 @@ __unwind_incomplete_requests(struct intel_context *ce) continue;
list_del_init(&rq->sched.link); - spin_unlock(&ce->guc_active.lock); - __i915_request_unsubmit(rq);
/* Push the request back into the queue for later resubmission. */ @@ -825,8 +823,6 @@ __unwind_incomplete_requests(struct intel_context *ce)
list_add(&rq->sched.link, pl); set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags); - - spin_lock(&ce->guc_active.lock); } spin_unlock(&ce->guc_active.lock); spin_unlock_irqrestore(&sched_engine->lock, flags);
Rather than processing 1 G2H at a time and re-queuing the work queue if more messages exist, process all the G2H in a single pass of the work queue.
Signed-off-by: Matthew Brost matthew.brost@intel.com Reviewed-by: Daniele Ceraolo Spurio daniele.ceraolospurio@intel.com Cc: Daniel Vetter daniel.vetter@ffwll.ch Cc: Michal Wajdeczko michal.wajdeczko@intel.com --- drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c index 22b4733b55e2..20c710a74498 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c @@ -1042,9 +1042,9 @@ static void ct_incoming_request_worker_func(struct work_struct *w) container_of(w, struct intel_guc_ct, requests.worker); bool done;
- done = ct_process_incoming_requests(ct); - if (!done) - queue_work(system_unbound_wq, &ct->requests.worker); + do { + done = ct_process_incoming_requests(ct); + } while (!done); }
static int ct_handle_event(struct intel_guc_ct *ct, struct ct_incoming_msg *request)
If the context is reset as a result of the request cancellation the context reset G2H is received after the schedule disable done G2H, which is the wrong order. The schedule disable done G2H releases the waiting request cancellation code, which resubmits the context. This races with the context reset G2H, which also wants to resubmit the context, but in this case it really should be a NOP as the request cancellation code owns the resubmit. Use some clever tricks of checking the context state to seal this race until the GuC firmware is fixed.
v2: (Checkpatch)
 - Fix typos
v3: (Daniele)
 - State that this is a bug in the GuC firmware
Fixes: 62eaf0ae217d ("drm/i915/guc: Support request cancellation") Signed-off-by: Matthew Brost matthew.brost@intel.com Cc: stable@vger.kernel.org --- .../gpu/drm/i915/gt/uc/intel_guc_submission.c | 41 ++++++++++++++++--- 1 file changed, 35 insertions(+), 6 deletions(-)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c index d94e7e1a876f..592b421e1429 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c @@ -831,17 +831,33 @@ __unwind_incomplete_requests(struct intel_context *ce) static void __guc_reset_context(struct intel_context *ce, bool stalled) { struct i915_request *rq; + unsigned long flags; u32 head; + bool skip = false;
intel_context_get(ce);
/* - * GuC will implicitly mark the context as non-schedulable - * when it sends the reset notification. Make sure our state - * reflects this change. The context will be marked enabled - * on resubmission. + * GuC will implicitly mark the context as non-schedulable when it sends + * the reset notification. Make sure our state reflects this change. The + * context will be marked enabled on resubmission. + * + * XXX: If the context is reset as a result of the request cancellation + * this G2H is received after the schedule disable complete G2H which is + * wrong as this creates a race between the request cancellation code + * re-submitting the context and this G2H handler. This is a bug in the + * GuC but can be worked around in the meantime but converting this to a + * NOP if a pending enable is in flight as this indicates that a request + * cancellation has occurred. */ - clr_context_enabled(ce); + spin_lock_irqsave(&ce->guc_state.lock, flags); + if (likely(!context_pending_enable(ce))) + clr_context_enabled(ce); + else + skip = true; + spin_unlock_irqrestore(&ce->guc_state.lock, flags); + if (unlikely(skip)) + goto out_put;
rq = intel_context_find_active_request(ce); if (!rq) { @@ -860,6 +876,7 @@ static void __guc_reset_context(struct intel_context *ce, bool stalled) out_replay: guc_reset_state(ce, head, stalled); __unwind_incomplete_requests(ce); +out_put: intel_context_put(ce); }
@@ -1604,6 +1621,13 @@ static void guc_context_cancel_request(struct intel_context *ce, guc_reset_state(ce, intel_ring_wrap(ce->ring, rq->head), true); } + + /* + * XXX: Racey if context is reset, see comment in + * __guc_reset_context(). + */ + flush_work(&ce_to_guc(ce)->ct.requests.worker); + guc_context_unblock(ce); } } @@ -2718,7 +2742,12 @@ static void guc_handle_context_reset(struct intel_guc *guc, { trace_intel_context_reset(ce);
- if (likely(!intel_context_is_banned(ce))) { + /* + * XXX: Racey if request cancellation has occurred, see comment in + * __guc_reset_context(). + */ + if (likely(!intel_context_is_banned(ce) && + !context_blocked(ce))) { capture_error_state(guc, ce); guc_context_replay(ce); }
On 8/25/2021 8:23 PM, Matthew Brost wrote:
If the context is reset as a result of the request cancellation the context reset G2H is received after schedule disable done G2H which is the wrong order. The schedule disable done G2H releases the waiting request cancellation code which resubmits the context. This races with the context reset G2H which also wants to resubmit the context but in this case it really should be a NOP as request cancellation code owns the resubmit. Use some clever tricks of checking the context state to seal this race until the GuC firmware is fixed.
v2: (Checkpatch)
- Fix typos
v3: (Daniele)
- State that is a bug in the GuC firmware
Fixes: 62eaf0ae217d ("drm/i915/guc: Support request cancellation") Signed-off-by: Matthew Brost matthew.brost@intel.com Cc: stable@vger.kernel.org
Reviewed-by: Daniele Ceraolo Spurio daniele.ceraolospurio@intel.com
Daniele
Propagating errors to dependent fences is broken and can lead to errors from one client ending up in another. In 3761baae908a (Revert "drm/i915: Propagate errors on awaiting already signaled fences"), we attempted to get rid of fence error propagation but missed the case added in 8e9f84cf5cac ("drm/i915/gt: Propagate change in error status to children on unhold"). Revert that one too. This error was found by an up-and-coming selftest which triggers a reset during request cancellation and verifies that subsequent requests complete successfully.
v2: (Daniel Vetter)
 - Use revert
v3: (Jason)
 - Update commit message
References: '3761baae908a ("Revert "drm/i915: Propagate errors on awaiting already signaled fences"")' Signed-off-by: Matthew Brost matthew.brost@intel.com Reviewed-by: Daniel Vetter daniel.vetter@ffwll.ch --- drivers/gpu/drm/i915/gt/intel_execlists_submission.c | 4 ---- 1 file changed, 4 deletions(-)
diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c index de5f9c86b9a4..cafb0608ffb4 100644 --- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c +++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c @@ -2140,10 +2140,6 @@ static void __execlists_unhold(struct i915_request *rq) if (p->flags & I915_DEPENDENCY_WEAK) continue;
- /* Propagate any change in error status */ - if (rq->fence.error) - i915_request_set_error_once(w, rq->fence.error); - if (w->engine != rq->engine) continue;
Add a cancel request selftest that results in an engine reset to cancel the request as it is non-preemptable. Also insert a NOP request after the cancelled request and confirm that it completes successfully.
Signed-off-by: Matthew Brost matthew.brost@intel.com --- drivers/gpu/drm/i915/selftests/i915_request.c | 100 ++++++++++++++++++ 1 file changed, 100 insertions(+)
diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c index d67710d10615..e2c5db77f087 100644 --- a/drivers/gpu/drm/i915/selftests/i915_request.c +++ b/drivers/gpu/drm/i915/selftests/i915_request.c @@ -772,6 +772,98 @@ static int __cancel_completed(struct intel_engine_cs *engine) return err; }
+static int __cancel_reset(struct intel_engine_cs *engine) +{ + struct intel_context *ce; + struct igt_spinner spin; + struct i915_request *rq, *nop; + unsigned long preempt_timeout_ms; + int err = 0; + + preempt_timeout_ms = engine->props.preempt_timeout_ms; + engine->props.preempt_timeout_ms = 100; + + if (igt_spinner_init(&spin, engine->gt)) + goto out_restore; + + ce = intel_context_create(engine); + if (IS_ERR(ce)) { + err = PTR_ERR(ce); + goto out_spin; + } + + rq = igt_spinner_create_request(&spin, ce, MI_NOOP); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto out_ce; + } + + pr_debug("%s: Cancelling active request\n", engine->name); + i915_request_get(rq); + i915_request_add(rq); + if (!igt_wait_for_spinner(&spin, rq)) { + struct drm_printer p = drm_info_printer(engine->i915->drm.dev); + + pr_err("Failed to start spinner on %s\n", engine->name); + intel_engine_dump(engine, &p, "%s\n", engine->name); + err = -ETIME; + goto out_rq; + } + + nop = intel_context_create_request(ce); + if (IS_ERR(nop)) + goto out_nop; + i915_request_get(nop); + i915_request_add(nop); + + i915_request_cancel(rq, -EINTR); + + if (i915_request_wait(rq, 0, HZ) < 0) { + struct drm_printer p = drm_info_printer(engine->i915->drm.dev); + + pr_err("%s: Failed to cancel hung request\n", engine->name); + intel_engine_dump(engine, &p, "%s\n", engine->name); + err = -ETIME; + goto out_nop; + } + + if (rq->fence.error != -EINTR) { + pr_err("%s: fence not cancelled (%u)\n", + engine->name, rq->fence.error); + err = -EINVAL; + goto out_nop; + } + + if (i915_request_wait(nop, 0, HZ) < 0) { + struct drm_printer p = drm_info_printer(engine->i915->drm.dev); + + pr_err("%s: Failed to complete nop request\n", engine->name); + intel_engine_dump(engine, &p, "%s\n", engine->name); + err = -ETIME; + goto out_nop; + } + + if (nop->fence.error != 0) { + pr_err("%s: Nop request errored (%u)\n", + engine->name, nop->fence.error); + err = -EINVAL; + } + +out_nop: + i915_request_put(nop); +out_rq: + i915_request_put(rq); +out_ce: + intel_context_put(ce); +out_spin: + igt_spinner_fini(&spin); +out_restore: + engine->props.preempt_timeout_ms = preempt_timeout_ms; + if (err) + pr_err("%s: %s error %d\n", __func__, engine->name, err); + return err; +} + static int live_cancel_request(void *arg) { struct drm_i915_private *i915 = arg; @@ -804,6 +896,14 @@ static int live_cancel_request(void *arg) return err; if (err2) return err2; + + /* Expects reset so call outside of igt_live_test_* */ + err = __cancel_reset(engine); + if (err) + return err; + + if (igt_flush_test(i915)) + return -EIO; }
return 0;
On 26/08/2021 04:23, Matthew Brost wrote:
Add a cancel request selftest that results in an engine reset to cancel the request as it is non-preemptable. Also insert a NOP request after the cancelled request and confirm that it completes successfully.
Which patch fixes a problem this exposes in the execlists implementation?
Signed-off-by: Matthew Brost matthew.brost@intel.com
drivers/gpu/drm/i915/selftests/i915_request.c | 100 ++++++++++++++++++ 1 file changed, 100 insertions(+)
diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c index d67710d10615..e2c5db77f087 100644 --- a/drivers/gpu/drm/i915/selftests/i915_request.c +++ b/drivers/gpu/drm/i915/selftests/i915_request.c @@ -772,6 +772,98 @@ static int __cancel_completed(struct intel_engine_cs *engine) return err; }
+static int __cancel_reset(struct intel_engine_cs *engine) +{
- struct intel_context *ce;
- struct igt_spinner spin;
- struct i915_request *rq, *nop;
- unsigned long preempt_timeout_ms;
- int err = 0;
You may need to skip the test if preempt timeout is compiled out or if GPU reset is altogether disabled.
- preempt_timeout_ms = engine->props.preempt_timeout_ms;
- engine->props.preempt_timeout_ms = 100;
- if (igt_spinner_init(&spin, engine->gt))
goto out_restore;
- ce = intel_context_create(engine);
- if (IS_ERR(ce)) {
err = PTR_ERR(ce);
goto out_spin;
- }
- rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
- if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto out_ce;
- }
- pr_debug("%s: Cancelling active request\n", engine->name);
"active non-preemptable" perhaps?
- i915_request_get(rq);
- i915_request_add(rq);
- if (!igt_wait_for_spinner(&spin, rq)) {
struct drm_printer p = drm_info_printer(engine->i915->drm.dev);
pr_err("Failed to start spinner on %s\n", engine->name);
intel_engine_dump(engine, &p, "%s\n", engine->name);
err = -ETIME;
goto out_rq;
- }
- nop = intel_context_create_request(ce);
- if (IS_ERR(nop))
goto out_nop;
- i915_request_get(nop);
- i915_request_add(nop);
- i915_request_cancel(rq, -EINTR);
- if (i915_request_wait(rq, 0, HZ) < 0) {
struct drm_printer p = drm_info_printer(engine->i915->drm.dev);
pr_err("%s: Failed to cancel hung request\n", engine->name);
intel_engine_dump(engine, &p, "%s\n", engine->name);
err = -ETIME;
goto out_nop;
- }
- if (rq->fence.error != -EINTR) {
pr_err("%s: fence not cancelled (%u)\n",
engine->name, rq->fence.error);
err = -EINVAL;
goto out_nop;
- }
- if (i915_request_wait(nop, 0, HZ) < 0) {
struct drm_printer p = drm_info_printer(engine->i915->drm.dev);
pr_err("%s: Failed to complete nop request\n", engine->name);
intel_engine_dump(engine, &p, "%s\n", engine->name);
err = -ETIME;
goto out_nop;
- }
- if (nop->fence.error != 0) {
pr_err("%s: Nop request errored (%u)\n",
Maybe s/nop/innocent/ in the respective log messages?
engine->name, nop->fence.error);
err = -EINVAL;
- }
+out_nop:
- i915_request_put(nop);
+out_rq:
- i915_request_put(rq);
+out_ce:
- intel_context_put(ce);
+out_spin:
- igt_spinner_fini(&spin);
+out_restore:
- engine->props.preempt_timeout_ms = preempt_timeout_ms;
- if (err)
pr_err("%s: %s error %d\n", __func__, engine->name, err);
- return err;
+}
- static int live_cancel_request(void *arg) { struct drm_i915_private *i915 = arg;
@@ -804,6 +896,14 @@ static int live_cancel_request(void *arg) return err; if (err2) return err2;
/* Expects reset so call outside of igt_live_test_* */
Hm there are live tests like live_preempt_cancel which seemingly manage to do resets under the live test block.
Regards,
Tvrtko
On Thu, Aug 26, 2021 at 10:32:54AM +0100, Tvrtko Ursulin wrote:
On 26/08/2021 04:23, Matthew Brost wrote:
Add a cancel request selftest that results in an engine reset to cancel the request as it is non-preemptable. Also insert a NOP request after the cancelled request and confirm that it completes successfully.
Which patch fixes a problem this exposes in the execlists implementation?
https://patchwork.freedesktop.org/patch/451421/?series=93704&rev=6
Signed-off-by: Matthew Brost matthew.brost@intel.com
drivers/gpu/drm/i915/selftests/i915_request.c | 100 ++++++++++++++++++ 1 file changed, 100 insertions(+)
diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c index d67710d10615..e2c5db77f087 100644 --- a/drivers/gpu/drm/i915/selftests/i915_request.c +++ b/drivers/gpu/drm/i915/selftests/i915_request.c @@ -772,6 +772,98 @@ static int __cancel_completed(struct intel_engine_cs *engine) return err; } +static int __cancel_reset(struct intel_engine_cs *engine) +{
- struct intel_context *ce;
- struct igt_spinner spin;
- struct i915_request *rq, *nop;
- unsigned long preempt_timeout_ms;
- int err = 0;
You may need to skip the test if preempt timeout is compiled out or if GPU reset is altogether disabled.
Yes, probably. Will fix this.
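For illustration, the skip could sit at the top of __cancel_reset(), roughly like this (a sketch only; the exact gating helpers are assumptions, not the final patch):

	/* Skip if a preempt-timeout engine reset cannot happen on this config */
	if (!CONFIG_DRM_I915_PREEMPT_TIMEOUT ||
	    !intel_has_reset_engine(engine->gt))
		return 0;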
- preempt_timeout_ms = engine->props.preempt_timeout_ms;
- engine->props.preempt_timeout_ms = 100;
- if (igt_spinner_init(&spin, engine->gt))
goto out_restore;
- ce = intel_context_create(engine);
- if (IS_ERR(ce)) {
err = PTR_ERR(ce);
goto out_spin;
- }
- rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
- if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto out_ce;
- }
- pr_debug("%s: Cancelling active request\n", engine->name);
"active non-preemptable" perhaps?
Sure.
- i915_request_get(rq);
- i915_request_add(rq);
- if (!igt_wait_for_spinner(&spin, rq)) {
struct drm_printer p = drm_info_printer(engine->i915->drm.dev);
pr_err("Failed to start spinner on %s\n", engine->name);
intel_engine_dump(engine, &p, "%s\n", engine->name);
err = -ETIME;
goto out_rq;
- }
- nop = intel_context_create_request(ce);
- if (IS_ERR(nop))
goto out_nop;
- i915_request_get(nop);
- i915_request_add(nop);
- i915_request_cancel(rq, -EINTR);
- if (i915_request_wait(rq, 0, HZ) < 0) {
struct drm_printer p = drm_info_printer(engine->i915->drm.dev);
pr_err("%s: Failed to cancel hung request\n", engine->name);
intel_engine_dump(engine, &p, "%s\n", engine->name);
err = -ETIME;
goto out_nop;
- }
- if (rq->fence.error != -EINTR) {
pr_err("%s: fence not cancelled (%u)\n",
engine->name, rq->fence.error);
err = -EINVAL;
goto out_nop;
- }
- if (i915_request_wait(nop, 0, HZ) < 0) {
struct drm_printer p = drm_info_printer(engine->i915->drm.dev);
pr_err("%s: Failed to complete nop request\n", engine->name);
intel_engine_dump(engine, &p, "%s\n", engine->name);
err = -ETIME;
goto out_nop;
- }
- if (nop->fence.error != 0) {
pr_err("%s: Nop request errored (%u)\n",
Maybe s/nop/innocent/ in the respective log messages?
I kinda prefer NOP.
engine->name, nop->fence.error);
err = -EINVAL;
- }
+out_nop:
- i915_request_put(nop);
+out_rq:
- i915_request_put(rq);
+out_ce:
- intel_context_put(ce);
+out_spin:
- igt_spinner_fini(&spin);
+out_restore:
- engine->props.preempt_timeout_ms = preempt_timeout_ms;
- if (err)
pr_err("%s: %s error %d\n", __func__, engine->name, err);
- return err;
+}
- static int live_cancel_request(void *arg) { struct drm_i915_private *i915 = arg;
@@ -804,6 +896,14 @@ static int live_cancel_request(void *arg) return err; if (err2) return err2;
/* Expects reset so call outside of igt_live_test_* */
Hm there are live tests like live_preempt_cancel which seemingly manage to do resets under the live test block.
You can increment t->reset_global if a GT reset is expected. The problem is that only execlists does a GT reset while GuC submission does a GuC engine-based reset, so we'd have to put in a statement like this if it was within the begin / end block:
if !guc t->reset_global++
I'd just rather leave it as-is than bake execlists / GuC backend-specific knowledge into the test.
Matt
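For context, the backend-specific bookkeeping being avoided would look roughly like this inside the igt_live_test block (a hypothetical sketch, not proposed code; the reset_engine counter is an assumption):

	/* Hypothetical: tell igt_live_test which flavour of reset to expect */
	if (intel_engine_uses_guc(engine))
		t.reset_engine[engine->id]++;	/* GuC: per-engine reset */
	else
		t.reset_global++;		/* execlists: full GT reset */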
Kick the tasklet after queuing a request so it is submitted in a timely manner.
Fixes: 3a4cdf1982f0 ("drm/i915/guc: Implement GuC context operations for new inteface") Signed-off-by: Matthew Brost matthew.brost@intel.com Reviewed-by: Daniele Ceraolo Spurio daniele.ceraolospurio@intel.com --- drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c | 1 + 1 file changed, 1 insertion(+)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c index 592b421e1429..b30fdccc71d4 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c @@ -1047,6 +1047,7 @@ static inline void queue_request(struct i915_sched_engine *sched_engine, list_add_tail(&rq->sched.link, i915_sched_lookup_priolist(sched_engine, prio)); set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags); + tasklet_hi_schedule(&sched_engine->tasklet); }
static int guc_bypass_tasklet_submit(struct intel_guc *guc,
When unblocking a context, do not enable scheduling if the context is banned, guc_id invalid, or not registered.
v2: (Daniele) - Add helper for unblock
Fixes: 62eaf0ae217d ("drm/i915/guc: Support request cancellation") Signed-off-by: Matthew Brost matthew.brost@intel.com Reviewed-by: Daniele Ceraolo Spurio daniele.ceraolospurio@intel.com Cc: stable@vger.kernel.org --- .../gpu/drm/i915/gt/uc/intel_guc_submission.c | 22 ++++++++++++++++--- 1 file changed, 19 insertions(+), 3 deletions(-)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c index b30fdccc71d4..56f11accd6cc 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c @@ -148,6 +148,7 @@ static inline void clr_context_registered(struct intel_context *ce) #define SCHED_STATE_BLOCKED_SHIFT 4 #define SCHED_STATE_BLOCKED BIT(SCHED_STATE_BLOCKED_SHIFT) #define SCHED_STATE_BLOCKED_MASK (0xfff << SCHED_STATE_BLOCKED_SHIFT) + static inline void init_sched_state(struct intel_context *ce) { /* Only should be called from guc_lrc_desc_pin() */ @@ -1569,6 +1570,23 @@ static struct i915_sw_fence *guc_context_block(struct intel_context *ce) return &ce->guc_blocked; }
+#define SCHED_STATE_MULTI_BLOCKED_MASK \ + (SCHED_STATE_BLOCKED_MASK & ~SCHED_STATE_BLOCKED) +#define SCHED_STATE_NO_UNBLOCK \ + (SCHED_STATE_MULTI_BLOCKED_MASK | \ + SCHED_STATE_PENDING_DISABLE | \ + SCHED_STATE_BANNED) + +static bool context_cant_unblock(struct intel_context *ce) +{ + lockdep_assert_held(&ce->guc_state.lock); + + return (ce->guc_state.sched_state & SCHED_STATE_NO_UNBLOCK) || + context_guc_id_invalid(ce) || + !lrc_desc_registered(ce_to_guc(ce), ce->guc_id) || + !intel_context_is_pinned(ce); +} + static void guc_context_unblock(struct intel_context *ce) { struct intel_guc *guc = ce_to_guc(ce); @@ -1583,9 +1601,7 @@ static void guc_context_unblock(struct intel_context *ce) spin_lock_irqsave(&ce->guc_state.lock, flags);
if (unlikely(submission_disabled(guc) || - !intel_context_is_pinned(ce) || - context_pending_disable(ce) || - context_blocked(ce) > 1)) { + context_cant_unblock(ce))) { enable = false; } else { enable = true;
When the GuC does a media reset, it copies a golden context state back into the corrupted context's state. The address of the golden context and the size of the engine state restore are passed in via the GuC ADS. The i915 had a bug where it passed in the whole size of the golden context, not the size of the engine state to restore, resulting in memory corruption.
Also copy the entire golden context on init rather than just the engine state that is restored.
Fixes: 481d458caede ("drm/i915/guc: Add golden context to GuC ADS") Signed-off-by: Matthew Brost matthew.brost@intel.com --- drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c | 28 +++++++++++++++++----- 1 file changed, 22 insertions(+), 6 deletions(-)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c index 6926919bcac6..df2734bfe078 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c @@ -358,6 +358,11 @@ static int guc_prep_golden_context(struct intel_guc *guc, u8 engine_class, guc_class; struct guc_gt_system_info *info, local_info;
+ /* Skip execlist and PPGTT registers + HWSP */ + const u32 lr_hw_context_size = 80 * sizeof(u32); + const u32 skip_size = LRC_PPHWSP_SZ * PAGE_SIZE + + lr_hw_context_size; + /* * Reserve the memory for the golden contexts and point GuC at it but * leave it empty for now. The context data will be filled in later @@ -396,7 +401,18 @@ static int guc_prep_golden_context(struct intel_guc *guc, if (!blob) continue;
- blob->ads.eng_state_size[guc_class] = real_size; + /* + * This interface is slightly confusing. We need to pass the + * base address of the golden context and the engine state size + * which is not the size of the whole golden context, it is a + * subset that the GuC uses when doing a watchdog reset. The + * engine state size must match the size of the golden context + * minus the first part of the golden context that the GuC does + * not retore during reset. Currently no real way to verify this + * other than reading the GuC spec / code and ensuring the + * 'skip_size' below matches the value used in the GuC code. + */ + blob->ads.eng_state_size[guc_class] = real_size - skip_size; blob->ads.golden_context_lrca[guc_class] = addr_ggtt; addr_ggtt += alloc_size; } @@ -437,8 +453,8 @@ static void guc_init_golden_context(struct intel_guc *guc) u8 *ptr;
/* Skip execlist and PPGTT registers + HWSP */ - const u32 lr_hw_context_size = 80 * sizeof(u32); - const u32 skip_size = LRC_PPHWSP_SZ * PAGE_SIZE + + __maybe_unused const u32 lr_hw_context_size = 80 * sizeof(u32); + __maybe_unused const u32 skip_size = LRC_PPHWSP_SZ * PAGE_SIZE + lr_hw_context_size;
if (!intel_uc_uses_guc_submission(>->uc)) @@ -476,12 +492,12 @@ static void guc_init_golden_context(struct intel_guc *guc) continue; }
- GEM_BUG_ON(blob->ads.eng_state_size[guc_class] != real_size); + GEM_BUG_ON(blob->ads.eng_state_size[guc_class] != + real_size - skip_size); GEM_BUG_ON(blob->ads.golden_context_lrca[guc_class] != addr_ggtt); addr_ggtt += alloc_size;
- shmem_read(engine->default_state, skip_size, ptr + skip_size, - real_size - skip_size); + shmem_read(engine->default_state, 0, ptr, real_size); ptr += alloc_size; }
On 8/25/2021 8:23 PM, Matthew Brost wrote:
When the GuC does a media reset, it copies a golden context state back into the corrupted context's state. The address of the golden context and the size of the engine state restore are passed in via the GuC ADS. The i915 had a bug where it passed in the whole size of the golden context, not the size of the engine state to restore resulting in a memory corruption.
Also copy the entire golden context on init rather than just the engine state that is restored.
Fixes: 481d458caede ("drm/i915/guc: Add golden context to GuC ADS") Signed-off-by: Matthew Brost matthew.brost@intel.com
drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c | 28 +++++++++++++++++----- 1 file changed, 22 insertions(+), 6 deletions(-)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c index 6926919bcac6..df2734bfe078 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c @@ -358,6 +358,11 @@ static int guc_prep_golden_context(struct intel_guc *guc, u8 engine_class, guc_class; struct guc_gt_system_info *info, local_info;
- /* Skip execlist and PPGTT registers + HWSP */
- const u32 lr_hw_context_size = 80 * sizeof(u32);
- const u32 skip_size = LRC_PPHWSP_SZ * PAGE_SIZE +
lr_hw_context_size;
- /*
- Reserve the memory for the golden contexts and point GuC at it but
- leave it empty for now. The context data will be filled in later
@@ -396,7 +401,18 @@ static int guc_prep_golden_context(struct intel_guc *guc, if (!blob) continue;
blob->ads.eng_state_size[guc_class] = real_size;
/*
* This interface is slightly confusing. We need to pass the
* base address of the golden context and the engine state size
* which is not the size of the whole golden context, it is a
* subset that the GuC uses when doing a watchdog reset. The
* engine state size must match the size of the golden context
* minus the first part of the golden context that the GuC does
* not retore during reset. Currently no real way to verify this
* other than reading the GuC spec / code and ensuring the
* 'skip_size' below matches the value used in the GuC code.
*/
This last statement is incorrect. The skipped size is the PPHWSP + the execlists context. The size of the execlists context is defined in the specs (as part of the full context layout) and it is therefore not a magic number only available in the GuC code. With the comment fixed:
Reviewed-by: Daniele Ceraolo Spurio daniele.ceraolospurio@intel.com
Daniele
On 8/25/2021 20:23, Matthew Brost wrote:
When the GuC does a media reset, it copies a golden context state back into the corrupted context's state. The address of the golden context and the size of the engine state restore are passed in via the GuC ADS. The i915 had a bug where it passed in the whole size of the golden context, not the size of the engine state to restore resulting in a memory corruption.
Also copy the entire golden context on init rather than just the engine state that is restored.
Fixes: 481d458caede ("drm/i915/guc: Add golden context to GuC ADS") Signed-off-by: Matthew Brost matthew.brost@intel.com
drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c | 28 +++++++++++++++++----- 1 file changed, 22 insertions(+), 6 deletions(-)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c index 6926919bcac6..df2734bfe078 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c @@ -358,6 +358,11 @@ static int guc_prep_golden_context(struct intel_guc *guc, u8 engine_class, guc_class; struct guc_gt_system_info *info, local_info;
- /* Skip execlist and PPGTT registers + HWSP */
- const u32 lr_hw_context_size = 80 * sizeof(u32);
- const u32 skip_size = LRC_PPHWSP_SZ * PAGE_SIZE +
lr_hw_context_size;
- /*
- Reserve the memory for the golden contexts and point GuC at it but
- leave it empty for now. The context data will be filled in later
@@ -396,7 +401,18 @@ static int guc_prep_golden_context(struct intel_guc *guc, if (!blob) continue;
blob->ads.eng_state_size[guc_class] = real_size;
/*
* This interface is slightly confusing. We need to pass the
* base address of the golden context and the engine state size
* which is not the size of the whole golden context, it is a
* subset that the GuC uses when doing a watchdog reset. The
* engine state size must match the size of the golden context
* minus the first part of the golden context that the GuC does
* not retore during reset. Currently no real way to verify this
* other than reading the GuC spec / code and ensuring the
* 'skip_size' below matches the value used in the GuC code.
*/
blob->ads.golden_context_lrca[guc_class] = addr_ggtt; addr_ggtt += alloc_size; }blob->ads.eng_state_size[guc_class] = real_size - skip_size;
@@ -437,8 +453,8 @@ static void guc_init_golden_context(struct intel_guc *guc) u8 *ptr;
/* Skip execlist and PPGTT registers + HWSP */
- const u32 lr_hw_context_size = 80 * sizeof(u32);
- const u32 skip_size = LRC_PPHWSP_SZ * PAGE_SIZE +
- __maybe_unused const u32 lr_hw_context_size = 80 * sizeof(u32);
- __maybe_unused const u32 skip_size = LRC_PPHWSP_SZ * PAGE_SIZE + lr_hw_context_size;
Not sure why the 'maybe unused'? The values are not only used in BUG_ONs or such that could vanish.
More importantly, you now have two sets of definitions for these magic numbers. That seems like a very bad idea. They should be moved into a helper function rather than repeated.
John.
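For illustration, one possible shape for such a helper (the name is an assumption, not code from this series):

/*
 * Size of the golden context header the GuC does not restore on a
 * watchdog reset: the PPHWSP plus the execlists register state.
 */
static u32 guc_ads_golden_ctxt_skip_size(void)
{
	/* Skip execlist and PPGTT registers + HWSP */
	const u32 lr_hw_context_size = 80 * sizeof(u32);

	return LRC_PPHWSP_SZ * PAGE_SIZE + lr_hw_context_size;
}

Both guc_prep_golden_context() and guc_init_golden_context() could then call it instead of duplicating the constants.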
While debugging an issue with full GT resets I went down a rabbit hole thinking the scrubbing of lost G2H wasn't working correctly. This proved to be incorrect as it was working just fine, but the chase inspired me to write a selftest to prove that it works. This simple selftest injects errors by dropping various G2H and then issues a full GT reset, proving that the scrubbing of these G2H doesn't blow up.
v2: (Daniel Vetter)
 - Use ifdef instead of macros for selftests
v3: (Checkpatch)
 - A space after 'switch' statement
v4: (Daniele)
 - A comment saying GT won't idle if G2H are lost
Reviewed-by: Daniele Ceraolo Spurio daniele.ceraolospurio@intel.com Signed-off-by: Matthew Brost matthew.brost@intel.com --- drivers/gpu/drm/i915/gt/intel_context_types.h | 18 +++ .../gpu/drm/i915/gt/uc/intel_guc_submission.c | 25 ++++ drivers/gpu/drm/i915/gt/uc/selftest_guc.c | 127 ++++++++++++++++++ .../drm/i915/selftests/i915_live_selftests.h | 1 + .../i915/selftests/intel_scheduler_helpers.c | 12 ++ .../i915/selftests/intel_scheduler_helpers.h | 2 + 6 files changed, 185 insertions(+) create mode 100644 drivers/gpu/drm/i915/gt/uc/selftest_guc.c
diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h index e54351a170e2..3a73f3117873 100644 --- a/drivers/gpu/drm/i915/gt/intel_context_types.h +++ b/drivers/gpu/drm/i915/gt/intel_context_types.h @@ -198,6 +198,24 @@ struct intel_context { */ u8 guc_prio; u32 guc_prio_count[GUC_CLIENT_PRIORITY_NUM]; + +#ifdef CONFIG_DRM_I915_SELFTEST + /** + * @drop_schedule_enable: Force drop of schedule enable G2H for selftest + */ + bool drop_schedule_enable; + + /** + * @drop_schedule_disable: Force drop of schedule disable G2H for + * selftest + */ + bool drop_schedule_disable; + + /** + * @drop_deregister: Force drop of deregister G2H for selftest + */ + bool drop_deregister; +#endif };
#endif /* __INTEL_CONTEXT_TYPES__ */ diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c index 56f11accd6cc..5844bb954922 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c @@ -2645,6 +2645,13 @@ int intel_guc_deregister_done_process_msg(struct intel_guc *guc,
trace_intel_context_deregister_done(ce);
+#ifdef CONFIG_DRM_I915_SELFTEST + if (unlikely(ce->drop_deregister)) { + ce->drop_deregister = false; + return 0; + } +#endif + if (context_wait_for_deregister_to_register(ce)) { struct intel_runtime_pm *runtime_pm = &ce->engine->gt->i915->runtime_pm; @@ -2699,10 +2706,24 @@ int intel_guc_sched_done_process_msg(struct intel_guc *guc, trace_intel_context_sched_done(ce);
if (context_pending_enable(ce)) { +#ifdef CONFIG_DRM_I915_SELFTEST + if (unlikely(ce->drop_schedule_enable)) { + ce->drop_schedule_enable = false; + return 0; + } +#endif + clr_context_pending_enable(ce); } else if (context_pending_disable(ce)) { bool banned;
+#ifdef CONFIG_DRM_I915_SELFTEST + if (unlikely(ce->drop_schedule_disable)) { + ce->drop_schedule_disable = false; + return 0; + } +#endif + /* * Unpin must be done before __guc_signal_context_fence, * otherwise a race exists between the requests getting @@ -3079,3 +3100,7 @@ bool intel_guc_virtual_engine_has_heartbeat(const struct intel_engine_cs *ve)
return false; } + +#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) +#include "selftest_guc.c" +#endif diff --git a/drivers/gpu/drm/i915/gt/uc/selftest_guc.c b/drivers/gpu/drm/i915/gt/uc/selftest_guc.c new file mode 100644 index 000000000000..fb0e4a7bd8ca --- /dev/null +++ b/drivers/gpu/drm/i915/gt/uc/selftest_guc.c @@ -0,0 +1,127 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright �� 2021 Intel Corporation + */ + +#include "selftests/intel_scheduler_helpers.h" + +static struct i915_request *nop_user_request(struct intel_context *ce, + struct i915_request *from) +{ + struct i915_request *rq; + int ret; + + rq = intel_context_create_request(ce); + if (IS_ERR(rq)) + return rq; + + if (from) { + ret = i915_sw_fence_await_dma_fence(&rq->submit, + &from->fence, 0, + I915_FENCE_GFP); + if (ret < 0) { + i915_request_put(rq); + return ERR_PTR(ret); + } + } + + i915_request_get(rq); + i915_request_add(rq); + + return rq; +} + +static int intel_guc_scrub_ctbs(void *arg) +{ + struct intel_gt *gt = arg; + int ret = 0; + int i; + struct i915_request *last[3] = {NULL, NULL, NULL}, *rq; + intel_wakeref_t wakeref; + struct intel_engine_cs *engine; + struct intel_context *ce; + + wakeref = intel_runtime_pm_get(gt->uncore->rpm); + engine = intel_selftest_find_any_engine(gt); + + /* Submit requests and inject errors forcing G2H to be dropped */ + for (i = 0; i < 3; ++i) { + ce = intel_context_create(engine); + if (IS_ERR(ce)) { + ret = PTR_ERR(ce); + pr_err("Failed to create context, %d: %d\n", i, ret); + goto err; + } + + switch (i) { + case 0: + ce->drop_schedule_enable = true; + break; + case 1: + ce->drop_schedule_disable = true; + break; + case 2: + ce->drop_deregister = true; + break; + } + + rq = nop_user_request(ce, NULL); + intel_context_put(ce); + + if (IS_ERR(rq)) { + ret = PTR_ERR(rq); + pr_err("Failed to create request, %d: %d\n", i, ret); + goto err; + } + + last[i] = rq; + } + + for (i = 0; i < 3; ++i) { + ret = i915_request_wait(last[i], 0, HZ); + if (ret < 0) { + pr_err("Last request failed to complete: %d\n", ret); + goto err; + } + i915_request_put(last[i]); + last[i] = NULL; + } + + /* Force all H2G / G2H to be submitted / processed */ + intel_gt_retire_requests(gt); + msleep(500); + + /* Scrub missing G2H */ + intel_gt_handle_error(engine->gt, -1, 0, "selftest reset"); + + /* GT will not idle if G2H are lost */ + ret = intel_gt_wait_for_idle(gt, HZ); + if (ret < 0) { + pr_err("GT failed to idle: %d\n", ret); + goto err; + } + +err: + for (i = 0; i < 3; ++i) + if (last[i]) + i915_request_put(last[i]); + intel_runtime_pm_put(gt->uncore->rpm, wakeref); + + return ret; +} + +int intel_guc_live_selftests(struct drm_i915_private *i915) +{ + static const struct i915_subtest tests[] = { + SUBTEST(intel_guc_scrub_ctbs), + }; + struct intel_gt *gt = &i915->gt; + + if (intel_gt_is_wedged(gt)) + return 0; + + if (!intel_uc_uses_guc_submission(>->uc)) + return 0; + + return intel_gt_live_subtests(tests, gt); +} diff --git a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h index cfa5c4165a4f..3cf6758931f9 100644 --- a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h +++ b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h @@ -47,5 +47,6 @@ selftest(execlists, intel_execlists_live_selftests) selftest(ring_submission, intel_ring_submission_live_selftests) selftest(perf, i915_perf_live_selftests) selftest(slpc, intel_slpc_live_selftests) +selftest(guc, intel_guc_live_selftests) /* Here be dragons: keep last to run last! 
*/ selftest(late_gt_pm, intel_gt_pm_late_selftests) diff --git a/drivers/gpu/drm/i915/selftests/intel_scheduler_helpers.c b/drivers/gpu/drm/i915/selftests/intel_scheduler_helpers.c index 4b328346b48a..310fb83c527e 100644 --- a/drivers/gpu/drm/i915/selftests/intel_scheduler_helpers.c +++ b/drivers/gpu/drm/i915/selftests/intel_scheduler_helpers.c @@ -14,6 +14,18 @@ #define REDUCED_PREEMPT 10 #define WAIT_FOR_RESET_TIME 10000
+struct intel_engine_cs *intel_selftest_find_any_engine(struct intel_gt *gt) +{ + struct intel_engine_cs *engine; + enum intel_engine_id id; + + for_each_engine(engine, gt, id) + return engine; + + pr_err("No valid engine found!\n"); + return NULL; +} + int intel_selftest_modify_policy(struct intel_engine_cs *engine, struct intel_selftest_saved_policy *saved, u32 modify_type) diff --git a/drivers/gpu/drm/i915/selftests/intel_scheduler_helpers.h b/drivers/gpu/drm/i915/selftests/intel_scheduler_helpers.h index 35c098601ac0..ae60bb507f45 100644 --- a/drivers/gpu/drm/i915/selftests/intel_scheduler_helpers.h +++ b/drivers/gpu/drm/i915/selftests/intel_scheduler_helpers.h @@ -10,6 +10,7 @@
struct i915_request; struct intel_engine_cs; +struct intel_gt;
struct intel_selftest_saved_policy { u32 flags; @@ -23,6 +24,7 @@ enum selftest_scheduler_modify { SELFTEST_SCHEDULER_MODIFY_FAST_RESET, };
+struct intel_engine_cs *intel_selftest_find_any_engine(struct intel_gt *gt); int intel_selftest_modify_policy(struct intel_engine_cs *engine, struct intel_selftest_saved_policy *saved, enum selftest_scheduler_modify modify_type);
A context can be destroyed after cancelling a request if a context or GT reset occurs, so take a reference to the context when cancelling a request.
Fixes: 62eaf0ae217d ("drm/i915/guc: Support request cancellation") Signed-off-by: Matthew Brost matthew.brost@intel.com Reviewed-by: Daniele Ceraolo Spurio daniele.ceraolospurio@intel.com --- drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c index 5844bb954922..1cb97e98871c 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c @@ -1630,8 +1630,10 @@ static void guc_context_cancel_request(struct intel_context *ce, struct i915_request *rq) { if (i915_sw_fence_signaled(&rq->submit)) { - struct i915_sw_fence *fence = guc_context_block(ce); + struct i915_sw_fence *fence;
+ intel_context_get(ce); + fence = guc_context_block(ce); i915_sw_fence_wait(fence); if (!i915_request_completed(rq)) { __i915_request_skip(rq); @@ -1646,6 +1648,7 @@ static void guc_context_cancel_request(struct intel_context *ce, flush_work(&ce_to_guc(ce)->ct.requests.worker);
guc_context_unblock(ce); + intel_context_put(ce); } }
Previously we used some clever tricks to avoid taking a lock when touching guc_state.sched_state in certain cases. Don't do that; enforce the use of the lock.
Part of this is removing a dead code path from guc_lrc_desc_pin in which a context could be deregistered when that function was called from the submission path. Remove this dead code and add a GEM_BUG_ON should this path ever be attempted.
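As a rough illustration of the pattern this change enforces (a minimal sketch, not the exact driver code; guc_context_ban_sketch is a made-up caller name): every helper that touches guc_state.sched_state asserts the lock, and callers take it explicitly, so lockdep documents and checks the rule instead of relying on "only called from guc_lrc_desc_pin()" reasoning.

    static inline void set_context_banned(struct intel_context *ce)
    {
            lockdep_assert_held(&ce->guc_state.lock);
            ce->guc_state.sched_state |= SCHED_STATE_BANNED;
    }

    static void guc_context_ban_sketch(struct intel_context *ce)
    {
            unsigned long flags;

            spin_lock_irqsave(&ce->guc_state.lock, flags);
            set_context_banned(ce);
            spin_unlock_irqrestore(&ce->guc_state.lock, flags);
    }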
v2: (kernel test robot) - Add __maybe_unused to sched_state_is_init()
Signed-off-by: Matthew Brost matthew.brost@intel.com Reported-by: kernel test robot lkp@intel.com Reviewed-by: Daniele Ceraolo Spurio daniele.ceraolospurio@intel.com --- .../gpu/drm/i915/gt/uc/intel_guc_submission.c | 58 ++++++++++--------- 1 file changed, 32 insertions(+), 26 deletions(-)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c index 1cb97e98871c..901d867a4d90 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c @@ -151,11 +151,23 @@ static inline void clr_context_registered(struct intel_context *ce)
static inline void init_sched_state(struct intel_context *ce) { - /* Only should be called from guc_lrc_desc_pin() */ + lockdep_assert_held(&ce->guc_state.lock); atomic_set(&ce->guc_sched_state_no_lock, 0); ce->guc_state.sched_state &= SCHED_STATE_BLOCKED_MASK; }
+__maybe_unused +static bool sched_state_is_init(struct intel_context *ce) +{ + /* + * XXX: Kernel contexts can have SCHED_STATE_NO_LOCK_REGISTERED after + * suspend. + */ + return !(atomic_read(&ce->guc_sched_state_no_lock) & + ~SCHED_STATE_NO_LOCK_REGISTERED) && + !(ce->guc_state.sched_state &= ~SCHED_STATE_BLOCKED_MASK); +} + static inline bool context_wait_for_deregister_to_register(struct intel_context *ce) { @@ -166,7 +178,7 @@ context_wait_for_deregister_to_register(struct intel_context *ce) static inline void set_context_wait_for_deregister_to_register(struct intel_context *ce) { - /* Only should be called from guc_lrc_desc_pin() without lock */ + lockdep_assert_held(&ce->guc_state.lock); ce->guc_state.sched_state |= SCHED_STATE_WAIT_FOR_DEREGISTER_TO_REGISTER; } @@ -605,9 +617,7 @@ static void scrub_guc_desc_for_outstanding_g2h(struct intel_guc *guc) bool pending_disable, pending_enable, deregister, destroyed, banned;
xa_for_each(&guc->context_lookup, index, ce) { - /* Flush context */ spin_lock_irqsave(&ce->guc_state.lock, flags); - spin_unlock_irqrestore(&ce->guc_state.lock, flags);
/* * Once we are at this point submission_disabled() is guaranteed @@ -623,6 +633,8 @@ static void scrub_guc_desc_for_outstanding_g2h(struct intel_guc *guc) banned = context_banned(ce); init_sched_state(ce);
+ spin_unlock_irqrestore(&ce->guc_state.lock, flags); + if (pending_enable || destroyed || deregister) { decr_outstanding_submission_g2h(guc); if (deregister) @@ -1323,6 +1335,7 @@ static int guc_lrc_desc_pin(struct intel_context *ce, bool loop) int ret = 0;
GEM_BUG_ON(!engine->mask); + GEM_BUG_ON(!sched_state_is_init(ce));
/* * Ensure LRC + CT vmas are is same region as write barrier is done @@ -1351,7 +1364,6 @@ static int guc_lrc_desc_pin(struct intel_context *ce, bool loop) desc->priority = ce->guc_prio; desc->context_flags = CONTEXT_REGISTRATION_FLAG_KMD; guc_context_policy_init(engine, desc); - init_sched_state(ce);
/* * The context_lookup xarray is used to determine if the hardware @@ -1362,26 +1374,23 @@ static int guc_lrc_desc_pin(struct intel_context *ce, bool loop) * registering this context. */ if (context_registered) { + bool disabled; + unsigned long flags; + trace_intel_context_steal_guc_id(ce); - if (!loop) { + GEM_BUG_ON(!loop); + + /* Seal race with Reset */ + spin_lock_irqsave(&ce->guc_state.lock, flags); + disabled = submission_disabled(guc); + if (likely(!disabled)) { set_context_wait_for_deregister_to_register(ce); intel_context_get(ce); - } else { - bool disabled; - unsigned long flags; - - /* Seal race with Reset */ - spin_lock_irqsave(&ce->guc_state.lock, flags); - disabled = submission_disabled(guc); - if (likely(!disabled)) { - set_context_wait_for_deregister_to_register(ce); - intel_context_get(ce); - } - spin_unlock_irqrestore(&ce->guc_state.lock, flags); - if (unlikely(disabled)) { - reset_lrc_desc(guc, desc_idx); - return 0; /* Will get registered later */ - } + } + spin_unlock_irqrestore(&ce->guc_state.lock, flags); + if (unlikely(disabled)) { + reset_lrc_desc(guc, desc_idx); + return 0; /* Will get registered later */ }
/* @@ -1390,10 +1399,7 @@ static int guc_lrc_desc_pin(struct intel_context *ce, bool loop) */ with_intel_runtime_pm(runtime_pm, wakeref) ret = deregister_context(ce, ce->guc_id, loop); - if (unlikely(ret == -EBUSY)) { - clr_context_wait_for_deregister_to_register(ce); - intel_context_put(ce); - } else if (unlikely(ret == -ENODEV)) { + if (unlikely(ret == -ENODEV)) { ret = 0; /* Will get registered later */ } } else {
Reset LRC descriptor if a context register returns -ENODEV as this means we are mid-reset.
Fixes: eb5e7da736f3 ("drm/i915/guc: Reset implementation for new GuC interface") Signed-off-by: Matthew Brost matthew.brost@intel.com Reviewed-by: Daniele Ceraolo Spurio daniele.ceraolospurio@intel.com --- drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c index 901d867a4d90..2949dce57489 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c @@ -1405,10 +1405,12 @@ static int guc_lrc_desc_pin(struct intel_context *ce, bool loop) } else { with_intel_runtime_pm(runtime_pm, wakeref) ret = register_context(ce, loop); - if (unlikely(ret == -EBUSY)) + if (unlikely(ret == -EBUSY)) { + reset_lrc_desc(guc, desc_idx); + } else if (unlikely(ret == -ENODEV)) { reset_lrc_desc(guc, desc_idx); - else if (unlikely(ret == -ENODEV)) ret = 0; /* Will get registered later */ + } }
return ret;
Error captures can now be done in a work queue that processes G2H messages. The reset path must wait for that processing to fully complete to avoid races in the missing-G2H cleanup, which creates a dependency on memory allocations and dma fences (i915_requests). Requests in turn depend on resets, so we now have a circular dependency. To break it, allocate the error capture in a nowait context.
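As a minimal sketch of the idea (CAPTURE_MAYFAIL and capture_alloc() are made-up names for illustration; the real change is in the diff below): any allocation reachable from the G2H worker / reset path uses GFP_NOWAIT so it can never recurse into reclaim and end up waiting on the very reset that is waiting on us; on failure the capture is simply dropped.

    #include <linux/slab.h>

    #define CAPTURE_MAYFAIL (GFP_NOWAIT | __GFP_NOWARN)

    static void *capture_alloc(size_t len)
    {
            /*
             * No GFP_KERNEL fallback: failing the error capture is
             * preferable to deadlocking the reset path.
             */
            return kmalloc(len, CAPTURE_MAYFAIL);
    }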
v2: (Daniel Vetter) - Use GFP_NOWAIT instead of GFP_ATOMIC
Fixes: dc0dad365c5e ("Fix for error capture after full GPU reset with GuC") Fixes: 573ba126aef3 ("Capture error state on context reset") Signed-off-by: Matthew Brost matthew.brost@intel.com --- drivers/gpu/drm/i915/i915_gpu_error.c | 39 +++++++++++++-------------- 1 file changed, 19 insertions(+), 20 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index b9f66dbd46bb..8696ead02118 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -49,8 +49,7 @@ #include "i915_memcpy.h" #include "i915_scatterlist.h"
-#define ALLOW_FAIL (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN) -#define ATOMIC_MAYFAIL (GFP_ATOMIC | __GFP_NOWARN) +#define ATOMIC_MAYFAIL (GFP_NOWAIT | __GFP_NOWARN)
static void __sg_set_buf(struct scatterlist *sg, void *addr, unsigned int len, loff_t it) @@ -79,7 +78,7 @@ static bool __i915_error_grow(struct drm_i915_error_state_buf *e, size_t len) if (e->cur == e->end) { struct scatterlist *sgl;
- sgl = (typeof(sgl))__get_free_page(ALLOW_FAIL); + sgl = (typeof(sgl))__get_free_page(ATOMIC_MAYFAIL); if (!sgl) { e->err = -ENOMEM; return false; @@ -99,10 +98,10 @@ static bool __i915_error_grow(struct drm_i915_error_state_buf *e, size_t len) }
e->size = ALIGN(len + 1, SZ_64K); - e->buf = kmalloc(e->size, ALLOW_FAIL); + e->buf = kmalloc(e->size, ATOMIC_MAYFAIL); if (!e->buf) { e->size = PAGE_ALIGN(len + 1); - e->buf = kmalloc(e->size, GFP_KERNEL); + e->buf = kmalloc(e->size, ATOMIC_MAYFAIL); } if (!e->buf) { e->err = -ENOMEM; @@ -243,12 +242,12 @@ static bool compress_init(struct i915_vma_compress *c) { struct z_stream_s *zstream = &c->zstream;
- if (pool_init(&c->pool, ALLOW_FAIL)) + if (pool_init(&c->pool, ATOMIC_MAYFAIL)) return false;
zstream->workspace = kmalloc(zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL), - ALLOW_FAIL); + ATOMIC_MAYFAIL); if (!zstream->workspace) { pool_fini(&c->pool); return false; @@ -256,7 +255,7 @@ static bool compress_init(struct i915_vma_compress *c)
c->tmp = NULL; if (i915_has_memcpy_from_wc()) - c->tmp = pool_alloc(&c->pool, ALLOW_FAIL); + c->tmp = pool_alloc(&c->pool, ATOMIC_MAYFAIL);
return true; } @@ -280,7 +279,7 @@ static void *compress_next_page(struct i915_vma_compress *c, if (dst->page_count >= dst->num_pages) return ERR_PTR(-ENOSPC);
- page = pool_alloc(&c->pool, ALLOW_FAIL); + page = pool_alloc(&c->pool, ATOMIC_MAYFAIL); if (!page) return ERR_PTR(-ENOMEM);
@@ -376,7 +375,7 @@ struct i915_vma_compress {
static bool compress_init(struct i915_vma_compress *c) { - return pool_init(&c->pool, ALLOW_FAIL) == 0; + return pool_init(&c->pool, ATOMIC_MAYFAIL) == 0; }
static bool compress_start(struct i915_vma_compress *c) @@ -391,7 +390,7 @@ static int compress_page(struct i915_vma_compress *c, { void *ptr;
- ptr = pool_alloc(&c->pool, ALLOW_FAIL); + ptr = pool_alloc(&c->pool, ATOMIC_MAYFAIL); if (!ptr) return -ENOMEM;
@@ -1026,7 +1025,7 @@ i915_vma_coredump_create(const struct intel_gt *gt,
num_pages = min_t(u64, vma->size, vma->obj->base.size) >> PAGE_SHIFT; num_pages = DIV_ROUND_UP(10 * num_pages, 8); /* worstcase zlib growth */ - dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), ALLOW_FAIL); + dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), ATOMIC_MAYFAIL); if (!dst) return NULL;
@@ -1462,7 +1461,7 @@ capture_engine(struct intel_engine_cs *engine, struct i915_request *rq = NULL; unsigned long flags;
- ee = intel_engine_coredump_alloc(engine, GFP_KERNEL); + ee = intel_engine_coredump_alloc(engine, ATOMIC_MAYFAIL); if (!ee) return NULL;
@@ -1510,7 +1509,7 @@ gt_record_engines(struct intel_gt_coredump *gt, struct intel_engine_coredump *ee;
/* Refill our page pool before entering atomic section */ - pool_refill(&compress->pool, ALLOW_FAIL); + pool_refill(&compress->pool, ATOMIC_MAYFAIL);
ee = capture_engine(engine, compress); if (!ee) @@ -1536,7 +1535,7 @@ gt_record_uc(struct intel_gt_coredump *gt, const struct intel_uc *uc = >->_gt->uc; struct intel_uc_coredump *error_uc;
- error_uc = kzalloc(sizeof(*error_uc), ALLOW_FAIL); + error_uc = kzalloc(sizeof(*error_uc), ATOMIC_MAYFAIL); if (!error_uc) return NULL;
@@ -1547,8 +1546,8 @@ gt_record_uc(struct intel_gt_coredump *gt, * As modparams are generally accesible from the userspace make * explicit copies of the firmware paths. */ - error_uc->guc_fw.path = kstrdup(uc->guc.fw.path, ALLOW_FAIL); - error_uc->huc_fw.path = kstrdup(uc->huc.fw.path, ALLOW_FAIL); + error_uc->guc_fw.path = kstrdup(uc->guc.fw.path, ATOMIC_MAYFAIL); + error_uc->huc_fw.path = kstrdup(uc->huc.fw.path, ATOMIC_MAYFAIL); error_uc->guc_log = i915_vma_coredump_create(gt->_gt, uc->guc.log.vma, "GuC log buffer", @@ -1815,7 +1814,7 @@ i915_vma_capture_prepare(struct intel_gt_coredump *gt) { struct i915_vma_compress *compress;
- compress = kmalloc(sizeof(*compress), ALLOW_FAIL); + compress = kmalloc(sizeof(*compress), ATOMIC_MAYFAIL); if (!compress) return NULL;
@@ -1848,11 +1847,11 @@ i915_gpu_coredump(struct intel_gt *gt, intel_engine_mask_t engine_mask) if (IS_ERR(error)) return error;
- error = i915_gpu_coredump_alloc(i915, ALLOW_FAIL); + error = i915_gpu_coredump_alloc(i915, ATOMIC_MAYFAIL); if (!error) return ERR_PTR(-ENOMEM);
- error->gt = intel_gt_coredump_alloc(gt, ALLOW_FAIL); + error->gt = intel_gt_coredump_alloc(gt, ATOMIC_MAYFAIL); if (error->gt) { struct i915_vma_compress *compress;
On Wed, Aug 25, 2021 at 08:23:16PM -0700, Matthew Brost wrote:
Error captures can now be done in a work queue that processes G2H messages. The reset path must wait for that processing to fully complete to avoid races in the missing-G2H cleanup, which creates a dependency on memory allocations and dma fences (i915_requests). Requests in turn depend on resets, so we now have a circular dependency. To break it, allocate the error capture in a nowait context.
For completeness, Daniel suggested we include the lockdep splat; it is reproduced below:
[ 154.625989] ======================================================
[ 154.632195] WARNING: possible circular locking dependency detected
[ 154.638393] 5.14.0-rc5-guc+ #50 Tainted: G U
[ 154.643991] ------------------------------------------------------
[ 154.650196] i915_selftest/1673 is trying to acquire lock:
[ 154.655621] ffff8881079cb918 ((work_completion)(&ct->requests.worker)){+.+.}-{0:0}, at: __flush_work+0x350/0x4d0
[ 154.665826] but task is already holding lock:
[ 154.671682] ffff8881079cbfb8 (&gt->reset.mutex){+.+.}-{3:3}, at: intel_gt_reset+0xf0/0x300 [i915]
[ 154.680659] which lock already depends on the new lock.

[ 154.688857] the existing dependency chain (in reverse order) is:
[ 154.696365] -> #2 (&gt->reset.mutex){+.+.}-{3:3}:
[ 154.702571]        lock_acquire+0xd2/0x300
[ 154.706695]        i915_gem_shrinker_taints_mutex+0x2d/0x50 [i915]
[ 154.712959]        intel_gt_init_reset+0x61/0x80 [i915]
[ 154.718258]        intel_gt_init_early+0xe6/0x120 [i915]
[ 154.723648]        i915_driver_probe+0x592/0xdc0 [i915]
[ 154.728942]        i915_pci_probe+0x43/0x1c0 [i915]
[ 154.733891]        pci_device_probe+0x9b/0x110
[ 154.738362]        really_probe+0x1a6/0x3a0
[ 154.742568]        __driver_probe_device+0xf9/0x170
[ 154.747468]        driver_probe_device+0x19/0x90
[ 154.752114]        __driver_attach+0x99/0x170
[ 154.756492]        bus_for_each_dev+0x73/0xc0
[ 154.760870]        bus_add_driver+0x14b/0x1f0
[ 154.765248]        driver_register+0x67/0xb0
[ 154.769542]        i915_init+0x18/0x8c [i915]
[ 154.773964]        do_one_initcall+0x53/0x2e0
[ 154.778343]        do_init_module+0x56/0x210
[ 154.782639]        load_module+0x25fc/0x29f0
[ 154.786934]        __do_sys_finit_module+0xae/0x110
[ 154.791835]        do_syscall_64+0x38/0xc0
[ 154.795958]        entry_SYSCALL_64_after_hwframe+0x44/0xae
[ 154.801558] -> #1 (fs_reclaim){+.+.}-{0:0}:
[ 154.807241]        lock_acquire+0xd2/0x300
[ 154.811361]        fs_reclaim_acquire+0x9e/0xd0
[ 154.815914]        kmem_cache_alloc_trace+0x30/0x790
[ 154.820899]        i915_gpu_coredump_alloc+0x53/0x1a0 [i915]
[ 154.826649]        i915_gpu_coredump+0x39/0x560 [i915]
[ 154.831866]        i915_capture_error_state+0xa/0x70 [i915]
[ 154.837513]        intel_guc_context_reset_process_msg+0x174/0x1f0 [i915]
[ 154.844383]        ct_incoming_request_worker_func+0x130/0x1b0 [i915]
[ 154.850898]        process_one_work+0x264/0x590
[ 154.855451]        worker_thread+0x4b/0x3a0
[ 154.859655]        kthread+0x147/0x170
[ 154.863428]        ret_from_fork+0x1f/0x30
[ 154.867548] -> #0 ((work_completion)(&ct->requests.worker)){+.+.}-{0:0}:
[ 154.875747]        check_prev_add+0x90/0xc30
[ 154.880042]        __lock_acquire+0x1643/0x2110
[ 154.884595]        lock_acquire+0xd2/0x300
[ 154.888715]        __flush_work+0x373/0x4d0
[ 154.892920]        intel_guc_submission_reset_prepare+0xf3/0x340 [i915]
[ 154.899606]        intel_uc_reset_prepare+0x40/0x50 [i915]
[ 154.905166]        reset_prepare+0x55/0x60 [i915]
[ 154.909946]        intel_gt_reset+0x11c/0x300 [i915]
[ 154.914984]        do_device_reset+0x13/0x20 [i915]
[ 154.919936]        check_whitelist_across_reset+0x166/0x250 [i915]
[ 154.926212]        live_reset_whitelist.cold+0x6a/0x7a [i915]
[ 154.932037]        __i915_subtests.cold+0x20/0x74 [i915]
[ 154.937428]        __run_selftests.cold+0x96/0xee [i915]
[ 154.942816]        i915_live_selftests+0x2c/0x60 [i915]
[ 154.948125]        i915_pci_probe+0x93/0x1c0 [i915]
[ 154.953076]        pci_device_probe+0x9b/0x110
[ 154.957545]        really_probe+0x1a6/0x3a0
[ 154.961749]        __driver_probe_device+0xf9/0x170
[ 154.966653]        driver_probe_device+0x19/0x90
[ 154.971290]        __driver_attach+0x99/0x170
[ 154.975671]        bus_for_each_dev+0x73/0xc0
[ 154.980053]        bus_add_driver+0x14b/0x1f0
[ 154.984431]        driver_register+0x67/0xb0
[ 154.988725]        i915_init+0x18/0x8c [i915]
[ 154.993149]        do_one_initcall+0x53/0x2e0
[ 154.997527]        do_init_module+0x56/0x210
[ 155.001822]        load_module+0x25fc/0x29f0
[ 155.006118]        __do_sys_finit_module+0xae/0x110
[ 155.011019]        do_syscall_64+0x38/0xc0
[ 155.015139]        entry_SYSCALL_64_after_hwframe+0x44/0xae
[ 155.020729] other info that might help us debug this:

[ 155.028752] Chain exists of: (work_completion)(&ct->requests.worker) --> fs_reclaim --> &gt->reset.mutex

[ 155.041294] Possible unsafe locking scenario:

[ 155.047240]        CPU0                    CPU1
[ 155.051791]        ----                    ----
[ 155.056344]   lock(&gt->reset.mutex);
[ 155.060026]                                lock(fs_reclaim);
[ 155.065706]                                lock(&gt->reset.mutex);
[ 155.071912]   lock((work_completion)(&ct->requests.worker));
[ 155.077595]  *** DEADLOCK ***
On Wed, Aug 25, 2021 at 08:23:16PM -0700, Matthew Brost wrote:
Error captures can now be done in a work queue that processes G2H messages. The reset path must wait for that processing to fully complete to avoid races in the missing-G2H cleanup, which creates a dependency on memory allocations and dma fences (i915_requests). Requests in turn depend on resets, so we now have a circular dependency. To break it, allocate the error capture in a nowait context.
v2: (Daniel Vetter)
- Use GFP_NOWAIT instead of GFP_ATOMIC
Fixes: dc0dad365c5e ("Fix for error capture after full GPU reset with GuC") Fixes: 573ba126aef3 ("Capture error state on context reset") Signed-off-by: Matthew Brost matthew.brost@intel.com
Would be good to include an example splat here, since memory inversions are a bit wtf due to the fake lockdep locks involved. In general it's always good to put all the data you have into the commit message (maybe condensed down) so it's easier to dig out things again.
With that: Reviewed-by: Daniel Vetter daniel.vetter@ffwll.ch
It isn't safe to scrub for missing G2H or continue with the reset until all G2H processing is complete. Flush the G2H work queue during reset to ensure it is done running. No need to call the IRQ handler directly either as the scrubbing code can deal with any missing G2H.
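Sketched out, the reset-prepare ordering this patch establishes is simply the following (the wrapper name guc_reset_prepare_flush is made up for illustration; the two calls are the ones the diff below adds):

    static void guc_reset_prepare_flush(struct intel_guc *guc)
    {
            /* Let the CT worker finish any in-flight G2H processing... */
            flush_work(&guc->ct.requests.worker);

            /* ...then scrub state for G2H that will never arrive. */
            scrub_guc_desc_for_outstanding_g2h(guc);
    }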
Fixes: eb5e7da736f3 ("drm/i915/guc: Reset implementation for new GuC interface") Signed-off-by: Matthew Brost matthew.brost@intel.com Reviewed-by: Daniele Ceraolo Spurio daniele.ceraolospurio@intel.com --- .../gpu/drm/i915/gt/uc/intel_guc_submission.c | 18 ++---------------- 1 file changed, 2 insertions(+), 16 deletions(-)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c index 2949dce57489..baf789f37d42 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c @@ -714,8 +714,6 @@ static void guc_flush_submissions(struct intel_guc *guc)
void intel_guc_submission_reset_prepare(struct intel_guc *guc) { - int i; - if (unlikely(!guc_submission_initialized(guc))) { /* Reset called during driver load? GuC not yet initialised! */ return; @@ -731,20 +729,8 @@ void intel_guc_submission_reset_prepare(struct intel_guc *guc)
guc_flush_submissions(guc);
- /* - * Handle any outstanding G2Hs before reset. Call IRQ handler directly - * each pass as interrupt have been disabled. We always scrub for - * outstanding G2H as it is possible for outstanding_submission_g2h to - * be incremented after the context state update. - */ - for (i = 0; i < 4 && atomic_read(&guc->outstanding_submission_g2h); ++i) { - intel_guc_to_host_event_handler(guc); -#define wait_for_reset(guc, wait_var) \ - intel_guc_wait_for_pending_msg(guc, wait_var, false, (HZ / 20)) - do { - wait_for_reset(guc, &guc->outstanding_submission_g2h); - } while (!list_empty(&guc->ct.requests.incoming)); - } + flush_work(&guc->ct.requests.worker); + scrub_guc_desc_for_outstanding_g2h(guc); }
A subsequent patch will flip the locking hierarchy from ce->guc_state.lock -> sched_engine->lock to sched_engine->lock -> ce->guc_state.lock. As such we need to release the submit fence for a request from an irq_work to break a lock inversion - i.e. the fence is released while holding ce->guc_state.lock, and completing the fence can acquire sched_engine->lock.
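Condensed from the diff below, the deferral pattern looks roughly like this (a sketch, not the complete driver code; signal_fence_deferred is a made-up helper name, and in the real patch the irq_work is initialised at request-alloc time):

    static void submit_work_cb(struct irq_work *wrk)
    {
            struct i915_request *rq = container_of(wrk, typeof(*rq), submit_work);

            /*
             * Runs outside ce->guc_state.lock, so taking
             * sched_engine->lock from here does not invert the new
             * lock order.
             */
            i915_sw_fence_complete(&rq->submit);
    }

    /* Caller holds ce->guc_state.lock: defer the completion instead of
     * calling i915_sw_fence_complete() inline. */
    static void signal_fence_deferred(struct i915_request *rq)
    {
            init_irq_work(&rq->submit_work, submit_work_cb);
            irq_work_queue(&rq->submit_work);
    }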
v2: (Daniele) - Delete request from list before calling irq_work_queue
Reviewed-by: Daniele Ceraolo Spurio daniele.ceraolospurio@intel.com Signed-off-by: Matthew Brost matthew.brost@intel.com --- .../gpu/drm/i915/gt/uc/intel_guc_submission.c | 22 ++++++++++++++++--- drivers/gpu/drm/i915/i915_request.h | 5 +++++ 2 files changed, 24 insertions(+), 3 deletions(-)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c index baf789f37d42..c86aae0899e5 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c @@ -2035,17 +2035,32 @@ static const struct intel_context_ops guc_context_ops = { .create_virtual = guc_create_virtual, };
+static void submit_work_cb(struct irq_work *wrk) +{ + struct i915_request *rq = container_of(wrk, typeof(*rq), submit_work); + + might_lock(&rq->engine->sched_engine->lock); + i915_sw_fence_complete(&rq->submit); +} + static void __guc_signal_context_fence(struct intel_context *ce) { - struct i915_request *rq; + struct i915_request *rq, *rn;
lockdep_assert_held(&ce->guc_state.lock);
if (!list_empty(&ce->guc_state.fences)) trace_intel_context_fence_release(ce);
- list_for_each_entry(rq, &ce->guc_state.fences, guc_fence_link) - i915_sw_fence_complete(&rq->submit); + /* + * Use an IRQ to ensure locking order of sched_engine->lock -> + * ce->guc_state.lock is preserved. + */ + list_for_each_entry_safe(rq, rn, &ce->guc_state.fences, + guc_fence_link) { + list_del(&rq->guc_fence_link); + irq_work_queue(&rq->submit_work); + }
INIT_LIST_HEAD(&ce->guc_state.fences); } @@ -2155,6 +2170,7 @@ static int guc_request_alloc(struct i915_request *rq) spin_lock_irqsave(&ce->guc_state.lock, flags); if (context_wait_for_deregister_to_register(ce) || context_pending_disable(ce)) { + init_irq_work(&rq->submit_work, submit_work_cb); i915_sw_fence_await(&rq->submit);
list_add_tail(&rq->guc_fence_link, &ce->guc_state.fences); diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h index 1bc1349ba3c2..d818cfbfc41d 100644 --- a/drivers/gpu/drm/i915/i915_request.h +++ b/drivers/gpu/drm/i915/i915_request.h @@ -218,6 +218,11 @@ struct i915_request { }; struct llist_head execute_cb; struct i915_sw_fence semaphore; + /** + * @submit_work: complete submit fence from an IRQ if needed for + * locking hierarchy reasons. + */ + struct irq_work submit_work;
/* * A list of everyone we wait upon, and everyone who waits upon us.
Move guc_blocked fence to struct guc_state as the lock which protects the fence lives there.
s/ce->guc_blocked/ce->guc_state.blocked/g
v2: (Daniele) - s/blocked_fence/blocked/g
Reviewed-by: Daniele Ceraolo Spurio daniele.ceraolospurio@intel.com Signed-off-by: Matthew Brost matthew.brost@intel.com --- drivers/gpu/drm/i915/gt/intel_context.c | 5 +++-- drivers/gpu/drm/i915/gt/intel_context_types.h | 5 ++--- .../gpu/drm/i915/gt/uc/intel_guc_submission.c | 18 +++++++++--------- 3 files changed, 14 insertions(+), 14 deletions(-)
diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c index 745e84c72c90..3048267ddc7e 100644 --- a/drivers/gpu/drm/i915/gt/intel_context.c +++ b/drivers/gpu/drm/i915/gt/intel_context.c @@ -405,8 +405,9 @@ intel_context_init(struct intel_context *ce, struct intel_engine_cs *engine) * Initialize fence to be complete as this is expected to be complete * unless there is a pending schedule disable outstanding. */ - i915_sw_fence_init(&ce->guc_blocked, sw_fence_dummy_notify); - i915_sw_fence_commit(&ce->guc_blocked); + i915_sw_fence_init(&ce->guc_state.blocked, + sw_fence_dummy_notify); + i915_sw_fence_commit(&ce->guc_state.blocked);
i915_active_init(&ce->active, __intel_context_active, __intel_context_retire, 0); diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h index 3a73f3117873..5aecb9038b5b 100644 --- a/drivers/gpu/drm/i915/gt/intel_context_types.h +++ b/drivers/gpu/drm/i915/gt/intel_context_types.h @@ -167,6 +167,8 @@ struct intel_context { * fence related to GuC submission */ struct list_head fences; + /* GuC context blocked fence */ + struct i915_sw_fence blocked; } guc_state;
struct { @@ -190,9 +192,6 @@ struct intel_context { */ struct list_head guc_id_link;
- /* GuC context blocked fence */ - struct i915_sw_fence guc_blocked; - /* * GuC priority management */ diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c index c86aae0899e5..9d1eadd4b7c4 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c @@ -1488,24 +1488,24 @@ static void guc_blocked_fence_complete(struct intel_context *ce) { lockdep_assert_held(&ce->guc_state.lock);
- if (!i915_sw_fence_done(&ce->guc_blocked)) - i915_sw_fence_complete(&ce->guc_blocked); + if (!i915_sw_fence_done(&ce->guc_state.blocked)) + i915_sw_fence_complete(&ce->guc_state.blocked); }
static void guc_blocked_fence_reinit(struct intel_context *ce) { lockdep_assert_held(&ce->guc_state.lock); - GEM_BUG_ON(!i915_sw_fence_done(&ce->guc_blocked)); + GEM_BUG_ON(!i915_sw_fence_done(&ce->guc_state.blocked));
/* * This fence is always complete unless a pending schedule disable is * outstanding. We arm the fence here and complete it when we receive * the pending schedule disable complete message. */ - i915_sw_fence_fini(&ce->guc_blocked); - i915_sw_fence_reinit(&ce->guc_blocked); - i915_sw_fence_await(&ce->guc_blocked); - i915_sw_fence_commit(&ce->guc_blocked); + i915_sw_fence_fini(&ce->guc_state.blocked); + i915_sw_fence_reinit(&ce->guc_state.blocked); + i915_sw_fence_await(&ce->guc_state.blocked); + i915_sw_fence_commit(&ce->guc_state.blocked); }
static u16 prep_context_pending_disable(struct intel_context *ce) @@ -1545,7 +1545,7 @@ static struct i915_sw_fence *guc_context_block(struct intel_context *ce) if (enabled) clr_context_enabled(ce); spin_unlock_irqrestore(&ce->guc_state.lock, flags); - return &ce->guc_blocked; + return &ce->guc_state.blocked; }
/* @@ -1561,7 +1561,7 @@ static struct i915_sw_fence *guc_context_block(struct intel_context *ce) with_intel_runtime_pm(runtime_pm, wakeref) __guc_context_sched_disable(guc, ce, guc_id);
- return &ce->guc_blocked; + return &ce->guc_state.blocked; }
#define SCHED_STATE_MULTI_BLOCKED_MASK \
Rework and simplify the locking with GuC submission. Drop sched_state_no_lock, move all fields under guc_state.sched_state, and protect them all with guc_state.lock. This requires changing the locking hierarchy from guc_state.lock -> sched_engine.lock to sched_engine.lock -> guc_state.lock.
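The new nesting, condensed from guc_add_request() in the diff below (a sketch with the submission details elided; do_submit() is a placeholder, not a real driver function):

    static int do_submit(struct intel_guc *guc, struct i915_request *rq);

    static int guc_add_request_sketch(struct intel_guc *guc,
                                      struct i915_request *rq)
    {
            struct intel_context *ce = rq->context;
            int err;

            /* Outer lock: held by the tasklet / submission path. */
            lockdep_assert_held(&rq->engine->sched_engine->lock);

            /* Inner lock: protects all of ce->guc_state.sched_state. */
            spin_lock(&ce->guc_state.lock);
            err = do_submit(guc, rq);
            spin_unlock(&ce->guc_state.lock);

            return err;
    }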
v2: (Daniele) - Don't check fields outside of the lock during sched disable; check fewer fields within the lock as some of the outside checks are no longer needed
Reviewed-by: Daniele Ceraolo Spurio daniele.ceraolospurio@intel.com Signed-off-by: Matthew Brost matthew.brost@intel.com --- drivers/gpu/drm/i915/gt/intel_context_types.h | 5 +- .../gpu/drm/i915/gt/uc/intel_guc_submission.c | 200 ++++++++---------- drivers/gpu/drm/i915/i915_trace.h | 6 +- 3 files changed, 89 insertions(+), 122 deletions(-)
diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h index 5aecb9038b5b..d2f798ef678c 100644 --- a/drivers/gpu/drm/i915/gt/intel_context_types.h +++ b/drivers/gpu/drm/i915/gt/intel_context_types.h @@ -161,7 +161,7 @@ struct intel_context { * sched_state: scheduling state of this context using GuC * submission */ - u16 sched_state; + u32 sched_state; /* * fences: maintains of list of requests that have a submit * fence related to GuC submission @@ -178,9 +178,6 @@ struct intel_context { struct list_head requests; } guc_active;
- /* GuC scheduling state flags that do not require a lock. */ - atomic_t guc_sched_state_no_lock; - /* GuC LRC descriptor ID */ u16 guc_id;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c index 9d1eadd4b7c4..2aa25cc4ac4b 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c @@ -72,87 +72,24 @@ guc_create_virtual(struct intel_engine_cs **siblings, unsigned int count);
#define GUC_REQUEST_SIZE 64 /* bytes */
-/* - * Below is a set of functions which control the GuC scheduling state which do - * not require a lock as all state transitions are mutually exclusive. i.e. It - * is not possible for the context pinning code and submission, for the same - * context, to be executing simultaneously. We still need an atomic as it is - * possible for some of the bits to changing at the same time though. - */ -#define SCHED_STATE_NO_LOCK_ENABLED BIT(0) -#define SCHED_STATE_NO_LOCK_PENDING_ENABLE BIT(1) -#define SCHED_STATE_NO_LOCK_REGISTERED BIT(2) -static inline bool context_enabled(struct intel_context *ce) -{ - return (atomic_read(&ce->guc_sched_state_no_lock) & - SCHED_STATE_NO_LOCK_ENABLED); -} - -static inline void set_context_enabled(struct intel_context *ce) -{ - atomic_or(SCHED_STATE_NO_LOCK_ENABLED, &ce->guc_sched_state_no_lock); -} - -static inline void clr_context_enabled(struct intel_context *ce) -{ - atomic_and((u32)~SCHED_STATE_NO_LOCK_ENABLED, - &ce->guc_sched_state_no_lock); -} - -static inline bool context_pending_enable(struct intel_context *ce) -{ - return (atomic_read(&ce->guc_sched_state_no_lock) & - SCHED_STATE_NO_LOCK_PENDING_ENABLE); -} - -static inline void set_context_pending_enable(struct intel_context *ce) -{ - atomic_or(SCHED_STATE_NO_LOCK_PENDING_ENABLE, - &ce->guc_sched_state_no_lock); -} - -static inline void clr_context_pending_enable(struct intel_context *ce) -{ - atomic_and((u32)~SCHED_STATE_NO_LOCK_PENDING_ENABLE, - &ce->guc_sched_state_no_lock); -} - -static inline bool context_registered(struct intel_context *ce) -{ - return (atomic_read(&ce->guc_sched_state_no_lock) & - SCHED_STATE_NO_LOCK_REGISTERED); -} - -static inline void set_context_registered(struct intel_context *ce) -{ - atomic_or(SCHED_STATE_NO_LOCK_REGISTERED, - &ce->guc_sched_state_no_lock); -} - -static inline void clr_context_registered(struct intel_context *ce) -{ - atomic_and((u32)~SCHED_STATE_NO_LOCK_REGISTERED, - &ce->guc_sched_state_no_lock); -} - /* * Below is a set of functions which control the GuC scheduling state which - * require a lock, aside from the special case where the functions are called - * from guc_lrc_desc_pin(). In that case it isn't possible for any other code - * path to be executing on the context. + * require a lock. */ #define SCHED_STATE_WAIT_FOR_DEREGISTER_TO_REGISTER BIT(0) #define SCHED_STATE_DESTROYED BIT(1) #define SCHED_STATE_PENDING_DISABLE BIT(2) #define SCHED_STATE_BANNED BIT(3) -#define SCHED_STATE_BLOCKED_SHIFT 4 +#define SCHED_STATE_ENABLED BIT(4) +#define SCHED_STATE_PENDING_ENABLE BIT(5) +#define SCHED_STATE_REGISTERED BIT(6) +#define SCHED_STATE_BLOCKED_SHIFT 7 #define SCHED_STATE_BLOCKED BIT(SCHED_STATE_BLOCKED_SHIFT) #define SCHED_STATE_BLOCKED_MASK (0xfff << SCHED_STATE_BLOCKED_SHIFT)
static inline void init_sched_state(struct intel_context *ce) { lockdep_assert_held(&ce->guc_state.lock); - atomic_set(&ce->guc_sched_state_no_lock, 0); ce->guc_state.sched_state &= SCHED_STATE_BLOCKED_MASK; }
@@ -163,9 +100,8 @@ static bool sched_state_is_init(struct intel_context *ce) * XXX: Kernel contexts can have SCHED_STATE_NO_LOCK_REGISTERED after * suspend. */ - return !(atomic_read(&ce->guc_sched_state_no_lock) & - ~SCHED_STATE_NO_LOCK_REGISTERED) && - !(ce->guc_state.sched_state &= ~SCHED_STATE_BLOCKED_MASK); + return !(ce->guc_state.sched_state &= + ~(SCHED_STATE_BLOCKED_MASK | SCHED_STATE_REGISTERED)); }
static inline bool @@ -238,6 +174,57 @@ static inline void clr_context_banned(struct intel_context *ce) ce->guc_state.sched_state &= ~SCHED_STATE_BANNED; }
+static inline bool context_enabled(struct intel_context *ce) +{ + return ce->guc_state.sched_state & SCHED_STATE_ENABLED; +} + +static inline void set_context_enabled(struct intel_context *ce) +{ + lockdep_assert_held(&ce->guc_state.lock); + ce->guc_state.sched_state |= SCHED_STATE_ENABLED; +} + +static inline void clr_context_enabled(struct intel_context *ce) +{ + lockdep_assert_held(&ce->guc_state.lock); + ce->guc_state.sched_state &= ~SCHED_STATE_ENABLED; +} + +static inline bool context_pending_enable(struct intel_context *ce) +{ + return ce->guc_state.sched_state & SCHED_STATE_PENDING_ENABLE; +} + +static inline void set_context_pending_enable(struct intel_context *ce) +{ + lockdep_assert_held(&ce->guc_state.lock); + ce->guc_state.sched_state |= SCHED_STATE_PENDING_ENABLE; +} + +static inline void clr_context_pending_enable(struct intel_context *ce) +{ + lockdep_assert_held(&ce->guc_state.lock); + ce->guc_state.sched_state &= ~SCHED_STATE_PENDING_ENABLE; +} + +static inline bool context_registered(struct intel_context *ce) +{ + return ce->guc_state.sched_state & SCHED_STATE_REGISTERED; +} + +static inline void set_context_registered(struct intel_context *ce) +{ + lockdep_assert_held(&ce->guc_state.lock); + ce->guc_state.sched_state |= SCHED_STATE_REGISTERED; +} + +static inline void clr_context_registered(struct intel_context *ce) +{ + lockdep_assert_held(&ce->guc_state.lock); + ce->guc_state.sched_state &= ~SCHED_STATE_REGISTERED; +} + static inline u32 context_blocked(struct intel_context *ce) { return (ce->guc_state.sched_state & SCHED_STATE_BLOCKED_MASK) >> @@ -246,7 +233,6 @@ static inline u32 context_blocked(struct intel_context *ce)
static inline void incr_context_blocked(struct intel_context *ce) { - lockdep_assert_held(&ce->engine->sched_engine->lock); lockdep_assert_held(&ce->guc_state.lock);
ce->guc_state.sched_state += SCHED_STATE_BLOCKED; @@ -256,7 +242,6 @@ static inline void incr_context_blocked(struct intel_context *ce)
static inline void decr_context_blocked(struct intel_context *ce) { - lockdep_assert_held(&ce->engine->sched_engine->lock); lockdep_assert_held(&ce->guc_state.lock);
GEM_BUG_ON(!context_blocked(ce)); /* Underflow check */ @@ -450,6 +435,8 @@ static int guc_add_request(struct intel_guc *guc, struct i915_request *rq) u32 g2h_len_dw = 0; bool enabled;
+ lockdep_assert_held(&rq->engine->sched_engine->lock); + /* * Corner case where requests were sitting in the priority list or a * request resubmitted after the context was banned. @@ -457,7 +444,7 @@ static int guc_add_request(struct intel_guc *guc, struct i915_request *rq) if (unlikely(intel_context_is_banned(ce))) { i915_request_put(i915_request_mark_eio(rq)); intel_engine_signal_breadcrumbs(ce->engine); - goto out; + return 0; }
GEM_BUG_ON(!atomic_read(&ce->guc_id_ref)); @@ -470,9 +457,11 @@ static int guc_add_request(struct intel_guc *guc, struct i915_request *rq) if (unlikely(!lrc_desc_registered(guc, ce->guc_id))) { err = guc_lrc_desc_pin(ce, false); if (unlikely(err)) - goto out; + return err; }
+ spin_lock(&ce->guc_state.lock); + /* * The request / context will be run on the hardware when scheduling * gets enabled in the unblock. @@ -507,6 +496,7 @@ static int guc_add_request(struct intel_guc *guc, struct i915_request *rq) trace_i915_request_guc_submit(rq);
out: + spin_unlock(&ce->guc_state.lock); return err; }
@@ -727,8 +717,6 @@ void intel_guc_submission_reset_prepare(struct intel_guc *guc) spin_lock_irq(&guc_to_gt(guc)->irq_lock); spin_unlock_irq(&guc_to_gt(guc)->irq_lock);
- guc_flush_submissions(guc); - flush_work(&guc->ct.requests.worker);
scrub_guc_desc_for_outstanding_g2h(guc); @@ -1131,7 +1119,11 @@ static int steal_guc_id(struct intel_guc *guc)
list_del_init(&ce->guc_id_link); guc_id = ce->guc_id; + + spin_lock(&ce->guc_state.lock); clr_context_registered(ce); + spin_unlock(&ce->guc_state.lock); + set_context_guc_id_invalid(ce); return guc_id; } else { @@ -1167,6 +1159,8 @@ static int pin_guc_id(struct intel_guc *guc, struct intel_context *ce) try_again: spin_lock_irqsave(&guc->contexts_lock, flags);
+ might_lock(&ce->guc_state.lock); + if (context_guc_id_invalid(ce)) { ret = assign_guc_id(guc, &ce->guc_id); if (ret) @@ -1246,8 +1240,13 @@ static int register_context(struct intel_context *ce, bool loop) trace_intel_context_register(ce);
ret = __guc_action_register_context(guc, ce->guc_id, offset, loop); - if (likely(!ret)) + if (likely(!ret)) { + unsigned long flags; + + spin_lock_irqsave(&ce->guc_state.lock, flags); set_context_registered(ce); + spin_unlock_irqrestore(&ce->guc_state.lock, flags); + }
return ret; } @@ -1523,7 +1522,6 @@ static u16 prep_context_pending_disable(struct intel_context *ce) static struct i915_sw_fence *guc_context_block(struct intel_context *ce) { struct intel_guc *guc = ce_to_guc(ce); - struct i915_sched_engine *sched_engine = ce->engine->sched_engine; unsigned long flags; struct intel_runtime_pm *runtime_pm = ce->engine->uncore->rpm; intel_wakeref_t wakeref; @@ -1532,13 +1530,7 @@ static struct i915_sw_fence *guc_context_block(struct intel_context *ce)
spin_lock_irqsave(&ce->guc_state.lock, flags);
- /* - * Sync with submission path, increment before below changes to context - * state. - */ - spin_lock(&sched_engine->lock); incr_context_blocked(ce); - spin_unlock(&sched_engine->lock);
enabled = context_enabled(ce); if (unlikely(!enabled || submission_disabled(guc))) { @@ -1584,7 +1576,6 @@ static bool context_cant_unblock(struct intel_context *ce) static void guc_context_unblock(struct intel_context *ce) { struct intel_guc *guc = ce_to_guc(ce); - struct i915_sched_engine *sched_engine = ce->engine->sched_engine; unsigned long flags; struct intel_runtime_pm *runtime_pm = ce->engine->uncore->rpm; intel_wakeref_t wakeref; @@ -1604,13 +1595,7 @@ static void guc_context_unblock(struct intel_context *ce) intel_context_get(ce); }
- /* - * Sync with submission path, decrement after above changes to context - * state. - */ - spin_lock(&sched_engine->lock); decr_context_blocked(ce); - spin_unlock(&sched_engine->lock);
spin_unlock_irqrestore(&ce->guc_state.lock, flags);
@@ -1716,16 +1701,6 @@ static void guc_context_sched_disable(struct intel_context *ce) struct intel_runtime_pm *runtime_pm = &ce->engine->gt->i915->runtime_pm; intel_wakeref_t wakeref; u16 guc_id; - bool enabled; - - if (submission_disabled(guc) || context_guc_id_invalid(ce) || - !lrc_desc_registered(guc, ce->guc_id)) { - clr_context_enabled(ce); - goto unpin; - } - - if (!context_enabled(ce)) - goto unpin;
spin_lock_irqsave(&ce->guc_state.lock, flags);
@@ -1739,10 +1714,8 @@ static void guc_context_sched_disable(struct intel_context *ce) * sleep) ensures another process doesn't pin this context and generate * a request before we set the 'context_pending_disable' flag here. */ - enabled = context_enabled(ce); - if (unlikely(!enabled || submission_disabled(guc))) { - if (enabled) - clr_context_enabled(ce); + if (unlikely(!context_enabled(ce) || submission_disabled(guc))) { + clr_context_enabled(ce); spin_unlock_irqrestore(&ce->guc_state.lock, flags); goto unpin; } @@ -1770,7 +1743,6 @@ static inline void guc_lrc_desc_unpin(struct intel_context *ce) GEM_BUG_ON(ce != __get_context(guc, ce->guc_id)); GEM_BUG_ON(context_enabled(ce));
- clr_context_registered(ce); deregister_context(ce, ce->guc_id, true); }
@@ -1843,8 +1815,10 @@ static void guc_context_destroy(struct kref *kref) /* Seal race with Reset */ spin_lock_irqsave(&ce->guc_state.lock, flags); disabled = submission_disabled(guc); - if (likely(!disabled)) + if (likely(!disabled)) { set_context_destroyed(ce); + clr_context_registered(ce); + } spin_unlock_irqrestore(&ce->guc_state.lock, flags); if (unlikely(disabled)) { release_guc_id(guc, ce); @@ -2710,8 +2684,7 @@ int intel_guc_sched_done_process_msg(struct intel_guc *guc, (!context_pending_enable(ce) && !context_pending_disable(ce)))) { drm_err(&guc_to_gt(guc)->i915->drm, - "Bad context sched_state 0x%x, 0x%x, desc_idx %u", - atomic_read(&ce->guc_sched_state_no_lock), + "Bad context sched_state 0x%x, desc_idx %u", ce->guc_state.sched_state, desc_idx); return -EPROTO; } @@ -2726,7 +2699,9 @@ int intel_guc_sched_done_process_msg(struct intel_guc *guc, } #endif
+ spin_lock_irqsave(&ce->guc_state.lock, flags); clr_context_pending_enable(ce); + spin_unlock_irqrestore(&ce->guc_state.lock, flags); } else if (context_pending_disable(ce)) { bool banned;
@@ -3000,9 +2975,8 @@ void intel_guc_submission_print_context_info(struct intel_guc *guc, atomic_read(&ce->pin_count)); drm_printf(p, "\t\tGuC ID Ref Count: %u\n", atomic_read(&ce->guc_id_ref)); - drm_printf(p, "\t\tSchedule State: 0x%x, 0x%x\n\n", - ce->guc_state.sched_state, - atomic_read(&ce->guc_sched_state_no_lock)); + drm_printf(p, "\t\tSchedule State: 0x%x\n\n", + ce->guc_state.sched_state);
guc_log_context_priority(p, ce); } diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h index 806ad688274b..0a77eb2944b5 100644 --- a/drivers/gpu/drm/i915/i915_trace.h +++ b/drivers/gpu/drm/i915/i915_trace.h @@ -903,7 +903,6 @@ DECLARE_EVENT_CLASS(intel_context, __field(u32, guc_id) __field(int, pin_count) __field(u32, sched_state) - __field(u32, guc_sched_state_no_lock) __field(u8, guc_prio) ),
@@ -911,15 +910,12 @@ DECLARE_EVENT_CLASS(intel_context, __entry->guc_id = ce->guc_id; __entry->pin_count = atomic_read(&ce->pin_count); __entry->sched_state = ce->guc_state.sched_state; - __entry->guc_sched_state_no_lock = - atomic_read(&ce->guc_sched_state_no_lock); __entry->guc_prio = ce->guc_prio; ),
- TP_printk("guc_id=%d, pin_count=%d sched_state=0x%x,0x%x, guc_prio=%u", + TP_printk("guc_id=%d, pin_count=%d sched_state=0x%x, guc_prio=%u", __entry->guc_id, __entry->pin_count, __entry->sched_state, - __entry->guc_sched_state_no_lock, __entry->guc_prio) );
Lock the xarray and take a reference to the context if needed.
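The iteration pattern this introduces, reduced to its essentials (a sketch based on the diff below; for_each_context_sketch is an illustrative name and the body of the loop varies per caller):

    static void for_each_context_sketch(struct intel_guc *guc)
    {
            struct intel_context *ce;
            unsigned long index, flags;

            xa_lock_irqsave(&guc->context_lookup, flags);
            xa_for_each(&guc->context_lookup, index, ce) {
                    /* Skip contexts already on their way to destruction. */
                    if (!kref_get_unless_zero(&ce->ref))
                            continue;

                    /* Drop the xarray lock before doing any real work. */
                    xa_unlock(&guc->context_lookup);

                    /* ... operate on ce here ... */

                    intel_context_put(ce);
                    xa_lock(&guc->context_lookup);
            }
            xa_unlock_irqrestore(&guc->context_lookup, flags);
    }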
v2: (Checkpatch) - Add new line after declaration (Daniel Vetter) - Correct put / get accounting in xa_for_each loops v3: (Checkpatch) - Extra new line
Reviewed-by: Daniele Ceraolo Spurio daniele.ceraolospurio@intel.com Signed-off-by: Matthew Brost matthew.brost@intel.com --- .../gpu/drm/i915/gt/uc/intel_guc_submission.c | 102 +++++++++++++++--- 1 file changed, 87 insertions(+), 15 deletions(-)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c index 2aa25cc4ac4b..2c6a6453d332 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c @@ -606,8 +606,18 @@ static void scrub_guc_desc_for_outstanding_g2h(struct intel_guc *guc) unsigned long index, flags; bool pending_disable, pending_enable, deregister, destroyed, banned;
+ xa_lock_irqsave(&guc->context_lookup, flags); xa_for_each(&guc->context_lookup, index, ce) { - spin_lock_irqsave(&ce->guc_state.lock, flags); + /* + * Corner case where the ref count on the object is zero but the + * deregister G2H was lost. In this case we don't touch the ref + * count and finish the destroy of the context. + */ + bool do_put = kref_get_unless_zero(&ce->ref); + + xa_unlock(&guc->context_lookup); + + spin_lock(&ce->guc_state.lock);
/* * Once we are at this point submission_disabled() is guaranteed @@ -623,7 +633,9 @@ static void scrub_guc_desc_for_outstanding_g2h(struct intel_guc *guc) banned = context_banned(ce); init_sched_state(ce);
- spin_unlock_irqrestore(&ce->guc_state.lock, flags); + spin_unlock(&ce->guc_state.lock); + + GEM_BUG_ON(!do_put && !destroyed);
if (pending_enable || destroyed || deregister) { decr_outstanding_submission_g2h(guc); @@ -646,13 +658,19 @@ static void scrub_guc_desc_for_outstanding_g2h(struct intel_guc *guc) } intel_context_sched_disable_unpin(ce); decr_outstanding_submission_g2h(guc); - spin_lock_irqsave(&ce->guc_state.lock, flags); + + spin_lock(&ce->guc_state.lock); guc_blocked_fence_complete(ce); - spin_unlock_irqrestore(&ce->guc_state.lock, flags); + spin_unlock(&ce->guc_state.lock);
intel_context_put(ce); } + + if (do_put) + intel_context_put(ce); + xa_lock(&guc->context_lookup); } + xa_unlock_irqrestore(&guc->context_lookup, flags); }
static inline bool @@ -871,16 +889,29 @@ void intel_guc_submission_reset(struct intel_guc *guc, bool stalled) { struct intel_context *ce; unsigned long index; + unsigned long flags;
if (unlikely(!guc_submission_initialized(guc))) { /* Reset called during driver load? GuC not yet initialised! */ return; }
- xa_for_each(&guc->context_lookup, index, ce) + xa_lock_irqsave(&guc->context_lookup, flags); + xa_for_each(&guc->context_lookup, index, ce) { + if (!kref_get_unless_zero(&ce->ref)) + continue; + + xa_unlock(&guc->context_lookup); + if (intel_context_is_pinned(ce)) __guc_reset_context(ce, stalled);
+ intel_context_put(ce); + + xa_lock(&guc->context_lookup); + } + xa_unlock_irqrestore(&guc->context_lookup, flags); + /* GuC is blown away, drop all references to contexts */ xa_destroy(&guc->context_lookup); } @@ -955,11 +986,24 @@ void intel_guc_submission_cancel_requests(struct intel_guc *guc) { struct intel_context *ce; unsigned long index; + unsigned long flags; + + xa_lock_irqsave(&guc->context_lookup, flags); + xa_for_each(&guc->context_lookup, index, ce) { + if (!kref_get_unless_zero(&ce->ref)) + continue; + + xa_unlock(&guc->context_lookup);
- xa_for_each(&guc->context_lookup, index, ce) if (intel_context_is_pinned(ce)) guc_cancel_context_requests(ce);
+ intel_context_put(ce); + + xa_lock(&guc->context_lookup); + } + xa_unlock_irqrestore(&guc->context_lookup, flags); + guc_cancel_sched_engine_requests(guc->sched_engine);
/* GuC is blown away, drop all references to contexts */ @@ -2849,21 +2893,28 @@ void intel_guc_find_hung_context(struct intel_engine_cs *engine) struct intel_context *ce; struct i915_request *rq; unsigned long index; + unsigned long flags;
/* Reset called during driver load? GuC not yet initialised! */ if (unlikely(!guc_submission_initialized(guc))) return;
+ xa_lock_irqsave(&guc->context_lookup, flags); xa_for_each(&guc->context_lookup, index, ce) { - if (!intel_context_is_pinned(ce)) + if (!kref_get_unless_zero(&ce->ref)) continue;
+ xa_unlock(&guc->context_lookup); + + if (!intel_context_is_pinned(ce)) + goto next; + if (intel_engine_is_virtual(ce->engine)) { if (!(ce->engine->mask & engine->mask)) - continue; + goto next; } else { if (ce->engine != engine) - continue; + goto next; }
list_for_each_entry(rq, &ce->guc_active.requests, sched.link) { @@ -2873,9 +2924,16 @@ void intel_guc_find_hung_context(struct intel_engine_cs *engine) intel_engine_set_hung_context(engine, ce);
/* Can only cope with one hang at a time... */ - return; + intel_context_put(ce); + xa_lock(&guc->context_lookup); + goto done; } +next: + intel_context_put(ce); + xa_lock(&guc->context_lookup); } +done: + xa_unlock_irqrestore(&guc->context_lookup, flags); }
void intel_guc_dump_active_requests(struct intel_engine_cs *engine, @@ -2891,23 +2949,34 @@ void intel_guc_dump_active_requests(struct intel_engine_cs *engine, if (unlikely(!guc_submission_initialized(guc))) return;
+ xa_lock_irqsave(&guc->context_lookup, flags); xa_for_each(&guc->context_lookup, index, ce) { - if (!intel_context_is_pinned(ce)) + if (!kref_get_unless_zero(&ce->ref)) continue;
+ xa_unlock(&guc->context_lookup); + + if (!intel_context_is_pinned(ce)) + goto next; + if (intel_engine_is_virtual(ce->engine)) { if (!(ce->engine->mask & engine->mask)) - continue; + goto next; } else { if (ce->engine != engine) - continue; + goto next; }
- spin_lock_irqsave(&ce->guc_active.lock, flags); + spin_lock(&ce->guc_active.lock); intel_engine_dump_active_requests(&ce->guc_active.requests, hung_rq, m); - spin_unlock_irqrestore(&ce->guc_active.lock, flags); + spin_unlock(&ce->guc_active.lock); + +next: + intel_context_put(ce); + xa_lock(&guc->context_lookup); } + xa_unlock_irqrestore(&guc->context_lookup, flags); }
void intel_guc_submission_print_info(struct intel_guc *guc, @@ -2961,7 +3030,9 @@ void intel_guc_submission_print_context_info(struct intel_guc *guc, { struct intel_context *ce; unsigned long index; + unsigned long flags;
+ xa_lock_irqsave(&guc->context_lookup, flags); xa_for_each(&guc->context_lookup, index, ce) { drm_printf(p, "GuC lrc descriptor %u:\n", ce->guc_id); drm_printf(p, "\tHW Context Desc: 0x%08x\n", ce->lrc.lrca); @@ -2980,6 +3051,7 @@ void intel_guc_submission_print_context_info(struct intel_guc *guc,
guc_log_context_priority(p, ce); } + xa_unlock_irqrestore(&guc->context_lookup, flags); }
static struct intel_context *
Drop the pin count check trick between a sched_disable and a re-pin; instead rely on the lock and a counter of the number of committed requests to determine whether scheduling should be disabled on the context.
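Abridged sketch of guc_context_sched_disable() after this change:

	spin_lock_irqsave(&ce->guc_state.lock, flags);
	if (unlikely(!context_enabled(ce) || submission_disabled(guc) ||
		     context_has_committed_requests(ce))) {
		/* raced with a reset or a newly committed request: bail out */
		clr_context_enabled(ce);
		spin_unlock_irqrestore(&ce->guc_state.lock, flags);
		goto unpin;
	}
	guc_id = prep_context_pending_disable(ce);
	spin_unlock_irqrestore(&ce->guc_state.lock, flags);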
Reviewed-by: Daniele Ceraolo Spurio daniele.ceraolospurio@intel.com Signed-off-by: Matthew Brost matthew.brost@intel.com --- drivers/gpu/drm/i915/gt/intel_context_types.h | 2 + .../gpu/drm/i915/gt/uc/intel_guc_submission.c | 53 +++++++++++-------- 2 files changed, 34 insertions(+), 21 deletions(-)
diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h index d2f798ef678c..3a5d98e908f4 100644 --- a/drivers/gpu/drm/i915/gt/intel_context_types.h +++ b/drivers/gpu/drm/i915/gt/intel_context_types.h @@ -169,6 +169,8 @@ struct intel_context { struct list_head fences; /* GuC context blocked fence */ struct i915_sw_fence blocked; + /* GuC committed requests */ + int number_committed_requests; } guc_state;
struct { diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c index 2c6a6453d332..14a512533c39 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c @@ -249,6 +249,25 @@ static inline void decr_context_blocked(struct intel_context *ce) ce->guc_state.sched_state -= SCHED_STATE_BLOCKED; }
+static inline bool context_has_committed_requests(struct intel_context *ce) +{ + return !!ce->guc_state.number_committed_requests; +} + +static inline void incr_context_committed_requests(struct intel_context *ce) +{ + lockdep_assert_held(&ce->guc_state.lock); + ++ce->guc_state.number_committed_requests; + GEM_BUG_ON(ce->guc_state.number_committed_requests < 0); +} + +static inline void decr_context_committed_requests(struct intel_context *ce) +{ + lockdep_assert_held(&ce->guc_state.lock); + --ce->guc_state.number_committed_requests; + GEM_BUG_ON(ce->guc_state.number_committed_requests < 0); +} + static inline bool context_guc_id_invalid(struct intel_context *ce) { return ce->guc_id == GUC_INVALID_LRC_ID; @@ -1749,24 +1768,18 @@ static void guc_context_sched_disable(struct intel_context *ce) spin_lock_irqsave(&ce->guc_state.lock, flags);
/* - * We have to check if the context has been disabled by another thread. - * We also have to check if the context has been pinned again as another - * pin operation is allowed to pass this function. Checking the pin - * count, within ce->guc_state.lock, synchronizes this function with - * guc_request_alloc ensuring a request doesn't slip through the - * 'context_pending_disable' fence. Checking within the spin lock (can't - * sleep) ensures another process doesn't pin this context and generate - * a request before we set the 'context_pending_disable' flag here. + * We have to check if the context has been disabled by another thread, + * check if submission has been disabled to seal a race with reset and + * finally check if any more requests have been committed to the + * context ensuring that a request doesn't slip through the + * 'context_pending_disable' fence. */ - if (unlikely(!context_enabled(ce) || submission_disabled(guc))) { + if (unlikely(!context_enabled(ce) || submission_disabled(guc) || + context_has_committed_requests(ce))) { clr_context_enabled(ce); spin_unlock_irqrestore(&ce->guc_state.lock, flags); goto unpin; } - if (unlikely(atomic_add_unless(&ce->pin_count, -2, 2))) { - spin_unlock_irqrestore(&ce->guc_state.lock, flags); - return; - } guc_id = prep_context_pending_disable(ce);
spin_unlock_irqrestore(&ce->guc_state.lock, flags); @@ -1796,6 +1809,7 @@ static void __guc_context_destroy(struct intel_context *ce) ce->guc_prio_count[GUC_CLIENT_PRIORITY_HIGH] || ce->guc_prio_count[GUC_CLIENT_PRIORITY_KMD_NORMAL] || ce->guc_prio_count[GUC_CLIENT_PRIORITY_NORMAL]); + GEM_BUG_ON(ce->guc_state.number_committed_requests);
lrc_fini(ce); intel_context_fini(ce); @@ -2026,6 +2040,10 @@ static void remove_from_context(struct i915_request *rq)
spin_unlock_irq(&ce->guc_active.lock);
+ spin_lock_irq(&ce->guc_state.lock); + decr_context_committed_requests(ce); + spin_unlock_irq(&ce->guc_state.lock); + atomic_dec(&ce->guc_id_ref); i915_request_notify_execute_cb_imm(rq); } @@ -2176,15 +2194,7 @@ static int guc_request_alloc(struct i915_request *rq) * schedule enable or context registration if either G2H is pending * respectfully. Once a G2H returns, the fence is released that is * blocking these requests (see guc_signal_context_fence). - * - * We can safely check the below fields outside of the lock as it isn't - * possible for these fields to transition from being clear to set but - * converse is possible, hence the need for the check within the lock. */ - if (likely(!context_wait_for_deregister_to_register(ce) && - !context_pending_disable(ce))) - return 0; - spin_lock_irqsave(&ce->guc_state.lock, flags); if (context_wait_for_deregister_to_register(ce) || context_pending_disable(ce)) { @@ -2193,6 +2203,7 @@ static int guc_request_alloc(struct i915_request *rq)
list_add_tail(&rq->guc_fence_link, &ce->guc_state.fences); } + incr_context_committed_requests(ce); spin_unlock_irqrestore(&ce->guc_state.lock, flags);
return 0;
Move the GuC priority management fields in the context under the guc_active struct, as this is where the lock that protects these fields lives. Also only set the guc_prio field once during context init.
v2: (Daniele) - set CONTEXT_SET_INIT
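The init itself is lazy; roughly (abridged from the diff below):

	/* in guc_request_alloc(), before a guc_id is pinned */
	if (unlikely(!test_bit(CONTEXT_GUC_INIT, &ce->flags)))
		guc_context_init(ce);	/* derive guc_active.prio from the GEM context, once */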
Signed-off-by: Matthew Brost matthew.brost@intel.com --- drivers/gpu/drm/i915/gt/intel_context_types.h | 12 ++-- .../gpu/drm/i915/gt/uc/intel_guc_submission.c | 69 +++++++++++-------- drivers/gpu/drm/i915/i915_trace.h | 2 +- 3 files changed, 46 insertions(+), 37 deletions(-)
diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h index 3a5d98e908f4..b56960a781da 100644 --- a/drivers/gpu/drm/i915/gt/intel_context_types.h +++ b/drivers/gpu/drm/i915/gt/intel_context_types.h @@ -112,6 +112,7 @@ struct intel_context { #define CONTEXT_FORCE_SINGLE_SUBMISSION 7 #define CONTEXT_NOPREEMPT 8 #define CONTEXT_LRCA_DIRTY 9 +#define CONTEXT_GUC_INIT 10
struct { u64 timeout_us; @@ -178,6 +179,11 @@ struct intel_context { spinlock_t lock; /** requests: active requests on this context */ struct list_head requests; + /* + * GuC priority management + */ + u8 prio; + u32 prio_count[GUC_CLIENT_PRIORITY_NUM]; } guc_active;
/* GuC LRC descriptor ID */ @@ -191,12 +197,6 @@ struct intel_context { */ struct list_head guc_id_link;
- /* - * GuC priority management - */ - u8 guc_prio; - u32 guc_prio_count[GUC_CLIENT_PRIORITY_NUM]; - #ifdef CONFIG_DRM_I915_SELFTEST /** * @drop_schedule_enable: Force drop of schedule enable G2H for selftest diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c index 14a512533c39..bc68c0122be4 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c @@ -1367,8 +1367,6 @@ static void guc_context_policy_init(struct intel_engine_cs *engine, desc->preemption_timeout = engine->props.preempt_timeout_ms * 1000; }
-static inline u8 map_i915_prio_to_guc_prio(int prio); - static int guc_lrc_desc_pin(struct intel_context *ce, bool loop) { struct intel_engine_cs *engine = ce->engine; @@ -1376,8 +1374,6 @@ static int guc_lrc_desc_pin(struct intel_context *ce, bool loop) struct intel_guc *guc = &engine->gt->uc.guc; u32 desc_idx = ce->guc_id; struct guc_lrc_desc *desc; - const struct i915_gem_context *ctx; - int prio = I915_CONTEXT_DEFAULT_PRIORITY; bool context_registered; intel_wakeref_t wakeref; int ret = 0; @@ -1394,12 +1390,6 @@ static int guc_lrc_desc_pin(struct intel_context *ce, bool loop)
context_registered = lrc_desc_registered(guc, desc_idx);
- rcu_read_lock(); - ctx = rcu_dereference(ce->gem_context); - if (ctx) - prio = ctx->sched.priority; - rcu_read_unlock(); - reset_lrc_desc(guc, desc_idx); set_lrc_desc_registered(guc, desc_idx, ce);
@@ -1408,8 +1398,7 @@ static int guc_lrc_desc_pin(struct intel_context *ce, bool loop) desc->engine_submit_mask = adjust_engine_mask(engine->class, engine->mask); desc->hw_context_desc = ce->lrc.lrca; - ce->guc_prio = map_i915_prio_to_guc_prio(prio); - desc->priority = ce->guc_prio; + desc->priority = ce->guc_active.prio; desc->context_flags = CONTEXT_REGISTRATION_FLAG_KMD; guc_context_policy_init(engine, desc);
@@ -1805,10 +1794,10 @@ static inline void guc_lrc_desc_unpin(struct intel_context *ce)
static void __guc_context_destroy(struct intel_context *ce) { - GEM_BUG_ON(ce->guc_prio_count[GUC_CLIENT_PRIORITY_KMD_HIGH] || - ce->guc_prio_count[GUC_CLIENT_PRIORITY_HIGH] || - ce->guc_prio_count[GUC_CLIENT_PRIORITY_KMD_NORMAL] || - ce->guc_prio_count[GUC_CLIENT_PRIORITY_NORMAL]); + GEM_BUG_ON(ce->guc_active.prio_count[GUC_CLIENT_PRIORITY_KMD_HIGH] || + ce->guc_active.prio_count[GUC_CLIENT_PRIORITY_HIGH] || + ce->guc_active.prio_count[GUC_CLIENT_PRIORITY_KMD_NORMAL] || + ce->guc_active.prio_count[GUC_CLIENT_PRIORITY_NORMAL]); GEM_BUG_ON(ce->guc_state.number_committed_requests);
lrc_fini(ce); @@ -1918,14 +1907,17 @@ static void guc_context_set_prio(struct intel_guc *guc,
GEM_BUG_ON(prio < GUC_CLIENT_PRIORITY_KMD_HIGH || prio > GUC_CLIENT_PRIORITY_NORMAL); + lockdep_assert_held(&ce->guc_active.lock);
- if (ce->guc_prio == prio || submission_disabled(guc) || - !context_registered(ce)) + if (ce->guc_active.prio == prio || submission_disabled(guc) || + !context_registered(ce)) { + ce->guc_active.prio = prio; return; + }
guc_submission_send_busy_loop(guc, action, ARRAY_SIZE(action), 0, true);
- ce->guc_prio = prio; + ce->guc_active.prio = prio; trace_intel_context_set_prio(ce); }
@@ -1945,24 +1937,24 @@ static inline void add_context_inflight_prio(struct intel_context *ce, u8 guc_prio) { lockdep_assert_held(&ce->guc_active.lock); - GEM_BUG_ON(guc_prio >= ARRAY_SIZE(ce->guc_prio_count)); + GEM_BUG_ON(guc_prio >= ARRAY_SIZE(ce->guc_active.prio_count));
- ++ce->guc_prio_count[guc_prio]; + ++ce->guc_active.prio_count[guc_prio];
/* Overflow protection */ - GEM_WARN_ON(!ce->guc_prio_count[guc_prio]); + GEM_WARN_ON(!ce->guc_active.prio_count[guc_prio]); }
static inline void sub_context_inflight_prio(struct intel_context *ce, u8 guc_prio) { lockdep_assert_held(&ce->guc_active.lock); - GEM_BUG_ON(guc_prio >= ARRAY_SIZE(ce->guc_prio_count)); + GEM_BUG_ON(guc_prio >= ARRAY_SIZE(ce->guc_active.prio_count));
/* Underflow protection */ - GEM_WARN_ON(!ce->guc_prio_count[guc_prio]); + GEM_WARN_ON(!ce->guc_active.prio_count[guc_prio]);
- --ce->guc_prio_count[guc_prio]; + --ce->guc_active.prio_count[guc_prio]; }
static inline void update_context_prio(struct intel_context *ce) @@ -1975,8 +1967,8 @@ static inline void update_context_prio(struct intel_context *ce)
lockdep_assert_held(&ce->guc_active.lock);
- for (i = 0; i < ARRAY_SIZE(ce->guc_prio_count); ++i) { - if (ce->guc_prio_count[i]) { + for (i = 0; i < ARRAY_SIZE(ce->guc_active.prio_count); ++i) { + if (ce->guc_active.prio_count[i]) { guc_context_set_prio(guc, ce, i); break; } @@ -2118,6 +2110,21 @@ static bool context_needs_register(struct intel_context *ce, bool new_guc_id) !submission_disabled(ce_to_guc(ce)); }
+static void guc_context_init(struct intel_context *ce) +{ + const struct i915_gem_context *ctx; + int prio = I915_CONTEXT_DEFAULT_PRIORITY; + + rcu_read_lock(); + ctx = rcu_dereference(ce->gem_context); + if (ctx) + prio = ctx->sched.priority; + rcu_read_unlock(); + + ce->guc_active.prio = map_i915_prio_to_guc_prio(prio); + set_bit(CONTEXT_GUC_INIT, &ce->flags); +} + static int guc_request_alloc(struct i915_request *rq) { struct intel_context *ce = rq->context; @@ -2149,6 +2156,9 @@ static int guc_request_alloc(struct i915_request *rq)
rq->reserved_space -= GUC_REQUEST_SIZE;
+ if (unlikely(!test_bit(CONTEXT_GUC_INIT, &ce->flags))) + guc_context_init(ce); + /* * Call pin_guc_id here rather than in the pinning step as with * dma_resv, contexts can be repeatedly pinned / unpinned trashing the @@ -3025,13 +3035,12 @@ static inline void guc_log_context_priority(struct drm_printer *p, { int i;
- drm_printf(p, "\t\tPriority: %d\n", - ce->guc_prio); + drm_printf(p, "\t\tPriority: %d\n", ce->guc_active.prio); drm_printf(p, "\t\tNumber Requests (lower index == higher priority)\n"); for (i = GUC_CLIENT_PRIORITY_KMD_HIGH; i < GUC_CLIENT_PRIORITY_NUM; ++i) { drm_printf(p, "\t\tNumber requests in priority band[%d]: %d\n", - i, ce->guc_prio_count[i]); + i, ce->guc_active.prio_count[i]); } drm_printf(p, "\n"); } diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h index 0a77eb2944b5..6f882e72ed11 100644 --- a/drivers/gpu/drm/i915/i915_trace.h +++ b/drivers/gpu/drm/i915/i915_trace.h @@ -910,7 +910,7 @@ DECLARE_EVENT_CLASS(intel_context, __entry->guc_id = ce->guc_id; __entry->pin_count = atomic_read(&ce->pin_count); __entry->sched_state = ce->guc_state.sched_state; - __entry->guc_prio = ce->guc_prio; + __entry->guc_prio = ce->guc_active.prio; ),
TP_printk("guc_id=%d, pin_count=%d sched_state=0x%x, guc_prio=%u",
On 8/25/2021 8:23 PM, Matthew Brost wrote:
Move the GuC priority management fields in the context under the guc_active struct, as this is where the lock that protects these fields lives. Also only set the guc_prio field once during context init.
v2: (Daniele)
- set CONTEXT_SET_INIT
Signed-off-by: Matthew Brost matthew.brost@intel.com
Reviewed-by: Daniele Ceraolo Spurio daniele.ceraolospurio@intel.com
Daniele
To make ownership of the locking clear, move the fields guc_id, guc_id_ref, and guc_id_link into a guc_id sub structure in intel_context.
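The resulting layout in intel_context, abridged from the diff below:

	struct {
		/* GuC LRC descriptor ID */
		u16 id;

		/* GuC LRC descriptor reference count */
		atomic_t ref;

		/* in guc->guc_id_list when unpinned but guc_id still valid in GuC */
		struct list_head link;
	} guc_id;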
Reviewed-by: Daniele Ceraolo Spurio daniele.ceraolospurio@intel.com Signed-off-by: Matthew Brost matthew.brost@intel.com --- drivers/gpu/drm/i915/gt/intel_context.c | 4 +- drivers/gpu/drm/i915/gt/intel_context_types.h | 18 +-- drivers/gpu/drm/i915/gt/selftest_hangcheck.c | 6 +- .../gpu/drm/i915/gt/uc/intel_guc_submission.c | 104 +++++++++--------- drivers/gpu/drm/i915/i915_trace.h | 4 +- 5 files changed, 69 insertions(+), 67 deletions(-)
diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c index 3048267ddc7e..485460a11331 100644 --- a/drivers/gpu/drm/i915/gt/intel_context.c +++ b/drivers/gpu/drm/i915/gt/intel_context.c @@ -398,8 +398,8 @@ intel_context_init(struct intel_context *ce, struct intel_engine_cs *engine) spin_lock_init(&ce->guc_active.lock); INIT_LIST_HEAD(&ce->guc_active.requests);
- ce->guc_id = GUC_INVALID_LRC_ID; - INIT_LIST_HEAD(&ce->guc_id_link); + ce->guc_id.id = GUC_INVALID_LRC_ID; + INIT_LIST_HEAD(&ce->guc_id.link);
/* * Initialize fence to be complete as this is expected to be complete diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h index b56960a781da..0b00d249c884 100644 --- a/drivers/gpu/drm/i915/gt/intel_context_types.h +++ b/drivers/gpu/drm/i915/gt/intel_context_types.h @@ -186,16 +186,18 @@ struct intel_context { u32 prio_count[GUC_CLIENT_PRIORITY_NUM]; } guc_active;
- /* GuC LRC descriptor ID */ - u16 guc_id; + struct { + /* GuC LRC descriptor ID */ + u16 id;
- /* GuC LRC descriptor reference count */ - atomic_t guc_id_ref; + /* GuC LRC descriptor reference count */ + atomic_t ref;
- /* - * GuC ID link - in list when unpinned but guc_id still valid in GuC - */ - struct list_head guc_id_link; + /* + * GuC ID link - in list when unpinned but guc_id still valid in GuC + */ + struct list_head link; + } guc_id;
#ifdef CONFIG_DRM_I915_SELFTEST /** diff --git a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c index 2c1ed32ca5ac..e9130fa39616 100644 --- a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c +++ b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c @@ -789,7 +789,7 @@ static int __igt_reset_engine(struct intel_gt *gt, bool active) if (err) pr_err("[%s] Wait for request %lld:%lld [0x%04X] failed: %d!\n", engine->name, rq->fence.context, - rq->fence.seqno, rq->context->guc_id, err); + rq->fence.seqno, rq->context->guc_id.id, err); }
skip: @@ -1098,7 +1098,7 @@ static int __igt_reset_engines(struct intel_gt *gt, if (err) pr_err("[%s] Wait for request %lld:%lld [0x%04X] failed: %d!\n", engine->name, rq->fence.context, - rq->fence.seqno, rq->context->guc_id, err); + rq->fence.seqno, rq->context->guc_id.id, err); }
count++; @@ -1108,7 +1108,7 @@ static int __igt_reset_engines(struct intel_gt *gt, pr_err("i915_reset_engine(%s:%s): failed to reset request %lld:%lld [0x%04X]\n", engine->name, test_name, rq->fence.context, - rq->fence.seqno, rq->context->guc_id); + rq->fence.seqno, rq->context->guc_id.id); i915_request_put(rq);
GEM_TRACE_DUMP(); diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c index bc68c0122be4..044f9dda1397 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c @@ -270,12 +270,12 @@ static inline void decr_context_committed_requests(struct intel_context *ce)
static inline bool context_guc_id_invalid(struct intel_context *ce) { - return ce->guc_id == GUC_INVALID_LRC_ID; + return ce->guc_id.id == GUC_INVALID_LRC_ID; }
static inline void set_context_guc_id_invalid(struct intel_context *ce) { - ce->guc_id = GUC_INVALID_LRC_ID; + ce->guc_id.id = GUC_INVALID_LRC_ID; }
static inline struct intel_guc *ce_to_guc(struct intel_context *ce) @@ -466,14 +466,14 @@ static int guc_add_request(struct intel_guc *guc, struct i915_request *rq) return 0; }
- GEM_BUG_ON(!atomic_read(&ce->guc_id_ref)); + GEM_BUG_ON(!atomic_read(&ce->guc_id.ref)); GEM_BUG_ON(context_guc_id_invalid(ce));
/* * Corner case where the GuC firmware was blown away and reloaded while * this context was pinned. */ - if (unlikely(!lrc_desc_registered(guc, ce->guc_id))) { + if (unlikely(!lrc_desc_registered(guc, ce->guc_id.id))) { err = guc_lrc_desc_pin(ce, false); if (unlikely(err)) return err; @@ -492,14 +492,14 @@ static int guc_add_request(struct intel_guc *guc, struct i915_request *rq)
if (!enabled) { action[len++] = INTEL_GUC_ACTION_SCHED_CONTEXT_MODE_SET; - action[len++] = ce->guc_id; + action[len++] = ce->guc_id.id; action[len++] = GUC_CONTEXT_ENABLE; set_context_pending_enable(ce); intel_context_get(ce); g2h_len_dw = G2H_LEN_DW_SCHED_CONTEXT_MODE_SET; } else { action[len++] = INTEL_GUC_ACTION_SCHED_CONTEXT; - action[len++] = ce->guc_id; + action[len++] = ce->guc_id.id; }
err = intel_guc_send_nb(guc, action, len, g2h_len_dw); @@ -1148,12 +1148,12 @@ static int new_guc_id(struct intel_guc *guc) static void __release_guc_id(struct intel_guc *guc, struct intel_context *ce) { if (!context_guc_id_invalid(ce)) { - ida_simple_remove(&guc->guc_ids, ce->guc_id); - reset_lrc_desc(guc, ce->guc_id); + ida_simple_remove(&guc->guc_ids, ce->guc_id.id); + reset_lrc_desc(guc, ce->guc_id.id); set_context_guc_id_invalid(ce); } - if (!list_empty(&ce->guc_id_link)) - list_del_init(&ce->guc_id_link); + if (!list_empty(&ce->guc_id.link)) + list_del_init(&ce->guc_id.link); }
static void release_guc_id(struct intel_guc *guc, struct intel_context *ce) @@ -1175,13 +1175,13 @@ static int steal_guc_id(struct intel_guc *guc) if (!list_empty(&guc->guc_id_list)) { ce = list_first_entry(&guc->guc_id_list, struct intel_context, - guc_id_link); + guc_id.link);
- GEM_BUG_ON(atomic_read(&ce->guc_id_ref)); + GEM_BUG_ON(atomic_read(&ce->guc_id.ref)); GEM_BUG_ON(context_guc_id_invalid(ce));
- list_del_init(&ce->guc_id_link); - guc_id = ce->guc_id; + list_del_init(&ce->guc_id.link); + guc_id = ce->guc_id.id;
spin_lock(&ce->guc_state.lock); clr_context_registered(ce); @@ -1217,7 +1217,7 @@ static int pin_guc_id(struct intel_guc *guc, struct intel_context *ce) int ret = 0; unsigned long flags, tries = PIN_GUC_ID_TRIES;
- GEM_BUG_ON(atomic_read(&ce->guc_id_ref)); + GEM_BUG_ON(atomic_read(&ce->guc_id.ref));
try_again: spin_lock_irqsave(&guc->contexts_lock, flags); @@ -1225,20 +1225,20 @@ static int pin_guc_id(struct intel_guc *guc, struct intel_context *ce) might_lock(&ce->guc_state.lock);
if (context_guc_id_invalid(ce)) { - ret = assign_guc_id(guc, &ce->guc_id); + ret = assign_guc_id(guc, &ce->guc_id.id); if (ret) goto out_unlock; ret = 1; /* Indidcates newly assigned guc_id */ } - if (!list_empty(&ce->guc_id_link)) - list_del_init(&ce->guc_id_link); - atomic_inc(&ce->guc_id_ref); + if (!list_empty(&ce->guc_id.link)) + list_del_init(&ce->guc_id.link); + atomic_inc(&ce->guc_id.ref);
out_unlock: spin_unlock_irqrestore(&guc->contexts_lock, flags);
/* - * -EAGAIN indicates no guc_ids are available, let's retire any + * -EAGAIN indicates no guc_id are available, let's retire any * outstanding requests to see if that frees up a guc_id. If the first * retire didn't help, insert a sleep with the timeslice duration before * attempting to retire more requests. Double the sleep period each @@ -1266,15 +1266,15 @@ static void unpin_guc_id(struct intel_guc *guc, struct intel_context *ce) { unsigned long flags;
- GEM_BUG_ON(atomic_read(&ce->guc_id_ref) < 0); + GEM_BUG_ON(atomic_read(&ce->guc_id.ref) < 0);
if (unlikely(context_guc_id_invalid(ce))) return;
spin_lock_irqsave(&guc->contexts_lock, flags); - if (!context_guc_id_invalid(ce) && list_empty(&ce->guc_id_link) && - !atomic_read(&ce->guc_id_ref)) - list_add_tail(&ce->guc_id_link, &guc->guc_id_list); + if (!context_guc_id_invalid(ce) && list_empty(&ce->guc_id.link) && + !atomic_read(&ce->guc_id.ref)) + list_add_tail(&ce->guc_id.link, &guc->guc_id_list); spin_unlock_irqrestore(&guc->contexts_lock, flags); }
@@ -1297,12 +1297,12 @@ static int register_context(struct intel_context *ce, bool loop) { struct intel_guc *guc = ce_to_guc(ce); u32 offset = intel_guc_ggtt_offset(guc, guc->lrc_desc_pool) + - ce->guc_id * sizeof(struct guc_lrc_desc); + ce->guc_id.id * sizeof(struct guc_lrc_desc); int ret;
trace_intel_context_register(ce);
- ret = __guc_action_register_context(guc, ce->guc_id, offset, loop); + ret = __guc_action_register_context(guc, ce->guc_id.id, offset, loop); if (likely(!ret)) { unsigned long flags;
@@ -1372,7 +1372,7 @@ static int guc_lrc_desc_pin(struct intel_context *ce, bool loop) struct intel_engine_cs *engine = ce->engine; struct intel_runtime_pm *runtime_pm = engine->uncore->rpm; struct intel_guc *guc = &engine->gt->uc.guc; - u32 desc_idx = ce->guc_id; + u32 desc_idx = ce->guc_id.id; struct guc_lrc_desc *desc; bool context_registered; intel_wakeref_t wakeref; @@ -1435,7 +1435,7 @@ static int guc_lrc_desc_pin(struct intel_context *ce, bool loop) * context whose guc_id was stolen. */ with_intel_runtime_pm(runtime_pm, wakeref) - ret = deregister_context(ce, ce->guc_id, loop); + ret = deregister_context(ce, ce->guc_id.id, loop); if (unlikely(ret == -ENODEV)) { ret = 0; /* Will get registered later */ } @@ -1507,7 +1507,7 @@ static void __guc_context_sched_enable(struct intel_guc *guc, { u32 action[] = { INTEL_GUC_ACTION_SCHED_CONTEXT_MODE_SET, - ce->guc_id, + ce->guc_id.id, GUC_CONTEXT_ENABLE };
@@ -1523,7 +1523,7 @@ static void __guc_context_sched_disable(struct intel_guc *guc, { u32 action[] = { INTEL_GUC_ACTION_SCHED_CONTEXT_MODE_SET, - guc_id, /* ce->guc_id not stable */ + guc_id, /* ce->guc_id.id not stable */ GUC_CONTEXT_DISABLE };
@@ -1568,7 +1568,7 @@ static u16 prep_context_pending_disable(struct intel_context *ce) guc_blocked_fence_reinit(ce); intel_context_get(ce);
- return ce->guc_id; + return ce->guc_id.id; }
static struct i915_sw_fence *guc_context_block(struct intel_context *ce) @@ -1621,7 +1621,7 @@ static bool context_cant_unblock(struct intel_context *ce)
return (ce->guc_state.sched_state & SCHED_STATE_NO_UNBLOCK) || context_guc_id_invalid(ce) || - !lrc_desc_registered(ce_to_guc(ce), ce->guc_id) || + !lrc_desc_registered(ce_to_guc(ce), ce->guc_id.id) || !intel_context_is_pinned(ce); }
@@ -1740,7 +1740,7 @@ static void guc_context_ban(struct intel_context *ce, struct i915_request *rq) if (!context_guc_id_invalid(ce)) with_intel_runtime_pm(runtime_pm, wakeref) __guc_context_set_preemption_timeout(guc, - ce->guc_id, + ce->guc_id.id, 1); spin_unlock_irqrestore(&ce->guc_state.lock, flags); } @@ -1785,11 +1785,11 @@ static inline void guc_lrc_desc_unpin(struct intel_context *ce) { struct intel_guc *guc = ce_to_guc(ce);
- GEM_BUG_ON(!lrc_desc_registered(guc, ce->guc_id)); - GEM_BUG_ON(ce != __get_context(guc, ce->guc_id)); + GEM_BUG_ON(!lrc_desc_registered(guc, ce->guc_id.id)); + GEM_BUG_ON(ce != __get_context(guc, ce->guc_id.id)); GEM_BUG_ON(context_enabled(ce));
- deregister_context(ce, ce->guc_id, true); + deregister_context(ce, ce->guc_id.id, true); }
static void __guc_context_destroy(struct intel_context *ce) @@ -1834,7 +1834,7 @@ static void guc_context_destroy(struct kref *kref) __guc_context_destroy(ce); return; } else if (submission_disabled(guc) || - !lrc_desc_registered(guc, ce->guc_id)) { + !lrc_desc_registered(guc, ce->guc_id.id)) { release_guc_id(guc, ce); __guc_context_destroy(ce); return; @@ -1843,10 +1843,10 @@ static void guc_context_destroy(struct kref *kref) /* * We have to acquire the context spinlock and check guc_id again, if it * is valid it hasn't been stolen and needs to be deregistered. We - * delete this context from the list of unpinned guc_ids available to + * delete this context from the list of unpinned guc_id available to * steal to seal a race with guc_lrc_desc_pin(). When the G2H CTB * returns indicating this context has been deregistered the guc_id is - * returned to the pool of available guc_ids. + * returned to the pool of available guc_id. */ spin_lock_irqsave(&guc->contexts_lock, flags); if (context_guc_id_invalid(ce)) { @@ -1855,8 +1855,8 @@ static void guc_context_destroy(struct kref *kref) return; }
- if (!list_empty(&ce->guc_id_link)) - list_del_init(&ce->guc_id_link); + if (!list_empty(&ce->guc_id.link)) + list_del_init(&ce->guc_id.link); spin_unlock_irqrestore(&guc->contexts_lock, flags);
/* Seal race with Reset */ @@ -1901,7 +1901,7 @@ static void guc_context_set_prio(struct intel_guc *guc, { u32 action[] = { INTEL_GUC_ACTION_SET_CONTEXT_PRIORITY, - ce->guc_id, + ce->guc_id.id, prio, };
@@ -2036,7 +2036,7 @@ static void remove_from_context(struct i915_request *rq) decr_context_committed_requests(ce); spin_unlock_irq(&ce->guc_state.lock);
- atomic_dec(&ce->guc_id_ref); + atomic_dec(&ce->guc_id.ref); i915_request_notify_execute_cb_imm(rq); }
@@ -2106,7 +2106,7 @@ static void guc_signal_context_fence(struct intel_context *ce) static bool context_needs_register(struct intel_context *ce, bool new_guc_id) { return (new_guc_id || test_bit(CONTEXT_LRCA_DIRTY, &ce->flags) || - !lrc_desc_registered(ce_to_guc(ce), ce->guc_id)) && + !lrc_desc_registered(ce_to_guc(ce), ce->guc_id.id)) && !submission_disabled(ce_to_guc(ce)); }
@@ -2162,11 +2162,11 @@ static int guc_request_alloc(struct i915_request *rq) /* * Call pin_guc_id here rather than in the pinning step as with * dma_resv, contexts can be repeatedly pinned / unpinned trashing the - * guc_ids and creating horrible race conditions. This is especially bad - * when guc_ids are being stolen due to over subscription. By the time + * guc_id and creating horrible race conditions. This is especially bad + * when guc_id are being stolen due to over subscription. By the time * this function is reached, it is guaranteed that the guc_id will be * persistent until the generated request is retired. Thus, sealing these - * race conditions. It is still safe to fail here if guc_ids are + * race conditions. It is still safe to fail here if guc_id are * exhausted and return -EAGAIN to the user indicating that they can try * again in the future. * @@ -2176,7 +2176,7 @@ static int guc_request_alloc(struct i915_request *rq) * decremented on each retire. When it is zero, a lock around the * increment (in pin_guc_id) is needed to seal a race with unpin_guc_id. */ - if (atomic_add_unless(&ce->guc_id_ref, 1, 0)) + if (atomic_add_unless(&ce->guc_id.ref, 1, 0)) goto out;
ret = pin_guc_id(guc, ce); /* returns 1 if new guc_id assigned */ @@ -2189,7 +2189,7 @@ static int guc_request_alloc(struct i915_request *rq) disable_submission(guc); goto out; /* GPU will be reset */ } - atomic_dec(&ce->guc_id_ref); + atomic_dec(&ce->guc_id.ref); unpin_guc_id(guc, ce); return ret; } @@ -3023,7 +3023,7 @@ void intel_guc_submission_print_info(struct intel_guc *guc,
priolist_for_each_request(rq, pl) drm_printf(p, "guc_id=%u, seqno=%llu\n", - rq->context->guc_id, + rq->context->guc_id.id, rq->fence.seqno); } spin_unlock_irqrestore(&sched_engine->lock, flags); @@ -3054,7 +3054,7 @@ void intel_guc_submission_print_context_info(struct intel_guc *guc,
xa_lock_irqsave(&guc->context_lookup, flags); xa_for_each(&guc->context_lookup, index, ce) { - drm_printf(p, "GuC lrc descriptor %u:\n", ce->guc_id); + drm_printf(p, "GuC lrc descriptor %u:\n", ce->guc_id.id); drm_printf(p, "\tHW Context Desc: 0x%08x\n", ce->lrc.lrca); drm_printf(p, "\t\tLRC Head: Internal %u, Memory %u\n", ce->ring->head, @@ -3065,7 +3065,7 @@ void intel_guc_submission_print_context_info(struct intel_guc *guc, drm_printf(p, "\t\tContext Pin Count: %u\n", atomic_read(&ce->pin_count)); drm_printf(p, "\t\tGuC ID Ref Count: %u\n", - atomic_read(&ce->guc_id_ref)); + atomic_read(&ce->guc_id.ref)); drm_printf(p, "\t\tSchedule State: 0x%x\n\n", ce->guc_state.sched_state);
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h index 6f882e72ed11..0574f5c7a985 100644 --- a/drivers/gpu/drm/i915/i915_trace.h +++ b/drivers/gpu/drm/i915/i915_trace.h @@ -805,7 +805,7 @@ DECLARE_EVENT_CLASS(i915_request, __entry->dev = rq->engine->i915->drm.primary->index; __entry->class = rq->engine->uabi_class; __entry->instance = rq->engine->uabi_instance; - __entry->guc_id = rq->context->guc_id; + __entry->guc_id = rq->context->guc_id.id; __entry->ctx = rq->fence.context; __entry->seqno = rq->fence.seqno; __entry->tail = rq->tail; @@ -907,7 +907,7 @@ DECLARE_EVENT_CLASS(intel_context, ),
TP_fast_assign( - __entry->guc_id = ce->guc_id; + __entry->guc_id = ce->guc_id.id; __entry->pin_count = atomic_read(&ce->pin_count); __entry->sched_state = ce->guc_state.sched_state; __entry->guc_prio = ce->guc_active.prio;
Now that we have a locking hierarchy of sched_engine->lock -> ce->guc_state.lock, everything from guc_active can be moved into guc_state and protected by guc_state.lock.
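A minimal sketch of the resulting nesting, as used by __unwind_incomplete_requests() and guc_cancel_context_requests() in the diff below:

	spin_lock_irqsave(&sched_engine->lock, flags);
	spin_lock(&ce->guc_state.lock);
	/* walk / unwind / cancel ce->guc_state.requests */
	spin_unlock(&ce->guc_state.lock);
	spin_unlock_irqrestore(&sched_engine->lock, flags);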
Signed-off-by: Matthew Brost matthew.brost@intel.com Reviewed-by: Daniele Ceraolo Spurio daniele.ceraolospurio@intel.com --- drivers/gpu/drm/i915/gt/intel_context.c | 10 +-- drivers/gpu/drm/i915/gt/intel_context_types.h | 7 +- .../gpu/drm/i915/gt/uc/intel_guc_submission.c | 88 +++++++++---------- drivers/gpu/drm/i915/i915_trace.h | 2 +- 4 files changed, 49 insertions(+), 58 deletions(-)
diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c index 485460a11331..ff637147b1a9 100644 --- a/drivers/gpu/drm/i915/gt/intel_context.c +++ b/drivers/gpu/drm/i915/gt/intel_context.c @@ -394,9 +394,7 @@ intel_context_init(struct intel_context *ce, struct intel_engine_cs *engine)
spin_lock_init(&ce->guc_state.lock); INIT_LIST_HEAD(&ce->guc_state.fences); - - spin_lock_init(&ce->guc_active.lock); - INIT_LIST_HEAD(&ce->guc_active.requests); + INIT_LIST_HEAD(&ce->guc_state.requests);
ce->guc_id.id = GUC_INVALID_LRC_ID; INIT_LIST_HEAD(&ce->guc_id.link); @@ -521,15 +519,15 @@ struct i915_request *intel_context_find_active_request(struct intel_context *ce)
GEM_BUG_ON(!intel_engine_uses_guc(ce->engine));
- spin_lock_irqsave(&ce->guc_active.lock, flags); - list_for_each_entry_reverse(rq, &ce->guc_active.requests, + spin_lock_irqsave(&ce->guc_state.lock, flags); + list_for_each_entry_reverse(rq, &ce->guc_state.requests, sched.link) { if (i915_request_completed(rq)) break;
active = rq; } - spin_unlock_irqrestore(&ce->guc_active.lock, flags); + spin_unlock_irqrestore(&ce->guc_state.lock, flags);
return active; } diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h index 0b00d249c884..5285d660eacf 100644 --- a/drivers/gpu/drm/i915/gt/intel_context_types.h +++ b/drivers/gpu/drm/i915/gt/intel_context_types.h @@ -172,11 +172,6 @@ struct intel_context { struct i915_sw_fence blocked; /* GuC committed requests */ int number_committed_requests; - } guc_state; - - struct { - /** lock: protects everything in guc_active */ - spinlock_t lock; /** requests: active requests on this context */ struct list_head requests; /* @@ -184,7 +179,7 @@ struct intel_context { */ u8 prio; u32 prio_count[GUC_CLIENT_PRIORITY_NUM]; - } guc_active; + } guc_state;
struct { /* GuC LRC descriptor ID */ diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c index 044f9dda1397..eb884b48f4b8 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c @@ -827,9 +827,9 @@ __unwind_incomplete_requests(struct intel_context *ce) unsigned long flags;
spin_lock_irqsave(&sched_engine->lock, flags); - spin_lock(&ce->guc_active.lock); + spin_lock(&ce->guc_state.lock); list_for_each_entry_safe_reverse(rq, rn, - &ce->guc_active.requests, + &ce->guc_state.requests, sched.link) { if (i915_request_completed(rq)) continue; @@ -848,7 +848,7 @@ __unwind_incomplete_requests(struct intel_context *ce) list_add(&rq->sched.link, pl); set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags); } - spin_unlock(&ce->guc_active.lock); + spin_unlock(&ce->guc_state.lock); spin_unlock_irqrestore(&sched_engine->lock, flags); }
@@ -943,10 +943,10 @@ static void guc_cancel_context_requests(struct intel_context *ce)
/* Mark all executing requests as skipped. */ spin_lock_irqsave(&sched_engine->lock, flags); - spin_lock(&ce->guc_active.lock); - list_for_each_entry(rq, &ce->guc_active.requests, sched.link) + spin_lock(&ce->guc_state.lock); + list_for_each_entry(rq, &ce->guc_state.requests, sched.link) i915_request_put(i915_request_mark_eio(rq)); - spin_unlock(&ce->guc_active.lock); + spin_unlock(&ce->guc_state.lock); spin_unlock_irqrestore(&sched_engine->lock, flags); }
@@ -1398,7 +1398,7 @@ static int guc_lrc_desc_pin(struct intel_context *ce, bool loop) desc->engine_submit_mask = adjust_engine_mask(engine->class, engine->mask); desc->hw_context_desc = ce->lrc.lrca; - desc->priority = ce->guc_active.prio; + desc->priority = ce->guc_state.prio; desc->context_flags = CONTEXT_REGISTRATION_FLAG_KMD; guc_context_policy_init(engine, desc);
@@ -1794,10 +1794,10 @@ static inline void guc_lrc_desc_unpin(struct intel_context *ce)
static void __guc_context_destroy(struct intel_context *ce) { - GEM_BUG_ON(ce->guc_active.prio_count[GUC_CLIENT_PRIORITY_KMD_HIGH] || - ce->guc_active.prio_count[GUC_CLIENT_PRIORITY_HIGH] || - ce->guc_active.prio_count[GUC_CLIENT_PRIORITY_KMD_NORMAL] || - ce->guc_active.prio_count[GUC_CLIENT_PRIORITY_NORMAL]); + GEM_BUG_ON(ce->guc_state.prio_count[GUC_CLIENT_PRIORITY_KMD_HIGH] || + ce->guc_state.prio_count[GUC_CLIENT_PRIORITY_HIGH] || + ce->guc_state.prio_count[GUC_CLIENT_PRIORITY_KMD_NORMAL] || + ce->guc_state.prio_count[GUC_CLIENT_PRIORITY_NORMAL]); GEM_BUG_ON(ce->guc_state.number_committed_requests);
lrc_fini(ce); @@ -1907,17 +1907,17 @@ static void guc_context_set_prio(struct intel_guc *guc,
GEM_BUG_ON(prio < GUC_CLIENT_PRIORITY_KMD_HIGH || prio > GUC_CLIENT_PRIORITY_NORMAL); - lockdep_assert_held(&ce->guc_active.lock); + lockdep_assert_held(&ce->guc_state.lock);
- if (ce->guc_active.prio == prio || submission_disabled(guc) || + if (ce->guc_state.prio == prio || submission_disabled(guc) || !context_registered(ce)) { - ce->guc_active.prio = prio; + ce->guc_state.prio = prio; return; }
guc_submission_send_busy_loop(guc, action, ARRAY_SIZE(action), 0, true);
- ce->guc_active.prio = prio; + ce->guc_state.prio = prio; trace_intel_context_set_prio(ce); }
@@ -1936,25 +1936,25 @@ static inline u8 map_i915_prio_to_guc_prio(int prio) static inline void add_context_inflight_prio(struct intel_context *ce, u8 guc_prio) { - lockdep_assert_held(&ce->guc_active.lock); - GEM_BUG_ON(guc_prio >= ARRAY_SIZE(ce->guc_active.prio_count)); + lockdep_assert_held(&ce->guc_state.lock); + GEM_BUG_ON(guc_prio >= ARRAY_SIZE(ce->guc_state.prio_count));
- ++ce->guc_active.prio_count[guc_prio]; + ++ce->guc_state.prio_count[guc_prio];
/* Overflow protection */ - GEM_WARN_ON(!ce->guc_active.prio_count[guc_prio]); + GEM_WARN_ON(!ce->guc_state.prio_count[guc_prio]); }
static inline void sub_context_inflight_prio(struct intel_context *ce, u8 guc_prio) { - lockdep_assert_held(&ce->guc_active.lock); - GEM_BUG_ON(guc_prio >= ARRAY_SIZE(ce->guc_active.prio_count)); + lockdep_assert_held(&ce->guc_state.lock); + GEM_BUG_ON(guc_prio >= ARRAY_SIZE(ce->guc_state.prio_count));
/* Underflow protection */ - GEM_WARN_ON(!ce->guc_active.prio_count[guc_prio]); + GEM_WARN_ON(!ce->guc_state.prio_count[guc_prio]);
- --ce->guc_active.prio_count[guc_prio]; + --ce->guc_state.prio_count[guc_prio]; }
static inline void update_context_prio(struct intel_context *ce) @@ -1965,10 +1965,10 @@ static inline void update_context_prio(struct intel_context *ce) BUILD_BUG_ON(GUC_CLIENT_PRIORITY_KMD_HIGH != 0); BUILD_BUG_ON(GUC_CLIENT_PRIORITY_KMD_HIGH > GUC_CLIENT_PRIORITY_NORMAL);
- lockdep_assert_held(&ce->guc_active.lock); + lockdep_assert_held(&ce->guc_state.lock);
- for (i = 0; i < ARRAY_SIZE(ce->guc_active.prio_count); ++i) { - if (ce->guc_active.prio_count[i]) { + for (i = 0; i < ARRAY_SIZE(ce->guc_state.prio_count); ++i) { + if (ce->guc_state.prio_count[i]) { guc_context_set_prio(guc, ce, i); break; } @@ -1988,8 +1988,8 @@ static void add_to_context(struct i915_request *rq)
GEM_BUG_ON(rq->guc_prio == GUC_PRIO_FINI);
- spin_lock(&ce->guc_active.lock); - list_move_tail(&rq->sched.link, &ce->guc_active.requests); + spin_lock(&ce->guc_state.lock); + list_move_tail(&rq->sched.link, &ce->guc_state.requests);
if (rq->guc_prio == GUC_PRIO_INIT) { rq->guc_prio = new_guc_prio; @@ -2001,12 +2001,12 @@ static void add_to_context(struct i915_request *rq) } update_context_prio(ce);
- spin_unlock(&ce->guc_active.lock); + spin_unlock(&ce->guc_state.lock); }
static void guc_prio_fini(struct i915_request *rq, struct intel_context *ce) { - lockdep_assert_held(&ce->guc_active.lock); + lockdep_assert_held(&ce->guc_state.lock);
if (rq->guc_prio != GUC_PRIO_INIT && rq->guc_prio != GUC_PRIO_FINI) { @@ -2020,7 +2020,7 @@ static void remove_from_context(struct i915_request *rq) { struct intel_context *ce = rq->context;
- spin_lock_irq(&ce->guc_active.lock); + spin_lock_irq(&ce->guc_state.lock);
list_del_init(&rq->sched.link); clear_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags); @@ -2030,10 +2030,8 @@ static void remove_from_context(struct i915_request *rq)
guc_prio_fini(rq, ce);
- spin_unlock_irq(&ce->guc_active.lock); - - spin_lock_irq(&ce->guc_state.lock); decr_context_committed_requests(ce); + spin_unlock_irq(&ce->guc_state.lock);
atomic_dec(&ce->guc_id.ref); @@ -2121,7 +2119,7 @@ static void guc_context_init(struct intel_context *ce) prio = ctx->sched.priority; rcu_read_unlock();
- ce->guc_active.prio = map_i915_prio_to_guc_prio(prio); + ce->guc_state.prio = map_i915_prio_to_guc_prio(prio); set_bit(CONTEXT_GUC_INIT, &ce->flags); }
@@ -2355,7 +2353,7 @@ static void guc_bump_inflight_request_prio(struct i915_request *rq, !new_guc_prio_higher(rq->guc_prio, new_guc_prio))) return;
- spin_lock(&ce->guc_active.lock); + spin_lock(&ce->guc_state.lock); if (rq->guc_prio != GUC_PRIO_FINI) { if (rq->guc_prio != GUC_PRIO_INIT) sub_context_inflight_prio(ce, rq->guc_prio); @@ -2363,16 +2361,16 @@ static void guc_bump_inflight_request_prio(struct i915_request *rq, add_context_inflight_prio(ce, rq->guc_prio); update_context_prio(ce); } - spin_unlock(&ce->guc_active.lock); + spin_unlock(&ce->guc_state.lock); }
static void guc_retire_inflight_request_prio(struct i915_request *rq) { struct intel_context *ce = rq->context;
- spin_lock(&ce->guc_active.lock); + spin_lock(&ce->guc_state.lock); guc_prio_fini(rq, ce); - spin_unlock(&ce->guc_active.lock); + spin_unlock(&ce->guc_state.lock); }
static void sanitize_hwsp(struct intel_engine_cs *engine) @@ -2938,7 +2936,7 @@ void intel_guc_find_hung_context(struct intel_engine_cs *engine) goto next; }
- list_for_each_entry(rq, &ce->guc_active.requests, sched.link) { + list_for_each_entry(rq, &ce->guc_state.requests, sched.link) { if (i915_test_request_state(rq) != I915_REQUEST_ACTIVE) continue;
@@ -2988,10 +2986,10 @@ void intel_guc_dump_active_requests(struct intel_engine_cs *engine, goto next; }
- spin_lock(&ce->guc_active.lock); - intel_engine_dump_active_requests(&ce->guc_active.requests, + spin_lock(&ce->guc_state.lock); + intel_engine_dump_active_requests(&ce->guc_state.requests, hung_rq, m); - spin_unlock(&ce->guc_active.lock); + spin_unlock(&ce->guc_state.lock);
next: intel_context_put(ce); @@ -3035,12 +3033,12 @@ static inline void guc_log_context_priority(struct drm_printer *p, { int i;
- drm_printf(p, "\t\tPriority: %d\n", ce->guc_active.prio); + drm_printf(p, "\t\tPriority: %d\n", ce->guc_state.prio); drm_printf(p, "\t\tNumber Requests (lower index == higher priority)\n"); for (i = GUC_CLIENT_PRIORITY_KMD_HIGH; i < GUC_CLIENT_PRIORITY_NUM; ++i) { drm_printf(p, "\t\tNumber requests in priority band[%d]: %d\n", - i, ce->guc_active.prio_count[i]); + i, ce->guc_state.prio_count[i]); } drm_printf(p, "\n"); } diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h index 0574f5c7a985..ec7fe12b94aa 100644 --- a/drivers/gpu/drm/i915/i915_trace.h +++ b/drivers/gpu/drm/i915/i915_trace.h @@ -910,7 +910,7 @@ DECLARE_EVENT_CLASS(intel_context, __entry->guc_id = ce->guc_id.id; __entry->pin_count = atomic_read(&ce->pin_count); __entry->sched_state = ce->guc_state.sched_state; - __entry->guc_prio = ce->guc_active.prio; + __entry->guc_prio = ce->guc_state.prio; ),
TP_printk("guc_id=%d, pin_count=%d sched_state=0x%x, guc_prio=%u",
Add GuC kernel doc for all structures added thus far for GuC submission and update the main GuC submission section with the new interface details.
v2: - Drop guc_active.lock DOC v3: (Daniele) - Fixup a few kernel doc comments
Signed-off-by: Matthew Brost matthew.brost@intel.com --- drivers/gpu/drm/i915/gt/intel_context_types.h | 44 +++++--- drivers/gpu/drm/i915/gt/uc/intel_guc.h | 19 +++- .../gpu/drm/i915/gt/uc/intel_guc_submission.c | 101 ++++++++++++++---- drivers/gpu/drm/i915/i915_request.h | 18 ++-- 4 files changed, 132 insertions(+), 50 deletions(-)
diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h index 5285d660eacf..920ed92f4382 100644 --- a/drivers/gpu/drm/i915/gt/intel_context_types.h +++ b/drivers/gpu/drm/i915/gt/intel_context_types.h @@ -156,40 +156,52 @@ struct intel_context { u8 wa_bb_page; /* if set, page num reserved for context workarounds */
struct { - /** lock: protects everything in guc_state */ + /** @lock: protects everything in guc_state */ spinlock_t lock; /** - * sched_state: scheduling state of this context using GuC + * @sched_state: scheduling state of this context using GuC * submission */ u32 sched_state; /* - * fences: maintains of list of requests that have a submit - * fence related to GuC submission + * @fences: maintains a list of requests that are currently being + * fenced until a GuC operation completes */ struct list_head fences; - /* GuC context blocked fence */ + /** + * @blocked: fence used to signal when the blocking of a + * context's submissions is complete. + */ struct i915_sw_fence blocked; - /* GuC committed requests */ + /** @number_committed_requests: number of committed requests */ int number_committed_requests; - /** requests: active requests on this context */ + /** @requests: list of active requests on this context */ struct list_head requests; - /* - * GuC priority management - */ + /** @prio: the context's current guc priority */ u8 prio; + /** + * @prio_count: a counter of the number of requests inflight in + * each priority bucket + */ u32 prio_count[GUC_CLIENT_PRIORITY_NUM]; } guc_state;
struct { - /* GuC LRC descriptor ID */ + /** + * @id: unique handle which is used to communicate information + * with the GuC about this context, protected by + * guc->contexts_lock + */ u16 id; - - /* GuC LRC descriptor reference count */ + /** + * @ref: the number of references to the guc_id, when + * transitioning in and out of zero protected by + * guc->contexts_lock + */ atomic_t ref; - - /* - * GuC ID link - in list when unpinned but guc_id still valid in GuC + /** + * @link: in guc->guc_id_list when the guc_id has no refs but is + * still valid, protected by guc->contexts_lock */ struct list_head link; } guc_id; diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h index 2e27fe59786b..112dd29a63fe 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h @@ -41,6 +41,10 @@ struct intel_guc { spinlock_t irq_lock; unsigned int msg_enabled_mask;
+ /** + * @outstanding_submission_g2h: number of outstanding G2H related to GuC + * submission, used to determine if the GT is idle + */ atomic_t outstanding_submission_g2h;
struct { @@ -49,12 +53,16 @@ struct intel_guc { void (*disable)(struct intel_guc *guc); } interrupts;
- /* - * contexts_lock protects the pool of free guc ids and a linked list of - * guc ids available to be stolen + /** + * @contexts_lock: protects guc_ids, guc_id_list, ce->guc_id.id, and + * ce->guc_id.ref when transitioning in and out of zero */ spinlock_t contexts_lock; + /** @guc_ids: used to allocate new guc_ids */ struct ida guc_ids; + /** + * @guc_id_list: list of intel_context with valid guc_ids but no refs + */ struct list_head guc_id_list;
bool submission_supported; @@ -70,7 +78,10 @@ struct intel_guc { struct i915_vma *lrc_desc_pool; void *lrc_desc_pool_vaddr;
- /* guc_id to intel_context lookup */ + /** + * @context_lookup: used to resolve intel_context from guc_id, if a + * context is present in this structure it is registered with the GuC + */ struct xarray context_lookup;
/* Control params for fw initialization */ diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c index eb884b48f4b8..3fe45eca95ff 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c @@ -28,21 +28,6 @@ /** * DOC: GuC-based command submission * - * IMPORTANT NOTE: GuC submission is currently not supported in i915. The GuC - * firmware is moving to an updated submission interface and we plan to - * turn submission back on when that lands. The below documentation (and related - * code) matches the old submission model and will be updated as part of the - * upgrade to the new flow. - * - * GuC stage descriptor: - * During initialization, the driver allocates a static pool of 1024 such - * descriptors, and shares them with the GuC. Currently, we only use one - * descriptor. This stage descriptor lets the GuC know about the workqueue and - * process descriptor. Theoretically, it also lets the GuC know about our HW - * contexts (context ID, etc...), but we actually employ a kind of submission - * where the GuC uses the LRCA sent via the work item instead. This is called - * a "proxy" submission. - * * The Scratch registers: * There are 16 MMIO-based registers start from 0xC180. The kernel driver writes * a value to the action register (SOFT_SCRATCH_0) along with any data. It then @@ -51,14 +36,86 @@ * processes the request. The kernel driver polls waiting for this update and * then proceeds. * - * Work Items: - * There are several types of work items that the host may place into a - * workqueue, each with its own requirements and limitations. Currently only - * WQ_TYPE_INORDER is needed to support legacy submission via GuC, which - * represents in-order queue. The kernel driver packs ring tail pointer and an - * ELSP context descriptor dword into Work Item. - * See guc_add_request() + * Command Transport buffers (CTBs): + * Covered in detail in other sections but CTBs (host-to-guc, H2G, guc-to-host + * G2H) are a message interface between the i915 and GuC used to controls + * submissions. + * + * Context registration: + * Before a context can be submitted it must be registered with the GuC via a + * H2G. A unique guc_id is associated with each context. The context is either + * registered at request creation time (normal operation) or at submission time + * (abnormal operation, e.g. after a reset). + * + * Context submission: + * The i915 updates the LRC tail value in memory. The i915 must enable the + * scheduling of the context within the GuC for the GuC to actually consider it. + * Therefore, the first time a disabled context is submitted we use a schedule + * enable H2G, while follow upsubmissions are done via the context submit H2G, + * which informs the GuC that a previously enabled context has new work + * available. + * + * Context unpin: + * To unpin a context a H2G is used to disable scheduling and when the + * corresponding G2H returns indicating the scheduling disable operation has + * completed it is safe to unpin the context. While a disable is in flight it + * isn't safe to resubmit the context so a fence is used to stall all future + * requests until the G2H is returned. + * + * Context deregistration: + * Before a context can be destroyed or if we steal its guc_id we must + * deregister the context with the GuC via H2G. 
If stealing the guc_id it isn't + * safe to submit anything to this guc_id until the deregister completes so a + * fence is used to stall all requests associated with this guc_ids until the + * corresponding G2H returns indicating the guc_id has been deregistered. + * + * guc_ids: + * Unique number associated with private GuC context data passed in during + * context registration / submission / deregistration. 64k available. Simple ida + * is used for allocation. + * + * Stealing guc_ids: + * If no guc_ids are available they can be stolen from another context at + * request creation time if that context is unpinned. If a guc_id can't be found + * we punt this problem to the user as we believe this is near impossible to hit + * during normal use cases. + * + * Locking: + * In the GuC submission code we have 3 basic spin locks which protect + * everything. Details about each below. + * + * sched_engine->lock + * This is the submission lock for all contexts that share a i915 schedule + * engine (sched_engine), thus only 1 context which share a sched_engine can be + * submitting at a time. Currently only 1 sched_engine used for all of GuC + * submission but that could change in the future. + * + * guc->contexts_lock + * Protects guc_id allocation. Global lock i.e. Only 1 context that uses GuC + * submission can hold this at a time. + * + * ce->guc_state.lock + * Protects everything under ce->guc_state. Ensures that a context is in the + * correct state before issuing a H2G. e.g. We don't issue a schedule disable + * on disabled context (bad idea), we don't issue schedule enable when a + * schedule disable is inflight, etc... Also protects list of inflight requests + * on the context and the priority management state. Lock individual to each + * context. + * + * Lock ordering rules: + * sched_engine->lock -> ce->guc_state.lock + * guc->contexts_lock -> ce->guc_state.lock * + * Reset races: + * When a GPU full reset is triggered it is assumed that some G2H responses to + * a H2G can be lost as the GuC is likely toast. Losing these G2H can prove to + * fatal as we do certain operations upon receiving a G2H (e.g. destroy + * contexts, release guc_ids, etc...). Luckly when this occurs we can scrub + * context state and cleanup appropriately, however this is quite racey. To + * avoid races the rules are check for submission being disabled (i.e. check for + * mid reset) with the appropriate lock being held. If submission is disabled + * don't send the H2G or update the context state. The reset code must disable + * submission and grab all these locks before scrubbing for the missing G2H. */
/* GuC Virtual Engine */ diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h index d818cfbfc41d..177eaf55adff 100644 --- a/drivers/gpu/drm/i915/i915_request.h +++ b/drivers/gpu/drm/i915/i915_request.h @@ -290,18 +290,20 @@ struct i915_request { struct hrtimer timer; } watchdog;
- /* - * Requests may need to be stalled when using GuC submission waiting for - * certain GuC operations to complete. If that is the case, stalled - * requests are added to a per context list of stalled requests. The - * below list_head is the link in that list. + /** + * @guc_fence_link: Requests may need to be stalled when using GuC + * submission waiting for certain GuC operations to complete. If that is + * the case, stalled requests are added to a per context list of stalled + * requests. The below list_head is the link in that list. Protected by + * ce->guc_state.lock. */ struct list_head guc_fence_link;
/** - * Priority level while the request is inflight. Differs from i915 - * scheduler priority. See comment above - * I915_SCHEDULER_CAP_STATIC_PRIORITY_MAP for details. + * @guc_prio: Priority level while the request is inflight. Differs from + * i915 scheduler priority. See comment above + * I915_SCHEDULER_CAP_STATIC_PRIORITY_MAP for details. Protected by + * ce->guc_active.lock. */ #define GUC_PRIO_INIT 0xff #define GUC_PRIO_FINI 0xfe
On 8/25/2021 20:23, Matthew Brost wrote:
Add GuC kernel doc for all structures added thus far for GuC submission and update the main GuC submission section with the new interface details.
v2:
- Drop guc_active.lock DOC
v3: (Daniele)
- Fixup a few kernel doc comments
Signed-off-by: Matthew Brost matthew.brost@intel.com
drivers/gpu/drm/i915/gt/intel_context_types.h | 44 +++++--- drivers/gpu/drm/i915/gt/uc/intel_guc.h | 19 +++- .../gpu/drm/i915/gt/uc/intel_guc_submission.c | 101 ++++++++++++++---- drivers/gpu/drm/i915/i915_request.h | 18 ++-- 4 files changed, 132 insertions(+), 50 deletions(-)
diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h index 5285d660eacf..920ed92f4382 100644 --- a/drivers/gpu/drm/i915/gt/intel_context_types.h +++ b/drivers/gpu/drm/i915/gt/intel_context_types.h @@ -156,40 +156,52 @@ struct intel_context { u8 wa_bb_page; /* if set, page num reserved for context workarounds */
struct {
/** lock: protects everything in guc_state */
/** @lock: protects everything in guc_state */
spinlock_t lock;
/**
* sched_state: scheduling state of this context using GuC
* @sched_state: scheduling state of this context using GuC
* submission
*/
u32 sched_state;
/*
* fences: maintains of list of requests that have a submit
* fence related to GuC submission
* @fences: maintains a list of requests are currently being
requests *that* are
* fenced until a GuC operation completes
*/
struct list_head fences;
/* GuC context blocked fence */
/**
* @blocked: fence used to signal when the blocking of a
* contexts submissions is complete.
context's
'submissions are' or 'submission is'?
*/
struct i915_sw_fence blocked;
/* GuC committed requests */
/** @number_committed_requests: number of committed requests */
int number_committed_requests;
/** requests: active requests on this context */
/** @requests: list of active requests on this context */
struct list_head requests;
/*
* GuC priority management
*/
/** @prio: the contexts current guc priority */
context's
u8 prio;
/**
* @prio_count: a counter of the number requests inflight in
in flight
* each priority bucket
*/
u32 prio_count[GUC_CLIENT_PRIORITY_NUM]; } guc_state;
struct {
/* GuC LRC descriptor ID */
/**
* @id: unique handle which is used to communicate information
* with the GuC about this context, protected by
"used to communicate information" seems an odd way to say it. Maybe: Â @id: handle which is used to uniquely identify this context with the GuC, protected by...
* guc->contexts_lock
*/
u16 id;
/* GuC LRC descriptor reference count */
/**
* @ref: the number of references to the guc_id, when
* transitioning in and out of zero protected by
* guc->contexts_lock
*/
atomic_t ref;
/*
* GuC ID link - in list when unpinned but guc_id still valid in GuC
/**
* @link: in guc->guc_id_list when the guc_id has no refs but is
* still valid, protected by guc->contexts_lock
*/
struct list_head link;
} guc_id;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h index 2e27fe59786b..112dd29a63fe 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h @@ -41,6 +41,10 @@ struct intel_guc { spinlock_t irq_lock; unsigned int msg_enabled_mask;
- /**
* @outstanding_submission_g2h: number of outstanding G2H related to GuC
"G2H responses"?
Maybe even "GuC to Host responses"? Do we explain anywhere at a higher level what G2H (or H2G) means?
* submission, used to determine if the GT is idle
*/
atomic_t outstanding_submission_g2h;
struct {
@@ -49,12 +53,16 @@ struct intel_guc { void (*disable)(struct intel_guc *guc); } interrupts;
- /*
* contexts_lock protects the pool of free guc ids and a linked list of
* guc ids available to be stolen
- /**
* @contexts_lock: protects guc_ids, guc_id_list, ce->guc_id.id, and
* ce->guc_id.ref when transitioning in and out of zero
*/
spinlock_t contexts_lock;
- /** @guc_ids: used to allocate new guc_ids */ struct ida guc_ids;
This is very confusing naming - 'guc_ids is used to allocate guc_ids'?! Should at least say 'used to allocate new ce->guc_id.id values' or some such.
- /**
* @guc_id_list: list of intel_context with valid guc_ids but no refs
*/
struct list_head guc_id_list;
Feels like this should be called 'idle_guc_id_list' or similar. Otherwise it sounds like it is a list of all ce->guc_id entities and is therefore basically just a duplicate of the guc_ids ida above.
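For reference, my mental model of what this list holds, sketched out below. This is illustrative only, not the actual steal path; the function name is made up and it assumes guc->contexts_lock is already held:

static struct intel_context *steal_candidate_sketch(struct intel_guc *guc)
{
	struct intel_context *ce;

	lockdep_assert_held(&guc->contexts_lock);

	/* Entries are idle (no refs) but their guc_id is still registered */
	ce = list_first_entry_or_null(&guc->guc_id_list,
				      struct intel_context, guc_id.link);
	if (ce)
		list_del_init(&ce->guc_id.link);

	return ce;
}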
bool submission_supported; @@ -70,7 +78,10 @@ struct intel_guc { struct i915_vma *lrc_desc_pool; void *lrc_desc_pool_vaddr;
- /* guc_id to intel_context lookup */
/**
* @context_lookup: used to resolve intel_context from guc_id, if a
* context is present in this structure it is registered with the GuC
*/
struct xarray context_lookup;
/* Control params for fw initialization */
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c index eb884b48f4b8..3fe45eca95ff 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c @@ -28,21 +28,6 @@ /**
- DOC: GuC-based command submission
- IMPORTANT NOTE: GuC submission is currently not supported in i915. The GuC
- firmware is moving to an updated submission interface and we plan to
- turn submission back on when that lands. The below documentation (and related
- code) matches the old submission model and will be updated as part of the
- upgrade to the new flow.
- GuC stage descriptor:
- During initialization, the driver allocates a static pool of 1024 such
- descriptors, and shares them with the GuC. Currently, we only use one
- descriptor. This stage descriptor lets the GuC know about the workqueue and
- process descriptor. Theoretically, it also lets the GuC know about our HW
- contexts (context ID, etc...), but we actually employ a kind of submission
- where the GuC uses the LRCA sent via the work item instead. This is called
- a "proxy" submission.
- The Scratch registers:
- There are 16 MMIO-based registers start from 0xC180. The kernel driver writes
- a value to the action register (SOFT_SCRATCH_0) along with any data. It then
@@ -51,14 +36,86 @@
- processes the request. The kernel driver polls waiting for this update and
- then proceeds.
- Work Items:
- There are several types of work items that the host may place into a
- workqueue, each with its own requirements and limitations. Currently only
- WQ_TYPE_INORDER is needed to support legacy submission via GuC, which
- represents in-order queue. The kernel driver packs ring tail pointer and an
- ELSP context descriptor dword into Work Item.
- See guc_add_request()
- Command Transport buffers (CTBs):
- Covered in detail in other sections but CTBs (host-to-guc, H2G, guc-to-host
The comma implies 'host-to-guc' and 'H2G' are two separate things. Maybe say '(host to GuC - H2G, GuC to host - G2H)'.
- G2H) are a message interface between the i915 and GuC used to controls
- submissions.
controls -> control
Also, they are used for more than just controlling submission. I would just stop at 'between the i915 and GuC'.
- Context registration:
- Before a context can be submitted it must be registered with the GuC via a
- H2G. A unique guc_id is associated with each context. The context is either
- registered at request creation time (normal operation) or at submission time
- (abnormal operation, e.g. after a reset).
- Context submission:
- The i915 updates the LRC tail value in memory. The i915 must enable the
- scheduling of the context within the GuC for the GuC to actually consider it.
- Therefore, the first time a disabled context is submitted we use a schedule
- enable H2G, while follow upsubmissions are done via the context submit H2G,
upsubmissions -> up submissions
- which informs the GuC that a previously enabled context has new work
- available.
- Context unpin:
- To unpin a context a H2G is used to disable scheduling and when the
scheduling and when -> scheduling. When
Otherwise that is an unnecessarily long sentence.
- corresponding G2H returns indicating the scheduling disable operation has
- completed it is safe to unpin the context. While a disable is in flight it
- isn't safe to resubmit the context so a fence is used to stall all future
- requests until the G2H is returned.
all future requests of only that context or of everything?
- Context deregistration:
- Before a context can be destroyed or if we steal its guc_id we must
- deregister the context with the GuC via H2G. If stealing the guc_id it isn't
- safe to submit anything to this guc_id until the deregister completes so a
- fence is used to stall all requests associated with this guc_ids until the
guc_ids -> guc_id
- corresponding G2H returns indicating the guc_id has been deregistered.
- guc_ids:
- Unique number associated with private GuC context data passed in during
- context registration / submission / deregistration. 64k available. Simple ida
- is used for allocation.
- Stealing guc_ids:
- If no guc_ids are available they can be stolen from another context at
- request creation time if that context is unpinned. If a guc_id can't be found
- we punt this problem to the user as we believe this is near impossible to hit
- during normal use cases.
- Locking:
- In the GuC submission code we have 3 basic spin locks which protect
- everything. Details about each below.
- sched_engine->lock
- This is the submission lock for all contexts that share a i915 schedule
'a i915' -> 'an i915'
- engine (sched_engine), thus only 1 context which share a sched_engine can be
only 1 context which share -> only one of the contexts which share
- submitting at a time. Currently only 1 sched_engine used for all of GuC
used -> is used
- submission but that could change in the future.
- guc->contexts_lock
- Protects guc_id allocation. Global lock i.e. Only 1 context that uses GuC
- submission can hold this at a time.
Technically, it is per GuC. So if there are multiple GuCs (e.g. DGPU + IGPU or even two DGPU boards are plugged in) there can be two locks held concurrently by different contexts. Maybe say:
guc->contexts_lock Protects guc_id allocation for the given GuC. I.e. only one context can be doing guc_id allocation operations at a time for each GuC in the system.
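Something roughly like this per GuC instance, just to illustrate (a sketch only; the function name and the use of GUC_MAX_LRC_DESCRIPTORS as the upper bound are my assumptions, not the actual helper):

static int alloc_guc_id_sketch(struct intel_guc *guc, struct intel_context *ce)
{
	unsigned long flags;
	int ret;

	/* Serialised per GuC by that GuC's own contexts_lock */
	spin_lock_irqsave(&guc->contexts_lock, flags);
	ret = ida_simple_get(&guc->guc_ids, 0, GUC_MAX_LRC_DESCRIPTORS,
			     GFP_ATOMIC | __GFP_NOWARN);
	if (ret >= 0)
		ce->guc_id.id = ret;
	spin_unlock_irqrestore(&guc->contexts_lock, flags);

	return ret < 0 ? ret : 0;
}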
- ce->guc_state.lock
- Protects everything under ce->guc_state. Ensures that a context is in the
- correct state before issuing a H2G. e.g. We don't issue a schedule disable
- on disabled context (bad idea), we don't issue schedule enable when a
on disabled -> on a disabled
issue schedule -> issue a schedule
- schedule disable is inflight, etc... Also protects list of inflight requests
- on the context and the priority management state. Lock individual to each
Lock individual -> Lock is individual
- context.
- Lock ordering rules:
- sched_engine->lock -> ce->guc_state.lock
- guc->contexts_lock -> ce->guc_state.lock
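A trivial sketch of what the first ordering rule means in practice might help readers here (illustrative only, not lifted from the driver):

static void lock_ordering_sketch(struct i915_sched_engine *sched_engine,
				 struct intel_context *ce)
{
	/* Outer lock first, per the rule above */
	spin_lock(&sched_engine->lock);
	spin_lock(&ce->guc_state.lock);

	/* ...update per-context submission state... */

	spin_unlock(&ce->guc_state.lock);
	spin_unlock(&sched_engine->lock);
}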
- Reset races:
- When a GPU full reset is triggered it is assumed that some G2H responses to
Should be 'a full GT reset'. Full GPU reset is an ambiguous term. E.g. it can mean an FLR reset done at the PCI level (outside i915). There is also quite a lot of the GPU that isn't touched by the GT reset, but that is the biggest hammer i915 has access to at the moment.
- a H2G can be lost as the GuC is likely toast. Losing these G2H can prove to
a H2G -> H2Gs
'is likely toast' -> 'is also reset'. The current implementation of GT reset involves an explicit reset of the GuC, so 'likely toast' seems the wrong description. Sure, GuC itself may or may not have died and caused the reset to happen, but either way it is being killed off and that may or may not happen before it has a chance to send the G2Hs.
- fatal as we do certain operations upon receiving a G2H (e.g. destroy
- contexts, release guc_ids, etc...). Luckly when this occurs we can scrub
Luckly -> Luckily. Although, I would drop it completely and just say 'When this...'. Software design should not involve luck!
- context state and cleanup appropriately, however this is quite racey. To
- avoid races the rules are check for submission being disabled (i.e. check for
- mid reset) with the appropriate lock being held. If submission is disabled
- don't send the H2G or update the context state. The reset code must disable
- submission and grab all these locks before scrubbing for the missing G2H.
Feels like this could be better worded. 'the rules are' implies you are about to list a set of numbered rules rather than just give a general description. 'check for mid reset' is a confusing way to say it, I assume it means 'do a check part way through the reset for submission being disabled'? Also, where did "don't send the H2G" come from? Who is sending H2Gs and why? The earlier comment about races was only talking about cleaning up lost G2Hs. And 'grab all these locks' - which locks? The earlier sentence just says 'with the appropriate lock'. Now it says 'all these locks' without specifying what was appropriate before and what extra is needed now.
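To check my understanding of what the above is trying to say, is the intended pattern roughly this (a sketch, not the actual sched-disable path)?

static void reset_race_sketch(struct intel_guc *guc, struct intel_context *ce)
{
	unsigned long flags;

	spin_lock_irqsave(&ce->guc_state.lock, flags);

	/* The reset path disables submission before scrubbing, so bail out */
	if (submission_disabled(guc)) {
		spin_unlock_irqrestore(&ce->guc_state.lock, flags);
		return;
	}

	set_context_pending_disable(ce);
	spin_unlock_irqrestore(&ce->guc_state.lock, flags);

	/* Only now is it safe to send the schedule disable H2G */
}

If so, spelling that out (which lock, which check, who sends what) would make the paragraph much easier to follow.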
*/
/* GuC Virtual Engine */ diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h index d818cfbfc41d..177eaf55adff 100644 --- a/drivers/gpu/drm/i915/i915_request.h +++ b/drivers/gpu/drm/i915/i915_request.h @@ -290,18 +290,20 @@ struct i915_request { struct hrtimer timer; } watchdog;
- /*
* Requests may need to be stalled when using GuC submission waiting for
* certain GuC operations to complete. If that is the case, stalled
* requests are added to a per context list of stalled requests. The
* below list_head is the link in that list.
/**
* @guc_fence_link: Requests may need to be stalled when using GuC
* submission waiting for certain GuC operations to complete. If that is
* the case, stalled requests are added to a per context list of stalled
* requests. The below list_head is the link in that list. Protected by
* ce->guc_state.lock.
*/ struct list_head guc_fence_link;
/**
* Priority level while the request is inflight. Differs from i915
* scheduler priority. See comment above
* I915_SCHEDULER_CAP_STATIC_PRIORITY_MAP for details.
* @guc_prio: Priority level while the request is inflight. Differs from
inflight -> in flight
* i915 scheduler priority. See comment above
* I915_SCHEDULER_CAP_STATIC_PRIORITY_MAP for details. Protected by
* ce->guc_active.lock.
*/
#define GUC_PRIO_INIT 0xff
#define GUC_PRIO_FINI 0xfe
Does it matter that these defines are between the kerneldoc description of guc_prio and the variable declaration itself? And should the defines be described in the kerneldoc as well?
John.
s/static inline/static/g + fix function argument alignment to make checkpatch happy.
Signed-off-by: Matthew Brost matthew.brost@intel.com --- .../gpu/drm/i915/gt/uc/intel_guc_submission.c | 116 +++++++++--------- 1 file changed, 57 insertions(+), 59 deletions(-)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c index 3fe45eca95ff..f921763eb7a4 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c @@ -144,7 +144,7 @@ guc_create_virtual(struct intel_engine_cs **siblings, unsigned int count); #define SCHED_STATE_BLOCKED BIT(SCHED_STATE_BLOCKED_SHIFT) #define SCHED_STATE_BLOCKED_MASK (0xfff << SCHED_STATE_BLOCKED_SHIFT)
-static inline void init_sched_state(struct intel_context *ce) +static void init_sched_state(struct intel_context *ce) { lockdep_assert_held(&ce->guc_state.lock); ce->guc_state.sched_state &= SCHED_STATE_BLOCKED_MASK; @@ -161,14 +161,14 @@ static bool sched_state_is_init(struct intel_context *ce) ~(SCHED_STATE_BLOCKED_MASK | SCHED_STATE_REGISTERED)); }
-static inline bool +static bool context_wait_for_deregister_to_register(struct intel_context *ce) { return ce->guc_state.sched_state & SCHED_STATE_WAIT_FOR_DEREGISTER_TO_REGISTER; }
-static inline void +static void set_context_wait_for_deregister_to_register(struct intel_context *ce) { lockdep_assert_held(&ce->guc_state.lock); @@ -176,7 +176,7 @@ set_context_wait_for_deregister_to_register(struct intel_context *ce) SCHED_STATE_WAIT_FOR_DEREGISTER_TO_REGISTER; }
-static inline void +static void clr_context_wait_for_deregister_to_register(struct intel_context *ce) { lockdep_assert_held(&ce->guc_state.lock); @@ -184,111 +184,111 @@ clr_context_wait_for_deregister_to_register(struct intel_context *ce) ~SCHED_STATE_WAIT_FOR_DEREGISTER_TO_REGISTER; }
-static inline bool +static bool context_destroyed(struct intel_context *ce) { return ce->guc_state.sched_state & SCHED_STATE_DESTROYED; }
-static inline void +static void set_context_destroyed(struct intel_context *ce) { lockdep_assert_held(&ce->guc_state.lock); ce->guc_state.sched_state |= SCHED_STATE_DESTROYED; }
-static inline bool context_pending_disable(struct intel_context *ce) +static bool context_pending_disable(struct intel_context *ce) { return ce->guc_state.sched_state & SCHED_STATE_PENDING_DISABLE; }
-static inline void set_context_pending_disable(struct intel_context *ce) +static void set_context_pending_disable(struct intel_context *ce) { lockdep_assert_held(&ce->guc_state.lock); ce->guc_state.sched_state |= SCHED_STATE_PENDING_DISABLE; }
-static inline void clr_context_pending_disable(struct intel_context *ce) +static void clr_context_pending_disable(struct intel_context *ce) { lockdep_assert_held(&ce->guc_state.lock); ce->guc_state.sched_state &= ~SCHED_STATE_PENDING_DISABLE; }
-static inline bool context_banned(struct intel_context *ce) +static bool context_banned(struct intel_context *ce) { return ce->guc_state.sched_state & SCHED_STATE_BANNED; }
-static inline void set_context_banned(struct intel_context *ce) +static void set_context_banned(struct intel_context *ce) { lockdep_assert_held(&ce->guc_state.lock); ce->guc_state.sched_state |= SCHED_STATE_BANNED; }
-static inline void clr_context_banned(struct intel_context *ce) +static void clr_context_banned(struct intel_context *ce) { lockdep_assert_held(&ce->guc_state.lock); ce->guc_state.sched_state &= ~SCHED_STATE_BANNED; }
-static inline bool context_enabled(struct intel_context *ce) +static bool context_enabled(struct intel_context *ce) { return ce->guc_state.sched_state & SCHED_STATE_ENABLED; }
-static inline void set_context_enabled(struct intel_context *ce) +static void set_context_enabled(struct intel_context *ce) { lockdep_assert_held(&ce->guc_state.lock); ce->guc_state.sched_state |= SCHED_STATE_ENABLED; }
-static inline void clr_context_enabled(struct intel_context *ce) +static void clr_context_enabled(struct intel_context *ce) { lockdep_assert_held(&ce->guc_state.lock); ce->guc_state.sched_state &= ~SCHED_STATE_ENABLED; }
-static inline bool context_pending_enable(struct intel_context *ce) +static bool context_pending_enable(struct intel_context *ce) { return ce->guc_state.sched_state & SCHED_STATE_PENDING_ENABLE; }
-static inline void set_context_pending_enable(struct intel_context *ce) +static void set_context_pending_enable(struct intel_context *ce) { lockdep_assert_held(&ce->guc_state.lock); ce->guc_state.sched_state |= SCHED_STATE_PENDING_ENABLE; }
-static inline void clr_context_pending_enable(struct intel_context *ce) +static void clr_context_pending_enable(struct intel_context *ce) { lockdep_assert_held(&ce->guc_state.lock); ce->guc_state.sched_state &= ~SCHED_STATE_PENDING_ENABLE; }
-static inline bool context_registered(struct intel_context *ce) +static bool context_registered(struct intel_context *ce) { return ce->guc_state.sched_state & SCHED_STATE_REGISTERED; }
-static inline void set_context_registered(struct intel_context *ce) +static void set_context_registered(struct intel_context *ce) { lockdep_assert_held(&ce->guc_state.lock); ce->guc_state.sched_state |= SCHED_STATE_REGISTERED; }
-static inline void clr_context_registered(struct intel_context *ce) +static void clr_context_registered(struct intel_context *ce) { lockdep_assert_held(&ce->guc_state.lock); ce->guc_state.sched_state &= ~SCHED_STATE_REGISTERED; }
-static inline u32 context_blocked(struct intel_context *ce) +static u32 context_blocked(struct intel_context *ce) { return (ce->guc_state.sched_state & SCHED_STATE_BLOCKED_MASK) >> SCHED_STATE_BLOCKED_SHIFT; }
-static inline void incr_context_blocked(struct intel_context *ce) +static void incr_context_blocked(struct intel_context *ce) { lockdep_assert_held(&ce->guc_state.lock);
@@ -297,7 +297,7 @@ static inline void incr_context_blocked(struct intel_context *ce) GEM_BUG_ON(!context_blocked(ce)); /* Overflow check */ }
-static inline void decr_context_blocked(struct intel_context *ce) +static void decr_context_blocked(struct intel_context *ce) { lockdep_assert_held(&ce->guc_state.lock);
@@ -306,41 +306,41 @@ static inline void decr_context_blocked(struct intel_context *ce) ce->guc_state.sched_state -= SCHED_STATE_BLOCKED; }
-static inline bool context_has_committed_requests(struct intel_context *ce) +static bool context_has_committed_requests(struct intel_context *ce) { return !!ce->guc_state.number_committed_requests; }
-static inline void incr_context_committed_requests(struct intel_context *ce) +static void incr_context_committed_requests(struct intel_context *ce) { lockdep_assert_held(&ce->guc_state.lock); ++ce->guc_state.number_committed_requests; GEM_BUG_ON(ce->guc_state.number_committed_requests < 0); }
-static inline void decr_context_committed_requests(struct intel_context *ce) +static void decr_context_committed_requests(struct intel_context *ce) { lockdep_assert_held(&ce->guc_state.lock); --ce->guc_state.number_committed_requests; GEM_BUG_ON(ce->guc_state.number_committed_requests < 0); }
-static inline bool context_guc_id_invalid(struct intel_context *ce) +static bool context_guc_id_invalid(struct intel_context *ce) { return ce->guc_id.id == GUC_INVALID_LRC_ID; }
-static inline void set_context_guc_id_invalid(struct intel_context *ce) +static void set_context_guc_id_invalid(struct intel_context *ce) { ce->guc_id.id = GUC_INVALID_LRC_ID; }
-static inline struct intel_guc *ce_to_guc(struct intel_context *ce) +static struct intel_guc *ce_to_guc(struct intel_context *ce) { return &ce->engine->gt->uc.guc; }
-static inline struct i915_priolist *to_priolist(struct rb_node *rb) +static struct i915_priolist *to_priolist(struct rb_node *rb) { return rb_entry(rb, struct i915_priolist, node); } @@ -354,7 +354,7 @@ static struct guc_lrc_desc *__get_lrc_desc(struct intel_guc *guc, u32 index) return &base[index]; }
-static inline struct intel_context *__get_context(struct intel_guc *guc, u32 id) +static struct intel_context *__get_context(struct intel_guc *guc, u32 id) { struct intel_context *ce = xa_load(&guc->context_lookup, id);
@@ -384,12 +384,12 @@ static void guc_lrc_desc_pool_destroy(struct intel_guc *guc) i915_vma_unpin_and_release(&guc->lrc_desc_pool, I915_VMA_RELEASE_MAP); }
-static inline bool guc_submission_initialized(struct intel_guc *guc) +static bool guc_submission_initialized(struct intel_guc *guc) { return !!guc->lrc_desc_pool_vaddr; }
-static inline void reset_lrc_desc(struct intel_guc *guc, u32 id) +static void reset_lrc_desc(struct intel_guc *guc, u32 id) { if (likely(guc_submission_initialized(guc))) { struct guc_lrc_desc *desc = __get_lrc_desc(guc, id); @@ -407,13 +407,13 @@ static inline void reset_lrc_desc(struct intel_guc *guc, u32 id) } }
-static inline bool lrc_desc_registered(struct intel_guc *guc, u32 id) +static bool lrc_desc_registered(struct intel_guc *guc, u32 id) { return __get_context(guc, id); }
-static inline void set_lrc_desc_registered(struct intel_guc *guc, u32 id, - struct intel_context *ce) +static void set_lrc_desc_registered(struct intel_guc *guc, u32 id, + struct intel_context *ce) { unsigned long flags;
@@ -576,13 +576,13 @@ static int guc_add_request(struct intel_guc *guc, struct i915_request *rq) return err; }
-static inline void guc_set_lrc_tail(struct i915_request *rq) +static void guc_set_lrc_tail(struct i915_request *rq) { rq->context->lrc_reg_state[CTX_RING_TAIL] = intel_ring_set_tail(rq->ring, rq->tail); }
-static inline int rq_prio(const struct i915_request *rq) +static int rq_prio(const struct i915_request *rq) { return rq->sched.attr.priority; } @@ -749,7 +749,7 @@ static void scrub_guc_desc_for_outstanding_g2h(struct intel_guc *guc) xa_unlock_irqrestore(&guc->context_lookup, flags); }
-static inline bool +static bool submission_disabled(struct intel_guc *guc) { struct i915_sched_engine * const sched_engine = guc->sched_engine; @@ -830,7 +830,7 @@ guc_virtual_get_sibling(struct intel_engine_cs *ve, unsigned int sibling) return NULL; }
-static inline struct intel_engine_cs * +static struct intel_engine_cs * __context_to_physical_engine(struct intel_context *ce) { struct intel_engine_cs *engine = ce->engine; @@ -1146,9 +1146,9 @@ void intel_guc_submission_fini(struct intel_guc *guc) i915_sched_engine_put(guc->sched_engine); }
-static inline void queue_request(struct i915_sched_engine *sched_engine, - struct i915_request *rq, - int prio) +static void queue_request(struct i915_sched_engine *sched_engine, + struct i915_request *rq, + int prio) { GEM_BUG_ON(!list_empty(&rq->sched.link)); list_add_tail(&rq->sched.link, @@ -1838,7 +1838,7 @@ static void guc_context_sched_disable(struct intel_context *ce) intel_context_sched_disable_unpin(ce); }
-static inline void guc_lrc_desc_unpin(struct intel_context *ce) +static void guc_lrc_desc_unpin(struct intel_context *ce) { struct intel_guc *guc = ce_to_guc(ce);
@@ -1978,7 +1978,7 @@ static void guc_context_set_prio(struct intel_guc *guc, trace_intel_context_set_prio(ce); }
-static inline u8 map_i915_prio_to_guc_prio(int prio) +static u8 map_i915_prio_to_guc_prio(int prio) { if (prio == I915_PRIORITY_NORMAL) return GUC_CLIENT_PRIORITY_KMD_NORMAL; @@ -1990,8 +1990,7 @@ static inline u8 map_i915_prio_to_guc_prio(int prio) return GUC_CLIENT_PRIORITY_KMD_HIGH; }
-static inline void add_context_inflight_prio(struct intel_context *ce, - u8 guc_prio) +static void add_context_inflight_prio(struct intel_context *ce, u8 guc_prio) { lockdep_assert_held(&ce->guc_state.lock); GEM_BUG_ON(guc_prio >= ARRAY_SIZE(ce->guc_state.prio_count)); @@ -2002,8 +2001,7 @@ static inline void add_context_inflight_prio(struct intel_context *ce, GEM_WARN_ON(!ce->guc_state.prio_count[guc_prio]); }
-static inline void sub_context_inflight_prio(struct intel_context *ce, - u8 guc_prio) +static void sub_context_inflight_prio(struct intel_context *ce, u8 guc_prio) { lockdep_assert_held(&ce->guc_state.lock); GEM_BUG_ON(guc_prio >= ARRAY_SIZE(ce->guc_state.prio_count)); @@ -2014,7 +2012,7 @@ static inline void sub_context_inflight_prio(struct intel_context *ce, --ce->guc_state.prio_count[guc_prio]; }
-static inline void update_context_prio(struct intel_context *ce) +static void update_context_prio(struct intel_context *ce) { struct intel_guc *guc = &ce->engine->gt->uc.guc; int i; @@ -2032,7 +2030,7 @@ static inline void update_context_prio(struct intel_context *ce) } }
-static inline bool new_guc_prio_higher(u8 old_guc_prio, u8 new_guc_prio) +static bool new_guc_prio_higher(u8 old_guc_prio, u8 new_guc_prio) { /* Lower value is higher priority */ return new_guc_prio < old_guc_prio; @@ -2506,15 +2504,15 @@ static void guc_set_default_submission(struct intel_engine_cs *engine) engine->submit_request = guc_submit_request; }
-static inline void guc_kernel_context_pin(struct intel_guc *guc, - struct intel_context *ce) +static void guc_kernel_context_pin(struct intel_guc *guc, + struct intel_context *ce) { if (context_guc_id_invalid(ce)) pin_guc_id(guc, ce); guc_lrc_desc_pin(ce, true); }
-static inline void guc_init_lrc_mapping(struct intel_guc *guc) +static void guc_init_lrc_mapping(struct intel_guc *guc) { struct intel_gt *gt = guc_to_gt(guc); struct intel_engine_cs *engine; @@ -2617,7 +2615,7 @@ static void rcs_submission_override(struct intel_engine_cs *engine) } }
-static inline void guc_default_irqs(struct intel_engine_cs *engine) +static void guc_default_irqs(struct intel_engine_cs *engine) { engine->irq_keep_mask = GT_RENDER_USER_INTERRUPT; intel_engine_set_irq_handler(engine, cs_irq_handler); @@ -2713,7 +2711,7 @@ void intel_guc_submission_init_early(struct intel_guc *guc) guc->submission_selected = __guc_submission_selected(guc); }
-static inline struct intel_context * +static struct intel_context * g2h_context_lookup(struct intel_guc *guc, u32 desc_idx) { struct intel_context *ce; @@ -3085,8 +3083,8 @@ void intel_guc_submission_print_info(struct intel_guc *guc, drm_printf(p, "\n"); }
-static inline void guc_log_context_priority(struct drm_printer *p, - struct intel_context *ce) +static void guc_log_context_priority(struct drm_printer *p, + struct intel_context *ce) { int i;
Subject should be 'drop .. functions *from* intel...'.
On 8/25/2021 20:23, Matthew Brost wrote:
s/static inline/static/g + fix function argument alignment to make checkpatch happy.
Why?
Signed-off-by: Matthew Brost matthew.brost@intel.com
.../gpu/drm/i915/gt/uc/intel_guc_submission.c | 116 +++++++++--------- 1 file changed, 57 insertions(+), 59 deletions(-)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c index 3fe45eca95ff..f921763eb7a4 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c @@ -144,7 +144,7 @@ guc_create_virtual(struct intel_engine_cs **siblings, unsigned int count); #define SCHED_STATE_BLOCKED BIT(SCHED_STATE_BLOCKED_SHIFT) #define SCHED_STATE_BLOCKED_MASK (0xfff << SCHED_STATE_BLOCKED_SHIFT)
-static inline void init_sched_state(struct intel_context *ce) +static void init_sched_state(struct intel_context *ce) { lockdep_assert_held(&ce->guc_state.lock); ce->guc_state.sched_state &= SCHED_STATE_BLOCKED_MASK; @@ -161,14 +161,14 @@ static bool sched_state_is_init(struct intel_context *ce) ~(SCHED_STATE_BLOCKED_MASK | SCHED_STATE_REGISTERED)); }
-static inline bool +static bool context_wait_for_deregister_to_register(struct intel_context *ce)
Could probably un-linewrap most of these split declarations and still stay under the line length limit.
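E.g. even the longest of them fits comfortably on one line (still under 80 columns):

static bool context_wait_for_deregister_to_register(struct intel_context *ce)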
{ return ce->guc_state.sched_state & SCHED_STATE_WAIT_FOR_DEREGISTER_TO_REGISTER; }
-static inline void +static void set_context_wait_for_deregister_to_register(struct intel_context *ce) { lockdep_assert_held(&ce->guc_state.lock); @@ -176,7 +176,7 @@ set_context_wait_for_deregister_to_register(struct intel_context *ce) SCHED_STATE_WAIT_FOR_DEREGISTER_TO_REGISTER; }
-static inline void +static void clr_context_wait_for_deregister_to_register(struct intel_context *ce) { lockdep_assert_held(&ce->guc_state.lock); @@ -184,111 +184,111 @@ clr_context_wait_for_deregister_to_register(struct intel_context *ce) ~SCHED_STATE_WAIT_FOR_DEREGISTER_TO_REGISTER; }
-static inline bool +static bool context_destroyed(struct intel_context *ce) { return ce->guc_state.sched_state & SCHED_STATE_DESTROYED; }
-static inline void +static void set_context_destroyed(struct intel_context *ce) { lockdep_assert_held(&ce->guc_state.lock); ce->guc_state.sched_state |= SCHED_STATE_DESTROYED; }
-static inline bool context_pending_disable(struct intel_context *ce) +static bool context_pending_disable(struct intel_context *ce) { return ce->guc_state.sched_state & SCHED_STATE_PENDING_DISABLE; }
-static inline void set_context_pending_disable(struct intel_context *ce) +static void set_context_pending_disable(struct intel_context *ce) { lockdep_assert_held(&ce->guc_state.lock); ce->guc_state.sched_state |= SCHED_STATE_PENDING_DISABLE; }
-static inline void clr_context_pending_disable(struct intel_context *ce) +static void clr_context_pending_disable(struct intel_context *ce) { lockdep_assert_held(&ce->guc_state.lock); ce->guc_state.sched_state &= ~SCHED_STATE_PENDING_DISABLE; }
-static inline bool context_banned(struct intel_context *ce) +static bool context_banned(struct intel_context *ce) { return ce->guc_state.sched_state & SCHED_STATE_BANNED; }
-static inline void set_context_banned(struct intel_context *ce) +static void set_context_banned(struct intel_context *ce) { lockdep_assert_held(&ce->guc_state.lock); ce->guc_state.sched_state |= SCHED_STATE_BANNED; }
-static inline void clr_context_banned(struct intel_context *ce) +static void clr_context_banned(struct intel_context *ce) { lockdep_assert_held(&ce->guc_state.lock); ce->guc_state.sched_state &= ~SCHED_STATE_BANNED; }
-static inline bool context_enabled(struct intel_context *ce) +static bool context_enabled(struct intel_context *ce) { return ce->guc_state.sched_state & SCHED_STATE_ENABLED; }
-static inline void set_context_enabled(struct intel_context *ce) +static void set_context_enabled(struct intel_context *ce) { lockdep_assert_held(&ce->guc_state.lock); ce->guc_state.sched_state |= SCHED_STATE_ENABLED; }
-static inline void clr_context_enabled(struct intel_context *ce) +static void clr_context_enabled(struct intel_context *ce) { lockdep_assert_held(&ce->guc_state.lock); ce->guc_state.sched_state &= ~SCHED_STATE_ENABLED; }
-static inline bool context_pending_enable(struct intel_context *ce) +static bool context_pending_enable(struct intel_context *ce) { return ce->guc_state.sched_state & SCHED_STATE_PENDING_ENABLE; }
-static inline void set_context_pending_enable(struct intel_context *ce) +static void set_context_pending_enable(struct intel_context *ce) { lockdep_assert_held(&ce->guc_state.lock); ce->guc_state.sched_state |= SCHED_STATE_PENDING_ENABLE; }
-static inline void clr_context_pending_enable(struct intel_context *ce) +static void clr_context_pending_enable(struct intel_context *ce) { lockdep_assert_held(&ce->guc_state.lock); ce->guc_state.sched_state &= ~SCHED_STATE_PENDING_ENABLE; }
-static inline bool context_registered(struct intel_context *ce) +static bool context_registered(struct intel_context *ce) { return ce->guc_state.sched_state & SCHED_STATE_REGISTERED; }
-static inline void set_context_registered(struct intel_context *ce) +static void set_context_registered(struct intel_context *ce) { lockdep_assert_held(&ce->guc_state.lock); ce->guc_state.sched_state |= SCHED_STATE_REGISTERED; }
-static inline void clr_context_registered(struct intel_context *ce) +static void clr_context_registered(struct intel_context *ce) { lockdep_assert_held(&ce->guc_state.lock); ce->guc_state.sched_state &= ~SCHED_STATE_REGISTERED; }
-static inline u32 context_blocked(struct intel_context *ce) +static u32 context_blocked(struct intel_context *ce) { return (ce->guc_state.sched_state & SCHED_STATE_BLOCKED_MASK) >> SCHED_STATE_BLOCKED_SHIFT; }
-static inline void incr_context_blocked(struct intel_context *ce) +static void incr_context_blocked(struct intel_context *ce) { lockdep_assert_held(&ce->guc_state.lock);
@@ -297,7 +297,7 @@ static inline void incr_context_blocked(struct intel_context *ce) GEM_BUG_ON(!context_blocked(ce)); /* Overflow check */ }
-static inline void decr_context_blocked(struct intel_context *ce) +static void decr_context_blocked(struct intel_context *ce) { lockdep_assert_held(&ce->guc_state.lock);
@@ -306,41 +306,41 @@ static inline void decr_context_blocked(struct intel_context *ce) ce->guc_state.sched_state -= SCHED_STATE_BLOCKED; }
-static inline bool context_has_committed_requests(struct intel_context *ce) +static bool context_has_committed_requests(struct intel_context *ce) { return !!ce->guc_state.number_committed_requests; }
-static inline void incr_context_committed_requests(struct intel_context *ce) +static void incr_context_committed_requests(struct intel_context *ce) { lockdep_assert_held(&ce->guc_state.lock); ++ce->guc_state.number_committed_requests; GEM_BUG_ON(ce->guc_state.number_committed_requests < 0); }
-static inline void decr_context_committed_requests(struct intel_context *ce) +static void decr_context_committed_requests(struct intel_context *ce) { lockdep_assert_held(&ce->guc_state.lock); --ce->guc_state.number_committed_requests; GEM_BUG_ON(ce->guc_state.number_committed_requests < 0); }
-static inline bool context_guc_id_invalid(struct intel_context *ce) +static bool context_guc_id_invalid(struct intel_context *ce) { return ce->guc_id.id == GUC_INVALID_LRC_ID; }
-static inline void set_context_guc_id_invalid(struct intel_context *ce) +static void set_context_guc_id_invalid(struct intel_context *ce) { ce->guc_id.id = GUC_INVALID_LRC_ID; }
-static inline struct intel_guc *ce_to_guc(struct intel_context *ce) +static struct intel_guc *ce_to_guc(struct intel_context *ce) { return &ce->engine->gt->uc.guc; }
-static inline struct i915_priolist *to_priolist(struct rb_node *rb) +static struct i915_priolist *to_priolist(struct rb_node *rb) { return rb_entry(rb, struct i915_priolist, node); } @@ -354,7 +354,7 @@ static struct guc_lrc_desc *__get_lrc_desc(struct intel_guc *guc, u32 index) return &base[index]; }
-static inline struct intel_context *__get_context(struct intel_guc *guc, u32 id) +static struct intel_context *__get_context(struct intel_guc *guc, u32 id) { struct intel_context *ce = xa_load(&guc->context_lookup, id);
@@ -384,12 +384,12 @@ static void guc_lrc_desc_pool_destroy(struct intel_guc *guc) i915_vma_unpin_and_release(&guc->lrc_desc_pool, I915_VMA_RELEASE_MAP); }
-static inline bool guc_submission_initialized(struct intel_guc *guc) +static bool guc_submission_initialized(struct intel_guc *guc) { return !!guc->lrc_desc_pool_vaddr; }
-static inline void reset_lrc_desc(struct intel_guc *guc, u32 id) +static void reset_lrc_desc(struct intel_guc *guc, u32 id) { if (likely(guc_submission_initialized(guc))) { struct guc_lrc_desc *desc = __get_lrc_desc(guc, id); @@ -407,13 +407,13 @@ static inline void reset_lrc_desc(struct intel_guc *guc, u32 id) } }
-static inline bool lrc_desc_registered(struct intel_guc *guc, u32 id) +static bool lrc_desc_registered(struct intel_guc *guc, u32 id) { return __get_context(guc, id); }
-static inline void set_lrc_desc_registered(struct intel_guc *guc, u32 id,
struct intel_context *ce)
+static void set_lrc_desc_registered(struct intel_guc *guc, u32 id,
struct intel_context *ce)
{
	unsigned long flags;
@@ -576,13 +576,13 @@ static int guc_add_request(struct intel_guc *guc, struct i915_request *rq) return err; }
-static inline void guc_set_lrc_tail(struct i915_request *rq) +static void guc_set_lrc_tail(struct i915_request *rq) { rq->context->lrc_reg_state[CTX_RING_TAIL] = intel_ring_set_tail(rq->ring, rq->tail); }
-static inline int rq_prio(const struct i915_request *rq) +static int rq_prio(const struct i915_request *rq) { return rq->sched.attr.priority; } @@ -749,7 +749,7 @@ static void scrub_guc_desc_for_outstanding_g2h(struct intel_guc *guc) xa_unlock_irqrestore(&guc->context_lookup, flags); }
-static inline bool +static bool submission_disabled(struct intel_guc *guc) { struct i915_sched_engine * const sched_engine = guc->sched_engine; @@ -830,7 +830,7 @@ guc_virtual_get_sibling(struct intel_engine_cs *ve, unsigned int sibling) return NULL; }
-static inline struct intel_engine_cs * +static struct intel_engine_cs * __context_to_physical_engine(struct intel_context *ce) { struct intel_engine_cs *engine = ce->engine; @@ -1146,9 +1146,9 @@ void intel_guc_submission_fini(struct intel_guc *guc) i915_sched_engine_put(guc->sched_engine); }
-static inline void queue_request(struct i915_sched_engine *sched_engine,
struct i915_request *rq,
int prio)
+static void queue_request(struct i915_sched_engine *sched_engine,
struct i915_request *rq,
int prio)
Could unwrap the prio field.
John.
{ GEM_BUG_ON(!list_empty(&rq->sched.link)); list_add_tail(&rq->sched.link, @@ -1838,7 +1838,7 @@ static void guc_context_sched_disable(struct intel_context *ce) intel_context_sched_disable_unpin(ce); }
-static inline void guc_lrc_desc_unpin(struct intel_context *ce) +static void guc_lrc_desc_unpin(struct intel_context *ce) { struct intel_guc *guc = ce_to_guc(ce);
@@ -1978,7 +1978,7 @@ static void guc_context_set_prio(struct intel_guc *guc, trace_intel_context_set_prio(ce); }
-static inline u8 map_i915_prio_to_guc_prio(int prio) +static u8 map_i915_prio_to_guc_prio(int prio) { if (prio == I915_PRIORITY_NORMAL) return GUC_CLIENT_PRIORITY_KMD_NORMAL; @@ -1990,8 +1990,7 @@ static inline u8 map_i915_prio_to_guc_prio(int prio) return GUC_CLIENT_PRIORITY_KMD_HIGH; }
-static inline void add_context_inflight_prio(struct intel_context *ce,
u8 guc_prio)
+static void add_context_inflight_prio(struct intel_context *ce, u8 guc_prio) { lockdep_assert_held(&ce->guc_state.lock); GEM_BUG_ON(guc_prio >= ARRAY_SIZE(ce->guc_state.prio_count)); @@ -2002,8 +2001,7 @@ static inline void add_context_inflight_prio(struct intel_context *ce, GEM_WARN_ON(!ce->guc_state.prio_count[guc_prio]); }
-static inline void sub_context_inflight_prio(struct intel_context *ce,
u8 guc_prio)
+static void sub_context_inflight_prio(struct intel_context *ce, u8 guc_prio) { lockdep_assert_held(&ce->guc_state.lock); GEM_BUG_ON(guc_prio >= ARRAY_SIZE(ce->guc_state.prio_count)); @@ -2014,7 +2012,7 @@ static inline void sub_context_inflight_prio(struct intel_context *ce, --ce->guc_state.prio_count[guc_prio]; }
-static inline void update_context_prio(struct intel_context *ce) +static void update_context_prio(struct intel_context *ce) { struct intel_guc *guc = &ce->engine->gt->uc.guc; int i; @@ -2032,7 +2030,7 @@ static inline void update_context_prio(struct intel_context *ce) } }
-static inline bool new_guc_prio_higher(u8 old_guc_prio, u8 new_guc_prio) +static bool new_guc_prio_higher(u8 old_guc_prio, u8 new_guc_prio) { /* Lower value is higher priority */ return new_guc_prio < old_guc_prio; @@ -2506,15 +2504,15 @@ static void guc_set_default_submission(struct intel_engine_cs *engine) engine->submit_request = guc_submit_request; }
-static inline void guc_kernel_context_pin(struct intel_guc *guc,
struct intel_context *ce)
+static void guc_kernel_context_pin(struct intel_guc *guc,
struct intel_context *ce)
{
	if (context_guc_id_invalid(ce))
		pin_guc_id(guc, ce);
	guc_lrc_desc_pin(ce, true);
}
-static inline void guc_init_lrc_mapping(struct intel_guc *guc) +static void guc_init_lrc_mapping(struct intel_guc *guc) { struct intel_gt *gt = guc_to_gt(guc); struct intel_engine_cs *engine; @@ -2617,7 +2615,7 @@ static void rcs_submission_override(struct intel_engine_cs *engine) } }
-static inline void guc_default_irqs(struct intel_engine_cs *engine) +static void guc_default_irqs(struct intel_engine_cs *engine) { engine->irq_keep_mask = GT_RENDER_USER_INTERRUPT; intel_engine_set_irq_handler(engine, cs_irq_handler); @@ -2713,7 +2711,7 @@ void intel_guc_submission_init_early(struct intel_guc *guc) guc->submission_selected = __guc_submission_selected(guc); }
-static inline struct intel_context * +static struct intel_context * g2h_context_lookup(struct intel_guc *guc, u32 desc_idx) { struct intel_context *ce; @@ -3085,8 +3083,8 @@ void intel_guc_submission_print_info(struct intel_guc *guc, drm_printf(p, "\n"); }
-static inline void guc_log_context_priority(struct drm_printer *p,
struct intel_context *ce)
+static void guc_log_context_priority(struct drm_printer *p,
struct intel_context *ce)
{
	int i;