On 28.06.2021 01:14, Matthew Brost wrote:
Implement a stall timer which fails H2G CTBs once a period of time passes with no forward progress, to prevent deadlock.
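In short (a rough sketch of the mechanism only, distilled from the diff below, not the exact code): record when a send first fails to make forward progress, and once that condition has persisted past a timeout, mark the send CTB broken so further H2G submissions fail fast with -EPIPE instead of waiting forever.

	/* rough sketch only; names as in the patch below */
	if (!h2g_has_room(ctb, len_dw)) {
		if (ct->stall_time == KTIME_MAX)	/* first stalled attempt */
			ct->stall_time = ktime_get();

		if (ktime_ms_delta(ktime_get(), ct->stall_time) > GUC_CTB_TIMEOUT_MS) {
			ctb->broken = true;		/* declare the channel dead */
			return -EPIPE;
		}

		return -EBUSY;				/* caller may retry or sleep */
	}

	ct->stall_time = KTIME_MAX;			/* any progress resets the timer */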
v2: (Michal)
- Improve error message in ct_deadlock()
- Set broken when ct_deadlock() returns true
- Return -EPIPE on ct_deadlock()
Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
Signed-off-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c | 62 ++++++++++++++++++++---
 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h |  4 ++
 2 files changed, 59 insertions(+), 7 deletions(-)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
index 90ee95a240e8..8f553f7f9619 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
@@ -319,6 +319,7 @@ int intel_guc_ct_enable(struct intel_guc_ct *ct)
 		goto err_deregister;
 
 	ct->enabled = true;
+	ct->stall_time = KTIME_MAX;
 
 	return 0;
@@ -391,9 +392,6 @@ static int ct_write(struct intel_guc_ct *ct,
 	u32 *cmds = ctb->cmds;
 	unsigned int i;
 
+	if (unlikely(ctb->broken))
+		return -EPIPE;
+
 	if (unlikely(desc->status))
 		goto corrupted;
@@ -509,6 +507,25 @@ static int wait_for_ct_request_update(struct ct_request *req, u32 *status)
 	return err;
 }
 
+#define GUC_CTB_TIMEOUT_MS	1500
+static inline bool ct_deadlocked(struct intel_guc_ct *ct)
+{
+	long timeout = GUC_CTB_TIMEOUT_MS;
+	bool ret = ktime_ms_delta(ktime_get(), ct->stall_time) > timeout;
+
+	if (unlikely(ret)) {
+		struct guc_ct_buffer_desc *send = ct->ctbs.send.desc;
+		struct guc_ct_buffer_desc *recv = ct->ctbs.recv.desc;
+
+		CT_ERROR(ct, "Communication stalled for %lld, desc status=%#x,%#x\n",
+			 ktime_ms_delta(ktime_get(), ct->stall_time),
+			 send->status, recv->status);

nit: missing unit in "stalled for ... ms" (the %lld in the CT_ERROR format above)
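i.e. presumably just adding the unit to the format string, something like:

	CT_ERROR(ct, "Communication stalled for %lld ms, desc status=%#x,%#x\n",

with the rest of the statement unchanged.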
+		ct->ctbs.send.broken = true;
+	}
+
+	return ret;
+}
+
 static inline bool h2g_has_room(struct intel_guc_ct_buffer *ctb, u32 len_dw)
 {
 	struct guc_ct_buffer_desc *desc = ctb->desc;
@@ -520,6 +537,26 @@ static inline bool h2g_has_room(struct intel_guc_ct_buffer *ctb, u32 len_dw)
 	return space >= len_dw;
 }
 
+static int has_room_nb(struct intel_guc_ct *ct, u32 len_dw)
+{
+	struct intel_guc_ct_buffer *ctb = &ct->ctbs.send;
+
+	lockdep_assert_held(&ct->ctbs.send.lock);
+
+	if (unlikely(!h2g_has_room(ctb, len_dw))) {
+		if (ct->stall_time == KTIME_MAX)
+			ct->stall_time = ktime_get();
+
+		if (unlikely(ct_deadlocked(ct)))
+			return -EPIPE;
+		else
+			return -EBUSY;
+	}
+
+	ct->stall_time = KTIME_MAX;
+
+	return 0;
+}
+
 static int ct_send_nb(struct intel_guc_ct *ct,
 		      const u32 *action,
 		      u32 len,
@@ -530,13 +567,14 @@ static int ct_send_nb(struct intel_guc_ct *ct,
 	u32 fence;
 	int ret;
 
+	if (unlikely(ctb->broken))
+		return -EPIPE;
+
 	spin_lock_irqsave(&ctb->lock, spin_flags);
 
-	ret = h2g_has_room(ctb, len + GUC_CTB_HDR_LEN);
-	if (unlikely(!ret)) {
-		ret = -EBUSY;
+	ret = has_room_nb(ct, len + GUC_CTB_HDR_LEN);
+	if (unlikely(ret))
 		goto out;
-	}
 
 	fence = ct_get_next_fence(ct);
 	ret = ct_write(ct, action, len, fence, flags);
@@ -571,6 +609,9 @@ static int ct_send(struct intel_guc_ct *ct,
 	GEM_BUG_ON(!response_buf && response_buf_size);
 	might_sleep();
 
+	if (unlikely(ctb->broken))
+		return -EPIPE;
+
ok, but likely could be part of ct_can_send/has_room
 	/*
 	 * We use a lazy spin wait loop here as we believe that if the CT
 	 * buffers are sized correctly the flow control condition should be
@@ -579,8 +620,13 @@ static int ct_send(struct intel_guc_ct *ct,
 retry:
 	spin_lock_irqsave(&ctb->lock, flags);
 	if (unlikely(!h2g_has_room(ctb, len + GUC_CTB_HDR_LEN))) {
+		if (ct->stall_time == KTIME_MAX)
+			ct->stall_time = ktime_get();
 		spin_unlock_irqrestore(&ctb->lock, flags);
 
+		if (unlikely(ct_deadlocked(ct)))
+			return -EPIPE;
+
can't we really put all this into one place?
static int ct_can_send(struct intel_guc_ct *ct, u32 len_dw, bool wait)
{
	struct intel_guc_ct_buffer *ctb = &ct->ctbs.send;

	lockdep_assert_held(&ct->ctbs.send.lock);

retry:
	if (ct->broken)
		return -EPIPE;

	if (unlikely(!ctb_has_room(ctb, len_dw + GUC_CTB_HDR_LEN))) {
		if (ct->stall_time == KTIME_MAX)
			ct->stall_time = ktime_get();

		if (unlikely(ct_deadlocked(ct)))
			return -EPIPE;

		if (!wait)
			return -EBUSY;

		spin_unlock_irqrestore(&ctb->lock, flags);
		...
		spin_lock_irqsave(&ctb->lock, flags);

		goto retry;
	}

	ct->stall_time = KTIME_MAX;
	return 0;
}
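With a helper along those lines (just a sketch, assuming the hypothetical ct_can_send() above and the callers' existing ctb->lock/flags handling), the two call sites would then reduce to roughly:

	/* ct_send_nb(), under ctb->lock, non-blocking: */
	ret = ct_can_send(ct, len + GUC_CTB_HDR_LEN, false);
	if (unlikely(ret))
		goto out;

	/* ct_send(), under ctb->lock, allowed to sleep: */
	ret = ct_can_send(ct, len + GUC_CTB_HDR_LEN, true);
	if (unlikely(ret)) {
		spin_unlock_irqrestore(&ctb->lock, flags);
		return ret;
	}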
Michal
 		if (msleep_interruptible(sleep_period_ms))
 			return -EINTR;
 		sleep_period_ms = sleep_period_ms << 1;
@@ -588,6 +634,8 @@ static int ct_send(struct intel_guc_ct *ct,
 		goto retry;
 	}
 
+	ct->stall_time = KTIME_MAX;
+
 	fence = ct_get_next_fence(ct);
 	request.fence = fence;
 	request.status = 0;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h
index f6a4d5b33467..c9d6ae7848a7 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h
@@ -9,6 +9,7 @@
 #include <linux/interrupt.h>
 #include <linux/spinlock.h>
 #include <linux/workqueue.h>
+#include <linux/ktime.h>
 
 #include "intel_guc_fwif.h"
@@ -68,6 +69,9 @@ struct intel_guc_ct {
 		struct list_head incoming; /* incoming requests */
 		struct work_struct worker; /* handler for incoming requests */
 	} requests;
+
+	/** @stall_time: time of first time a CTB submission is stalled */
+	ktime_t stall_time;
 };
 
 void intel_guc_ct_init_early(struct intel_guc_ct *ct);