On Thu, May 06, 2021 at 12:13:30PM -0700, Matthew Brost wrote:
> From: Michal Wajdeczko <michal.wajdeczko@intel.com>
>
> We want to stop using guc.send_mutex while sending CTB messages so we
> have to start protecting access to CTB send descriptor.
>
> For completeness protect also CTB send descriptor.

Michal, I think you have a typo here: "receive descriptor", right? Again,
this is going to get squashed in the firmware update patch, but I thought
I'd mention it.

With that:
Reviewed-by: Matthew Brost <matthew.brost@intel.com>

> Add spinlock to struct intel_guc_ct_buffer and start using it.
>
> Signed-off-by: Michal Wajdeczko <michal.wajdeczko@intel.com>
> Signed-off-by: Matthew Brost <matthew.brost@intel.com>
> ---
>  drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c | 14 ++++++++++++--
>  drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h |  2 ++
>  2 files changed, 14 insertions(+), 2 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
> index a4b2e7fe318b..bee0958d8bae 100644
> --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
> +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
> @@ -89,6 +89,8 @@ static void ct_incoming_request_worker_func(struct work_struct *w);
>   */
>  void intel_guc_ct_init_early(struct intel_guc_ct *ct)
>  {
> +	spin_lock_init(&ct->ctbs.send.lock);
> +	spin_lock_init(&ct->ctbs.recv.lock);
>  	spin_lock_init(&ct->requests.lock);
>  	INIT_LIST_HEAD(&ct->requests.pending);
>  	INIT_LIST_HEAD(&ct->requests.incoming);
>
> @@ -479,17 +481,22 @@ static int ct_send(struct intel_guc_ct *ct,
>  	GEM_BUG_ON(len & ~GUC_CT_MSG_LEN_MASK);
>  	GEM_BUG_ON(!response_buf && response_buf_size);
>
> +	spin_lock_irqsave(&ct->ctbs.send.lock, flags);
> +
>  	fence = ct_get_next_fence(ct);
>  	request.fence = fence;
>  	request.status = 0;
>  	request.response_len = response_buf_size;
>  	request.response_buf = response_buf;
>
> -	spin_lock_irqsave(&ct->requests.lock, flags);
> +	spin_lock(&ct->requests.lock);
>  	list_add_tail(&request.link, &ct->requests.pending);
> -	spin_unlock_irqrestore(&ct->requests.lock, flags);
> +	spin_unlock(&ct->requests.lock);
>
>  	err = ct_write(ct, action, len, fence);
>
> +	spin_unlock_irqrestore(&ct->ctbs.send.lock, flags);
> +
>  	if (unlikely(err))
>  		goto unlink;
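
One observation for anyone following along: ct->requests.lock now nests
inside the new ctbs.send.lock, which is why the inner pair drops from
spin_lock_irqsave() to plain spin_lock() - interrupts are already disabled
by the outer irqsave. Condensed, the post-patch flow in ct_send() looks
roughly like this (sketch only, error handling and request setup trimmed):

	spin_lock_irqsave(&ct->ctbs.send.lock, flags);	/* outer: CTB send lock */

	fence = ct_get_next_fence(ct);
	/* ... fill in the request ... */

	spin_lock(&ct->requests.lock);			/* inner: irqs already off */
	list_add_tail(&request.link, &ct->requests.pending);
	spin_unlock(&ct->requests.lock);

	err = ct_write(ct, action, len, fence);		/* descriptor + cmds now protected */

	spin_unlock_irqrestore(&ct->ctbs.send.lock, flags);
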
> @@ -825,6 +832,7 @@ static int ct_handle_request(struct intel_guc_ct *ct, const u32 *msg)
>  void intel_guc_ct_event_handler(struct intel_guc_ct *ct)
>  {
>  	u32 msg[GUC_CT_MSG_LEN_MASK + 1]; /* one extra dw for the header */
> +	unsigned long flags;
>  	int err = 0;
>
>  	if (unlikely(!ct->enabled)) {
> @@ -833,7 +841,9 @@ void intel_guc_ct_event_handler(struct intel_guc_ct *ct)
>  	}
>
>  	do {
> +		spin_lock_irqsave(&ct->ctbs.recv.lock, flags);
>  		err = ct_read(ct, msg);
> +		spin_unlock_irqrestore(&ct->ctbs.recv.lock, flags);
>  		if (err)
>  			break;
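
Also worth noting: only the ct_read() call itself sits under the new recv
lock; message dispatch stays outside the critical section. Loosely (sketch,
with the dispatch step standing in for the existing response/request
handling below this hunk):

	do {
		spin_lock_irqsave(&ct->ctbs.recv.lock, flags);
		err = ct_read(ct, msg);		/* only descriptor access is locked */
		spin_unlock_irqrestore(&ct->ctbs.recv.lock, flags);
		if (err)
			break;

		/* dispatch (e.g. ct_handle_request()) runs unlocked */
		...
	} while (!err);

That keeps the lock hold time minimal, which seems desirable given this can
be called from interrupt handling (hence the irqsave variants).
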
>
> diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h
> index fc9486779e87..bc52dc479a14 100644
> --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h
> +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h
> @@ -27,11 +27,13 @@ struct intel_guc;
>   * record (command transport buffer descriptor) and the actual buffer which
>   * holds the commands.
>   *
> + * @lock: protects access to the commands buffer and buffer descriptor
>   * @desc: pointer to the buffer descriptor
>   * @cmds: pointer to the commands buffer
>   * @size: size of the commands buffer
>   */
>  struct intel_guc_ct_buffer {
> +	spinlock_t lock;
>  	struct guc_ct_buffer_desc *desc;
>  	u32 *cmds;
>  	u32 size;
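
One last note, mostly for the record: the new @lock kerneldoc makes the
ownership rule clear - anything touching a buffer's desc or cmds should
hold that buffer's own lock, and since the lock is per-buffer rather than
CT-level, the send and receive paths never contend with each other. Usage
ends up looking like this (sketch; the local ctb variable is just for
illustration):

	struct intel_guc_ct_buffer *ctb = &ct->ctbs.send;

	spin_lock_irqsave(&ctb->lock, flags);
	/* safe to touch both ctb->desc and ctb->cmds here */
	spin_unlock_irqrestore(&ctb->lock, flags);

Nice incremental step away from guc.send_mutex.
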
> --
> 2.28.0