On Tue, Aug 03, 2021 at 03:29:07PM -0700, Matthew Brost wrote:
Taking a PM reference to prevent intel_gt_wait_for_idle from short-circuiting while scheduling could still be enabled on a user context.
Signed-off-by: Matthew Brost matthew.brost@intel.com
drivers/gpu/drm/i915/Makefile | 1 + .../gpu/drm/i915/gt/uc/intel_guc_submission.c | 36 +++++++++++++++++-- 2 files changed, 34 insertions(+), 3 deletions(-)
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index 903de270f2db..5e3a1e2095b0 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -103,6 +103,7 @@ gt-y += \ gt/intel_gt_clock_utils.o \ gt/intel_gt_irq.o \ gt/intel_gt_pm.o \
- gt/intel_gt_pm_unpark_work.o \
This file isn't here?
Also pm stuff tends to have very nasty locking requirements, and doing special stuff like this in the backend tends to lead to really big surprises. I think there are two options to make sure our locking design stays consistent: - Lift this to generic code. - Expose some engine_pm_might_get/put() calls which do have the right set of might_lock annotations, and call those in the generic code.
Imo the worst kernel abstractions are those where all implementations look and act the same, except for locking. Unfortunately the i915-gem code is full of this stuff, and we need to stop this by enlisting lockdep to check the contracts for us. -Daniel
gt/intel_gt_pm_irq.o \ gt/intel_gt_requests.o \ gt/intel_gtt.o \ diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c index 7fe4d1559a81..c5d9548bfd00 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c @@ -2056,7 +2056,12 @@ static int guc_context_pre_pin(struct intel_context *ce,
static int guc_context_pin(struct intel_context *ce, void *vaddr) {
- return __guc_context_pin(ce, ce->engine, vaddr);
- int ret = __guc_context_pin(ce, ce->engine, vaddr);
- if (likely(!ret && !intel_context_is_barrier(ce)))
intel_engine_pm_get(ce->engine);
- return ret;
}
static void guc_context_unpin(struct intel_context *ce) @@ -2067,6 +2072,9 @@ static void guc_context_unpin(struct intel_context *ce)
unpin_guc_id(guc, ce, true); lrc_unpin(ce);
- if (likely(!intel_context_is_barrier(ce)))
intel_engine_pm_put(ce->engine);
}
static void guc_context_post_unpin(struct intel_context *ce) @@ -3002,8 +3010,30 @@ static int guc_virtual_context_pre_pin(struct intel_context *ce, static int guc_virtual_context_pin(struct intel_context *ce, void *vaddr) { struct intel_engine_cs *engine = guc_virtual_get_sibling(ce->engine, 0);
- int ret = __guc_context_pin(ce, engine, vaddr);
- intel_engine_mask_t tmp, mask = ce->engine->mask;
- if (likely(!ret))
for_each_engine_masked(engine, ce->engine->gt, mask, tmp)
intel_engine_pm_get(engine);
- return __guc_context_pin(ce, engine, vaddr);
- return ret;
+}
+static void guc_virtual_context_unpin(struct intel_context *ce) +{
- intel_engine_mask_t tmp, mask = ce->engine->mask;
- struct intel_engine_cs *engine;
- struct intel_guc *guc = ce_to_guc(ce);
- GEM_BUG_ON(context_enabled(ce));
- GEM_BUG_ON(intel_context_is_barrier(ce));
- unpin_guc_id(guc, ce, true);
- lrc_unpin(ce);
- for_each_engine_masked(engine, ce->engine->gt, mask, tmp)
intel_engine_pm_put(engine);
}
static void guc_virtual_context_enter(struct intel_context *ce) @@ -3040,7 +3070,7 @@ static const struct intel_context_ops virtual_guc_context_ops = {
.pre_pin = guc_virtual_context_pre_pin, .pin = guc_virtual_context_pin,
- .unpin = guc_context_unpin,
.unpin = guc_virtual_context_unpin, .post_unpin = guc_context_post_unpin,
.ban = guc_context_ban,
-- 2.28.0