A non-recoverable context must be used if the user wants proper error capture on discrete platforms. In the future the kernel may want to blit the contents of some objects when later doing the capture stage. Also extend this to newer integrated platforms.
v2(Thomas):
  - Also extend to newer integrated platforms, for capture buffer memory allocation purposes.
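To illustrate the userspace side (not part of this patch): before submitting objects with EXEC_OBJECT_CAPTURE on discrete parts, the application should mark its context non-recoverable. A minimal sketch, assuming fd is an open i915 DRM fd and ctx_id is a context the application has already created:

  #include <string.h>
  #include <stdint.h>
  #include <xf86drm.h>
  #include <drm/i915_drm.h>

  /* Clear RECOVERABLE on the context so EXEC_OBJECT_CAPTURE submissions stay allowed. */
  static int make_ctx_non_recoverable(int fd, uint32_t ctx_id)
  {
          struct drm_i915_gem_context_param p;

          memset(&p, 0, sizeof(p));
          p.ctx_id = ctx_id;
          p.param = I915_CONTEXT_PARAM_RECOVERABLE;
          p.value = 0; /* contexts are recoverable by default */

          return drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
  }

With the context flagged this way, execbuf calls that set EXEC_OBJECT_CAPTURE are not rejected by the new check in eb_capture_stage().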
Testcase: igt@gem_exec_capture@capture-recoverable
Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Cc: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
Cc: Jon Bloomfield <jon.bloomfield@intel.com>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: Jordan Justen <jordan.l.justen@intel.com>
Cc: Kenneth Graunke <kenneth@whitecape.org>
Cc: Akeem G Abodunrin <akeem.g.abodunrin@intel.com>
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
---
 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c | 14 +++++++++++---
 1 file changed, 11 insertions(+), 3 deletions(-)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index 30fe847c6664..6ac88e2193f1 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -1951,7 +1951,7 @@ eb_find_first_request_added(struct i915_execbuffer *eb)
 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
 
 /* Stage with GFP_KERNEL allocations before we enter the signaling critical path */
-static void eb_capture_stage(struct i915_execbuffer *eb)
+static int eb_capture_stage(struct i915_execbuffer *eb)
 {
 	const unsigned int count = eb->buffer_count;
 	unsigned int i = count, j;
@@ -1964,6 +1964,10 @@ static void eb_capture_stage(struct i915_execbuffer *eb)
 		if (!(flags & EXEC_OBJECT_CAPTURE))
 			continue;
 
+		if (i915_gem_context_is_recoverable(eb->gem_context) &&
+		    (IS_DGFX(eb->i915) || GRAPHICS_VER_FULL(eb->i915) > IP_VER(12, 0)))
+			return -EINVAL;
+
 		for_each_batch_create_order(eb, j) {
 			struct i915_capture_list *capture;
 
@@ -1976,6 +1980,8 @@ static void eb_capture_stage(struct i915_execbuffer *eb)
 			eb->capture_lists[j] = capture;
 		}
 	}
+
+	return 0;
 }
 
 /* Commit once we're in the critical path */
@@ -2017,7 +2023,7 @@ static void eb_capture_list_clear(struct i915_execbuffer *eb)
 
 #else
 
-static void eb_capture_stage(struct i915_execbuffer *eb)
+static int eb_capture_stage(struct i915_execbuffer *eb)
 {
 }
 
@@ -3410,7 +3416,9 @@ i915_gem_do_execbuffer(struct drm_device *dev,
 	}
 
 	ww_acquire_done(&eb.ww.ctx);
-	eb_capture_stage(&eb);
+	err = eb_capture_stage(&eb);
+	if (err)
+		goto err_vma;
 
 	out_fence = eb_requests_create(&eb, in_fence, out_fence_fd);
 	if (IS_ERR(out_fence)) {