On 2021-06-15 at 13:36:00 +0200, Thomas Hellström wrote:
To help avoid evicting already resident buffers from the batch we're processing, perform locking as a separate step.
Looks reasonable to me.
Reviewed-by: Ramalingam C <ramalingam.c@intel.com>
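As an aside for readers of the thread, a minimal standalone sketch of the two-phase flow this patch introduces (lock every object in the batch first, then pin/validate in a second pass) could look like the code below. The types and helper names here (struct obj, lock_all(), validate_all()) are made up purely for illustration and are not the i915 helpers; the real work is done by i915_gem_object_lock() and eb_pin_vma() as in the quoted diff.

/*
 * Standalone sketch only -- not i915 code.  Models the two-phase flow:
 * phase 1 locks every object in the batch, phase 2 pins/validates them.
 * Because every batch object is already locked before any pinning can
 * trigger eviction, the eviction path cannot pick those objects as
 * victims while the batch is being processed.
 */
#include <stdbool.h>
#include <stdio.h>

#define BUFFER_COUNT 4

struct obj {                    /* stand-in for a GEM object/vma pair */
	bool locked;            /* stand-in for i915_gem_object_lock() */
	bool pinned;            /* stand-in for eb_pin_vma() */
};

/* Phase 1: take the lock on every object, nothing else. */
static int lock_all(struct obj *objs, unsigned int count)
{
	unsigned int i;

	for (i = 0; i < count; i++)
		objs[i].locked = true;
	return 0;
}

/* Phase 2: with everything already locked, pin/validate each object. */
static int validate_all(struct obj *objs, unsigned int count)
{
	unsigned int i;
	int err;

	err = lock_all(objs, count);    /* the separate locking step */
	if (err)
		return err;

	for (i = 0; i < count; i++)
		objs[i].pinned = true;
	return 0;
}

int main(void)
{
	struct obj objs[BUFFER_COUNT] = { 0 };

	if (validate_all(objs, BUFFER_COUNT))
		return 1;
	printf("locked and pinned %d objects\n", BUFFER_COUNT);
	return 0;
}

The point, as I read the commit message, is simply that by the time any pinning (and hence any potential eviction) happens, every object belonging to the batch already holds its lock, so the eviction path cannot select it as a victim.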
Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
---
 .../gpu/drm/i915/gem/i915_gem_execbuffer.c | 25 ++++++++++++++++---
 1 file changed, 21 insertions(+), 4 deletions(-)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index 201fed19d120..394eb40c95b5 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -922,21 +922,38 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
 	return err;
 }
 
-static int eb_validate_vmas(struct i915_execbuffer *eb)
+static int eb_lock_vmas(struct i915_execbuffer *eb)
 {
 	unsigned int i;
 	int err;
 
-	INIT_LIST_HEAD(&eb->unbound);
-
 	for (i = 0; i < eb->buffer_count; i++) {
-		struct drm_i915_gem_exec_object2 *entry = &eb->exec[i];
 		struct eb_vma *ev = &eb->vma[i];
 		struct i915_vma *vma = ev->vma;
 
 		err = i915_gem_object_lock(vma->obj, &eb->ww);
 		if (err)
 			return err;
+	}
+
+	return 0;
+}
+
+static int eb_validate_vmas(struct i915_execbuffer *eb)
+{
+	unsigned int i;
+	int err;
+
+	INIT_LIST_HEAD(&eb->unbound);
+
+	err = eb_lock_vmas(eb);
+	if (err)
+		return err;
+
+	for (i = 0; i < eb->buffer_count; i++) {
+		struct drm_i915_gem_exec_object2 *entry = &eb->exec[i];
+		struct eb_vma *ev = &eb->vma[i];
+		struct i915_vma *vma = ev->vma;
 
 		err = eb_pin_vma(eb, entry, ev);
 		if (err == -EDEADLK)
-- 
2.31.1