On 07/01/2022 14:23, Thomas Hellström wrote:
Add a selftest to exercise asynchronous migration and -unbinding. Extend the gem_migrate selftest to perform the migrations while depending on a spinner and a bound vma set up on the migrated buffer object.
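For anyone following along, here is a rough sketch of how the new __igt_lmem_pages_migrate() entry point introduced below might be driven per engine, with the spinner fence collected into an i915_deps as the migration dependency. This is pieced together from the signatures visible in this patch: igt_async_migrate_one() is a made-up name, context setup and error handling are abbreviated, and I'm assuming __igt_lmem_pages_migrate() ends the spinner itself before its final sync, since it is handed both spin and spin_fence:

static int igt_async_migrate_one(struct intel_gt *gt,
				 struct intel_engine_cs *engine,
				 struct i915_address_space *vm,
				 struct igt_spinner *spin)
{
	struct ttm_operation_ctx ctx = { .interruptible = true };
	struct dma_fence *spin_fence;
	struct intel_context *ce;
	struct i915_request *rq;
	struct i915_deps deps;
	int err;

	ce = intel_context_create(engine);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	/* Submit a spinning request and keep its fence as a dependency. */
	i915_deps_init(&deps, GFP_KERNEL);
	rq = igt_spinner_create_request(spin, ce, MI_ARB_CHECK);
	intel_context_put(ce);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto out_deps;
	}

	err = i915_deps_add_dependency(&deps, &rq->fence, &ctx);
	spin_fence = dma_fence_get(&rq->fence);
	i915_request_add(rq);
	if (err)
		goto out_fence;

	/* All migrations now have to queue up behind the spinner. */
	err = __igt_lmem_pages_migrate(gt, vm, &deps, spin, spin_fence);

out_fence:
	dma_fence_put(spin_fence);
out_deps:
	i915_deps_fini(&deps);
	return err;
}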
Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
---
 drivers/gpu/drm/i915/gem/i915_gem_object.c    |  12 ++
 drivers/gpu/drm/i915/gem/i915_gem_object.h    |   3 +
 .../drm/i915/gem/selftests/i915_gem_migrate.c | 192 ++++++++++++++++--
 3 files changed, 192 insertions(+), 15 deletions(-)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index d87b508b59b1..1a9e1f940a7d 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -756,6 +756,18 @@ i915_gem_object_get_moving_fence(struct drm_i915_gem_object *obj)
 	return dma_fence_get(i915_gem_to_ttm(obj)->moving);
 }
 
+void i915_gem_object_set_moving_fence(struct drm_i915_gem_object *obj,
+				      struct dma_fence *fence)
+{
+	struct dma_fence **moving = &i915_gem_to_ttm(obj)->moving;
+
+	if (*moving == fence)
+		return;
+
+	dma_fence_put(*moving);
+	*moving = dma_fence_get(fence);
+}
+
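A small aside: given that the TTM moving fence is otherwise managed under the object lock, I assume this setter is meant to be called with that lock held as well. Purely illustrative (rq here being whatever request produced the move/clear):

	/* Illustrative: update the moving fence under the object lock. */
	assert_object_held(obj);
	i915_gem_object_set_moving_fence(obj, &rq->fence);

Might be worth an assert_object_held() in the helper itself.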
 /**
  * i915_gem_object_wait_moving_fence - Wait for the object's moving fence if any
  * @obj: The object whose moving fence to wait for.
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index f66d46882ea7..1d17ffff8236 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -524,6 +524,9 @@ i915_gem_object_finish_access(struct drm_i915_gem_object *obj)
 struct dma_fence *
 i915_gem_object_get_moving_fence(struct drm_i915_gem_object *obj);
 
+void i915_gem_object_set_moving_fence(struct drm_i915_gem_object *obj,
+				      struct dma_fence *fence);
+
 int i915_gem_object_wait_moving_fence(struct drm_i915_gem_object *obj,
 				      bool intr);
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c
index ecb691c81d1e..d534141b2cf7 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c
@@ -4,8 +4,13 @@
  */
 
#include "gt/intel_migrate.h" +#include "gt/intel_gpu_commands.h" #include "gem/i915_gem_ttm_move.h"
+#include "i915_deps.h"
+#include "selftests/igt_spinner.h"
- static int igt_fill_check_buffer(struct drm_i915_gem_object *obj, bool fill) {
@@ -101,7 +106,8 @@ static int igt_same_create_migrate(void *arg)
 }
 
 static int lmem_pages_migrate_one(struct i915_gem_ww_ctx *ww,
-				  struct drm_i915_gem_object *obj)
+				  struct drm_i915_gem_object *obj,
+				  struct i915_vma *vma)
 {
 	int err;
 
@@ -109,6 +115,24 @@ static int lmem_pages_migrate_one(struct i915_gem_ww_ctx *ww,
 	if (err)
 		return err;
 
+	if (vma) {
+		err = i915_vma_pin_ww(vma, ww, obj->base.size, 0,
+				      0UL | PIN_OFFSET_FIXED |
+				      PIN_USER);
+		if (err) {
+			if (err != -EINTR && err != -ERESTARTSYS &&
+			    err != -EDEADLK)
+				pr_err("Failed to pin vma.\n");
+			return err;
+		}
+
+		i915_vma_unpin(vma);
+	}
+
+	/*
+	 * Migration will implicitly unbind (asynchronously) any bound
+	 * vmas.
+	 */
 	if (i915_gem_object_is_lmem(obj)) {
 		err = i915_gem_object_migrate(obj, ww, INTEL_REGION_SMEM);
 		if (err) {
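As an aside, if we ever want the test to assert that the implicit unbind actually happened, something like the below after the moving fence has signalled could work. Just a sketch, not asking for it in this patch:

	/* Sketch: the old binding should be gone once migration completes. */
	if (vma && i915_vma_is_bound(vma, I915_VMA_LOCAL_BIND |
				     I915_VMA_GLOBAL_BIND))
		pr_err("vma still bound after migration\n");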
@@ -149,11 +173,15 @@ static int lmem_pages_migrate_one(struct i915_gem_ww_ctx *ww,
 	return err;
 }
 
-static int igt_lmem_pages_migrate(void *arg)
+static int __igt_lmem_pages_migrate(struct intel_gt *gt,
+				    struct i915_address_space *vm,
+				    struct i915_deps *deps,
+				    struct igt_spinner *spin,
+				    struct dma_fence *spin_fence)
 {
-	struct intel_gt *gt = arg;
 	struct drm_i915_private *i915 = gt->i915;
 	struct drm_i915_gem_object *obj;
+	struct i915_vma *vma = NULL;
 	struct i915_gem_ww_ctx ww;
 	struct i915_request *rq;
 	int err;
@@ -165,6 +193,14 @@ static int igt_lmem_pages_migrate(void *arg)
 	if (IS_ERR(obj))
 		return PTR_ERR(obj);
 
+	if (vm) {
+		vma = i915_vma_instance(obj, vm, NULL);
+		if (IS_ERR(vma)) {
+			err = PTR_ERR(vma);
+			goto out_put;
+		}
+	}
+
 	/* Initial GPU fill, sync, CPU initialization. */
 	for_i915_gem_ww(&ww, err, true) {
 		err = i915_gem_object_lock(obj, &ww);
@@ -175,25 +211,23 @@ static int igt_lmem_pages_migrate(void *arg)
 		if (err)
 			continue;
 
-		err = intel_migrate_clear(&gt->migrate, &ww, NULL,
+		err = intel_migrate_clear(&gt->migrate, &ww, deps,
 					  obj->mm.pages->sgl, obj->cache_level,
 					  i915_gem_object_is_lmem(obj),
 					  0xdeadbeaf, &rq);
 		if (rq) {
-			dma_resv_add_excl_fence(obj->base.resv, &rq->fence);
+			i915_gem_object_set_moving_fence(obj, &rq->fence);
 			i915_request_put(rq);
 		}
 		if (err)
 			continue;
 
-		err = i915_gem_object_wait(obj, I915_WAIT_INTERRUPTIBLE,
-					   5 * HZ);
-		if (err)
-			continue;
-
-		err = igt_fill_check_buffer(obj, true);
-		if (err)
-			continue;
+		if (!vma) {
+			err = igt_fill_check_buffer(obj, true);
+			if (err)
+				continue;
+		}
 	}
Don't we need some kind of sync in here?
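To spell out what I mean: something along the lines of the object wait this hunk removes, before igt_fill_check_buffer() touches the pages from the CPU. A sketch only:

	/* Sketch: wait for the async clear/migration before the CPU read. */
	err = i915_gem_object_wait(obj, I915_WAIT_INTERRUPTIBLE, 5 * HZ);
	if (err)
		continue;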
Otherwise,
Reviewed-by: Matthew Auld <matthew.auld@intel.com>