On Mon, Mar 21, 2022 at 02:58:51PM +0100, Christian König wrote:
We can now add multiple writers to the dma_resv object.
Also re-enable the check in dma_resv.c that containers are not added.
Signed-off-by: Christian König <christian.koenig@amd.com>
Cc: amd-gfx@lists.freedesktop.org
It's a bit much magic, but that's the entire point of your huge prep series: being able to have all the fences on a dma-resv :-)
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
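
For readers following along, the driver-side upshot is roughly the following. This is only an illustrative sketch, not code from the patch: the helper name is invented and it uses today's dma_resv_reserve_fences() naming.

#include <linux/dma-resv.h>

/* Sketch only (not from the patch): attach a submission fence directly
 * as one of possibly several writers.  Assumes the caller holds the
 * reservation lock; the helper name is made up.
 */
static int example_attach_writer(struct dma_resv *resv,
				 struct dma_fence *job_fence)
{
	int ret;

	dma_resv_assert_held(resv);

	/* Make room for one more fence before adding it. */
	ret = dma_resv_reserve_fences(resv, 1);
	if (ret)
		return ret;

	/* No dma_fence_chain wrapper needed anymore; several WRITE
	 * fences can simply coexist on the object.
	 */
	dma_resv_add_fence(resv, job_fence, DMA_RESV_USAGE_WRITE);
	return 0;
}
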
 drivers/dma-buf/dma-resv.c                  |  6 +--
 drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h |  1 -
 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c      | 51 ++-------------------
 3 files changed, 8 insertions(+), 50 deletions(-)
diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
index 26257ba1527e..10d70812373c 100644
--- a/drivers/dma-buf/dma-resv.c
+++ b/drivers/dma-buf/dma-resv.c
@@ -308,10 +308,10 @@ void dma_resv_add_fence(struct dma_resv *obj, struct dma_fence *fence,
 
 	dma_resv_assert_held(obj);
 
-	/* TODO: Drivers should not add containers here, instead add each fence
-	 * individually. Disabled for now until we cleaned up amdgpu/ttm.
+	/* Drivers should not add containers here, instead add each fence
+	 * individually.
 	 */
-	/* WARN_ON(dma_fence_is_container(fence)); */
+	WARN_ON(dma_fence_is_container(fence));
 
 	fobj = dma_resv_fences_list(obj);
 	count = fobj->num_fences;
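
A side note on the re-enabled WARN_ON(): a caller that still ends up with a fence container has to flatten it and add the members one by one. A minimal sketch of that, assuming a dma_fence_array, already reserved fence slots and the reservation lock held (helper name invented):

#include <linux/dma-fence-array.h>
#include <linux/dma-resv.h>

/* Sketch only: add the members of a dma_fence_array individually
 * instead of the container itself, which now triggers the WARN_ON()
 * above.  Assumes enough fence slots were reserved beforehand.
 */
static void example_add_array_fences(struct dma_resv *resv,
				     struct dma_fence_array *array,
				     enum dma_resv_usage usage)
{
	unsigned int i;

	dma_resv_assert_held(resv);

	for (i = 0; i < array->num_fences; ++i)
		dma_resv_add_fence(resv, array->fences[i], usage);
}
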
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h
index 044b41f0bfd9..529d52a204cf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h
@@ -34,7 +34,6 @@ struct amdgpu_fpriv;
 struct amdgpu_bo_list_entry {
 	struct ttm_validate_buffer	tv;
 	struct amdgpu_bo_va		*bo_va;
-	struct dma_fence_chain		*chain;
 	uint32_t			priority;
 	struct page			**user_pages;
 	bool				user_invalidated;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 1c039db976a9..88009833f523 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -575,14 +575,6 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
 		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
 
 		e->bo_va = amdgpu_vm_bo_find(vm, bo);
-
-		if (bo->tbo.base.dma_buf && !amdgpu_bo_explicit_sync(bo)) {
-			e->chain = dma_fence_chain_alloc();
-			if (!e->chain) {
-				r = -ENOMEM;
-				goto error_validate;
-			}
-		}
 	}
 
 	amdgpu_cs_get_threshold_for_moves(p->adev, &p->bytes_moved_threshold,
@@ -633,13 +625,8 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
 	}
 
 error_validate:
-	if (r) {
-		amdgpu_bo_list_for_each_entry(e, p->bo_list) {
-			dma_fence_chain_free(e->chain);
-			e->chain = NULL;
-		}
+	if (r)
 		ttm_eu_backoff_reservation(&p->ticket, &p->validated);
-	}
 out:
 	return r;
 }
@@ -679,17 +666,9 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error,
 				  bool backoff)
 {
 	unsigned i;
 
-	if (error && backoff) {
-		struct amdgpu_bo_list_entry *e;
-
-		amdgpu_bo_list_for_each_entry(e, parser->bo_list) {
-			dma_fence_chain_free(e->chain);
-			e->chain = NULL;
-		}
-
+	if (error && backoff)
 		ttm_eu_backoff_reservation(&parser->ticket, &parser->validated);
-	}
 
 	for (i = 0; i < parser->num_post_deps; i++) {
 		drm_syncobj_put(parser->post_deps[i].syncobj);
@@ -1264,29 +1243,9 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
 
 	amdgpu_vm_move_to_lru_tail(p->adev, &fpriv->vm);
 
-	amdgpu_bo_list_for_each_entry(e, p->bo_list) {
-		struct dma_resv *resv = e->tv.bo->base.resv;
-		struct dma_fence_chain *chain = e->chain;
-		struct dma_resv_iter cursor;
-		struct dma_fence *fence;
-
-		if (!chain)
-			continue;
-
-		/*
-		 * Work around dma_resv shortcommings by wrapping up the
-		 * submission in a dma_fence_chain and add it as exclusive
-		 * fence.
-		 */
-		dma_resv_for_each_fence(&cursor, resv,
-					DMA_RESV_USAGE_WRITE,
-					fence) {
-			break;
-		}
-		dma_fence_chain_init(chain, fence, dma_fence_get(p->fence), 1);
-		dma_resv_add_fence(resv, &chain->base, DMA_RESV_USAGE_WRITE);
-		e->chain = NULL;
-	}
+	/* Make sure all BOs are remembered as writers */
+	amdgpu_bo_list_for_each_entry(e, p->bo_list)
+		e->tv.num_shared = 0;
 
 	ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence);
 	mutex_unlock(&p->adev->notifier_lock);
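
For completeness, the consumer side of "multiple writers": with the chain dance above gone, anything that has to wait for all writers simply walks them through the usage-aware iterator. Sketch only, function name invented, reservation lock assumed held:

#include <linux/dma-resv.h>

/* Sketch only: wait for every writer on the object instead of a single
 * exclusive fence.  Caller holds the reservation lock.
 */
static long example_wait_for_writers(struct dma_resv *resv)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;

	dma_resv_for_each_fence(&cursor, resv, DMA_RESV_USAGE_WRITE, fence) {
		long ret = dma_fence_wait(fence, true);

		if (ret)
			return ret;
	}

	return 0;
}
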
-- 
2.25.1