Instead of signaling failure by setting the node pointer to NULL, do so by returning -ENOSPC.
Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c   |  4 +---
 drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c  |  5 ++---
 drivers/gpu/drm/nouveau/nouveau_ttm.c         |  8 --------
 drivers/gpu/drm/ttm/ttm_bo.c                  | 11 +++++------
 drivers/gpu/drm/ttm/ttm_bo_manager.c          |  2 +-
 drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c |  4 +---
 6 files changed, 10 insertions(+), 24 deletions(-)
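Just to illustrate the new contract with a self-contained sketch (made-up demo_* names, not the real TTM structs or callbacks): the backend reports exhaustion as -ENOSPC itself instead of returning 0 with a NULL mm_node, and only real errors stay fatal for the caller.

#include <linux/errno.h>
#include <linux/slab.h>

/* made-up stand-ins for the manager state and the ttm_mem_reg */
struct demo_mgr { unsigned long available; };
struct demo_mem { void *mm_node; unsigned long num_pages; };

/* what a get_node() style backend looks like after this change */
static int demo_mgr_get_node(struct demo_mgr *mgr, struct demo_mem *mem)
{
	if (mem->num_pages > mgr->available)
		return -ENOSPC;		/* was: mem->mm_node = NULL; return 0; */

	mem->mm_node = kzalloc(sizeof(unsigned long), GFP_KERNEL);
	if (!mem->mm_node)
		return -ENOMEM;		/* hard errors are still just returned */

	mgr->available -= mem->num_pages;
	return 0;
}

/* caller side: -ENOSPC means "try the next placement", everything else is fatal */
static int demo_mem_space(struct demo_mgr *mgrs, unsigned int num_mgrs,
			  struct demo_mem *mem)
{
	unsigned int i;
	int ret;

	for (i = 0; i < num_mgrs; ++i) {
		ret = demo_mgr_get_node(&mgrs[i], mem);
		if (ret == -ENOSPC)
			continue;	/* no space here, go on to the next one */
		return ret;		/* success (0) or a real error */
	}

	return -ENOMEM;			/* nothing fit anywhere */
}

That mirrors the ttm_bo_mem_space()/ttm_bo_mem_force_space() hunks below, where -ENOSPC now means "try the next placement or evict" and everything else bails out.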
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
index 627104401e84..2baa80224fa4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
@@ -229,7 +229,7 @@ static int amdgpu_gtt_mgr_new(struct ttm_mem_type_manager *man,
 	if ((&tbo->mem == mem || tbo->mem.mem_type != TTM_PL_TT) &&
 	    atomic64_read(&mgr->available) < mem->num_pages) {
 		spin_unlock(&mgr->lock);
-		return 0;
+		return -ENOSPC;
 	}
 	atomic64_sub(mem->num_pages, &mgr->available);
 	spin_unlock(&mgr->lock);
@@ -249,8 +249,6 @@ static int amdgpu_gtt_mgr_new(struct ttm_mem_type_manager *man,
 		r = amdgpu_gtt_mgr_alloc(man, tbo, place, mem);
 		if (unlikely(r)) {
 			kfree(node);
-			mem->mm_node = NULL;
-			r = 0;
 			goto err_out;
 		}
 	} else {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index 128a667ed8fa..e8d1dd564006 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -336,8 +336,7 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
 	mem_bytes = (u64)mem->num_pages << PAGE_SHIFT;
 	if (atomic64_add_return(mem_bytes, &mgr->usage) > max_bytes) {
 		atomic64_sub(mem_bytes, &mgr->usage);
-		mem->mm_node = NULL;
-		return 0;
+		return -ENOSPC;
 	}

 	if (place->flags & TTM_PL_FLAG_CONTIGUOUS) {
@@ -417,7 +416,7 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
 	atomic64_sub(mem->num_pages << PAGE_SHIFT, &mgr->usage);

 	kvfree(nodes);
-	return r == -ENOSPC ? 0 : r;
+	return r;
 }

 /**
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index 7ca0a2498532..e89ea052cf71 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -75,10 +75,6 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
 	ret = nouveau_mem_vram(reg, nvbo->contig, nvbo->page);
 	if (ret) {
 		nouveau_mem_del(reg);
-		if (ret == -ENOSPC) {
-			reg->mm_node = NULL;
-			return 0;
-		}
 		return ret;
 	}

@@ -139,10 +135,6 @@ nv04_gart_manager_new(struct ttm_mem_type_manager *man,
 			   reg->num_pages << PAGE_SHIFT, &mem->vma[0]);
 	if (ret) {
 		nouveau_mem_del(reg);
-		if (ret == -ENOSPC) {
-			reg->mm_node = NULL;
-			return 0;
-		}
 		return ret;
 	}

diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index f73b81c2576e..15f9b19fa00d 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -916,10 +916,10 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
 	ticket = dma_resv_locking_ctx(bo->base.resv);
 	do {
 		ret = (*man->func->get_node)(man, bo, place, mem);
-		if (unlikely(ret != 0))
-			return ret;
-		if (mem->mm_node)
+		if (likely(!ret))
 			break;
+		if (unlikely(ret != -ENOSPC))
+			return ret;
 		ret = ttm_mem_evict_first(bdev, mem->mem_type, place, ctx,
					  ticket);
 		if (unlikely(ret != 0))
@@ -1063,12 +1063,11 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,

 		man = &bdev->man[mem->mem_type];
 		ret = (*man->func->get_node)(man, bo, place, mem);
+		if (ret == -ENOSPC)
+			continue;
 		if (unlikely(ret))
 			goto error;

-		if (!mem->mm_node)
-			continue;
-
 		ret = ttm_bo_add_move_fence(bo, man, mem, ctx->no_wait_gpu);
 		if (unlikely(ret)) {
 			(*man->func->put_node)(man, mem);
diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c
index 18d3debcc949..facd3049c3aa 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_manager.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_manager.c
@@ -86,7 +86,7 @@ static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
 		mem->start = node->start;
 	}

-	return 0;
+	return ret;
 }

 static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
index 7da752ca1c34..4a76fc7114ad 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
@@ -53,8 +53,6 @@ static int vmw_gmrid_man_get_node(struct ttm_mem_type_manager *man,
 		(struct vmwgfx_gmrid_man *)man->priv;
 	int id;

-	mem->mm_node = NULL;
-
 	id = ida_alloc_max(&gman->gmr_ida, gman->max_gmr_ids - 1, GFP_KERNEL);
 	if (id < 0)
 		return (id != -ENOMEM ? 0 : id);
@@ -78,7 +76,7 @@ static int vmw_gmrid_man_get_node(struct ttm_mem_type_manager *man,
 	gman->used_gmr_pages -= bo->num_pages;
 	spin_unlock(&gman->lock);
 	ida_free(&gman->gmr_ida, id);
-	return 0;
+	return -ENOSPC;
 }

 static void vmw_gmrid_man_put_node(struct ttm_mem_type_manager *man,
We only need the page array when the BO is about to be accessed.
So not only populate it on demand, but create it on demand as well.
Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/ttm/ttm_bo.c      | 26 ++++----------------------
 drivers/gpu/drm/ttm/ttm_bo_util.c |  9 +++++++--
 drivers/gpu/drm/ttm/ttm_bo_vm.c   |  5 +++++
 3 files changed, 16 insertions(+), 24 deletions(-)
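The access-time pattern, sketched with made-up demo_* stand-ins (the real calls are ttm_tt_create() and ttm_tt_populate(), used this way in the kmap and CPU fault paths below):

#include <linux/errno.h>
#include <linux/slab.h>

/* made-up object with a lazily created page array */
struct demo_tt { void *pages; };
struct demo_bo { struct demo_tt *ttm; };	/* ttm stays NULL until first access */

/* stand-in for ttm_tt_create(): allocate the (still empty) page array object */
static int demo_tt_create(struct demo_bo *bo)
{
	bo->ttm = kzalloc(sizeof(*bo->ttm), GFP_KERNEL);
	return bo->ttm ? 0 : -ENOMEM;
}

/* stand-in for ttm_tt_populate(): actually back it with pages (elided here) */
static int demo_tt_populate(struct demo_tt *ttm)
{
	return 0;
}

/* the shape of the access paths after this patch */
static int demo_bo_cpu_access(struct demo_bo *bo)
{
	int ret;

	if (!bo->ttm) {			/* create on demand ... */
		ret = demo_tt_create(bo);
		if (ret)
			return ret;
	}

	return demo_tt_populate(bo->ttm);	/* ... and populate on demand */
}

In the hunks below ttm_bo_validate() and ttm_bo_evict() stop creating the TTM up front, while ttm_bo_kmap_ttm() and ttm_bo_vm_fault_reserved() create it right before they populate it.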
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 15f9b19fa00d..0e0a9dadf3ed 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -667,13 +667,8 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo,
 	placement.num_busy_placement = 0;
 	bdev->driver->evict_flags(bo, &placement);

-	if (!placement.num_placement && !placement.num_busy_placement) {
-		ret = ttm_bo_pipeline_gutting(bo);
-		if (ret)
-			return ret;
-
-		return ttm_tt_create(bo, false);
-	}
+	if (!placement.num_placement && !placement.num_busy_placement)
+		return ttm_bo_pipeline_gutting(bo);

 	evict_mem = bo->mem;
 	evict_mem.mm_node = NULL;
@@ -1200,13 +1195,8 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
 	/*
 	 * Remove the backing store if no placement is given.
 	 */
-	if (!placement->num_placement && !placement->num_busy_placement) {
-		ret = ttm_bo_pipeline_gutting(bo);
-		if (ret)
-			return ret;
-
-		return ttm_tt_create(bo, false);
-	}
+	if (!placement->num_placement && !placement->num_busy_placement)
+		return ttm_bo_pipeline_gutting(bo);

 	/*
 	 * Check whether we need to move buffer.
@@ -1223,14 +1213,6 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
 		ttm_flag_masked(&bo->mem.placement, new_flags,
 				~TTM_PL_MASK_MEMTYPE);
 	}
-	/*
-	 * We might need to add a TTM.
-	 */
-	if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
-		ret = ttm_tt_create(bo, true);
-		if (ret)
-			return ret;
-	}
 	return 0;
 }
 EXPORT_SYMBOL(ttm_bo_validate);
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 52d2b71f1588..f8414f820350 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -580,12 +580,17 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
 		.interruptible = false,
 		.no_wait_gpu = false
 	};
-	struct ttm_tt *ttm = bo->ttm;
+	struct ttm_tt *ttm;
 	pgprot_t prot;
 	int ret;

-	BUG_ON(!ttm);
+	if (!bo->ttm) {
+		ret = ttm_tt_create(bo, true);
+		if (ret)
+			return ret;
+	}

+	ttm = bo->ttm;
 	ret = ttm_tt_populate(ttm, &ctx);
 	if (ret)
 		return ret;
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 0ad30b112982..bdfed6725d6f 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -349,6 +349,11 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,

 		};

+		if (!bo->ttm && ttm_tt_create(bo, true)) {
+			ret = VM_FAULT_OOM;
+			goto out_io_unlock;
+		}
+
 		ttm = bo->ttm;
 		if (ttm_tt_populate(bo->ttm, &ctx)) {
 			ret = VM_FAULT_OOM;
Hmm, amdgpu_gtt_mgr_del() grabs mem->mm_node and kfrees it.
If this value is not NULL, it looks like bad things could happen.
Will _mgr_del never get called in this case?
Using the return value seems pretty reasonable; leaving bad pointers lying around makes me slightly nervous.
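Roughly the pattern in question, as a paraphrased sketch (made-up demo struct, not the actual amdgpu code), where the free side just trusts whatever mm_node holds:

#include <linux/slab.h>

struct demo_mem { void *mm_node; };

static void demo_mgr_del(struct demo_mem *mem)
{
	if (!mem->mm_node)
		return;

	kfree(mem->mm_node);	/* a stale non-NULL pointer would get freed here */
	mem->mm_node = NULL;
}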
Mike
On 26.06.20 at 19:39, Ruhl, Michael J wrote:
> Hmm, amdgpu_gtt_mgr_del() grabs mem->mm_node and kfrees it.
> If this value is not NULL, it looks like bad things could happen.
> Will _mgr_del never get called in this case?

Yes, everything else would be a bug.

> Using the return value seems pretty reasonable; leaving bad pointers
> lying around makes me slightly nervous.

The caller should not touch the member when an error occurred since it is certainly not initialized.

But it might be a good idea to zero initialize the structure by the caller, just to be sure.
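For the callers that put the ttm_mem_reg on the stack that would be roughly the following (untested sketch, reusing the get_node() call only as it appears in the hunks above):

	struct ttm_mem_reg mem = {};	/* mm_node and friends start out zeroed */
	int ret;

	ret = (*man->func->get_node)(man, bo, place, &mem);
	if (ret)
		return ret;	/* nothing was allocated, nothing stale to free */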
Thanks for the comment, Christian.