Instead of signaling failure by setting the node pointer to NULL, do so by returning -ENOSPC.
v2: add memset() to make sure that mem is always initialized.
v3: drop memset(), only set mm_node = NULL, move mm_node init in amdgpu
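For illustration only, a minimal sketch of what the new contract looks like for a get_node backend (the example_* names are made up; only the return convention matters):

    /* Hypothetical backend; shows the convention, not any real manager. */
    static int example_mgr_get_node(struct ttm_mem_type_manager *man,
                                    struct ttm_buffer_object *bo,
                                    const struct ttm_place *place,
                                    struct ttm_mem_reg *mem)
    {
            if (!example_space_left(man, mem->num_pages))   /* made-up helper */
                    return -ENOSPC; /* new convention: report exhaustion directly */

            /* old convention was: return 0 and leave mem->mm_node == NULL instead */
            mem->mm_node = example_alloc_node(man, mem);    /* made-up helper */
            if (!mem->mm_node)
                    return -ENOSPC;
            return 0;
    }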
Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c   |  3 +--
 drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c  |  5 ++---
 drivers/gpu/drm/nouveau/nouveau_ttm.c         |  8 --------
 drivers/gpu/drm/ttm/ttm_bo.c                  | 13 +++++++------
 drivers/gpu/drm/ttm/ttm_bo_manager.c          |  2 +-
 drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c |  4 +---
 6 files changed, 12 insertions(+), 23 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
index 627104401e84..2c20d23d62d1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
@@ -229,7 +229,7 @@ static int amdgpu_gtt_mgr_new(struct ttm_mem_type_manager *man,
 	if ((&tbo->mem == mem || tbo->mem.mem_type != TTM_PL_TT) &&
 	    atomic64_read(&mgr->available) < mem->num_pages) {
 		spin_unlock(&mgr->lock);
-		return 0;
+		return -ENOSPC;
 	}
 	atomic64_sub(mem->num_pages, &mgr->available);
 	spin_unlock(&mgr->lock);
@@ -250,7 +250,6 @@ static int amdgpu_gtt_mgr_new(struct ttm_mem_type_manager *man,
 		if (unlikely(r)) {
 			kfree(node);
 			mem->mm_node = NULL;
-			r = 0;
 			goto err_out;
 		}
 	} else {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index 128a667ed8fa..e8d1dd564006 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -336,8 +336,7 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
 	mem_bytes = (u64)mem->num_pages << PAGE_SHIFT;
 	if (atomic64_add_return(mem_bytes, &mgr->usage) > max_bytes) {
 		atomic64_sub(mem_bytes, &mgr->usage);
-		mem->mm_node = NULL;
-		return 0;
+		return -ENOSPC;
 	}
 
 	if (place->flags & TTM_PL_FLAG_CONTIGUOUS) {
@@ -417,7 +416,7 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
 	atomic64_sub(mem->num_pages << PAGE_SHIFT, &mgr->usage);
 
 	kvfree(nodes);
-	return r == -ENOSPC ? 0 : r;
+	return r;
 }
 
 /**
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index 7ca0a2498532..e89ea052cf71 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -75,10 +75,6 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
 	ret = nouveau_mem_vram(reg, nvbo->contig, nvbo->page);
 	if (ret) {
 		nouveau_mem_del(reg);
-		if (ret == -ENOSPC) {
-			reg->mm_node = NULL;
-			return 0;
-		}
 		return ret;
 	}
 
@@ -139,10 +135,6 @@ nv04_gart_manager_new(struct ttm_mem_type_manager *man,
 			   reg->num_pages << PAGE_SHIFT, &mem->vma[0]);
 	if (ret) {
 		nouveau_mem_del(reg);
-		if (ret == -ENOSPC) {
-			reg->mm_node = NULL;
-			return 0;
-		}
 		return ret;
 	}
 
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index f78cfc76ad78..2da8dbd2553b 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -909,10 +909,10 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
 	ticket = dma_resv_locking_ctx(bo->base.resv);
 	do {
 		ret = (*man->func->get_node)(man, bo, place, mem);
-		if (unlikely(ret != 0))
-			return ret;
-		if (mem->mm_node)
+		if (likely(!ret))
 			break;
+		if (unlikely(ret != -ENOSPC))
+			return ret;
 		ret = ttm_mem_evict_first(bdev, mem->mem_type, place, ctx,
 					  ticket);
 		if (unlikely(ret != 0))
@@ -1056,12 +1056,11 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 
 		man = &bdev->man[mem->mem_type];
 		ret = (*man->func->get_node)(man, bo, place, mem);
+		if (ret == -ENOSPC)
+			continue;
 		if (unlikely(ret))
 			goto error;
 
-		if (!mem->mm_node)
-			continue;
-
 		ret = ttm_bo_add_move_fence(bo, man, mem, ctx->no_wait_gpu);
 		if (unlikely(ret)) {
 			(*man->func->put_node)(man, mem);
@@ -1126,6 +1125,8 @@ static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
 	mem.page_alignment = bo->mem.page_alignment;
 	mem.bus.io_reserved_vm = false;
 	mem.bus.io_reserved_count = 0;
+	mem.mm_node = NULL;
+
 	/*
 	 * Determine where to move the buffer.
 	 */
diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c
index 18d3debcc949..facd3049c3aa 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_manager.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_manager.c
@@ -86,7 +86,7 @@ static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
 		mem->start = node->start;
 	}
 
-	return 0;
+	return ret;
 }
 
 static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
index 7da752ca1c34..4a76fc7114ad 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
@@ -53,8 +53,6 @@ static int vmw_gmrid_man_get_node(struct ttm_mem_type_manager *man,
 		(struct vmwgfx_gmrid_man *)man->priv;
 	int id;
 
-	mem->mm_node = NULL;
-
 	id = ida_alloc_max(&gman->gmr_ida, gman->max_gmr_ids - 1, GFP_KERNEL);
 	if (id < 0)
 		return (id != -ENOMEM ? 0 : id);
@@ -78,7 +76,7 @@ static int vmw_gmrid_man_get_node(struct ttm_mem_type_manager *man,
 	gman->used_gmr_pages -= bo->num_pages;
 	spin_unlock(&gman->lock);
 	ida_free(&gman->gmr_ida, id);
-	return 0;
+	return -ENOSPC;
 }
 
 static void vmw_gmrid_man_put_node(struct ttm_mem_type_manager *man,
We only need the page array when the BO is about to be accessed.
So not only populate, but also create it on demand.
v2: move NULL check into ttm_tt_create()
v3: fix the occurrence in ttm_bo_kmap_ttm as well
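For illustration only, a sketch of the calling pattern this enables (it mirrors what the diff below does in ttm_bo_kmap_ttm; the wrapper name is made up): ttm_tt_create() becomes a no-op when bo->ttm already exists, so any path that needs the page array first creates and then populates the ttm_tt.

    /* Made-up wrapper showing the create-then-populate pattern. */
    static int example_access_pages(struct ttm_buffer_object *bo,
                                    struct ttm_operation_ctx *ctx)
    {
            int ret;

            ret = ttm_tt_create(bo, true);  /* creates bo->ttm on demand, returns 0 if it already exists */
            if (ret)
                    return ret;

            return ttm_tt_populate(bo->ttm, ctx);
    }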
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Michael J. Ruhl <michael.j.ruhl@intel.com>
---
 drivers/gpu/drm/ttm/ttm_bo.c      | 37 ++++++++-----------------------
 drivers/gpu/drm/ttm/ttm_bo_util.c |  7 ++++--
 drivers/gpu/drm/ttm/ttm_bo_vm.c   |  5 +++++
 drivers/gpu/drm/ttm/ttm_tt.c      |  4 +++-
 4 files changed, 22 insertions(+), 31 deletions(-)
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 2da8dbd2553b..0c13fe96c7e3 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -292,12 +292,11 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
 	 */
 
 	if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
-		if (bo->ttm == NULL) {
-			bool zero = !(old_man->flags & TTM_MEMTYPE_FLAG_FIXED);
-			ret = ttm_tt_create(bo, zero);
-			if (ret)
-				goto out_err;
-		}
+		bool zero = !(old_man->flags & TTM_MEMTYPE_FLAG_FIXED);
+
+		ret = ttm_tt_create(bo, zero);
+		if (ret)
+			goto out_err;
 
 		ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
 		if (ret)
@@ -660,13 +659,8 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo,
 	placement.num_busy_placement = 0;
 	bdev->driver->evict_flags(bo, &placement);
 
-	if (!placement.num_placement && !placement.num_busy_placement) {
-		ret = ttm_bo_pipeline_gutting(bo);
-		if (ret)
-			return ret;
-
-		return ttm_tt_create(bo, false);
-	}
+	if (!placement.num_placement && !placement.num_busy_placement)
+		return ttm_bo_pipeline_gutting(bo);
 
 	evict_mem = bo->mem;
 	evict_mem.mm_node = NULL;
@@ -1195,13 +1189,8 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
 	/*
 	 * Remove the backing store if no placement is given.
 	 */
-	if (!placement->num_placement && !placement->num_busy_placement) {
-		ret = ttm_bo_pipeline_gutting(bo);
-		if (ret)
-			return ret;
-
-		return ttm_tt_create(bo, false);
-	}
+	if (!placement->num_placement && !placement->num_busy_placement)
+		return ttm_bo_pipeline_gutting(bo);
 
 	/*
 	 * Check whether we need to move buffer.
@@ -1218,14 +1207,6 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
 		ttm_flag_masked(&bo->mem.placement, new_flags,
 				~TTM_PL_MASK_MEMTYPE);
 	}
-	/*
-	 * We might need to add a TTM.
-	 */
-	if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
-		ret = ttm_tt_create(bo, true);
-		if (ret)
-			return ret;
-	}
 	return 0;
 }
 EXPORT_SYMBOL(ttm_bo_validate);
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 52d2b71f1588..dc4db7613666 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -580,12 +580,15 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
 		.interruptible = false,
 		.no_wait_gpu = false
 	};
-	struct ttm_tt *ttm = bo->ttm;
+	struct ttm_tt *ttm;
 	pgprot_t prot;
 	int ret;
 
-	BUG_ON(!ttm);
+	ret = ttm_tt_create(bo, true);
+	if (ret)
+		return ret;
 
+	ttm = bo->ttm;
 	ret = ttm_tt_populate(ttm, &ctx);
 	if (ret)
 		return ret;
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 0ad30b112982..0586870ab642 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -349,6 +349,11 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
 
 		};
 
+		if (ttm_tt_create(bo, true)) {
+			ret = VM_FAULT_OOM;
+			goto out_io_unlock;
+		}
+
 		ttm = bo->ttm;
 		if (ttm_tt_populate(bo->ttm, &ctx)) {
 			ret = VM_FAULT_OOM;
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 2ec448e1d663..e25d4097aa16 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -50,6 +50,9 @@ int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc)
 
 	dma_resv_assert_held(bo->base.resv);
 
+	if (bo->ttm)
+		return 0;
+
 	if (bdev->need_dma32)
 		page_flags |= TTM_PAGE_FLAG_DMA32;
 
@@ -67,7 +70,6 @@ int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc)
 		page_flags |= TTM_PAGE_FLAG_SG;
 		break;
 	default:
-		bo->ttm = NULL;
 		pr_err("Illegal buffer object type\n");
 		return -EINVAL;
 	}
On 2020-06-29 5:19 p.m., Christian König wrote:
> We only need the page array when the BO is about to be accessed.
> So not only populate, but also create it on demand.
>
> v2: move NULL check into ttm_tt_create()
> v3: fix the occurrence in ttm_bo_kmap_ttm as well

This broke amdgpu userptr functionality for me, in particular all
GL_AMD_pinned_memory piglit tests, e.g.

.../piglit/bin/amd_pinned_memory decrement-offset -auto -fbo
Offset is decremented, no wait.
Mesa: User error: GL_INVALID_OPERATION in glBufferData
[...]
Unexpected GL error: GL_INVALID_OPERATION 0x502
(Error at tests/spec/amd_pinned_memory/test.c:214)
PIGLIT: {"result": "fail" }
On 05.08.20 at 18:16, Michel Dänzer wrote:
> On 2020-06-29 5:19 p.m., Christian König wrote:
>> We only need the page array when the BO is about to be accessed.
>> So not only populate, but also create it on demand.
>>
>> v2: move NULL check into ttm_tt_create()
>> v3: fix the occurrence in ttm_bo_kmap_ttm as well
>
> This broke amdgpu userptr functionality for me, in particular all
> GL_AMD_pinned_memory piglit tests, e.g.
>
> .../piglit/bin/amd_pinned_memory decrement-offset -auto -fbo
> Offset is decremented, no wait.
> Mesa: User error: GL_INVALID_OPERATION in glBufferData
> [...]
> Unexpected GL error: GL_INVALID_OPERATION 0x502
> (Error at tests/spec/amd_pinned_memory/test.c:214)
> PIGLIT: {"result": "fail" }

But no crashes? I would have expected a NULL pointer deref if we had
missed a call path.

Thanks, going to take a look.
Christian.
On 2020-08-06 9:13 a.m., Christian König wrote:
> On 05.08.20 at 18:16, Michel Dänzer wrote:
>> On 2020-06-29 5:19 p.m., Christian König wrote:
>>> We only need the page array when the BO is about to be accessed.
>>> So not only populate, but also create it on demand.
>>>
>>> v2: move NULL check into ttm_tt_create()
>>> v3: fix the occurrence in ttm_bo_kmap_ttm as well
>>
>> This broke amdgpu userptr functionality for me, in particular all
>> GL_AMD_pinned_memory piglit tests, e.g.
>>
>> .../piglit/bin/amd_pinned_memory decrement-offset -auto -fbo
>> Offset is decremented, no wait.
>> Mesa: User error: GL_INVALID_OPERATION in glBufferData
>> [...]
>> Unexpected GL error: GL_INVALID_OPERATION 0x502
>> (Error at tests/spec/amd_pinned_memory/test.c:214)
>> PIGLIT: {"result": "fail" }
>
> But no crashes? I would have expected a NULL pointer deref if we had
> missed a call path.

No crashes. Since a GL error is raised, my guess would be that an ioctl
which was previously succeeding is now returning an error.
On 06.08.20 at 10:01, Michel Dänzer wrote:
> On 2020-08-06 9:13 a.m., Christian König wrote:
>> On 05.08.20 at 18:16, Michel Dänzer wrote:
>>> On 2020-06-29 5:19 p.m., Christian König wrote:
>>>> We only need the page array when the BO is about to be accessed.
>>>> So not only populate, but also create it on demand.
>>>>
>>>> v2: move NULL check into ttm_tt_create()
>>>> v3: fix the occurrence in ttm_bo_kmap_ttm as well
>>>
>>> This broke amdgpu userptr functionality for me, in particular all
>>> GL_AMD_pinned_memory piglit tests, e.g.
>>>
>>> .../piglit/bin/amd_pinned_memory decrement-offset -auto -fbo
>>> Offset is decremented, no wait.
>>> Mesa: User error: GL_INVALID_OPERATION in glBufferData
>>> [...]
>>> Unexpected GL error: GL_INVALID_OPERATION 0x502
>>> (Error at tests/spec/amd_pinned_memory/test.c:214)
>>> PIGLIT: {"result": "fail" }
>>
>> But no crashes? I would have expected a NULL pointer deref if we had
>> missed a call path.
>
> No crashes. Since a GL error is raised, my guess would be that an ioctl
> which was previously succeeding is now returning an error.

Mhm, I can reproduce the problem that userptrs don't work anymore. But
even with the mentioned patch reverted, it still doesn't work on the
tip of drm-misc-next.

Are you sure your bisection is right? Going to keep testing.

Christian.
On 2020-08-06 10:43 a.m., Christian König wrote:
> On 06.08.20 at 10:01, Michel Dänzer wrote:
>> On 2020-08-06 9:13 a.m., Christian König wrote:
>>> On 05.08.20 at 18:16, Michel Dänzer wrote:
>>>> On 2020-06-29 5:19 p.m., Christian König wrote:
>>>>> We only need the page array when the BO is about to be accessed.
>>>>> So not only populate, but also create it on demand.
>>>>>
>>>>> v2: move NULL check into ttm_tt_create()
>>>>> v3: fix the occurrence in ttm_bo_kmap_ttm as well
>>>>
>>>> This broke amdgpu userptr functionality for me, in particular all
>>>> GL_AMD_pinned_memory piglit tests, e.g.
>>>>
>>>> .../piglit/bin/amd_pinned_memory decrement-offset -auto -fbo
>>>> Offset is decremented, no wait.
>>>> Mesa: User error: GL_INVALID_OPERATION in glBufferData
>>>> [...]
>>>> Unexpected GL error: GL_INVALID_OPERATION 0x502
>>>> (Error at tests/spec/amd_pinned_memory/test.c:214)
>>>> PIGLIT: {"result": "fail" }
>>>
>>> But no crashes? I would have expected a NULL pointer deref if we had
>>> missed a call path.
>>
>> No crashes. Since a GL error is raised, my guess would be that an ioctl
>> which was previously succeeding is now returning an error.
>
> Mhm, I can reproduce the problem that userptrs don't work anymore. But
> even with the mentioned patch reverted, it still doesn't work on the
> tip of drm-misc-next.

Maybe there are other changes affecting it as well, e.g.

e04be2310b5e "drm/ttm: further cleanup ttm_mem_reg handling"
1e691e244487 "drm/amdgpu: stop allocating dummy GTT nodes"

come to mind.

> Are you sure your bisection is right?

Fairly sure. If that commit works for you, or its parent doesn't, I can
retry.
On 06.08.20 at 11:05, Michel Dänzer wrote:
> On 2020-08-06 10:43 a.m., Christian König wrote:
>> On 06.08.20 at 10:01, Michel Dänzer wrote:
>>> On 2020-08-06 9:13 a.m., Christian König wrote:
>>>> On 05.08.20 at 18:16, Michel Dänzer wrote:
>>>>> On 2020-06-29 5:19 p.m., Christian König wrote:
>>>>>> We only need the page array when the BO is about to be accessed.
>>>>>> So not only populate, but also create it on demand.
>>>>>>
>>>>>> v2: move NULL check into ttm_tt_create()
>>>>>> v3: fix the occurrence in ttm_bo_kmap_ttm as well
>>>>>
>>>>> This broke amdgpu userptr functionality for me, in particular all
>>>>> GL_AMD_pinned_memory piglit tests, e.g.
>>>>>
>>>>> .../piglit/bin/amd_pinned_memory decrement-offset -auto -fbo
>>>>> Offset is decremented, no wait.
>>>>> Mesa: User error: GL_INVALID_OPERATION in glBufferData
>>>>> [...]
>>>>> Unexpected GL error: GL_INVALID_OPERATION 0x502
>>>>> (Error at tests/spec/amd_pinned_memory/test.c:214)
>>>>> PIGLIT: {"result": "fail" }
>>>>
>>>> But no crashes? I would have expected a NULL pointer deref if we had
>>>> missed a call path.
>>>
>>> No crashes. Since a GL error is raised, my guess would be that an ioctl
>>> which was previously succeeding is now returning an error.
>>
>> Mhm, I can reproduce the problem that userptrs don't work anymore. But
>> even with the mentioned patch reverted, it still doesn't work on the
>> tip of drm-misc-next.
>
> Maybe there are other changes affecting it as well, e.g.
>
> e04be2310b5e "drm/ttm: further cleanup ttm_mem_reg handling"
> 1e691e244487 "drm/amdgpu: stop allocating dummy GTT nodes"
>
> come to mind.
>
>> Are you sure your bisection is right?
>
> Fairly sure. If that commit works for you, or its parent doesn't, I can
> retry.

I've figured out what's wrong here, and yes, that needs extra handling.

Going to have a patch in a minute.

Thanks for the notice,
Christian.
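Purely as an illustration of the kind of extra handling meant here (not the actual follow-up patch): a userptr path now has to create the ttm_tt explicitly before it can touch the user pages, because ttm_bo_validate() no longer creates it as a side effect.

    /* Made-up helper names; only the explicit ttm_tt_create() call is the point. */
    static int example_userptr_prepare(struct ttm_buffer_object *bo)
    {
            int ret;

            ret = ttm_tt_create(bo, false);         /* bo->ttm no longer appears implicitly */
            if (ret)
                    return ret;

            return example_get_user_pages(bo->ttm); /* made-up stand-in for the userptr pinning */
    }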
-----Original Message-----
From: dri-devel <dri-devel-bounces@lists.freedesktop.org> On Behalf Of Christian König
Sent: Monday, June 29, 2020 11:19 AM
To: dri-devel@lists.freedesktop.org
Subject: [PATCH 1/2] drm/ttm: cleanup ttm_mem_type_manager_func.get_node interface v3
> Instead of signaling failure by setting the node pointer to NULL, do so
> by returning -ENOSPC.
>
> v2: add memset() to make sure that mem is always initialized.
> v3: drop memset(), only set mm_node = NULL, move mm_node init in amdgpu

Reviewed-by: Michael J. Ruhl <michael.j.ruhl@intel.com>

m

> Signed-off-by: Christian König <christian.koenig@amd.com>