ttm_bo_pipeline_move needlessly duplicates code. Refactor this to instead call ttm_bo_move_accel_cleanup.
Signed-off-by: Brian Welty <brian.welty@intel.com>
---
Builds but otherwise currently untested. Came across this duplication and thought I would see if others cared to have it cleaned up this way or not.
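For anyone who prefers not to apply the hunks in their head, the function should end up looking roughly like the sketch below. This is a simplified illustration of the intent, not the literal post-patch code; the unchanged body of the fixed-memory branch is elided behind a comment.

int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
			 struct dma_fence *fence, bool evict,
			 struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_type_manager *from = &bdev->man[old_mem->mem_type];

	if (evict && from->flags & TTM_MEMTYPE_FLAG_FIXED) {
		/*
		 * Eviction out of fixed memory: no TTM to bind/unbind.
		 * Remember the fence and adopt the new placement here.
		 * (The rest of this branch is unchanged and elided in
		 * this sketch.)
		 */
		dma_resv_add_excl_fence(bo->base.resv, fence);

		*old_mem = *new_mem;
		new_mem->mm_node = NULL;
	} else {
		/*
		 * Pipelined moves with a ghost object and the
		 * wait-as-last-resort fallback are what
		 * ttm_bo_move_accel_cleanup() already implements,
		 * so defer to it.
		 */
		return ttm_bo_move_accel_cleanup(bo, fence, evict, new_mem);
	}

	return 0;
}

If I'm reading the existing code right, the eviction-from-fixed-memory case is the only path whose behavior ttm_bo_move_accel_cleanup() does not already provide, which is why that branch is the one kept here.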
 drivers/gpu/drm/ttm/ttm_bo_util.c | 64 +++----------------------------
 1 file changed, 5 insertions(+), 59 deletions(-)
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index fe81c565e7ef..086ba2c2f60b 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -746,49 +746,10 @@ int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
 {
 	struct ttm_bo_device *bdev = bo->bdev;
 	struct ttm_mem_reg *old_mem = &bo->mem;
 	struct ttm_mem_type_manager *from = &bdev->man[old_mem->mem_type];
-	struct ttm_mem_type_manager *to = &bdev->man[new_mem->mem_type];
-
-	int ret;
-
-	dma_resv_add_excl_fence(bo->base.resv, fence);
-
-	if (!evict) {
-		struct ttm_buffer_object *ghost_obj;
-
-		/**
-		 * This should help pipeline ordinary buffer moves.
-		 *
-		 * Hang old buffer memory on a new buffer object,
-		 * and leave it to be released when the GPU
-		 * operation has completed.
-		 */
-
-		dma_fence_put(bo->moving);
-		bo->moving = dma_fence_get(fence);
-
-		ret = ttm_buffer_object_transfer(bo, &ghost_obj);
-		if (ret)
-			return ret;
-
-		dma_resv_add_excl_fence(ghost_obj->base.resv, fence);
-
-		/**
-		 * If we're not moving to fixed memory, the TTM object
-		 * needs to stay alive. Otherwhise hang it on the ghost
-		 * bo to be unbound and destroyed.
-		 */
-
-		if (!(to->flags & TTM_MEMTYPE_FLAG_FIXED))
-			ghost_obj->ttm = NULL;
-		else
-			bo->ttm = NULL;
-
-		ttm_bo_unreserve(ghost_obj);
-		ttm_bo_put(ghost_obj);
-
-	} else if (from->flags & TTM_MEMTYPE_FLAG_FIXED) {
+	if (evict && from->flags & TTM_MEMTYPE_FLAG_FIXED) {
+		dma_resv_add_excl_fence(bo->base.resv, fence);
 		/**
 		 * BO doesn't have a TTM we need to bind/unbind. Just remember
@@ -807,27 +768,12 @@ int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
 		dma_fence_put(bo->moving);
 		bo->moving = dma_fence_get(fence);
 
+		*old_mem = *new_mem;
+		new_mem->mm_node = NULL;
 	} else {
-		/**
-		 * Last resort, wait for the move to be completed.
-		 *
-		 * Should never happen in pratice.
-		 */
-
-		ret = ttm_bo_wait(bo, false, false);
-		if (ret)
-			return ret;
-
-		if (to->flags & TTM_MEMTYPE_FLAG_FIXED) {
-			ttm_tt_destroy(bo->ttm);
-			bo->ttm = NULL;
-		}
-		ttm_bo_free_old_node(bo);
+		return ttm_bo_move_accel_cleanup(bo, fence, evict, new_mem);
 	}
 
-	*old_mem = *new_mem;
-	new_mem->mm_node = NULL;
-
 	return 0;
 }
 EXPORT_SYMBOL(ttm_bo_pipeline_move);
On 16.08.19 at 20:59, Brian Welty wrote:
> ttm_bo_pipeline_move needlessly duplicates code. Refactor this to instead call ttm_bo_move_accel_cleanup.
>
> Signed-off-by: Brian Welty <brian.welty@intel.com>
At least off hand that looks valid to me, Reviewed-by: Christian König <christian.koenig@amd.com> for now.
Regards, Christian.