On 26.05.21 at 13:32, Thomas Hellström wrote:
If the bo is idle when calling ttm_bo_pipeline_gutting(), we unnecessarily create a ghost object and push it out to delayed destroy. Fix this by adding a separate path for the idle case, and document the function.

Also avoid having the bo end up in a bad state vulnerable to user-space-triggered kernel BUGs if the call to ttm_tt_create() fails.
Finally reuse ttm_bo_pipeline_gutting() in ttm_bo_evict().
Cc: Christian König <christian.koenig@amd.com>
Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
v4:
- Clarify why we mark bo for clearing after ttm_bo_pipeline_gutting() (Reported by Matthew Auld)
 drivers/gpu/drm/ttm/ttm_bo.c      | 20 +++++------
 drivers/gpu/drm/ttm/ttm_bo_util.c | 55 ++++++++++++++++++++++++++++---
 drivers/gpu/drm/ttm/ttm_tt.c      |  5 +++
 include/drm/ttm/ttm_tt.h          | 10 ++++++
 4 files changed, 76 insertions(+), 14 deletions(-)
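[To illustrate the case the commit message targets: a driver requests that a bo's backing store simply be dropped by returning no placements from its evict_flags() callback; with this patch both ttm_bo_evict() and ttm_bo_validate() route that request through ttm_bo_pipeline_gutting(). The sketch below is illustration only and not part of the patch; foo_evict_flags() is a hypothetical driver callback.]

/*
 * Hypothetical driver evict_flags() callback (not part of this patch).
 * Returning zero regular and zero busy placements tells TTM there is
 * nowhere to move the buffer, so its contents are purged via
 * ttm_bo_pipeline_gutting() instead of being migrated.
 */
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_placement.h>

static void foo_evict_flags(struct ttm_buffer_object *bo,
			    struct ttm_placement *placement)
{
	/* No placements: ask TTM to drop the backing store. */
	placement->num_placement = 0;
	placement->num_busy_placement = 0;
}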
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 51a94fd63bd7..be0406466460 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -501,10 +501,15 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo,
 	bdev->funcs->evict_flags(bo, &placement);
 
 	if (!placement.num_placement && !placement.num_busy_placement) {
-		ttm_bo_wait(bo, false, false);
+		ret = ttm_bo_wait(bo, true, false);
+		if (ret)
+			return ret;
 
-		ttm_bo_cleanup_memtype_use(bo);
-		return ttm_tt_create(bo, false);
+		/*
+		 * Since we've already synced, this frees backing store
+		 * immediately.
+		 */
+		return ttm_bo_pipeline_gutting(bo);
 	}
 
 	ret = ttm_bo_mem_space(bo, &placement, &evict_mem, ctx);
@@ -976,13 +981,8 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
 	/*
 	 * Remove the backing store if no placement is given.
 	 */
-	if (!placement->num_placement && !placement->num_busy_placement) {
-		ret = ttm_bo_pipeline_gutting(bo);
-		if (ret)
-			return ret;
-
-		return ttm_tt_create(bo, false);
-	}
+	if (!placement->num_placement && !placement->num_busy_placement)
+		return ttm_bo_pipeline_gutting(bo);
 
 	/*
 	 * Check whether we need to move buffer.
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index ebff603a97f4..4cca932f1c0e 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -590,26 +590,73 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
 }
 EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
 
+/**
+ * ttm_bo_pipeline_gutting - purge the contents of a bo
+ * @bo: The buffer object
+ *
+ * Purge the contents of a bo, async if the bo is not idle.
+ * After a successful call, the bo is left unpopulated in
+ * system placement. The function may wait uninterruptible
+ * for idle on OOM.
+ *
+ * Return: 0 if successful, negative error code on failure.
+ */
 int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo)
 {
 	static const struct ttm_place sys_mem = { .mem_type = TTM_PL_SYSTEM };
 	struct ttm_buffer_object *ghost;
+	struct ttm_tt *ttm;
 	int ret;
 
-	ret = ttm_buffer_object_transfer(bo, &ghost);
+	/* If already idle, no need for ghost object dance. */
+	ret = ttm_bo_wait(bo, false, true);
+	if (ret != -EBUSY) {
+		if (!bo->ttm) {
+			/* See comment below about clearing. */
+			ret = ttm_tt_create(bo, true);
+			if (ret)
+				return ret;
+		} else {
+			ttm_tt_unpopulate(bo->bdev, bo->ttm);
+			if (bo->type == ttm_bo_type_device)
+				ttm_tt_mark_for_clear(bo->ttm);
+		}
+		ttm_resource_free(bo, &bo->mem);
+		ttm_resource_alloc(bo, &sys_mem, &bo->mem);
+
+		return 0;
+	}
+
+	/*
+	 * We need an unpopulated ttm_tt after giving our current one,
+	 * if any, to the ghost object. And we can't afford to fail
+	 * creating one *after* the operation. If the bo subsequently gets
+	 * resurrected, make sure it's cleared (if ttm_bo_type_device)
+	 * to avoid leaking sensitive information to user-space.
+	 */
+
+	ttm = bo->ttm;
+	bo->ttm = NULL;
+	ret = ttm_tt_create(bo, true);
+	swap(bo->ttm, ttm);
 	if (ret)
 		return ret;
 
+	ret = ttm_buffer_object_transfer(bo, &ghost);
+	if (ret) {
+		ttm_tt_destroy(bo->bdev, ttm);
+		return ret;
+	}
+
 	ret = dma_resv_copy_fences(&ghost->base._resv, bo->base.resv);
 	/* Last resort, wait for the BO to be idle when we are OOM */
 	if (ret)
 		ttm_bo_wait(bo, false, false);
 
-	ttm_resource_alloc(bo, &sys_mem, &bo->mem);
-	bo->ttm = NULL;
-
 	dma_resv_unlock(&ghost->base._resv);
 	ttm_bo_put(ghost);
+	bo->ttm = ttm;
+	ttm_resource_alloc(bo, &sys_mem, &bo->mem);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 0e41227116b1..913b330a234b 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -134,6 +134,11 @@ void ttm_tt_destroy_common(struct ttm_device *bdev, struct ttm_tt *ttm)
 }
 EXPORT_SYMBOL(ttm_tt_destroy_common);
 
+void ttm_tt_mark_for_clear(struct ttm_tt *ttm)
+{
+	ttm->page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
+}
+
 void ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
 {
 	bdev->funcs->ttm_tt_destroy(bdev, ttm);
diff --git a/include/drm/ttm/ttm_tt.h b/include/drm/ttm/ttm_tt.h
index 3102059db726..daa9c4cf48bb 100644
--- a/include/drm/ttm/ttm_tt.h
+++ b/include/drm/ttm/ttm_tt.h
@@ -170,6 +170,16 @@ int ttm_tt_populate(struct ttm_device *bdev, struct ttm_tt *ttm, struct ttm_oper
  */
 void ttm_tt_unpopulate(struct ttm_device *bdev, struct ttm_tt *ttm);
 
+/**
+ * ttm_tt_mark_for_clear - Mark pages for clearing on populate.
+ *
+ * @ttm: Pointer to the ttm_tt structure
+ *
+ * Marks pages for clearing so that the next time the page vector is
+ * populated, the pages will be cleared.
+ */
+void ttm_tt_mark_for_clear(struct ttm_tt *ttm);
+
Either implement the function directly here as static (it's a one-liner anyway) or move the documentation to the implementation.

Apart from that the patch is Reviewed-by: Christian König <christian.koenig@amd.com>

Regards,
Christian.
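[For reference, a rough sketch of the first alternative Christian suggests, i.e. implementing the helper directly in include/drm/ttm/ttm_tt.h rather than in ttm_tt.c. This is illustration only, not the posted patch; the TTM_PAGE_FLAG_ZERO_ALLOC assignment is taken from the ttm_tt.c hunk above.]

/* Sketch: the one-liner as a static inline in the header, so the kernel-doc
 * and the implementation live in the same place. */
static inline void ttm_tt_mark_for_clear(struct ttm_tt *ttm)
{
	ttm->page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
}

[Either variant avoids the split between documentation in the header and the body in ttm_tt.c that the comment above points out.]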
 void ttm_tt_mgr_init(unsigned long num_pages, unsigned long num_dma32_pages);
 
 struct ttm_kmap_iter *ttm_kmap_iter_tt_init(struct ttm_kmap_iter_tt *iter_tt,