On Wed, Nov 24, 2021 at 08:44:30PM +0800, Christian König wrote:
We have the BO pointer in the base structure now as well.
Signed-off-by: Christian König <christian.koenig@amd.com>
Patch 7 -> Patch 12 are Reviewed-by: Huang Rui <ray.huang@amd.com>
I need more time to read patch 5.
Thanks, Ray
 drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c | 49 ++++++++-------------
 1 file changed, 18 insertions(+), 31 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
index ce5eeb3c1097..a55bbe1a154c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
@@ -26,23 +26,12 @@
 
 #include "amdgpu.h"
 
-struct amdgpu_gtt_node {
-	struct ttm_buffer_object *tbo;
-	struct ttm_range_mgr_node base;
-};
-
 static inline struct amdgpu_gtt_mgr *
 to_gtt_mgr(struct ttm_resource_manager *man)
 {
 	return container_of(man, struct amdgpu_gtt_mgr, manager);
 }
 
-static inline struct amdgpu_gtt_node *
-to_amdgpu_gtt_node(struct ttm_resource *res)
-{
-	return container_of(res, struct amdgpu_gtt_node, base.base);
-}
-
 /**
  * DOC: mem_info_gtt_total
@@ -107,9 +96,9 @@ const struct attribute_group amdgpu_gtt_mgr_attr_group = {
  */
 bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_resource *res)
 {
-	struct amdgpu_gtt_node *node = to_amdgpu_gtt_node(res);
+	struct ttm_range_mgr_node *node = to_ttm_range_mgr_node(res);
 
-	return drm_mm_node_allocated(&node->base.mm_nodes[0]);
+	return drm_mm_node_allocated(&node->mm_nodes[0]);
 }
 
 /**
@@ -129,15 +118,14 @@ static int amdgpu_gtt_mgr_new(struct ttm_resource_manager *man,
 {
 	struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man);
 	uint32_t num_pages = PFN_UP(tbo->base.size);
-	struct amdgpu_gtt_node *node;
+	struct ttm_range_mgr_node *node;
 	int r;
 
-	node = kzalloc(struct_size(node, base.mm_nodes, 1), GFP_KERNEL);
+	node = kzalloc(struct_size(node, mm_nodes, 1), GFP_KERNEL);
 	if (!node)
 		return -ENOMEM;
 
-	node->tbo = tbo;
-	ttm_resource_init(tbo, place, &node->base.base);
+	ttm_resource_init(tbo, place, &node->base);
 	if (!(place->flags & TTM_PL_FLAG_TEMPORARY) &&
 	    ttm_resource_manager_usage(man) > man->size) {
 		r = -ENOSPC;
@@ -146,8 +134,7 @@ static int amdgpu_gtt_mgr_new(struct ttm_resource_manager *man,
 
 	if (place->lpfn) {
 		spin_lock(&mgr->lock);
-		r = drm_mm_insert_node_in_range(&mgr->mm,
-						&node->base.mm_nodes[0],
+		r = drm_mm_insert_node_in_range(&mgr->mm, &node->mm_nodes[0],
 						num_pages, tbo->page_alignment,
 						0, place->fpfn, place->lpfn,
 						DRM_MM_INSERT_BEST);
@@ -155,18 +142,18 @@ static int amdgpu_gtt_mgr_new(struct ttm_resource_manager *man,
 		if (unlikely(r))
 			goto err_free;
 
-		node->base.base.start = node->base.mm_nodes[0].start;
+		node->base.start = node->mm_nodes[0].start;
 	} else {
-		node->base.mm_nodes[0].start = 0;
-		node->base.mm_nodes[0].size = node->base.base.num_pages;
-		node->base.base.start = AMDGPU_BO_INVALID_OFFSET;
+		node->mm_nodes[0].start = 0;
+		node->mm_nodes[0].size = node->base.num_pages;
+		node->base.start = AMDGPU_BO_INVALID_OFFSET;
 	}
 
-	*res = &node->base.base;
+	*res = &node->base;
 	return 0;
 
 err_free:
-	ttm_resource_fini(man, &node->base.base);
+	ttm_resource_fini(man, &node->base);
 	kfree(node);
 	return r;
 }
@@ -182,12 +169,12 @@ static int amdgpu_gtt_mgr_new(struct ttm_resource_manager *man,
 static void amdgpu_gtt_mgr_del(struct ttm_resource_manager *man,
 			       struct ttm_resource *res)
 {
-	struct amdgpu_gtt_node *node = to_amdgpu_gtt_node(res);
+	struct ttm_range_mgr_node *node = to_ttm_range_mgr_node(res);
 	struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man);
 
 	spin_lock(&mgr->lock);
-	if (drm_mm_node_allocated(&node->base.mm_nodes[0]))
-		drm_mm_remove_node(&node->base.mm_nodes[0]);
+	if (drm_mm_node_allocated(&node->mm_nodes[0]))
+		drm_mm_remove_node(&node->mm_nodes[0]);
 	spin_unlock(&mgr->lock);
 
 	ttm_resource_fini(man, res);
@@ -204,16 +191,16 @@ static void amdgpu_gtt_mgr_del(struct ttm_resource_manager *man,
 int amdgpu_gtt_mgr_recover(struct ttm_resource_manager *man)
 {
 	struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man);
+	struct ttm_range_mgr_node *node;
 	struct amdgpu_device *adev;
-	struct amdgpu_gtt_node *node;
 	struct drm_mm_node *mm_node;
 	int r = 0;
 
 	adev = container_of(mgr, typeof(*adev), mman.gtt_mgr);
 	spin_lock(&mgr->lock);
 	drm_mm_for_each_node(mm_node, &mgr->mm) {
-		node = container_of(mm_node, typeof(*node), base.mm_nodes[0]);
-		r = amdgpu_ttm_recover_gart(node->tbo);
+		node = container_of(mm_node, typeof(*node), mm_nodes[0]);
+		r = amdgpu_ttm_recover_gart(node->base.bo);
 		if (r)
 			break;
 	}
-- 2.25.1