On Fri, 3 Sept 2021 at 13:31, Christian König <ckoenig.leichtzumerken@gmail.com> wrote:
Fix the remaining warnings, switch to inline structure documentation and finally enable this.
Signed-off-by: Christian König <christian.koenig@amd.com>
 Documentation/gpu/drm-mm.rst |  9 +++++
 include/drm/ttm/ttm_device.h | 73 +++++++++++++++++++++---------------
 2 files changed, 51 insertions(+), 31 deletions(-)
diff --git a/Documentation/gpu/drm-mm.rst b/Documentation/gpu/drm-mm.rst
index 8ca981065e1a..56b7b581567d 100644
--- a/Documentation/gpu/drm-mm.rst
+++ b/Documentation/gpu/drm-mm.rst
@@ -30,6 +30,15 @@ The Translation Table Manager (TTM)
TTM design background and information belongs here.
+TTM device object reference +---------------------------
+.. kernel-doc:: include/drm/ttm/ttm_device.h
- :internal:
+.. kernel-doc:: drivers/gpu/drm/ttm/ttm_device.c
- :export:
The Graphics Execution Manager (GEM)
diff --git a/include/drm/ttm/ttm_device.h b/include/drm/ttm/ttm_device.h
index 07d722950d5b..0b31ec731e66 100644
--- a/include/drm/ttm/ttm_device.h
+++ b/include/drm/ttm/ttm_device.h
@@ -39,31 +39,23 @@ struct ttm_operation_ctx;
/**
- struct ttm_global - Buffer object driver global data.
- @dummy_read_page: Pointer to a dummy page used for mapping requests
- of unpopulated pages.
- @shrink: A shrink callback object used for buffer object swap.
- @device_list_mutex: Mutex protecting the device list.
- This mutex is held while traversing the device list for pm options.
- @lru_lock: Spinlock protecting the bo subsystem lru lists.
- @device_list: List of buffer object devices.
*/
- @swap_lru: Lru list of buffer objects used for swapping.
extern struct ttm_global {
/**
* Constant after init.
* @dummy_read_page: Pointer to a dummy page used for mapping requests
* of unpopulated pages. Constant after init. */
struct page *dummy_read_page; /**
* Protected by ttm_global_mutex.
* @device_list: List of buffer object devices. Protected by
* ttm_global_mutex.
Would it be reasonable to move the ttm_global_mutex into ttm_global here? That way everything is nicely grouped together, and we can easily reference it here with @mutex or so?
*/ struct list_head device_list; /**
* Internal protection.
* @bo_count: Number of buffer objects allocated by devices. */ atomic_t bo_count;
} ttm_glob; @@ -230,50 +222,69 @@ struct ttm_device_funcs {
/**
- struct ttm_device - Buffer object driver device-specific data.
- @device_list: Our entry in the global device list.
- @funcs: Function table for the device.
- @sysman: Resource manager for the system domain.
- @man_drv: An array of resource_managers.
- @vma_manager: Address space manager.
- @pool: page pool for the device.
- @dev_mapping: A pointer to the struct address_space representing the
- device address space.
*/
- @wq: Work queue structure for the delayed delete workqueue.
struct ttm_device {
/*
/**
* @device_list: Our entry in the global device list. * Constant after bo device init */ struct list_head device_list;
/**
* @funcs: Function table for the device.
* Constant after bo device init
*/ struct ttm_device_funcs *funcs;
/*
/**
* @sysman: Resource manager for the system domain. * Access via ttm_manager_type. */ struct ttm_resource_manager sysman;
/**
* @man_drv: An array of resource_managers.
*/ struct ttm_resource_manager *man_drv[TTM_NUM_MEM_TYPES]; /* * Protected by internal locks. */
/**
* @vma_manager: Address space manager for finding BOs to mmap.
*/ struct drm_vma_offset_manager *vma_manager;
/**
* @pool: page pool for the device.
*/ struct ttm_pool pool;
/*
* Protection for the per manager LRU and ddestroy lists.
/**
* @lru_lock: Protection for the per manager LRU and ddestroy lists. */ spinlock_t lru_lock;
/**
* @ddestroy: Destroyed but not yet cleaned up buffer objects.
*/ struct list_head ddestroy;
/**
* @pinned: Buffer object which are pinned and so not on any LRU list.
*/ struct list_head pinned;
/*
* Protected by load / firstopen / lastclose /unload sync.
/**
* @dev_mapping: A pointer to the struct address_space for invalidating
* CPU mappings on buffer move. Protected by load/unload sync. */ struct address_space *dev_mapping;
/*
* Internal protection.
/**
* @wq: Work queue structure for the delayed delete workqueue. Has
* internal protection. */ struct delayed_work wq;
};
2.25.1