Hi,
So attached is the last batch of patches. I split out the ttm put page fix, and I fixed a bug in the page allocation when the clear flag wasn't set. I tested them on a bunch of radeon cards and everything seems fine (several GL apps, firefox, compositor, ...). I will do more testing on AGP and nouveau tomorrow.
The last patch adds callbacks for populating and unpopulating (better names welcome, if any) a ttm_tt, allowing the driver to choose between different allocators. The idea is that Konrad's DMA allocator would provide helper functions the driver can call; see the sketch after the next paragraph.
I chose to allocate all pages at once because ttm_tt objects are meant to be bound, and thus to be fully populated for their lifetime (vmwgfx might be different in this regard). It simplifies code in several places. I didn't see any performance impact in the few GL benchmarks I ran.
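To make that concrete, here is a minimal sketch of what such a driver-side populate/unpopulate pair could look like, assuming the array-based ttm_get_pages()/ttm_put_pages() introduced later in this series. The hook names and the omission of the memory-global accounting are placeholders, not the final interface:

static int radeon_ttm_tt_populate(struct ttm_tt *ttm)
{
	if (ttm->state != tt_unpopulated)
		return 0;

	/* A driver could call into a DMA-aware allocator here instead. */
	return ttm_get_pages(ttm->pages, ttm->num_pages, ttm->page_flags,
			     ttm->caching_state, ttm->dma_address);
}

static void radeon_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
	ttm_put_pages(ttm->pages, ttm->num_pages, ttm->page_flags,
		      ttm->caching_state, ttm->dma_address);
	ttm->state = tt_unpopulated;
}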
Konrad, I am planning on rebasing the last 4 patches of your patchset on top of this. They will likely shrink in size a bit.
Cheers,
Jerome Glisse
From: Jerome Glisse <jglisse@redhat.com>
This was never used by any of the drivers; properly using userspace pages for a bo would need more code (vma interaction mostly). Remove this dead code in preparation for the ttm_tt & backend merge.
Signed-off-by: Jerome Glisse <jglisse@redhat.com>
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
---
 drivers/gpu/drm/ttm/ttm_bo.c    |   22 --------
 drivers/gpu/drm/ttm/ttm_tt.c    |  105 +--------------------------------------
 include/drm/ttm/ttm_bo_api.h    |    5 --
 include/drm/ttm/ttm_bo_driver.h |   24 ---------
 4 files changed, 1 insertions(+), 155 deletions(-)
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 617b646..4bde335 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -342,22 +342,6 @@ static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
 		if (unlikely(bo->ttm == NULL))
 			ret = -ENOMEM;
 		break;
-	case ttm_bo_type_user:
-		bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
-					page_flags | TTM_PAGE_FLAG_USER,
-					glob->dummy_read_page);
-		if (unlikely(bo->ttm == NULL)) {
-			ret = -ENOMEM;
-			break;
-		}
-
-		ret = ttm_tt_set_user(bo->ttm, current,
-				      bo->buffer_start, bo->num_pages);
-		if (unlikely(ret != 0)) {
-			ttm_tt_destroy(bo->ttm);
-			bo->ttm = NULL;
-		}
-		break;
 	default:
 		printk(KERN_ERR TTM_PFX "Illegal buffer object type\n");
 		ret = -EINVAL;
@@ -907,16 +891,12 @@ static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
 }
 
 static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
-				 bool disallow_fixed,
 				 uint32_t mem_type,
 				 uint32_t proposed_placement,
 				 uint32_t *masked_placement)
 {
 	uint32_t cur_flags = ttm_bo_type_flags(mem_type);
 
-	if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && disallow_fixed)
-		return false;
-
 	if ((cur_flags & proposed_placement & TTM_PL_MASK_MEM) == 0)
 		return false;
 
@@ -961,7 +941,6 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 		man = &bdev->man[mem_type];
 
 		type_ok = ttm_bo_mt_compatible(man,
-						bo->type == ttm_bo_type_user,
 						mem_type,
 						placement->placement[i],
 						&cur_flags);
@@ -1009,7 +988,6 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 		if (!man->has_type)
 			continue;
 		if (!ttm_bo_mt_compatible(man,
-						bo->type == ttm_bo_type_user,
 						mem_type,
 						placement->busy_placement[i],
 						&cur_flags))
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 58c271e..82a1161 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -62,43 +62,6 @@ static void ttm_tt_free_page_directory(struct ttm_tt *ttm)
 	ttm->dma_address = NULL;
 }
 
-static void ttm_tt_free_user_pages(struct ttm_tt *ttm)
-{
-	int write;
-	int dirty;
-	struct page *page;
-	int i;
-	struct ttm_backend *be = ttm->be;
-
-	BUG_ON(!(ttm->page_flags & TTM_PAGE_FLAG_USER));
-	write = ((ttm->page_flags & TTM_PAGE_FLAG_WRITE) != 0);
-	dirty = ((ttm->page_flags & TTM_PAGE_FLAG_USER_DIRTY) != 0);
-
-	if (be)
-		be->func->clear(be);
-
-	for (i = 0; i < ttm->num_pages; ++i) {
-		page = ttm->pages[i];
-		if (page == NULL)
-			continue;
-
-		if (page == ttm->dummy_read_page) {
-			BUG_ON(write);
-			continue;
-		}
-
-		if (write && dirty && !PageReserved(page))
-			set_page_dirty_lock(page);
-
-		ttm->pages[i] = NULL;
-		ttm_mem_global_free(ttm->glob->mem_glob, PAGE_SIZE);
-		put_page(page);
-	}
-	ttm->state = tt_unpopulated;
-	ttm->first_himem_page = ttm->num_pages;
-	ttm->last_lomem_page = -1;
-}
-
 static struct page *__ttm_tt_get_page(struct ttm_tt *ttm, int index)
 {
 	struct page *p;
@@ -325,10 +288,7 @@ void ttm_tt_destroy(struct ttm_tt *ttm)
 	}
 
 	if (likely(ttm->pages != NULL)) {
-		if (ttm->page_flags & TTM_PAGE_FLAG_USER)
-			ttm_tt_free_user_pages(ttm);
-		else
-			ttm_tt_free_alloced_pages(ttm);
+		ttm_tt_free_alloced_pages(ttm);
 
 		ttm_tt_free_page_directory(ttm);
 	}
@@ -340,45 +300,6 @@ void ttm_tt_destroy(struct ttm_tt *ttm)
 	kfree(ttm);
 }
 
-int ttm_tt_set_user(struct ttm_tt *ttm,
-		    struct task_struct *tsk,
-		    unsigned long start, unsigned long num_pages)
-{
-	struct mm_struct *mm = tsk->mm;
-	int ret;
-	int write = (ttm->page_flags & TTM_PAGE_FLAG_WRITE) != 0;
-	struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
-
-	BUG_ON(num_pages != ttm->num_pages);
-	BUG_ON((ttm->page_flags & TTM_PAGE_FLAG_USER) == 0);
-
-	/**
-	 * Account user pages as lowmem pages for now.
-	 */
-
-	ret = ttm_mem_global_alloc(mem_glob, num_pages * PAGE_SIZE,
-				   false, false);
-	if (unlikely(ret != 0))
-		return ret;
-
-	down_read(&mm->mmap_sem);
-	ret = get_user_pages(tsk, mm, start, num_pages,
-			     write, 0, ttm->pages, NULL);
-	up_read(&mm->mmap_sem);
-
-	if (ret != num_pages && write) {
-		ttm_tt_free_user_pages(ttm);
-		ttm_mem_global_free(mem_glob, num_pages * PAGE_SIZE);
-		return -ENOMEM;
-	}
-
-	ttm->tsk = tsk;
-	ttm->start = start;
-	ttm->state = tt_unbound;
-
-	return 0;
-}
-
 struct ttm_tt *ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
 			     uint32_t page_flags, struct page *dummy_read_page)
 {
@@ -452,8 +373,6 @@ int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
 
 	ttm->state = tt_bound;
 
-	if (ttm->page_flags & TTM_PAGE_FLAG_USER)
-		ttm->page_flags |= TTM_PAGE_FLAG_USER_DIRTY;
 	return 0;
 }
 EXPORT_SYMBOL(ttm_tt_bind);
@@ -469,16 +388,6 @@ static int ttm_tt_swapin(struct ttm_tt *ttm)
 	int i;
 	int ret = -ENOMEM;
 
-	if (ttm->page_flags & TTM_PAGE_FLAG_USER) {
-		ret = ttm_tt_set_user(ttm, ttm->tsk, ttm->start,
-				      ttm->num_pages);
-		if (unlikely(ret != 0))
-			return ret;
-
-		ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;
-		return 0;
-	}
-
 	swap_storage = ttm->swap_storage;
 	BUG_ON(swap_storage == NULL);
 
@@ -529,18 +438,6 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
 	BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated);
 	BUG_ON(ttm->caching_state != tt_cached);
 
-	/*
-	 * For user buffers, just unpin the pages, as there should be
-	 * vma references.
-	 */
-
-	if (ttm->page_flags & TTM_PAGE_FLAG_USER) {
-		ttm_tt_free_user_pages(ttm);
-		ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
-		ttm->swap_storage = NULL;
-		return 0;
-	}
-
 	if (!persistent_swap_storage) {
 		swap_storage = shmem_file_setup("ttm swap",
 						ttm->num_pages << PAGE_SHIFT,
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index 42e3469..8d95a42 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -122,17 +122,12 @@ struct ttm_mem_reg {
 * be mmapped by user space. Each of these bos occupy a slot in the
 * device address space, that can be used for normal vm operations.
 *
- * @ttm_bo_type_user: These are user-space memory areas that are made
- * available to the GPU by mapping the buffer pages into the GPU aperture
- * space. These buffers cannot be mmaped from the device address space.
- *
 * @ttm_bo_type_kernel: These buffers are like ttm_bo_type_device buffers,
 * but they cannot be accessed from user-space. For kernel-only use.
 */
 
 enum ttm_bo_type {
 	ttm_bo_type_device,
-	ttm_bo_type_user,
 	ttm_bo_type_kernel
 };
 
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index 94eb143..37527d6 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -118,8 +118,6 @@ struct ttm_backend {
 	struct ttm_backend_func *func;
 };
 
-#define TTM_PAGE_FLAG_USER            (1 << 1)
-#define TTM_PAGE_FLAG_USER_DIRTY      (1 << 2)
 #define TTM_PAGE_FLAG_WRITE           (1 << 3)
 #define TTM_PAGE_FLAG_SWAPPED         (1 << 4)
 #define TTM_PAGE_FLAG_PERSISTENT_SWAP (1 << 5)
@@ -146,8 +144,6 @@ enum ttm_caching_state {
 * @num_pages: Number of pages in the page array.
 * @bdev: Pointer to the current struct ttm_bo_device.
 * @be: Pointer to the ttm backend.
- * @tsk: The task for user ttm.
- * @start: virtual address for user ttm.
 * @swap_storage: Pointer to shmem struct file for swap storage.
 * @caching_state: The current caching state of the pages.
 * @state: The current binding state of the pages.
@@ -167,8 +163,6 @@ struct ttm_tt {
 	unsigned long num_pages;
 	struct ttm_bo_global *glob;
 	struct ttm_backend *be;
-	struct task_struct *tsk;
-	unsigned long start;
 	struct file *swap_storage;
 	enum ttm_caching_state caching_state;
 	enum {
@@ -618,24 +612,6 @@ extern struct ttm_tt *ttm_tt_create(struct ttm_bo_device *bdev,
 				    struct page *dummy_read_page);
 
 /**
- * ttm_tt_set_user:
- *
- * @ttm: The struct ttm_tt to populate.
- * @tsk: A struct task_struct for which @start is a valid user-space address.
- * @start: A valid user-space address.
- * @num_pages: Size in pages of the user memory area.
- *
- * Populate a struct ttm_tt with a user-space memory area after first pinning
- * the pages backing it.
- * Returns:
- * !0: Error.
- */
-
-extern int ttm_tt_set_user(struct ttm_tt *ttm,
-			   struct task_struct *tsk,
-			   unsigned long start, unsigned long num_pages);
-
-/**
 * ttm_ttm_bind:
 *
 * @ttm: The struct ttm_tt containing backing pages.
From: Jerome Glisse <jglisse@redhat.com>
The split between highmem and lowmem pages was rendered useless by the pool code. Remove it. Note: further cleanup would change the ttm page allocation helpers to actually take an array instead of relying on a list; this could drastically reduce the number of function calls in the common case of allocating a whole buffer (see the sketch below).
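For reference, the array-based helper hinted at here is what a later patch in this series introduces; its signature ends up as:

int ttm_get_pages(struct page **pages, unsigned npages, int flags,
		  enum ttm_caching_state cstate, dma_addr_t *dma_address);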
Signed-off-by: Jerome Glisse <jglisse@redhat.com>
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
---
 drivers/gpu/drm/ttm/ttm_tt.c    |   11 ++---------
 include/drm/ttm/ttm_bo_driver.h |    7 -------
 2 files changed, 2 insertions(+), 16 deletions(-)
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 82a1161..8b7a6d0 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -69,7 +69,7 @@ static struct page *__ttm_tt_get_page(struct ttm_tt *ttm, int index)
 	struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
 	int ret;
 
-	while (NULL == (p = ttm->pages[index])) {
+	if (NULL == (p = ttm->pages[index])) {
 		INIT_LIST_HEAD(&h);
@@ -85,10 +85,7 @@ static struct page *__ttm_tt_get_page(struct ttm_tt *ttm, int index)
 		if (unlikely(ret != 0))
 			goto out_err;
 
-		if (PageHighMem(p))
-			ttm->pages[--ttm->first_himem_page] = p;
-		else
-			ttm->pages[++ttm->last_lomem_page] = p;
+		ttm->pages[index] = p;
 	}
 	return p;
 out_err:
@@ -270,8 +267,6 @@ static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm)
 	ttm_put_pages(&h, count, ttm->page_flags, ttm->caching_state,
 		      ttm->dma_address);
 	ttm->state = tt_unpopulated;
-	ttm->first_himem_page = ttm->num_pages;
-	ttm->last_lomem_page = -1;
 }
 
 void ttm_tt_destroy(struct ttm_tt *ttm)
@@ -315,8 +310,6 @@ struct ttm_tt *ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
 
 	ttm->glob = bdev->glob;
 	ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
-	ttm->first_himem_page = ttm->num_pages;
-	ttm->last_lomem_page = -1;
 	ttm->caching_state = tt_cached;
 	ttm->page_flags = page_flags;
 
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index 37527d6..9da182b 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -136,11 +136,6 @@ enum ttm_caching_state {
 * @dummy_read_page: Page to map where the ttm_tt page array contains a NULL
 * pointer.
 * @pages: Array of pages backing the data.
- * @first_himem_page: Himem pages are put last in the page array, which
- * enables us to run caching attribute changes on only the first part
- * of the page array containing lomem pages. This is the index of the
- * first himem page.
- * @last_lomem_page: Index of the last lomem page in the page array.
 * @num_pages: Number of pages in the page array.
 * @bdev: Pointer to the current struct ttm_bo_device.
 * @be: Pointer to the ttm backend.
@@ -157,8 +152,6 @@ enum ttm_caching_state {
 struct ttm_tt {
 	struct page *dummy_read_page;
 	struct page **pages;
-	long first_himem_page;
-	long last_lomem_page;
 	uint32_t page_flags;
 	unsigned long num_pages;
 	struct ttm_bo_global *glob;
From: Jerome Glisse <jglisse@redhat.com>
This field is not used by any of the drivers, so just drop it.
Signed-off-by: Jerome Glisse <jglisse@redhat.com>
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
---
 drivers/gpu/drm/radeon/radeon_ttm.c |    1 -
 include/drm/ttm/ttm_bo_driver.h     |    2 --
 2 files changed, 0 insertions(+), 3 deletions(-)
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 0b5468b..97c76ae 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -787,7 +787,6 @@ struct ttm_backend *radeon_ttm_backend_create(struct radeon_device *rdev)
 		return NULL;
 	}
 	gtt->backend.bdev = &rdev->mman.bdev;
-	gtt->backend.flags = 0;
 	gtt->backend.func = &radeon_backend_func;
 	gtt->rdev = rdev;
 	gtt->pages = NULL;
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index 9da182b..6d17140 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -106,7 +106,6 @@ struct ttm_backend_func {
 * struct ttm_backend
 *
 * @bdev: Pointer to a struct ttm_bo_device.
- * @flags: For driver use.
 * @func: Pointer to a struct ttm_backend_func that describes
 * the backend methods.
 *
@@ -114,7 +113,6 @@ struct ttm_backend_func {
 
 struct ttm_backend {
 	struct ttm_bo_device *bdev;
-	uint32_t flags;
 	struct ttm_backend_func *func;
 };
From: Jerome Glisse <jglisse@redhat.com>
On failure we need to make sure the page we free has the wb cache attribute set. Do this by calling the proper ttm page helper function.
Signed-off-by: Jerome Glisse <jglisse@redhat.com>
---
 drivers/gpu/drm/ttm/ttm_tt.c |    5 ++++-
 1 files changed, 4 insertions(+), 1 deletions(-)
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 8b7a6d0..3fb4c6d 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -89,7 +89,10 @@ static struct page *__ttm_tt_get_page(struct ttm_tt *ttm, int index)
 	}
 	return p;
 out_err:
-	put_page(p);
+	INIT_LIST_HEAD(&h);
+	list_add(&p->lru, &h);
+	ttm_put_pages(&h, 1, ttm->page_flags,
+		      ttm->caching_state, &ttm->dma_address[index]);
 	return NULL;
 }
From: Jerome Glisse <jglisse@redhat.com>
Use the ttm_tt page ptr array for page allocation, and move the list-to-array unwinding into the page allocation functions.
V2:
- split the fix to use ttm_put_pages into a separate patch
- properly fill the pages array when TTM_PAGE_FLAG_ZERO_ALLOC is not set
Signed-off-by: Jerome Glisse <jglisse@redhat.com>
---
 drivers/gpu/drm/ttm/ttm_memory.c     |   44 +++++++++++++--------
 drivers/gpu/drm/ttm/ttm_page_alloc.c |   70 +++++++++++++++++++---------------
 drivers/gpu/drm/ttm/ttm_tt.c         |   61 ++++++++++--------------------
 include/drm/ttm/ttm_memory.h         |   11 +++--
 include/drm/ttm/ttm_page_alloc.h     |   17 ++++----
 5 files changed, 101 insertions(+), 102 deletions(-)
diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
index e70ddd8..3a3a58b 100644
--- a/drivers/gpu/drm/ttm/ttm_memory.c
+++ b/drivers/gpu/drm/ttm/ttm_memory.c
@@ -543,41 +543,53 @@ int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
 }
 EXPORT_SYMBOL(ttm_mem_global_alloc);
 
-int ttm_mem_global_alloc_page(struct ttm_mem_global *glob,
-			      struct page *page,
-			      bool no_wait, bool interruptible)
+int ttm_mem_global_alloc_pages(struct ttm_mem_global *glob,
+			       struct page **pages,
+			       unsigned npages,
+			       bool no_wait, bool interruptible)
 {
 	struct ttm_mem_zone *zone = NULL;
+	unsigned i;
+	int r;
 
 	/**
	 * Page allocations may be registed in a single zone
	 * only if highmem or !dma32.
	 */
-
+	for (i = 0; i < npages; i++) {
 #ifdef CONFIG_HIGHMEM
-	if (PageHighMem(page) && glob->zone_highmem != NULL)
-		zone = glob->zone_highmem;
+		if (PageHighMem(pages[i]) && glob->zone_highmem != NULL)
+			zone = glob->zone_highmem;
 #else
-	if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL)
-		zone = glob->zone_kernel;
+		if (glob->zone_dma32 && page_to_pfn(pages[i]) > 0x00100000UL)
+			zone = glob->zone_kernel;
 #endif
-	return ttm_mem_global_alloc_zone(glob, zone, PAGE_SIZE, no_wait,
-					 interruptible);
+		r = ttm_mem_global_alloc_zone(glob, zone, PAGE_SIZE, no_wait,
+					      interruptible);
+		if (r) {
+			return r;
+		}
+	}
+	return 0;
 }
 
-void ttm_mem_global_free_page(struct ttm_mem_global *glob, struct page *page)
+void ttm_mem_global_free_pages(struct ttm_mem_global *glob,
+			       struct page **pages, unsigned npages)
 {
 	struct ttm_mem_zone *zone = NULL;
+	unsigned i;
 
+	for (i = 0; i < npages; i++) {
 #ifdef CONFIG_HIGHMEM
-	if (PageHighMem(page) && glob->zone_highmem != NULL)
-		zone = glob->zone_highmem;
+		if (PageHighMem(pages[i]) && glob->zone_highmem != NULL)
+			zone = glob->zone_highmem;
 #else
-	if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL)
-		zone = glob->zone_kernel;
+		if (glob->zone_dma32 && page_to_pfn(pages[i]) > 0x00100000UL)
+			zone = glob->zone_kernel;
 #endif
-	ttm_mem_global_free_zone(glob, zone, PAGE_SIZE);
+		ttm_mem_global_free_zone(glob, zone, PAGE_SIZE);
+	}
 }
 
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index 727e93d..e94ff12 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -619,8 +619,10 @@ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
 * @return count of pages still required to fulfill the request.
 */
 static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool,
-		struct list_head *pages, int ttm_flags,
-		enum ttm_caching_state cstate, unsigned count)
+					struct list_head *pages,
+					int ttm_flags,
+					enum ttm_caching_state cstate,
+					unsigned count)
 {
 	unsigned long irq_flags;
 	struct list_head *p;
@@ -664,13 +666,14 @@ out:
 * On success pages list will hold count number of correctly
 * cached pages.
 */
-int ttm_get_pages(struct list_head *pages, int flags,
-		  enum ttm_caching_state cstate, unsigned count,
-		  dma_addr_t *dma_address)
+int ttm_get_pages(struct page **pages, unsigned npages, int flags,
+		  enum ttm_caching_state cstate, dma_addr_t *dma_address)
 {
 	struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
 	struct page *p = NULL;
+	struct list_head plist;
 	gfp_t gfp_flags = GFP_USER;
+	unsigned count = 0;
 	int r;
 
 	/* set zero flag for page allocation if required */
@@ -684,82 +687,87 @@ int ttm_get_pages(struct list_head *pages, int flags,
 		else
 			gfp_flags |= GFP_HIGHUSER;
 
-		for (r = 0; r < count; ++r) {
-			p = alloc_page(gfp_flags);
-			if (!p) {
-
+		for (count = 0; count < npages; ++count) {
+			pages[count] = alloc_page(gfp_flags);
+			if (pages[count] == NULL) {
				printk(KERN_ERR TTM_PFX
				       "Unable to allocate page.");
				return -ENOMEM;
			}
-
-			list_add(&p->lru, pages);
 		}
 		return 0;
 	}
 
-
 	/* combine zero flag to pool flags */
 	gfp_flags |= pool->gfp_flags;
 
 	/* First we take pages from the pool */
-	count = ttm_page_pool_get_pages(pool, pages, flags, cstate, count);
+	INIT_LIST_HEAD(&plist);
+	npages = ttm_page_pool_get_pages(pool, &plist, flags, cstate, npages);
 
 	/* clear the pages coming from the pool if requested */
+	count = 0;
+	list_for_each_entry(p, &plist, lru) {
+		pages[count++] = p;
+	}
 	if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) {
-		list_for_each_entry(p, pages, lru) {
+		list_for_each_entry(p, &plist, lru) {
			clear_page(page_address(p));
		}
 	}
 
 	/* If pool didn't have enough pages allocate new one. */
-	if (count > 0) {
+	if (npages > 0) {
		/* ttm_alloc_new_pages doesn't reference pool so we can run
		 * multiple requests in parallel.
		 **/
-		r = ttm_alloc_new_pages(pages, gfp_flags, flags, cstate, count);
+		INIT_LIST_HEAD(&plist);
+		r = ttm_alloc_new_pages(&plist, gfp_flags, flags, cstate, npages);
+		list_for_each_entry(p, &plist, lru) {
+			pages[count++] = p;
+		}
		if (r) {
			/* If there is any pages in the list put them back to
			 * the pool. */
			printk(KERN_ERR TTM_PFX
			       "Failed to allocate extra pages "
			       "for large request.");
-			ttm_put_pages(pages, 0, flags, cstate, NULL);
+			ttm_put_pages(pages, count, flags, cstate, NULL);
			return r;
		}
 	}
 
-
 	return 0;
 }
 
 /* Put all pages in pages list to correct pool to wait for reuse */
-void ttm_put_pages(struct list_head *pages, unsigned page_count, int flags,
+void ttm_put_pages(struct page **pages, unsigned page_count, int flags,
		   enum ttm_caching_state cstate, dma_addr_t *dma_address)
 {
 	unsigned long irq_flags;
 	struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
-	struct page *p, *tmp;
+	unsigned i;
 
 	if (pool == NULL) {
		/* No pool for this memory type so free the pages */
 
-		list_for_each_entry_safe(p, tmp, pages, lru) {
-			__free_page(p);
+		for (i = 0; i < page_count; i++) {
+			if (pages[i]) {
+				__free_page(pages[i]);
+				pages[i] = NULL;
+			}
		}
-		/* Make the pages list empty */
-		INIT_LIST_HEAD(pages);
		return;
 	}
-	if (page_count == 0) {
-		list_for_each_entry_safe(p, tmp, pages, lru) {
-			++page_count;
-		}
-	}
 
 	spin_lock_irqsave(&pool->lock, irq_flags);
-	list_splice_init(pages, &pool->list);
-	pool->npages += page_count;
+	for (i = 0; i < page_count; i++) {
+		if (pages[i]) {
+			list_add_tail(&pages[i]->lru, &pool->list);
+			pages[i] = NULL;
+			pool->npages++;
+		}
+	}
 	/* Check that we don't go over the pool limit */
 	page_count = 0;
 	if (pool->npages > _manager->options.max_size) {
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 3fb4c6d..2dd45ca 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -65,33 +65,25 @@ static void ttm_tt_free_page_directory(struct ttm_tt *ttm)
 static struct page *__ttm_tt_get_page(struct ttm_tt *ttm, int index)
 {
 	struct page *p;
-	struct list_head h;
 	struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
 	int ret;
 
 	if (NULL == (p = ttm->pages[index])) {
 
-		INIT_LIST_HEAD(&h);
-
-		ret = ttm_get_pages(&h, ttm->page_flags, ttm->caching_state, 1,
+		ret = ttm_get_pages(&ttm->pages[index], 1, ttm->page_flags,
+				    ttm->caching_state,
				    &ttm->dma_address[index]);
-
		if (ret != 0)
			return NULL;
 
-		p = list_first_entry(&h, struct page, lru);
-
-		ret = ttm_mem_global_alloc_page(mem_glob, p, false, false);
+		ret = ttm_mem_global_alloc_pages(mem_glob, &ttm->pages[index],
+						 1, false, false);
		if (unlikely(ret != 0))
			goto out_err;
-
-		ttm->pages[index] = p;
 	}
 	return p;
 out_err:
-	INIT_LIST_HEAD(&h);
-	list_add(&p->lru, &h);
-	ttm_put_pages(&h, 1, ttm->page_flags,
+	ttm_put_pages(&ttm->pages[index], 1, ttm->page_flags,
		      ttm->caching_state, &ttm->dma_address[index]);
 	return NULL;
 }
@@ -110,8 +102,7 @@ struct page *ttm_tt_get_page(struct ttm_tt *ttm, int index)
 
 int ttm_tt_populate(struct ttm_tt *ttm)
 {
-	struct page *page;
-	unsigned long i;
+	struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
 	struct ttm_backend *be;
 	int ret;
 
@@ -126,10 +117,16 @@ int ttm_tt_populate(struct ttm_tt *ttm)
 
 	be = ttm->be;
 
-	for (i = 0; i < ttm->num_pages; ++i) {
-		page = __ttm_tt_get_page(ttm, i);
-		if (!page)
-			return -ENOMEM;
+	ret = ttm_get_pages(ttm->pages, ttm->num_pages, ttm->page_flags,
+			    ttm->caching_state, ttm->dma_address);
+	if (ret != 0)
+		return -ENOMEM;
+	ret = ttm_mem_global_alloc_pages(mem_glob, ttm->pages,
+					 ttm->num_pages, false, false);
+	if (unlikely(ret != 0)) {
+		ttm_put_pages(ttm->pages, ttm->num_pages, ttm->page_flags,
+			      ttm->caching_state, ttm->dma_address);
+		return -ENOMEM;
 	}
 
 	be->func->populate(be, ttm->num_pages, ttm->pages,
@@ -242,33 +239,15 @@ EXPORT_SYMBOL(ttm_tt_set_placement_caching);
 
 static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm)
 {
-	int i;
-	unsigned count = 0;
-	struct list_head h;
-	struct page *cur_page;
 	struct ttm_backend *be = ttm->be;
-
-	INIT_LIST_HEAD(&h);
+	struct ttm_mem_global *glob = ttm->glob->mem_glob;
 
 	if (be)
		be->func->clear(be);
-	for (i = 0; i < ttm->num_pages; ++i) {
-		cur_page = ttm->pages[i];
-		ttm->pages[i] = NULL;
-		if (cur_page) {
-			if (page_count(cur_page) != 1)
-				printk(KERN_ERR TTM_PFX
-				       "Erroneous page count. "
-				       "Leaking pages.\n");
-			ttm_mem_global_free_page(ttm->glob->mem_glob,
-						 cur_page);
-			list_add(&cur_page->lru, &h);
-			count++;
-		}
-	}
-	ttm_put_pages(&h, count, ttm->page_flags, ttm->caching_state,
-		      ttm->dma_address);
+	ttm_mem_global_free_pages(glob, ttm->pages, ttm->num_pages);
+	ttm_put_pages(ttm->pages, ttm->num_pages, ttm->page_flags,
+		      ttm->caching_state, ttm->dma_address);
 	ttm->state = tt_unpopulated;
 }
 
diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
index 26c1f78..cee821e 100644
--- a/include/drm/ttm/ttm_memory.h
+++ b/include/drm/ttm/ttm_memory.h
@@ -150,10 +150,11 @@ extern int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
				bool no_wait, bool interruptible);
 extern void ttm_mem_global_free(struct ttm_mem_global *glob,
				uint64_t amount);
-extern int ttm_mem_global_alloc_page(struct ttm_mem_global *glob,
-				     struct page *page,
-				     bool no_wait, bool interruptible);
-extern void ttm_mem_global_free_page(struct ttm_mem_global *glob,
-				     struct page *page);
+extern int ttm_mem_global_alloc_pages(struct ttm_mem_global *glob,
+				      struct page **pages,
+				      unsigned npages,
+				      bool no_wait, bool interruptible);
+extern void ttm_mem_global_free_pages(struct ttm_mem_global *glob,
+				      struct page **pages, unsigned npages);
 extern size_t ttm_round_pot(size_t size);
 #endif
diff --git a/include/drm/ttm/ttm_page_alloc.h b/include/drm/ttm/ttm_page_alloc.h
index 129de12..fffb3bd 100644
--- a/include/drm/ttm/ttm_page_alloc.h
+++ b/include/drm/ttm/ttm_page_alloc.h
@@ -32,29 +32,28 @@
 /**
 * Get count number of pages from pool to pages list.
 *
- * @pages: head of empty linked list where pages are filled.
+ * @pages: array of pages ptr
+ * @npages: number of pages to allocate.
 * @flags: ttm flags for page allocation.
 * @cstate: ttm caching state for the page.
- * @count: number of pages to allocate.
 * @dma_address: The DMA (bus) address of pages (if TTM_PAGE_FLAG_DMA32 set).
 */
-int ttm_get_pages(struct list_head *pages,
+int ttm_get_pages(struct page **pages,
+		  unsigned npages,
		  int flags,
		  enum ttm_caching_state cstate,
-		  unsigned count,
		  dma_addr_t *dma_address);
 /**
 * Put linked list of pages to pool.
 *
- * @pages: list of pages to free.
- * @page_count: number of pages in the list. Zero can be passed for unknown
- * count.
+ * @pages: array of pages ptr
+ * @npages: number of pages to free.
 * @flags: ttm flags for page allocation.
 * @cstate: ttm caching state.
 * @dma_address: The DMA (bus) address of pages (if TTM_PAGE_FLAG_DMA32 set).
 */
-void ttm_put_pages(struct list_head *pages,
-		   unsigned page_count,
+void ttm_put_pages(struct page **pages,
+		   unsigned npages,
		   int flags,
		   enum ttm_caching_state cstate,
		   dma_addr_t *dma_address);
On Wed, Nov 02, 2011 at 07:37:52PM -0400, j.glisse@gmail.com wrote:
.. snip..
We don't want to keep that check? Or perhaps move that functionality into ttm_put_pages?
Otherwise Reviewed-by...
From: Jerome Glisse <jglisse@redhat.com>
Signed-off-by: Jerome Glisse <jglisse@redhat.com>
---
 drivers/gpu/drm/ttm/ttm_tt.c |    2 +-
 1 files changed, 1 insertions(+), 1 deletions(-)
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 2dd45ca..58ea7dc 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -298,7 +298,7 @@ struct ttm_tt *ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
 	ttm->dummy_read_page = dummy_read_page;
 
 	ttm_tt_alloc_page_directory(ttm);
-	if (!ttm->pages) {
+	if (!ttm->pages || !ttm->dma_address) {
 		ttm_tt_destroy(ttm);
 		printk(KERN_ERR TTM_PFX "Failed allocating page table\n");
 		return NULL;
On Wed, Nov 02, 2011 at 07:37:53PM -0400, j.glisse@gmail.com wrote:
From: Jerome Glisse <jglisse@redhat.com>
Signed-off-by: Jerome Glisse <jglisse@redhat.com>
Reviewed-by-me.
From: Jerome Glisse <jglisse@redhat.com>
ttm_backend will only ever exist together with a ttm_tt, and a ttm_tt is only of interest when bound to a backend. Thus, to avoid code & data duplication between the two, merge them.
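The shape of the merge, abridged from the radeon conversion below (this is just the pattern extracted from the diff, not extra API): each driver embeds struct ttm_tt at the start of its private structure, initializes it with ttm_tt_init(), and recovers its own type in the bind/unbind/destroy hooks via container_of().

struct radeon_ttm_tt {
	struct ttm_tt ttm;	/* embedded base; &gtt->ttm is what TTM sees */
	struct radeon_device *rdev;
	u64 offset;
};

static int radeon_ttm_backend_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
	struct radeon_ttm_tt *gtt = container_of(ttm, struct radeon_ttm_tt, ttm);

	/* program the GART using ttm->pages / ttm->dma_address */
	return radeon_gart_bind(gtt->rdev, gtt->offset, ttm->num_pages,
				ttm->pages, ttm->dma_address);
}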
Signed-off-by: Jerome Glisse <jglisse@redhat.com>
---
 drivers/gpu/drm/nouveau/nouveau_bo.c    |   14 ++-
 drivers/gpu/drm/nouveau/nouveau_drv.h   |    5 +-
 drivers/gpu/drm/nouveau/nouveau_sgdma.c |  188 ++++++++++++--------------
 drivers/gpu/drm/radeon/radeon_ttm.c     |  222 ++++++++++++-------------------
 drivers/gpu/drm/ttm/ttm_agp_backend.c   |   88 +++++--------
 drivers/gpu/drm/ttm/ttm_bo.c            |    9 +-
 drivers/gpu/drm/ttm/ttm_tt.c            |   59 ++-------
 drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c  |   66 +++------
 include/drm/ttm/ttm_bo_driver.h         |  104 ++++++---------
 9 files changed, 295 insertions(+), 460 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 7226f41..b060fa4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -343,8 +343,10 @@ nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
 	*mem = val;
 }
 
-static struct ttm_backend *
-nouveau_bo_create_ttm_backend_entry(struct ttm_bo_device *bdev)
+static struct ttm_tt *
+nouveau_ttm_tt_create(struct ttm_bo_device *bdev,
+		      unsigned long size, uint32_t page_flags,
+		      struct page *dummy_read_page)
 {
 	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
 	struct drm_device *dev = dev_priv->dev;
@@ -352,11 +354,13 @@ nouveau_bo_create_ttm_backend_entry(struct ttm_bo_device *bdev)
 	switch (dev_priv->gart_info.type) {
 #if __OS_HAS_AGP
 	case NOUVEAU_GART_AGP:
-		return ttm_agp_backend_init(bdev, dev->agp->bridge);
+		return ttm_agp_tt_create(bdev, dev->agp->bridge,
+					 size, page_flags, dummy_read_page);
 #endif
 	case NOUVEAU_GART_PDMA:
 	case NOUVEAU_GART_HW:
-		return nouveau_sgdma_init_ttm(dev);
+		return nouveau_sgdma_create_ttm(bdev, size, page_flags,
+						dummy_read_page);
 	default:
 		NV_ERROR(dev, "Unknown GART type %d\n",
			 dev_priv->gart_info.type);
@@ -1045,7 +1049,7 @@ nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence)
 }
 
 struct ttm_bo_driver nouveau_bo_driver = {
-	.create_ttm_backend_entry = nouveau_bo_create_ttm_backend_entry,
+	.ttm_tt_create = &nouveau_ttm_tt_create,
 	.invalidate_caches = nouveau_bo_invalidate_caches,
 	.init_mem_type = nouveau_bo_init_mem_type,
 	.evict_flags = nouveau_bo_evict_flags,
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 29837da..0c53e39 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -1000,7 +1000,10 @@ extern int nouveau_sgdma_init(struct drm_device *);
 extern void nouveau_sgdma_takedown(struct drm_device *);
 extern uint32_t nouveau_sgdma_get_physical(struct drm_device *,
					   uint32_t offset);
-extern struct ttm_backend *nouveau_sgdma_init_ttm(struct drm_device *);
+extern struct ttm_tt *nouveau_sgdma_create_ttm(struct ttm_bo_device *bdev,
+					       unsigned long size,
+					       uint32_t page_flags,
+					       struct page *dummy_read_page);
 
 /* nouveau_debugfs.c */
 #if defined(CONFIG_DRM_NOUVEAU_DEBUG)
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index b75258a..bc2ab90 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -8,44 +8,23 @@
 #define NV_CTXDMA_PAGE_MASK  (NV_CTXDMA_PAGE_SIZE - 1)
 
 struct nouveau_sgdma_be {
-	struct ttm_backend backend;
+	struct ttm_tt ttm;
 	struct drm_device *dev;
-
-	dma_addr_t *pages;
-	unsigned nr_pages;
-	bool unmap_pages;
-
 	u64 offset;
-	bool bound;
 };
 
 static int
-nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages,
-		       struct page **pages, struct page *dummy_read_page,
-		       dma_addr_t *dma_addrs)
+nouveau_sgdma_dma_map(struct ttm_tt *ttm)
 {
-	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
 	struct drm_device *dev = nvbe->dev;
 	int i;
 
-	NV_DEBUG(nvbe->dev, "num_pages = %ld\n", num_pages);
-
-	nvbe->pages = dma_addrs;
-	nvbe->nr_pages = num_pages;
-	nvbe->unmap_pages = true;
-
-	/* this code path isn't called and is incorrect anyways */
-	if (0) { /* dma_addrs[0] != DMA_ERROR_CODE) { */
-		nvbe->unmap_pages = false;
-		return 0;
-	}
-
-	for (i = 0; i < num_pages; i++) {
-		nvbe->pages[i] = pci_map_page(dev->pdev, pages[i], 0,
-					      PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-		if (pci_dma_mapping_error(dev->pdev, nvbe->pages[i])) {
-			nvbe->nr_pages = --i;
-			be->func->clear(be);
+	for (i = 0; i < ttm->num_pages; i++) {
+		ttm->dma_address[i] = pci_map_page(dev->pdev, ttm->pages[i],
+						   0, PAGE_SIZE,
+						   PCI_DMA_BIDIRECTIONAL);
+		if (pci_dma_mapping_error(dev->pdev, ttm->dma_address[i])) {
			return -EFAULT;
		}
 	}
@@ -54,53 +33,52 @@ nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages,
 }
 
 static void
-nouveau_sgdma_clear(struct ttm_backend *be)
+nouveau_sgdma_dma_unmap(struct ttm_tt *ttm)
 {
-	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
 	struct drm_device *dev = nvbe->dev;
+	int i;
 
-	if (nvbe->bound)
-		be->func->unbind(be);
-
-	if (nvbe->unmap_pages) {
-		while (nvbe->nr_pages--) {
-			pci_unmap_page(dev->pdev, nvbe->pages[nvbe->nr_pages],
+	for (i = 0; i < ttm->num_pages; i++) {
+		if (ttm->dma_address[i]) {
+			pci_unmap_page(dev->pdev, ttm->dma_address[i],
				       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
		}
+		ttm->dma_address[i] = 0;
 	}
 }
 
 static void
-nouveau_sgdma_destroy(struct ttm_backend *be)
+nouveau_sgdma_destroy(struct ttm_tt *ttm)
 {
-	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
 
-	if (be) {
+	if (ttm) {
 		NV_DEBUG(nvbe->dev, "\n");
-
-		if (nvbe) {
-			if (nvbe->pages)
-				be->func->clear(be);
-			kfree(nvbe);
-		}
+		kfree(nvbe);
 	}
 }
 
 static int
-nv04_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
+nv04_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
 {
-	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
 	struct drm_device *dev = nvbe->dev;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
 	unsigned i, j, pte;
+	int r;
 
 	NV_DEBUG(dev, "pg=0x%lx\n", mem->start);
+	r = nouveau_sgdma_dma_map(ttm);
+	if (r) {
+		return r;
+	}
 
 	nvbe->offset = mem->start << PAGE_SHIFT;
 	pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
-	for (i = 0; i < nvbe->nr_pages; i++) {
-		dma_addr_t dma_offset = nvbe->pages[i];
+	for (i = 0; i < ttm->num_pages; i++) {
+		dma_addr_t dma_offset = ttm->dma_address[i];
		uint32_t offset_l = lower_32_bits(dma_offset);
 
		for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++) {
@@ -109,14 +87,13 @@ nv04_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
		}
 	}
 
-	nvbe->bound = true;
 	return 0;
 }
 
 static int
-nv04_sgdma_unbind(struct ttm_backend *be)
+nv04_sgdma_unbind(struct ttm_tt *ttm)
 {
-	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
 	struct drm_device *dev = nvbe->dev;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
@@ -124,22 +101,20 @@ nv04_sgdma_unbind(struct ttm_backend *be)
 
 	NV_DEBUG(dev, "\n");
 
-	if (!nvbe->bound)
+	if (ttm->state != tt_bound)
 		return 0;
 
 	pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
-	for (i = 0; i < nvbe->nr_pages; i++) {
+	for (i = 0; i < ttm->num_pages; i++) {
		for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++)
			nv_wo32(gpuobj, (pte * 4) + 0, 0x00000000);
 	}
 
-	nvbe->bound = false;
+	nouveau_sgdma_dma_unmap(ttm);
 	return 0;
 }
 
 static struct ttm_backend_func nv04_sgdma_backend = {
-	.populate		= nouveau_sgdma_populate,
-	.clear			= nouveau_sgdma_clear,
 	.bind			= nv04_sgdma_bind,
 	.unbind			= nv04_sgdma_unbind,
 	.destroy		= nouveau_sgdma_destroy
@@ -158,16 +133,21 @@ nv41_sgdma_flush(struct nouveau_sgdma_be *nvbe)
 }
 
 static int
-nv41_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
+nv41_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
 {
-	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
 	struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
 	struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
-	dma_addr_t *list = nvbe->pages;
+	dma_addr_t *list = ttm->dma_address;
 	u32 pte = mem->start << 2;
-	u32 cnt = nvbe->nr_pages;
+	u32 cnt = ttm->num_pages;
+	int r;
 
 	nvbe->offset = mem->start << PAGE_SHIFT;
+	r = nouveau_sgdma_dma_map(ttm);
+	if (r) {
+		return r;
+	}
 
 	while (cnt--) {
		nv_wo32(pgt, pte, (*list++ >> 7) | 1);
@@ -175,18 +155,17 @@ nv41_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
 	}
 
 	nv41_sgdma_flush(nvbe);
-	nvbe->bound = true;
 	return 0;
 }
 
 static int
-nv41_sgdma_unbind(struct ttm_backend *be)
+nv41_sgdma_unbind(struct ttm_tt *ttm)
 {
-	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
 	struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
 	struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
 	u32 pte = (nvbe->offset >> 12) << 2;
-	u32 cnt = nvbe->nr_pages;
+	u32 cnt = ttm->num_pages;
 
 	while (cnt--) {
		nv_wo32(pgt, pte, 0x00000000);
@@ -194,24 +173,23 @@ nv41_sgdma_unbind(struct ttm_backend *be)
 	}
 
 	nv41_sgdma_flush(nvbe);
-	nvbe->bound = false;
+	nouveau_sgdma_dma_unmap(ttm);
 	return 0;
 }
 
 static struct ttm_backend_func nv41_sgdma_backend = {
-	.populate		= nouveau_sgdma_populate,
-	.clear			= nouveau_sgdma_clear,
 	.bind			= nv41_sgdma_bind,
 	.unbind			= nv41_sgdma_unbind,
 	.destroy		= nouveau_sgdma_destroy
 };
 
 static void
-nv44_sgdma_flush(struct nouveau_sgdma_be *nvbe)
+nv44_sgdma_flush(struct ttm_tt *ttm)
 {
+	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
 	struct drm_device *dev = nvbe->dev;
 
-	nv_wr32(dev, 0x100814, (nvbe->nr_pages - 1) << 12);
+	nv_wr32(dev, 0x100814, (ttm->num_pages - 1) << 12);
 	nv_wr32(dev, 0x100808, nvbe->offset | 0x20);
 	if (!nv_wait(dev, 0x100808, 0x00000001, 0x00000001))
		NV_ERROR(dev, "gart flush timeout: 0x%08x\n",
@@ -270,17 +248,21 @@ nv44_sgdma_fill(struct nouveau_gpuobj *pgt, dma_addr_t *list, u32 base, u32 cnt)
 }
 
 static int
-nv44_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
+nv44_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
 {
-	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
 	struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
 	struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
-	dma_addr_t *list = nvbe->pages;
+	dma_addr_t *list = ttm->dma_address;
 	u32 pte = mem->start << 2, tmp[4];
-	u32 cnt = nvbe->nr_pages;
-	int i;
+	u32 cnt = ttm->num_pages;
+	int i, r;
 
 	nvbe->offset = mem->start << PAGE_SHIFT;
+	r = nouveau_sgdma_dma_map(ttm);
+	if (r) {
+		return r;
+	}
 
 	if (pte & 0x0000000c) {
		u32 max = 4 - ((pte >> 2) & 0x3);
@@ -305,19 +287,18 @@ nv44_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
 	if (cnt)
		nv44_sgdma_fill(pgt, list, pte, cnt);
 
-	nv44_sgdma_flush(nvbe);
-	nvbe->bound = true;
+	nv44_sgdma_flush(ttm);
 	return 0;
 }
 
 static int
-nv44_sgdma_unbind(struct ttm_backend *be)
+nv44_sgdma_unbind(struct ttm_tt *ttm)
 {
-	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
 	struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
 	struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
 	u32 pte = (nvbe->offset >> 12) << 2;
-	u32 cnt = nvbe->nr_pages;
+	u32 cnt = ttm->num_pages;
 
 	if (pte & 0x0000000c) {
		u32 max = 4 - ((pte >> 2) & 0x3);
@@ -339,55 +320,53 @@ nv44_sgdma_unbind(struct ttm_backend *be)
 	if (cnt)
		nv44_sgdma_fill(pgt, NULL, pte, cnt);
 
-	nv44_sgdma_flush(nvbe);
-	nvbe->bound = false;
+	nv44_sgdma_flush(ttm);
+	nouveau_sgdma_dma_unmap(ttm);
 	return 0;
 }
 
 static struct ttm_backend_func nv44_sgdma_backend = {
-	.populate		= nouveau_sgdma_populate,
-	.clear			= nouveau_sgdma_clear,
 	.bind			= nv44_sgdma_bind,
 	.unbind			= nv44_sgdma_unbind,
 	.destroy		= nouveau_sgdma_destroy
 };
 
 static int
-nv50_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
+nv50_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
 {
-	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
 	struct nouveau_mem *node = mem->mm_node;
+	int r;
+
 	/* noop: bound in move_notify() */
-	node->pages = nvbe->pages;
-	nvbe->pages = (dma_addr_t *)node;
-	nvbe->bound = true;
+	r = nouveau_sgdma_dma_map(ttm);
+	if (r) {
+		return r;
+	}
+	node->pages = ttm->dma_address;
 	return 0;
 }
 
 static int
-nv50_sgdma_unbind(struct ttm_backend *be)
+nv50_sgdma_unbind(struct ttm_tt *ttm)
 {
-	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
-	struct nouveau_mem *node = (struct nouveau_mem *)nvbe->pages;
 	/* noop: unbound in move_notify() */
-	nvbe->pages = node->pages;
-	node->pages = NULL;
-	nvbe->bound = false;
+	nouveau_sgdma_dma_unmap(ttm);
 	return 0;
 }
 
 static struct ttm_backend_func nv50_sgdma_backend = {
-	.populate		= nouveau_sgdma_populate,
-	.clear			= nouveau_sgdma_clear,
 	.bind			= nv50_sgdma_bind,
 	.unbind			= nv50_sgdma_unbind,
 	.destroy		= nouveau_sgdma_destroy
 };
 
-struct ttm_backend *
-nouveau_sgdma_init_ttm(struct drm_device *dev)
+struct ttm_tt *
+nouveau_sgdma_create_ttm(struct ttm_bo_device *bdev,
+			 unsigned long size, uint32_t page_flags,
+			 struct page *dummy_read_page)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
+	struct drm_device *dev = dev_priv->dev;
 	struct nouveau_sgdma_be *nvbe;
 
 	nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL);
@@ -395,9 +374,12 @@ nouveau_sgdma_init_ttm(struct drm_device *dev)
 		return NULL;
 
 	nvbe->dev = dev;
+	nvbe->ttm.func = dev_priv->gart_info.func;
 
-	nvbe->backend.func = dev_priv->gart_info.func;
-	return &nvbe->backend;
+	if (ttm_tt_init(&nvbe->ttm, bdev, size, page_flags, dummy_read_page)) {
+		return NULL;
+	}
+	return &nvbe->ttm;
 }
 
 int
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 97c76ae..53ff62b 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -114,24 +114,6 @@ static void radeon_ttm_global_fini(struct radeon_device *rdev)
 	}
 }
 
-struct ttm_backend *radeon_ttm_backend_create(struct radeon_device *rdev);
-
-static struct ttm_backend*
-radeon_create_ttm_backend_entry(struct ttm_bo_device *bdev)
-{
-	struct radeon_device *rdev;
-
-	rdev = radeon_get_rdev(bdev);
-#if __OS_HAS_AGP
-	if (rdev->flags & RADEON_IS_AGP) {
-		return ttm_agp_backend_init(bdev, rdev->ddev->agp->bridge);
-	} else
-#endif
-	{
-		return radeon_ttm_backend_create(rdev);
-	}
-}
-
 static int radeon_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
 {
 	return 0;
@@ -515,8 +497,93 @@ static bool radeon_sync_obj_signaled(void *sync_obj, void *sync_arg)
 	return radeon_fence_signaled((struct radeon_fence *)sync_obj);
 }
 
+/*
+ * TTM backend functions.
+ */
+struct radeon_ttm_tt {
+	struct ttm_tt			ttm;
+	struct radeon_device		*rdev;
+	u64				offset;
+};
+
+static int radeon_ttm_backend_bind(struct ttm_tt *ttm,
+				   struct ttm_mem_reg *bo_mem)
+{
+	struct radeon_ttm_tt *gtt;
+	int r;
+
+	gtt = container_of(ttm, struct radeon_ttm_tt, ttm);
+	gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT);
+	if (!ttm->num_pages) {
+		WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
+		     ttm->num_pages, bo_mem, ttm);
+	}
+	r = radeon_gart_bind(gtt->rdev, gtt->offset,
+			     ttm->num_pages, ttm->pages, ttm->dma_address);
+	if (r) {
+		DRM_ERROR("failed to bind %lu pages at 0x%08X\n",
+			  ttm->num_pages, (unsigned)gtt->offset);
+		return r;
+	}
+	return 0;
+}
+
+static int radeon_ttm_backend_unbind(struct ttm_tt *ttm)
+{
+	struct radeon_ttm_tt *gtt;
+
+	gtt = container_of(ttm, struct radeon_ttm_tt, ttm);
+	radeon_gart_unbind(gtt->rdev, gtt->offset, ttm->num_pages);
+	return 0;
+}
+
+static void radeon_ttm_backend_destroy(struct ttm_tt *ttm)
+{
+	struct radeon_ttm_tt *gtt;
+
+	gtt = container_of(ttm, struct radeon_ttm_tt, ttm);
+	if (ttm->state == tt_bound) {
+		radeon_ttm_backend_unbind(ttm);
+	}
+	kfree(gtt);
+}
+
+static struct ttm_backend_func radeon_backend_func = {
+	.bind = &radeon_ttm_backend_bind,
+	.unbind = &radeon_ttm_backend_unbind,
+	.destroy = &radeon_ttm_backend_destroy,
+};
+
+struct ttm_tt *radeon_ttm_tt_create(struct ttm_bo_device *bdev,
+				    unsigned long size, uint32_t page_flags,
+				    struct page *dummy_read_page)
+{
+	struct radeon_device *rdev;
+	struct radeon_ttm_tt *gtt;
+
+	rdev = radeon_get_rdev(bdev);
+#if __OS_HAS_AGP
+	if (rdev->flags & RADEON_IS_AGP) {
+		return ttm_agp_tt_create(bdev, rdev->ddev->agp->bridge,
+					 size, page_flags, dummy_read_page);
+	}
+#endif
+
+	gtt = kzalloc(sizeof(struct radeon_ttm_tt), GFP_KERNEL);
+	if (gtt == NULL) {
+		return NULL;
+	}
+	gtt->ttm.func = &radeon_backend_func;
+	gtt->rdev = rdev;
+	if (ttm_tt_init(&gtt->ttm, bdev, size, page_flags, dummy_read_page)) {
+		return NULL;
+	}
+	return &gtt->ttm;
+}
+
+
 static struct ttm_bo_driver radeon_bo_driver = {
-	.create_ttm_backend_entry = &radeon_create_ttm_backend_entry,
+	.ttm_tt_create = &radeon_ttm_tt_create,
 	.invalidate_caches = &radeon_invalidate_caches,
 	.init_mem_type = &radeon_init_mem_type,
 	.evict_flags = &radeon_evict_flags,
@@ -680,123 +747,6 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
 }
 
-/*
- * TTM backend functions.
- */
-struct radeon_ttm_backend {
-	struct ttm_backend		backend;
-	struct radeon_device		*rdev;
-	unsigned long			num_pages;
-	struct page			**pages;
-	struct page			*dummy_read_page;
-	dma_addr_t			*dma_addrs;
-	bool				populated;
-	bool				bound;
-	unsigned			offset;
-};
-
-static int radeon_ttm_backend_populate(struct ttm_backend *backend,
-				       unsigned long num_pages,
-				       struct page **pages,
-				       struct page *dummy_read_page,
-				       dma_addr_t *dma_addrs)
-{
-	struct radeon_ttm_backend *gtt;
-
-	gtt = container_of(backend, struct radeon_ttm_backend, backend);
-	gtt->pages = pages;
-	gtt->dma_addrs = dma_addrs;
-	gtt->num_pages = num_pages;
-	gtt->dummy_read_page = dummy_read_page;
-	gtt->populated = true;
-	return 0;
-}
-
-static void radeon_ttm_backend_clear(struct ttm_backend *backend)
-{
-	struct radeon_ttm_backend *gtt;
-
-	gtt = container_of(backend, struct radeon_ttm_backend, backend);
-	gtt->pages = NULL;
-	gtt->dma_addrs = NULL;
-	gtt->num_pages = 0;
-	gtt->dummy_read_page = NULL;
-	gtt->populated = false;
-	gtt->bound = false;
-}
-
-
-static int radeon_ttm_backend_bind(struct ttm_backend *backend,
-				   struct ttm_mem_reg *bo_mem)
-{
-	struct radeon_ttm_backend *gtt;
-	int r;
-
-	gtt = container_of(backend, struct radeon_ttm_backend, backend);
-	gtt->offset = bo_mem->start << PAGE_SHIFT;
-	if (!gtt->num_pages) {
-		WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
-		     gtt->num_pages, bo_mem, backend);
-	}
-	r = radeon_gart_bind(gtt->rdev, gtt->offset,
-			     gtt->num_pages, gtt->pages, gtt->dma_addrs);
-	if (r) {
-		DRM_ERROR("failed to bind %lu pages at 0x%08X\n",
-			  gtt->num_pages, gtt->offset);
-		return r;
-	}
-	gtt->bound = true;
-	return 0;
-}
-
-static int radeon_ttm_backend_unbind(struct ttm_backend *backend)
-{
-	struct radeon_ttm_backend *gtt;
-
-	gtt = container_of(backend, struct radeon_ttm_backend, backend);
-	radeon_gart_unbind(gtt->rdev, gtt->offset, gtt->num_pages);
-	gtt->bound = false;
-	return 0;
-}
-
-static void radeon_ttm_backend_destroy(struct ttm_backend *backend)
-{
-	struct radeon_ttm_backend *gtt;
-
-	gtt = container_of(backend, struct radeon_ttm_backend, backend);
-	if (gtt->bound) {
-		radeon_ttm_backend_unbind(backend);
-	}
-	kfree(gtt);
-}
-
-static struct ttm_backend_func radeon_backend_func = {
-	.populate = &radeon_ttm_backend_populate,
-	.clear = &radeon_ttm_backend_clear,
-	.bind = &radeon_ttm_backend_bind,
-	.unbind = &radeon_ttm_backend_unbind,
-	.destroy = &radeon_ttm_backend_destroy,
-};
-
-struct ttm_backend *radeon_ttm_backend_create(struct radeon_device *rdev)
-{
-	struct radeon_ttm_backend *gtt;
-
-	gtt = kzalloc(sizeof(struct radeon_ttm_backend), GFP_KERNEL);
-	if (gtt == NULL) {
-		return NULL;
-	}
-	gtt->backend.bdev = &rdev->mman.bdev;
-	gtt->backend.func = &radeon_backend_func;
-	gtt->rdev = rdev;
-	gtt->pages = NULL;
-	gtt->num_pages = 0;
-	gtt->dummy_read_page = NULL;
-	gtt->populated = false;
-	gtt->bound = false;
-	return &gtt->backend;
-}
-
 #define RADEON_DEBUGFS_MEM_TYPES 2
 
 #if defined(CONFIG_DEBUG_FS)
diff --git a/drivers/gpu/drm/ttm/ttm_agp_backend.c b/drivers/gpu/drm/ttm/ttm_agp_backend.c
index 1c4a72f..14ebd36 100644
--- a/drivers/gpu/drm/ttm/ttm_agp_backend.c
+++ b/drivers/gpu/drm/ttm/ttm_agp_backend.c
@@ -40,45 +40,33 @@
 #include <asm/agp.h>
 
 struct ttm_agp_backend {
-	struct ttm_backend backend;
+	struct ttm_tt ttm;
 	struct agp_memory *mem;
 	struct agp_bridge_data *bridge;
 };
 
-static int ttm_agp_populate(struct ttm_backend *backend,
-			    unsigned long num_pages, struct page **pages,
-			    struct page *dummy_read_page,
-			    dma_addr_t *dma_addrs)
+static int ttm_agp_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
 {
-	struct ttm_agp_backend *agp_be =
-	    container_of(backend, struct ttm_agp_backend, backend);
-	struct page **cur_page, **last_page = pages + num_pages;
+	struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm);
+	struct drm_mm_node *node = bo_mem->mm_node;
 	struct agp_memory *mem;
+	int ret, cached = (bo_mem->placement & TTM_PL_FLAG_CACHED);
+	unsigned i;
 
-	mem = agp_allocate_memory(agp_be->bridge, num_pages, AGP_USER_MEMORY);
+	mem = agp_allocate_memory(agp_be->bridge, ttm->num_pages, AGP_USER_MEMORY);
 	if (unlikely(mem == NULL))
 		return -ENOMEM;
 
 	mem->page_count = 0;
-	for (cur_page = pages; cur_page < last_page; ++cur_page) {
-		struct page *page = *cur_page;
+	for (i = 0; i < ttm->num_pages; i++) {
+		struct page *page = ttm->pages[i];
+
		if (!page)
-			page = dummy_read_page;
+			page = ttm->dummy_read_page;
 
		mem->pages[mem->page_count++] = page;
 	}
 	agp_be->mem = mem;
-	return 0;
-}
-
-static int ttm_agp_bind(struct ttm_backend *backend, struct ttm_mem_reg *bo_mem)
-{
-	struct ttm_agp_backend *agp_be =
-	    container_of(backend, struct ttm_agp_backend, backend);
-	struct drm_mm_node *node = bo_mem->mm_node;
-	struct agp_memory *mem = agp_be->mem;
-	int cached = (bo_mem->placement & TTM_PL_FLAG_CACHED);
-	int ret;
 
 	mem->is_flushed = 1;
 	mem->type = (cached) ? AGP_USER_CACHED_MEMORY : AGP_USER_MEMORY;
@@ -90,50 +78,38 @@ static int ttm_agp_bind(struct ttm_backend *backend, struct ttm_mem_reg *bo_mem)
 	return ret;
 }
 
-static int ttm_agp_unbind(struct ttm_backend *backend)
+static int ttm_agp_unbind(struct ttm_tt *ttm)
 {
-	struct ttm_agp_backend *agp_be =
-	    container_of(backend, struct ttm_agp_backend, backend);
-
-	if (agp_be->mem->is_bound)
-		return agp_unbind_memory(agp_be->mem);
-	else
-		return 0;
-}
+	struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm);
 
-static void ttm_agp_clear(struct ttm_backend *backend)
-{
-	struct ttm_agp_backend *agp_be =
-	    container_of(backend, struct ttm_agp_backend, backend);
-	struct agp_memory *mem = agp_be->mem;
-
-	if (mem) {
-		ttm_agp_unbind(backend);
-		agp_free_memory(mem);
+	if (agp_be->mem) {
+		if (agp_be->mem->is_bound)
+			return agp_unbind_memory(agp_be->mem);
+		agp_free_memory(agp_be->mem);
+		agp_be->mem = NULL;
 	}
-	agp_be->mem = NULL;
+	return 0;
 }
 
-static void ttm_agp_destroy(struct ttm_backend *backend)
+static void ttm_agp_destroy(struct ttm_tt *ttm)
 {
-	struct ttm_agp_backend *agp_be =
-	    container_of(backend, struct ttm_agp_backend, backend);
+	struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm);
 
 	if (agp_be->mem)
-		ttm_agp_clear(backend);
+		ttm_agp_unbind(ttm);
 	kfree(agp_be);
 }
 
 static struct ttm_backend_func ttm_agp_func = {
-	.populate = ttm_agp_populate,
-	.clear = ttm_agp_clear,
 	.bind = ttm_agp_bind,
 	.unbind = ttm_agp_unbind,
 	.destroy = ttm_agp_destroy,
 };
 
-struct ttm_backend *ttm_agp_backend_init(struct ttm_bo_device *bdev,
-					 struct agp_bridge_data *bridge)
+struct ttm_tt *ttm_agp_tt_create(struct ttm_bo_device *bdev,
+				 struct agp_bridge_data *bridge,
+				 unsigned long size, uint32_t page_flags,
+				 struct page *dummy_read_page)
 {
 	struct ttm_agp_backend *agp_be;
 
@@ -143,10 +119,14 @@ struct ttm_backend *ttm_agp_backend_init(struct ttm_bo_device *bdev,
 
 	agp_be->mem = NULL;
 	agp_be->bridge = bridge;
-	agp_be->backend.func = &ttm_agp_func;
-	agp_be->backend.bdev = bdev;
-	return &agp_be->backend;
+	agp_be->ttm.func = &ttm_agp_func;
+
+	if (ttm_tt_init(&agp_be->ttm, bdev, size, page_flags, dummy_read_page)) {
+		return NULL;
+	}
+
+	return &agp_be->ttm;
 }
-EXPORT_SYMBOL(ttm_agp_backend_init);
+EXPORT_SYMBOL(ttm_agp_tt_create);
 
 #endif
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 4bde335..cb73527 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -337,8 +337,8 @@ static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
		if (zero_alloc)
			page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
 	case ttm_bo_type_kernel:
-		bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
-					page_flags, glob->dummy_read_page);
+		bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
+						      page_flags, glob->dummy_read_page);
		if (unlikely(bo->ttm == NULL))
			ret = -ENOMEM;
		break;
@@ -1437,10 +1437,7 @@ int ttm_bo_global_init(struct drm_global_reference *ref)
		goto out_no_shrink;
 	}
 
-	glob->ttm_bo_extra_size =
-		ttm_round_pot(sizeof(struct ttm_tt)) +
-		ttm_round_pot(sizeof(struct ttm_backend));
-
+	glob->ttm_bo_extra_size = ttm_round_pot(sizeof(struct ttm_tt));
 	glob->ttm_bo_size = glob->ttm_bo_extra_size +
		ttm_round_pot(sizeof(struct ttm_buffer_object));
 
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 58ea7dc..b55e132 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -103,7 +103,6 @@ struct page *ttm_tt_get_page(struct ttm_tt *ttm, int index)
 int ttm_tt_populate(struct ttm_tt *ttm)
 {
 	struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
-	struct ttm_backend *be;
 	int ret;
 
 	if (ttm->state != tt_unpopulated)
@@ -115,8 +114,6 @@ int ttm_tt_populate(struct ttm_tt *ttm)
		return ret;
 	}
 
-	be = ttm->be;
-
 	ret = ttm_get_pages(ttm->pages, ttm->num_pages, ttm->page_flags,
			    ttm->caching_state, ttm->dma_address);
 	if (ret != 0)
@@ -129,8 +126,6 @@ int ttm_tt_populate(struct ttm_tt *ttm)
		return -ENOMEM;
 	}
 
-	be->func->populate(be, ttm->num_pages, ttm->pages,
-			   ttm->dummy_read_page, ttm->dma_address);
 	ttm->state = tt_unbound;
 	return 0;
 }
@@ -239,12 +234,8 @@ EXPORT_SYMBOL(ttm_tt_set_placement_caching);
 
 static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm)
 {
-	struct ttm_backend *be = ttm->be;
 	struct ttm_mem_global *glob = ttm->glob->mem_glob;
 
-	if (be)
-		be->func->clear(be);
-
 	ttm_mem_global_free_pages(glob, ttm->pages, ttm->num_pages);
 	ttm_put_pages(ttm->pages, ttm->num_pages, ttm->page_flags,
		      ttm->caching_state, ttm->dma_address);
@@ -253,20 +244,11 @@ static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm)
 
 void ttm_tt_destroy(struct ttm_tt *ttm)
 {
-	struct ttm_backend *be;
-
 	if (unlikely(ttm == NULL))
		return;
 
-	be = ttm->be;
-	if (likely(be != NULL)) {
-		be->func->destroy(be);
-		ttm->be = NULL;
-	}
-
 	if (likely(ttm->pages != NULL)) {
		ttm_tt_free_alloced_pages(ttm);
-
		ttm_tt_free_page_directory(ttm);
 	}
 
@@ -274,52 +256,38 @@ void ttm_tt_destroy(struct ttm_tt *ttm)
	    ttm->swap_storage)
		fput(ttm->swap_storage);
 
-	kfree(ttm);
+	ttm->swap_storage = NULL;
+	ttm->func->destroy(ttm);
 }
 
-struct ttm_tt *ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
-			     uint32_t page_flags, struct page *dummy_read_page)
+int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev,
+		unsigned long size, uint32_t page_flags,
+		struct page *dummy_read_page)
 {
-	struct ttm_bo_driver *bo_driver = bdev->driver;
-	struct ttm_tt *ttm;
-
-	if (!bo_driver)
-		return NULL;
-
-	ttm = kzalloc(sizeof(*ttm), GFP_KERNEL);
-	if (!ttm)
-		return NULL;
-
+	ttm->bdev = bdev;
 	ttm->glob = bdev->glob;
 	ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
 	ttm->caching_state = tt_cached;
 	ttm->page_flags = page_flags;
-
 	ttm->dummy_read_page = dummy_read_page;
+	ttm->state = tt_unpopulated;
 
 	ttm_tt_alloc_page_directory(ttm);
 	if (!ttm->pages || !ttm->dma_address) {
		ttm_tt_destroy(ttm);
		printk(KERN_ERR TTM_PFX "Failed allocating page table\n");
-		return NULL;
-	}
-	ttm->be = bo_driver->create_ttm_backend_entry(bdev);
-	if (!ttm->be) {
-		ttm_tt_destroy(ttm);
-		printk(KERN_ERR TTM_PFX "Failed creating ttm backend entry\n");
-		return NULL;
+		return -ENOMEM;
 	}
-	ttm->state = tt_unpopulated;
-	return ttm;
+	return 0;
 }
+EXPORT_SYMBOL(ttm_tt_init);
 
 void ttm_tt_unbind(struct ttm_tt *ttm)
 {
 	int ret;
-	struct ttm_backend *be = ttm->be;
 
 	if (ttm->state == tt_bound) {
-		ret = be->func->unbind(be);
+		ret = ttm->func->unbind(ttm);
		BUG_ON(ret);
		ttm->state = tt_unbound;
 	}
@@ -328,7 +296,6 @@ void ttm_tt_unbind(struct ttm_tt *ttm)
 int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
 {
 	int ret = 0;
-	struct ttm_backend *be;
 
 	if (!ttm)
		return -EINVAL;
@@ -336,13 +303,11 @@ int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
 	if (ttm->state == tt_bound)
		return 0;
 
-	be = ttm->be;
-
 	ret = ttm_tt_populate(ttm);
 	if (ret)
		return ret;
 
-	ret = be->func->bind(be, bo_mem);
+	ret = ttm->func->bind(ttm, bo_mem);
 	if (unlikely(ret != 0))
		return ret;
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
index 5a72ed9..cc72435 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -139,85 +139,61 @@ struct ttm_placement vmw_srf_placement = {
 	.busy_placement = gmr_vram_placement_flags
 };
 
-struct vmw_ttm_backend {
-	struct ttm_backend backend;
-	struct page **pages;
-	unsigned long num_pages;
+struct vmw_ttm_tt {
+	struct ttm_tt ttm;
 	struct vmw_private *dev_priv;
 	int gmr_id;
 };
 
-static int vmw_ttm_populate(struct ttm_backend *backend,
-			    unsigned long num_pages, struct page **pages,
-			    struct page *dummy_read_page,
-			    dma_addr_t *dma_addrs)
+static int vmw_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
 {
-	struct vmw_ttm_backend *vmw_be =
-	    container_of(backend, struct vmw_ttm_backend, backend);
-
-	vmw_be->pages = pages;
-	vmw_be->num_pages = num_pages;
-
-	return 0;
-}
-
-static int vmw_ttm_bind(struct ttm_backend *backend, struct ttm_mem_reg *bo_mem)
-{
-	struct vmw_ttm_backend *vmw_be =
-	    container_of(backend, struct vmw_ttm_backend, backend);
+	struct vmw_ttm_tt *vmw_be = container_of(ttm, struct vmw_ttm_tt, ttm);
 
 	vmw_be->gmr_id = bo_mem->start;
 
-	return vmw_gmr_bind(vmw_be->dev_priv, vmw_be->pages,
-			    vmw_be->num_pages, vmw_be->gmr_id);
+	return vmw_gmr_bind(vmw_be->dev_priv, ttm->pages,
+			    ttm->num_pages, vmw_be->gmr_id);
 }
 
-static int vmw_ttm_unbind(struct ttm_backend *backend)
+static int vmw_ttm_unbind(struct ttm_tt *ttm)
 {
-	struct vmw_ttm_backend *vmw_be =
-	    container_of(backend, struct vmw_ttm_backend, backend);
+	struct vmw_ttm_tt *vmw_be = container_of(ttm, struct vmw_ttm_tt, ttm);
 
 	vmw_gmr_unbind(vmw_be->dev_priv, vmw_be->gmr_id);
 	return 0;
 }
 
-static void vmw_ttm_clear(struct ttm_backend *backend)
+static void vmw_ttm_destroy(struct ttm_tt *ttm)
 {
-	struct vmw_ttm_backend *vmw_be =
-		container_of(backend, struct vmw_ttm_backend, backend);
-
-	vmw_be->pages = NULL;
-	vmw_be->num_pages = 0;
-}
-
-static void vmw_ttm_destroy(struct ttm_backend *backend)
-{
-	struct vmw_ttm_backend *vmw_be =
-	    container_of(backend, struct vmw_ttm_backend, backend);
+	struct vmw_ttm_tt *vmw_be = container_of(ttm, struct vmw_ttm_tt, ttm);
 
 	kfree(vmw_be);
 }
 
 static struct ttm_backend_func vmw_ttm_func = {
-	.populate = vmw_ttm_populate,
-	.clear = vmw_ttm_clear,
 	.bind = vmw_ttm_bind,
 	.unbind = vmw_ttm_unbind,
 	.destroy = vmw_ttm_destroy,
 };
 
-struct ttm_backend *vmw_ttm_backend_init(struct ttm_bo_device *bdev)
+struct ttm_tt *vmw_ttm_tt_create(struct ttm_bo_device *bdev,
+				 unsigned long size, uint32_t page_flags,
+				 struct page *dummy_read_page)
 {
-	struct vmw_ttm_backend *vmw_be;
+	struct vmw_ttm_tt *vmw_be;
 
 	vmw_be = kmalloc(sizeof(*vmw_be), GFP_KERNEL);
 	if (!vmw_be)
		return NULL;
 
-	vmw_be->backend.func = &vmw_ttm_func;
+	vmw_be->ttm.func = &vmw_ttm_func;
 	vmw_be->dev_priv = container_of(bdev, struct vmw_private, bdev);
 
-	return &vmw_be->backend;
+	if (ttm_tt_init(&vmw_be->ttm, bdev, size, page_flags, dummy_read_page)) {
+		return NULL;
+	}
+
+	return &vmw_be->ttm;
 }
 
 int vmw_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
@@ -357,7 +333,7 @@ static int vmw_sync_obj_wait(void *sync_obj, void *sync_arg,
 }
 
 struct ttm_bo_driver vmw_bo_driver = {
-	.create_ttm_backend_entry = vmw_ttm_backend_init,
+	.ttm_tt_create = &vmw_ttm_tt_create,
 	.invalidate_caches = vmw_invalidate_caches,
 	.init_mem_type = vmw_init_mem_type,
 	.evict_flags = vmw_evict_flags,
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index 6d17140..6b8c5cd 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -43,36 +43,9 @@
 struct ttm_backend;
struct ttm_backend_func { /** - * struct ttm_backend_func member populate - * - * @backend: Pointer to a struct ttm_backend. - * @num_pages: Number of pages to populate. - * @pages: Array of pointers to ttm pages. - * @dummy_read_page: Page to be used instead of NULL pages in the - * array @pages. - * @dma_addrs: Array of DMA (bus) address of the ttm pages. - * - * Populate the backend with ttm pages. Depending on the backend, - * it may or may not copy the @pages array. - */ - int (*populate) (struct ttm_backend *backend, - unsigned long num_pages, struct page **pages, - struct page *dummy_read_page, - dma_addr_t *dma_addrs); - /** - * struct ttm_backend_func member clear - * - * @backend: Pointer to a struct ttm_backend. - * - * This is an "unpopulate" function. Release all resources - * allocated with populate. - */ - void (*clear) (struct ttm_backend *backend); - - /** * struct ttm_backend_func member bind * - * @backend: Pointer to a struct ttm_backend. + * @ttm: Pointer to a struct ttm_tt. * @bo_mem: Pointer to a struct ttm_mem_reg describing the * memory type and location for binding. * @@ -80,40 +53,27 @@ struct ttm_backend_func { * indicated by @bo_mem. This function should be able to handle * differences between aperture and system page sizes. */ - int (*bind) (struct ttm_backend *backend, struct ttm_mem_reg *bo_mem); + int (*bind) (struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem);
/** * struct ttm_backend_func member unbind * - * @backend: Pointer to a struct ttm_backend. + * @ttm: Pointer to a struct ttm_tt. * * Unbind previously bound backend pages. This function should be * able to handle differences between aperture and system page sizes. */ - int (*unbind) (struct ttm_backend *backend); + int (*unbind) (struct ttm_tt *ttm);
/** * struct ttm_backend_func member destroy * - * @backend: Pointer to a struct ttm_backend. + * @ttm: Pointer to a struct ttm_tt. * - * Destroy the backend. + * Destroy the backend. This will be called back from ttm_tt_destroy, + * so don't call ttm_tt_destroy from the callback or you end up in an + * infinite loop. */ - void (*destroy) (struct ttm_backend *backend); -}; - -/** - * struct ttm_backend - * - * @bdev: Pointer to a struct ttm_bo_device. - * @func: Pointer to a struct ttm_backend_func that describes - * the backend methods. - * - */ - -struct ttm_backend { - struct ttm_bo_device *bdev; - struct ttm_backend_func *func; + void (*destroy) (struct ttm_tt *ttm); };
#define TTM_PAGE_FLAG_WRITE (1 << 3) @@ -131,6 +91,9 @@ enum ttm_caching_state { /** * struct ttm_tt * + * @bdev: Pointer to a struct ttm_bo_device. + * @func: Pointer to a struct ttm_backend_func that describes + * the backend methods. * @dummy_read_page: Page to map where the ttm_tt page array contains a NULL * pointer. * @pages: Array of pages backing the data. @@ -148,6 +111,8 @@ enum ttm_caching_state { */
struct ttm_tt { + struct ttm_bo_device *bdev; + struct ttm_backend_func *func; struct page *dummy_read_page; struct page **pages; uint32_t page_flags; @@ -336,15 +301,22 @@ struct ttm_mem_type_manager {
struct ttm_bo_driver { /** - * struct ttm_bo_driver member create_ttm_backend_entry + * ttm_tt_create * - * @bdev: The buffer object device. + * @bdev: pointer to a struct ttm_bo_device: + * @size: Size of the data needed backing. + * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags. + * @dummy_read_page: See struct ttm_bo_device. * - * Create a driver specific struct ttm_backend. + * Create a struct ttm_tt to back data with system memory pages. + * No pages are actually allocated. + * Returns: + * NULL: Out of memory. */ - - struct ttm_backend *(*create_ttm_backend_entry) - (struct ttm_bo_device *bdev); + struct ttm_tt *(*ttm_tt_create)(struct ttm_bo_device *bdev, + unsigned long size, + uint32_t page_flags, + struct page *dummy_read_page);
/** * struct ttm_bo_driver member invalidate_caches @@ -585,8 +557,9 @@ ttm_flag_masked(uint32_t *old, uint32_t new, uint32_t mask) }
/** - * ttm_tt_create + * ttm_tt_init * + * @ttm: The struct ttm_tt. * @bdev: pointer to a struct ttm_bo_device: * @size: Size of the data needed backing. * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags. @@ -597,10 +570,9 @@ ttm_flag_masked(uint32_t *old, uint32_t new, uint32_t mask) * Returns: - * NULL: Out of memory. + * -ENOMEM: Out of memory. */ -extern struct ttm_tt *ttm_tt_create(struct ttm_bo_device *bdev, - unsigned long size, - uint32_t page_flags, - struct page *dummy_read_page); +extern int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev, + unsigned long size, uint32_t page_flags, + struct page *dummy_read_page);
/** * ttm_ttm_bind: @@ -626,7 +598,7 @@ extern int ttm_tt_populate(struct ttm_tt *ttm); * * @ttm: The struct ttm_tt. * - * Unbind, unpopulate and destroy a struct ttm_tt. + * Unbind, unpopulate and destroy the common part of a struct ttm_tt. */ extern void ttm_tt_destroy(struct ttm_tt *ttm);
@@ -1013,17 +985,23 @@ extern const struct ttm_mem_type_manager_func ttm_bo_manager_func; #include <linux/agp_backend.h>
/** - * ttm_agp_backend_init + * ttm_agp_tt_create * * @bdev: Pointer to a struct ttm_bo_device. * @bridge: The agp bridge this device is sitting on. + * @size: Size of the data needed backing. + * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags. + * @dummy_read_page: See struct ttm_bo_device. * * Create a TTM backend that uses the indicated AGP bridge as an aperture * for TT memory. This function uses the linux agpgart interface to * bind and unbind memory backing a ttm_tt. */ -extern struct ttm_backend *ttm_agp_backend_init(struct ttm_bo_device *bdev, - struct agp_bridge_data *bridge); +extern struct ttm_tt *ttm_agp_tt_create(struct ttm_bo_device *bdev, + struct agp_bridge_data *bridge, + unsigned long size, uint32_t page_flags, + struct page *dummy_read_page); #endif
#endif
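To make the new shape concrete, here is a minimal sketch of a converted driver (the foo_ names are hypothetical, mirroring the vmwgfx conversion above): embed struct ttm_tt in the driver object, fill in bind/unbind/destroy, and implement the ttm_tt_create hook on top of ttm_tt_init.

struct foo_ttm_tt {
	struct ttm_tt ttm;	/* common part, embedded */
	int slot;		/* driver private state */
};

static int foo_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
	struct foo_ttm_tt *ftt = container_of(ttm, struct foo_ttm_tt, ttm);

	ftt->slot = bo_mem->start;
	/* program the aperture with ttm->pages / ttm->num_pages here */
	return 0;
}

static int foo_ttm_unbind(struct ttm_tt *ttm)
{
	/* undo whatever bind did */
	return 0;
}

static void foo_ttm_destroy(struct ttm_tt *ttm)
{
	/* called back from ttm_tt_destroy, so never call it from here */
	kfree(container_of(ttm, struct foo_ttm_tt, ttm));
}

static struct ttm_backend_func foo_ttm_func = {
	.bind = foo_ttm_bind,
	.unbind = foo_ttm_unbind,
	.destroy = foo_ttm_destroy,
};

static struct ttm_tt *foo_ttm_tt_create(struct ttm_bo_device *bdev,
					unsigned long size,
					uint32_t page_flags,
					struct page *dummy_read_page)
{
	struct foo_ttm_tt *ftt;

	ftt = kzalloc(sizeof(*ftt), GFP_KERNEL);
	if (!ftt)
		return NULL;

	/* set func first so a failing ttm_tt_init can reach
	 * foo_ttm_destroy through ttm_tt_destroy */
	ftt->ttm.func = &foo_ttm_func;
	if (ttm_tt_init(&ftt->ttm, bdev, size, page_flags, dummy_read_page))
		return NULL;
	return &ftt->ttm;
}

The driver then points .ttm_tt_create at this in its struct ttm_bo_driver, exactly as the vmwgfx hunk does.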
From: Jerome Glisse jglisse@redhat.com
Move the page allocation and freeing to driver callbacks and provide ttm helper functions for drivers to call.
The most intrusive change is that we now only ever fully populate an object; this simplifies some of the code that was designed around the page-fault path.
Signed-off-by: Jerome Glisse jglisse@redhat.com --- drivers/gpu/drm/nouveau/nouveau_bo.c | 3 + drivers/gpu/drm/radeon/radeon_ttm.c | 2 + drivers/gpu/drm/ttm/ttm_bo_util.c | 31 ++++++----- drivers/gpu/drm/ttm/ttm_bo_vm.c | 13 ++-- drivers/gpu/drm/ttm/ttm_page_alloc.c | 42 ++++++++++++++ drivers/gpu/drm/ttm/ttm_tt.c | 97 +++---------------------------- drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c | 3 + include/drm/ttm/ttm_bo_driver.h | 41 ++++++++------ include/drm/ttm/ttm_page_alloc.h | 18 ++++++ 9 files changed, 125 insertions(+), 125 deletions(-)
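Drivers that want the old behaviour just plug the new helpers straight into the callbacks, as the hunks below do for nouveau, radeon and vmwgfx. A driver that wants a different allocator (a dma-aware one, say) would interpose instead; a rough sketch, with hypothetical foo_ names:

static int foo_ttm_tt_populate(struct ttm_tt *ttm)
{
	/* choose a driver-specific allocator here, or fall back to the
	 * generic page pool helper this patch adds */
	return ttm_page_alloc_ttm_tt_populate(ttm);
}

static void foo_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
	ttm_page_alloc_ttm_tt_unpopulate(ttm);
	/* driver-specific teardown would go here */
}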
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index b060fa4..7e5ca3f 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -28,6 +28,7 @@ */
#include "drmP.h" +#include "ttm/ttm_page_alloc.h"
#include "nouveau_drm.h" #include "nouveau_drv.h" @@ -1050,6 +1051,8 @@ nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence)
struct ttm_bo_driver nouveau_bo_driver = { .ttm_tt_create = &nouveau_ttm_tt_create, + .ttm_tt_populate = &ttm_page_alloc_ttm_tt_populate, + .ttm_tt_unpopulate = &ttm_page_alloc_ttm_tt_unpopulate, .invalidate_caches = nouveau_bo_invalidate_caches, .init_mem_type = nouveau_bo_init_mem_type, .evict_flags = nouveau_bo_evict_flags, diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index 53ff62b..490afce 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -584,6 +584,8 @@ struct ttm_tt *radeon_ttm_tt_create(struct ttm_bo_device *bdev,
static struct ttm_bo_driver radeon_bo_driver = { .ttm_tt_create = &radeon_ttm_tt_create, + .ttm_tt_populate = &ttm_page_alloc_ttm_tt_populate, + .ttm_tt_unpopulate = &ttm_page_alloc_ttm_tt_unpopulate, .invalidate_caches = &radeon_invalidate_caches, .init_mem_type = &radeon_init_mem_type, .evict_flags = &radeon_evict_flags, diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index 082fcae..60f204d 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c @@ -244,7 +244,7 @@ static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src, unsigned long page, pgprot_t prot) { - struct page *d = ttm_tt_get_page(ttm, page); + struct page *d = ttm->pages[page]; void *dst;
if (!d) @@ -281,7 +281,7 @@ static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst, unsigned long page, pgprot_t prot) { - struct page *s = ttm_tt_get_page(ttm, page); + struct page *s = ttm->pages[page]; void *src;
if (!s) @@ -342,6 +342,12 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo, if (old_iomap == NULL && ttm == NULL) goto out2;
+ if (ttm && ttm->state == tt_unpopulated) { + ret = ttm->bdev->driver->ttm_tt_populate(ttm); + if (ret) + goto out1; + } + add = 0; dir = 1;
@@ -502,10 +508,16 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo, { struct ttm_mem_reg *mem = &bo->mem; pgprot_t prot; struct ttm_tt *ttm = bo->ttm; - struct page *d; - int i; + int ret;
BUG_ON(!ttm); + + if (ttm->state == tt_unpopulated) { + ret = ttm->bdev->driver->ttm_tt_populate(ttm); + if (ret) + return ret; + } + if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) { /* * We're mapping a single page, and the desired @@ -513,18 +525,9 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo, */
map->bo_kmap_type = ttm_bo_map_kmap; - map->page = ttm_tt_get_page(ttm, start_page); + map->page = ttm->pages[start_page]; map->virtual = kmap(map->page); } else { - /* - * Populate the part we're mapping; - */ - for (i = start_page; i < start_page + num_pages; ++i) { - d = ttm_tt_get_page(ttm, i); - if (!d) - return -ENOMEM; - } - /* * We need to use vmap to get the desired page protection * or to make the buffer object look contiguous. diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c index 221b924..bc1d751 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_vm.c +++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c @@ -174,18 +174,19 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) vma->vm_page_prot = (bo->mem.placement & TTM_PL_FLAG_CACHED) ? vm_get_page_prot(vma->vm_flags) : ttm_io_prot(bo->mem.placement, vma->vm_page_prot); - }
- /* - * Speculatively prefault a number of pages. Only error on - * first page. - */ + /* Allocate all pages at once, the most common case */ + if (ttm->bdev->driver->ttm_tt_populate(ttm)) { + retval = VM_FAULT_OOM; + goto out_io_unlock; + } + }
for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) { if (bo->mem.bus.is_iomem) pfn = ((bo->mem.bus.base + bo->mem.bus.offset) >> PAGE_SHIFT) + page_offset; else { - page = ttm_tt_get_page(ttm, page_offset); + page = ttm->pages[page_offset]; if (unlikely(!page && i == 0)) { retval = VM_FAULT_OOM; goto out_io_unlock; diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c index e94ff12..a5904ed 100644 --- a/drivers/gpu/drm/ttm/ttm_page_alloc.c +++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c @@ -844,6 +844,48 @@ void ttm_page_alloc_fini(void) _manager = NULL; }
+int ttm_page_alloc_ttm_tt_populate(struct ttm_tt *ttm) +{ + struct ttm_mem_global *mem_glob = ttm->glob->mem_glob; + int ret; + + if (ttm->state != tt_unpopulated) + return 0; + + ret = ttm_get_pages(ttm->pages, ttm->num_pages, ttm->page_flags, + ttm->caching_state, ttm->dma_address); + if (ret != 0) + return -ENOMEM; + ret = ttm_mem_global_alloc_pages(mem_glob, ttm->pages, + ttm->num_pages, false, false); + if (unlikely(ret != 0)) { + ttm_put_pages(ttm->pages, ttm->num_pages, ttm->page_flags, + ttm->caching_state, ttm->dma_address); + return -ENOMEM; + } + + /* Swap in only once the pages exist; ttm_tt_swapin() copies into + * ttm->pages and must not call back into populate, or the two + * functions would recurse. On error it already unpopulates. */ + if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) { + ret = ttm_tt_swapin(ttm); + if (unlikely(ret != 0)) + return ret; + } + + ttm->state = tt_unbound; + return 0; +} +EXPORT_SYMBOL(ttm_page_alloc_ttm_tt_populate); + +void ttm_page_alloc_ttm_tt_unpopulate(struct ttm_tt *ttm) +{ + struct ttm_mem_global *glob = ttm->glob->mem_glob; + + ttm_mem_global_free_pages(glob, ttm->pages, ttm->num_pages); + ttm_put_pages(ttm->pages, ttm->num_pages, ttm->page_flags, + ttm->caching_state, ttm->dma_address); + ttm->state = tt_unpopulated; +} +EXPORT_SYMBOL(ttm_page_alloc_ttm_tt_unpopulate); + int ttm_page_alloc_debugfs(struct seq_file *m, void *data) { struct ttm_page_pool *p; diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c index b55e132..07e86c4 100644 --- a/drivers/gpu/drm/ttm/ttm_tt.c +++ b/drivers/gpu/drm/ttm/ttm_tt.c @@ -42,8 +42,6 @@ #include "ttm/ttm_placement.h" #include "ttm/ttm_page_alloc.h"
-static int ttm_tt_swapin(struct ttm_tt *ttm); - /** * Allocates storage for pointers to the pages that back the ttm. */ @@ -62,75 +60,6 @@ static void ttm_tt_free_page_directory(struct ttm_tt *ttm) ttm->dma_address = NULL; }
-static struct page *__ttm_tt_get_page(struct ttm_tt *ttm, int index) -{ - struct page *p; - struct ttm_mem_global *mem_glob = ttm->glob->mem_glob; - int ret; - - if (NULL == (p = ttm->pages[index])) { - - ret = ttm_get_pages(&ttm->pages[index], 1, ttm->page_flags, - ttm->caching_state, - &ttm->dma_address[index]); - if (ret != 0) - return NULL; - - ret = ttm_mem_global_alloc_pages(mem_glob, &ttm->pages[index], - 1, false, false); - if (unlikely(ret != 0)) - goto out_err; - } - return p; -out_err: - ttm_put_pages(&ttm->pages[index], 1, ttm->page_flags, - ttm->caching_state, &ttm->dma_address[index]); - return NULL; -} - -struct page *ttm_tt_get_page(struct ttm_tt *ttm, int index) -{ - int ret; - - if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) { - ret = ttm_tt_swapin(ttm); - if (unlikely(ret != 0)) - return NULL; - } - return __ttm_tt_get_page(ttm, index); -} - -int ttm_tt_populate(struct ttm_tt *ttm) -{ - struct ttm_mem_global *mem_glob = ttm->glob->mem_glob; - int ret; - - if (ttm->state != tt_unpopulated) - return 0; - - if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) { - ret = ttm_tt_swapin(ttm); - if (unlikely(ret != 0)) - return ret; - } - - ret = ttm_get_pages(ttm->pages, ttm->num_pages, ttm->page_flags, - ttm->caching_state, ttm->dma_address); - if (ret != 0) - return -ENOMEM; - ret = ttm_mem_global_alloc_pages(mem_glob, ttm->pages, - ttm->num_pages, false, false); - if (unlikely(ret != 0)) { - ttm_put_pages(ttm->pages, ttm->num_pages, ttm->page_flags, - ttm->caching_state, ttm->dma_address); - return -ENOMEM; - } - - ttm->state = tt_unbound; - return 0; -} -EXPORT_SYMBOL(ttm_tt_populate); - #ifdef CONFIG_X86 static inline int ttm_tt_set_page_caching(struct page *p, enum ttm_caching_state c_old, @@ -232,23 +161,13 @@ int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement) } EXPORT_SYMBOL(ttm_tt_set_placement_caching);
-static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm) -{ - struct ttm_mem_global *glob = ttm->glob->mem_glob; - - ttm_mem_global_free_pages(glob, ttm->pages, ttm->num_pages); - ttm_put_pages(ttm->pages, ttm->num_pages, ttm->page_flags, - ttm->caching_state, ttm->dma_address); - ttm->state = tt_unpopulated; -} - void ttm_tt_destroy(struct ttm_tt *ttm) { if (unlikely(ttm == NULL)) return;
if (likely(ttm->pages != NULL)) { - ttm_tt_free_alloced_pages(ttm); + ttm->bdev->driver->ttm_tt_unpopulate(ttm); ttm_tt_free_page_directory(ttm); }
@@ -303,7 +222,7 @@ int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem) if (ttm->state == tt_bound) return 0;
- ret = ttm_tt_populate(ttm); + ret = ttm->bdev->driver->ttm_tt_populate(ttm); if (ret) return ret;
@@ -317,7 +236,7 @@ int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem) } EXPORT_SYMBOL(ttm_tt_bind);
-static int ttm_tt_swapin(struct ttm_tt *ttm) +int ttm_tt_swapin(struct ttm_tt *ttm) { struct address_space *swap_space; struct file *swap_storage; @@ -331,6 +250,10 @@ static int ttm_tt_swapin(struct ttm_tt *ttm) swap_storage = ttm->swap_storage; BUG_ON(swap_storage == NULL);
swap_space = swap_storage->f_path.dentry->d_inode->i_mapping;
for (i = 0; i < ttm->num_pages; ++i) { @@ -339,7 +262,7 @@ static int ttm_tt_swapin(struct ttm_tt *ttm) ret = PTR_ERR(from_page); goto out_err; } - to_page = __ttm_tt_get_page(ttm, i); + to_page = ttm->pages[i]; if (unlikely(to_page == NULL)) goto out_err;
@@ -360,7 +283,7 @@ static int ttm_tt_swapin(struct ttm_tt *ttm)
return 0; out_err: - ttm_tt_free_alloced_pages(ttm); + ttm->bdev->driver->ttm_tt_unpopulate(ttm); return ret; }
@@ -412,7 +335,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage) page_cache_release(to_page); }
- ttm_tt_free_alloced_pages(ttm); + ttm->bdev->driver->ttm_tt_unpopulate(ttm); ttm->swap_storage = swap_storage; ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED; if (persistent_swap_storage) diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c index cc72435..383993b 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c @@ -28,6 +28,7 @@ #include "vmwgfx_drv.h" #include "ttm/ttm_bo_driver.h" #include "ttm/ttm_placement.h" +#include "ttm/ttm_page_alloc.h"
static uint32_t vram_placement_flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED; @@ -334,6 +335,8 @@ static int vmw_sync_obj_wait(void *sync_obj, void *sync_arg,
struct ttm_bo_driver vmw_bo_driver = { .ttm_tt_create = &vmw_ttm_tt_create, + .ttm_tt_populate = &ttm_page_alloc_ttm_tt_populate, + .ttm_tt_unpopulate = &ttm_page_alloc_ttm_tt_unpopulate, .invalidate_caches = vmw_invalidate_caches, .init_mem_type = vmw_init_mem_type, .evict_flags = vmw_evict_flags, diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h index 6b8c5cd..ae06e42 100644 --- a/include/drm/ttm/ttm_bo_driver.h +++ b/include/drm/ttm/ttm_bo_driver.h @@ -319,6 +319,26 @@ struct ttm_bo_driver { struct page *dummy_read_page);
/** + * ttm_tt_populate + * + * @ttm: The struct ttm_tt to contain the backing pages. + * + * Allocate all backing pages. + * Returns: + * -ENOMEM: Out of memory. + */ + int (*ttm_tt_populate)(struct ttm_tt *ttm); + + /** + * ttm_tt_unpopulate + * + * @ttm: The struct ttm_tt whose backing pages to free. + * + * Free all backing pages. + */ + void (*ttm_tt_unpopulate)(struct ttm_tt *ttm); + + /** * struct ttm_bo_driver member invalidate_caches * * @bdev: the buffer object device. @@ -585,15 +605,6 @@ extern int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev, extern int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem);
/** - * ttm_tt_populate: - * - * @ttm: The struct ttm_tt to contain the backing pages. - * - * Add backing pages to all of @ttm - */ -extern int ttm_tt_populate(struct ttm_tt *ttm); - -/** * ttm_ttm_destroy: * * @ttm: The struct ttm_tt. @@ -612,19 +623,13 @@ extern void ttm_tt_destroy(struct ttm_tt *ttm); extern void ttm_tt_unbind(struct ttm_tt *ttm);
/** - * ttm_ttm_destroy: + * ttm_tt_swapin: * * @ttm: The struct ttm_tt. - * @index: Index of the desired page. * - * Return a pointer to the struct page backing @ttm at page - * index @index. If the page is unpopulated, one will be allocated to - * populate that index. - * - * Returns: - * NULL on OOM. + * Swap in a previously swapped out ttm_tt. */ -extern struct page *ttm_tt_get_page(struct ttm_tt *ttm, int index); +extern int ttm_tt_swapin(struct ttm_tt *ttm);
/** * ttm_tt_cache_flush: diff --git a/include/drm/ttm/ttm_page_alloc.h b/include/drm/ttm/ttm_page_alloc.h index fffb3bd..18a2d6b 100644 --- a/include/drm/ttm/ttm_page_alloc.h +++ b/include/drm/ttm/ttm_page_alloc.h @@ -67,6 +67,24 @@ int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages); void ttm_page_alloc_fini(void);
/** + * ttm_page_alloc_ttm_tt_populate: + * + * @ttm: The struct ttm_tt to contain the backing pages. + * + * Add backing pages to all of @ttm + */ +extern int ttm_page_alloc_ttm_tt_populate(struct ttm_tt *ttm); + +/** + * ttm_page_alloc_ttm_tt_unpopulate: + * + * @ttm: The struct ttm_tt whose backing pages to free. + * + * Free all pages of @ttm + */ +extern void ttm_page_alloc_ttm_tt_unpopulate(struct ttm_tt *ttm); + +/** * Output the state of pools to debugfs file */ extern int ttm_page_alloc_debugfs(struct seq_file *m, void *data);
dri-devel@lists.freedesktop.org