From: Christian König <christian.koenig@amd.com>
We first need to figure out how to correctly map compound pages into the CPU page tables.
Bug: https://bugs.freedesktop.org/show_bug.cgi?id=103138
Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c | 1 +
 1 file changed, 1 insertion(+)
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
index e5ef10d..96ad129 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
@@ -913,6 +913,7 @@ static gfp_t ttm_dma_pool_gfp_flags(struct ttm_dma_tt *ttm_dma, bool huge)
 	if (huge) {
 		gfp_flags |= GFP_TRANSHUGE;
 		gfp_flags &= ~__GFP_MOVABLE;
+		gfp_flags &= ~__GFP_COMP;
 	}
 
 	return gfp_flags;
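
For readers skimming the series, the resulting mask for the huge case can be sketched as a standalone helper (illustrative only; the helper name is made up and not part of the patch):

#include <linux/gfp.h>

/* Sketch of the flags built by ttm_dma_pool_gfp_flags() for huge
 * allocations after this patch: GFP_TRANSHUGE normally implies both
 * __GFP_COMP and __GFP_MOVABLE, so both are masked out again and the
 * result is a plain, non-compound high-order allocation that can be
 * mapped into the CPU page tables page by page.
 */
static gfp_t huge_gfp_sketch(gfp_t gfp_flags)
{
	gfp_flags |= GFP_TRANSHUGE;
	gfp_flags &= ~__GFP_MOVABLE;
	gfp_flags &= ~__GFP_COMP;

	return gfp_flags;
}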
From: Christian König <christian.koenig@amd.com>
Try to allocate huge pages when it makes sense.
v2: avoid compound pages for now
Signed-off-by: Christian König <christian.koenig@amd.com>
Acked-by: Felix Kuehling <Felix.Kuehling@amd.com>
Acked-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/ttm/ttm_page_alloc.c | 50 ++++++++++++++++++++++++++++++------
 1 file changed, 42 insertions(+), 8 deletions(-)
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index 6c852e8..1bc6053 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -685,12 +685,24 @@ static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
 
 	if (pool == NULL) {
 		/* No pool for this memory type so free the pages */
-		for (i = 0; i < npages; i++) {
-			if (pages[i]) {
-				if (page_count(pages[i]) != 1)
-					pr_err("Erroneous page count. Leaking pages.\n");
-				__free_page(pages[i]);
-				pages[i] = NULL;
+		i = 0;
+		while (i < npages) {
+			unsigned order;
+
+			if (!pages[i]) {
+				++i;
+				continue;
+			}
+
+			if (page_count(pages[i]) != 1)
+				pr_err("Erroneous page count. Leaking pages.\n");
+			order = compound_order(pages[i]);
+			__free_pages(pages[i], order);
+
+			order = 1 << order;
+			while (order) {
+				pages[i++] = NULL;
+				--order;
 			}
 		}
 		return;
@@ -740,12 +752,33 @@ static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
 
 	/* No pool for cached pages */
 	if (pool == NULL) {
+		unsigned i, j;
+
 		if (flags & TTM_PAGE_FLAG_DMA32)
 			gfp_flags |= GFP_DMA32;
 		else
 			gfp_flags |= GFP_HIGHUSER;
 
-		for (r = 0; r < npages; ++r) {
+		i = 0;
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+		while (npages >= HPAGE_PMD_NR) {
+			gfp_t huge_flags = gfp_flags;
+
+			huge_flags |= GFP_TRANSHUGE;
+			huge_flags &= ~__GFP_MOVABLE;
+			huge_flags &= ~__GFP_COMP;
+			p = alloc_pages(huge_flags, HPAGE_PMD_ORDER);
+			if (!p)
+				break;
+
+			for (j = 0; j < HPAGE_PMD_NR; ++j)
+				pages[i++] = p++;
+
+			npages -= HPAGE_PMD_NR;
+		}
+#endif
+
+		while (npages) {
 			p = alloc_page(gfp_flags);
 			if (!p) {
@@ -753,7 +786,8 @@ static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
 				return -ENOMEM;
 			}
 
-			pages[r] = p;
+			pages[i++] = p;
+			--npages;
 		}
 		return 0;
 	}
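
The allocation strategy of the hunk above, read outside of TTM, is roughly the following (a hedged sketch; fill_pages_sketch() and its error handling are made up for illustration and simpler than the real code, which hands partially filled arrays back through ttm_put_pages()):

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/huge_mm.h>
#include <linux/mm.h>

/* Try HPAGE_PMD_ORDER (usually 2MB) chunks while at least HPAGE_PMD_NR
 * pages are still needed, then fill the remainder with single pages.
 */
static int fill_pages_sketch(struct page **pages, unsigned npages,
			     gfp_t gfp_flags)
{
	unsigned i = 0;
	struct page *p;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	while (npages >= HPAGE_PMD_NR) {
		gfp_t huge_flags = (gfp_flags | GFP_TRANSHUGE) &
				   ~(__GFP_MOVABLE | __GFP_COMP);
		unsigned j;

		p = alloc_pages(huge_flags, HPAGE_PMD_ORDER);
		if (!p)
			break;	/* no huge page available, fall back */

		for (j = 0; j < HPAGE_PMD_NR; ++j)
			pages[i++] = p++;
		npages -= HPAGE_PMD_NR;
	}
#endif

	while (npages) {
		p = alloc_page(gfp_flags);
		if (!p)
			return -ENOMEM;	/* caller must free pages[0..i) */

		pages[i++] = p;
		--npages;
	}

	return 0;
}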
From: Christian König <christian.koenig@amd.com>
Make it easier to add a huge page pool.
Signed-off-by: Christian König <christian.koenig@amd.com>
Acked-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/ttm/ttm_page_alloc.c | 98 +++++++++++++++++++-----------------
 1 file changed, 52 insertions(+), 46 deletions(-)
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index 1bc6053..3974732 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -627,19 +627,20 @@ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
 }
 
 /**
- * Cut 'count' number of pages from the pool and put them on the return list.
+ * Allocate pages from the pool and put them on the return list.
  *
- * @return count of pages still required to fulfill the request.
+ * @return zero for success or negative error code.
  */
-static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool,
-					struct list_head *pages,
-					int ttm_flags,
-					enum ttm_caching_state cstate,
-					unsigned count)
+static int ttm_page_pool_get_pages(struct ttm_page_pool *pool,
+				   struct list_head *pages,
+				   int ttm_flags,
+				   enum ttm_caching_state cstate,
+				   unsigned count)
 {
 	unsigned long irq_flags;
 	struct list_head *p;
 	unsigned i;
+	int r = 0;
 
 	spin_lock_irqsave(&pool->lock, irq_flags);
 	ttm_page_pool_fill_locked(pool, ttm_flags, cstate, count, &irq_flags);
@@ -672,7 +673,35 @@ static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool,
 		count = 0;
 out:
 	spin_unlock_irqrestore(&pool->lock, irq_flags);
-	return count;
+
+	/* clear the pages coming from the pool if requested */
+	if (ttm_flags & TTM_PAGE_FLAG_ZERO_ALLOC) {
+		struct page *page;
+
+		list_for_each_entry(page, pages, lru) {
+			if (PageHighMem(page))
+				clear_highpage(page);
+			else
+				clear_page(page_address(page));
+		}
+	}
+
+	/* If pool didn't have enough pages allocate new one. */
+	if (count) {
+		gfp_t gfp_flags = pool->gfp_flags;
+
+		/* set zero flag for page allocation if required */
+		if (ttm_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
+			gfp_flags |= __GFP_ZERO;
+
+		/* ttm_alloc_new_pages doesn't reference pool so we can run
+		 * multiple requests in parallel.
+		 **/
+		r = ttm_alloc_new_pages(pages, gfp_flags, ttm_flags, cstate,
+					count);
+	}
+
+	return r;
 }
 
 /* Put all pages in pages list to correct pool to wait for reuse */
@@ -742,18 +771,18 @@ static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
 	struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
 	struct list_head plist;
 	struct page *p = NULL;
-	gfp_t gfp_flags = GFP_USER;
 	unsigned count;
 	int r;
 
-	/* set zero flag for page allocation if required */
-	if (flags & TTM_PAGE_FLAG_ZERO_ALLOC)
-		gfp_flags |= __GFP_ZERO;
-
 	/* No pool for cached pages */
 	if (pool == NULL) {
+		gfp_t gfp_flags = GFP_USER;
 		unsigned i, j;
 
+		/* set zero flag for page allocation if required */
+		if (flags & TTM_PAGE_FLAG_ZERO_ALLOC)
+			gfp_flags |= __GFP_ZERO;
+
 		if (flags & TTM_PAGE_FLAG_DMA32)
 			gfp_flags |= GFP_DMA32;
 		else
@@ -792,44 +821,21 @@ static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
 		return 0;
 	}
 
-	/* combine zero flag to pool flags */
-	gfp_flags |= pool->gfp_flags;
-
 	/* First we take pages from the pool */
 	INIT_LIST_HEAD(&plist);
-	npages = ttm_page_pool_get_pages(pool, &plist, flags, cstate, npages);
+	r = ttm_page_pool_get_pages(pool, &plist, flags, cstate, npages);
+
 	count = 0;
-	list_for_each_entry(p, &plist, lru) {
+	list_for_each_entry(p, &plist, lru)
 		pages[count++] = p;
-	}
-
-	/* clear the pages coming from the pool if requested */
-	if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) {
-		list_for_each_entry(p, &plist, lru) {
-			if (PageHighMem(p))
-				clear_highpage(p);
-			else
-				clear_page(page_address(p));
-		}
-	}
 
-	/* If pool didn't have enough pages allocate new one. */
-	if (npages > 0) {
-		/* ttm_alloc_new_pages doesn't reference pool so we can run
-		 * multiple requests in parallel.
-		 **/
-		INIT_LIST_HEAD(&plist);
-		r = ttm_alloc_new_pages(&plist, gfp_flags, flags, cstate, npages);
-		list_for_each_entry(p, &plist, lru) {
-			pages[count++] = p;
-		}
-		if (r) {
-			/* If there is any pages in the list put them back to
-			 * the pool. */
-			pr_err("Failed to allocate extra pages for large request\n");
-			ttm_put_pages(pages, count, flags, cstate);
-			return r;
-		}
+	if (r) {
+		/* If there is any pages in the list put them back to
+		 * the pool.
+		 */
+		pr_err("Failed to allocate extra pages for large request\n");
+		ttm_put_pages(pages, count, flags, cstate);
+		return r;
 	}
 
 	return 0;
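
With the new contract, the pool-backed path of ttm_get_pages() boils down to roughly the shape below (a sketch as it would sit next to the static helpers in ttm_page_alloc.c; get_pages_sketch() is a made-up name, the real caller is ttm_get_pages() as shown in the diff):

/* Zeroing and the "pool ran dry, allocate fresh pages" fallback now
 * live inside ttm_page_pool_get_pages(), so the caller only harvests
 * the list and undoes partial progress on error.
 */
static int get_pages_sketch(struct ttm_page_pool *pool, struct page **pages,
			    unsigned npages, int flags,
			    enum ttm_caching_state cstate)
{
	struct list_head plist;
	struct page *p;
	unsigned count = 0;
	int r;

	INIT_LIST_HEAD(&plist);
	r = ttm_page_pool_get_pages(pool, &plist, flags, cstate, npages);

	list_for_each_entry(p, &plist, lru)
		pages[count++] = p;

	if (r) {
		/* hand everything gathered so far back to the pool */
		ttm_put_pages(pages, count, flags, cstate);
		return r;
	}

	return 0;
}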
From: Christian König <christian.koenig@amd.com>
Add a new huge page pool and try to allocate from it when it makes sense.
v2: avoid compound pages for now
Signed-off-by: Christian König <christian.koenig@amd.com>
Acked-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/ttm/ttm_page_alloc.c | 136 ++++++++++++++++++++++++++++-------
 1 file changed, 109 insertions(+), 27 deletions(-)
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index 3974732..b6f16e7ff 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -95,7 +95,7 @@ struct ttm_pool_opts {
 	unsigned	small;
 };
 
-#define NUM_POOLS 4
+#define NUM_POOLS 6
 
 /**
  * struct ttm_pool_manager - Holds memory pools for fst allocation
@@ -122,6 +122,8 @@ struct ttm_pool_manager {
 			struct ttm_page_pool	uc_pool;
 			struct ttm_page_pool	wc_pool_dma32;
 			struct ttm_page_pool	uc_pool_dma32;
+			struct ttm_page_pool	wc_pool_huge;
+			struct ttm_page_pool	uc_pool_huge;
 		} ;
 	};
 };
@@ -256,8 +258,8 @@ static int set_pages_array_uc(struct page **pages, int addrinarray)
 /**
  * Select the right pool or requested caching state and ttm flags. */
-static struct ttm_page_pool *ttm_get_pool(int flags,
-		enum ttm_caching_state cstate)
+static struct ttm_page_pool *ttm_get_pool(int flags, bool huge,
+					  enum ttm_caching_state cstate)
 {
 	int pool_index;
 
@@ -269,9 +271,15 @@ static struct ttm_page_pool *ttm_get_pool(int flags,
 	else
 		pool_index = 0x1;
 
-	if (flags & TTM_PAGE_FLAG_DMA32)
+	if (flags & TTM_PAGE_FLAG_DMA32) {
+		if (huge)
+			return NULL;
 		pool_index |= 0x2;
 
+	} else if (huge) {
+		pool_index |= 0x4;
+	}
+
 	return &_manager->pools[pool_index];
 }
 
@@ -494,12 +502,14 @@ static void ttm_handle_caching_state_failure(struct list_head *pages,
  * pages returned in pages array.
  */
 static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
-		int ttm_flags, enum ttm_caching_state cstate, unsigned count)
+			       int ttm_flags, enum ttm_caching_state cstate,
+			       unsigned count, unsigned order)
 {
 	struct page **caching_array;
 	struct page *p;
 	int r = 0;
-	unsigned i, cpages;
+	unsigned i, j, cpages;
+	unsigned npages = 1 << order;
 	unsigned max_cpages = min(count,
 			(unsigned)(PAGE_SIZE/sizeof(struct page *)));
@@ -512,7 +522,7 @@ static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
 	}
 
 	for (i = 0, cpages = 0; i < count; ++i) {
-		p = alloc_page(gfp_flags);
+		p = alloc_pages(gfp_flags, order);
 
 		if (!p) {
 			pr_err("Unable to get page %u\n", i);
@@ -531,14 +541,18 @@ static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
 			goto out;
 		}
 
+		list_add(&p->lru, pages);
+
 #ifdef CONFIG_HIGHMEM
 		/* gfp flags of highmem page should never be dma32 so we
 		 * we should be fine in such case
 		 */
-		if (!PageHighMem(p))
+		if (PageHighMem(p))
+			continue;
+
 #endif
-		{
-			caching_array[cpages++] = p;
+		for (j = 0; j < npages; ++j) {
+			caching_array[cpages++] = p++;
 			if (cpages == max_cpages) {
 
 				r = ttm_set_pages_caching(caching_array,
@@ -552,8 +566,6 @@ static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
 				cpages = 0;
 			}
 		}
-
-		list_add(&p->lru, pages);
 	}
 
 	if (cpages) {
@@ -573,9 +585,9 @@ static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
  * Fill the given pool if there aren't enough pages and the requested number of
  * pages is small.
  */
-static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
-		int ttm_flags, enum ttm_caching_state cstate, unsigned count,
-		unsigned long *irq_flags)
+static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool, int ttm_flags,
+				      enum ttm_caching_state cstate,
+				      unsigned count, unsigned long *irq_flags)
 {
 	struct page *p;
 	int r;
@@ -605,7 +617,7 @@ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
 		INIT_LIST_HEAD(&new_pages);
 		r = ttm_alloc_new_pages(&new_pages, pool->gfp_flags, ttm_flags,
-				cstate, alloc_size);
+					cstate, alloc_size, 0);
 		spin_lock_irqsave(&pool->lock, *irq_flags);
 
 		if (!r) {
@@ -635,7 +647,7 @@ static int ttm_page_pool_get_pages(struct ttm_page_pool *pool,
 				   struct list_head *pages,
 				   int ttm_flags,
 				   enum ttm_caching_state cstate,
-				   unsigned count)
+				   unsigned count, unsigned order)
 {
 	unsigned long irq_flags;
 	struct list_head *p;
@@ -643,7 +655,9 @@ static int ttm_page_pool_get_pages(struct ttm_page_pool *pool,
 	int r = 0;
 
 	spin_lock_irqsave(&pool->lock, irq_flags);
-	ttm_page_pool_fill_locked(pool, ttm_flags, cstate, count, &irq_flags);
+	if (!order)
+		ttm_page_pool_fill_locked(pool, ttm_flags, cstate, count,
+					  &irq_flags);
 
 	if (count >= pool->npages) {
 		/* take all pages from the pool */
@@ -698,7 +712,7 @@ static int ttm_page_pool_get_pages(struct ttm_page_pool *pool,
 		 * multiple requests in parallel.
 		 **/
 		r = ttm_alloc_new_pages(pages, gfp_flags, ttm_flags, cstate,
-					count);
+					count, order);
 	}
 
 	return r;
@@ -708,8 +722,9 @@ static int ttm_page_pool_get_pages(struct ttm_page_pool *pool,
 static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
 			  enum ttm_caching_state cstate)
 {
+	struct ttm_page_pool *pool = ttm_get_pool(flags, false, cstate);
+	struct ttm_page_pool *huge = ttm_get_pool(flags, true, cstate);
 	unsigned long irq_flags;
-	struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
 	unsigned i;
 
 	if (pool == NULL) {
@@ -737,8 +752,48 @@ static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
 		return;
 	}
 
+	i = 0;
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+	if (huge) {
+		unsigned max_size, n2free;
+
+		spin_lock_irqsave(&huge->lock, irq_flags);
+		while (i < npages) {
+			struct page *p = pages[i];
+			unsigned j;
+
+			if (!p)
+				break;
+
+			for (j = 0; j < HPAGE_PMD_NR; ++j)
+				if (p++ != pages[i + j])
+					break;
+
+			if (j != HPAGE_PMD_NR)
+				break;
+
+			list_add_tail(&pages[i]->lru, &huge->list);
+
+			for (j = 0; j < HPAGE_PMD_NR; ++j)
+				pages[i++] = NULL;
+			huge->npages++;
+		}
+
+		/* Check that we don't go over the pool limit */
+		max_size = _manager->options.max_size;
+		max_size /= HPAGE_PMD_NR;
+		if (huge->npages > max_size)
+			n2free = huge->npages - max_size;
+		else
+			n2free = 0;
+		spin_unlock_irqrestore(&huge->lock, irq_flags);
+		if (n2free)
+			ttm_page_pool_free(huge, n2free, false);
+	}
+#endif
+
 	spin_lock_irqsave(&pool->lock, irq_flags);
-	for (i = 0; i < npages; i++) {
+	while (i < npages) {
 		if (pages[i]) {
 			if (page_count(pages[i]) != 1)
 				pr_err("Erroneous page count. Leaking pages.\n");
@@ -746,6 +801,7 @@ static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
 			pages[i] = NULL;
 			pool->npages++;
 		}
+		++i;
 	}
 	/* Check that we don't go over the pool limit */
 	npages = 0;
@@ -768,7 +824,8 @@ static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
 static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
 			 enum ttm_caching_state cstate)
 {
-	struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
+	struct ttm_page_pool *pool = ttm_get_pool(flags, false, cstate);
+	struct ttm_page_pool *huge = ttm_get_pool(flags, true, cstate);
 	struct list_head plist;
 	struct page *p = NULL;
 	unsigned count;
@@ -821,11 +878,28 @@ static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
 		return 0;
 	}
 
-	/* First we take pages from the pool */
+	count = 0;
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+	if (huge && npages >= HPAGE_PMD_NR) {
+		INIT_LIST_HEAD(&plist);
+		ttm_page_pool_get_pages(huge, &plist, flags, cstate,
+					npages / HPAGE_PMD_NR,
+					HPAGE_PMD_ORDER);
+
+		list_for_each_entry(p, &plist, lru) {
+			unsigned j;
+
+			for (j = 0; j < HPAGE_PMD_NR; ++j)
+				pages[count++] = &p[j];
+		}
+	}
+#endif
+
 	INIT_LIST_HEAD(&plist);
-	r = ttm_page_pool_get_pages(pool, &plist, flags, cstate, npages);
+	r = ttm_page_pool_get_pages(pool, &plist, flags, cstate,
+				    npages - count, 0);
 
-	count = 0;
 	list_for_each_entry(p, &plist, lru)
 		pages[count++] = p;
 
@@ -872,6 +946,14 @@ int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
 	ttm_page_pool_init_locked(&_manager->uc_pool_dma32, GFP_USER | GFP_DMA32,
 				  "uc dma");
 
+	ttm_page_pool_init_locked(&_manager->wc_pool_huge,
+				  GFP_TRANSHUGE & ~(__GFP_MOVABLE | __GFP_COMP),
+				  "wc huge");
+
+	ttm_page_pool_init_locked(&_manager->uc_pool_huge,
+				  GFP_TRANSHUGE & ~(__GFP_MOVABLE | __GFP_COMP)
+				  , "uc huge");
+
 	_manager->options.max_size = max_pages;
 	_manager->options.small = SMALL_ALLOCATION;
 	_manager->options.alloc_size = NUM_PAGES_TO_ALLOC;
@@ -1041,12 +1123,12 @@ int ttm_page_alloc_debugfs(struct seq_file *m, void *data)
 		seq_printf(m, "No pool allocator running.\n");
 		return 0;
 	}
-	seq_printf(m, "%6s %12s %13s %8s\n",
+	seq_printf(m, "%7s %12s %13s %8s\n",
 			h[0], h[1], h[2], h[3]);
 	for (i = 0; i < NUM_POOLS; ++i) {
 		p = &_manager->pools[i];
-		seq_printf(m, "%6s %12ld %13ld %8d\n",
+		seq_printf(m, "%7s %12ld %13ld %8d\n",
 				p->name, p->nrefills,
 				p->nfrees, p->npages);
 	}
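
The pool selection after this patch can be restated as the sketch below (an illustrative rewrite as it would sit inside ttm_page_alloc.c; pool_lookup_sketch() is a made-up name for what ttm_get_pool() does): bit 0 selects uncached vs. write-combined, bit 1 the DMA32 pools and bit 2 the new huge pools, and since DMA32 + huge deliberately has no pool, NUM_POOLS only grows from 4 to 6.

static struct ttm_page_pool *pool_lookup_sketch(int flags, bool huge,
						enum ttm_caching_state cstate)
{
	int pool_index;

	if (cstate == tt_cached)
		return NULL;	/* cached pages bypass the pools entirely */

	/* bit 0: write-combined (0) vs. uncached (1) */
	pool_index = (cstate == tt_uncached) ? 0x1 : 0x0;

	if (flags & TTM_PAGE_FLAG_DMA32) {
		if (huge)
			return NULL;	/* no huge DMA32 pool */
		pool_index |= 0x2;	/* bit 1: DMA32 pools */
	} else if (huge) {
		pool_index |= 0x4;	/* bit 2: huge pools */
	}

	return &_manager->pools[pool_index];
}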
On Tue, Oct 10, 2017 at 4:53 AM, Christian König <ckoenig.leichtzumerken@gmail.com> wrote:
> From: Christian König <christian.koenig@amd.com>
>
> Add a new huge page pool and try to allocate from it when it makes sense.
>
> v2: avoid compound pages for now
>
> Signed-off-by: Christian König <christian.koenig@amd.com>
> Acked-by: Alex Deucher <alexander.deucher@amd.com>
Series is:
Acked-by: Alex Deucher <alexander.deucher@amd.com>