path: root/drivers/gpu/drm/ttm/ttm_page_alloc.c
Diffstat (limited to 'drivers/gpu/drm/ttm/ttm_page_alloc.c')
-rw-r--r--  drivers/gpu/drm/ttm/ttm_page_alloc.c | 245
1 file changed, 99 insertions(+), 146 deletions(-)
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index ebc6fac..508c64c 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -30,9 +30,6 @@
* - Use page->lru to keep a free list
* - doesn't track currently in use pages
*/
-
-#define pr_fmt(fmt) "[TTM] " fmt
-
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/highmem.h>
@@ -170,13 +167,18 @@ static ssize_t ttm_pool_store(struct kobject *kobj,
m->options.small = val;
else if (attr == &ttm_page_pool_alloc_size) {
if (val > NUM_PAGES_TO_ALLOC*8) {
- pr_err("Setting allocation size to %lu is not allowed. Recommended size is %lu\n",
+ printk(KERN_ERR TTM_PFX
+ "Setting allocation size to %lu "
+ "is not allowed. Recommended size is "
+ "%lu\n",
NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
return size;
} else if (val > NUM_PAGES_TO_ALLOC) {
- pr_warn("Setting allocation size to larger than %lu is not recommended\n",
- NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
+ printk(KERN_WARNING TTM_PFX
+ "Setting allocation size to "
+ "larger than %lu is not recommended.\n",
+ NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
}
m->options.alloc_size = val;
}
@@ -277,7 +279,8 @@ static void ttm_pages_put(struct page *pages[], unsigned npages)
{
unsigned i;
if (set_pages_array_wb(pages, npages))
- pr_err("Failed to set %d pages to wb!\n", npages);
+ printk(KERN_ERR TTM_PFX "Failed to set %d pages to wb!\n",
+ npages);
for (i = 0; i < npages; ++i)
__free_page(pages[i]);
}
@@ -312,7 +315,8 @@ static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free)
pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
GFP_KERNEL);
if (!pages_to_free) {
- pr_err("Failed to allocate memory for pool free operation\n");
+ printk(KERN_ERR TTM_PFX
+ "Failed to allocate memory for pool free operation.\n");
return 0;
}
@@ -394,13 +398,18 @@ static int ttm_pool_get_num_unused_pages(void)
static int ttm_pool_mm_shrink(struct shrinker *shrink,
struct shrink_control *sc)
{
- static atomic_t start_pool = ATOMIC_INIT(0);
+ static DEFINE_MUTEX(lock);
+ static unsigned start_pool;
unsigned i;
- unsigned pool_offset = atomic_add_return(1, &start_pool);
+ unsigned pool_offset;
struct ttm_page_pool *pool;
int shrink_pages = sc->nr_to_scan;
- pool_offset = pool_offset % NUM_POOLS;
+ if (shrink_pages == 0)
+ goto out;
+ if (!mutex_trylock(&lock))
+ return -1;
+ pool_offset = ++start_pool % NUM_POOLS;
/* select start pool in round robin fashion */
for (i = 0; i < NUM_POOLS; ++i) {
unsigned nr_free = shrink_pages;
@@ -409,6 +418,8 @@ static int ttm_pool_mm_shrink(struct shrinker *shrink,
pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
shrink_pages = ttm_page_pool_free(pool, nr_free);
}
+ mutex_unlock(&lock);
+out:
/* return estimated number of unused pages in pool */
return ttm_pool_get_num_unused_pages();
}
@@ -434,12 +445,16 @@ static int ttm_set_pages_caching(struct page **pages,
case tt_uncached:
r = set_pages_array_uc(pages, cpages);
if (r)
- pr_err("Failed to set %d pages to uc!\n", cpages);
+ printk(KERN_ERR TTM_PFX
+ "Failed to set %d pages to uc!\n",
+ cpages);
break;
case tt_wc:
r = set_pages_array_wc(pages, cpages);
if (r)
- pr_err("Failed to set %d pages to wc!\n", cpages);
+ printk(KERN_ERR TTM_PFX
+ "Failed to set %d pages to wc!\n",
+ cpages);
break;
default:
break;
@@ -484,7 +499,8 @@ static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);
if (!caching_array) {
- pr_err("Unable to allocate table for new pages\n");
+ printk(KERN_ERR TTM_PFX
+ "Unable to allocate table for new pages.");
return -ENOMEM;
}
@@ -492,7 +508,7 @@ static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
p = alloc_page(gfp_flags);
if (!p) {
- pr_err("Unable to get page %u\n", i);
+ printk(KERN_ERR TTM_PFX "Unable to get page %u.\n", i);
/* store already allocated pages in the pool after
* setting the caching state */
@@ -590,7 +606,8 @@ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
++pool->nrefills;
pool->npages += alloc_size;
} else {
- pr_err("Failed to fill pool (%p)\n", pool);
+ printk(KERN_ERR TTM_PFX
+ "Failed to fill pool (%p).", pool);
/* If we have any pages left put them to the pool. */
list_for_each_entry(p, &pool->list, lru) {
++cpages;
@@ -609,10 +626,8 @@ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
* @return count of pages still required to fulfill the request.
*/
static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool,
- struct list_head *pages,
- int ttm_flags,
- enum ttm_caching_state cstate,
- unsigned count)
+ struct list_head *pages, int ttm_flags,
+ enum ttm_caching_state cstate, unsigned count)
{
unsigned long irq_flags;
struct list_head *p;
@@ -652,63 +667,17 @@ out:
return count;
}
-/* Put all pages in pages list to correct pool to wait for reuse */
-static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
- enum ttm_caching_state cstate)
-{
- unsigned long irq_flags;
- struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
- unsigned i;
-
- if (pool == NULL) {
- /* No pool for this memory type so free the pages */
- for (i = 0; i < npages; i++) {
- if (pages[i]) {
- if (page_count(pages[i]) != 1)
- pr_err("Erroneous page count. Leaking pages.\n");
- __free_page(pages[i]);
- pages[i] = NULL;
- }
- }
- return;
- }
-
- spin_lock_irqsave(&pool->lock, irq_flags);
- for (i = 0; i < npages; i++) {
- if (pages[i]) {
- if (page_count(pages[i]) != 1)
- pr_err("Erroneous page count. Leaking pages.\n");
- list_add_tail(&pages[i]->lru, &pool->list);
- pages[i] = NULL;
- pool->npages++;
- }
- }
- /* Check that we don't go over the pool limit */
- npages = 0;
- if (pool->npages > _manager->options.max_size) {
- npages = pool->npages - _manager->options.max_size;
- /* free at least NUM_PAGES_TO_ALLOC number of pages
- * to reduce calls to set_memory_wb */
- if (npages < NUM_PAGES_TO_ALLOC)
- npages = NUM_PAGES_TO_ALLOC;
- }
- spin_unlock_irqrestore(&pool->lock, irq_flags);
- if (npages)
- ttm_page_pool_free(pool, npages);
-}
-
/*
* On success pages list will hold count number of correctly
* cached pages.
*/
-static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
- enum ttm_caching_state cstate)
+int ttm_get_pages(struct list_head *pages, int flags,
+ enum ttm_caching_state cstate, unsigned count,
+ dma_addr_t *dma_address)
{
struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
- struct list_head plist;
struct page *p = NULL;
gfp_t gfp_flags = GFP_USER;
- unsigned count;
int r;
/* set zero flag for page allocation if required */
@@ -722,59 +691,99 @@ static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
else
gfp_flags |= GFP_HIGHUSER;
- for (r = 0; r < npages; ++r) {
+ for (r = 0; r < count; ++r) {
p = alloc_page(gfp_flags);
if (!p) {
- pr_err("Unable to allocate page\n");
+ printk(KERN_ERR TTM_PFX
+ "Unable to allocate page.");
return -ENOMEM;
}
- pages[r] = p;
+ list_add(&p->lru, pages);
}
return 0;
}
+
/* combine zero flag to pool flags */
gfp_flags |= pool->gfp_flags;
/* First we take pages from the pool */
- INIT_LIST_HEAD(&plist);
- npages = ttm_page_pool_get_pages(pool, &plist, flags, cstate, npages);
- count = 0;
- list_for_each_entry(p, &plist, lru) {
- pages[count++] = p;
- }
+ count = ttm_page_pool_get_pages(pool, pages, flags, cstate, count);
/* clear the pages coming from the pool if requested */
if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) {
- list_for_each_entry(p, &plist, lru) {
- clear_page(page_address(p));
+ list_for_each_entry(p, pages, lru) {
+ if (PageHighMem(p))
+ clear_highpage(p);
+ else
+ clear_page(page_address(p));
}
}
/* If pool didn't have enough pages allocate new one. */
- if (npages > 0) {
+ if (count > 0) {
/* ttm_alloc_new_pages doesn't reference pool so we can run
* multiple requests in parallel.
**/
- INIT_LIST_HEAD(&plist);
- r = ttm_alloc_new_pages(&plist, gfp_flags, flags, cstate, npages);
- list_for_each_entry(p, &plist, lru) {
- pages[count++] = p;
- }
+ r = ttm_alloc_new_pages(pages, gfp_flags, flags, cstate, count);
if (r) {
/* If there is any pages in the list put them back to
* the pool. */
- pr_err("Failed to allocate extra pages for large request\n");
- ttm_put_pages(pages, count, flags, cstate);
+ printk(KERN_ERR TTM_PFX
+ "Failed to allocate extra pages "
+ "for large request.");
+ ttm_put_pages(pages, 0, flags, cstate, NULL);
return r;
}
}
+
return 0;
}
+/* Put all pages in pages list to correct pool to wait for reuse */
+void ttm_put_pages(struct list_head *pages, unsigned page_count, int flags,
+ enum ttm_caching_state cstate, dma_addr_t *dma_address)
+{
+ unsigned long irq_flags;
+ struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
+ struct page *p, *tmp;
+
+ if (pool == NULL) {
+ /* No pool for this memory type so free the pages */
+
+ list_for_each_entry_safe(p, tmp, pages, lru) {
+ __free_page(p);
+ }
+ /* Make the pages list empty */
+ INIT_LIST_HEAD(pages);
+ return;
+ }
+ if (page_count == 0) {
+ list_for_each_entry_safe(p, tmp, pages, lru) {
+ ++page_count;
+ }
+ }
+
+ spin_lock_irqsave(&pool->lock, irq_flags);
+ list_splice_init(pages, &pool->list);
+ pool->npages += page_count;
+ /* Check that we don't go over the pool limit */
+ page_count = 0;
+ if (pool->npages > _manager->options.max_size) {
+ page_count = pool->npages - _manager->options.max_size;
+ /* free at least NUM_PAGES_TO_ALLOC number of pages
+ * to reduce calls to set_memory_wb */
+ if (page_count < NUM_PAGES_TO_ALLOC)
+ page_count = NUM_PAGES_TO_ALLOC;
+ }
+ spin_unlock_irqrestore(&pool->lock, irq_flags);
+ if (page_count)
+ ttm_page_pool_free(pool, page_count);
+}
+
static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, int flags,
char *name)
{
@@ -792,7 +801,7 @@ int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
WARN_ON(_manager);
- pr_info("Initializing pool allocator\n");
+ printk(KERN_INFO TTM_PFX "Initializing pool allocator.\n");
_manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
@@ -827,7 +836,7 @@ void ttm_page_alloc_fini(void)
{
int i;
- pr_info("Finalizing pool allocator\n");
+ printk(KERN_INFO TTM_PFX "Finalizing pool allocator.\n");
ttm_pool_mm_shrink_fini(_manager);
for (i = 0; i < NUM_POOLS; ++i)
@@ -837,62 +846,6 @@ void ttm_page_alloc_fini(void)
_manager = NULL;
}
-int ttm_pool_populate(struct ttm_tt *ttm)
-{
- struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
- unsigned i;
- int ret;
-
- if (ttm->state != tt_unpopulated)
- return 0;
-
- for (i = 0; i < ttm->num_pages; ++i) {
- ret = ttm_get_pages(&ttm->pages[i], 1,
- ttm->page_flags,
- ttm->caching_state);
- if (ret != 0) {
- ttm_pool_unpopulate(ttm);
- return -ENOMEM;
- }
-
- ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
- false, false);
- if (unlikely(ret != 0)) {
- ttm_pool_unpopulate(ttm);
- return -ENOMEM;
- }
- }
-
- if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
- ret = ttm_tt_swapin(ttm);
- if (unlikely(ret != 0)) {
- ttm_pool_unpopulate(ttm);
- return ret;
- }
- }
-
- ttm->state = tt_unbound;
- return 0;
-}
-EXPORT_SYMBOL(ttm_pool_populate);
-
-void ttm_pool_unpopulate(struct ttm_tt *ttm)
-{
- unsigned i;
-
- for (i = 0; i < ttm->num_pages; ++i) {
- if (ttm->pages[i]) {
- ttm_mem_global_free_page(ttm->glob->mem_glob,
- ttm->pages[i]);
- ttm_put_pages(&ttm->pages[i], 1,
- ttm->page_flags,
- ttm->caching_state);
- }
- }
- ttm->state = tt_unpopulated;
-}
-EXPORT_SYMBOL(ttm_pool_unpopulate);
-
int ttm_page_alloc_debugfs(struct seq_file *m, void *data)
{
struct ttm_page_pool *p;