path: root/drivers/gpu
author	Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>	2014-08-03 20:02:03 +0900
committer	Ben Hutchings <ben@decadent.org.uk>	2014-09-13 23:41:43 +0100
commit	312b559d9e61b473b39d3dc05fe0db43c2df60fb (patch)
tree	6855c1414c49f8e026b8385bc88d007d04d3fb2f /drivers/gpu
parent	0d362ea03048606016efe050bdcc46ff87ae4506 (diff)
drm/ttm: Fix possible stack overflow by recursive shrinker calls.
commit 71336e011d1d2312bcbcaa8fcec7365024f3a95d upstream.

While ttm_dma_pool_shrink_scan() takes the mutex before doing a GFP_KERNEL
allocation, ttm_pool_shrink_scan() does not. This can result in a stack
overflow if kmalloc() in ttm_page_pool_free() triggers recursion under
memory pressure:

  shrink_slab()
  => ttm_pool_shrink_scan()
  => ttm_page_pool_free()
  => kmalloc(GFP_KERNEL)
  => shrink_slab()
  => ttm_pool_shrink_scan()
  => ttm_page_pool_free()
  => kmalloc(GFP_KERNEL)

Change ttm_pool_shrink_scan() to do the same as ttm_dma_pool_shrink_scan().

Signed-off-by: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
Signed-off-by: Dave Airlie <airlied@redhat.com>
[bwh: Backported to 3.2:
 - Adjust context
 - Change return value in the contended case to follow the old shrinker API]
Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
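For context, a minimal sketch of the trylock guard this patch adds, written
against the old (pre-3.12) shrinker API that this 3.2 backport targets. The
shrinker callback and the helpers my_count_unused() and my_pool_free() are
placeholders for illustration, not TTM symbols:

	#include <linux/mutex.h>
	#include <linux/shrinker.h>

	/* Placeholder helpers, not real kernel symbols; my_pool_free()
	 * returns how many pages it actually freed. */
	static int my_count_unused(void);
	static int my_pool_free(unsigned nr_to_free);

	static int my_shrink(struct shrinker *shrink, struct shrink_control *sc)
	{
		static DEFINE_MUTEX(lock);

		/* nr_to_scan == 0 is a query: report size, take no locks. */
		if (sc->nr_to_scan == 0)
			return my_count_unused();

		/*
		 * If the lock is already held we may be re-entering through
		 * kmalloc(GFP_KERNEL) -> direct reclaim -> shrink_slab(), so
		 * back off instead of recursing and overflowing the stack.
		 */
		if (!mutex_trylock(&lock))
			return -1;	/* old shrinker API: cannot shrink now */

		my_pool_free(sc->nr_to_scan);
		mutex_unlock(&lock);

		return my_count_unused();
	}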
Diffstat (limited to 'drivers/gpu')
-rw-r--r--	drivers/gpu/drm/ttm/ttm_page_alloc.c	13
1 file changed, 10 insertions(+), 3 deletions(-)
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index 9e4313e..508c64c 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -398,13 +398,18 @@ static int ttm_pool_get_num_unused_pages(void)
static int ttm_pool_mm_shrink(struct shrinker *shrink,
struct shrink_control *sc)
{
- static atomic_t start_pool = ATOMIC_INIT(0);
+ static DEFINE_MUTEX(lock);
+ static unsigned start_pool;
unsigned i;
- unsigned pool_offset = atomic_add_return(1, &start_pool);
+ unsigned pool_offset;
struct ttm_page_pool *pool;
int shrink_pages = sc->nr_to_scan;

- pool_offset = pool_offset % NUM_POOLS;
+ if (shrink_pages == 0)
+ goto out;
+ if (!mutex_trylock(&lock))
+ return -1;
+ pool_offset = ++start_pool % NUM_POOLS;
/* select start pool in round robin fashion */
for (i = 0; i < NUM_POOLS; ++i) {
unsigned nr_free = shrink_pages;
@@ -413,6 +418,8 @@ static int ttm_pool_mm_shrink(struct shrinker *shrink,
pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
shrink_pages = ttm_page_pool_free(pool, nr_free);
}
+ mutex_unlock(&lock);
+out:
/* return estimated number of unused pages in pool */
return ttm_pool_get_num_unused_pages();
}
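For comparison, a sketch (not the literal upstream code) of how the same
trylock guard reads under the newer split count_objects/scan_objects shrinker
API, where the conventional way to signal the contended case is SHRINK_STOP
rather than the -1 used by the old API above; my_pool_free() is the same
placeholder helper as in the earlier sketch:

	static unsigned long my_shrink_scan(struct shrinker *shrink,
					    struct shrink_control *sc)
	{
		static DEFINE_MUTEX(lock);
		unsigned long freed;

		if (!mutex_trylock(&lock))
			return SHRINK_STOP;	/* contended: refuse rather than recurse */

		freed = my_pool_free(sc->nr_to_scan);
		mutex_unlock(&lock);

		return freed;
	}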