drm/ttm: Fix possible stack overflow by recursive shrinker calls.
author Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
Sun, 3 Aug 2014 11:02:03 +0000 (20:02 +0900)
committer Dave Airlie <airlied@redhat.com>
Tue, 5 Aug 2014 00:54:10 +0000 (10:54 +1000)
While ttm_dma_pool_shrink_scan() tries to take the mutex before doing a
GFP_KERNEL allocation, ttm_pool_shrink_scan() does not. This can result in a
stack overflow if the kmalloc() in ttm_page_pool_free() triggers recursion
due to memory pressure.

  shrink_slab()
  => ttm_pool_shrink_scan()
     => ttm_page_pool_free()
        => kmalloc(GFP_KERNEL)
           => shrink_slab()
              => ttm_pool_shrink_scan()
                 => ttm_page_pool_free()
                    => kmalloc(GFP_KERNEL)

Change ttm_pool_shrink_scan() to do the same as ttm_dma_pool_shrink_scan().
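
For reference, a minimal sketch of the trylock-guarded scan pattern adopted
here; example_shrink_scan() is a hypothetical callback for illustration, not
code from this driver:

	/*
	 * Illustrative only: a shrinker ->scan_objects() callback that
	 * refuses to run re-entrantly.  If the lock is already held
	 * (e.g. because we were re-entered from a GFP_KERNEL allocation
	 * made while shrinking), return SHRINK_STOP instead of recursing.
	 */
	static unsigned long example_shrink_scan(struct shrinker *shrink,
						 struct shrink_control *sc)
	{
		static DEFINE_MUTEX(lock);
		unsigned long freed = 0;

		if (!mutex_trylock(&lock))
			return SHRINK_STOP;	/* already shrinking */

		/* ... free up to sc->nr_to_scan objects, adding to freed ... */

		mutex_unlock(&lock);
		return freed;
	}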

Signed-off-by: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
Cc: stable <stable@kernel.org> [2.6.35+]
Signed-off-by: Dave Airlie <airlied@redhat.com>
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index beb8e75a3f004e9be03f844e3400c40a9b1dc29a..edb83151041f33064a456d1ca9b1e92beb14de98 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -391,14 +391,17 @@ out:
 static unsigned long
 ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
 {
-       static atomic_t start_pool = ATOMIC_INIT(0);
+       static DEFINE_MUTEX(lock);
+       static unsigned start_pool;
        unsigned i;
-       unsigned pool_offset = atomic_add_return(1, &start_pool);
+       unsigned pool_offset;
        struct ttm_page_pool *pool;
        int shrink_pages = sc->nr_to_scan;
        unsigned long freed = 0;
 
-       pool_offset = pool_offset % NUM_POOLS;
+       if (!mutex_trylock(&lock))
+               return SHRINK_STOP;
+       pool_offset = ++start_pool % NUM_POOLS;
        /* select start pool in round robin fashion */
        for (i = 0; i < NUM_POOLS; ++i) {
                unsigned nr_free = shrink_pages;
@@ -408,6 +411,7 @@ ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
                shrink_pages = ttm_page_pool_free(pool, nr_free);
                freed += nr_free - shrink_pages;
        }
+       mutex_unlock(&lock);
        return freed;
 }