mm: add_to_swap_cache() must not sleep
author Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Tue, 22 Sep 2009 00:02:50 +0000 (17:02 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Tue, 22 Sep 2009 14:17:35 +0000 (07:17 -0700)
After commit 355cfa73 ("mm: modify swap_map and add SWAP_HAS_CACHE flag"),
read_swap_cache_async() busy-waits while an entry does not yet exist in the
swap cache but has its SWAP_HAS_CACHE flag set.
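
For reference, the busy-wait is the -EEXIST retry in
read_swap_cache_async(), roughly (an abridged sketch, not the verbatim
source):

	do {
		found_page = find_get_page(&swapper_space, entry.val);
		if (found_page)
			break;		/* entry reached the swap cache */
		...
		err = swapcache_prepare(entry);
		if (err == -EEXIST)	/* SWAP_HAS_CACHE set elsewhere: */
			continue;	/* spin until the page shows up */
		if (err)
			break;		/* swp entry is obsolete */
		...
	} while (err != -ENOMEM);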

Such entries can exist on the add and delete paths of the swap cache.  On
the add path, add_to_swap_cache() is called soon after the SWAP_HAS_CACHE
flag is set, and on the delete path, swapcache_free() (which clears the
SWAP_HAS_CACHE flag) is called soon after __delete_from_swap_cache().  So
the busy-wait works well in most cases.
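
The two windows look like this (a rough sketch of the call ordering, not
verbatim source):

	/* add path */
	swapcache_prepare(entry);		/* sets SWAP_HAS_CACHE */
	/* <-- window: flag set, page not yet in the swap cache */
	add_to_swap_cache(page, entry, gfp_mask);

	/* delete path */
	__delete_from_swap_cache(page);
	/* <-- window: flag still set, page already gone */
	swapcache_free(entry, page);		/* clears SWAP_HAS_CACHE */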

But this mechanism can cause a soft lockup if add_to_swap_cache() sleeps
and read_swap_cache_async() tries to swap in the same entry on the same
CPU.
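
For example, with kernel preemption off, the following interleave on one
CPU never makes progress (a hypothetical timeline, not from the patch):

	task A					task B
	------					------
	swapcache_prepare(entry)
	/* SWAP_HAS_CACHE now set */
	add_to_swap_cache()
	  radix_tree_preload(GFP_KERNEL)
	  /* sleeps in the allocator */
						read_swap_cache_async(entry)
						swapcache_prepare() -> -EEXIST
						continue  /* busy-wait */

	B spins with no scheduling point, so A never runs again to finish
	the radix tree insert: soft lockup.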

This patch calls radix_tree_preload() before swapcache_prepare() and
divides add_to_swap_cache() into two parts: the radix_tree_preload() part
and the radix_tree_insert() part (the latter factored out as
__add_to_swap_cache()).  With the preload done up front, while sleeping is
still allowed, nothing between swapcache_prepare() and the radix tree
insertion needs to sleep.

Signed-off-by: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Balbir Singh <balbir@linux.vnet.ibm.com>
Cc: Hugh Dickins <hugh.dickins@tiscali.co.uk>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
mm/swap_state.c

index 5ae6b8b78c801d821d41003917b720116c938bd0..b076a1a5a0aaf9298c90a6da78d077735ce09c33 100644
@@ -67,10 +67,10 @@ void show_swap_cache_info(void)
 }
 
 /*
- * add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
+ * __add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
  * but sets SwapCache flag and private instead of mapping and index.
  */
-int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
+static int __add_to_swap_cache(struct page *page, swp_entry_t entry)
 {
        int error;
 
@@ -78,28 +78,37 @@ int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
        VM_BUG_ON(PageSwapCache(page));
        VM_BUG_ON(!PageSwapBacked(page));
 
+       page_cache_get(page);
+       SetPageSwapCache(page);
+       set_page_private(page, entry.val);
+
+       spin_lock_irq(&swapper_space.tree_lock);
+       error = radix_tree_insert(&swapper_space.page_tree, entry.val, page);
+       if (likely(!error)) {
+               total_swapcache_pages++;
+               __inc_zone_page_state(page, NR_FILE_PAGES);
+               INC_CACHE_INFO(add_total);
+       }
+       spin_unlock_irq(&swapper_space.tree_lock);
+
+       if (unlikely(error)) {
+               set_page_private(page, 0UL);
+               ClearPageSwapCache(page);
+               page_cache_release(page);
+       }
+
+       return error;
+}
+
+
+int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
+{
+       int error;
+
        error = radix_tree_preload(gfp_mask);
        if (!error) {
-               page_cache_get(page);
-               SetPageSwapCache(page);
-               set_page_private(page, entry.val);
-
-               spin_lock_irq(&swapper_space.tree_lock);
-               error = radix_tree_insert(&swapper_space.page_tree,
-                                               entry.val, page);
-               if (likely(!error)) {
-                       total_swapcache_pages++;
-                       __inc_zone_page_state(page, NR_FILE_PAGES);
-                       INC_CACHE_INFO(add_total);
-               }
-               spin_unlock_irq(&swapper_space.tree_lock);
+               error = __add_to_swap_cache(page, entry);
                radix_tree_preload_end();
-
-               if (unlikely(error)) {
-                       set_page_private(page, 0UL);
-                       ClearPageSwapCache(page);
-                       page_cache_release(page);
-               }
        }
        return error;
 }
@@ -289,14 +298,25 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
                                break;          /* Out of memory */
                }
 
+               /*
+                * call radix_tree_preload() while we can wait.
+                */
+               err = radix_tree_preload(gfp_mask & GFP_KERNEL);
+               if (err)
+                       break;
+
                /*
                 * Swap entry may have been freed since our caller observed it.
                 */
                err = swapcache_prepare(entry);
-               if (err == -EEXIST) /* seems racy */
+               if (err == -EEXIST) {   /* seems racy */
+                       radix_tree_preload_end();
                        continue;
-               if (err)           /* swp entry is obsolete ? */
+               }
+               if (err) {              /* swp entry is obsolete ? */
+                       radix_tree_preload_end();
                        break;
+               }
 
                /*
                 * Associate the page with swap entry in the swap cache.
@@ -308,8 +328,9 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
                 */
                __set_page_locked(new_page);
                SetPageSwapBacked(new_page);
-               err = add_to_swap_cache(new_page, entry, gfp_mask & GFP_KERNEL);
+               err = __add_to_swap_cache(new_page, entry);
                if (likely(!err)) {
+                       radix_tree_preload_end();
                        /*
                         * Initiate read into locked page and return.
                         */
@@ -317,6 +338,7 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
                        swap_readpage(new_page);
                        return new_page;
                }
+               radix_tree_preload_end();
                ClearPageSwapBacked(new_page);
                __clear_page_locked(new_page);
                swapcache_free(entry, NULL);