mm: use __SetPageSwapBacked and don't ClearPageSwapBacked
Author:     Hugh Dickins <hughd@google.com>
AuthorDate: Fri, 20 May 2016 00:12:41 +0000 (17:12 -0700)
Commit:     Linus Torvalds <torvalds@linux-foundation.org>
CommitDate: Fri, 20 May 2016 02:12:14 +0000 (19:12 -0700)
v3.16 commit 07a427884348 ("mm: shmem: avoid atomic operation during
shmem_getpage_gfp") rightly replaced one instance of SetPageSwapBacked
by __SetPageSwapBacked, pointing out that the newly allocated page is
not yet visible to other users (except speculative get_page_unless_zero-
ers, who may not update page flags before their further checks).
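
For readers less familiar with the page-flag helpers, the distinction is
roughly the following (a simplified, illustrative sketch of what the macros
in include/linux/page-flags.h generate; the real definitions go through the
compound-page policy wrappers):

	/*
	 * Illustrative sketch only: the __SetPage* variant does a
	 * non-atomic read-modify-write, which is safe only while no
	 * other user can observe the page's flags word.
	 */
	static inline void SetPageSwapBacked(struct page *page)
	{
		set_bit(PG_swapbacked, &page->flags);	/* atomic RMW */
	}

	static inline void __SetPageSwapBacked(struct page *page)
	{
		__set_bit(PG_swapbacked, &page->flags);	/* non-atomic RMW */
	}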

That was part of a series in which Mel was focused on tmpfs profiles:
but almost all SetPageSwapBacked uses can be so optimized, with the same
justification.

Remove ClearPageSwapBacked from __read_swap_cache_async() error path:
it's not an error to free a page with PG_swapbacked set.

Follow a convention of __SetPageLocked, __SetPageSwapBacked instead of
doing it differently in different places; but that's for tidiness - if
the ordering actually mattered, we should not be using the __variants.
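
Schematically, the allocation-side pattern this settles on looks like the
sketch below (illustrative only; alloc_swapbacked_page is a hypothetical
helper, not a function added by this patch):

	/*
	 * Sketch of the convention: initialise flags with the non-atomic
	 * __SetPage* helpers while the page is still private to us, then
	 * publish it.  (alloc_swapbacked_page is hypothetical.)
	 */
	static struct page *alloc_swapbacked_page(gfp_t gfp)
	{
		struct page *page = alloc_page(gfp);

		if (!page)
			return NULL;
		__SetPageLocked(page);		/* no other user sees the flags yet */
		__SetPageSwapBacked(page);
		/* caller now adds the page to the swap cache, page cache or  */
		/* rmap; once visible, only the atomic SetPageXXX forms are safe */
		return page;
	}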

There's probably scope for further __SetPageFlags in other places, but
SwapBacked is the one I'm interested in at the moment.

Signed-off-by: Hugh Dickins <hughd@google.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Andres Lagar-Cavilla <andreslc@google.com>
Cc: Yang Shi <yang.shi@linaro.org>
Cc: Ning Qu <quning@gmail.com>
Reviewed-by: Mel Gorman <mgorman@techsingularity.net>
Cc: Konstantin Khlebnikov <koct9i@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
mm/migrate.c
mm/rmap.c
mm/shmem.c
mm/swap_state.c

index f9dfb18a4ebac9f2f36d798ce6fa6b4ecd4d3c77..53ab6398e7a2a2d3f122db43f0c36fbf62e562f2 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -332,7 +332,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
                newpage->index = page->index;
                newpage->mapping = page->mapping;
                if (PageSwapBacked(page))
-                       SetPageSwapBacked(newpage);
+                       __SetPageSwapBacked(newpage);
 
                return MIGRATEPAGE_SUCCESS;
        }
@@ -378,7 +378,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
        newpage->index = page->index;
        newpage->mapping = page->mapping;
        if (PageSwapBacked(page))
-               SetPageSwapBacked(newpage);
+               __SetPageSwapBacked(newpage);
 
        get_page(newpage);      /* add cache reference */
        if (PageSwapCache(page)) {
@@ -1791,7 +1791,7 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
 
        /* Prepare a page as a migration target */
        __SetPageLocked(new_page);
-       SetPageSwapBacked(new_page);
+       __SetPageSwapBacked(new_page);
 
        /* anon mapping, we can simply copy page->mapping to the new page: */
        new_page->mapping = page->mapping;
index 4cebe8a7c2cbaea58de6e69747079f0fdf5a43fe..8a839935b18c000ecaa8411f0ffaf6c5ee81b998 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1249,7 +1249,7 @@ void page_add_new_anon_rmap(struct page *page,
        int nr = compound ? hpage_nr_pages(page) : 1;
 
        VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
-       SetPageSwapBacked(page);
+       __SetPageSwapBacked(page);
        if (compound) {
                VM_BUG_ON_PAGE(!PageTransHuge(page), page);
                /* increment count (starts at -1) */
index e684a914022805cce8dddf3fb84ea66359f4b315..9e609d58df7387de2656741bb562064ccb9c1479 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1085,8 +1085,8 @@ static int shmem_replace_page(struct page **pagep, gfp_t gfp,
        flush_dcache_page(newpage);
 
        __SetPageLocked(newpage);
+       __SetPageSwapBacked(newpage);
        SetPageUptodate(newpage);
-       SetPageSwapBacked(newpage);
        set_page_private(newpage, swap_index);
        SetPageSwapCache(newpage);
 
@@ -1276,8 +1276,8 @@ repeat:
                        goto decused;
                }
 
-               __SetPageSwapBacked(page);
                __SetPageLocked(page);
+               __SetPageSwapBacked(page);
                if (sgp == SGP_WRITE)
                        __SetPageReferenced(page);
 
index 366ce3518703ecb7cd780b43acd7154adece2b9d..0d457e7db8d6df5e3bd185ac2b2333830bccf7b4 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -358,7 +358,7 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 
                /* May fail (-ENOMEM) if radix-tree node allocation failed. */
                __SetPageLocked(new_page);
-               SetPageSwapBacked(new_page);
+               __SetPageSwapBacked(new_page);
                err = __add_to_swap_cache(new_page, entry);
                if (likely(!err)) {
                        radix_tree_preload_end();
@@ -370,7 +370,6 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
                        return new_page;
                }
                radix_tree_preload_end();
-               ClearPageSwapBacked(new_page);
                __ClearPageLocked(new_page);
                /*
                 * add_to_swap_cache() doesn't return -EEXIST, so we can safely