mm: page migration use the put_new_page whenever necessary
author	Hugh Dickins <hughd@google.com>
Fri, 6 Nov 2015 02:49:46 +0000 (18:49 -0800)
committer	Linus Torvalds <torvalds@linux-foundation.org>
Fri, 6 Nov 2015 03:34:48 +0000 (19:34 -0800)
I don't know of any problem from the way it's used in our current tree,
but there is one defect in page migration's custom put_new_page feature.

An unused newpage is expected to be released via the put_new_page()
callback, but there was one MIGRATEPAGE_SUCCESS (0) path which instead
released it with putback_lru_page(): which can be very wrong for a
custom pool.

This is more easily fixed by resetting put_new_page once it won't be
needed than by adding a further flag to modify the rc test.
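
For reference, the callback pair involved is declared in
include/linux/migrate.h as new_page_t and free_page_t.  The following is
a minimal sketch of a custom-pool user of that pair, showing why an
unused newpage must come back through put_new_page(); the my_pool type
and the pool_alloc_page()/pool_return_page() helpers are hypothetical,
invented purely for illustration:

	#include <linux/migrate.h>
	#include <linux/mm.h>

	struct my_pool;		/* hypothetical private pool of target pages */

	/* hypothetical pool helpers, assumed to exist for this sketch */
	struct page *pool_alloc_page(struct my_pool *pool);
	void pool_return_page(struct my_pool *pool, struct page *page);

	/* new_page_t: hand the migration core a target page from the pool */
	static struct page *pool_get_new_page(struct page *page,
					      unsigned long private, int **result)
	{
		struct my_pool *pool = (struct my_pool *)private;

		return pool_alloc_page(pool);
	}

	/* free_page_t: take back a target page the core did not consume */
	static void pool_put_new_page(struct page *newpage, unsigned long private)
	{
		struct my_pool *pool = (struct my_pool *)private;

		/*
		 * If the core released an unused newpage with
		 * putback_lru_page() instead of calling this handler, the
		 * page would land on the LRU and be lost to the pool:
		 * exactly the defect described above.
		 */
		pool_return_page(pool, newpage);
	}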

Signed-off-by: Hugh Dickins <hughd@google.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Rik van Riel <riel@redhat.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Sasha Levin <sasha.levin@oracle.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
mm/migrate.c

index d149cbb67a349322015687cab1342bd0d0a1b8ab..2f2e2236daf74ea24ab55351151068e469c34cd0 100644
@@ -938,10 +938,11 @@ static ICE_noinline int unmap_and_move(new_page_t get_new_page,
                                   int force, enum migrate_mode mode,
                                   enum migrate_reason reason)
 {
-       int rc = 0;
+       int rc = MIGRATEPAGE_SUCCESS;
        int *result = NULL;
-       struct page *newpage = get_new_page(page, private, &result);
+       struct page *newpage;
 
+       newpage = get_new_page(page, private, &result);
        if (!newpage)
                return -ENOMEM;
 
@@ -955,6 +956,8 @@ static ICE_noinline int unmap_and_move(new_page_t get_new_page,
                        goto out;
 
        rc = __unmap_and_move(page, newpage, force, mode);
+       if (rc == MIGRATEPAGE_SUCCESS)
+               put_new_page = NULL;
 
 out:
        if (rc != -EAGAIN) {
@@ -981,7 +984,7 @@ out:
         * it.  Otherwise, putback_lru_page() will drop the reference grabbed
         * during isolation.
         */
-       if (rc != MIGRATEPAGE_SUCCESS && put_new_page) {
+       if (put_new_page) {
                ClearPageSwapBacked(newpage);
                put_new_page(newpage, private);
        } else if (unlikely(__is_movable_balloon_page(newpage))) {
@@ -1022,7 +1025,7 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
                                struct page *hpage, int force,
                                enum migrate_mode mode)
 {
-       int rc = 0;
+       int rc = -EAGAIN;
        int *result = NULL;
        int page_was_mapped = 0;
        struct page *new_hpage;
@@ -1044,8 +1047,6 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
        if (!new_hpage)
                return -ENOMEM;
 
-       rc = -EAGAIN;
-
        if (!trylock_page(hpage)) {
                if (!force || mode != MIGRATE_SYNC)
                        goto out;
@@ -1070,8 +1071,10 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
        if (anon_vma)
                put_anon_vma(anon_vma);
 
-       if (rc == MIGRATEPAGE_SUCCESS)
+       if (rc == MIGRATEPAGE_SUCCESS) {
                hugetlb_cgroup_migrate(hpage, new_hpage);
+               put_new_page = NULL;
+       }
 
        unlock_page(hpage);
 out:
@@ -1083,7 +1086,7 @@ out:
         * it.  Otherwise, put_page() will drop the reference grabbed during
         * isolation.
         */
-       if (rc != MIGRATEPAGE_SUCCESS && put_new_page)
+       if (put_new_page)
                put_new_page(new_hpage, private);
        else
                putback_active_hugepage(new_hpage);
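
For context, the in-tree supplier of a custom put_new_page at this point
is memory compaction, which passes compaction_alloc() and
compaction_free() to migrate_pages() in mm/compaction.c.  A sketch of
the same wiring using the hypothetical pool helpers above (isolation and
error handling abbreviated):

	struct my_pool *pool;	/* hypothetical pool, set up elsewhere */
	LIST_HEAD(pagelist);
	int err;

	/* ... isolate the pages to be migrated onto &pagelist ... */

	err = migrate_pages(&pagelist, pool_get_new_page, pool_put_new_page,
			    (unsigned long)pool, MIGRATE_SYNC, MR_CMA);
	if (err)
		putback_movable_pages(&pagelist);

After this patch, put_new_page is cleared only once __unmap_and_move()
has actually consumed the newpage, so any path that leaves it unused,
including the early MIGRATEPAGE_SUCCESS exits, now hands it back through
the callback rather than to the LRU.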