diff --git a/mm/migrate.c b/mm/migrate.c
index 08a7b6c4c266343e0afe67e553e46c1916902c23..7890d0bb5e23c3db75cc682878a2c5ec89b0e513 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -30,6 +30,7 @@
 #include <linux/mempolicy.h>
 #include <linux/vmalloc.h>
 #include <linux/security.h>
+#include <linux/backing-dev.h>
 #include <linux/syscalls.h>
 #include <linux/hugetlb.h>
 #include <linux/hugetlb_cgroup.h>
@@ -313,6 +314,8 @@ int migrate_page_move_mapping(struct address_space *mapping,
                struct buffer_head *head, enum migrate_mode mode,
                int extra_count)
 {
+       struct zone *oldzone, *newzone;
+       int dirty;
        int expected_count = 1 + extra_count;
        void **pslot;
 
@@ -320,9 +323,20 @@ int migrate_page_move_mapping(struct address_space *mapping,
                /* Anonymous page without mapping */
                if (page_count(page) != expected_count)
                        return -EAGAIN;
+
+               /* No turning back from here */
+               set_page_memcg(newpage, page_memcg(page));
+               newpage->index = page->index;
+               newpage->mapping = page->mapping;
+               if (PageSwapBacked(page))
+                       SetPageSwapBacked(newpage);
+
                return MIGRATEPAGE_SUCCESS;
        }
 
+       oldzone = page_zone(page);
+       newzone = page_zone(newpage);
+
        spin_lock_irq(&mapping->tree_lock);
 
        pslot = radix_tree_lookup_slot(&mapping->page_tree,
@@ -355,14 +369,28 @@ int migrate_page_move_mapping(struct address_space *mapping,
        }
 
        /*
-        * Now we know that no one else is looking at the page.
+        * Now we know that no one else is looking at the page:
+        * no turning back from here.
         */
+       set_page_memcg(newpage, page_memcg(page));
+       newpage->index = page->index;
+       newpage->mapping = page->mapping;
+       if (PageSwapBacked(page))
+               SetPageSwapBacked(newpage);
+
        get_page(newpage);      /* add cache reference */
        if (PageSwapCache(page)) {
                SetPageSwapCache(newpage);
                set_page_private(newpage, page_private(page));
        }
 
+       /* Move dirty while page refs frozen and newpage not yet exposed */
+       dirty = PageDirty(page);
+       if (dirty) {
+               ClearPageDirty(page);
+               SetPageDirty(newpage);
+       }
+
        radix_tree_replace_slot(pslot, newpage);
 
        /*
@@ -372,6 +400,9 @@ int migrate_page_move_mapping(struct address_space *mapping,
         */
        page_unfreeze_refs(page, expected_count - 1);
 
+       spin_unlock(&mapping->tree_lock);
+       /* Leave irq disabled to prevent preemption while updating stats */
+
        /*
         * If moved to a different zone then also account
         * the page for that zone. Other VM counters will be
@@ -382,13 +413,19 @@ int migrate_page_move_mapping(struct address_space *mapping,
         * via NR_FILE_PAGES and NR_ANON_PAGES if they
         * are mapped to swap space.
         */
-       __dec_zone_page_state(page, NR_FILE_PAGES);
-       __inc_zone_page_state(newpage, NR_FILE_PAGES);
-       if (!PageSwapCache(page) && PageSwapBacked(page)) {
-               __dec_zone_page_state(page, NR_SHMEM);
-               __inc_zone_page_state(newpage, NR_SHMEM);
+       if (newzone != oldzone) {
+               __dec_zone_state(oldzone, NR_FILE_PAGES);
+               __inc_zone_state(newzone, NR_FILE_PAGES);
+               if (PageSwapBacked(page) && !PageSwapCache(page)) {
+                       __dec_zone_state(oldzone, NR_SHMEM);
+                       __inc_zone_state(newzone, NR_SHMEM);
+               }
+               if (dirty && mapping_cap_account_dirty(mapping)) {
+                       __dec_zone_state(oldzone, NR_FILE_DIRTY);
+                       __inc_zone_state(newzone, NR_FILE_DIRTY);
+               }
        }
-       spin_unlock_irq(&mapping->tree_lock);
+       local_irq_enable();
 
        return MIGRATEPAGE_SUCCESS;
 }
@@ -403,12 +440,6 @@ int migrate_huge_page_move_mapping(struct address_space *mapping,
        int expected_count;
        void **pslot;
 
-       if (!mapping) {
-               if (page_count(page) != 1)
-                       return -EAGAIN;
-               return MIGRATEPAGE_SUCCESS;
-       }
-
        spin_lock_irq(&mapping->tree_lock);
 
        pslot = radix_tree_lookup_slot(&mapping->page_tree,
@@ -426,6 +457,9 @@ int migrate_huge_page_move_mapping(struct address_space *mapping,
                return -EAGAIN;
        }
 
+       set_page_memcg(newpage, page_memcg(page));
+       newpage->index = page->index;
+       newpage->mapping = page->mapping;
        get_page(newpage);
 
        radix_tree_replace_slot(pslot, newpage);
@@ -512,20 +546,9 @@ void migrate_page_copy(struct page *newpage, struct page *page)
        if (PageMappedToDisk(page))
                SetPageMappedToDisk(newpage);
 
-       if (PageDirty(page)) {
-               clear_page_dirty_for_io(page);
-               /*
-                * Want to mark the page and the radix tree as dirty, and
-                * redo the accounting that clear_page_dirty_for_io undid,
-                * but we can't use set_page_dirty because that function
-                * is actually a signal that all of the page has become dirty.
-                * Whereas only part of our page may be dirty.
-                */
-               if (PageSwapBacked(page))
-                       SetPageDirty(newpage);
-               else
-                       __set_page_dirty_nobuffers(newpage);
-       }
+       /* Move dirty on pages not done by migrate_page_move_mapping() */
+       if (PageDirty(page))
+               SetPageDirty(newpage);
 
        if (page_is_young(page))
                set_page_young(newpage);
@@ -730,21 +753,6 @@ static int move_to_new_page(struct page *newpage, struct page *page,
        VM_BUG_ON_PAGE(!PageLocked(page), page);
        VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
 
-       /* Prepare mapping for the new page.*/
-       newpage->index = page->index;
-       newpage->mapping = page->mapping;
-       if (PageSwapBacked(page))
-               SetPageSwapBacked(newpage);
-
-       /*
-        * Indirectly called below, migrate_page_copy() copies PG_dirty and thus
-        * needs newpage's memcg set to transfer memcg dirty page accounting.
-        * So perform memcg migration in two steps:
-        * 1. set newpage->mem_cgroup (here)
-        * 2. clear page->mem_cgroup (below)
-        */
-       set_page_memcg(newpage, page_memcg(page));
-
        mapping = page_mapping(page);
        if (!mapping)
                rc = migrate_page(mapping, newpage, page, mode);
@@ -767,9 +775,6 @@ static int move_to_new_page(struct page *newpage, struct page *page,
                set_page_memcg(page, NULL);
                if (!PageAnon(page))
                        page->mapping = NULL;
-       } else {
-               set_page_memcg(newpage, NULL);
-               newpage->mapping = NULL;
        }
        return rc;
 }
@@ -971,10 +976,9 @@ out:
         * it.  Otherwise, putback_lru_page() will drop the reference grabbed
         * during isolation.
         */
-       if (put_new_page) {
-               ClearPageSwapBacked(newpage);
+       if (put_new_page)
                put_new_page(newpage, private);
-       } else if (unlikely(__is_movable_balloon_page(newpage))) {
+       else if (unlikely(__is_movable_balloon_page(newpage))) {
                /* drop our reference, page already in the balloon */
                put_page(newpage);
        } else
@@ -1574,7 +1578,7 @@ static struct page *alloc_misplaced_dst_page(struct page *page,
                                         (GFP_HIGHUSER_MOVABLE |
                                          __GFP_THISNODE | __GFP_NOMEMALLOC |
                                          __GFP_NORETRY | __GFP_NOWARN) &
-                                        ~GFP_IOFS, 0);
+                                        ~(__GFP_IO | __GFP_FS), 0);
 
        return newpage;
 }
@@ -1748,7 +1752,7 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
                goto out_dropref;
 
        new_page = alloc_pages_node(node,
-               (GFP_TRANSHUGE | __GFP_THISNODE) & ~__GFP_WAIT,
+               (GFP_TRANSHUGE | __GFP_THISNODE) & ~__GFP_RECLAIM,
                HPAGE_PMD_ORDER);
        if (!new_page)
                goto out_fail;
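
Editor's note (not part of the patch): the net effect of the migrate_page_move_mapping()
hunks above is that newpage is fully set up (mapping, index, memcg, PageSwapBacked) and
PageDirty is transferred while the old page's refcount is still frozen, and tree_lock is
then dropped without re-enabling interrupts so the non-atomic per-zone counters can be
updated safely. A condensed sketch of that locking pattern follows, assuming the 4.3-era
helpers used in this diff; the function name sketch_move_stats() is hypothetical.

#include <linux/backing-dev.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mmzone.h>

/*
 * Sketch only: mirrors the tail of migrate_page_move_mapping() as
 * changed above.  The caller is assumed to have sampled PageDirty
 * into 'dirty' while the old page's references were frozen.
 */
static void sketch_move_stats(struct address_space *mapping,
			      struct zone *oldzone, struct zone *newzone,
			      int dirty)
{
	spin_lock_irq(&mapping->tree_lock);
	/* ... radix tree slot replaced, old page refs unfrozen ... */
	spin_unlock(&mapping->tree_lock);	/* IRQs stay disabled */

	/*
	 * The non-atomic __dec_zone_state()/__inc_zone_state() helpers
	 * require IRQs off, which is why local_irq_enable() is deferred
	 * until the counter transfer is complete.
	 */
	if (newzone != oldzone) {
		__dec_zone_state(oldzone, NR_FILE_PAGES);
		__inc_zone_state(newzone, NR_FILE_PAGES);
		if (dirty && mapping_cap_account_dirty(mapping)) {
			__dec_zone_state(oldzone, NR_FILE_DIRTY);
			__inc_zone_state(newzone, NR_FILE_DIRTY);
		}
	}
	local_irq_enable();
}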