mm: remove __GFP_NOFAIL is deprecated comment
diff --git a/mm/migrate.c b/mm/migrate.c
index 3ad0fea5c4387c5b6ff1d8fb513b73488b9180f2..577c94b8e959ac8034b82dbfa611c9a000de4478 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -38,6 +38,7 @@
 #include <linux/balloon_compaction.h>
 #include <linux/mmu_notifier.h>
 #include <linux/page_idle.h>
+#include <linux/page_owner.h>
 
 #include <asm/tlbflush.h>
 
@@ -325,7 +326,6 @@ int migrate_page_move_mapping(struct address_space *mapping,
                        return -EAGAIN;
 
                /* No turning back from here */
-               set_page_memcg(newpage, page_memcg(page));
                newpage->index = page->index;
                newpage->mapping = page->mapping;
                if (PageSwapBacked(page))
@@ -349,7 +349,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
                return -EAGAIN;
        }
 
-       if (!page_freeze_refs(page, expected_count)) {
+       if (!page_ref_freeze(page, expected_count)) {
                spin_unlock_irq(&mapping->tree_lock);
                return -EAGAIN;
        }
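
The rename from page_freeze_refs()/page_unfreeze_refs() to page_ref_freeze()/page_ref_unfreeze() consolidates refcount manipulation behind the common page_ref_* API. The semantics are unchanged: freeze succeeds only when it atomically swaps an exact expected refcount for zero, so nobody else can take a reference while the page is in flight. A rough sketch of the helpers as in include/linux/page_ref.h of this era (field names vary across kernel versions):

static inline int page_ref_freeze(struct page *page, int count)
{
	/* Succeeds only if the refcount is exactly 'count'; drops it to 0. */
	return likely(atomic_cmpxchg(&page->_count, count, 0) == count);
}

static inline void page_ref_unfreeze(struct page *page, int count)
{
	/* Only legal on a frozen page; restores the refcount. */
	VM_BUG_ON_PAGE(page_count(page) != 0, page);
	atomic_set(&page->_count, count);
}
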
@@ -363,7 +363,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
         */
        if (mode == MIGRATE_ASYNC && head &&
                        !buffer_migrate_lock_buffers(head, mode)) {
-               page_unfreeze_refs(page, expected_count);
+               page_ref_unfreeze(page, expected_count);
                spin_unlock_irq(&mapping->tree_lock);
                return -EAGAIN;
        }
@@ -372,7 +372,6 @@ int migrate_page_move_mapping(struct address_space *mapping,
         * Now we know that no one else is looking at the page:
         * no turning back from here.
         */
-       set_page_memcg(newpage, page_memcg(page));
        newpage->index = page->index;
        newpage->mapping = page->mapping;
        if (PageSwapBacked(page))
@@ -398,7 +397,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
         * to one less reference.
         * We know this isn't the last reference.
         */
-       page_unfreeze_refs(page, expected_count - 1);
+       page_ref_unfreeze(page, expected_count - 1);
 
        spin_unlock(&mapping->tree_lock);
        /* Leave irq disabled to prevent preemption while updating stats */
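
Note the asymmetry: the page is frozen at expected_count but unfrozen at expected_count - 1. Schematically, assuming extra_count == 0 and no buffer_head private reference:

/*
 * expected_count = 2: one reference held by the radix tree slot,
 *                     one held by the migration caller.
 *
 * page_ref_freeze(page, expected_count);        old page: 2 -> 0
 * radix_tree_replace_slot(pslot, newpage);      tree ref now pins newpage
 * page_ref_unfreeze(page, expected_count - 1);  old page: 0 -> 1
 *
 * The old page gives up the tree's reference; only the caller's remains,
 * and the caller drops it once migration completes.
 */
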
@@ -452,21 +451,22 @@ int migrate_huge_page_move_mapping(struct address_space *mapping,
                return -EAGAIN;
        }
 
-       if (!page_freeze_refs(page, expected_count)) {
+       if (!page_ref_freeze(page, expected_count)) {
                spin_unlock_irq(&mapping->tree_lock);
                return -EAGAIN;
        }
 
-       set_page_memcg(newpage, page_memcg(page));
        newpage->index = page->index;
        newpage->mapping = page->mapping;
+
        get_page(newpage);
 
        radix_tree_replace_slot(pslot, newpage);
 
-       page_unfreeze_refs(page, expected_count - 1);
+       page_ref_unfreeze(page, expected_count - 1);
 
        spin_unlock_irq(&mapping->tree_lock);
+
        return MIGRATEPAGE_SUCCESS;
 }
 
@@ -578,6 +578,10 @@ void migrate_page_copy(struct page *newpage, struct page *page)
         */
        if (PageWriteback(newpage))
                end_page_writeback(newpage);
+
+       copy_page_owner(page, newpage);
+
+       mem_cgroup_migrate(page, newpage);
 }
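
Two things now happen at copy time: the page_owner metadata moves to the new page, and the memcg charge is migrated here instead of via the set_page_memcg() calls removed from the mapping-move paths above (mem_cgroup_migrate() wants both pages locked, which is guaranteed at this point). A sketch of the copy, loosely following the CONFIG_PAGE_OWNER implementation in mm/page_owner.c (details are version-dependent):

void copy_page_owner(struct page *oldpage, struct page *newpage)
{
	struct page_ext *old_ext = lookup_page_ext(oldpage);
	struct page_ext *new_ext = lookup_page_ext(newpage);
	int i;

	/*
	 * Preserve the original allocation's order, gfp mask and stack
	 * trace so page_owner keeps blaming the real allocator.
	 */
	new_ext->order = old_ext->order;
	new_ext->gfp_mask = old_ext->gfp_mask;
	new_ext->nr_entries = old_ext->nr_entries;

	for (i = 0; i < ARRAY_SIZE(new_ext->trace_entries); i++)
		new_ext->trace_entries[i] = old_ext->trace_entries[i];

	__set_bit(PAGE_EXT_OWNER, &new_ext->flags);
}
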
 
 /************************************************************
@@ -772,7 +776,6 @@ static int move_to_new_page(struct page *newpage, struct page *page,
         * page is freed; but stats require that PageAnon be left as PageAnon.
         */
        if (rc == MIGRATEPAGE_SUCCESS) {
-               set_page_memcg(page, NULL);
                if (!PageAnon(page))
                        page->mapping = NULL;
        }
@@ -952,8 +955,10 @@ static ICE_noinline int unmap_and_move(new_page_t get_new_page,
        }
 
        rc = __unmap_and_move(page, newpage, force, mode);
-       if (rc == MIGRATEPAGE_SUCCESS)
+       if (rc == MIGRATEPAGE_SUCCESS) {
                put_new_page = NULL;
+               set_page_owner_migrate_reason(newpage, reason);
+       }
 
 out:
        if (rc != -EAGAIN) {
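
On success the new page additionally records why it was migrated, which /sys/kernel/debug/page_owner can later print. The helper is tiny; roughly (a sketch of the mm/page_owner.c side of this series, minus the static-key guard):

void set_page_owner_migrate_reason(struct page *page, int reason)
{
	struct page_ext *page_ext = lookup_page_ext(page);

	page_ext->last_migrate_reason = reason;
}
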
@@ -1018,7 +1023,7 @@ out:
 static int unmap_and_move_huge_page(new_page_t get_new_page,
                                free_page_t put_new_page, unsigned long private,
                                struct page *hpage, int force,
-                               enum migrate_mode mode)
+                               enum migrate_mode mode, int reason)
 {
        int rc = -EAGAIN;
        int *result = NULL;
@@ -1076,6 +1081,7 @@ put_anon:
        if (rc == MIGRATEPAGE_SUCCESS) {
                hugetlb_cgroup_migrate(hpage, new_hpage);
                put_new_page = NULL;
+               set_page_owner_migrate_reason(new_hpage, reason);
        }
 
        unlock_page(hpage);
@@ -1148,7 +1154,7 @@ int migrate_pages(struct list_head *from, new_page_t get_new_page,
                        if (PageHuge(page))
                                rc = unmap_and_move_huge_page(get_new_page,
                                                put_new_page, private, page,
-                                               pass > 2, mode);
+                                               pass > 2, mode, reason);
                        else
                                rc = unmap_and_move(get_new_page, put_new_page,
                                                private, page, pass > 2, mode,
@@ -1767,7 +1773,10 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
                put_page(new_page);
                goto out_fail;
        }
-
+       /*
+        * We are not sure whether a pending TLB flush here is for a huge
+        * page mapping or not, so use the TLB range variant.
+        */
        if (mm_tlb_flush_pending(mm))
                flush_tlb_range(vma, mmun_start, mmun_end);
 
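
The pending-flush check guards against a race with a concurrent PTE-clearing operation (change_protection(), for instance) that has deferred its TLB flush: stale small-page entries must be gone before the huge PMD is installed, hence the conservative range flush. For reference, mm_tlb_flush_pending() is roughly (a sketch; the real helper lives in include/linux/mm_types.h):

static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
{
	barrier();	/* pairs with the barrier in set_tlb_flush_pending() */
	return mm->tlb_flush_pending;
}
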
@@ -1823,12 +1832,11 @@ fail_putback:
        page_add_anon_rmap(new_page, vma, mmun_start, true);
        pmdp_huge_clear_flush_notify(vma, mmun_start, pmd);
        set_pmd_at(mm, mmun_start, pmd, entry);
-       flush_tlb_range(vma, mmun_start, mmun_end);
        update_mmu_cache_pmd(vma, address, &entry);
 
        if (page_count(page) != 2) {
                set_pmd_at(mm, mmun_start, pmd, orig_entry);
-               flush_tlb_range(vma, mmun_start, mmun_end);
+               flush_pmd_tlb_range(vma, mmun_start, mmun_end);
                mmu_notifier_invalidate_range(mm, mmun_start, mmun_end);
                update_mmu_cache_pmd(vma, address, &entry);
                page_remove_rmap(new_page, true);
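
The rollback path switches to flush_pmd_tlb_range() for the same reason as the comment above: some architectures must know that a flush targets a huge-page (PMD) mapping. Architectures with no such requirement fall back to the plain range flush; the generic fallback is approximately:

/* asm-generic fallback when no arch-specific PMD-aware flush exists */
#ifndef __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
#define flush_pmd_tlb_range(vma, addr, end)	flush_tlb_range(vma, addr, end)
#endif
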
@@ -1836,9 +1844,8 @@ fail_putback:
        }
 
        mlock_migrate_page(new_page, page);
-       set_page_memcg(new_page, page_memcg(page));
-       set_page_memcg(page, NULL);
        page_remove_rmap(page, true);
+       set_page_owner_migrate_reason(new_page, MR_NUMA_MISPLACED);
 
        spin_unlock(ptl);
        mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
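
MR_NUMA_MISPLACED is passed explicitly here because the THP NUMA-balancing path does not go through unmap_and_move(), which otherwise threads the reason down. The codes come from enum migrate_reason; approximately, per include/linux/migrate.h of this era:

enum migrate_reason {
	MR_COMPACTION,
	MR_MEMORY_FAILURE,
	MR_MEMORY_HOTPLUG,
	MR_SYSCALL,		/* also applies to cpusets */
	MR_MEMPOLICY_MBIND,
	MR_NUMA_MISPLACED,
	MR_CMA,
	MR_TYPES
};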