diff --git a/mm/migrate.c b/mm/migrate.c
index 905c2264c7885cdf23f4e6f6382153daaff2bd60..f7e4bfdc13b780137d08fa522b070e7192056f24 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -326,7 +326,7 @@ void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
        page = migration_entry_to_page(entry);
 
        /*
-        * Once radix-tree replacement of page migration started, page_count
+        * Once page cache replacement of page migration started, page_count
         * *must* be zero. And, we don't want to call wait_on_page_locked()
         * against a page without get_page().
         * So, we use get_page_unless_zero(), here. Even failed, page fault
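For orientation, the comment reworded above guards the tail of __migration_entry_wait(): take a speculative reference before sleeping on the page lock so the page cannot be freed under the waiter, and simply let the fault be retried if no reference could be taken. A rough sketch of that tail (paraphrased for context, not part of this hunk):

	if (!get_page_unless_zero(page))
		goto out;			/* migration already finished; the fault is simply retried */
	pte_unmap_unlock(ptep, ptl);		/* drop the PTE lock before sleeping */
	wait_on_page_locked(page);		/* wait for the migration path to unlock the page */
	put_page(page);				/* drop the speculative reference */
	return;
out:
	pte_unmap_unlock(ptep, ptl);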
@@ -441,10 +441,10 @@ int migrate_page_move_mapping(struct address_space *mapping,
                struct buffer_head *head, enum migrate_mode mode,
                int extra_count)
 {
+       XA_STATE(xas, &mapping->i_pages, page_index(page));
        struct zone *oldzone, *newzone;
        int dirty;
        int expected_count = 1 + extra_count;
-       void **pslot;
 
        /*
         * Device public or private pages have an extra refcount as they are
@@ -470,21 +470,16 @@ int migrate_page_move_mapping(struct address_space *mapping,
        oldzone = page_zone(page);
        newzone = page_zone(newpage);
 
-       xa_lock_irq(&mapping->i_pages);
-
-       pslot = radix_tree_lookup_slot(&mapping->i_pages,
-                                       page_index(page));
+       xas_lock_irq(&xas);
 
        expected_count += hpage_nr_pages(page) + page_has_private(page);
-       if (page_count(page) != expected_count ||
-               radix_tree_deref_slot_protected(pslot,
-                                       &mapping->i_pages.xa_lock) != page) {
-               xa_unlock_irq(&mapping->i_pages);
+       if (page_count(page) != expected_count || xas_load(&xas) != page) {
+               xas_unlock_irq(&xas);
                return -EAGAIN;
        }
 
        if (!page_ref_freeze(page, expected_count)) {
-               xa_unlock_irq(&mapping->i_pages);
+               xas_unlock_irq(&xas);
                return -EAGAIN;
        }
 
@@ -498,7 +493,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
        if (mode == MIGRATE_ASYNC && head &&
                        !buffer_migrate_lock_buffers(head, mode)) {
                page_ref_unfreeze(page, expected_count);
-               xa_unlock_irq(&mapping->i_pages);
+               xas_unlock_irq(&xas);
                return -EAGAIN;
        }
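For readers new to the XArray API this hunk switches to: XA_STATE() declares an on-stack cursor (a struct xa_state) bound to an array and an index, xas_lock_irq()/xas_unlock_irq() take the array's internal spinlock with interrupts disabled, and xas_load() returns the entry at the cursor's position, replacing the old radix_tree_lookup_slot() plus radix_tree_deref_slot_protected() pair. A stand-alone sketch of the lookup-and-verify pattern (the helper name is made up for illustration; assumes <linux/xarray.h> and <linux/pagemap.h>):

static struct page *xa_lookup_expected(struct address_space *mapping,
				       struct page *expected, pgoff_t index)
{
	XA_STATE(xas, &mapping->i_pages, index);	/* on-stack cursor at 'index' */
	struct page *page;

	xas_lock_irq(&xas);				/* i_pages lock, IRQs off */
	page = xas_load(&xas);				/* entry currently at 'index' */
	if (page != expected)
		page = NULL;				/* raced with truncate or migration */
	xas_unlock_irq(&xas);
	return page;
}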
 
@@ -526,16 +521,13 @@ int migrate_page_move_mapping(struct address_space *mapping,
                SetPageDirty(newpage);
        }
 
-       radix_tree_replace_slot(&mapping->i_pages, pslot, newpage);
+       xas_store(&xas, newpage);
        if (PageTransHuge(page)) {
                int i;
-               int index = page_index(page);
 
                for (i = 1; i < HPAGE_PMD_NR; i++) {
-                       pslot = radix_tree_lookup_slot(&mapping->i_pages,
-                                                      index + i);
-                       radix_tree_replace_slot(&mapping->i_pages, pslot,
-                                               newpage + i);
+                       xas_next(&xas);
+                       xas_store(&xas, newpage + i);
                }
        }
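The cursor also pays off in the transparent-huge-page loop above: instead of a fresh radix-tree lookup for every tail page, xas_next() advances the already-positioned cursor by one index and xas_store() overwrites that slot. A minimal illustrative helper capturing the same pattern (the name is invented; the caller is assumed to hold xas_lock_irq() and the entries are assumed to exist):

/* Store 'nr' consecutive entries with a single cursor walk. */
static void xas_store_consecutive(struct xa_state *xas, struct page *new, int nr)
{
	int i;

	xas_store(xas, new);			/* entry at the cursor's current index */
	for (i = 1; i < nr; i++) {
		xas_next(xas);			/* advance the cursor to index + i */
		xas_store(xas, new + i);	/* store the i-th subpage */
	}
}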
 
@@ -546,7 +538,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
         */
        page_ref_unfreeze(page, expected_count - hpage_nr_pages(page));
 
-       xa_unlock(&mapping->i_pages);
+       xas_unlock(&xas);
        /* Leave irq disabled to prevent preemption while updating stats */
 
        /*
@@ -586,22 +578,18 @@ EXPORT_SYMBOL(migrate_page_move_mapping);
 int migrate_huge_page_move_mapping(struct address_space *mapping,
                                   struct page *newpage, struct page *page)
 {
+       XA_STATE(xas, &mapping->i_pages, page_index(page));
        int expected_count;
-       void **pslot;
-
-       xa_lock_irq(&mapping->i_pages);
-
-       pslot = radix_tree_lookup_slot(&mapping->i_pages, page_index(page));
 
+       xas_lock_irq(&xas);
        expected_count = 2 + page_has_private(page);
-       if (page_count(page) != expected_count ||
-               radix_tree_deref_slot_protected(pslot, &mapping->i_pages.xa_lock) != page) {
-               xa_unlock_irq(&mapping->i_pages);
+       if (page_count(page) != expected_count || xas_load(&xas) != page) {
+               xas_unlock_irq(&xas);
                return -EAGAIN;
        }
 
        if (!page_ref_freeze(page, expected_count)) {
-               xa_unlock_irq(&mapping->i_pages);
+               xas_unlock_irq(&xas);
                return -EAGAIN;
        }
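The check-then-freeze idiom above is the reason the comment in the first hunk insists on get_page_unless_zero(): page_ref_freeze() atomically drops the refcount to zero only if it still equals the expected count, so once the page cache entry is being replaced no new speculative reference can succeed. A commented restatement (illustrative; 'expected' stands for the caller-computed expected_count):

	if (page_count(page) != expected || xas_load(&xas) != page) {
		xas_unlock_irq(&xas);
		return -EAGAIN;		/* an unexpected reference, or the slot no longer holds this page */
	}
	if (!page_ref_freeze(page, expected)) {
		xas_unlock_irq(&xas);
		return -EAGAIN;		/* lost a race between the check and the freeze */
	}
	/* refcount is now 0: concurrent get_page_unless_zero() callers back off */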
 
@@ -610,11 +598,11 @@ int migrate_huge_page_move_mapping(struct address_space *mapping,
 
        get_page(newpage);
 
-       radix_tree_replace_slot(&mapping->i_pages, pslot, newpage);
+       xas_store(&xas, newpage);
 
        page_ref_unfreeze(page, expected_count - 1);
 
-       xa_unlock_irq(&mapping->i_pages);
+       xas_unlock_irq(&xas);
 
        return MIGRATEPAGE_SUCCESS;
 }
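For context on how these helpers are driven, migrate_page() (elsewhere in this file) moves the page-cache mapping first and copies data and state only once the mapping switch has succeeded. A simplified paraphrase, with an invented name so it is not mistaken for the real function:

static int migrate_page_sketch(struct address_space *mapping,
			       struct page *newpage, struct page *page,
			       enum migrate_mode mode)
{
	int rc;

	rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode, 0);
	if (rc != MIGRATEPAGE_SUCCESS)
		return rc;			/* e.g. -EAGAIN: caller retries later */

	migrate_page_copy(newpage, page);	/* copy data, flags and dirty state */
	return MIGRATEPAGE_SUCCESS;
}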
@@ -1976,7 +1964,6 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
        struct page *new_page = NULL;
        int page_lru = page_is_file_cache(page);
        unsigned long start = address & HPAGE_PMD_MASK;
-       unsigned long end = start + HPAGE_PMD_SIZE;
 
        new_page = alloc_pages_node(node,
                (GFP_TRANSHUGE_LIGHT | __GFP_THISNODE),
@@ -1999,6 +1986,8 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
        /* anon mapping, we can simply copy page->mapping to the new page: */
        new_page->mapping = page->mapping;
        new_page->index = page->index;
+       /* flush the cache before copying using the kernel virtual address */
+       flush_cache_range(vma, start, start + HPAGE_PMD_SIZE);
        migrate_page_copy(new_page, page);
        WARN_ON(PageLRU(new_page));
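The hunk below removes the old flush site; together the two hunks move flush_cache_range() so the user mapping's cache lines are written back before the kernel copies the huge page through its own virtual address, as the new comment says, rather than after the copy. Condensed ordering sketch (mirroring the diff, not literal code):

	flush_cache_range(vma, start, start + HPAGE_PMD_SIZE);	/* flush the old user mapping first */
	migrate_page_copy(new_page, page);			/* then copy via the kernel virtual address */
	page_add_anon_rmap(new_page, vma, start, true);		/* only afterwards map the copy into the VMA */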
 
@@ -2036,7 +2025,6 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
         * new page and page_add_new_anon_rmap guarantee the copy is
         * visible before the pagetable update.
         */
-       flush_cache_range(vma, start, end);
        page_add_anon_rmap(new_page, vma, start, true);
        /*
         * At this point the pmd is numa/protnone (i.e. non present) and the TLB