mm/vmscan: protect the workingset on anonymous LRU
diff --git a/mm/memory.c b/mm/memory.c
index c39a13b09602cc60f013dbe503e14ba32d239b65..6fe8b5b22c575b9775347abd33c8951017b33672 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2715,7 +2715,7 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
                 */
                ptep_clear_flush_notify(vma, vmf->address, vmf->pte);
                page_add_new_anon_rmap(new_page, vma, vmf->address, false);
-               lru_cache_add_active_or_unevictable(new_page, vma);
+               lru_cache_add_inactive_or_unevictable(new_page, vma);
                /*
                 * We call the notify macro here because, when using secondary
                 * mmu page tables (such as kvm shadow page tables), we want the
@@ -3266,10 +3266,9 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
        /* ksm created a completely new copy */
        if (unlikely(page != swapcache && swapcache)) {
                page_add_new_anon_rmap(page, vma, vmf->address, false);
-               lru_cache_add_active_or_unevictable(page, vma);
+               lru_cache_add_inactive_or_unevictable(page, vma);
        } else {
                do_page_add_anon_rmap(page, vma, vmf->address, exclusive);
-               activate_page(page);
        }
 
        swap_free(entry);
@@ -3414,7 +3413,7 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
 
        inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
        page_add_new_anon_rmap(page, vma, vmf->address, false);
-       lru_cache_add_active_or_unevictable(page, vma);
+       lru_cache_add_inactive_or_unevictable(page, vma);
 setpte:
        set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry);
 
@@ -3672,7 +3671,7 @@ vm_fault_t alloc_set_pte(struct vm_fault *vmf, struct page *page)
        if (write && !(vma->vm_flags & VM_SHARED)) {
                inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
                page_add_new_anon_rmap(page, vma, vmf->address, false);
-               lru_cache_add_active_or_unevictable(page, vma);
+               lru_cache_add_inactive_or_unevictable(page, vma);
        } else {
                inc_mm_counter_fast(vma->vm_mm, mm_counter_file(page));
                page_add_file_rmap(page, false);
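Every hunk above converges on the same sequence for a freshly faulted anonymous page: map it with page_add_new_anon_rmap() and place it on the inactive (or unevictable) LRU rather than the active one, so that new pages must be referenced again before they can displace the established workingset on the active list. The sketch below is not part of the patch; it only restates that common sequence in one place, and the helper name map_new_anon_page() is invented for illustration.

/*
 * Sketch only (not from the patch): the shape shared by the four
 * anonymous-fault paths in this file after the change.  A newly
 * mapped anon page now starts on the inactive LRU; it has to prove
 * itself by being referenced again before it is promoted, instead of
 * landing on the active list and pushing out older workingset pages.
 * The helper name is hypothetical.
 */
static void map_new_anon_page(struct vm_fault *vmf, struct page *page,
			      pte_t entry)
{
	struct vm_area_struct *vma = vmf->vma;

	inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
	page_add_new_anon_rmap(page, vma, vmf->address, false);
	/* previously lru_cache_add_active_or_unevictable() */
	lru_cache_add_inactive_or_unevictable(page, vma);
	set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry);
}

In the do_swap_page() reuse branch the patch likewise drops the explicit activate_page() call, which appears intended to leave promotion of a swapped-in page to the usual reference and refault handling rather than forcing it onto the active list at fault time.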