mm/munlock: mlock_page() munlock_page() batch by pagevec
index 3f770b1ea2c12db2ca508b15b910496fa113473e..842d5cd92cf64cae5bd3f95775df973fb3678e00 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -490,18 +490,12 @@ EXPORT_SYMBOL(folio_add_lru);
 void lru_cache_add_inactive_or_unevictable(struct page *page,
                                         struct vm_area_struct *vma)
 {
-       bool unevictable;
-
        VM_BUG_ON_PAGE(PageLRU(page), page);
 
-       unevictable = (vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) == VM_LOCKED;
-       if (unlikely(unevictable) && !TestSetPageMlocked(page)) {
-               int nr_pages = thp_nr_pages(page);
-
-               mod_zone_page_state(page_zone(page), NR_MLOCK, nr_pages);
-               count_vm_events(UNEVICTABLE_PGMLOCKED, nr_pages);
-       }
-       lru_cache_add(page);
+       if (unlikely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) == VM_LOCKED))
+               mlock_new_page(page);
+       else
+               lru_cache_add(page);
 }
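
mlock_new_page() is new with this series and lives outside swap.c, so it is not shown in this hunk. Judging from the open-coded path it replaces above, a minimal sketch of the batching idea, with the per-cpu pagevec name mlock_pvec and the flush helper mlock_pagevec() assumed purely for illustration, might look like this:

/* assumed per-cpu batch; the real one is defined in mm/mlock.c */
static DEFINE_PER_CPU(struct pagevec, mlock_pvec);

void mlock_new_page(struct page *page)
{
	struct pagevec *pvec = &get_cpu_var(mlock_pvec);
	int nr_pages = thp_nr_pages(page);

	/* same accounting the deleted swap.c code used to do inline */
	SetPageMlocked(page);
	mod_zone_page_state(page_zone(page), NR_MLOCK, nr_pages);
	__count_vm_events(UNEVICTABLE_PGMLOCKED, nr_pages);

	/* hold a reference while the page waits in the pagevec */
	get_page(page);
	if (!pagevec_add(pvec, page) || PageHead(page))
		mlock_pagevec(pvec);	/* assumed helper: moves the batch to the LRUs */
	put_cpu_var(mlock_pvec);
}

Compared with the old code, the page no longer goes through lru_cache_add() at all; it is parked on the mlock pagevec and only reaches an LRU list when that batch is flushed.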
 
 /*
@@ -640,6 +634,7 @@ void lru_add_drain_cpu(int cpu)
                pagevec_lru_move_fn(pvec, lru_lazyfree_fn);
 
        activate_page_drain(cpu);
+       mlock_page_drain(cpu);
 }
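
mlock_page_drain(cpu) is also defined outside this file. Given how it is called here, right next to activate_page_drain(), it presumably just flushes the batched mlock pagevec for that CPU. A hedged sketch, reusing the assumed mlock_pvec and mlock_pagevec() names from the sketch above:

void mlock_page_drain(int cpu)
{
	struct pagevec *pvec = &per_cpu(mlock_pvec, cpu);

	/* flush any pages still waiting to be moved to the unevictable LRU */
	if (pagevec_count(pvec))
		mlock_pagevec(pvec);
}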
 
 /**
@@ -842,6 +837,7 @@ inline void __lru_add_drain_all(bool force_all_cpus)
                    pagevec_count(&per_cpu(lru_pvecs.lru_deactivate, cpu)) ||
                    pagevec_count(&per_cpu(lru_pvecs.lru_lazyfree, cpu)) ||
                    need_activate_page_drain(cpu) ||
+                   need_mlock_page_drain(cpu) ||
                    has_bh_in_lru(cpu, NULL)) {
                        INIT_WORK(work, lru_add_drain_per_cpu);
                        queue_work_on(cpu, mm_percpu_wq, work);
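
need_mlock_page_drain(cpu) only has to tell __lru_add_drain_all() whether scheduling drain work on that CPU is worthwhile, mirroring how need_activate_page_drain() is used in the same condition. Assuming the same per-cpu mlock_pvec, it can be as simple as:

bool need_mlock_page_drain(int cpu)
{
	return pagevec_count(&per_cpu(mlock_pvec, cpu)) != 0;
}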
@@ -1030,7 +1026,7 @@ static void __pagevec_lru_add_fn(struct folio *folio, struct lruvec *lruvec)
         * Is an smp_mb__after_atomic() still required here, before
         * folio_evictable() tests PageMlocked, to rule out the possibility
         * of stranding an evictable folio on an unevictable LRU?  I think
-        * not, because munlock_page() only clears PageMlocked while the LRU
+        * not, because __munlock_page() only clears PageMlocked while the LRU
         * lock is held.
         *
         * (That is not true of __page_cache_release(), and not necessarily
@@ -1043,7 +1039,14 @@ static void __pagevec_lru_add_fn(struct folio *folio, struct lruvec *lruvec)
        } else {
                folio_clear_active(folio);
                folio_set_unevictable(folio);
-               folio->mlock_count = !!folio_test_mlocked(folio);
+               /*
+                * folio->mlock_count = !!folio_test_mlocked(folio)?
+                * But that leaves __mlock_page() in doubt whether another
+                * actor has already counted the mlock or not.  Err on the
+                * safe side, underestimate, let page reclaim fix it, rather
+                * than leaving a page on the unevictable LRU indefinitely.
+                */
+               folio->mlock_count = 0;
                if (!was_unevictable)
                        __count_vm_events(UNEVICTABLE_PGCULLED, nr_pages);
        }
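
The "underestimate" in the new comment is safe because a later __mlock_page(), run when the mlock pagevec is drained, can repair the count once the folio sits on the unevictable LRU with the lruvec lock held. A hypothetical fragment showing just that repair step (the helper name and the surrounding locking are assumptions, not taken from this patch):

/* hypothetical helper: caller holds the lruvec lock and has isolated the page */
static void mlock_count_fixup(struct page *page, struct lruvec *lruvec)
{
	if (PageUnevictable(page)) {
		/* already on the unevictable LRU: repair the 0 set at lru-add time */
		if (PageMlocked(page))
			page->mlock_count++;
		return;
	}
	/* still evictable: move it across and start counting from the flag */
	del_page_from_lru_list(page, lruvec);
	ClearPageActive(page);
	SetPageUnevictable(page);
	page->mlock_count = !!PageMlocked(page);
	add_page_to_lru_list(page, lruvec);
	__count_vm_event(UNEVICTABLE_PGCULLED);
}

If the count drifts low, the worst case is that page reclaim finds the page still mlocked and puts it back, which is exactly the trade-off the comment describes.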