// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/mm/swap.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

/*
 * This file contains the default values for the operation of the
 * Linux VM subsystem. Fine-tuning documentation can be found in
 * Documentation/admin-guide/sysctl/vm.rst.
 * Started 18.12.91
 * Swap aging added 23.2.95, Stephen Tweedie.
 * Buffermem limits added 12.3.98, Rik van Riel.
 */

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/mm_inline.h>
#include <linux/percpu_counter.h>
#include <linux/memremap.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/backing-dev.h>
#include <linux/memcontrol.h>
#include <linux/gfp.h>
#include <linux/uio.h>
#include <linux/hugetlb.h>
#include <linux/page_idle.h>
#include <linux/local_lock.h>
#include <linux/buffer_head.h>

#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/pagemap.h>

/* How many pages do we try to swap or page in/out together? */
int page_cluster;

/* Protecting only lru_rotate.fbatch which requires disabling interrupts */
struct lru_rotate {
	local_lock_t lock;
	struct folio_batch fbatch;
};
static DEFINE_PER_CPU(struct lru_rotate, lru_rotate) = {
	.lock = INIT_LOCAL_LOCK(lock),
};

/*
 * The following folio batches are grouped together because they are protected
 * by disabling preemption (and interrupts remain enabled only on PREEMPT_RT).
 */
struct cpu_fbatches {
	local_lock_t lock;
	struct folio_batch lru_add;
	struct folio_batch lru_deactivate_file;
	struct folio_batch lru_deactivate;
	struct folio_batch lru_lazyfree;
#ifdef CONFIG_SMP
	struct folio_batch activate;
#endif
};
static DEFINE_PER_CPU(struct cpu_fbatches, cpu_fbatches) = {
	.lock = INIT_LOCAL_LOCK(lock),
};

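/*
 * Illustrative sketch (not part of this file's code): the canonical
 * access pattern for these per-CPU batches, as used throughout below.
 */
#if 0
	struct folio_batch *fbatch;

	local_lock(&cpu_fbatches.lock);
	fbatch = this_cpu_ptr(&cpu_fbatches.lru_add);
	/* ... folio_batch_add_and_move(fbatch, folio, lru_add_fn); ... */
	local_unlock(&cpu_fbatches.lock);
#endif
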
/*
 * This path almost never happens for VM activity - pages are normally freed
 * via pagevecs.  But it gets used by networking - and for compound pages.
 */
static void __page_cache_release(struct page *page)
{
	if (PageLRU(page)) {
		struct folio *folio = page_folio(page);
		struct lruvec *lruvec;
		unsigned long flags;

		lruvec = folio_lruvec_lock_irqsave(folio, &flags);
		del_page_from_lru_list(page, lruvec);
		__clear_page_lru_flags(page);
		unlock_page_lruvec_irqrestore(lruvec, flags);
	}
	/* See comment on PageMlocked in release_pages() */
	if (unlikely(PageMlocked(page))) {
		int nr_pages = thp_nr_pages(page);

		__ClearPageMlocked(page);
		mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);
		count_vm_events(UNEVICTABLE_PGCLEARED, nr_pages);
	}
}

static void __put_single_page(struct page *page)
{
	__page_cache_release(page);
	mem_cgroup_uncharge(page_folio(page));
	free_unref_page(page, 0);
}

static void __put_compound_page(struct page *page)
{
	/*
	 * __page_cache_release() is supposed to be called for thp, not for
	 * hugetlb. This is because a hugetlb page never has PageLRU set
	 * (it is never added to any LRU list) and no memcg routines should
	 * be called for hugetlb (it has a separate hugetlb_cgroup).
	 */
	if (!PageHuge(page))
		__page_cache_release(page);
	destroy_compound_page(page);
}

void __put_page(struct page *page)
{
	if (unlikely(is_zone_device_page(page)))
		free_zone_device_page(page);
	else if (unlikely(PageCompound(page)))
		__put_compound_page(page);
	else
		__put_single_page(page);
}
EXPORT_SYMBOL(__put_page);

/**
 * put_pages_list() - release a list of pages
 * @pages: list of pages threaded on page->lru
 *
 * Release a list of pages which are strung together on page.lru.
 */
void put_pages_list(struct list_head *pages)
{
	struct page *page, *next;

	list_for_each_entry_safe(page, next, pages, lru) {
		if (!put_page_testzero(page)) {
			list_del(&page->lru);
			continue;
		}
		if (PageHead(page)) {
			list_del(&page->lru);
			__put_compound_page(page);
			continue;
		}
		/* Cannot be PageLRU because it's passed to us using the lru */
	}

	free_unref_page_list(pages);
	INIT_LIST_HEAD(pages);
}
EXPORT_SYMBOL(put_pages_list);

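/*
 * Illustrative sketch, not part of this file: a hypothetical caller
 * that has collected pages on a local list threaded through page->lru.
 */
#if 0
static void example_put_collected(struct page **pages, int nr)
{
	LIST_HEAD(list);
	int i;

	for (i = 0; i < nr; i++)
		list_add(&pages[i]->lru, &list);
	/* drops one reference per page; the list is reinitialized */
	put_pages_list(&list);
}
#endif
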
/*
 * get_kernel_pages() - pin kernel pages in memory
 * @kiov:	An array of struct kvec structures
 * @nr_segs:	number of segments to pin
 * @write:	pinning for read/write, currently ignored
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_segs long.
 *
 * Returns number of pages pinned. This may be fewer than the number requested.
 * If nr_segs is 0 or negative, returns 0.  If no pages were pinned, returns 0.
 * Each page returned must be released with a put_page() call when it is
 * finished with.
 */
int get_kernel_pages(const struct kvec *kiov, int nr_segs, int write,
		struct page **pages)
{
	int seg;

	for (seg = 0; seg < nr_segs; seg++) {
		if (WARN_ON(kiov[seg].iov_len != PAGE_SIZE))
			return seg;

		pages[seg] = kmap_to_page(kiov[seg].iov_base);
		get_page(pages[seg]);
	}

	return seg;
}
EXPORT_SYMBOL_GPL(get_kernel_pages);

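/*
 * Illustrative sketch, not part of this file: pinning a single
 * page-aligned, page-sized kernel buffer (hypothetical caller).  Any
 * kvec whose iov_len != PAGE_SIZE is rejected by the WARN_ON above.
 */
#if 0
static int example_pin_buffer(void *buf, struct page **page)
{
	struct kvec kiov = {
		.iov_base = buf,
		.iov_len = PAGE_SIZE,
	};

	/* returns 1 on success; release with put_page() when done */
	return get_kernel_pages(&kiov, 1, 0, page);
}
#endif
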
typedef void (*move_fn_t)(struct lruvec *lruvec, struct folio *folio);

static void lru_add_fn(struct lruvec *lruvec, struct folio *folio)
{
	int was_unevictable = folio_test_clear_unevictable(folio);
	long nr_pages = folio_nr_pages(folio);

	VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);

	/*
	 * Is an smp_mb__after_atomic() still required here, before
	 * folio_evictable() tests PageMlocked, to rule out the possibility
	 * of stranding an evictable folio on an unevictable LRU?  I think
	 * not, because __munlock_page() only clears PageMlocked while the LRU
	 * lock is held.
	 *
	 * (That is not true of __page_cache_release(), and not necessarily
	 * true of release_pages(): but those only clear PageMlocked after
	 * put_page_testzero() has excluded any other users of the page.)
	 */
	if (folio_evictable(folio)) {
		if (was_unevictable)
			__count_vm_events(UNEVICTABLE_PGRESCUED, nr_pages);
	} else {
		folio_clear_active(folio);
		folio_set_unevictable(folio);
		/*
		 * folio->mlock_count = !!folio_test_mlocked(folio)?
		 * But that leaves __mlock_page() in doubt whether another
		 * actor has already counted the mlock or not.  Err on the
		 * safe side, underestimate, let page reclaim fix it, rather
		 * than leaving a page on the unevictable LRU indefinitely.
		 */
		folio->mlock_count = 0;
		if (!was_unevictable)
			__count_vm_events(UNEVICTABLE_PGCULLED, nr_pages);
	}

	lruvec_add_folio(lruvec, folio);
	trace_mm_lru_insertion(folio);
}

static void folio_batch_move_lru(struct folio_batch *fbatch, move_fn_t move_fn)
{
	int i;
	struct lruvec *lruvec = NULL;
	unsigned long flags = 0;

	for (i = 0; i < folio_batch_count(fbatch); i++) {
		struct folio *folio = fbatch->folios[i];

		/* block memcg migration while the folio moves between lru */
		if (move_fn != lru_add_fn && !folio_test_clear_lru(folio))
			continue;

		lruvec = folio_lruvec_relock_irqsave(folio, lruvec, &flags);
		move_fn(lruvec, folio);

		folio_set_lru(folio);
	}

	if (lruvec)
		unlock_page_lruvec_irqrestore(lruvec, flags);
	folios_put(fbatch->folios, folio_batch_count(fbatch));
	folio_batch_init(fbatch);
}

static void folio_batch_add_and_move(struct folio_batch *fbatch,
		struct folio *folio, move_fn_t move_fn)
{
	if (folio_batch_add(fbatch, folio) && !folio_test_large(folio) &&
	    !lru_cache_disabled())
		return;
	folio_batch_move_lru(fbatch, move_fn);
}

static void lru_move_tail_fn(struct lruvec *lruvec, struct folio *folio)
{
	if (!folio_test_unevictable(folio)) {
		lruvec_del_folio(lruvec, folio);
		folio_clear_active(folio);
		lruvec_add_folio_tail(lruvec, folio);
		__count_vm_events(PGROTATED, folio_nr_pages(folio));
	}
}

/*
 * Writeback is about to end against a folio which has been marked for
 * immediate reclaim.  If it still appears to be reclaimable, move it
 * to the tail of the inactive list.
 *
 * folio_rotate_reclaimable() must disable IRQs, to prevent nasty races.
 */
void folio_rotate_reclaimable(struct folio *folio)
{
	if (!folio_test_locked(folio) && !folio_test_dirty(folio) &&
	    !folio_test_unevictable(folio) && folio_test_lru(folio)) {
		struct folio_batch *fbatch;
		unsigned long flags;

		folio_get(folio);
		local_lock_irqsave(&lru_rotate.lock, flags);
		fbatch = this_cpu_ptr(&lru_rotate.fbatch);
		folio_batch_add_and_move(fbatch, folio, lru_move_tail_fn);
		local_unlock_irqrestore(&lru_rotate.lock, flags);
	}
}

void lru_note_cost(struct lruvec *lruvec, bool file, unsigned int nr_pages)
{
	do {
		unsigned long lrusize;

		/*
		 * Holding lruvec->lru_lock is safe here, since we are called
		 * with either
		 * 1) the lruvec pinned in reclaim, or
		 * 2) a pre-LRU page during refault (which also holds the
		 *    rcu lock, so would be safe even if the page was on the LRU
		 *    and could move simultaneously to a new lruvec).
		 */
		spin_lock_irq(&lruvec->lru_lock);
		/* Record cost event */
		if (file)
			lruvec->file_cost += nr_pages;
		else
			lruvec->anon_cost += nr_pages;

		/*
		 * Decay previous events
		 *
		 * Because workloads change over time (and to avoid
		 * overflow) we keep these statistics as a floating
		 * average, which ends up weighing recent refaults
		 * more than old ones.
		 */
		lrusize = lruvec_page_state(lruvec, NR_INACTIVE_ANON) +
			  lruvec_page_state(lruvec, NR_ACTIVE_ANON) +
			  lruvec_page_state(lruvec, NR_INACTIVE_FILE) +
			  lruvec_page_state(lruvec, NR_ACTIVE_FILE);

		if (lruvec->file_cost + lruvec->anon_cost > lrusize / 4) {
			lruvec->file_cost /= 2;
			lruvec->anon_cost /= 2;
		}
		spin_unlock_irq(&lruvec->lru_lock);
	} while ((lruvec = parent_lruvec(lruvec)));
}

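/*
 * Worked example of the decay above, with illustrative numbers: if
 * lrusize is 40000 pages, the costs are halved whenever
 * file_cost + anon_cost exceeds 10000.  Noting 8000 file pages and then
 * 4000 anon pages leaves file_cost = 4000 and anon_cost = 2000 - the
 * older file events have already been decayed once, so recent refaults
 * carry more weight than old ones.
 */
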
void lru_note_cost_folio(struct folio *folio)
{
	lru_note_cost(folio_lruvec(folio), folio_is_file_lru(folio),
			folio_nr_pages(folio));
}

static void folio_activate_fn(struct lruvec *lruvec, struct folio *folio)
{
	if (!folio_test_active(folio) && !folio_test_unevictable(folio)) {
		long nr_pages = folio_nr_pages(folio);

		lruvec_del_folio(lruvec, folio);
		folio_set_active(folio);
		lruvec_add_folio(lruvec, folio);
		trace_mm_lru_activate(folio);

		__count_vm_events(PGACTIVATE, nr_pages);
		__count_memcg_events(lruvec_memcg(lruvec), PGACTIVATE,
				     nr_pages);
	}
}

#ifdef CONFIG_SMP
static void folio_activate_drain(int cpu)
{
	struct folio_batch *fbatch = &per_cpu(cpu_fbatches.activate, cpu);

	if (folio_batch_count(fbatch))
		folio_batch_move_lru(fbatch, folio_activate_fn);
}

static void folio_activate(struct folio *folio)
{
	if (folio_test_lru(folio) && !folio_test_active(folio) &&
	    !folio_test_unevictable(folio)) {
		struct folio_batch *fbatch;

		folio_get(folio);
		local_lock(&cpu_fbatches.lock);
		fbatch = this_cpu_ptr(&cpu_fbatches.activate);
		folio_batch_add_and_move(fbatch, folio, folio_activate_fn);
		local_unlock(&cpu_fbatches.lock);
	}
}

#else
static inline void folio_activate_drain(int cpu)
{
}

static void folio_activate(struct folio *folio)
{
	struct lruvec *lruvec;

	if (folio_test_clear_lru(folio)) {
		lruvec = folio_lruvec_lock_irq(folio);
		folio_activate_fn(lruvec, folio);
		unlock_page_lruvec_irq(lruvec);
		folio_set_lru(folio);
	}
}
#endif

static void __lru_cache_activate_folio(struct folio *folio)
{
	struct folio_batch *fbatch;
	int i;

	local_lock(&cpu_fbatches.lock);
	fbatch = this_cpu_ptr(&cpu_fbatches.lru_add);

	/*
	 * Search backwards on the optimistic assumption that the folio being
	 * activated has just been added to this batch. Note that only
	 * the local batch is examined as a !LRU folio could be in the
	 * process of being released, reclaimed, migrated or on a remote
	 * batch that is currently being drained. Furthermore, marking
	 * a remote batch's folio active potentially hits a race where
	 * a folio is marked active just after it is added to the inactive
	 * list causing accounting errors and BUG_ON checks to trigger.
	 */
	for (i = folio_batch_count(fbatch) - 1; i >= 0; i--) {
		struct folio *batch_folio = fbatch->folios[i];

		if (batch_folio == folio) {
			folio_set_active(folio);
			break;
		}
	}

	local_unlock(&cpu_fbatches.lock);
}

/*
 * Mark a page as having seen activity.
 *
 * inactive,unreferenced	->	inactive,referenced
 * inactive,referenced		->	active,unreferenced
 * active,unreferenced		->	active,referenced
 *
 * When a newly allocated page is not yet visible, so safe for non-atomic ops,
 * __SetPageReferenced(page) may be substituted for mark_page_accessed(page).
 */
void folio_mark_accessed(struct folio *folio)
{
	if (!folio_test_referenced(folio)) {
		folio_set_referenced(folio);
	} else if (folio_test_unevictable(folio)) {
		/*
		 * Unevictable pages are on the "LRU_UNEVICTABLE" list. But,
		 * this list is never rotated or maintained, so marking an
		 * unevictable page accessed has no effect.
		 */
	} else if (!folio_test_active(folio)) {
		/*
		 * If the folio is on the LRU, queue it for activation via
		 * cpu_fbatches.activate. Otherwise, assume the folio is in a
		 * folio_batch, mark it active and it'll be moved to the active
		 * LRU on the next drain.
		 */
		if (folio_test_lru(folio))
			folio_activate(folio);
		else
			__lru_cache_activate_folio(folio);
		folio_clear_referenced(folio);
		workingset_activation(folio);
	}
	if (folio_test_idle(folio))
		folio_clear_idle(folio);
}
EXPORT_SYMBOL(folio_mark_accessed);

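/*
 * Illustrative sketch, not part of this file: two accesses are needed
 * to promote a cold folio, matching the transitions documented above.
 */
#if 0
	folio_mark_accessed(folio);	/* inactive,unreferenced -> inactive,referenced */
	folio_mark_accessed(folio);	/* inactive,referenced -> active,unreferenced */
#endif
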
/**
 * folio_add_lru - Add a folio to an LRU list.
 * @folio: The folio to be added to the LRU.
 *
 * Queue the folio for addition to the LRU. The decision on whether
 * to add the page to the [in]active [file|anon] list is deferred until the
 * folio_batch is drained. This gives a chance for the caller of
 * folio_add_lru() to have the folio added to the active list using
 * folio_mark_accessed().
 */
void folio_add_lru(struct folio *folio)
{
	struct folio_batch *fbatch;

	VM_BUG_ON_FOLIO(folio_test_active(folio) &&
			folio_test_unevictable(folio), folio);
	VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);

	folio_get(folio);
	local_lock(&cpu_fbatches.lock);
	fbatch = this_cpu_ptr(&cpu_fbatches.lru_add);
	folio_batch_add_and_move(fbatch, folio, lru_add_fn);
	local_unlock(&cpu_fbatches.lock);
}
EXPORT_SYMBOL(folio_add_lru);

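/*
 * Illustrative sketch, not part of this file: because the folio sits in
 * a per-CPU batch until the batch is drained, a caller can still have
 * it land on the active list after queueing it.
 */
#if 0
	folio_add_lru(folio);		/* queued; not yet on any LRU list */
	folio_mark_accessed(folio);	/* found in the local batch, set active */
#endif
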
/**
 * lru_cache_add_inactive_or_unevictable
 * @page:  the page to be added to LRU
 * @vma:   vma in which page is mapped for determining reclaimability
 *
 * Place @page on the inactive or unevictable LRU list, depending on its
 * evictability.
 */
void lru_cache_add_inactive_or_unevictable(struct page *page,
					 struct vm_area_struct *vma)
{
	VM_BUG_ON_PAGE(PageLRU(page), page);

	if (unlikely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) == VM_LOCKED))
		mlock_new_page(page);
	else
		lru_cache_add(page);
}

/*
 * If the folio cannot be invalidated, it is moved to the
 * inactive list to speed up its reclaim.  It is moved to the
 * head of the list, rather than the tail, to give the flusher
 * threads some time to write it out, as this is much more
 * effective than the single-page writeout from reclaim.
 *
 * If the folio isn't mapped and dirty/writeback, the folio
 * could be reclaimed asap using the reclaim flag.
 *
 * 1. active, mapped folio -> none
 * 2. active, dirty/writeback folio -> inactive, head, reclaim
 * 3. inactive, mapped folio -> none
 * 4. inactive, dirty/writeback folio -> inactive, head, reclaim
 * 5. inactive, clean -> inactive, tail
 * 6. Others -> none
 *
 * In 4, it moves to the head of the inactive list so the folio is
 * written out by flusher threads as this is much more efficient
 * than the single-page writeout from reclaim.
 */
static void lru_deactivate_file_fn(struct lruvec *lruvec, struct folio *folio)
{
	bool active = folio_test_active(folio);
	long nr_pages = folio_nr_pages(folio);

	if (folio_test_unevictable(folio))
		return;

	/* Some processes are using the folio */
	if (folio_mapped(folio))
		return;

	lruvec_del_folio(lruvec, folio);
	folio_clear_active(folio);
	folio_clear_referenced(folio);

	if (folio_test_writeback(folio) || folio_test_dirty(folio)) {
		/*
		 * Setting the reclaim flag could race with
		 * folio_end_writeback() and confuse readahead.  But the
		 * race window is _really_ small and it's not a critical
		 * problem.
		 */
		lruvec_add_folio(lruvec, folio);
		folio_set_reclaim(folio);
	} else {
		/*
		 * The folio's writeback ended while it was in the batch.
		 * We move that folio to the tail of the inactive list.
		 */
		lruvec_add_folio_tail(lruvec, folio);
		__count_vm_events(PGROTATED, nr_pages);
	}

	if (active) {
		__count_vm_events(PGDEACTIVATE, nr_pages);
		__count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE,
				     nr_pages);
	}
}

static void lru_deactivate_fn(struct lruvec *lruvec, struct folio *folio)
{
	if (folio_test_active(folio) && !folio_test_unevictable(folio)) {
		long nr_pages = folio_nr_pages(folio);

		lruvec_del_folio(lruvec, folio);
		folio_clear_active(folio);
		folio_clear_referenced(folio);
		lruvec_add_folio(lruvec, folio);

		__count_vm_events(PGDEACTIVATE, nr_pages);
		__count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE,
				     nr_pages);
	}
}

static void lru_lazyfree_fn(struct lruvec *lruvec, struct folio *folio)
{
	if (folio_test_anon(folio) && folio_test_swapbacked(folio) &&
	    !folio_test_swapcache(folio) && !folio_test_unevictable(folio)) {
		long nr_pages = folio_nr_pages(folio);

		lruvec_del_folio(lruvec, folio);
		folio_clear_active(folio);
		folio_clear_referenced(folio);
		/*
		 * Lazyfree folios are clean anonymous folios.  They have
		 * the swapbacked flag cleared, to distinguish them from normal
		 * anonymous folios.
		 */
		folio_clear_swapbacked(folio);
		lruvec_add_folio(lruvec, folio);

		__count_vm_events(PGLAZYFREE, nr_pages);
		__count_memcg_events(lruvec_memcg(lruvec), PGLAZYFREE,
				     nr_pages);
	}
}

/*
 * Drain pages out of the cpu's folio_batch.
 * Either "cpu" is the current CPU, and preemption has already been
 * disabled; or "cpu" is being hot-unplugged, and is already dead.
 */
void lru_add_drain_cpu(int cpu)
{
	struct folio_batch *fbatch = &per_cpu(cpu_fbatches.lru_add, cpu);

	if (folio_batch_count(fbatch))
		folio_batch_move_lru(fbatch, lru_add_fn);

	fbatch = &per_cpu(lru_rotate.fbatch, cpu);
	/* Disabling interrupts below acts as a compiler barrier. */
	if (data_race(folio_batch_count(fbatch))) {
		unsigned long flags;

		/* No harm done if a racing interrupt already did this */
		local_lock_irqsave(&lru_rotate.lock, flags);
		folio_batch_move_lru(fbatch, lru_move_tail_fn);
		local_unlock_irqrestore(&lru_rotate.lock, flags);
	}

	fbatch = &per_cpu(cpu_fbatches.lru_deactivate_file, cpu);
	if (folio_batch_count(fbatch))
		folio_batch_move_lru(fbatch, lru_deactivate_file_fn);

	fbatch = &per_cpu(cpu_fbatches.lru_deactivate, cpu);
	if (folio_batch_count(fbatch))
		folio_batch_move_lru(fbatch, lru_deactivate_fn);

	fbatch = &per_cpu(cpu_fbatches.lru_lazyfree, cpu);
	if (folio_batch_count(fbatch))
		folio_batch_move_lru(fbatch, lru_lazyfree_fn);

	folio_activate_drain(cpu);
}

/**
 * deactivate_file_folio() - Deactivate a file folio.
 * @folio: Folio to deactivate.
 *
 * This function hints to the VM that @folio is a good reclaim candidate,
 * for example if its invalidation fails due to the folio being dirty
 * or under writeback.
 *
 * Context: Caller holds a reference on the folio.
 */
void deactivate_file_folio(struct folio *folio)
{
	struct folio_batch *fbatch;

	/* Deactivating an unevictable folio will not accelerate reclaim */
	if (folio_test_unevictable(folio))
		return;

	folio_get(folio);
	local_lock(&cpu_fbatches.lock);
	fbatch = this_cpu_ptr(&cpu_fbatches.lru_deactivate_file);
	folio_batch_add_and_move(fbatch, folio, lru_deactivate_file_fn);
	local_unlock(&cpu_fbatches.lock);
}

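/*
 * Illustrative sketch, not part of this file: the typical caller is a
 * failed invalidation.  try_invalidate() is a hypothetical stand-in for
 * the real invalidation helper.
 */
#if 0
	if (!try_invalidate(mapping, folio))
		deactivate_file_folio(folio);	/* hint: good reclaim candidate */
#endif
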
/*
 * deactivate_page - deactivate a page
 * @page: page to deactivate
 *
 * deactivate_page() moves @page to the inactive list if @page was on the active
 * list and was not an unevictable page.  This is done to accelerate the reclaim
 * of @page.
 */
void deactivate_page(struct page *page)
{
	struct folio *folio = page_folio(page);

	if (folio_test_lru(folio) && folio_test_active(folio) &&
	    !folio_test_unevictable(folio)) {
		struct folio_batch *fbatch;

		folio_get(folio);
		local_lock(&cpu_fbatches.lock);
		fbatch = this_cpu_ptr(&cpu_fbatches.lru_deactivate);
		folio_batch_add_and_move(fbatch, folio, lru_deactivate_fn);
		local_unlock(&cpu_fbatches.lock);
	}
}

/*
 * mark_page_lazyfree - make an anon page lazyfree
 * @page: page to deactivate
 *
 * mark_page_lazyfree() moves @page to the inactive file list.
 * This is done to accelerate the reclaim of @page.
 */
void mark_page_lazyfree(struct page *page)
{
	struct folio *folio = page_folio(page);

	if (folio_test_lru(folio) && folio_test_anon(folio) &&
	    folio_test_swapbacked(folio) && !folio_test_swapcache(folio) &&
	    !folio_test_unevictable(folio)) {
		struct folio_batch *fbatch;

		folio_get(folio);
		local_lock(&cpu_fbatches.lock);
		fbatch = this_cpu_ptr(&cpu_fbatches.lru_lazyfree);
		folio_batch_add_and_move(fbatch, folio, lru_lazyfree_fn);
		local_unlock(&cpu_fbatches.lock);
	}
}

void lru_add_drain(void)
{
	local_lock(&cpu_fbatches.lock);
	lru_add_drain_cpu(smp_processor_id());
	local_unlock(&cpu_fbatches.lock);
	mlock_page_drain_local();
}

/*
 * In the SMP case this is called from per-cpu workqueue context, so
 * lru_add_drain_cpu() and invalidate_bh_lrus_cpu() run on the same CPU.
 * In the !SMP case this is not a problem, since there is only one core
 * and the locks disable preemption.
 */
static void lru_add_and_bh_lrus_drain(void)
{
	local_lock(&cpu_fbatches.lock);
	lru_add_drain_cpu(smp_processor_id());
	local_unlock(&cpu_fbatches.lock);
	invalidate_bh_lrus_cpu();
	mlock_page_drain_local();
}

void lru_add_drain_cpu_zone(struct zone *zone)
{
	local_lock(&cpu_fbatches.lock);
	lru_add_drain_cpu(smp_processor_id());
	drain_local_pages(zone);
	local_unlock(&cpu_fbatches.lock);
	mlock_page_drain_local();
}

#ifdef CONFIG_SMP

static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);

static void lru_add_drain_per_cpu(struct work_struct *dummy)
{
	lru_add_and_bh_lrus_drain();
}

static bool cpu_needs_drain(unsigned int cpu)
{
	struct cpu_fbatches *fbatches = &per_cpu(cpu_fbatches, cpu);

	/* Check these in order of likelihood that they're not zero */
	return folio_batch_count(&fbatches->lru_add) ||
		data_race(folio_batch_count(&per_cpu(lru_rotate.fbatch, cpu))) ||
		folio_batch_count(&fbatches->lru_deactivate_file) ||
		folio_batch_count(&fbatches->lru_deactivate) ||
		folio_batch_count(&fbatches->lru_lazyfree) ||
		folio_batch_count(&fbatches->activate) ||
		need_mlock_page_drain(cpu) ||
		has_bh_in_lru(cpu, NULL);
}

/*
 * Doesn't need any cpu hotplug locking because we do rely on per-cpu
 * kworkers being shut down before our page_alloc_cpu_dead callback is
 * executed on the offlined cpu.
 * Calling this function with cpu hotplug locks held can actually lead
 * to obscure indirect dependencies via WQ context.
 */
static inline void __lru_add_drain_all(bool force_all_cpus)
{
	/*
	 * lru_drain_gen - Global pages generation number
	 *
	 * (A) Definition: global lru_drain_gen = x implies that all generations
	 *     0 < n <= x are already *scheduled* for draining.
	 *
	 * This is an optimization for the highly-contended use case where a
	 * user space workload keeps constantly generating a flow of pages for
	 * each CPU.
	 */
	static unsigned int lru_drain_gen;
	static struct cpumask has_work;
	static DEFINE_MUTEX(lock);
	unsigned cpu, this_gen;

	/*
	 * Make sure nobody triggers this path before mm_percpu_wq is fully
	 * initialized.
	 */
	if (WARN_ON(!mm_percpu_wq))
		return;

	/*
	 * Guarantee folio_batch counter stores visible by this CPU
	 * are visible to other CPUs before loading the current drain
	 * generation.
	 */
	smp_mb();

	/*
	 * (B) Locally cache global LRU draining generation number
	 *
	 * The read barrier ensures that the counter is loaded before the mutex
	 * is taken. It pairs with smp_mb() inside the mutex critical section
	 * of (D).
	 */
	this_gen = smp_load_acquire(&lru_drain_gen);

	mutex_lock(&lock);

	/*
	 * (C) Exit the draining operation if a newer generation, from another
	 * lru_add_drain_all(), was already scheduled for draining. Check (A).
	 */
	if (unlikely(this_gen != lru_drain_gen && !force_all_cpus))
		goto done;

	/*
	 * (D) Increment global generation number
	 *
	 * Pairs with smp_load_acquire() at (B), outside of the critical
	 * section. Use a full memory barrier to guarantee that the
	 * new global drain generation number is stored before loading
	 * folio_batch counters.
	 *
	 * This pairing must be done here, before the for_each_online_cpu loop
	 * below which drains the page vectors.
	 *
	 * Let x, y, and z represent some system CPU numbers, where x < y < z.
	 * Assume CPU #z is in the middle of the for_each_online_cpu loop
	 * below and has already reached CPU #y's per-cpu data. CPU #x comes
	 * along, adds some pages to its per-cpu vectors, then calls
	 * lru_add_drain_all().
	 *
	 * If the paired barrier is done at any later step, e.g. after the
	 * loop, CPU #x will just exit at (C) and miss flushing out all of its
	 * added pages.
	 */
	WRITE_ONCE(lru_drain_gen, lru_drain_gen + 1);
	smp_mb();

	cpumask_clear(&has_work);
	for_each_online_cpu(cpu) {
		struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);

		if (cpu_needs_drain(cpu)) {
			INIT_WORK(work, lru_add_drain_per_cpu);
			queue_work_on(cpu, mm_percpu_wq, work);
			__cpumask_set_cpu(cpu, &has_work);
		}
	}

	for_each_cpu(cpu, &has_work)
		flush_work(&per_cpu(lru_add_drain_work, cpu));

done:
	mutex_unlock(&lock);
}

void lru_add_drain_all(void)
{
	__lru_add_drain_all(false);
}
#else
void lru_add_drain_all(void)
{
	lru_add_drain();
}
#endif /* CONFIG_SMP */

atomic_t lru_disable_count = ATOMIC_INIT(0);

/*
 * lru_cache_disable() needs to be called before we start compiling
 * a list of pages to be migrated using isolate_lru_page().
 * It drains the pages on the LRU caches and then disables them on all
 * CPUs until lru_cache_enable() is called.
 *
 * Must be paired with a call to lru_cache_enable().
 */
void lru_cache_disable(void)
{
	atomic_inc(&lru_disable_count);
	/*
	 * Readers of lru_disable_count are protected by either disabling
	 * preemption or rcu_read_lock:
	 *
	 * preempt_disable, local_irq_disable	[bh_lru_lock()]
	 * rcu_read_lock			[rt_spin_lock CONFIG_PREEMPT_RT]
	 * preempt_disable			[local_lock !CONFIG_PREEMPT_RT]
	 *
	 * Since v5.1 kernel, synchronize_rcu() is guaranteed to wait on
	 * preempt_disable() regions of code. So any CPU which sees
	 * lru_disable_count = 0 will have exited the critical
	 * section when synchronize_rcu() returns.
	 */
	synchronize_rcu_expedited();
#ifdef CONFIG_SMP
	__lru_add_drain_all(true);
#else
	lru_add_and_bh_lrus_drain();
#endif
}

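/*
 * Illustrative sketch, not part of this file: the intended pairing
 * around a pass that isolates pages for migration.
 */
#if 0
	lru_cache_disable();
	/* ... isolate_lru_page() each candidate and migrate the list ... */
	lru_cache_enable();
#endif
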
/**
 * release_pages - batched put_page()
 * @pages: array of pages to release
 * @nr: number of pages
 *
 * Decrement the reference count on all the pages in @pages.  If it
 * fell to zero, remove the page from the LRU and free it.
 */
void release_pages(struct page **pages, int nr)
{
	int i;
	LIST_HEAD(pages_to_free);
	struct lruvec *lruvec = NULL;
	unsigned long flags = 0;
	unsigned int lock_batch;

	for (i = 0; i < nr; i++) {
		struct page *page = pages[i];
		struct folio *folio = page_folio(page);

		/*
		 * Make sure the IRQ-safe lock-holding time does not get
		 * excessive with a continuous string of pages from the
		 * same lruvec. The lock is held only if lruvec != NULL.
		 */
		if (lruvec && ++lock_batch == SWAP_CLUSTER_MAX) {
			unlock_page_lruvec_irqrestore(lruvec, flags);
			lruvec = NULL;
		}

		page = &folio->page;
		if (is_huge_zero_page(page))
			continue;

		if (is_zone_device_page(page)) {
			if (lruvec) {
				unlock_page_lruvec_irqrestore(lruvec, flags);
				lruvec = NULL;
			}
			if (put_devmap_managed_page(page))
				continue;
			if (put_page_testzero(page))
				free_zone_device_page(page);
			continue;
		}

		if (!put_page_testzero(page))
			continue;

		if (PageCompound(page)) {
			if (lruvec) {
				unlock_page_lruvec_irqrestore(lruvec, flags);
				lruvec = NULL;
			}
			__put_compound_page(page);
			continue;
		}

		if (PageLRU(page)) {
			struct lruvec *prev_lruvec = lruvec;

			lruvec = folio_lruvec_relock_irqsave(folio, lruvec,
									&flags);
			if (prev_lruvec != lruvec)
				lock_batch = 0;

			del_page_from_lru_list(page, lruvec);
			__clear_page_lru_flags(page);
		}

		/*
		 * In rare cases, when truncation or holepunching raced with
		 * munlock after VM_LOCKED was cleared, Mlocked may still be
		 * found set here.  This does not indicate a problem, unless
		 * "unevictable_pgs_cleared" appears worryingly large.
		 */
		if (unlikely(PageMlocked(page))) {
			__ClearPageMlocked(page);
			dec_zone_page_state(page, NR_MLOCK);
			count_vm_event(UNEVICTABLE_PGCLEARED);
		}

		list_add(&page->lru, &pages_to_free);
	}
	if (lruvec)
		unlock_page_lruvec_irqrestore(lruvec, flags);

	mem_cgroup_uncharge_list(&pages_to_free);
	free_unref_page_list(&pages_to_free);
}
EXPORT_SYMBOL(release_pages);

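/*
 * Illustrative sketch, not part of this file: releasing a batch in one
 * call amortizes the lruvec lock instead of taking it per put_page().
 */
#if 0
	release_pages(pages, nr);	/* behaves like nr put_page() calls */
#endif
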
/*
 * The pages which we're about to release may be in the deferred lru-addition
 * queues.  That would prevent them from really being freed right now.  That's
 * OK from a correctness point of view but is inefficient - those pages may be
 * cache-warm and we want to give them back to the page allocator ASAP.
 *
 * So __pagevec_release() will drain those queues here.
 * folio_batch_move_lru() calls folios_put() directly to avoid
 * mutual recursion.
 */
void __pagevec_release(struct pagevec *pvec)
{
	if (!pvec->percpu_pvec_drained) {
		lru_add_drain();
		pvec->percpu_pvec_drained = true;
	}
	release_pages(pvec->pages, pagevec_count(pvec));
	pagevec_reinit(pvec);
}
EXPORT_SYMBOL(__pagevec_release);

/**
 * folio_batch_remove_exceptionals() - Prune non-folios from a batch.
 * @fbatch: The batch to prune
 *
 * find_get_entries() fills a batch with both folios and shadow/swap/DAX
 * entries.  This function prunes all the non-folio entries from @fbatch
 * without leaving holes, so that it can be passed on to folio-only batch
 * operations.
 */
void folio_batch_remove_exceptionals(struct folio_batch *fbatch)
{
	unsigned int i, j;

	for (i = 0, j = 0; i < folio_batch_count(fbatch); i++) {
		struct folio *folio = fbatch->folios[i];
		if (!xa_is_value(folio))
			fbatch->folios[j++] = folio;
	}
	fbatch->nr = j;
}

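/*
 * Illustrative sketch, not part of this file: pruning value entries
 * after a mixed lookup so the batch can go to folio-only helpers.
 * The mapping and indices variables are assumed from the caller.
 */
#if 0
	if (find_get_entries(mapping, 0, ULONG_MAX, &fbatch, indices)) {
		folio_batch_remove_exceptionals(&fbatch);
		/* ... use fbatch.folios[0 .. folio_batch_count() - 1] ... */
		folio_batch_release(&fbatch);
	}
#endif
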
/**
 * pagevec_lookup_range - gang pagecache lookup
 * @pvec:	Where the resulting pages are placed
 * @mapping:	The address_space to search
 * @start:	The starting page index
 * @end:	The final page index
 *
 * pagevec_lookup_range() will search for & return a group of up to
 * PAGEVEC_SIZE pages in the mapping starting from index @start and up to
 * index @end (inclusive).  The pages are placed in @pvec.  pagevec_lookup()
 * takes a reference against the pages in @pvec.
 *
 * The search returns a group of mapping-contiguous pages with ascending
 * indexes.  There may be holes in the indices due to not-present pages. We
 * also update @start to index the next page for the traversal.
 *
 * pagevec_lookup_range() returns the number of pages which were found. If this
 * number is smaller than PAGEVEC_SIZE, the end of the specified range has been
 * reached.
 */
unsigned pagevec_lookup_range(struct pagevec *pvec,
		struct address_space *mapping, pgoff_t *start, pgoff_t end)
{
	pvec->nr = find_get_pages_range(mapping, start, end, PAGEVEC_SIZE,
					pvec->pages);
	return pagevec_count(pvec);
}
EXPORT_SYMBOL(pagevec_lookup_range);

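/*
 * Illustrative sketch, not part of this file: the usual traversal loop.
 * @start is advanced by the lookup itself, so the caller just repeats
 * until a short pagevec signals the end of the range.  process_page()
 * is hypothetical.
 */
#if 0
	struct pagevec pvec;
	pgoff_t start = 0;
	unsigned i;

	pagevec_init(&pvec);
	while (pagevec_lookup_range(&pvec, mapping, &start, end)) {
		for (i = 0; i < pagevec_count(&pvec); i++)
			process_page(pvec.pages[i]);
		pagevec_release(&pvec);
	}
#endif
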
unsigned pagevec_lookup_range_tag(struct pagevec *pvec,
		struct address_space *mapping, pgoff_t *index, pgoff_t end,
		xa_mark_t tag)
{
	pvec->nr = find_get_pages_range_tag(mapping, index, end, tag,
					PAGEVEC_SIZE, pvec->pages);
	return pagevec_count(pvec);
}
EXPORT_SYMBOL(pagevec_lookup_range_tag);

/*
 * Perform any setup for the swap system.
 */
void __init swap_setup(void)
{
	unsigned long megs = totalram_pages() >> (20 - PAGE_SHIFT);

	/* Use a smaller cluster for small-memory machines */
	if (megs < 16)
		page_cluster = 2;
	else
		page_cluster = 3;
	/*
	 * Right now other parts of the system mean that we
	 * _really_ don't want to cluster much more.
	 */
}
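
/*
 * Worked example (illustrative): on a 1 GiB machine, megs = 1024, so
 * page_cluster = 3 and swap clustering works on 1 << 3 = 8 pages at a
 * time; below 16 MiB it drops to 1 << 2 = 4 pages.
 */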