mm: vmscan: remove remains of kswapd-managed zone->all_unreclaimable
[linux-2.6-block.git] / mm / vmscan.c
CommitLineData
1da177e4
LT
1/*
2 * linux/mm/vmscan.c
3 *
4 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
5 *
6 * Swap reorganised 29.12.95, Stephen Tweedie.
7 * kswapd added: 7.1.96 sct
8 * Removed kswapd_ctl limits, and swap out as many pages as needed
9 * to bring the system back to freepages.high: 2.4.97, Rik van Riel.
10 * Zone aware kswapd started 02/00, Kanoj Sarcar (kanoj@sgi.com).
11 * Multiqueue VM started 5.8.00, Rik van Riel.
12 */
13
b1de0d13
MH
14#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15
1da177e4
LT
16#include <linux/mm.h>
17#include <linux/module.h>
5a0e3ad6 18#include <linux/gfp.h>
1da177e4
LT
19#include <linux/kernel_stat.h>
20#include <linux/swap.h>
21#include <linux/pagemap.h>
22#include <linux/init.h>
23#include <linux/highmem.h>
70ddf637 24#include <linux/vmpressure.h>
e129b5c2 25#include <linux/vmstat.h>
1da177e4
LT
26#include <linux/file.h>
27#include <linux/writeback.h>
28#include <linux/blkdev.h>
29#include <linux/buffer_head.h> /* for try_to_release_page(),
30 buffer_heads_over_limit */
31#include <linux/mm_inline.h>
1da177e4
LT
32#include <linux/backing-dev.h>
33#include <linux/rmap.h>
34#include <linux/topology.h>
35#include <linux/cpu.h>
36#include <linux/cpuset.h>
3e7d3449 37#include <linux/compaction.h>
1da177e4
LT
38#include <linux/notifier.h>
39#include <linux/rwsem.h>
248a0301 40#include <linux/delay.h>
3218ae14 41#include <linux/kthread.h>
7dfb7103 42#include <linux/freezer.h>
66e1707b 43#include <linux/memcontrol.h>
873b4771 44#include <linux/delayacct.h>
af936a16 45#include <linux/sysctl.h>
929bea7c 46#include <linux/oom.h>
268bb0ce 47#include <linux/prefetch.h>
b1de0d13 48#include <linux/printk.h>
1da177e4
LT
49
50#include <asm/tlbflush.h>
51#include <asm/div64.h>
52
53#include <linux/swapops.h>
117aad1e 54#include <linux/balloon_compaction.h>
1da177e4 55
0f8053a5
NP
56#include "internal.h"
57
33906bc5
MG
58#define CREATE_TRACE_POINTS
59#include <trace/events/vmscan.h>
60
1da177e4 61struct scan_control {
1da177e4
LT
62 /* Incremented by the number of inactive pages that were scanned */
63 unsigned long nr_scanned;
64
a79311c1
RR
65 /* Number of pages freed so far during a call to shrink_zones() */
66 unsigned long nr_reclaimed;
67
22fba335
KM
68 /* How many pages shrink_list() should reclaim */
69 unsigned long nr_to_reclaim;
70
7b51755c
KM
71 unsigned long hibernation_mode;
72
1da177e4 73 /* This context's GFP mask */
6daa0e28 74 gfp_t gfp_mask;
1da177e4
LT
75
76 int may_writepage;
77
a6dc60f8
JW
78 /* Can mapped pages be reclaimed? */
79 int may_unmap;
f1fd1067 80
2e2e4259
KM
81 /* Can pages be swapped as part of reclaim? */
82 int may_swap;
83
5ad333eb 84 int order;
66e1707b 85
9e3b2f8c
KK
86 /* Scan (total_size >> priority) pages at once */
87 int priority;
88
688eb988
MH
89 /* anon vs. file LRUs scanning "ratio" */
90 int swappiness;
91
f16015fb
JW
92 /*
93 * The memory cgroup that hit its limit and as a result is the
94 * primary target of this reclaim invocation.
95 */
96 struct mem_cgroup *target_mem_cgroup;
66e1707b 97
327c0e96
KH
98 /*
99 * Nodemask of nodes allowed by the caller. If NULL, all nodes
100 * are scanned.
101 */
102 nodemask_t *nodemask;
1da177e4
LT
103};
104
1da177e4
LT
105#define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))
106
107#ifdef ARCH_HAS_PREFETCH
108#define prefetch_prev_lru_page(_page, _base, _field) \
109 do { \
110 if ((_page)->lru.prev != _base) { \
111 struct page *prev; \
112 \
113 prev = lru_to_page(&(_page->lru)); \
114 prefetch(&prev->_field); \
115 } \
116 } while (0)
117#else
118#define prefetch_prev_lru_page(_page, _base, _field) do { } while (0)
119#endif
120
121#ifdef ARCH_HAS_PREFETCHW
122#define prefetchw_prev_lru_page(_page, _base, _field) \
123 do { \
124 if ((_page)->lru.prev != _base) { \
125 struct page *prev; \
126 \
127 prev = lru_to_page(&(_page->lru)); \
128 prefetchw(&prev->_field); \
129 } \
130 } while (0)
131#else
132#define prefetchw_prev_lru_page(_page, _base, _field) do { } while (0)
133#endif
134
135/*
136 * From 0 .. 100. Higher means more swappy.
137 */
138int vm_swappiness = 60;
b21e0b90 139unsigned long vm_total_pages; /* The total number of pages which the VM controls */
1da177e4
LT
140
141static LIST_HEAD(shrinker_list);
142static DECLARE_RWSEM(shrinker_rwsem);
143
c255a458 144#ifdef CONFIG_MEMCG
89b5fae5
JW
145static bool global_reclaim(struct scan_control *sc)
146{
f16015fb 147 return !sc->target_mem_cgroup;
89b5fae5 148}
91a45470 149#else
89b5fae5
JW
150static bool global_reclaim(struct scan_control *sc)
151{
152 return true;
153}
91a45470
KH
154#endif
155
a1c3bfb2 156static unsigned long zone_reclaimable_pages(struct zone *zone)
6e543d57
LD
157{
158 int nr;
159
160 nr = zone_page_state(zone, NR_ACTIVE_FILE) +
161 zone_page_state(zone, NR_INACTIVE_FILE);
162
163 if (get_nr_swap_pages() > 0)
164 nr += zone_page_state(zone, NR_ACTIVE_ANON) +
165 zone_page_state(zone, NR_INACTIVE_ANON);
166
167 return nr;
168}
169
170bool zone_reclaimable(struct zone *zone)
171{
172 return zone->pages_scanned < zone_reclaimable_pages(zone) * 6;
173}
174
4d7dcca2 175static unsigned long get_lru_size(struct lruvec *lruvec, enum lru_list lru)
c9f299d9 176{
c3c787e8 177 if (!mem_cgroup_disabled())
4d7dcca2 178 return mem_cgroup_get_lru_size(lruvec, lru);
a3d8e054 179
074291fe 180 return zone_page_state(lruvec_zone(lruvec), NR_LRU_BASE + lru);
c9f299d9
KM
181}
182
1da177e4 183/*
1d3d4437 184 * Add a shrinker callback to be called from the vm.
1da177e4 185 */
1d3d4437 186int register_shrinker(struct shrinker *shrinker)
1da177e4 187{
1d3d4437
GC
188 size_t size = sizeof(*shrinker->nr_deferred);
189
190 /*
191 * If we only have one possible node in the system anyway, save
192 * ourselves the trouble and disable NUMA aware behavior. This way we
193 * will save memory and some small loop time later.
194 */
195 if (nr_node_ids == 1)
196 shrinker->flags &= ~SHRINKER_NUMA_AWARE;
197
198 if (shrinker->flags & SHRINKER_NUMA_AWARE)
199 size *= nr_node_ids;
200
201 shrinker->nr_deferred = kzalloc(size, GFP_KERNEL);
202 if (!shrinker->nr_deferred)
203 return -ENOMEM;
204
8e1f936b
RR
205 down_write(&shrinker_rwsem);
206 list_add_tail(&shrinker->list, &shrinker_list);
207 up_write(&shrinker_rwsem);
1d3d4437 208 return 0;
1da177e4 209}
8e1f936b 210EXPORT_SYMBOL(register_shrinker);
1da177e4
LT
211
212/*
213 * Remove one
214 */
8e1f936b 215void unregister_shrinker(struct shrinker *shrinker)
1da177e4
LT
216{
217 down_write(&shrinker_rwsem);
218 list_del(&shrinker->list);
219 up_write(&shrinker_rwsem);
ae393321 220 kfree(shrinker->nr_deferred);
1da177e4 221}
8e1f936b 222EXPORT_SYMBOL(unregister_shrinker);
1da177e4
LT
223
224#define SHRINK_BATCH 128
1d3d4437
GC
225
226static unsigned long
227shrink_slab_node(struct shrink_control *shrinkctl, struct shrinker *shrinker,
228 unsigned long nr_pages_scanned, unsigned long lru_pages)
229{
230 unsigned long freed = 0;
231 unsigned long long delta;
232 long total_scan;
d5bc5fd3 233 long freeable;
1d3d4437
GC
234 long nr;
235 long new_nr;
236 int nid = shrinkctl->nid;
237 long batch_size = shrinker->batch ? shrinker->batch
238 : SHRINK_BATCH;
239
d5bc5fd3
VD
240 freeable = shrinker->count_objects(shrinker, shrinkctl);
241 if (freeable == 0)
1d3d4437
GC
242 return 0;
243
244 /*
245 * copy the current shrinker scan count into a local variable
246 * and zero it so that other concurrent shrinker invocations
247 * don't also do this scanning work.
248 */
249 nr = atomic_long_xchg(&shrinker->nr_deferred[nid], 0);
250
251 total_scan = nr;
252 delta = (4 * nr_pages_scanned) / shrinker->seeks;
d5bc5fd3 253 delta *= freeable;
1d3d4437
GC
254 do_div(delta, lru_pages + 1);
255 total_scan += delta;
256 if (total_scan < 0) {
257 printk(KERN_ERR
258 "shrink_slab: %pF negative objects to delete nr=%ld\n",
a0b02131 259 shrinker->scan_objects, total_scan);
d5bc5fd3 260 total_scan = freeable;
1d3d4437
GC
261 }
262
263 /*
264 * We need to avoid excessive windup on filesystem shrinkers
265 * due to large numbers of GFP_NOFS allocations causing the
266 * shrinkers to return -1 all the time. This results in a large
267 * nr being built up so when a shrink that can do some work
268 * comes along it empties the entire cache due to nr >>>
d5bc5fd3 269 * freeable. This is bad for sustaining a working set in
1d3d4437
GC
270 * memory.
271 *
272 * Hence only allow the shrinker to scan the entire cache when
273 * a large delta change is calculated directly.
274 */
d5bc5fd3
VD
275 if (delta < freeable / 4)
276 total_scan = min(total_scan, freeable / 2);
1d3d4437
GC
277
278 /*
279 * Avoid risking looping forever due to too large nr value:
280 * never try to free more than twice the estimate number of
281 * freeable entries.
282 */
d5bc5fd3
VD
283 if (total_scan > freeable * 2)
284 total_scan = freeable * 2;
1d3d4437
GC
285
286 trace_mm_shrink_slab_start(shrinker, shrinkctl, nr,
287 nr_pages_scanned, lru_pages,
d5bc5fd3 288 freeable, delta, total_scan);
1d3d4437 289
0b1fb40a
VD
290 /*
291 * Normally, we should not scan less than batch_size objects in one
292 * pass to avoid too frequent shrinker calls, but if the slab has less
293 * than batch_size objects in total and we are really tight on memory,
294 * we will try to reclaim all available objects, otherwise we can end
295 * up failing allocations although there are plenty of reclaimable
296 * objects spread over several slabs with usage less than the
297 * batch_size.
298 *
299 * We detect the "tight on memory" situations by looking at the total
300 * number of objects we want to scan (total_scan). If it is greater
d5bc5fd3 301 * than the total number of objects on slab (freeable), we must be
0b1fb40a
VD
302 * scanning at high prio and therefore should try to reclaim as much as
303 * possible.
304 */
305 while (total_scan >= batch_size ||
d5bc5fd3 306 total_scan >= freeable) {
a0b02131 307 unsigned long ret;
0b1fb40a 308 unsigned long nr_to_scan = min(batch_size, total_scan);
1d3d4437 309
0b1fb40a 310 shrinkctl->nr_to_scan = nr_to_scan;
a0b02131
DC
311 ret = shrinker->scan_objects(shrinker, shrinkctl);
312 if (ret == SHRINK_STOP)
313 break;
314 freed += ret;
1d3d4437 315
0b1fb40a
VD
316 count_vm_events(SLABS_SCANNED, nr_to_scan);
317 total_scan -= nr_to_scan;
1d3d4437
GC
318
319 cond_resched();
320 }
321
322 /*
323 * move the unused scan count back into the shrinker in a
324 * manner that handles concurrent updates. If we exhausted the
325 * scan, there is no need to do an update.
326 */
327 if (total_scan > 0)
328 new_nr = atomic_long_add_return(total_scan,
329 &shrinker->nr_deferred[nid]);
330 else
331 new_nr = atomic_long_read(&shrinker->nr_deferred[nid]);
332
df9024a8 333 trace_mm_shrink_slab_end(shrinker, nid, freed, nr, new_nr, total_scan);
1d3d4437 334 return freed;
1495f230
YH
335}
336
1da177e4
LT
337/*
338 * Call the shrink functions to age shrinkable caches
339 *
340 * Here we assume it costs one seek to replace a lru page and that it also
341 * takes a seek to recreate a cache object. With this in mind we age equal
342 * percentages of the lru and ageable caches. This should balance the seeks
343 * generated by these structures.
344 *
183ff22b 345 * If the vm encountered mapped pages on the LRU it increase the pressure on
1da177e4
LT
346 * slab to avoid swapping.
347 *
348 * We do weird things to avoid (scanned*seeks*entries) overflowing 32 bits.
349 *
350 * `lru_pages' represents the number of on-LRU pages in all the zones which
351 * are eligible for the caller's allocation attempt. It is used for balancing
352 * slab reclaim versus page reclaim.
b15e0905 353 *
354 * Returns the number of slab objects which we shrunk.
1da177e4 355 */
24f7c6b9 356unsigned long shrink_slab(struct shrink_control *shrinkctl,
1495f230 357 unsigned long nr_pages_scanned,
a09ed5e0 358 unsigned long lru_pages)
1da177e4
LT
359{
360 struct shrinker *shrinker;
24f7c6b9 361 unsigned long freed = 0;
1da177e4 362
1495f230
YH
363 if (nr_pages_scanned == 0)
364 nr_pages_scanned = SWAP_CLUSTER_MAX;
1da177e4 365
f06590bd 366 if (!down_read_trylock(&shrinker_rwsem)) {
24f7c6b9
DC
367 /*
368 * If we would return 0, our callers would understand that we
369 * have nothing else to shrink and give up trying. By returning
370 * 1 we keep it going and assume we'll be able to shrink next
371 * time.
372 */
373 freed = 1;
f06590bd
MK
374 goto out;
375 }
1da177e4
LT
376
377 list_for_each_entry(shrinker, &shrinker_list, list) {
ec97097b
VD
378 if (!(shrinker->flags & SHRINKER_NUMA_AWARE)) {
379 shrinkctl->nid = 0;
1d3d4437 380 freed += shrink_slab_node(shrinkctl, shrinker,
ec97097b
VD
381 nr_pages_scanned, lru_pages);
382 continue;
383 }
384
385 for_each_node_mask(shrinkctl->nid, shrinkctl->nodes_to_scan) {
386 if (node_online(shrinkctl->nid))
387 freed += shrink_slab_node(shrinkctl, shrinker,
388 nr_pages_scanned, lru_pages);
1da177e4 389
1da177e4 390 }
1da177e4
LT
391 }
392 up_read(&shrinker_rwsem);
f06590bd
MK
393out:
394 cond_resched();
24f7c6b9 395 return freed;
1da177e4
LT
396}
397
1da177e4
LT
398static inline int is_page_cache_freeable(struct page *page)
399{
ceddc3a5
JW
400 /*
401 * A freeable page cache page is referenced only by the caller
402 * that isolated the page, the page cache radix tree and
403 * optional buffer heads at page->private.
404 */
edcf4748 405 return page_count(page) - page_has_private(page) == 2;
1da177e4
LT
406}
407
7d3579e8
KM
408static int may_write_to_queue(struct backing_dev_info *bdi,
409 struct scan_control *sc)
1da177e4 410{
930d9152 411 if (current->flags & PF_SWAPWRITE)
1da177e4
LT
412 return 1;
413 if (!bdi_write_congested(bdi))
414 return 1;
415 if (bdi == current->backing_dev_info)
416 return 1;
417 return 0;
418}
419
420/*
421 * We detected a synchronous write error writing a page out. Probably
422 * -ENOSPC. We need to propagate that into the address_space for a subsequent
423 * fsync(), msync() or close().
424 *
425 * The tricky part is that after writepage we cannot touch the mapping: nothing
426 * prevents it from being freed up. But we have a ref on the page and once
427 * that page is locked, the mapping is pinned.
428 *
429 * We're allowed to run sleeping lock_page() here because we know the caller has
430 * __GFP_FS.
431 */
432static void handle_write_error(struct address_space *mapping,
433 struct page *page, int error)
434{
7eaceacc 435 lock_page(page);
3e9f45bd
GC
436 if (page_mapping(page) == mapping)
437 mapping_set_error(mapping, error);
1da177e4
LT
438 unlock_page(page);
439}
440
04e62a29
CL
441/* possible outcome of pageout() */
442typedef enum {
443 /* failed to write page out, page is locked */
444 PAGE_KEEP,
445 /* move page to the active list, page is locked */
446 PAGE_ACTIVATE,
447 /* page has been sent to the disk successfully, page is unlocked */
448 PAGE_SUCCESS,
449 /* page is clean and locked */
450 PAGE_CLEAN,
451} pageout_t;
452
1da177e4 453/*
1742f19f
AM
454 * pageout is called by shrink_page_list() for each dirty page.
455 * Calls ->writepage().
1da177e4 456 */
c661b078 457static pageout_t pageout(struct page *page, struct address_space *mapping,
7d3579e8 458 struct scan_control *sc)
1da177e4
LT
459{
460 /*
461 * If the page is dirty, only perform writeback if that write
462 * will be non-blocking. To prevent this allocation from being
463 * stalled by pagecache activity. But note that there may be
464 * stalls if we need to run get_block(). We could test
465 * PagePrivate for that.
466 *
8174202b 467 * If this process is currently in __generic_file_write_iter() against
1da177e4
LT
468 * this page's queue, we can perform writeback even if that
469 * will block.
470 *
471 * If the page is swapcache, write it back even if that would
472 * block, for some throttling. This happens by accident, because
473 * swap_backing_dev_info is bust: it doesn't reflect the
474 * congestion state of the swapdevs. Easy to fix, if needed.
1da177e4
LT
475 */
476 if (!is_page_cache_freeable(page))
477 return PAGE_KEEP;
478 if (!mapping) {
479 /*
480 * Some data journaling orphaned pages can have
481 * page->mapping == NULL while being dirty with clean buffers.
482 */
266cf658 483 if (page_has_private(page)) {
1da177e4
LT
484 if (try_to_free_buffers(page)) {
485 ClearPageDirty(page);
b1de0d13 486 pr_info("%s: orphaned page\n", __func__);
1da177e4
LT
487 return PAGE_CLEAN;
488 }
489 }
490 return PAGE_KEEP;
491 }
492 if (mapping->a_ops->writepage == NULL)
493 return PAGE_ACTIVATE;
0e093d99 494 if (!may_write_to_queue(mapping->backing_dev_info, sc))
1da177e4
LT
495 return PAGE_KEEP;
496
497 if (clear_page_dirty_for_io(page)) {
498 int res;
499 struct writeback_control wbc = {
500 .sync_mode = WB_SYNC_NONE,
501 .nr_to_write = SWAP_CLUSTER_MAX,
111ebb6e
OH
502 .range_start = 0,
503 .range_end = LLONG_MAX,
1da177e4
LT
504 .for_reclaim = 1,
505 };
506
507 SetPageReclaim(page);
508 res = mapping->a_ops->writepage(page, &wbc);
509 if (res < 0)
510 handle_write_error(mapping, page, res);
994fc28c 511 if (res == AOP_WRITEPAGE_ACTIVATE) {
1da177e4
LT
512 ClearPageReclaim(page);
513 return PAGE_ACTIVATE;
514 }
c661b078 515
1da177e4
LT
516 if (!PageWriteback(page)) {
517 /* synchronous write or broken a_ops? */
518 ClearPageReclaim(page);
519 }
23b9da55 520 trace_mm_vmscan_writepage(page, trace_reclaim_flags(page));
e129b5c2 521 inc_zone_page_state(page, NR_VMSCAN_WRITE);
1da177e4
LT
522 return PAGE_SUCCESS;
523 }
524
525 return PAGE_CLEAN;
526}
527
a649fd92 528/*
e286781d
NP
529 * Same as remove_mapping, but if the page is removed from the mapping, it
530 * gets returned with a refcount of 0.
a649fd92 531 */
a528910e
JW
532static int __remove_mapping(struct address_space *mapping, struct page *page,
533 bool reclaimed)
49d2e9cc 534{
28e4d965
NP
535 BUG_ON(!PageLocked(page));
536 BUG_ON(mapping != page_mapping(page));
49d2e9cc 537
19fd6231 538 spin_lock_irq(&mapping->tree_lock);
49d2e9cc 539 /*
0fd0e6b0
NP
540 * The non racy check for a busy page.
541 *
542 * Must be careful with the order of the tests. When someone has
543 * a ref to the page, it may be possible that they dirty it then
544 * drop the reference. So if PageDirty is tested before page_count
545 * here, then the following race may occur:
546 *
547 * get_user_pages(&page);
548 * [user mapping goes away]
549 * write_to(page);
550 * !PageDirty(page) [good]
551 * SetPageDirty(page);
552 * put_page(page);
553 * !page_count(page) [good, discard it]
554 *
555 * [oops, our write_to data is lost]
556 *
557 * Reversing the order of the tests ensures such a situation cannot
558 * escape unnoticed. The smp_rmb is needed to ensure the page->flags
559 * load is not satisfied before that of page->_count.
560 *
561 * Note that if SetPageDirty is always performed via set_page_dirty,
562 * and thus under tree_lock, then this ordering is not required.
49d2e9cc 563 */
e286781d 564 if (!page_freeze_refs(page, 2))
49d2e9cc 565 goto cannot_free;
e286781d
NP
566 /* note: atomic_cmpxchg in page_freeze_refs provides the smp_rmb */
567 if (unlikely(PageDirty(page))) {
568 page_unfreeze_refs(page, 2);
49d2e9cc 569 goto cannot_free;
e286781d 570 }
49d2e9cc
CL
571
572 if (PageSwapCache(page)) {
573 swp_entry_t swap = { .val = page_private(page) };
574 __delete_from_swap_cache(page);
19fd6231 575 spin_unlock_irq(&mapping->tree_lock);
cb4b86ba 576 swapcache_free(swap, page);
e286781d 577 } else {
6072d13c 578 void (*freepage)(struct page *);
a528910e 579 void *shadow = NULL;
6072d13c
LT
580
581 freepage = mapping->a_ops->freepage;
a528910e
JW
582 /*
583 * Remember a shadow entry for reclaimed file cache in
584 * order to detect refaults, thus thrashing, later on.
585 *
586 * But don't store shadows in an address space that is
587 * already exiting. This is not just an optizimation,
588 * inode reclaim needs to empty out the radix tree or
589 * the nodes are lost. Don't plant shadows behind its
590 * back.
591 */
592 if (reclaimed && page_is_file_cache(page) &&
593 !mapping_exiting(mapping))
594 shadow = workingset_eviction(mapping, page);
595 __delete_from_page_cache(page, shadow);
19fd6231 596 spin_unlock_irq(&mapping->tree_lock);
e767e056 597 mem_cgroup_uncharge_cache_page(page);
6072d13c
LT
598
599 if (freepage != NULL)
600 freepage(page);
49d2e9cc
CL
601 }
602
49d2e9cc
CL
603 return 1;
604
605cannot_free:
19fd6231 606 spin_unlock_irq(&mapping->tree_lock);
49d2e9cc
CL
607 return 0;
608}
609
e286781d
NP
610/*
611 * Attempt to detach a locked page from its ->mapping. If it is dirty or if
612 * someone else has a ref on the page, abort and return 0. If it was
613 * successfully detached, return 1. Assumes the caller has a single ref on
614 * this page.
615 */
616int remove_mapping(struct address_space *mapping, struct page *page)
617{
a528910e 618 if (__remove_mapping(mapping, page, false)) {
e286781d
NP
619 /*
620 * Unfreezing the refcount with 1 rather than 2 effectively
621 * drops the pagecache ref for us without requiring another
622 * atomic operation.
623 */
624 page_unfreeze_refs(page, 1);
625 return 1;
626 }
627 return 0;
628}
629
894bc310
LS
630/**
631 * putback_lru_page - put previously isolated page onto appropriate LRU list
632 * @page: page to be put back to appropriate lru list
633 *
634 * Add previously isolated @page to appropriate LRU list.
635 * Page may still be unevictable for other reasons.
636 *
637 * lru_lock must not be held, interrupts must be enabled.
638 */
894bc310
LS
639void putback_lru_page(struct page *page)
640{
0ec3b74c 641 bool is_unevictable;
bbfd28ee 642 int was_unevictable = PageUnevictable(page);
894bc310 643
309381fe 644 VM_BUG_ON_PAGE(PageLRU(page), page);
894bc310
LS
645
646redo:
647 ClearPageUnevictable(page);
648
39b5f29a 649 if (page_evictable(page)) {
894bc310
LS
650 /*
651 * For evictable pages, we can use the cache.
652 * In event of a race, worst case is we end up with an
653 * unevictable page on [in]active list.
654 * We know how to handle that.
655 */
0ec3b74c 656 is_unevictable = false;
c53954a0 657 lru_cache_add(page);
894bc310
LS
658 } else {
659 /*
660 * Put unevictable pages directly on zone's unevictable
661 * list.
662 */
0ec3b74c 663 is_unevictable = true;
894bc310 664 add_page_to_unevictable_list(page);
6a7b9548 665 /*
21ee9f39
MK
666 * When racing with an mlock or AS_UNEVICTABLE clearing
667 * (page is unlocked) make sure that if the other thread
668 * does not observe our setting of PG_lru and fails
24513264 669 * isolation/check_move_unevictable_pages,
21ee9f39 670 * we see PG_mlocked/AS_UNEVICTABLE cleared below and move
6a7b9548
JW
671 * the page back to the evictable list.
672 *
21ee9f39 673 * The other side is TestClearPageMlocked() or shmem_lock().
6a7b9548
JW
674 */
675 smp_mb();
894bc310 676 }
894bc310
LS
677
678 /*
679 * page's status can change while we move it among lru. If an evictable
680 * page is on unevictable list, it never be freed. To avoid that,
681 * check after we added it to the list, again.
682 */
0ec3b74c 683 if (is_unevictable && page_evictable(page)) {
894bc310
LS
684 if (!isolate_lru_page(page)) {
685 put_page(page);
686 goto redo;
687 }
688 /* This means someone else dropped this page from LRU
689 * So, it will be freed or putback to LRU again. There is
690 * nothing to do here.
691 */
692 }
693
0ec3b74c 694 if (was_unevictable && !is_unevictable)
bbfd28ee 695 count_vm_event(UNEVICTABLE_PGRESCUED);
0ec3b74c 696 else if (!was_unevictable && is_unevictable)
bbfd28ee
LS
697 count_vm_event(UNEVICTABLE_PGCULLED);
698
894bc310
LS
699 put_page(page); /* drop ref from isolate */
700}
701
dfc8d636
JW
702enum page_references {
703 PAGEREF_RECLAIM,
704 PAGEREF_RECLAIM_CLEAN,
64574746 705 PAGEREF_KEEP,
dfc8d636
JW
706 PAGEREF_ACTIVATE,
707};
708
709static enum page_references page_check_references(struct page *page,
710 struct scan_control *sc)
711{
64574746 712 int referenced_ptes, referenced_page;
dfc8d636 713 unsigned long vm_flags;
dfc8d636 714
c3ac9a8a
JW
715 referenced_ptes = page_referenced(page, 1, sc->target_mem_cgroup,
716 &vm_flags);
64574746 717 referenced_page = TestClearPageReferenced(page);
dfc8d636 718
dfc8d636
JW
719 /*
720 * Mlock lost the isolation race with us. Let try_to_unmap()
721 * move the page to the unevictable list.
722 */
723 if (vm_flags & VM_LOCKED)
724 return PAGEREF_RECLAIM;
725
64574746 726 if (referenced_ptes) {
e4898273 727 if (PageSwapBacked(page))
64574746
JW
728 return PAGEREF_ACTIVATE;
729 /*
730 * All mapped pages start out with page table
731 * references from the instantiating fault, so we need
732 * to look twice if a mapped file page is used more
733 * than once.
734 *
735 * Mark it and spare it for another trip around the
736 * inactive list. Another page table reference will
737 * lead to its activation.
738 *
739 * Note: the mark is set for activated pages as well
740 * so that recently deactivated but used pages are
741 * quickly recovered.
742 */
743 SetPageReferenced(page);
744
34dbc67a 745 if (referenced_page || referenced_ptes > 1)
64574746
JW
746 return PAGEREF_ACTIVATE;
747
c909e993
KK
748 /*
749 * Activate file-backed executable pages after first usage.
750 */
751 if (vm_flags & VM_EXEC)
752 return PAGEREF_ACTIVATE;
753
64574746
JW
754 return PAGEREF_KEEP;
755 }
dfc8d636
JW
756
757 /* Reclaim if clean, defer dirty pages to writeback */
2e30244a 758 if (referenced_page && !PageSwapBacked(page))
64574746
JW
759 return PAGEREF_RECLAIM_CLEAN;
760
761 return PAGEREF_RECLAIM;
dfc8d636
JW
762}
763
e2be15f6
MG
764/* Check if a page is dirty or under writeback */
765static void page_check_dirty_writeback(struct page *page,
766 bool *dirty, bool *writeback)
767{
b4597226
MG
768 struct address_space *mapping;
769
e2be15f6
MG
770 /*
771 * Anonymous pages are not handled by flushers and must be written
772 * from reclaim context. Do not stall reclaim based on them
773 */
774 if (!page_is_file_cache(page)) {
775 *dirty = false;
776 *writeback = false;
777 return;
778 }
779
780 /* By default assume that the page flags are accurate */
781 *dirty = PageDirty(page);
782 *writeback = PageWriteback(page);
b4597226
MG
783
784 /* Verify dirty/writeback state if the filesystem supports it */
785 if (!page_has_private(page))
786 return;
787
788 mapping = page_mapping(page);
789 if (mapping && mapping->a_ops->is_dirty_writeback)
790 mapping->a_ops->is_dirty_writeback(page, dirty, writeback);
e2be15f6
MG
791}
792
1da177e4 793/*
1742f19f 794 * shrink_page_list() returns the number of reclaimed pages
1da177e4 795 */
1742f19f 796static unsigned long shrink_page_list(struct list_head *page_list,
6a18adb3 797 struct zone *zone,
f84f6e2b 798 struct scan_control *sc,
02c6de8d 799 enum ttu_flags ttu_flags,
8e950282 800 unsigned long *ret_nr_dirty,
d43006d5 801 unsigned long *ret_nr_unqueued_dirty,
8e950282 802 unsigned long *ret_nr_congested,
02c6de8d 803 unsigned long *ret_nr_writeback,
b1a6f21e 804 unsigned long *ret_nr_immediate,
02c6de8d 805 bool force_reclaim)
1da177e4
LT
806{
807 LIST_HEAD(ret_pages);
abe4c3b5 808 LIST_HEAD(free_pages);
1da177e4 809 int pgactivate = 0;
d43006d5 810 unsigned long nr_unqueued_dirty = 0;
0e093d99
MG
811 unsigned long nr_dirty = 0;
812 unsigned long nr_congested = 0;
05ff5137 813 unsigned long nr_reclaimed = 0;
92df3a72 814 unsigned long nr_writeback = 0;
b1a6f21e 815 unsigned long nr_immediate = 0;
1da177e4
LT
816
817 cond_resched();
818
69980e31 819 mem_cgroup_uncharge_start();
1da177e4
LT
820 while (!list_empty(page_list)) {
821 struct address_space *mapping;
822 struct page *page;
823 int may_enter_fs;
02c6de8d 824 enum page_references references = PAGEREF_RECLAIM_CLEAN;
e2be15f6 825 bool dirty, writeback;
1da177e4
LT
826
827 cond_resched();
828
829 page = lru_to_page(page_list);
830 list_del(&page->lru);
831
529ae9aa 832 if (!trylock_page(page))
1da177e4
LT
833 goto keep;
834
309381fe
SL
835 VM_BUG_ON_PAGE(PageActive(page), page);
836 VM_BUG_ON_PAGE(page_zone(page) != zone, page);
1da177e4
LT
837
838 sc->nr_scanned++;
80e43426 839
39b5f29a 840 if (unlikely(!page_evictable(page)))
b291f000 841 goto cull_mlocked;
894bc310 842
a6dc60f8 843 if (!sc->may_unmap && page_mapped(page))
80e43426
CL
844 goto keep_locked;
845
1da177e4
LT
846 /* Double the slab pressure for mapped and swapcache pages */
847 if (page_mapped(page) || PageSwapCache(page))
848 sc->nr_scanned++;
849
c661b078
AW
850 may_enter_fs = (sc->gfp_mask & __GFP_FS) ||
851 (PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));
852
e2be15f6
MG
853 /*
854 * The number of dirty pages determines if a zone is marked
855 * reclaim_congested which affects wait_iff_congested. kswapd
856 * will stall and start writing pages if the tail of the LRU
857 * is all dirty unqueued pages.
858 */
859 page_check_dirty_writeback(page, &dirty, &writeback);
860 if (dirty || writeback)
861 nr_dirty++;
862
863 if (dirty && !writeback)
864 nr_unqueued_dirty++;
865
d04e8acd
MG
866 /*
867 * Treat this page as congested if the underlying BDI is or if
868 * pages are cycling through the LRU so quickly that the
869 * pages marked for immediate reclaim are making it to the
870 * end of the LRU a second time.
871 */
e2be15f6 872 mapping = page_mapping(page);
d04e8acd
MG
873 if ((mapping && bdi_write_congested(mapping->backing_dev_info)) ||
874 (writeback && PageReclaim(page)))
e2be15f6
MG
875 nr_congested++;
876
283aba9f
MG
877 /*
878 * If a page at the tail of the LRU is under writeback, there
879 * are three cases to consider.
880 *
881 * 1) If reclaim is encountering an excessive number of pages
882 * under writeback and this page is both under writeback and
883 * PageReclaim then it indicates that pages are being queued
884 * for IO but are being recycled through the LRU before the
885 * IO can complete. Waiting on the page itself risks an
886 * indefinite stall if it is impossible to writeback the
887 * page due to IO error or disconnected storage so instead
b1a6f21e
MG
888 * note that the LRU is being scanned too quickly and the
889 * caller can stall after page list has been processed.
283aba9f
MG
890 *
891 * 2) Global reclaim encounters a page, memcg encounters a
892 * page that is not marked for immediate reclaim or
893 * the caller does not have __GFP_IO. In this case mark
894 * the page for immediate reclaim and continue scanning.
895 *
896 * __GFP_IO is checked because a loop driver thread might
897 * enter reclaim, and deadlock if it waits on a page for
898 * which it is needed to do the write (loop masks off
899 * __GFP_IO|__GFP_FS for this reason); but more thought
900 * would probably show more reasons.
901 *
902 * Don't require __GFP_FS, since we're not going into the
903 * FS, just waiting on its writeback completion. Worryingly,
904 * ext4 gfs2 and xfs allocate pages with
905 * grab_cache_page_write_begin(,,AOP_FLAG_NOFS), so testing
906 * may_enter_fs here is liable to OOM on them.
907 *
908 * 3) memcg encounters a page that is not already marked
909 * PageReclaim. memcg does not have any dirty pages
910 * throttling so we could easily OOM just because too many
911 * pages are in writeback and there is nothing else to
912 * reclaim. Wait for the writeback to complete.
913 */
c661b078 914 if (PageWriteback(page)) {
283aba9f
MG
915 /* Case 1 above */
916 if (current_is_kswapd() &&
917 PageReclaim(page) &&
918 zone_is_reclaim_writeback(zone)) {
b1a6f21e
MG
919 nr_immediate++;
920 goto keep_locked;
283aba9f
MG
921
922 /* Case 2 above */
923 } else if (global_reclaim(sc) ||
c3b94f44
HD
924 !PageReclaim(page) || !(sc->gfp_mask & __GFP_IO)) {
925 /*
926 * This is slightly racy - end_page_writeback()
927 * might have just cleared PageReclaim, then
928 * setting PageReclaim here end up interpreted
929 * as PageReadahead - but that does not matter
930 * enough to care. What we do want is for this
931 * page to have PageReclaim set next time memcg
932 * reclaim reaches the tests above, so it will
933 * then wait_on_page_writeback() to avoid OOM;
934 * and it's also appropriate in global reclaim.
935 */
936 SetPageReclaim(page);
e62e384e 937 nr_writeback++;
283aba9f 938
c3b94f44 939 goto keep_locked;
283aba9f
MG
940
941 /* Case 3 above */
942 } else {
943 wait_on_page_writeback(page);
e62e384e 944 }
c661b078 945 }
1da177e4 946
02c6de8d
MK
947 if (!force_reclaim)
948 references = page_check_references(page, sc);
949
dfc8d636
JW
950 switch (references) {
951 case PAGEREF_ACTIVATE:
1da177e4 952 goto activate_locked;
64574746
JW
953 case PAGEREF_KEEP:
954 goto keep_locked;
dfc8d636
JW
955 case PAGEREF_RECLAIM:
956 case PAGEREF_RECLAIM_CLEAN:
957 ; /* try to reclaim the page below */
958 }
1da177e4 959
1da177e4
LT
960 /*
961 * Anonymous process memory has backing store?
962 * Try to allocate it some swap space here.
963 */
b291f000 964 if (PageAnon(page) && !PageSwapCache(page)) {
63eb6b93
HD
965 if (!(sc->gfp_mask & __GFP_IO))
966 goto keep_locked;
5bc7b8ac 967 if (!add_to_swap(page, page_list))
1da177e4 968 goto activate_locked;
63eb6b93 969 may_enter_fs = 1;
1da177e4 970
e2be15f6
MG
971 /* Adding to swap updated mapping */
972 mapping = page_mapping(page);
973 }
1da177e4
LT
974
975 /*
976 * The page is mapped into the page tables of one or more
977 * processes. Try to unmap it here.
978 */
979 if (page_mapped(page) && mapping) {
02c6de8d 980 switch (try_to_unmap(page, ttu_flags)) {
1da177e4
LT
981 case SWAP_FAIL:
982 goto activate_locked;
983 case SWAP_AGAIN:
984 goto keep_locked;
b291f000
NP
985 case SWAP_MLOCK:
986 goto cull_mlocked;
1da177e4
LT
987 case SWAP_SUCCESS:
988 ; /* try to free the page below */
989 }
990 }
991
992 if (PageDirty(page)) {
ee72886d
MG
993 /*
994 * Only kswapd can writeback filesystem pages to
d43006d5
MG
995 * avoid risk of stack overflow but only writeback
996 * if many dirty pages have been encountered.
ee72886d 997 */
f84f6e2b 998 if (page_is_file_cache(page) &&
9e3b2f8c 999 (!current_is_kswapd() ||
d43006d5 1000 !zone_is_reclaim_dirty(zone))) {
49ea7eb6
MG
1001 /*
1002 * Immediately reclaim when written back.
1003 * Similar in principal to deactivate_page()
1004 * except we already have the page isolated
1005 * and know it's dirty
1006 */
1007 inc_zone_page_state(page, NR_VMSCAN_IMMEDIATE);
1008 SetPageReclaim(page);
1009
ee72886d
MG
1010 goto keep_locked;
1011 }
1012
dfc8d636 1013 if (references == PAGEREF_RECLAIM_CLEAN)
1da177e4 1014 goto keep_locked;
4dd4b920 1015 if (!may_enter_fs)
1da177e4 1016 goto keep_locked;
52a8363e 1017 if (!sc->may_writepage)
1da177e4
LT
1018 goto keep_locked;
1019
1020 /* Page is dirty, try to write it out here */
7d3579e8 1021 switch (pageout(page, mapping, sc)) {
1da177e4
LT
1022 case PAGE_KEEP:
1023 goto keep_locked;
1024 case PAGE_ACTIVATE:
1025 goto activate_locked;
1026 case PAGE_SUCCESS:
7d3579e8 1027 if (PageWriteback(page))
41ac1999 1028 goto keep;
7d3579e8 1029 if (PageDirty(page))
1da177e4 1030 goto keep;
7d3579e8 1031
1da177e4
LT
1032 /*
1033 * A synchronous write - probably a ramdisk. Go
1034 * ahead and try to reclaim the page.
1035 */
529ae9aa 1036 if (!trylock_page(page))
1da177e4
LT
1037 goto keep;
1038 if (PageDirty(page) || PageWriteback(page))
1039 goto keep_locked;
1040 mapping = page_mapping(page);
1041 case PAGE_CLEAN:
1042 ; /* try to free the page below */
1043 }
1044 }
1045
1046 /*
1047 * If the page has buffers, try to free the buffer mappings
1048 * associated with this page. If we succeed we try to free
1049 * the page as well.
1050 *
1051 * We do this even if the page is PageDirty().
1052 * try_to_release_page() does not perform I/O, but it is
1053 * possible for a page to have PageDirty set, but it is actually
1054 * clean (all its buffers are clean). This happens if the
1055 * buffers were written out directly, with submit_bh(). ext3
894bc310 1056 * will do this, as well as the blockdev mapping.
1da177e4
LT
1057 * try_to_release_page() will discover that cleanness and will
1058 * drop the buffers and mark the page clean - it can be freed.
1059 *
1060 * Rarely, pages can have buffers and no ->mapping. These are
1061 * the pages which were not successfully invalidated in
1062 * truncate_complete_page(). We try to drop those buffers here
1063 * and if that worked, and the page is no longer mapped into
1064 * process address space (page_count == 1) it can be freed.
1065 * Otherwise, leave the page on the LRU so it is swappable.
1066 */
266cf658 1067 if (page_has_private(page)) {
1da177e4
LT
1068 if (!try_to_release_page(page, sc->gfp_mask))
1069 goto activate_locked;
e286781d
NP
1070 if (!mapping && page_count(page) == 1) {
1071 unlock_page(page);
1072 if (put_page_testzero(page))
1073 goto free_it;
1074 else {
1075 /*
1076 * rare race with speculative reference.
1077 * the speculative reference will free
1078 * this page shortly, so we may
1079 * increment nr_reclaimed here (and
1080 * leave it off the LRU).
1081 */
1082 nr_reclaimed++;
1083 continue;
1084 }
1085 }
1da177e4
LT
1086 }
1087
a528910e 1088 if (!mapping || !__remove_mapping(mapping, page, true))
49d2e9cc 1089 goto keep_locked;
1da177e4 1090
a978d6f5
NP
1091 /*
1092 * At this point, we have no other references and there is
1093 * no way to pick any more up (removed from LRU, removed
1094 * from pagecache). Can use non-atomic bitops now (and
1095 * we obviously don't have to worry about waking up a process
1096 * waiting on the page lock, because there are no references.
1097 */
1098 __clear_page_locked(page);
e286781d 1099free_it:
05ff5137 1100 nr_reclaimed++;
abe4c3b5
MG
1101
1102 /*
1103 * Is there need to periodically free_page_list? It would
1104 * appear not as the counts should be low
1105 */
1106 list_add(&page->lru, &free_pages);
1da177e4
LT
1107 continue;
1108
b291f000 1109cull_mlocked:
63d6c5ad
HD
1110 if (PageSwapCache(page))
1111 try_to_free_swap(page);
b291f000
NP
1112 unlock_page(page);
1113 putback_lru_page(page);
1114 continue;
1115
1da177e4 1116activate_locked:
68a22394
RR
1117 /* Not a candidate for swapping, so reclaim swap space. */
1118 if (PageSwapCache(page) && vm_swap_full())
a2c43eed 1119 try_to_free_swap(page);
309381fe 1120 VM_BUG_ON_PAGE(PageActive(page), page);
1da177e4
LT
1121 SetPageActive(page);
1122 pgactivate++;
1123keep_locked:
1124 unlock_page(page);
1125keep:
1126 list_add(&page->lru, &ret_pages);
309381fe 1127 VM_BUG_ON_PAGE(PageLRU(page) || PageUnevictable(page), page);
1da177e4 1128 }
abe4c3b5 1129
b745bc85 1130 free_hot_cold_page_list(&free_pages, true);
abe4c3b5 1131
1da177e4 1132 list_splice(&ret_pages, page_list);
f8891e5e 1133 count_vm_events(PGACTIVATE, pgactivate);
69980e31 1134 mem_cgroup_uncharge_end();
8e950282
MG
1135 *ret_nr_dirty += nr_dirty;
1136 *ret_nr_congested += nr_congested;
d43006d5 1137 *ret_nr_unqueued_dirty += nr_unqueued_dirty;
92df3a72 1138 *ret_nr_writeback += nr_writeback;
b1a6f21e 1139 *ret_nr_immediate += nr_immediate;
05ff5137 1140 return nr_reclaimed;
1da177e4
LT
1141}
1142
02c6de8d
MK
1143unsigned long reclaim_clean_pages_from_list(struct zone *zone,
1144 struct list_head *page_list)
1145{
1146 struct scan_control sc = {
1147 .gfp_mask = GFP_KERNEL,
1148 .priority = DEF_PRIORITY,
1149 .may_unmap = 1,
1150 };
8e950282 1151 unsigned long ret, dummy1, dummy2, dummy3, dummy4, dummy5;
02c6de8d
MK
1152 struct page *page, *next;
1153 LIST_HEAD(clean_pages);
1154
1155 list_for_each_entry_safe(page, next, page_list, lru) {
117aad1e
RA
1156 if (page_is_file_cache(page) && !PageDirty(page) &&
1157 !isolated_balloon_page(page)) {
02c6de8d
MK
1158 ClearPageActive(page);
1159 list_move(&page->lru, &clean_pages);
1160 }
1161 }
1162
1163 ret = shrink_page_list(&clean_pages, zone, &sc,
8e950282
MG
1164 TTU_UNMAP|TTU_IGNORE_ACCESS,
1165 &dummy1, &dummy2, &dummy3, &dummy4, &dummy5, true);
02c6de8d 1166 list_splice(&clean_pages, page_list);
83da7510 1167 mod_zone_page_state(zone, NR_ISOLATED_FILE, -ret);
02c6de8d
MK
1168 return ret;
1169}
1170
5ad333eb
AW
1171/*
1172 * Attempt to remove the specified page from its LRU. Only take this page
1173 * if it is of the appropriate PageActive status. Pages which are being
1174 * freed elsewhere are also ignored.
1175 *
1176 * page: page to consider
1177 * mode: one of the LRU isolation modes defined above
1178 *
1179 * returns 0 on success, -ve errno on failure.
1180 */
f3fd4a61 1181int __isolate_lru_page(struct page *page, isolate_mode_t mode)
5ad333eb
AW
1182{
1183 int ret = -EINVAL;
1184
1185 /* Only take pages on the LRU. */
1186 if (!PageLRU(page))
1187 return ret;
1188
e46a2879
MK
1189 /* Compaction should not handle unevictable pages but CMA can do so */
1190 if (PageUnevictable(page) && !(mode & ISOLATE_UNEVICTABLE))
894bc310
LS
1191 return ret;
1192
5ad333eb 1193 ret = -EBUSY;
08e552c6 1194
c8244935
MG
1195 /*
1196 * To minimise LRU disruption, the caller can indicate that it only
1197 * wants to isolate pages it will be able to operate on without
1198 * blocking - clean pages for the most part.
1199 *
1200 * ISOLATE_CLEAN means that only clean pages should be isolated. This
1201 * is used by reclaim when it is cannot write to backing storage
1202 *
1203 * ISOLATE_ASYNC_MIGRATE is used to indicate that it only wants to pages
1204 * that it is possible to migrate without blocking
1205 */
1206 if (mode & (ISOLATE_CLEAN|ISOLATE_ASYNC_MIGRATE)) {
1207 /* All the caller can do on PageWriteback is block */
1208 if (PageWriteback(page))
1209 return ret;
1210
1211 if (PageDirty(page)) {
1212 struct address_space *mapping;
1213
1214 /* ISOLATE_CLEAN means only clean pages */
1215 if (mode & ISOLATE_CLEAN)
1216 return ret;
1217
1218 /*
1219 * Only pages without mappings or that have a
1220 * ->migratepage callback are possible to migrate
1221 * without blocking
1222 */
1223 mapping = page_mapping(page);
1224 if (mapping && !mapping->a_ops->migratepage)
1225 return ret;
1226 }
1227 }
39deaf85 1228
f80c0673
MK
1229 if ((mode & ISOLATE_UNMAPPED) && page_mapped(page))
1230 return ret;
1231
5ad333eb
AW
1232 if (likely(get_page_unless_zero(page))) {
1233 /*
1234 * Be careful not to clear PageLRU until after we're
1235 * sure the page is not being freed elsewhere -- the
1236 * page release code relies on it.
1237 */
1238 ClearPageLRU(page);
1239 ret = 0;
1240 }
1241
1242 return ret;
1243}
1244
1da177e4
LT
1245/*
1246 * zone->lru_lock is heavily contended. Some of the functions that
1247 * shrink the lists perform better by taking out a batch of pages
1248 * and working on them outside the LRU lock.
1249 *
1250 * For pagecache intensive workloads, this function is the hottest
1251 * spot in the kernel (apart from copy_*_user functions).
1252 *
1253 * Appropriate locks must be held before calling this function.
1254 *
1255 * @nr_to_scan: The number of pages to look through on the list.
5dc35979 1256 * @lruvec: The LRU vector to pull pages from.
1da177e4 1257 * @dst: The temp list to put pages on to.
f626012d 1258 * @nr_scanned: The number of pages that were scanned.
fe2c2a10 1259 * @sc: The scan_control struct for this reclaim session
5ad333eb 1260 * @mode: One of the LRU isolation modes
3cb99451 1261 * @lru: LRU list id for isolating
1da177e4
LT
1262 *
1263 * returns how many pages were moved onto *@dst.
1264 */
69e05944 1265static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
5dc35979 1266 struct lruvec *lruvec, struct list_head *dst,
fe2c2a10 1267 unsigned long *nr_scanned, struct scan_control *sc,
3cb99451 1268 isolate_mode_t mode, enum lru_list lru)
1da177e4 1269{
75b00af7 1270 struct list_head *src = &lruvec->lists[lru];
69e05944 1271 unsigned long nr_taken = 0;
c9b02d97 1272 unsigned long scan;
1da177e4 1273
c9b02d97 1274 for (scan = 0; scan < nr_to_scan && !list_empty(src); scan++) {
5ad333eb 1275 struct page *page;
fa9add64 1276 int nr_pages;
5ad333eb 1277
1da177e4
LT
1278 page = lru_to_page(src);
1279 prefetchw_prev_lru_page(page, src, flags);
1280
309381fe 1281 VM_BUG_ON_PAGE(!PageLRU(page), page);
8d438f96 1282
f3fd4a61 1283 switch (__isolate_lru_page(page, mode)) {
5ad333eb 1284 case 0:
fa9add64
HD
1285 nr_pages = hpage_nr_pages(page);
1286 mem_cgroup_update_lru_size(lruvec, lru, -nr_pages);
5ad333eb 1287 list_move(&page->lru, dst);
fa9add64 1288 nr_taken += nr_pages;
5ad333eb
AW
1289 break;
1290
1291 case -EBUSY:
1292 /* else it is being freed elsewhere */
1293 list_move(&page->lru, src);
1294 continue;
46453a6e 1295
5ad333eb
AW
1296 default:
1297 BUG();
1298 }
1da177e4
LT
1299 }
1300
f626012d 1301 *nr_scanned = scan;
75b00af7
HD
1302 trace_mm_vmscan_lru_isolate(sc->order, nr_to_scan, scan,
1303 nr_taken, mode, is_file_lru(lru));
1da177e4
LT
1304 return nr_taken;
1305}
1306
62695a84
NP
1307/**
1308 * isolate_lru_page - tries to isolate a page from its LRU list
1309 * @page: page to isolate from its LRU list
1310 *
1311 * Isolates a @page from an LRU list, clears PageLRU and adjusts the
1312 * vmstat statistic corresponding to whatever LRU list the page was on.
1313 *
1314 * Returns 0 if the page was removed from an LRU list.
1315 * Returns -EBUSY if the page was not on an LRU list.
1316 *
1317 * The returned page will have PageLRU() cleared. If it was found on
894bc310
LS
1318 * the active list, it will have PageActive set. If it was found on
1319 * the unevictable list, it will have the PageUnevictable bit set. That flag
1320 * may need to be cleared by the caller before letting the page go.
62695a84
NP
1321 *
1322 * The vmstat statistic corresponding to the list on which the page was
1323 * found will be decremented.
1324 *
1325 * Restrictions:
1326 * (1) Must be called with an elevated refcount on the page. This is a
1327 * fundamentnal difference from isolate_lru_pages (which is called
1328 * without a stable reference).
1329 * (2) the lru_lock must not be held.
1330 * (3) interrupts must be enabled.
1331 */
1332int isolate_lru_page(struct page *page)
1333{
1334 int ret = -EBUSY;
1335
309381fe 1336 VM_BUG_ON_PAGE(!page_count(page), page);
0c917313 1337
62695a84
NP
1338 if (PageLRU(page)) {
1339 struct zone *zone = page_zone(page);
fa9add64 1340 struct lruvec *lruvec;
62695a84
NP
1341
1342 spin_lock_irq(&zone->lru_lock);
fa9add64 1343 lruvec = mem_cgroup_page_lruvec(page, zone);
0c917313 1344 if (PageLRU(page)) {
894bc310 1345 int lru = page_lru(page);
0c917313 1346 get_page(page);
62695a84 1347 ClearPageLRU(page);
fa9add64
HD
1348 del_page_from_lru_list(page, lruvec, lru);
1349 ret = 0;
62695a84
NP
1350 }
1351 spin_unlock_irq(&zone->lru_lock);
1352 }
1353 return ret;
1354}
1355
35cd7815 1356/*
d37dd5dc
FW
1357 * A direct reclaimer may isolate SWAP_CLUSTER_MAX pages from the LRU list and
1358 * then get resheduled. When there are massive number of tasks doing page
1359 * allocation, such sleeping direct reclaimers may keep piling up on each CPU,
1360 * the LRU list will go small and be scanned faster than necessary, leading to
1361 * unnecessary swapping, thrashing and OOM.
35cd7815
RR
1362 */
1363static int too_many_isolated(struct zone *zone, int file,
1364 struct scan_control *sc)
1365{
1366 unsigned long inactive, isolated;
1367
1368 if (current_is_kswapd())
1369 return 0;
1370
89b5fae5 1371 if (!global_reclaim(sc))
35cd7815
RR
1372 return 0;
1373
1374 if (file) {
1375 inactive = zone_page_state(zone, NR_INACTIVE_FILE);
1376 isolated = zone_page_state(zone, NR_ISOLATED_FILE);
1377 } else {
1378 inactive = zone_page_state(zone, NR_INACTIVE_ANON);
1379 isolated = zone_page_state(zone, NR_ISOLATED_ANON);
1380 }
1381
3cf23841
FW
1382 /*
1383 * GFP_NOIO/GFP_NOFS callers are allowed to isolate more pages, so they
1384 * won't get blocked by normal direct-reclaimers, forming a circular
1385 * deadlock.
1386 */
1387 if ((sc->gfp_mask & GFP_IOFS) == GFP_IOFS)
1388 inactive >>= 3;
1389
35cd7815
RR
1390 return isolated > inactive;
1391}
1392
66635629 1393static noinline_for_stack void
75b00af7 1394putback_inactive_pages(struct lruvec *lruvec, struct list_head *page_list)
66635629 1395{
27ac81d8
KK
1396 struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
1397 struct zone *zone = lruvec_zone(lruvec);
3f79768f 1398 LIST_HEAD(pages_to_free);
66635629 1399
66635629
MG
1400 /*
1401 * Put back any unfreeable pages.
1402 */
66635629 1403 while (!list_empty(page_list)) {
3f79768f 1404 struct page *page = lru_to_page(page_list);
66635629 1405 int lru;
3f79768f 1406
309381fe 1407 VM_BUG_ON_PAGE(PageLRU(page), page);
66635629 1408 list_del(&page->lru);
39b5f29a 1409 if (unlikely(!page_evictable(page))) {
66635629
MG
1410 spin_unlock_irq(&zone->lru_lock);
1411 putback_lru_page(page);
1412 spin_lock_irq(&zone->lru_lock);
1413 continue;
1414 }
fa9add64
HD
1415
1416 lruvec = mem_cgroup_page_lruvec(page, zone);
1417
7a608572 1418 SetPageLRU(page);
66635629 1419 lru = page_lru(page);
fa9add64
HD
1420 add_page_to_lru_list(page, lruvec, lru);
1421
66635629
MG
1422 if (is_active_lru(lru)) {
1423 int file = is_file_lru(lru);
9992af10
RR
1424 int numpages = hpage_nr_pages(page);
1425 reclaim_stat->recent_rotated[file] += numpages;
66635629 1426 }
2bcf8879
HD
1427 if (put_page_testzero(page)) {
1428 __ClearPageLRU(page);
1429 __ClearPageActive(page);
fa9add64 1430 del_page_from_lru_list(page, lruvec, lru);
2bcf8879
HD
1431
1432 if (unlikely(PageCompound(page))) {
1433 spin_unlock_irq(&zone->lru_lock);
1434 (*get_compound_page_dtor(page))(page);
1435 spin_lock_irq(&zone->lru_lock);
1436 } else
1437 list_add(&page->lru, &pages_to_free);
66635629
MG
1438 }
1439 }
66635629 1440
3f79768f
HD
1441 /*
1442 * To save our caller's stack, now use input list for pages to free.
1443 */
1444 list_splice(&pages_to_free, page_list);
66635629
MG
1445}
1446
399ba0b9
N
1447/*
1448 * If a kernel thread (such as nfsd for loop-back mounts) services
1449 * a backing device by writing to the page cache it sets PF_LESS_THROTTLE.
1450 * In that case we should only throttle if the backing device it is
1451 * writing to is congested. In other cases it is safe to throttle.
1452 */
1453static int current_may_throttle(void)
1454{
1455 return !(current->flags & PF_LESS_THROTTLE) ||
1456 current->backing_dev_info == NULL ||
1457 bdi_write_congested(current->backing_dev_info);
1458}
1459
1da177e4 1460/*
1742f19f
AM
1461 * shrink_inactive_list() is a helper for shrink_zone(). It returns the number
1462 * of reclaimed pages
1da177e4 1463 */
66635629 1464static noinline_for_stack unsigned long
1a93be0e 1465shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
9e3b2f8c 1466 struct scan_control *sc, enum lru_list lru)
1da177e4
LT
1467{
1468 LIST_HEAD(page_list);
e247dbce 1469 unsigned long nr_scanned;
05ff5137 1470 unsigned long nr_reclaimed = 0;
e247dbce 1471 unsigned long nr_taken;
8e950282
MG
1472 unsigned long nr_dirty = 0;
1473 unsigned long nr_congested = 0;
e2be15f6 1474 unsigned long nr_unqueued_dirty = 0;
92df3a72 1475 unsigned long nr_writeback = 0;
b1a6f21e 1476 unsigned long nr_immediate = 0;
f3fd4a61 1477 isolate_mode_t isolate_mode = 0;
3cb99451 1478 int file = is_file_lru(lru);
1a93be0e
KK
1479 struct zone *zone = lruvec_zone(lruvec);
1480 struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
78dc583d 1481
35cd7815 1482 while (unlikely(too_many_isolated(zone, file, sc))) {
58355c78 1483 congestion_wait(BLK_RW_ASYNC, HZ/10);
35cd7815
RR
1484
1485 /* We are about to die and free our memory. Return now. */
1486 if (fatal_signal_pending(current))
1487 return SWAP_CLUSTER_MAX;
1488 }
1489
1da177e4 1490 lru_add_drain();
f80c0673
MK
1491
1492 if (!sc->may_unmap)
61317289 1493 isolate_mode |= ISOLATE_UNMAPPED;
f80c0673 1494 if (!sc->may_writepage)
61317289 1495 isolate_mode |= ISOLATE_CLEAN;
f80c0673 1496
1da177e4 1497 spin_lock_irq(&zone->lru_lock);
b35ea17b 1498
5dc35979
KK
1499 nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &page_list,
1500 &nr_scanned, sc, isolate_mode, lru);
95d918fc
KK
1501
1502 __mod_zone_page_state(zone, NR_LRU_BASE + lru, -nr_taken);
1503 __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, nr_taken);
1504
89b5fae5 1505 if (global_reclaim(sc)) {
e247dbce
KM
1506 zone->pages_scanned += nr_scanned;
1507 if (current_is_kswapd())
75b00af7 1508 __count_zone_vm_events(PGSCAN_KSWAPD, zone, nr_scanned);
e247dbce 1509 else
75b00af7 1510 __count_zone_vm_events(PGSCAN_DIRECT, zone, nr_scanned);
e247dbce 1511 }
d563c050 1512 spin_unlock_irq(&zone->lru_lock);
b35ea17b 1513
d563c050 1514 if (nr_taken == 0)
66635629 1515 return 0;
5ad333eb 1516
02c6de8d 1517 nr_reclaimed = shrink_page_list(&page_list, zone, sc, TTU_UNMAP,
8e950282
MG
1518 &nr_dirty, &nr_unqueued_dirty, &nr_congested,
1519 &nr_writeback, &nr_immediate,
1520 false);
c661b078 1521
3f79768f
HD
1522 spin_lock_irq(&zone->lru_lock);
1523
95d918fc 1524 reclaim_stat->recent_scanned[file] += nr_taken;
d563c050 1525
904249aa
YH
1526 if (global_reclaim(sc)) {
1527 if (current_is_kswapd())
1528 __count_zone_vm_events(PGSTEAL_KSWAPD, zone,
1529 nr_reclaimed);
1530 else
1531 __count_zone_vm_events(PGSTEAL_DIRECT, zone,
1532 nr_reclaimed);
1533 }
a74609fa 1534
27ac81d8 1535 putback_inactive_pages(lruvec, &page_list);
3f79768f 1536
95d918fc 1537 __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, -nr_taken);
3f79768f
HD
1538
1539 spin_unlock_irq(&zone->lru_lock);
1540
b745bc85 1541 free_hot_cold_page_list(&page_list, true);
e11da5b4 1542
92df3a72
MG
1543 /*
1544 * If reclaim is isolating dirty pages under writeback, it implies
1545 * that the long-lived page allocation rate is exceeding the page
1546 * laundering rate. Either the global limits are not being effective
1547 * at throttling processes due to the page distribution throughout
1548 * zones or there is heavy usage of a slow backing device. The
1549 * only option is to throttle from reclaim context which is not ideal
1550 * as there is no guarantee the dirtying process is throttled in the
1551 * same way balance_dirty_pages() manages.
1552 *
8e950282
MG
1553 * Once a zone is flagged ZONE_WRITEBACK, kswapd will count the number
1554 * of pages under pages flagged for immediate reclaim and stall if any
1555 * are encountered in the nr_immediate check below.
92df3a72 1556 */
918fc718 1557 if (nr_writeback && nr_writeback == nr_taken)
283aba9f 1558 zone_set_flag(zone, ZONE_WRITEBACK);
92df3a72 1559
d43006d5 1560 /*
b1a6f21e
MG
1561 * memcg will stall in page writeback so only consider forcibly
1562 * stalling for global reclaim
d43006d5 1563 */
b1a6f21e 1564 if (global_reclaim(sc)) {
8e950282
MG
1565 /*
1566 * Tag a zone as congested if all the dirty pages scanned were
1567 * backed by a congested BDI and wait_iff_congested will stall.
1568 */
1569 if (nr_dirty && nr_dirty == nr_congested)
1570 zone_set_flag(zone, ZONE_CONGESTED);
1571
b1a6f21e
MG
1572 /*
1573 * If dirty pages are scanned that are not queued for IO, it
1574 * implies that flushers are not keeping up. In this case, flag
1575 * the zone ZONE_TAIL_LRU_DIRTY and kswapd will start writing
b738d764 1576 * pages from reclaim context.
b1a6f21e
MG
1577 */
1578 if (nr_unqueued_dirty == nr_taken)
1579 zone_set_flag(zone, ZONE_TAIL_LRU_DIRTY);
1580
1581 /*
b738d764
LT
1582 * If kswapd scans pages marked marked for immediate
1583 * reclaim and under writeback (nr_immediate), it implies
1584 * that pages are cycling through the LRU faster than
b1a6f21e
MG
1585 * they are written so also forcibly stall.
1586 */
b738d764 1587 if (nr_immediate && current_may_throttle())
b1a6f21e 1588 congestion_wait(BLK_RW_ASYNC, HZ/10);
e2be15f6 1589 }
d43006d5 1590
8e950282
MG
1591 /*
1592 * Stall direct reclaim for IO completions if underlying BDIs or zone
1593 * is congested. Allow kswapd to continue until it starts encountering
1594 * unqueued dirty pages or cycling through the LRU too quickly.
1595 */
399ba0b9
N
1596 if (!sc->hibernation_mode && !current_is_kswapd() &&
1597 current_may_throttle())
8e950282
MG
1598 wait_iff_congested(zone, BLK_RW_ASYNC, HZ/10);
1599
e11da5b4
MG
1600 trace_mm_vmscan_lru_shrink_inactive(zone->zone_pgdat->node_id,
1601 zone_idx(zone),
1602 nr_scanned, nr_reclaimed,
9e3b2f8c 1603 sc->priority,
23b9da55 1604 trace_shrink_flags(file));
05ff5137 1605 return nr_reclaimed;
1da177e4
LT
1606}
1607
1608/*
1609 * This moves pages from the active list to the inactive list.
1610 *
1611 * We move them the other way if the page is referenced by one or more
1612 * processes, from rmap.
1613 *
1614 * If the pages are mostly unmapped, the processing is fast and it is
1615 * appropriate to hold zone->lru_lock across the whole operation. But if
1616 * the pages are mapped, the processing is slow (page_referenced()) so we
1617 * should drop zone->lru_lock around each page. It's impossible to balance
1618 * this, so instead we remove the pages from the LRU while processing them.
1619 * It is safe to rely on PG_active against the non-LRU pages in here because
1620 * nobody will play with that bit on a non-LRU page.
1621 *
1622 * The downside is that we have to touch page->_count against each page.
1623 * But we had to alter page->flags anyway.
1624 */
1cfb419b 1625
fa9add64 1626static void move_active_pages_to_lru(struct lruvec *lruvec,
3eb4140f 1627 struct list_head *list,
2bcf8879 1628 struct list_head *pages_to_free,
3eb4140f
WF
1629 enum lru_list lru)
1630{
fa9add64 1631 struct zone *zone = lruvec_zone(lruvec);
3eb4140f 1632 unsigned long pgmoved = 0;
3eb4140f 1633 struct page *page;
fa9add64 1634 int nr_pages;
3eb4140f 1635
3eb4140f
WF
1636 while (!list_empty(list)) {
1637 page = lru_to_page(list);
fa9add64 1638 lruvec = mem_cgroup_page_lruvec(page, zone);
3eb4140f 1639
309381fe 1640 VM_BUG_ON_PAGE(PageLRU(page), page);
3eb4140f
WF
1641 SetPageLRU(page);
1642
fa9add64
HD
1643 nr_pages = hpage_nr_pages(page);
1644 mem_cgroup_update_lru_size(lruvec, lru, nr_pages);
925b7673 1645 list_move(&page->lru, &lruvec->lists[lru]);
fa9add64 1646 pgmoved += nr_pages;
3eb4140f 1647
2bcf8879
HD
1648 if (put_page_testzero(page)) {
1649 __ClearPageLRU(page);
1650 __ClearPageActive(page);
fa9add64 1651 del_page_from_lru_list(page, lruvec, lru);
2bcf8879
HD
1652
1653 if (unlikely(PageCompound(page))) {
1654 spin_unlock_irq(&zone->lru_lock);
1655 (*get_compound_page_dtor(page))(page);
1656 spin_lock_irq(&zone->lru_lock);
1657 } else
1658 list_add(&page->lru, pages_to_free);
3eb4140f
WF
1659 }
1660 }
1661 __mod_zone_page_state(zone, NR_LRU_BASE + lru, pgmoved);
1662 if (!is_active_lru(lru))
1663 __count_vm_events(PGDEACTIVATE, pgmoved);
1664}
1cfb419b 1665
f626012d 1666static void shrink_active_list(unsigned long nr_to_scan,
1a93be0e 1667 struct lruvec *lruvec,
f16015fb 1668 struct scan_control *sc,
9e3b2f8c 1669 enum lru_list lru)
1da177e4 1670{
44c241f1 1671 unsigned long nr_taken;
f626012d 1672 unsigned long nr_scanned;
6fe6b7e3 1673 unsigned long vm_flags;
1da177e4 1674 LIST_HEAD(l_hold); /* The pages which were snipped off */
8cab4754 1675 LIST_HEAD(l_active);
b69408e8 1676 LIST_HEAD(l_inactive);
1da177e4 1677 struct page *page;
1a93be0e 1678 struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
44c241f1 1679 unsigned long nr_rotated = 0;
f3fd4a61 1680 isolate_mode_t isolate_mode = 0;
3cb99451 1681 int file = is_file_lru(lru);
1a93be0e 1682 struct zone *zone = lruvec_zone(lruvec);
1da177e4
LT
1683
1684 lru_add_drain();
f80c0673
MK
1685
1686 if (!sc->may_unmap)
61317289 1687 isolate_mode |= ISOLATE_UNMAPPED;
f80c0673 1688 if (!sc->may_writepage)
61317289 1689 isolate_mode |= ISOLATE_CLEAN;
f80c0673 1690
1da177e4 1691 spin_lock_irq(&zone->lru_lock);
925b7673 1692
5dc35979
KK
1693 nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &l_hold,
1694 &nr_scanned, sc, isolate_mode, lru);
89b5fae5 1695 if (global_reclaim(sc))
f626012d 1696 zone->pages_scanned += nr_scanned;
89b5fae5 1697
b7c46d15 1698 reclaim_stat->recent_scanned[file] += nr_taken;
1cfb419b 1699
f626012d 1700 __count_zone_vm_events(PGREFILL, zone, nr_scanned);
3cb99451 1701 __mod_zone_page_state(zone, NR_LRU_BASE + lru, -nr_taken);
a731286d 1702 __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, nr_taken);
1da177e4
LT
1703 spin_unlock_irq(&zone->lru_lock);
1704
1da177e4
LT
1705 while (!list_empty(&l_hold)) {
1706 cond_resched();
1707 page = lru_to_page(&l_hold);
1708 list_del(&page->lru);
7e9cd484 1709
39b5f29a 1710 if (unlikely(!page_evictable(page))) {
894bc310
LS
1711 putback_lru_page(page);
1712 continue;
1713 }
1714
cc715d99
MG
1715 if (unlikely(buffer_heads_over_limit)) {
1716 if (page_has_private(page) && trylock_page(page)) {
1717 if (page_has_private(page))
1718 try_to_release_page(page, 0);
1719 unlock_page(page);
1720 }
1721 }
1722
c3ac9a8a
JW
1723 if (page_referenced(page, 0, sc->target_mem_cgroup,
1724 &vm_flags)) {
9992af10 1725 nr_rotated += hpage_nr_pages(page);
8cab4754
WF
1726 /*
1727 * Identify referenced, file-backed active pages and
1728 * give them one more trip around the active list. So
 1729 * that executable code gets a better chance to stay in
1730 * memory under moderate memory pressure. Anon pages
1731 * are not likely to be evicted by use-once streaming
1732 * IO, plus JVM can create lots of anon VM_EXEC pages,
1733 * so we ignore them here.
1734 */
41e20983 1735 if ((vm_flags & VM_EXEC) && page_is_file_cache(page)) {
8cab4754
WF
1736 list_add(&page->lru, &l_active);
1737 continue;
1738 }
1739 }
7e9cd484 1740
5205e56e 1741 ClearPageActive(page); /* we are de-activating */
1da177e4
LT
1742 list_add(&page->lru, &l_inactive);
1743 }
1744
b555749a 1745 /*
8cab4754 1746 * Move pages back to the lru list.
b555749a 1747 */
2a1dc509 1748 spin_lock_irq(&zone->lru_lock);
556adecb 1749 /*
8cab4754
WF
1750 * Count referenced pages from currently used mappings as rotated,
1751 * even though only some of them are actually re-activated. This
1752 * helps balance scan pressure between file and anonymous pages in
 1753 * get_scan_count().
7e9cd484 1754 */
b7c46d15 1755 reclaim_stat->recent_rotated[file] += nr_rotated;
556adecb 1756
fa9add64
HD
1757 move_active_pages_to_lru(lruvec, &l_active, &l_hold, lru);
1758 move_active_pages_to_lru(lruvec, &l_inactive, &l_hold, lru - LRU_ACTIVE);
a731286d 1759 __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, -nr_taken);
f8891e5e 1760 spin_unlock_irq(&zone->lru_lock);
2bcf8879 1761
b745bc85 1762 free_hot_cold_page_list(&l_hold, true);
1da177e4
LT
1763}
1764
74e3f3c3 1765#ifdef CONFIG_SWAP
14797e23 1766static int inactive_anon_is_low_global(struct zone *zone)
f89eb90e
KM
1767{
1768 unsigned long active, inactive;
1769
1770 active = zone_page_state(zone, NR_ACTIVE_ANON);
1771 inactive = zone_page_state(zone, NR_INACTIVE_ANON);
1772
1773 if (inactive * zone->inactive_ratio < active)
1774 return 1;
1775
1776 return 0;
1777}
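
/*
 * Illustration only, not part of the kernel source (hypothetical numbers):
 * the check above weights the inactive list by zone->inactive_ratio, which
 * works out to roughly 3 for a ~1GB zone. With 300 inactive and 1000 active
 * anon pages, 300 * 3 < 1000, so the zone is reported as "low" and some
 * active anon pages will be deactivated.
 */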
1778
14797e23
KM
1779/**
1780 * inactive_anon_is_low - check if anonymous pages need to be deactivated
c56d5c7d 1781 * @lruvec: LRU vector to check
14797e23
KM
1782 *
1783 * Returns true if the zone does not have enough inactive anon pages,
1784 * meaning some active anon pages need to be deactivated.
1785 */
c56d5c7d 1786static int inactive_anon_is_low(struct lruvec *lruvec)
14797e23 1787{
74e3f3c3
MK
1788 /*
1789 * If we don't have swap space, anonymous page deactivation
1790 * is pointless.
1791 */
1792 if (!total_swap_pages)
1793 return 0;
1794
c3c787e8 1795 if (!mem_cgroup_disabled())
c56d5c7d 1796 return mem_cgroup_inactive_anon_is_low(lruvec);
f16015fb 1797
c56d5c7d 1798 return inactive_anon_is_low_global(lruvec_zone(lruvec));
14797e23 1799}
74e3f3c3 1800#else
c56d5c7d 1801static inline int inactive_anon_is_low(struct lruvec *lruvec)
74e3f3c3
MK
1802{
1803 return 0;
1804}
1805#endif
14797e23 1806
56e49d21
RR
1807/**
1808 * inactive_file_is_low - check if file pages need to be deactivated
c56d5c7d 1809 * @lruvec: LRU vector to check
56e49d21
RR
1810 *
1811 * When the system is doing streaming IO, memory pressure here
1812 * ensures that active file pages get deactivated, until more
1813 * than half of the file pages are on the inactive list.
1814 *
1815 * Once we get to that situation, protect the system's working
1816 * set from being evicted by disabling active file page aging.
1817 *
1818 * This uses a different ratio than the anonymous pages, because
1819 * the page cache uses a use-once replacement algorithm.
1820 */
c56d5c7d 1821static int inactive_file_is_low(struct lruvec *lruvec)
56e49d21 1822{
e3790144
JW
1823 unsigned long inactive;
1824 unsigned long active;
1825
1826 inactive = get_lru_size(lruvec, LRU_INACTIVE_FILE);
1827 active = get_lru_size(lruvec, LRU_ACTIVE_FILE);
56e49d21 1828
e3790144 1829 return active > inactive;
56e49d21
RR
1830}
1831
75b00af7 1832static int inactive_list_is_low(struct lruvec *lruvec, enum lru_list lru)
b39415b2 1833{
75b00af7 1834 if (is_file_lru(lru))
c56d5c7d 1835 return inactive_file_is_low(lruvec);
b39415b2 1836 else
c56d5c7d 1837 return inactive_anon_is_low(lruvec);
b39415b2
RR
1838}
1839
4f98a2fe 1840static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
1a93be0e 1841 struct lruvec *lruvec, struct scan_control *sc)
b69408e8 1842{
b39415b2 1843 if (is_active_lru(lru)) {
75b00af7 1844 if (inactive_list_is_low(lruvec, lru))
1a93be0e 1845 shrink_active_list(nr_to_scan, lruvec, sc, lru);
556adecb
RR
1846 return 0;
1847 }
1848
1a93be0e 1849 return shrink_inactive_list(nr_to_scan, lruvec, sc, lru);
4f98a2fe
RR
1850}
1851
9a265114
JW
1852enum scan_balance {
1853 SCAN_EQUAL,
1854 SCAN_FRACT,
1855 SCAN_ANON,
1856 SCAN_FILE,
1857};
1858
4f98a2fe
RR
1859/*
1860 * Determine how aggressively the anon and file LRU lists should be
1861 * scanned. The relative value of each set of LRU lists is determined
1862 * by looking at the fraction of the pages scanned we did rotate back
1863 * onto the active list instead of evict.
1864 *
be7bd59d
WL
1865 * nr[0] = anon inactive pages to scan; nr[1] = anon active pages to scan
1866 * nr[2] = file inactive pages to scan; nr[3] = file active pages to scan
4f98a2fe 1867 */
90126375 1868static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
9e3b2f8c 1869 unsigned long *nr)
4f98a2fe 1870{
9a265114
JW
1871 struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
1872 u64 fraction[2];
1873 u64 denominator = 0; /* gcc */
1874 struct zone *zone = lruvec_zone(lruvec);
4f98a2fe 1875 unsigned long anon_prio, file_prio;
9a265114 1876 enum scan_balance scan_balance;
0bf1457f 1877 unsigned long anon, file;
9a265114 1878 bool force_scan = false;
4f98a2fe 1879 unsigned long ap, fp;
4111304d 1880 enum lru_list lru;
6f04f48d
SS
1881 bool some_scanned;
1882 int pass;
246e87a9 1883
f11c0ca5
JW
1884 /*
1885 * If the zone or memcg is small, nr[l] can be 0. This
1886 * results in no scanning on this priority and a potential
1887 * priority drop. Global direct reclaim can go to the next
1888 * zone and tends to have no problems. Global kswapd is for
1889 * zone balancing and it needs to scan a minimum amount. When
1890 * reclaiming for a memcg, a priority drop can cause high
1891 * latencies, so it's better to scan a minimum amount there as
1892 * well.
1893 */
6e543d57 1894 if (current_is_kswapd() && !zone_reclaimable(zone))
a4d3e9e7 1895 force_scan = true;
89b5fae5 1896 if (!global_reclaim(sc))
a4d3e9e7 1897 force_scan = true;
76a33fc3
SL
1898
1899 /* If we have no swap space, do not bother scanning anon pages. */
ec8acf20 1900 if (!sc->may_swap || (get_nr_swap_pages() <= 0)) {
9a265114 1901 scan_balance = SCAN_FILE;
76a33fc3
SL
1902 goto out;
1903 }
4f98a2fe 1904
10316b31
JW
1905 /*
1906 * Global reclaim will swap to prevent OOM even with no
1907 * swappiness, but memcg users want to use this knob to
1908 * disable swapping for individual groups completely when
1909 * using the memory controller's swap limit feature would be
1910 * too expensive.
1911 */
688eb988 1912 if (!global_reclaim(sc) && !sc->swappiness) {
9a265114 1913 scan_balance = SCAN_FILE;
10316b31
JW
1914 goto out;
1915 }
1916
1917 /*
1918 * Do not apply any pressure balancing cleverness when the
1919 * system is close to OOM, scan both anon and file equally
1920 * (unless the swappiness setting disagrees with swapping).
1921 */
688eb988 1922 if (!sc->priority && sc->swappiness) {
9a265114 1923 scan_balance = SCAN_EQUAL;
10316b31
JW
1924 goto out;
1925 }
1926
4d7dcca2
HD
1927 anon = get_lru_size(lruvec, LRU_ACTIVE_ANON) +
1928 get_lru_size(lruvec, LRU_INACTIVE_ANON);
1929 file = get_lru_size(lruvec, LRU_ACTIVE_FILE) +
1930 get_lru_size(lruvec, LRU_INACTIVE_FILE);
a4d3e9e7 1931
62376251
JW
1932 /*
1933 * Prevent the reclaimer from falling into the cache trap: as
1934 * cache pages start out inactive, every cache fault will tip
1935 * the scan balance towards the file LRU. And as the file LRU
1936 * shrinks, so does the window for rotation from references.
1937 * This means we have a runaway feedback loop where a tiny
1938 * thrashing file LRU becomes infinitely more attractive than
1939 * anon pages. Try to detect this based on file LRU size.
1940 */
1941 if (global_reclaim(sc)) {
1942 unsigned long free = zone_page_state(zone, NR_FREE_PAGES);
1943
1944 if (unlikely(file + free <= high_wmark_pages(zone))) {
1945 scan_balance = SCAN_ANON;
1946 goto out;
1947 }
1948 }
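
	/*
	 * Worked example of the check above (hypothetical numbers, not from
	 * the source): on a zone with a high watermark of 8192 pages, a file
	 * LRU that has shrunk to 3000 pages and only 4000 free pages give
	 * file + free = 7000, which no longer covers the watermark, so
	 * reclaim is pointed at the anon lists instead of thrashing the tiny
	 * file LRU.
	 */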
1949
7c5bd705
JW
1950 /*
1951 * There is enough inactive page cache, do not reclaim
1952 * anything from the anonymous working set right now.
1953 */
1954 if (!inactive_file_is_low(lruvec)) {
9a265114 1955 scan_balance = SCAN_FILE;
7c5bd705
JW
1956 goto out;
1957 }
1958
9a265114
JW
1959 scan_balance = SCAN_FRACT;
1960
58c37f6e
KM
1961 /*
1962 * With swappiness at 100, anonymous and file have the same priority.
1963 * This scanning priority is essentially the inverse of IO cost.
1964 */
688eb988 1965 anon_prio = sc->swappiness;
75b00af7 1966 file_prio = 200 - anon_prio;
58c37f6e 1967
4f98a2fe
RR
1968 /*
1969 * OK, so we have swap space and a fair amount of page cache
1970 * pages. We use the recently rotated / recently scanned
1971 * ratios to determine how valuable each cache is.
1972 *
1973 * Because workloads change over time (and to avoid overflow)
1974 * we keep these statistics as a floating average, which ends
1975 * up weighing recent references more than old ones.
1976 *
1977 * anon in [0], file in [1]
1978 */
90126375 1979 spin_lock_irq(&zone->lru_lock);
6e901571 1980 if (unlikely(reclaim_stat->recent_scanned[0] > anon / 4)) {
6e901571
KM
1981 reclaim_stat->recent_scanned[0] /= 2;
1982 reclaim_stat->recent_rotated[0] /= 2;
4f98a2fe
RR
1983 }
1984
6e901571 1985 if (unlikely(reclaim_stat->recent_scanned[1] > file / 4)) {
6e901571
KM
1986 reclaim_stat->recent_scanned[1] /= 2;
1987 reclaim_stat->recent_rotated[1] /= 2;
4f98a2fe
RR
1988 }
1989
4f98a2fe 1990 /*
00d8089c
RR
1991 * The amount of pressure on anon vs file pages is inversely
1992 * proportional to the fraction of recently scanned pages on
1993 * each list that were recently referenced and in active use.
4f98a2fe 1994 */
fe35004f 1995 ap = anon_prio * (reclaim_stat->recent_scanned[0] + 1);
6e901571 1996 ap /= reclaim_stat->recent_rotated[0] + 1;
4f98a2fe 1997
fe35004f 1998 fp = file_prio * (reclaim_stat->recent_scanned[1] + 1);
6e901571 1999 fp /= reclaim_stat->recent_rotated[1] + 1;
90126375 2000 spin_unlock_irq(&zone->lru_lock);
4f98a2fe 2001
76a33fc3
SL
2002 fraction[0] = ap;
2003 fraction[1] = fp;
2004 denominator = ap + fp + 1;
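
	/*
	 * Illustration only (hypothetical numbers, not part of the source):
	 * with swappiness 60 (anon_prio = 60, file_prio = 140),
	 * recent_scanned = {200, 1000} and recent_rotated = {20, 500},
	 * ap = 60 * 201 / 21 = 574 and fp = 140 * 1001 / 501 = 279, so
	 * roughly two thirds of this round's scan pressure lands on the anon
	 * lists and one third on the file lists.
	 */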
2005out:
6f04f48d
SS
2006 some_scanned = false;
2007 /* Only use force_scan on second pass. */
2008 for (pass = 0; !some_scanned && pass < 2; pass++) {
2009 for_each_evictable_lru(lru) {
2010 int file = is_file_lru(lru);
2011 unsigned long size;
2012 unsigned long scan;
6e08a369 2013
6f04f48d
SS
2014 size = get_lru_size(lruvec, lru);
2015 scan = size >> sc->priority;
9a265114 2016
6f04f48d
SS
2017 if (!scan && pass && force_scan)
2018 scan = min(size, SWAP_CLUSTER_MAX);
9a265114 2019
6f04f48d
SS
2020 switch (scan_balance) {
2021 case SCAN_EQUAL:
2022 /* Scan lists relative to size */
2023 break;
2024 case SCAN_FRACT:
2025 /*
2026 * Scan types proportional to swappiness and
2027 * their relative recent reclaim efficiency.
2028 */
2029 scan = div64_u64(scan * fraction[file],
2030 denominator);
2031 break;
2032 case SCAN_FILE:
2033 case SCAN_ANON:
2034 /* Scan one type exclusively */
2035 if ((scan_balance == SCAN_FILE) != file)
2036 scan = 0;
2037 break;
2038 default:
2039 /* Look ma, no brain */
2040 BUG();
2041 }
2042 nr[lru] = scan;
9a265114 2043 /*
6f04f48d
SS
2044 * Skip the second pass and don't force_scan,
2045 * if we found something to scan.
9a265114 2046 */
6f04f48d 2047 some_scanned |= !!scan;
9a265114 2048 }
76a33fc3 2049 }
6e08a369 2050}
4f98a2fe 2051
9b4f98cd
JW
2052/*
2053 * This is a basic per-zone page freer. Used by both kswapd and direct reclaim.
2054 */
2055static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
2056{
2057 unsigned long nr[NR_LRU_LISTS];
e82e0561 2058 unsigned long targets[NR_LRU_LISTS];
9b4f98cd
JW
2059 unsigned long nr_to_scan;
2060 enum lru_list lru;
2061 unsigned long nr_reclaimed = 0;
2062 unsigned long nr_to_reclaim = sc->nr_to_reclaim;
2063 struct blk_plug plug;
1a501907 2064 bool scan_adjusted;
9b4f98cd
JW
2065
2066 get_scan_count(lruvec, sc, nr);
2067
e82e0561
MG
2068 /* Record the original scan target for proportional adjustments later */
2069 memcpy(targets, nr, sizeof(nr));
2070
1a501907
MG
2071 /*
2072 * Global reclaiming within direct reclaim at DEF_PRIORITY is a normal
2073 * event that can occur when there is little memory pressure e.g.
2074 * multiple streaming readers/writers. Hence, we do not abort scanning
2075 * when the requested number of pages are reclaimed when scanning at
2076 * DEF_PRIORITY on the assumption that the fact we are direct
2077 * reclaiming implies that kswapd is not keeping up and it is best to
2078 * do a batch of work at once. For memcg reclaim one check is made to
2079 * abort proportional reclaim if either the file or anon lru has already
2080 * dropped to zero at the first pass.
2081 */
2082 scan_adjusted = (global_reclaim(sc) && !current_is_kswapd() &&
2083 sc->priority == DEF_PRIORITY);
2084
9b4f98cd
JW
2085 blk_start_plug(&plug);
2086 while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
2087 nr[LRU_INACTIVE_FILE]) {
e82e0561
MG
2088 unsigned long nr_anon, nr_file, percentage;
2089 unsigned long nr_scanned;
2090
9b4f98cd
JW
2091 for_each_evictable_lru(lru) {
2092 if (nr[lru]) {
2093 nr_to_scan = min(nr[lru], SWAP_CLUSTER_MAX);
2094 nr[lru] -= nr_to_scan;
2095
2096 nr_reclaimed += shrink_list(lru, nr_to_scan,
2097 lruvec, sc);
2098 }
2099 }
e82e0561
MG
2100
2101 if (nr_reclaimed < nr_to_reclaim || scan_adjusted)
2102 continue;
2103
e82e0561
MG
2104 /*
2105 * For kswapd and memcg, reclaim at least the number of pages
1a501907 2106 * requested. Ensure that the anon and file LRUs are scanned
e82e0561
MG
 2107 * proportionally to what was requested by get_scan_count(). We
 2108 * stop reclaiming one LRU and reduce the amount of scanning
2109 * proportional to the original scan target.
2110 */
2111 nr_file = nr[LRU_INACTIVE_FILE] + nr[LRU_ACTIVE_FILE];
2112 nr_anon = nr[LRU_INACTIVE_ANON] + nr[LRU_ACTIVE_ANON];
2113
1a501907
MG
2114 /*
2115 * It's just vindictive to attack the larger once the smaller
2116 * has gone to zero. And given the way we stop scanning the
2117 * smaller below, this makes sure that we only make one nudge
2118 * towards proportionality once we've got nr_to_reclaim.
2119 */
2120 if (!nr_file || !nr_anon)
2121 break;
2122
e82e0561
MG
2123 if (nr_file > nr_anon) {
2124 unsigned long scan_target = targets[LRU_INACTIVE_ANON] +
2125 targets[LRU_ACTIVE_ANON] + 1;
2126 lru = LRU_BASE;
2127 percentage = nr_anon * 100 / scan_target;
2128 } else {
2129 unsigned long scan_target = targets[LRU_INACTIVE_FILE] +
2130 targets[LRU_ACTIVE_FILE] + 1;
2131 lru = LRU_FILE;
2132 percentage = nr_file * 100 / scan_target;
2133 }
2134
2135 /* Stop scanning the smaller of the LRU */
2136 nr[lru] = 0;
2137 nr[lru + LRU_ACTIVE] = 0;
2138
2139 /*
2140 * Recalculate the other LRU scan count based on its original
2141 * scan target and the percentage scanning already complete
2142 */
2143 lru = (lru == LRU_FILE) ? LRU_BASE : LRU_FILE;
2144 nr_scanned = targets[lru] - nr[lru];
2145 nr[lru] = targets[lru] * (100 - percentage) / 100;
2146 nr[lru] -= min(nr[lru], nr_scanned);
2147
2148 lru += LRU_ACTIVE;
2149 nr_scanned = targets[lru] - nr[lru];
2150 nr[lru] = targets[lru] * (100 - percentage) / 100;
2151 nr[lru] -= min(nr[lru], nr_scanned);
2152
2153 scan_adjusted = true;
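
		/*
		 * Illustration of the adjustment above (hypothetical numbers):
		 * if nr_to_reclaim was met when only ~26% of the smaller (say,
		 * anon) target had been scanned, percentage is ~74 and the
		 * remaining file targets are trimmed to 100 - 74 = 26% of
		 * their originals minus what was already scanned, so both LRU
		 * types end up scanned in roughly equal proportion.
		 */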
9b4f98cd
JW
2154 }
2155 blk_finish_plug(&plug);
2156 sc->nr_reclaimed += nr_reclaimed;
2157
2158 /*
2159 * Even if we did not try to evict anon pages at all, we want to
2160 * rebalance the anon lru active/inactive ratio.
2161 */
2162 if (inactive_anon_is_low(lruvec))
2163 shrink_active_list(SWAP_CLUSTER_MAX, lruvec,
2164 sc, LRU_ACTIVE_ANON);
2165
2166 throttle_vm_writeout(sc->gfp_mask);
2167}
2168
23b9da55 2169/* Use reclaim/compaction for costly allocs or under memory pressure */
9e3b2f8c 2170static bool in_reclaim_compaction(struct scan_control *sc)
23b9da55 2171{
d84da3f9 2172 if (IS_ENABLED(CONFIG_COMPACTION) && sc->order &&
23b9da55 2173 (sc->order > PAGE_ALLOC_COSTLY_ORDER ||
9e3b2f8c 2174 sc->priority < DEF_PRIORITY - 2))
23b9da55
MG
2175 return true;
2176
2177 return false;
2178}
2179
3e7d3449 2180/*
23b9da55
MG
2181 * Reclaim/compaction is used for high-order allocation requests. It reclaims
2182 * order-0 pages before compacting the zone. should_continue_reclaim() returns
2183 * true if more pages should be reclaimed such that when the page allocator
2184 * calls try_to_compact_zone() that it will have enough free pages to succeed.
2185 * It will give up earlier than that if there is difficulty reclaiming pages.
3e7d3449 2186 */
9b4f98cd 2187static inline bool should_continue_reclaim(struct zone *zone,
3e7d3449
MG
2188 unsigned long nr_reclaimed,
2189 unsigned long nr_scanned,
2190 struct scan_control *sc)
2191{
2192 unsigned long pages_for_compaction;
2193 unsigned long inactive_lru_pages;
2194
2195 /* If not in reclaim/compaction mode, stop */
9e3b2f8c 2196 if (!in_reclaim_compaction(sc))
3e7d3449
MG
2197 return false;
2198
2876592f
MG
2199 /* Consider stopping depending on scan and reclaim activity */
2200 if (sc->gfp_mask & __GFP_REPEAT) {
2201 /*
2202 * For __GFP_REPEAT allocations, stop reclaiming if the
2203 * full LRU list has been scanned and we are still failing
2204 * to reclaim pages. This full LRU scan is potentially
2205 * expensive but a __GFP_REPEAT caller really wants to succeed
2206 */
2207 if (!nr_reclaimed && !nr_scanned)
2208 return false;
2209 } else {
2210 /*
2211 * For non-__GFP_REPEAT allocations which can presumably
2212 * fail without consequence, stop if we failed to reclaim
2213 * any pages from the last SWAP_CLUSTER_MAX number of
2214 * pages that were scanned. This will return to the
 2215 * caller faster at the risk that reclaim/compaction and
 2216 * the resulting allocation attempt fail.
2217 */
2218 if (!nr_reclaimed)
2219 return false;
2220 }
3e7d3449
MG
2221
2222 /*
2223 * If we have not reclaimed enough pages for compaction and the
2224 * inactive lists are large enough, continue reclaiming
2225 */
2226 pages_for_compaction = (2UL << sc->order);
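
	/*
	 * For illustration only (not part of the source): an order-9 request,
	 * e.g. a 2MB THP with 4K pages, gives pages_for_compaction =
	 * 2 << 9 = 1024, so reclaim continues until roughly twice the
	 * allocation size has been reclaimed, provided the inactive lists
	 * still hold more than that.
	 */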
9b4f98cd 2227 inactive_lru_pages = zone_page_state(zone, NR_INACTIVE_FILE);
ec8acf20 2228 if (get_nr_swap_pages() > 0)
9b4f98cd 2229 inactive_lru_pages += zone_page_state(zone, NR_INACTIVE_ANON);
3e7d3449
MG
2230 if (sc->nr_reclaimed < pages_for_compaction &&
2231 inactive_lru_pages > pages_for_compaction)
2232 return true;
2233
2234 /* If compaction would go ahead or the allocation would succeed, stop */
9b4f98cd 2235 switch (compaction_suitable(zone, sc->order)) {
3e7d3449
MG
2236 case COMPACT_PARTIAL:
2237 case COMPACT_CONTINUE:
2238 return false;
2239 default:
2240 return true;
2241 }
2242}
2243
0608f43d 2244static void shrink_zone(struct zone *zone, struct scan_control *sc)
1da177e4 2245{
f0fdc5e8 2246 unsigned long nr_reclaimed, nr_scanned;
1da177e4 2247
9b4f98cd
JW
2248 do {
2249 struct mem_cgroup *root = sc->target_mem_cgroup;
2250 struct mem_cgroup_reclaim_cookie reclaim = {
2251 .zone = zone,
2252 .priority = sc->priority,
2253 };
694fbc0f 2254 struct mem_cgroup *memcg;
3e7d3449 2255
9b4f98cd
JW
2256 nr_reclaimed = sc->nr_reclaimed;
2257 nr_scanned = sc->nr_scanned;
1da177e4 2258
694fbc0f
AM
2259 memcg = mem_cgroup_iter(root, NULL, &reclaim);
2260 do {
9b4f98cd 2261 struct lruvec *lruvec;
5660048c 2262
9b4f98cd 2263 lruvec = mem_cgroup_zone_lruvec(zone, memcg);
f9be23d6 2264
688eb988 2265 sc->swappiness = mem_cgroup_swappiness(memcg);
9b4f98cd 2266 shrink_lruvec(lruvec, sc);
f16015fb 2267
9b4f98cd 2268 /*
a394cb8e
MH
2269 * Direct reclaim and kswapd have to scan all memory
2270 * cgroups to fulfill the overall scan target for the
9b4f98cd 2271 * zone.
a394cb8e
MH
2272 *
2273 * Limit reclaim, on the other hand, only cares about
2274 * nr_to_reclaim pages to be reclaimed and it will
2275 * retry with decreasing priority if one round over the
2276 * whole hierarchy is not sufficient.
9b4f98cd 2277 */
a394cb8e
MH
2278 if (!global_reclaim(sc) &&
2279 sc->nr_reclaimed >= sc->nr_to_reclaim) {
9b4f98cd
JW
2280 mem_cgroup_iter_break(root, memcg);
2281 break;
2282 }
694fbc0f
AM
2283 memcg = mem_cgroup_iter(root, memcg, &reclaim);
2284 } while (memcg);
70ddf637
AV
2285
2286 vmpressure(sc->gfp_mask, sc->target_mem_cgroup,
2287 sc->nr_scanned - nr_scanned,
2288 sc->nr_reclaimed - nr_reclaimed);
2289
9b4f98cd
JW
2290 } while (should_continue_reclaim(zone, sc->nr_reclaimed - nr_reclaimed,
2291 sc->nr_scanned - nr_scanned, sc));
f16015fb
JW
2292}
2293
fe4b1b24
MG
2294/* Returns true if compaction should go ahead for a high-order request */
2295static inline bool compaction_ready(struct zone *zone, struct scan_control *sc)
2296{
2297 unsigned long balance_gap, watermark;
2298 bool watermark_ok;
2299
2300 /* Do not consider compaction for orders reclaim is meant to satisfy */
2301 if (sc->order <= PAGE_ALLOC_COSTLY_ORDER)
2302 return false;
2303
2304 /*
2305 * Compaction takes time to run and there are potentially other
2306 * callers using the pages just freed. Continue reclaiming until
2307 * there is a buffer of free pages available to give compaction
2308 * a reasonable chance of completing and allocating the page
2309 */
4be89a34
JZ
2310 balance_gap = min(low_wmark_pages(zone), DIV_ROUND_UP(
2311 zone->managed_pages, KSWAPD_ZONE_BALANCE_GAP_RATIO));
fe4b1b24
MG
2312 watermark = high_wmark_pages(zone) + balance_gap + (2UL << sc->order);
2313 watermark_ok = zone_watermark_ok_safe(zone, 0, watermark, 0, 0);
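
	/*
	 * Rough illustration (hypothetical zone, assuming a balance gap ratio
	 * of 100): a 4GB zone (~1M pages) with a low watermark of 8000 pages
	 * gets balance_gap = min(8000, 1048576 / 100) = 8000, so an order-9
	 * request keeps reclaiming until free pages exceed
	 * high_wmark + 8000 + 1024 before compaction is considered ready.
	 */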
2314
2315 /*
2316 * If compaction is deferred, reclaim up to a point where
2317 * compaction will have a chance of success when re-enabled
2318 */
aff62249 2319 if (compaction_deferred(zone, sc->order))
fe4b1b24
MG
2320 return watermark_ok;
2321
2322 /* If compaction is not ready to start, keep reclaiming */
2323 if (!compaction_suitable(zone, sc->order))
2324 return false;
2325
2326 return watermark_ok;
2327}
2328
1da177e4
LT
2329/*
2330 * This is the direct reclaim path, for page-allocating processes. We only
2331 * try to reclaim pages from zones which will satisfy the caller's allocation
2332 * request.
2333 *
41858966
MG
2334 * We reclaim from a zone even if that zone is over high_wmark_pages(zone).
2335 * Because:
1da177e4
LT
2336 * a) The caller may be trying to free *extra* pages to satisfy a higher-order
2337 * allocation or
41858966
MG
2338 * b) The target zone may be at high_wmark_pages(zone) but the lower zones
2339 * must go *over* high_wmark_pages(zone) to satisfy the `incremental min'
2340 * zone defense algorithm.
1da177e4 2341 *
1da177e4
LT
2342 * If a zone is deemed to be full of pinned pages then just give it a light
 2343 * scan and then give up on it.
e0c23279
MG
2344 *
2345 * This function returns true if a zone is being reclaimed for a costly
fe4b1b24 2346 * high-order allocation and compaction is ready to begin. This indicates to
0cee34fd
MG
2347 * the caller that it should consider retrying the allocation instead of
2348 * further reclaim.
1da177e4 2349 */
3115cd91 2350static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
1da177e4 2351{
dd1a239f 2352 struct zoneref *z;
54a6eb5c 2353 struct zone *zone;
0608f43d
AM
2354 unsigned long nr_soft_reclaimed;
2355 unsigned long nr_soft_scanned;
65ec02cb 2356 unsigned long lru_pages = 0;
0cee34fd 2357 bool aborted_reclaim = false;
65ec02cb 2358 struct reclaim_state *reclaim_state = current->reclaim_state;
619d0d76 2359 gfp_t orig_mask;
3115cd91
VD
2360 struct shrink_control shrink = {
2361 .gfp_mask = sc->gfp_mask,
2362 };
9bbc04ee 2363 enum zone_type requested_highidx = gfp_zone(sc->gfp_mask);
1cfb419b 2364
cc715d99
MG
2365 /*
2366 * If the number of buffer_heads in the machine exceeds the maximum
2367 * allowed level, force direct reclaim to scan the highmem zone as
2368 * highmem pages could be pinning lowmem pages storing buffer_heads
2369 */
619d0d76 2370 orig_mask = sc->gfp_mask;
cc715d99
MG
2371 if (buffer_heads_over_limit)
2372 sc->gfp_mask |= __GFP_HIGHMEM;
2373
3115cd91 2374 nodes_clear(shrink.nodes_to_scan);
65ec02cb 2375
d4debc66
MG
2376 for_each_zone_zonelist_nodemask(zone, z, zonelist,
2377 gfp_zone(sc->gfp_mask), sc->nodemask) {
f3fe6512 2378 if (!populated_zone(zone))
1da177e4 2379 continue;
1cfb419b
KH
2380 /*
2381 * Take care memory controller reclaiming has small influence
2382 * to global LRU.
2383 */
89b5fae5 2384 if (global_reclaim(sc)) {
1cfb419b
KH
2385 if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
2386 continue;
65ec02cb
VD
2387
2388 lru_pages += zone_reclaimable_pages(zone);
3115cd91 2389 node_set(zone_to_nid(zone), shrink.nodes_to_scan);
65ec02cb 2390
6e543d57
LD
2391 if (sc->priority != DEF_PRIORITY &&
2392 !zone_reclaimable(zone))
1cfb419b 2393 continue; /* Let kswapd poll it */
d84da3f9 2394 if (IS_ENABLED(CONFIG_COMPACTION)) {
e0887c19 2395 /*
e0c23279
MG
2396 * If we already have plenty of memory free for
2397 * compaction in this zone, don't free any more.
2398 * Even though compaction is invoked for any
2399 * non-zero order, only frequent costly order
2400 * reclamation is disruptive enough to become a
c7cfa37b
CA
2401 * noticeable problem, like transparent huge
2402 * page allocations.
e0887c19 2403 */
9bbc04ee
WY
2404 if ((zonelist_zone_idx(z) <= requested_highidx)
2405 && compaction_ready(zone, sc)) {
0cee34fd 2406 aborted_reclaim = true;
e0887c19 2407 continue;
e0c23279 2408 }
e0887c19 2409 }
0608f43d
AM
2410 /*
2411 * This steals pages from memory cgroups over softlimit
2412 * and returns the number of reclaimed pages and
2413 * scanned pages. This works for global memory pressure
2414 * and balancing, not for a memcg's limit.
2415 */
2416 nr_soft_scanned = 0;
2417 nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone,
2418 sc->order, sc->gfp_mask,
2419 &nr_soft_scanned);
2420 sc->nr_reclaimed += nr_soft_reclaimed;
2421 sc->nr_scanned += nr_soft_scanned;
ac34a1a3 2422 /* need some check to avoid more shrink_zone() calls */
1cfb419b 2423 }
408d8544 2424
9e3b2f8c 2425 shrink_zone(zone, sc);
1da177e4 2426 }
e0c23279 2427
65ec02cb
VD
2428 /*
2429 * Don't shrink slabs when reclaiming memory from over limit cgroups
2430 * but do shrink slab at least once when aborting reclaim for
2431 * compaction to avoid unevenly scanning file/anon LRU pages over slab
2432 * pages.
2433 */
2434 if (global_reclaim(sc)) {
3115cd91 2435 shrink_slab(&shrink, sc->nr_scanned, lru_pages);
65ec02cb
VD
2436 if (reclaim_state) {
2437 sc->nr_reclaimed += reclaim_state->reclaimed_slab;
2438 reclaim_state->reclaimed_slab = 0;
2439 }
2440 }
2441
619d0d76
WY
2442 /*
2443 * Restore to original mask to avoid the impact on the caller if we
2444 * promoted it to __GFP_HIGHMEM.
2445 */
2446 sc->gfp_mask = orig_mask;
2447
0cee34fd 2448 return aborted_reclaim;
d1908362
MK
2449}
2450
929bea7c 2451/* All zones in zonelist are unreclaimable? */
d1908362
MK
2452static bool all_unreclaimable(struct zonelist *zonelist,
2453 struct scan_control *sc)
2454{
2455 struct zoneref *z;
2456 struct zone *zone;
d1908362
MK
2457
2458 for_each_zone_zonelist_nodemask(zone, z, zonelist,
2459 gfp_zone(sc->gfp_mask), sc->nodemask) {
2460 if (!populated_zone(zone))
2461 continue;
2462 if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
2463 continue;
6e543d57 2464 if (zone_reclaimable(zone))
929bea7c 2465 return false;
d1908362
MK
2466 }
2467
929bea7c 2468 return true;
1da177e4 2469}
4f98a2fe 2470
1da177e4
LT
2471/*
2472 * This is the main entry point to direct page reclaim.
2473 *
2474 * If a full scan of the inactive list fails to free enough memory then we
2475 * are "out of memory" and something needs to be killed.
2476 *
2477 * If the caller is !__GFP_FS then the probability of a failure is reasonably
2478 * high - the zone may be full of dirty or under-writeback pages, which this
5b0830cb
JA
2479 * caller can't do much about. We kick the writeback threads and take explicit
2480 * naps in the hope that some of these pages can be written. But if the
2481 * allocating task holds filesystem locks which prevent writeout this might not
2482 * work, and the allocation attempt will fail.
a41f24ea
NA
2483 *
2484 * returns: 0, if no pages reclaimed
2485 * else, the number of pages reclaimed
1da177e4 2486 */
dac1d27b 2487static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
3115cd91 2488 struct scan_control *sc)
1da177e4 2489{
69e05944 2490 unsigned long total_scanned = 0;
22fba335 2491 unsigned long writeback_threshold;
0cee34fd 2492 bool aborted_reclaim;
1da177e4 2493
873b4771
KK
2494 delayacct_freepages_start();
2495
89b5fae5 2496 if (global_reclaim(sc))
1cfb419b 2497 count_vm_event(ALLOCSTALL);
1da177e4 2498
9e3b2f8c 2499 do {
70ddf637
AV
2500 vmpressure_prio(sc->gfp_mask, sc->target_mem_cgroup,
2501 sc->priority);
66e1707b 2502 sc->nr_scanned = 0;
3115cd91 2503 aborted_reclaim = shrink_zones(zonelist, sc);
c6a8a8c5 2504
66e1707b 2505 total_scanned += sc->nr_scanned;
bb21c7ce 2506 if (sc->nr_reclaimed >= sc->nr_to_reclaim)
1da177e4 2507 goto out;
1da177e4 2508
0e50ce3b
MK
2509 /*
2510 * If we're getting trouble reclaiming, start doing
2511 * writepage even in laptop mode.
2512 */
2513 if (sc->priority < DEF_PRIORITY - 2)
2514 sc->may_writepage = 1;
2515
1da177e4
LT
2516 /*
2517 * Try to write back as many pages as we just scanned. This
2518 * tends to cause slow streaming writers to write data to the
2519 * disk smoothly, at the dirtying rate, which is nice. But
2520 * that's undesirable in laptop mode, where we *want* lumpy
2521 * writeout. So in laptop mode, write out the whole world.
2522 */
22fba335
KM
2523 writeback_threshold = sc->nr_to_reclaim + sc->nr_to_reclaim / 2;
2524 if (total_scanned > writeback_threshold) {
0e175a18
CW
2525 wakeup_flusher_threads(laptop_mode ? 0 : total_scanned,
2526 WB_REASON_TRY_TO_FREE_PAGES);
66e1707b 2527 sc->may_writepage = 1;
1da177e4 2528 }
5a1c9cbc 2529 } while (--sc->priority >= 0 && !aborted_reclaim);
bb21c7ce 2530
1da177e4 2531out:
873b4771
KK
2532 delayacct_freepages_end();
2533
bb21c7ce
KM
2534 if (sc->nr_reclaimed)
2535 return sc->nr_reclaimed;
2536
0cee34fd
MG
2537 /* Aborted reclaim to try compaction? don't OOM, then */
2538 if (aborted_reclaim)
7335084d
MG
2539 return 1;
2540
bb21c7ce 2541 /* top priority shrink_zones still had more to do? don't OOM, then */
89b5fae5 2542 if (global_reclaim(sc) && !all_unreclaimable(zonelist, sc))
bb21c7ce
KM
2543 return 1;
2544
2545 return 0;
1da177e4
LT
2546}
2547
5515061d
MG
2548static bool pfmemalloc_watermark_ok(pg_data_t *pgdat)
2549{
2550 struct zone *zone;
2551 unsigned long pfmemalloc_reserve = 0;
2552 unsigned long free_pages = 0;
2553 int i;
2554 bool wmark_ok;
2555
2556 for (i = 0; i <= ZONE_NORMAL; i++) {
2557 zone = &pgdat->node_zones[i];
675becce
MG
2558 if (!populated_zone(zone))
2559 continue;
2560
5515061d
MG
2561 pfmemalloc_reserve += min_wmark_pages(zone);
2562 free_pages += zone_page_state(zone, NR_FREE_PAGES);
2563 }
2564
675becce
MG
2565 /* If there are no reserves (unexpected config) then do not throttle */
2566 if (!pfmemalloc_reserve)
2567 return true;
2568
5515061d
MG
2569 wmark_ok = free_pages > pfmemalloc_reserve / 2;
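
	/*
	 * Illustration (hypothetical numbers): if ZONE_DMA and ZONE_NORMAL
	 * have min watermarks of 128 and 4096 pages, pfmemalloc_reserve is
	 * 4224 and direct reclaimers start being throttled once free pages
	 * in those zones drop to 2112 or fewer.
	 */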
2570
2571 /* kswapd must be awake if processes are being throttled */
2572 if (!wmark_ok && waitqueue_active(&pgdat->kswapd_wait)) {
2573 pgdat->classzone_idx = min(pgdat->classzone_idx,
2574 (enum zone_type)ZONE_NORMAL);
2575 wake_up_interruptible(&pgdat->kswapd_wait);
2576 }
2577
2578 return wmark_ok;
2579}
2580
2581/*
2582 * Throttle direct reclaimers if backing storage is backed by the network
2583 * and the PFMEMALLOC reserve for the preferred node is getting dangerously
2584 * depleted. kswapd will continue to make progress and wake the processes
50694c28
MG
2585 * when the low watermark is reached.
2586 *
2587 * Returns true if a fatal signal was delivered during throttling. If this
2588 * happens, the page allocator should not consider triggering the OOM killer.
5515061d 2589 */
50694c28 2590static bool throttle_direct_reclaim(gfp_t gfp_mask, struct zonelist *zonelist,
5515061d
MG
2591 nodemask_t *nodemask)
2592{
675becce 2593 struct zoneref *z;
5515061d 2594 struct zone *zone;
675becce 2595 pg_data_t *pgdat = NULL;
5515061d
MG
2596
2597 /*
2598 * Kernel threads should not be throttled as they may be indirectly
2599 * responsible for cleaning pages necessary for reclaim to make forward
2600 * progress. kjournald for example may enter direct reclaim while
 2601 * committing a transaction where throttling it could force other
2602 * processes to block on log_wait_commit().
2603 */
2604 if (current->flags & PF_KTHREAD)
50694c28
MG
2605 goto out;
2606
2607 /*
2608 * If a fatal signal is pending, this process should not throttle.
2609 * It should return quickly so it can exit and free its memory
2610 */
2611 if (fatal_signal_pending(current))
2612 goto out;
5515061d 2613
675becce
MG
2614 /*
2615 * Check if the pfmemalloc reserves are ok by finding the first node
2616 * with a usable ZONE_NORMAL or lower zone. The expectation is that
2617 * GFP_KERNEL will be required for allocating network buffers when
2618 * swapping over the network so ZONE_HIGHMEM is unusable.
2619 *
2620 * Throttling is based on the first usable node and throttled processes
2621 * wait on a queue until kswapd makes progress and wakes them. There
2622 * is an affinity then between processes waking up and where reclaim
2623 * progress has been made assuming the process wakes on the same node.
2624 * More importantly, processes running on remote nodes will not compete
2625 * for remote pfmemalloc reserves and processes on different nodes
2626 * should make reasonable progress.
2627 */
2628 for_each_zone_zonelist_nodemask(zone, z, zonelist,
2629 gfp_mask, nodemask) {
2630 if (zone_idx(zone) > ZONE_NORMAL)
2631 continue;
2632
2633 /* Throttle based on the first usable node */
2634 pgdat = zone->zone_pgdat;
2635 if (pfmemalloc_watermark_ok(pgdat))
2636 goto out;
2637 break;
2638 }
2639
2640 /* If no zone was usable by the allocation flags then do not throttle */
2641 if (!pgdat)
50694c28 2642 goto out;
5515061d 2643
68243e76
MG
2644 /* Account for the throttling */
2645 count_vm_event(PGSCAN_DIRECT_THROTTLE);
2646
5515061d
MG
2647 /*
2648 * If the caller cannot enter the filesystem, it's possible that it
2649 * is due to the caller holding an FS lock or performing a journal
2650 * transaction in the case of a filesystem like ext[3|4]. In this case,
2651 * it is not safe to block on pfmemalloc_wait as kswapd could be
2652 * blocked waiting on the same lock. Instead, throttle for up to a
2653 * second before continuing.
2654 */
2655 if (!(gfp_mask & __GFP_FS)) {
2656 wait_event_interruptible_timeout(pgdat->pfmemalloc_wait,
2657 pfmemalloc_watermark_ok(pgdat), HZ);
50694c28
MG
2658
2659 goto check_pending;
5515061d
MG
2660 }
2661
2662 /* Throttle until kswapd wakes the process */
2663 wait_event_killable(zone->zone_pgdat->pfmemalloc_wait,
2664 pfmemalloc_watermark_ok(pgdat));
50694c28
MG
2665
2666check_pending:
2667 if (fatal_signal_pending(current))
2668 return true;
2669
2670out:
2671 return false;
5515061d
MG
2672}
2673
dac1d27b 2674unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
327c0e96 2675 gfp_t gfp_mask, nodemask_t *nodemask)
66e1707b 2676{
33906bc5 2677 unsigned long nr_reclaimed;
66e1707b 2678 struct scan_control sc = {
21caf2fc 2679 .gfp_mask = (gfp_mask = memalloc_noio_flags(gfp_mask)),
66e1707b 2680 .may_writepage = !laptop_mode,
22fba335 2681 .nr_to_reclaim = SWAP_CLUSTER_MAX,
a6dc60f8 2682 .may_unmap = 1,
2e2e4259 2683 .may_swap = 1,
66e1707b 2684 .order = order,
9e3b2f8c 2685 .priority = DEF_PRIORITY,
f16015fb 2686 .target_mem_cgroup = NULL,
327c0e96 2687 .nodemask = nodemask,
66e1707b
BS
2688 };
2689
5515061d 2690 /*
50694c28
MG
2691 * Do not enter reclaim if fatal signal was delivered while throttled.
2692 * 1 is returned so that the page allocator does not OOM kill at this
2693 * point.
5515061d 2694 */
50694c28 2695 if (throttle_direct_reclaim(gfp_mask, zonelist, nodemask))
5515061d
MG
2696 return 1;
2697
33906bc5
MG
2698 trace_mm_vmscan_direct_reclaim_begin(order,
2699 sc.may_writepage,
2700 gfp_mask);
2701
3115cd91 2702 nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
33906bc5
MG
2703
2704 trace_mm_vmscan_direct_reclaim_end(nr_reclaimed);
2705
2706 return nr_reclaimed;
66e1707b
BS
2707}
2708
c255a458 2709#ifdef CONFIG_MEMCG
66e1707b 2710
72835c86 2711unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *memcg,
4e416953 2712 gfp_t gfp_mask, bool noswap,
0ae5e89c
YH
2713 struct zone *zone,
2714 unsigned long *nr_scanned)
4e416953
BS
2715{
2716 struct scan_control sc = {
0ae5e89c 2717 .nr_scanned = 0,
b8f5c566 2718 .nr_to_reclaim = SWAP_CLUSTER_MAX,
4e416953
BS
2719 .may_writepage = !laptop_mode,
2720 .may_unmap = 1,
2721 .may_swap = !noswap,
4e416953 2722 .order = 0,
9e3b2f8c 2723 .priority = 0,
688eb988 2724 .swappiness = mem_cgroup_swappiness(memcg),
72835c86 2725 .target_mem_cgroup = memcg,
4e416953 2726 };
f9be23d6 2727 struct lruvec *lruvec = mem_cgroup_zone_lruvec(zone, memcg);
0ae5e89c 2728
4e416953
BS
2729 sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
2730 (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
bdce6d9e 2731
9e3b2f8c 2732 trace_mm_vmscan_memcg_softlimit_reclaim_begin(sc.order,
bdce6d9e
KM
2733 sc.may_writepage,
2734 sc.gfp_mask);
2735
4e416953
BS
2736 /*
2737 * NOTE: Although we can get the priority field, using it
2738 * here is not a good idea, since it limits the pages we can scan.
 2739 * If we don't reclaim here, the shrink_zone from balance_pgdat
 2740 * will pick up pages from other mem cgroups as well. We hack
2741 * the priority and make it zero.
2742 */
f9be23d6 2743 shrink_lruvec(lruvec, &sc);
bdce6d9e
KM
2744
2745 trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed);
2746
0ae5e89c 2747 *nr_scanned = sc.nr_scanned;
4e416953
BS
2748 return sc.nr_reclaimed;
2749}
2750
72835c86 2751unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
a7885eb8 2752 gfp_t gfp_mask,
185efc0f 2753 bool noswap)
66e1707b 2754{
4e416953 2755 struct zonelist *zonelist;
bdce6d9e 2756 unsigned long nr_reclaimed;
889976db 2757 int nid;
66e1707b 2758 struct scan_control sc = {
66e1707b 2759 .may_writepage = !laptop_mode,
a6dc60f8 2760 .may_unmap = 1,
2e2e4259 2761 .may_swap = !noswap,
22fba335 2762 .nr_to_reclaim = SWAP_CLUSTER_MAX,
66e1707b 2763 .order = 0,
9e3b2f8c 2764 .priority = DEF_PRIORITY,
72835c86 2765 .target_mem_cgroup = memcg,
327c0e96 2766 .nodemask = NULL, /* we don't care the placement */
a09ed5e0
YH
2767 .gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
2768 (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK),
2769 };
66e1707b 2770
889976db
YH
2771 /*
2772 * Unlike direct reclaim via alloc_pages(), memcg's reclaim doesn't
 2773 * care about where we get pages from. So the node where we start the
2774 * scan does not need to be the current node.
2775 */
72835c86 2776 nid = mem_cgroup_select_victim_node(memcg);
889976db
YH
2777
2778 zonelist = NODE_DATA(nid)->node_zonelists;
bdce6d9e
KM
2779
2780 trace_mm_vmscan_memcg_reclaim_begin(0,
2781 sc.may_writepage,
2782 sc.gfp_mask);
2783
3115cd91 2784 nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
bdce6d9e
KM
2785
2786 trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed);
2787
2788 return nr_reclaimed;
66e1707b
BS
2789}
2790#endif
2791
9e3b2f8c 2792static void age_active_anon(struct zone *zone, struct scan_control *sc)
f16015fb 2793{
b95a2f2d 2794 struct mem_cgroup *memcg;
f16015fb 2795
b95a2f2d
JW
2796 if (!total_swap_pages)
2797 return;
2798
2799 memcg = mem_cgroup_iter(NULL, NULL, NULL);
2800 do {
c56d5c7d 2801 struct lruvec *lruvec = mem_cgroup_zone_lruvec(zone, memcg);
b95a2f2d 2802
c56d5c7d 2803 if (inactive_anon_is_low(lruvec))
1a93be0e 2804 shrink_active_list(SWAP_CLUSTER_MAX, lruvec,
9e3b2f8c 2805 sc, LRU_ACTIVE_ANON);
b95a2f2d
JW
2806
2807 memcg = mem_cgroup_iter(NULL, memcg, NULL);
2808 } while (memcg);
f16015fb
JW
2809}
2810
60cefed4
JW
2811static bool zone_balanced(struct zone *zone, int order,
2812 unsigned long balance_gap, int classzone_idx)
2813{
2814 if (!zone_watermark_ok_safe(zone, order, high_wmark_pages(zone) +
2815 balance_gap, classzone_idx, 0))
2816 return false;
2817
d84da3f9
KS
2818 if (IS_ENABLED(CONFIG_COMPACTION) && order &&
2819 !compaction_suitable(zone, order))
60cefed4
JW
2820 return false;
2821
2822 return true;
2823}
2824
1741c877 2825/*
4ae0a48b
ZC
2826 * pgdat_balanced() is used when checking if a node is balanced.
2827 *
2828 * For order-0, all zones must be balanced!
2829 *
2830 * For high-order allocations only zones that meet watermarks and are in a
 2831 * zone allowed by the caller's classzone_idx are added to balanced_pages. The
2832 * total of balanced pages must be at least 25% of the zones allowed by
2833 * classzone_idx for the node to be considered balanced. Forcing all zones to
2834 * be balanced for high orders can cause excessive reclaim when there are
2835 * imbalanced zones.
1741c877
MG
2836 * The choice of 25% is due to
2837 * o a 16M DMA zone that is balanced will not balance a zone on any
2838 * reasonable sized machine
2839 * o On all other machines, the top zone must be at least a reasonable
25985edc 2840 * percentage of the middle zones. For example, on 32-bit x86, highmem
1741c877
MG
 2841 * would need to be at least 256M for it to balance a whole node.
2842 * Similarly, on x86-64 the Normal zone would need to be at least 1G
2843 * to balance a node on its own. These seemed like reasonable ratios.
2844 */
4ae0a48b 2845static bool pgdat_balanced(pg_data_t *pgdat, int order, int classzone_idx)
1741c877 2846{
b40da049 2847 unsigned long managed_pages = 0;
4ae0a48b 2848 unsigned long balanced_pages = 0;
1741c877
MG
2849 int i;
2850
4ae0a48b
ZC
2851 /* Check the watermark levels */
2852 for (i = 0; i <= classzone_idx; i++) {
2853 struct zone *zone = pgdat->node_zones + i;
1741c877 2854
4ae0a48b
ZC
2855 if (!populated_zone(zone))
2856 continue;
2857
b40da049 2858 managed_pages += zone->managed_pages;
4ae0a48b
ZC
2859
2860 /*
2861 * A special case here:
2862 *
2863 * balance_pgdat() skips over all_unreclaimable after
2864 * DEF_PRIORITY. Effectively, it considers them balanced so
2865 * they must be considered balanced here as well!
2866 */
6e543d57 2867 if (!zone_reclaimable(zone)) {
b40da049 2868 balanced_pages += zone->managed_pages;
4ae0a48b
ZC
2869 continue;
2870 }
2871
2872 if (zone_balanced(zone, order, 0, i))
b40da049 2873 balanced_pages += zone->managed_pages;
4ae0a48b
ZC
2874 else if (!order)
2875 return false;
2876 }
2877
2878 if (order)
b40da049 2879 return balanced_pages >= (managed_pages >> 2);
4ae0a48b
ZC
2880 else
2881 return true;
1741c877
MG
2882}
2883
5515061d
MG
2884/*
2885 * Prepare kswapd for sleeping. This verifies that there are no processes
2886 * waiting in throttle_direct_reclaim() and that watermarks have been met.
2887 *
2888 * Returns true if kswapd is ready to sleep
2889 */
2890static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order, long remaining,
dc83edd9 2891 int classzone_idx)
f50de2d3 2892{
f50de2d3
MG
2893 /* If a direct reclaimer woke kswapd within HZ/10, it's premature */
2894 if (remaining)
5515061d
MG
2895 return false;
2896
2897 /*
2898 * There is a potential race between when kswapd checks its watermarks
2899 * and a process gets throttled. There is also a potential race if
 2900 * processes get throttled, kswapd wakes, a large process exits, thereby
 2901 * balancing the zones, which causes kswapd to miss a wakeup. If kswapd
2902 * is going to sleep, no process should be sleeping on pfmemalloc_wait
2903 * so wake them now if necessary. If necessary, processes will wake
2904 * kswapd and get throttled again
2905 */
2906 if (waitqueue_active(&pgdat->pfmemalloc_wait)) {
2907 wake_up(&pgdat->pfmemalloc_wait);
2908 return false;
2909 }
f50de2d3 2910
4ae0a48b 2911 return pgdat_balanced(pgdat, order, classzone_idx);
f50de2d3
MG
2912}
2913
75485363
MG
2914/*
2915 * kswapd shrinks the zone by the number of pages required to reach
2916 * the high watermark.
b8e83b94
MG
2917 *
2918 * Returns true if kswapd scanned at least the requested number of pages to
283aba9f
MG
2919 * reclaim or if the lack of progress was due to pages under writeback.
2920 * This is used to determine if the scanning priority needs to be raised.
75485363 2921 */
b8e83b94 2922static bool kswapd_shrink_zone(struct zone *zone,
7c954f6d 2923 int classzone_idx,
75485363 2924 struct scan_control *sc,
2ab44f43
MG
2925 unsigned long lru_pages,
2926 unsigned long *nr_attempted)
75485363 2927{
7c954f6d
MG
2928 int testorder = sc->order;
2929 unsigned long balance_gap;
75485363
MG
2930 struct reclaim_state *reclaim_state = current->reclaim_state;
2931 struct shrink_control shrink = {
2932 .gfp_mask = sc->gfp_mask,
2933 };
7c954f6d 2934 bool lowmem_pressure;
75485363
MG
2935
2936 /* Reclaim above the high watermark. */
2937 sc->nr_to_reclaim = max(SWAP_CLUSTER_MAX, high_wmark_pages(zone));
7c954f6d
MG
2938
2939 /*
2940 * Kswapd reclaims only single pages with compaction enabled. Trying
2941 * too hard to reclaim until contiguous free pages have become
2942 * available can hurt performance by evicting too much useful data
2943 * from memory. Do not reclaim more than needed for compaction.
2944 */
2945 if (IS_ENABLED(CONFIG_COMPACTION) && sc->order &&
2946 compaction_suitable(zone, sc->order) !=
2947 COMPACT_SKIPPED)
2948 testorder = 0;
2949
2950 /*
2951 * We put equal pressure on every zone, unless one zone has way too
2952 * many pages free already. The "too many pages" is defined as the
2953 * high wmark plus a "gap" where the gap is either the low
2954 * watermark or 1% of the zone, whichever is smaller.
2955 */
4be89a34
JZ
2956 balance_gap = min(low_wmark_pages(zone), DIV_ROUND_UP(
2957 zone->managed_pages, KSWAPD_ZONE_BALANCE_GAP_RATIO));
7c954f6d
MG
2958
2959 /*
2960 * If there is no low memory pressure or the zone is balanced then no
2961 * reclaim is necessary
2962 */
2963 lowmem_pressure = (buffer_heads_over_limit && is_highmem(zone));
2964 if (!lowmem_pressure && zone_balanced(zone, testorder,
2965 balance_gap, classzone_idx))
2966 return true;
2967
75485363 2968 shrink_zone(zone, sc);
0ce3d744
DC
2969 nodes_clear(shrink.nodes_to_scan);
2970 node_set(zone_to_nid(zone), shrink.nodes_to_scan);
75485363
MG
2971
2972 reclaim_state->reclaimed_slab = 0;
6e543d57 2973 shrink_slab(&shrink, sc->nr_scanned, lru_pages);
75485363
MG
2974 sc->nr_reclaimed += reclaim_state->reclaimed_slab;
2975
2ab44f43
MG
2976 /* Account for the number of pages attempted to reclaim */
2977 *nr_attempted += sc->nr_to_reclaim;
2978
283aba9f
MG
2979 zone_clear_flag(zone, ZONE_WRITEBACK);
2980
7c954f6d
MG
2981 /*
2982 * If a zone reaches its high watermark, consider it to be no longer
2983 * congested. It's possible there are dirty pages backed by congested
2984 * BDIs but as pressure is relieved, speculatively avoid congestion
2985 * waits.
2986 */
6e543d57 2987 if (zone_reclaimable(zone) &&
7c954f6d
MG
2988 zone_balanced(zone, testorder, 0, classzone_idx)) {
2989 zone_clear_flag(zone, ZONE_CONGESTED);
2990 zone_clear_flag(zone, ZONE_TAIL_LRU_DIRTY);
2991 }
2992
b8e83b94 2993 return sc->nr_scanned >= sc->nr_to_reclaim;
75485363
MG
2994}
2995
1da177e4
LT
2996/*
2997 * For kswapd, balance_pgdat() will work across all this node's zones until
41858966 2998 * they are all at high_wmark_pages(zone).
1da177e4 2999 *
0abdee2b 3000 * Returns the final order kswapd was reclaiming at
1da177e4
LT
3001 *
3002 * There is special handling here for zones which are full of pinned pages.
3003 * This can happen if the pages are all mlocked, or if they are all used by
3004 * device drivers (say, ZONE_DMA). Or if they are all in use by hugetlb.
3005 * What we do is to detect the case where all pages in the zone have been
3006 * scanned twice and there has been zero successful reclaim. Mark the zone as
3007 * dead and from now on, only perform a short scan. Basically we're polling
3008 * the zone for when the problem goes away.
3009 *
3010 * kswapd scans the zones in the highmem->normal->dma direction. It skips
41858966
MG
3011 * zones which have free_pages > high_wmark_pages(zone), but once a zone is
3012 * found to have free_pages <= high_wmark_pages(zone), we scan that zone and the
3013 * lower zones regardless of the number of free pages in the lower zones. This
3014 * interoperates with the page allocator fallback scheme to ensure that aging
3015 * of pages is balanced across the zones.
1da177e4 3016 */
99504748 3017static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
dc83edd9 3018 int *classzone_idx)
1da177e4 3019{
1da177e4 3020 int i;
99504748 3021 int end_zone = 0; /* Inclusive. 0 = ZONE_DMA */
0608f43d
AM
3022 unsigned long nr_soft_reclaimed;
3023 unsigned long nr_soft_scanned;
179e9639
AM
3024 struct scan_control sc = {
3025 .gfp_mask = GFP_KERNEL,
b8e83b94 3026 .priority = DEF_PRIORITY,
a6dc60f8 3027 .may_unmap = 1,
2e2e4259 3028 .may_swap = 1,
b8e83b94 3029 .may_writepage = !laptop_mode,
5ad333eb 3030 .order = order,
f16015fb 3031 .target_mem_cgroup = NULL,
179e9639 3032 };
f8891e5e 3033 count_vm_event(PAGEOUTRUN);
1da177e4 3034
9e3b2f8c 3035 do {
1da177e4 3036 unsigned long lru_pages = 0;
2ab44f43 3037 unsigned long nr_attempted = 0;
b8e83b94 3038 bool raise_priority = true;
2ab44f43 3039 bool pgdat_needs_compaction = (order > 0);
b8e83b94
MG
3040
3041 sc.nr_reclaimed = 0;
1da177e4 3042
d6277db4
RW
3043 /*
3044 * Scan in the highmem->dma direction for the highest
3045 * zone which needs scanning
3046 */
3047 for (i = pgdat->nr_zones - 1; i >= 0; i--) {
3048 struct zone *zone = pgdat->node_zones + i;
1da177e4 3049
d6277db4
RW
3050 if (!populated_zone(zone))
3051 continue;
1da177e4 3052
6e543d57
LD
3053 if (sc.priority != DEF_PRIORITY &&
3054 !zone_reclaimable(zone))
d6277db4 3055 continue;
1da177e4 3056
556adecb
RR
3057 /*
3058 * Do some background aging of the anon list, to give
3059 * pages a chance to be referenced before reclaiming.
3060 */
9e3b2f8c 3061 age_active_anon(zone, &sc);
556adecb 3062
cc715d99
MG
3063 /*
3064 * If the number of buffer_heads in the machine
3065 * exceeds the maximum allowed level and this node
3066 * has a highmem zone, force kswapd to reclaim from
3067 * it to relieve lowmem pressure.
3068 */
3069 if (buffer_heads_over_limit && is_highmem_idx(i)) {
3070 end_zone = i;
3071 break;
3072 }
3073
60cefed4 3074 if (!zone_balanced(zone, order, 0, 0)) {
d6277db4 3075 end_zone = i;
e1dbeda6 3076 break;
439423f6 3077 } else {
d43006d5
MG
3078 /*
3079 * If balanced, clear the dirty and congested
3080 * flags
3081 */
439423f6 3082 zone_clear_flag(zone, ZONE_CONGESTED);
d43006d5 3083 zone_clear_flag(zone, ZONE_TAIL_LRU_DIRTY);
1da177e4 3084 }
1da177e4 3085 }
dafcb73e 3086
b8e83b94 3087 if (i < 0)
e1dbeda6
AM
3088 goto out;
3089
1da177e4
LT
3090 for (i = 0; i <= end_zone; i++) {
3091 struct zone *zone = pgdat->node_zones + i;
3092
2ab44f43
MG
3093 if (!populated_zone(zone))
3094 continue;
3095
adea02a1 3096 lru_pages += zone_reclaimable_pages(zone);
2ab44f43
MG
3097
3098 /*
3099 * If any zone is currently balanced then kswapd will
3100 * not call compaction as it is expected that the
3101 * necessary pages are already available.
3102 */
3103 if (pgdat_needs_compaction &&
3104 zone_watermark_ok(zone, order,
3105 low_wmark_pages(zone),
3106 *classzone_idx, 0))
3107 pgdat_needs_compaction = false;
1da177e4
LT
3108 }
3109
b7ea3c41
MG
3110 /*
3111 * If we're getting trouble reclaiming, start doing writepage
3112 * even in laptop mode.
3113 */
3114 if (sc.priority < DEF_PRIORITY - 2)
3115 sc.may_writepage = 1;
3116
1da177e4
LT
3117 /*
3118 * Now scan the zone in the dma->highmem direction, stopping
3119 * at the last zone which needs scanning.
3120 *
3121 * We do this because the page allocator works in the opposite
3122 * direction. This prevents the page allocator from allocating
3123 * pages behind kswapd's direction of progress, which would
3124 * cause too much scanning of the lower zones.
3125 */
3126 for (i = 0; i <= end_zone; i++) {
3127 struct zone *zone = pgdat->node_zones + i;
3128
f3fe6512 3129 if (!populated_zone(zone))
1da177e4
LT
3130 continue;
3131
6e543d57
LD
3132 if (sc.priority != DEF_PRIORITY &&
3133 !zone_reclaimable(zone))
1da177e4
LT
3134 continue;
3135
1da177e4 3136 sc.nr_scanned = 0;
4e416953 3137
0608f43d
AM
3138 nr_soft_scanned = 0;
3139 /*
3140 * Call soft limit reclaim before calling shrink_zone.
3141 */
3142 nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone,
3143 order, sc.gfp_mask,
3144 &nr_soft_scanned);
3145 sc.nr_reclaimed += nr_soft_reclaimed;
3146
32a4330d 3147 /*
7c954f6d
MG
3148 * There should be no need to raise the scanning
3149 * priority if enough pages are already being scanned
 3150 * that the high watermark would be met at 100%
3151 * efficiency.
fe2c2a10 3152 */
7c954f6d
MG
3153 if (kswapd_shrink_zone(zone, end_zone, &sc,
3154 lru_pages, &nr_attempted))
3155 raise_priority = false;
1da177e4 3156 }
5515061d
MG
3157
3158 /*
3159 * If the low watermark is met there is no need for processes
3160 * to be throttled on pfmemalloc_wait as they should not be
3161 * able to safely make forward progress. Wake them
3162 */
3163 if (waitqueue_active(&pgdat->pfmemalloc_wait) &&
3164 pfmemalloc_watermark_ok(pgdat))
3165 wake_up(&pgdat->pfmemalloc_wait);
3166
1da177e4 3167 /*
b8e83b94
MG
3168 * Fragmentation may mean that the system cannot be rebalanced
3169 * for high-order allocations in all zones. If twice the
3170 * allocation size has been reclaimed and the zones are still
3171 * not balanced then recheck the watermarks at order-0 to
3172 * prevent kswapd reclaiming excessively. Assume that a
 3173		 * process requesting a high-order allocation can use direct reclaim/compaction.
1da177e4 3174 */
b8e83b94
MG
3175 if (order && sc.nr_reclaimed >= 2UL << order)
3176 order = sc.order = 0;
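		/*
		 * Worked example (editor's illustration): for an order-3
		 * request (eight contiguous pages), 2UL << order is 16, so
		 * kswapd gives up on the high order once it has reclaimed at
		 * least twice the allocation size:
		 *
		 *	order = 3;
		 *	threshold = 2UL << order;	// 16 pages
		 *	if (nr_reclaimed >= threshold)	// e.g. 20 >= 16
		 *		order = 0;		// recheck at order-0 only
		 */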
8357376d 3177
b8e83b94
MG
3178 /* Check if kswapd should be suspending */
3179 if (try_to_freeze() || kthread_should_stop())
3180 break;
8357376d 3181
2ab44f43
MG
3182 /*
3183 * Compact if necessary and kswapd is reclaiming at least the
 3184		 * high watermark number of pages as requested.
3185 */
3186 if (pgdat_needs_compaction && sc.nr_reclaimed > nr_attempted)
3187 compact_pgdat(pgdat, order);
3188
73ce02e9 3189 /*
b8e83b94
MG
3190 * Raise priority if scanning rate is too low or there was no
3191 * progress in reclaiming pages
73ce02e9 3192 */
b8e83b94
MG
3193 if (raise_priority || !sc.nr_reclaimed)
3194 sc.priority--;
9aa41348 3195 } while (sc.priority >= 1 &&
b8e83b94 3196 !pgdat_balanced(pgdat, order, *classzone_idx));
1da177e4 3197
b8e83b94 3198out:
0abdee2b 3199 /*
5515061d 3200 * Return the order we were reclaiming at so prepare_kswapd_sleep()
0abdee2b
MG
 3201	 * can decide whether it is safe to sleep at that order. However,
 3202	 * if another caller entered the allocator slow path while kswapd
 3203	 * was awake, order will remain at the higher level.
3204 */
dc83edd9 3205 *classzone_idx = end_zone;
0abdee2b 3206 return order;
1da177e4
LT
3207}
3208
dc83edd9 3209static void kswapd_try_to_sleep(pg_data_t *pgdat, int order, int classzone_idx)
f0bc0a60
KM
3210{
3211 long remaining = 0;
3212 DEFINE_WAIT(wait);
3213
3214 if (freezing(current) || kthread_should_stop())
3215 return;
3216
3217 prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
3218
3219 /* Try to sleep for a short interval */
5515061d 3220 if (prepare_kswapd_sleep(pgdat, order, remaining, classzone_idx)) {
f0bc0a60
KM
3221 remaining = schedule_timeout(HZ/10);
3222 finish_wait(&pgdat->kswapd_wait, &wait);
3223 prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
3224 }
3225
3226 /*
3227 * After a short sleep, check if it was a premature sleep. If not, then
3228 * go fully to sleep until explicitly woken up.
3229 */
5515061d 3230 if (prepare_kswapd_sleep(pgdat, order, remaining, classzone_idx)) {
f0bc0a60
KM
3231 trace_mm_vmscan_kswapd_sleep(pgdat->node_id);
3232
3233 /*
3234 * vmstat counters are not perfectly accurate and the estimated
3235 * value for counters such as NR_FREE_PAGES can deviate from the
3236 * true value by nr_online_cpus * threshold. To avoid the zone
3237 * watermarks being breached while under pressure, we reduce the
3238 * per-cpu vmstat threshold while kswapd is awake and restore
3239 * them before going back to sleep.
3240 */
3241 set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold);
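		/*
		 * Worked example (editor's illustration): with 16 online CPUs
		 * and a per-cpu stat threshold of 32, a zone counter such as
		 * NR_FREE_PAGES can lag its true value by up to
		 * 16 * 32 = 512 pages (2MB with 4KB pages); lowering the
		 * threshold while kswapd is awake shrinks that error margin.
		 */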
1c7e7f6c 3242
62997027
MG
3243 /*
3244 * Compaction records what page blocks it recently failed to
3245 * isolate pages from and skips them in the future scanning.
3246 * When kswapd is going to sleep, it is reasonable to assume
 3247		 * that pages are free and compaction may succeed, so reset the cache.
3248 */
3249 reset_isolation_suitable(pgdat);
3250
1c7e7f6c
AK
3251 if (!kthread_should_stop())
3252 schedule();
3253
f0bc0a60
KM
3254 set_pgdat_percpu_threshold(pgdat, calculate_pressure_threshold);
3255 } else {
3256 if (remaining)
3257 count_vm_event(KSWAPD_LOW_WMARK_HIT_QUICKLY);
3258 else
3259 count_vm_event(KSWAPD_HIGH_WMARK_HIT_QUICKLY);
3260 }
3261 finish_wait(&pgdat->kswapd_wait, &wait);
3262}
3263
1da177e4
LT
3264/*
3265 * The background pageout daemon, started as a kernel thread
4f98a2fe 3266 * from the init process.
1da177e4
LT
3267 *
3268 * This basically trickles out pages so that we have _some_
3269 * free memory available even if there is no other activity
3270 * that frees anything up. This is needed for things like routing
3271 * etc, where we otherwise might have all activity going on in
3272 * asynchronous contexts that cannot page things out.
3273 *
3274 * If there are applications that are active memory-allocators
3275 * (most normal use), this basically shouldn't matter.
3276 */
3277static int kswapd(void *p)
3278{
215ddd66 3279 unsigned long order, new_order;
d2ebd0f6 3280 unsigned balanced_order;
215ddd66 3281 int classzone_idx, new_classzone_idx;
d2ebd0f6 3282 int balanced_classzone_idx;
1da177e4
LT
3283 pg_data_t *pgdat = (pg_data_t*)p;
3284 struct task_struct *tsk = current;
f0bc0a60 3285
1da177e4
LT
3286 struct reclaim_state reclaim_state = {
3287 .reclaimed_slab = 0,
3288 };
a70f7302 3289 const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
1da177e4 3290
cf40bd16
NP
3291 lockdep_set_current_reclaim_state(GFP_KERNEL);
3292
174596a0 3293 if (!cpumask_empty(cpumask))
c5f59f08 3294 set_cpus_allowed_ptr(tsk, cpumask);
1da177e4
LT
3295 current->reclaim_state = &reclaim_state;
3296
3297 /*
3298 * Tell the memory management that we're a "memory allocator",
3299 * and that if we need more memory we should get access to it
3300 * regardless (see "__alloc_pages()"). "kswapd" should
3301 * never get caught in the normal page freeing logic.
3302 *
3303 * (Kswapd normally doesn't need memory anyway, but sometimes
3304 * you need a small amount of memory in order to be able to
3305 * page out something else, and this flag essentially protects
3306 * us from recursively trying to free more memory as we're
3307 * trying to free the first piece of memory in the first place).
3308 */
930d9152 3309 tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;
83144186 3310 set_freezable();
1da177e4 3311
215ddd66 3312 order = new_order = 0;
d2ebd0f6 3313 balanced_order = 0;
215ddd66 3314 classzone_idx = new_classzone_idx = pgdat->nr_zones - 1;
d2ebd0f6 3315 balanced_classzone_idx = classzone_idx;
1da177e4 3316 for ( ; ; ) {
6f6313d4 3317 bool ret;
3e1d1d28 3318
215ddd66
MG
3319 /*
3320 * If the last balance_pgdat was unsuccessful it's unlikely a
3321 * new request of a similar or harder type will succeed soon
 3322		 * so consider going to sleep based on the order we last reclaimed at.
3323 */
d2ebd0f6
AS
3324 if (balanced_classzone_idx >= new_classzone_idx &&
3325 balanced_order == new_order) {
215ddd66
MG
3326 new_order = pgdat->kswapd_max_order;
3327 new_classzone_idx = pgdat->classzone_idx;
3328 pgdat->kswapd_max_order = 0;
3329 pgdat->classzone_idx = pgdat->nr_zones - 1;
3330 }
3331
99504748 3332 if (order < new_order || classzone_idx > new_classzone_idx) {
1da177e4
LT
3333 /*
3334 * Don't sleep if someone wants a larger 'order'
99504748 3335			 * allocation or has tighter zone constraints
1da177e4
LT
3336 */
3337 order = new_order;
99504748 3338 classzone_idx = new_classzone_idx;
1da177e4 3339 } else {
d2ebd0f6
AS
3340 kswapd_try_to_sleep(pgdat, balanced_order,
3341 balanced_classzone_idx);
1da177e4 3342 order = pgdat->kswapd_max_order;
99504748 3343 classzone_idx = pgdat->classzone_idx;
f0dfcde0
AS
3344 new_order = order;
3345 new_classzone_idx = classzone_idx;
4d40502e 3346 pgdat->kswapd_max_order = 0;
215ddd66 3347 pgdat->classzone_idx = pgdat->nr_zones - 1;
1da177e4 3348 }
1da177e4 3349
8fe23e05
DR
3350 ret = try_to_freeze();
3351 if (kthread_should_stop())
3352 break;
3353
3354 /*
3355 * We can speed up thawing tasks if we don't call balance_pgdat
3356 * after returning from the refrigerator
3357 */
33906bc5
MG
3358 if (!ret) {
3359 trace_mm_vmscan_kswapd_wake(pgdat->node_id, order);
d2ebd0f6
AS
3360 balanced_classzone_idx = classzone_idx;
3361 balanced_order = balance_pgdat(pgdat, order,
3362 &balanced_classzone_idx);
33906bc5 3363 }
1da177e4 3364 }
b0a8cc58 3365
71abdc15 3366 tsk->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD);
b0a8cc58 3367 current->reclaim_state = NULL;
71abdc15
JW
3368 lockdep_clear_current_reclaim_state();
3369
1da177e4
LT
3370 return 0;
3371}
3372
3373/*
3374 * A zone is low on free memory, so wake its kswapd task to service it.
3375 */
99504748 3376void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx)
1da177e4
LT
3377{
3378 pg_data_t *pgdat;
3379
f3fe6512 3380 if (!populated_zone(zone))
1da177e4
LT
3381 return;
3382
88f5acf8 3383 if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
1da177e4 3384 return;
88f5acf8 3385 pgdat = zone->zone_pgdat;
99504748 3386 if (pgdat->kswapd_max_order < order) {
1da177e4 3387 pgdat->kswapd_max_order = order;
99504748
MG
3388 pgdat->classzone_idx = min(pgdat->classzone_idx, classzone_idx);
3389 }
8d0986e2 3390 if (!waitqueue_active(&pgdat->kswapd_wait))
1da177e4 3391 return;
892f795d 3392 if (zone_balanced(zone, order, 0, 0))
88f5acf8
MG
3393 return;
3394
3395 trace_mm_vmscan_wakeup_kswapd(pgdat->node_id, zone_idx(zone), order);
8d0986e2 3396 wake_up_interruptible(&pgdat->kswapd_wait);
1da177e4
LT
3397}
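
/*
 * Illustrative sketch (editor's addition, not part of vmscan.c): callers in
 * the allocator slow path typically wake kswapd for every zone the request
 * could be satisfied from before falling back to direct reclaim. The helper
 * name below is invented for illustration only.
 */
static inline void wake_all_kswapds_sketch(unsigned int order,
					   struct zonelist *zonelist,
					   enum zone_type high_zoneidx,
					   struct zone *preferred_zone)
{
	struct zoneref *z;
	struct zone *zone;

	/* Nudge each eligible zone's kswapd at the requested order */
	for_each_zone_zonelist(zone, z, zonelist, high_zoneidx)
		wakeup_kswapd(zone, order, zone_idx(preferred_zone));
}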
3398
c6f37f12 3399#ifdef CONFIG_HIBERNATION
1da177e4 3400/*
7b51755c 3401 * Try to free `nr_to_reclaim' of memory, system-wide, and return the number of
d6277db4
RW
3402 * freed pages.
3403 *
3404 * Rather than trying to age LRUs the aim is to preserve the overall
3405 * LRU order by reclaiming preferentially
3406 * inactive > active > active referenced > active mapped
1da177e4 3407 */
7b51755c 3408unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
1da177e4 3409{
d6277db4 3410 struct reclaim_state reclaim_state;
d6277db4 3411 struct scan_control sc = {
7b51755c
KM
3412 .gfp_mask = GFP_HIGHUSER_MOVABLE,
3413 .may_swap = 1,
3414 .may_unmap = 1,
d6277db4 3415 .may_writepage = 1,
7b51755c
KM
3416 .nr_to_reclaim = nr_to_reclaim,
3417 .hibernation_mode = 1,
7b51755c 3418 .order = 0,
9e3b2f8c 3419 .priority = DEF_PRIORITY,
1da177e4 3420 };
a09ed5e0 3421 struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask);
7b51755c
KM
3422 struct task_struct *p = current;
3423 unsigned long nr_reclaimed;
1da177e4 3424
7b51755c
KM
3425 p->flags |= PF_MEMALLOC;
3426 lockdep_set_current_reclaim_state(sc.gfp_mask);
3427 reclaim_state.reclaimed_slab = 0;
3428 p->reclaim_state = &reclaim_state;
d6277db4 3429
3115cd91 3430 nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
d979677c 3431
7b51755c
KM
3432 p->reclaim_state = NULL;
3433 lockdep_clear_current_reclaim_state();
3434 p->flags &= ~PF_MEMALLOC;
d6277db4 3435
7b51755c 3436 return nr_reclaimed;
1da177e4 3437}
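
/*
 * Usage sketch (editor's addition): the hibernation snapshot code is the
 * expected caller; conceptually it asks for however many pages it still
 * needs to fit the image, e.g.
 *
 *	to_free = pages_needed_for_image();	// hypothetical helper
 *	freed = shrink_all_memory(to_free);
 *	pr_debug("freed %lu of %lu requested pages\n", freed, to_free);
 */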
c6f37f12 3438#endif /* CONFIG_HIBERNATION */
1da177e4 3439
1da177e4
LT
3440/* It's optimal to keep kswapds on the same CPUs as their memory, but
3441 not required for correctness. So if the last cpu in a node goes
3442 away, we get changed to run anywhere: as the first one comes back,
3443 restore their cpu bindings. */
fcb35a9b
GKH
3444static int cpu_callback(struct notifier_block *nfb, unsigned long action,
3445 void *hcpu)
1da177e4 3446{
58c0a4a7 3447 int nid;
1da177e4 3448
8bb78442 3449 if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) {
48fb2e24 3450 for_each_node_state(nid, N_MEMORY) {
c5f59f08 3451 pg_data_t *pgdat = NODE_DATA(nid);
a70f7302
RR
3452 const struct cpumask *mask;
3453
3454 mask = cpumask_of_node(pgdat->node_id);
c5f59f08 3455
3e597945 3456 if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids)
1da177e4 3457 /* One of our CPUs online: restore mask */
c5f59f08 3458 set_cpus_allowed_ptr(pgdat->kswapd, mask);
1da177e4
LT
3459 }
3460 }
3461 return NOTIFY_OK;
3462}
1da177e4 3463
3218ae14
YG
3464/*
3465 * This kswapd start function will be called by init and node-hot-add.
 3466 * On node hot-add, kswapd will be moved to the proper cpus if cpus are hot-added.
3467 */
3468int kswapd_run(int nid)
3469{
3470 pg_data_t *pgdat = NODE_DATA(nid);
3471 int ret = 0;
3472
3473 if (pgdat->kswapd)
3474 return 0;
3475
3476 pgdat->kswapd = kthread_run(kswapd, pgdat, "kswapd%d", nid);
3477 if (IS_ERR(pgdat->kswapd)) {
3478 /* failure at boot is fatal */
3479 BUG_ON(system_state == SYSTEM_BOOTING);
d5dc0ad9
GS
3480 pr_err("Failed to start kswapd on node %d\n", nid);
3481 ret = PTR_ERR(pgdat->kswapd);
d72515b8 3482 pgdat->kswapd = NULL;
3218ae14
YG
3483 }
3484 return ret;
3485}
3486
8fe23e05 3487/*
d8adde17 3488 * Called by memory hotplug when all memory in a node is offlined. Caller must
bfc8c901 3489 * hold mem_hotplug_begin/end().
8fe23e05
DR
3490 */
3491void kswapd_stop(int nid)
3492{
3493 struct task_struct *kswapd = NODE_DATA(nid)->kswapd;
3494
d8adde17 3495 if (kswapd) {
8fe23e05 3496 kthread_stop(kswapd);
d8adde17
JL
3497 NODE_DATA(nid)->kswapd = NULL;
3498 }
8fe23e05
DR
3499}
3500
1da177e4
LT
3501static int __init kswapd_init(void)
3502{
3218ae14 3503 int nid;
69e05944 3504
1da177e4 3505 swap_setup();
48fb2e24 3506 for_each_node_state(nid, N_MEMORY)
3218ae14 3507 kswapd_run(nid);
1da177e4
LT
3508 hotcpu_notifier(cpu_callback, 0);
3509 return 0;
3510}
3511
3512module_init(kswapd_init)
9eeff239
CL
3513
3514#ifdef CONFIG_NUMA
3515/*
3516 * Zone reclaim mode
3517 *
 3518 * If non-zero, call zone_reclaim() when the number of free pages falls below
3519 * the watermarks.
9eeff239
CL
3520 */
3521int zone_reclaim_mode __read_mostly;
3522
1b2ffb78 3523#define RECLAIM_OFF 0
7d03431c 3524#define RECLAIM_ZONE (1<<0) /* Run shrink_inactive_list on the zone */
1b2ffb78
CL
3525#define RECLAIM_WRITE (1<<1) /* Writeout pages during reclaim */
3526#define RECLAIM_SWAP (1<<2) /* Swap pages out during reclaim */
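
/*
 * Worked example (editor's illustration): zone_reclaim_mode is treated as a
 * bitmask of the RECLAIM_* flags above, so enabling zone reclaim with
 * dirty-page writeout but without swapping means setting
 *
 *	RECLAIM_ZONE | RECLAIM_WRITE == (1<<0) | (1<<1) == 3
 *
 * e.g. via "sysctl vm.zone_reclaim_mode=3" on a running system.
 */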
3527
a92f7126
CL
3528/*
3529 * Priority for ZONE_RECLAIM. This determines the fraction of pages
 3530 * of a zone considered in each zone_reclaim pass. A priority of 4
 3531 * scans 1/16th of a zone.
3532 */
3533#define ZONE_RECLAIM_PRIORITY 4
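
/*
 * Illustrative sketch (editor's addition, not used by the kernel): the
 * priority acts as a right shift of the zone size, so each pass at
 * ZONE_RECLAIM_PRIORITY considers roughly zone_size >> 4, i.e. 1/16th
 * of the zone.
 */
static inline unsigned long zone_reclaim_scan_target_sketch(unsigned long zone_pages)
{
	/* e.g. a 1048576-page (4GB with 4KB pages) zone gives 65536 pages per pass */
	return zone_pages >> ZONE_RECLAIM_PRIORITY;
}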
3534
9614634f
CL
3535/*
3536 * Percentage of pages in a zone that must be unmapped for zone_reclaim to
3537 * occur.
3538 */
3539int sysctl_min_unmapped_ratio = 1;
3540
0ff38490
CL
3541/*
3542 * If the number of slab pages in a zone grows beyond this percentage then
3543 * slab reclaim needs to occur.
3544 */
3545int sysctl_min_slab_ratio = 5;
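
/*
 * Illustrative sketch (editor's addition, not used by the kernel): both
 * ratios are percentages of the zone's pages. With the defaults above, a
 * 1048576-page zone needs more than ~10485 unmapped file pages (1%) before
 * zone_reclaim triggers, and more than ~52428 reclaimable slab pages (5%)
 * before slab reclaim is attempted.
 */
static inline unsigned long zone_ratio_to_pages_sketch(unsigned long zone_pages,
						       int ratio)
{
	/* e.g. zone_ratio_to_pages_sketch(1048576, 1) == 10485 */
	return zone_pages * ratio / 100;
}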
3546
90afa5de
MG
3547static inline unsigned long zone_unmapped_file_pages(struct zone *zone)
3548{
3549 unsigned long file_mapped = zone_page_state(zone, NR_FILE_MAPPED);
3550 unsigned long file_lru = zone_page_state(zone, NR_INACTIVE_FILE) +
3551 zone_page_state(zone, NR_ACTIVE_FILE);
3552
3553 /*
3554 * It's possible for there to be more file mapped pages than
3555 * accounted for by the pages on the file LRU lists because
3556 * tmpfs pages accounted for as ANON can also be FILE_MAPPED
3557 */
3558 return (file_lru > file_mapped) ? (file_lru - file_mapped) : 0;
3559}
3560
3561/* Work out how many page cache pages we can reclaim in this reclaim_mode */
3562static long zone_pagecache_reclaimable(struct zone *zone)
3563{
3564 long nr_pagecache_reclaimable;
3565 long delta = 0;
3566
3567 /*
3568 * If RECLAIM_SWAP is set, then all file pages are considered
3569 * potentially reclaimable. Otherwise, we have to worry about
 3570	 * pages like swapcache, and zone_unmapped_file_pages() provides
 3571	 * a better estimate.
3572 */
3573 if (zone_reclaim_mode & RECLAIM_SWAP)
3574 nr_pagecache_reclaimable = zone_page_state(zone, NR_FILE_PAGES);
3575 else
3576 nr_pagecache_reclaimable = zone_unmapped_file_pages(zone);
3577
3578 /* If we can't clean pages, remove dirty pages from consideration */
3579 if (!(zone_reclaim_mode & RECLAIM_WRITE))
3580 delta += zone_page_state(zone, NR_FILE_DIRTY);
3581
3582 /* Watch for any possible underflows due to delta */
3583 if (unlikely(delta > nr_pagecache_reclaimable))
3584 delta = nr_pagecache_reclaimable;
3585
3586 return nr_pagecache_reclaimable - delta;
3587}
3588
9eeff239
CL
3589/*
3590 * Try to free up some pages from this zone through reclaim.
3591 */
179e9639 3592static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
9eeff239 3593{
7fb2d46d 3594 /* Minimum pages needed in order to stay on node */
69e05944 3595 const unsigned long nr_pages = 1 << order;
9eeff239
CL
3596 struct task_struct *p = current;
3597 struct reclaim_state reclaim_state;
179e9639
AM
3598 struct scan_control sc = {
3599 .may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE),
a6dc60f8 3600 .may_unmap = !!(zone_reclaim_mode & RECLAIM_SWAP),
2e2e4259 3601 .may_swap = 1,
62b726c1 3602 .nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX),
21caf2fc 3603 .gfp_mask = (gfp_mask = memalloc_noio_flags(gfp_mask)),
bd2f6199 3604 .order = order,
9e3b2f8c 3605 .priority = ZONE_RECLAIM_PRIORITY,
179e9639 3606 };
a09ed5e0
YH
3607 struct shrink_control shrink = {
3608 .gfp_mask = sc.gfp_mask,
3609 };
15748048 3610 unsigned long nr_slab_pages0, nr_slab_pages1;
9eeff239 3611
9eeff239 3612 cond_resched();
d4f7796e
CL
3613 /*
3614 * We need to be able to allocate from the reserves for RECLAIM_SWAP
3615 * and we also need to be able to write out pages for RECLAIM_WRITE
3616 * and RECLAIM_SWAP.
3617 */
3618 p->flags |= PF_MEMALLOC | PF_SWAPWRITE;
76ca542d 3619 lockdep_set_current_reclaim_state(gfp_mask);
9eeff239
CL
3620 reclaim_state.reclaimed_slab = 0;
3621 p->reclaim_state = &reclaim_state;
c84db23c 3622
90afa5de 3623 if (zone_pagecache_reclaimable(zone) > zone->min_unmapped_pages) {
0ff38490
CL
3624 /*
3625 * Free memory by calling shrink zone with increasing
3626 * priorities until we have enough memory freed.
3627 */
0ff38490 3628 do {
9e3b2f8c
KK
3629 shrink_zone(zone, &sc);
3630 } while (sc.nr_reclaimed < nr_pages && --sc.priority >= 0);
0ff38490 3631 }
c84db23c 3632
15748048
KM
3633 nr_slab_pages0 = zone_page_state(zone, NR_SLAB_RECLAIMABLE);
3634 if (nr_slab_pages0 > zone->min_slab_pages) {
2a16e3f4 3635 /*
7fb2d46d 3636 * shrink_slab() does not currently allow us to determine how
0ff38490
CL
3637 * many pages were freed in this zone. So we take the current
3638 * number of slab pages and shake the slab until it is reduced
3639 * by the same nr_pages that we used for reclaiming unmapped
3640 * pages.
2a16e3f4 3641 */
0ce3d744
DC
3642 nodes_clear(shrink.nodes_to_scan);
3643 node_set(zone_to_nid(zone), shrink.nodes_to_scan);
4dc4b3d9
KM
3644 for (;;) {
3645 unsigned long lru_pages = zone_reclaimable_pages(zone);
3646
3647 /* No reclaimable slab or very low memory pressure */
1495f230 3648 if (!shrink_slab(&shrink, sc.nr_scanned, lru_pages))
4dc4b3d9
KM
3649 break;
3650
3651 /* Freed enough memory */
3652 nr_slab_pages1 = zone_page_state(zone,
3653 NR_SLAB_RECLAIMABLE);
3654 if (nr_slab_pages1 + nr_pages <= nr_slab_pages0)
3655 break;
3656 }
83e33a47
CL
3657
3658 /*
3659 * Update nr_reclaimed by the number of slab pages we
3660 * reclaimed from this zone.
3661 */
15748048
KM
3662 nr_slab_pages1 = zone_page_state(zone, NR_SLAB_RECLAIMABLE);
3663 if (nr_slab_pages1 < nr_slab_pages0)
3664 sc.nr_reclaimed += nr_slab_pages0 - nr_slab_pages1;
2a16e3f4
CL
3665 }
3666
9eeff239 3667 p->reclaim_state = NULL;
d4f7796e 3668 current->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE);
76ca542d 3669 lockdep_clear_current_reclaim_state();
a79311c1 3670 return sc.nr_reclaimed >= nr_pages;
9eeff239 3671}
179e9639
AM
3672
3673int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
3674{
179e9639 3675 int node_id;
d773ed6b 3676 int ret;
179e9639
AM
3677
3678 /*
0ff38490
CL
3679 * Zone reclaim reclaims unmapped file backed pages and
3680 * slab pages if we are over the defined limits.
34aa1330 3681 *
9614634f
CL
3682 * A small portion of unmapped file backed pages is needed for
 3683	 * file I/O; otherwise pages read by file I/O will be immediately
3684 * thrown out if the zone is overallocated. So we do not reclaim
3685 * if less than a specified percentage of the zone is used by
3686 * unmapped file backed pages.
179e9639 3687 */
90afa5de
MG
3688 if (zone_pagecache_reclaimable(zone) <= zone->min_unmapped_pages &&
3689 zone_page_state(zone, NR_SLAB_RECLAIMABLE) <= zone->min_slab_pages)
fa5e084e 3690 return ZONE_RECLAIM_FULL;
179e9639 3691
6e543d57 3692 if (!zone_reclaimable(zone))
fa5e084e 3693 return ZONE_RECLAIM_FULL;
d773ed6b 3694
179e9639 3695 /*
d773ed6b 3696 * Do not scan if the allocation should not be delayed.
179e9639 3697 */
d773ed6b 3698 if (!(gfp_mask & __GFP_WAIT) || (current->flags & PF_MEMALLOC))
fa5e084e 3699 return ZONE_RECLAIM_NOSCAN;
179e9639
AM
3700
3701 /*
3702 * Only run zone reclaim on the local zone or on zones that do not
3703 * have associated processors. This will favor the local processor
 3704	 * over remote processors and spread off-node memory allocations
 3705	 * as widely as possible.
3706 */
89fa3024 3707 node_id = zone_to_nid(zone);
37c0708d 3708 if (node_state(node_id, N_CPU) && node_id != numa_node_id())
fa5e084e 3709 return ZONE_RECLAIM_NOSCAN;
d773ed6b
DR
3710
3711 if (zone_test_and_set_flag(zone, ZONE_RECLAIM_LOCKED))
fa5e084e
MG
3712 return ZONE_RECLAIM_NOSCAN;
3713
d773ed6b
DR
3714 ret = __zone_reclaim(zone, gfp_mask, order);
3715 zone_clear_flag(zone, ZONE_RECLAIM_LOCKED);
3716
24cf7251
MG
3717 if (!ret)
3718 count_vm_event(PGSCAN_ZONE_RECLAIM_FAILED);
3719
d773ed6b 3720 return ret;
179e9639 3721}
9eeff239 3722#endif
894bc310 3723
894bc310
LS
3724/*
3725 * page_evictable - test whether a page is evictable
3726 * @page: the page to test
894bc310
LS
3727 *
3728 * Test whether page is evictable--i.e., should be placed on active/inactive
39b5f29a 3729 * lists vs unevictable list.
894bc310
LS
3730 *
3731 * Reasons page might not be evictable:
ba9ddf49 3732 * (1) page's mapping marked unevictable
b291f000 3733 * (2) page is part of an mlocked VMA
ba9ddf49 3734 *
894bc310 3735 */
39b5f29a 3736int page_evictable(struct page *page)
894bc310 3737{
39b5f29a 3738 return !mapping_unevictable(page_mapping(page)) && !PageMlocked(page);
894bc310 3739}
89e004ea 3740
85046579 3741#ifdef CONFIG_SHMEM
89e004ea 3742/**
24513264
HD
3743 * check_move_unevictable_pages - check pages for evictability and move to appropriate zone lru list
3744 * @pages: array of pages to check
3745 * @nr_pages: number of pages to check
89e004ea 3746 *
24513264 3747 * Checks pages for evictability and moves them to the appropriate lru list.
85046579
HD
3748 *
3749 * This function is only used for SysV IPC SHM_UNLOCK.
89e004ea 3750 */
24513264 3751void check_move_unevictable_pages(struct page **pages, int nr_pages)
89e004ea 3752{
925b7673 3753 struct lruvec *lruvec;
24513264
HD
3754 struct zone *zone = NULL;
3755 int pgscanned = 0;
3756 int pgrescued = 0;
3757 int i;
89e004ea 3758
24513264
HD
3759 for (i = 0; i < nr_pages; i++) {
3760 struct page *page = pages[i];
3761 struct zone *pagezone;
89e004ea 3762
24513264
HD
3763 pgscanned++;
3764 pagezone = page_zone(page);
3765 if (pagezone != zone) {
3766 if (zone)
3767 spin_unlock_irq(&zone->lru_lock);
3768 zone = pagezone;
3769 spin_lock_irq(&zone->lru_lock);
3770 }
fa9add64 3771 lruvec = mem_cgroup_page_lruvec(page, zone);
89e004ea 3772
24513264
HD
3773 if (!PageLRU(page) || !PageUnevictable(page))
3774 continue;
89e004ea 3775
39b5f29a 3776 if (page_evictable(page)) {
24513264
HD
3777 enum lru_list lru = page_lru_base_type(page);
3778
309381fe 3779 VM_BUG_ON_PAGE(PageActive(page), page);
24513264 3780 ClearPageUnevictable(page);
fa9add64
HD
3781 del_page_from_lru_list(page, lruvec, LRU_UNEVICTABLE);
3782 add_page_to_lru_list(page, lruvec, lru);
24513264 3783 pgrescued++;
89e004ea 3784 }
24513264 3785 }
89e004ea 3786
24513264
HD
3787 if (zone) {
3788 __count_vm_events(UNEVICTABLE_PGRESCUED, pgrescued);
3789 __count_vm_events(UNEVICTABLE_PGSCANNED, pgscanned);
3790 spin_unlock_irq(&zone->lru_lock);
89e004ea 3791 }
89e004ea 3792}
85046579 3793#endif /* CONFIG_SHMEM */
af936a16 3794
264e56d8 3795static void warn_scan_unevictable_pages(void)
af936a16 3796{
264e56d8 3797 printk_once(KERN_WARNING
25bd91bd 3798 "%s: The scan_unevictable_pages sysctl/node-interface has been "
264e56d8 3799 "disabled for lack of a legitimate use case. If you have "
25bd91bd
KM
3800 "one, please send an email to linux-mm@kvack.org.\n",
3801 current->comm);
af936a16
LS
3802}
3803
3804/*
3805 * scan_unevictable_pages [vm] sysctl handler. On demand re-scan of
3806 * all nodes' unevictable lists for evictable pages
3807 */
3808unsigned long scan_unevictable_pages;
3809
3810int scan_unevictable_handler(struct ctl_table *table, int write,
8d65af78 3811 void __user *buffer,
af936a16
LS
3812 size_t *length, loff_t *ppos)
3813{
264e56d8 3814 warn_scan_unevictable_pages();
8d65af78 3815 proc_doulongvec_minmax(table, write, buffer, length, ppos);
af936a16
LS
3816 scan_unevictable_pages = 0;
3817 return 0;
3818}
3819
e4455abb 3820#ifdef CONFIG_NUMA
af936a16
LS
3821/*
3822 * per node 'scan_unevictable_pages' attribute. On demand re-scan of
3823 * a specified node's per zone unevictable lists for evictable pages.
3824 */
3825
10fbcf4c
KS
3826static ssize_t read_scan_unevictable_node(struct device *dev,
3827 struct device_attribute *attr,
af936a16
LS
3828 char *buf)
3829{
264e56d8 3830 warn_scan_unevictable_pages();
af936a16
LS
3831 return sprintf(buf, "0\n"); /* always zero; should fit... */
3832}
3833
10fbcf4c
KS
3834static ssize_t write_scan_unevictable_node(struct device *dev,
3835 struct device_attribute *attr,
af936a16
LS
3836 const char *buf, size_t count)
3837{
264e56d8 3838 warn_scan_unevictable_pages();
af936a16
LS
3839 return 1;
3840}
3841
3842
10fbcf4c 3843static DEVICE_ATTR(scan_unevictable_pages, S_IRUGO | S_IWUSR,
af936a16
LS
3844 read_scan_unevictable_node,
3845 write_scan_unevictable_node);
3846
3847int scan_unevictable_register_node(struct node *node)
3848{
10fbcf4c 3849 return device_create_file(&node->dev, &dev_attr_scan_unevictable_pages);
af936a16
LS
3850}
3851
3852void scan_unevictable_unregister_node(struct node *node)
3853{
10fbcf4c 3854 device_remove_file(&node->dev, &dev_attr_scan_unevictable_pages);
af936a16 3855}
e4455abb 3856#endif