// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/mm/page_alloc.c
 *
 * Manages the free list, the system allocates free pages here.
 * Note that kmalloc() lives in slab.c
 *
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 * Swap reorganised 29.12.95, Stephen Tweedie
 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 * Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
 * Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
 * Zone balancing, Kanoj Sarcar, SGI, Jan 2000
 * Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
 * (lots of bits borrowed from Ingo Molnar & Andrew Morton)
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/jiffies.h>
#include <linux/memblock.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/kasan.h>
#include <linux/kmsan.h>
#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/oom.h>
#include <linux/topology.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/memory_hotplug.h>
#include <linux/nodemask.h>
#include <linux/vmalloc.h>
#include <linux/vmstat.h>
#include <linux/mempolicy.h>
#include <linux/memremap.h>
#include <linux/stop_machine.h>
#include <linux/random.h>
#include <linux/sort.h>
#include <linux/pfn.h>
#include <linux/backing-dev.h>
#include <linux/fault-inject.h>
#include <linux/page-isolation.h>
#include <linux/debugobjects.h>
#include <linux/kmemleak.h>
#include <linux/compaction.h>
#include <trace/events/kmem.h>
#include <trace/events/oom.h>
#include <linux/prefetch.h>
#include <linux/mm_inline.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/hugetlb.h>
#include <linux/sched/rt.h>
#include <linux/sched/mm.h>
#include <linux/page_owner.h>
#include <linux/page_table_check.h>
#include <linux/kthread.h>
#include <linux/memcontrol.h>
#include <linux/ftrace.h>
#include <linux/lockdep.h>
#include <linux/nmi.h>
#include <linux/psi.h>
#include <linux/padata.h>
#include <linux/khugepaged.h>
#include <linux/buffer_head.h>
#include <linux/delayacct.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/div64.h>
#include "internal.h"
#include "shuffle.h"
#include "page_reporting.h"
#include "swap.h"

/* Free Page Internal flags: for internal, non-pcp variants of free_pages(). */
typedef int __bitwise fpi_t;

/* No special request */
#define FPI_NONE		((__force fpi_t)0)

/*
 * Skip free page reporting notification for the (possibly merged) page.
 * This does not hinder free page reporting from grabbing the page,
 * reporting it and marking it "reported" - it only skips notifying
 * the free page reporting infrastructure about a newly freed page. For
 * example, used when temporarily pulling a page from a freelist and
 * putting it back unmodified.
 */
#define FPI_SKIP_REPORT_NOTIFY	((__force fpi_t)BIT(0))

/*
 * Place the (possibly merged) page to the tail of the freelist. Will ignore
 * page shuffling (relevant code - e.g., memory onlining - is expected to
 * shuffle the whole zone).
 *
 * Note: No code should rely on this flag for correctness - it's purely
 * to allow for optimizations when handing back either fresh pages
 * (memory onlining) or untouched pages (page isolation, free page
 * reporting).
 */
#define FPI_TO_TAIL		((__force fpi_t)BIT(1))

/*
 * Don't poison memory with KASAN (only for the tag-based modes).
 * During boot, all non-reserved memblock memory is exposed to page_alloc.
 * Poisoning all that memory lengthens boot time, especially on systems with
 * large amount of RAM. This flag is used to skip that poisoning.
 * This is only done for the tag-based KASAN modes, as those are able to
 * detect memory corruptions with the memory tags assigned by default.
 * All memory allocated normally after boot gets poisoned as usual.
 */
#define FPI_SKIP_KASAN_POISON	((__force fpi_t)BIT(2))
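
/*
 * Illustrative note (not part of the original file): fpi_t is a bitmask, so
 * the flags above can be OR-ed together when more than one behaviour is
 * wanted; a hypothetical caller could, for example, pass
 * FPI_TO_TAIL | FPI_SKIP_REPORT_NOTIFY to a freeing helper that takes
 * fpi_t flags.
 */
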
/* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */
static DEFINE_MUTEX(pcp_batch_high_lock);
#define MIN_PERCPU_PAGELIST_HIGH_FRACTION (8)

#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
/*
 * On SMP, spin_trylock is sufficient protection.
 * On PREEMPT_RT, spin_trylock is equivalent on both SMP and UP.
 */
#define pcp_trylock_prepare(flags)	do { } while (0)
#define pcp_trylock_finish(flag)	do { } while (0)
#else

/* UP spin_trylock always succeeds so disable IRQs to prevent re-entrancy. */
#define pcp_trylock_prepare(flags)	local_irq_save(flags)
#define pcp_trylock_finish(flags)	local_irq_restore(flags)
#endif

/*
 * Locking a pcp requires a PCP lookup followed by a spinlock. To avoid
 * a migration causing the wrong PCP to be locked and remote memory being
 * potentially allocated, pin the task to the CPU for the lookup+lock.
 * preempt_disable is used on !RT because it is faster than migrate_disable.
 * migrate_disable is used on RT because otherwise RT spinlock usage is
 * interfered with and a high priority task cannot preempt the allocator.
 */
#ifndef CONFIG_PREEMPT_RT
#define pcpu_task_pin()		preempt_disable()
#define pcpu_task_unpin()	preempt_enable()
#else
#define pcpu_task_pin()		migrate_disable()
#define pcpu_task_unpin()	migrate_enable()
#endif

/*
 * Generic helper to look up a per-cpu variable with an embedded spinlock.
 * Return value should be used with equivalent unlock helper.
 */
#define pcpu_spin_lock(type, member, ptr)				\
({									\
	type *_ret;							\
	pcpu_task_pin();						\
	_ret = this_cpu_ptr(ptr);					\
	spin_lock(&_ret->member);					\
	_ret;								\
})

#define pcpu_spin_lock_irqsave(type, member, ptr, flags)		\
({									\
	type *_ret;							\
	pcpu_task_pin();						\
	_ret = this_cpu_ptr(ptr);					\
	spin_lock_irqsave(&_ret->member, flags);			\
	_ret;								\
})

#define pcpu_spin_trylock_irqsave(type, member, ptr, flags)		\
({									\
	type *_ret;							\
	pcpu_task_pin();						\
	_ret = this_cpu_ptr(ptr);					\
	if (!spin_trylock_irqsave(&_ret->member, flags)) {		\
		pcpu_task_unpin();					\
		_ret = NULL;						\
	}								\
	_ret;								\
})

#define pcpu_spin_unlock(member, ptr)					\
({									\
	spin_unlock(&ptr->member);					\
	pcpu_task_unpin();						\
})

#define pcpu_spin_unlock_irqrestore(member, ptr, flags)			\
({									\
	spin_unlock_irqrestore(&ptr->member, flags);			\
	pcpu_task_unpin();						\
})

/* struct per_cpu_pages specific helpers. */
#define pcp_spin_lock(ptr)						\
	pcpu_spin_lock(struct per_cpu_pages, lock, ptr)

#define pcp_spin_lock_irqsave(ptr, flags)				\
	pcpu_spin_lock_irqsave(struct per_cpu_pages, lock, ptr, flags)

#define pcp_spin_trylock_irqsave(ptr, flags)				\
	pcpu_spin_trylock_irqsave(struct per_cpu_pages, lock, ptr, flags)

#define pcp_spin_unlock(ptr)						\
	pcpu_spin_unlock(lock, ptr)

#define pcp_spin_unlock_irqrestore(ptr, flags)				\
	pcpu_spin_unlock_irqrestore(lock, ptr, flags)
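
/*
 * Illustrative usage sketch (not part of the original file): the lock and
 * unlock helpers above must be paired so that the pcpu_task_unpin() in the
 * unlock helper matches the pcpu_task_pin() taken by the lock helper, e.g.:
 *
 *	struct per_cpu_pages *pcp;
 *	unsigned long flags;
 *
 *	pcp = pcp_spin_trylock_irqsave(zone->per_cpu_pageset, flags);
 *	if (pcp) {
 *		// ... operate on pcp->lists / pcp->count ...
 *		pcp_spin_unlock_irqrestore(pcp, flags);
 *	}
 */
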
#ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
DEFINE_PER_CPU(int, numa_node);
EXPORT_PER_CPU_SYMBOL(numa_node);
#endif

DEFINE_STATIC_KEY_TRUE(vm_numa_stat_key);

#ifdef CONFIG_HAVE_MEMORYLESS_NODES
/*
 * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly.
 * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined.
 * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem()
 * defined in <linux/topology.h>.
 */
DEFINE_PER_CPU(int, _numa_mem_);	/* Kernel "local memory" node */
EXPORT_PER_CPU_SYMBOL(_numa_mem_);
#endif

static DEFINE_MUTEX(pcpu_drain_mutex);

#ifdef CONFIG_GCC_PLUGIN_LATENT_ENTROPY
volatile unsigned long latent_entropy __latent_entropy;
EXPORT_SYMBOL(latent_entropy);
#endif

/*
 * Array of node states.
 */
nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
	[N_POSSIBLE] = NODE_MASK_ALL,
	[N_ONLINE] = { { [0] = 1UL } },
#ifndef CONFIG_NUMA
	[N_NORMAL_MEMORY] = { { [0] = 1UL } },
#ifdef CONFIG_HIGHMEM
	[N_HIGH_MEMORY] = { { [0] = 1UL } },
#endif
	[N_MEMORY] = { { [0] = 1UL } },
	[N_CPU] = { { [0] = 1UL } },
#endif	/* NUMA */
};
EXPORT_SYMBOL(node_states);

atomic_long_t _totalram_pages __read_mostly;
EXPORT_SYMBOL(_totalram_pages);
unsigned long totalreserve_pages __read_mostly;
unsigned long totalcma_pages __read_mostly;

int percpu_pagelist_high_fraction;
gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;
DEFINE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_ALLOC_DEFAULT_ON, init_on_alloc);
EXPORT_SYMBOL(init_on_alloc);

DEFINE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_FREE_DEFAULT_ON, init_on_free);
EXPORT_SYMBOL(init_on_free);

static bool _init_on_alloc_enabled_early __read_mostly
				= IS_ENABLED(CONFIG_INIT_ON_ALLOC_DEFAULT_ON);
static int __init early_init_on_alloc(char *buf)
{

	return kstrtobool(buf, &_init_on_alloc_enabled_early);
}
early_param("init_on_alloc", early_init_on_alloc);

static bool _init_on_free_enabled_early __read_mostly
				= IS_ENABLED(CONFIG_INIT_ON_FREE_DEFAULT_ON);
static int __init early_init_on_free(char *buf)
{
	return kstrtobool(buf, &_init_on_free_enabled_early);
}
early_param("init_on_free", early_init_on_free);

/*
 * A cached value of the page's pageblock's migratetype, used when the page is
 * put on a pcplist. Used to avoid the pageblock migratetype lookup when
 * freeing from pcplists in most cases, at the cost of possibly becoming stale.
 * Also the migratetype set in the page does not necessarily match the pcplist
 * index, e.g. page might have MIGRATE_CMA set but be on a pcplist with any
 * other index - this ensures that it will be put on the correct CMA freelist.
 */
static inline int get_pcppage_migratetype(struct page *page)
{
	return page->index;
}

static inline void set_pcppage_migratetype(struct page *page, int migratetype)
{
	page->index = migratetype;
}

#ifdef CONFIG_PM_SLEEP
/*
 * The following functions are used by the suspend/hibernate code to temporarily
 * change gfp_allowed_mask in order to avoid using I/O during memory allocations
 * while devices are suspended. To avoid races with the suspend/hibernate code,
 * they should always be called with system_transition_mutex held
 * (gfp_allowed_mask also should only be modified with system_transition_mutex
 * held, unless the suspend/hibernate code is guaranteed not to run in parallel
 * with that modification).
 */

static gfp_t saved_gfp_mask;

void pm_restore_gfp_mask(void)
{
	WARN_ON(!mutex_is_locked(&system_transition_mutex));
	if (saved_gfp_mask) {
		gfp_allowed_mask = saved_gfp_mask;
		saved_gfp_mask = 0;
	}
}

void pm_restrict_gfp_mask(void)
{
	WARN_ON(!mutex_is_locked(&system_transition_mutex));
	WARN_ON(saved_gfp_mask);
	saved_gfp_mask = gfp_allowed_mask;
	gfp_allowed_mask &= ~(__GFP_IO | __GFP_FS);
}

bool pm_suspended_storage(void)
{
	if ((gfp_allowed_mask & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS))
		return false;
	return true;
}
#endif /* CONFIG_PM_SLEEP */

#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
unsigned int pageblock_order __read_mostly;
#endif

static void __free_pages_ok(struct page *page, unsigned int order,
			    fpi_t fpi_flags);

/*
 * results with 256, 32 in the lowmem_reserve sysctl:
 *	1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
 *	1G machine -> (16M dma, 784M normal, 224M high)
 *	NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
 *	HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
 *	HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
 *
 * TBD: should special case ZONE_DMA32 machines here - in those we normally
 * don't need any ZONE_NORMAL reservation
 */
int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES] = {
#ifdef CONFIG_ZONE_DMA
	[ZONE_DMA] = 256,
#endif
#ifdef CONFIG_ZONE_DMA32
	[ZONE_DMA32] = 256,
#endif
	[ZONE_NORMAL] = 32,
#ifdef CONFIG_HIGHMEM
	[ZONE_HIGHMEM] = 0,
#endif
	[ZONE_MOVABLE] = 0,
};
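
/*
 * Worked example (illustrative, not from the original file): with the default
 * ratios above, on the 1G layout from the comment a NORMAL allocation keeps
 * roughly 784M/256 ~= 3M of ZONE_DMA out of reach, and a HIGHMEM allocation
 * keeps about 224M/32 = 7M of ZONE_NORMAL in reserve; the actual per-zone
 * values are recomputed in setup_per_zone_lowmem_reserve().
 */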

static char * const zone_names[MAX_NR_ZONES] = {
#ifdef CONFIG_ZONE_DMA
	 "DMA",
#endif
#ifdef CONFIG_ZONE_DMA32
	 "DMA32",
#endif
	 "Normal",
#ifdef CONFIG_HIGHMEM
	 "HighMem",
#endif
	 "Movable",
#ifdef CONFIG_ZONE_DEVICE
	 "Device",
#endif
};

const char * const migratetype_names[MIGRATE_TYPES] = {
	"Unmovable",
	"Movable",
	"Reclaimable",
	"HighAtomic",
#ifdef CONFIG_CMA
	"CMA",
#endif
#ifdef CONFIG_MEMORY_ISOLATION
	"Isolate",
#endif
};

compound_page_dtor * const compound_page_dtors[NR_COMPOUND_DTORS] = {
	[NULL_COMPOUND_DTOR] = NULL,
	[COMPOUND_PAGE_DTOR] = free_compound_page,
#ifdef CONFIG_HUGETLB_PAGE
	[HUGETLB_PAGE_DTOR] = free_huge_page,
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	[TRANSHUGE_PAGE_DTOR] = free_transhuge_page,
#endif
};

int min_free_kbytes = 1024;
int user_min_free_kbytes = -1;
int watermark_boost_factor __read_mostly = 15000;
int watermark_scale_factor = 10;

static unsigned long nr_kernel_pages __initdata;
static unsigned long nr_all_pages __initdata;
static unsigned long dma_reserve __initdata;

static unsigned long arch_zone_lowest_possible_pfn[MAX_NR_ZONES] __initdata;
static unsigned long arch_zone_highest_possible_pfn[MAX_NR_ZONES] __initdata;
static unsigned long required_kernelcore __initdata;
static unsigned long required_kernelcore_percent __initdata;
static unsigned long required_movablecore __initdata;
static unsigned long required_movablecore_percent __initdata;
static unsigned long zone_movable_pfn[MAX_NUMNODES] __initdata;
bool mirrored_kernelcore __initdata_memblock;

/* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
int movable_zone;
EXPORT_SYMBOL(movable_zone);

#if MAX_NUMNODES > 1
unsigned int nr_node_ids __read_mostly = MAX_NUMNODES;
unsigned int nr_online_nodes __read_mostly = 1;
EXPORT_SYMBOL(nr_node_ids);
EXPORT_SYMBOL(nr_online_nodes);
#endif

int page_group_by_mobility_disabled __read_mostly;

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
/*
 * During boot we initialize deferred pages on-demand, as needed, but once
 * page_alloc_init_late() has finished, the deferred pages are all initialized,
 * and we can permanently disable that path.
 */
static DEFINE_STATIC_KEY_TRUE(deferred_pages);

static inline bool deferred_pages_enabled(void)
{
	return static_branch_unlikely(&deferred_pages);
}

/* Returns true if the struct page for the pfn is uninitialised */
static inline bool __meminit early_page_uninitialised(unsigned long pfn)
{
	int nid = early_pfn_to_nid(pfn);

	if (node_online(nid) && pfn >= NODE_DATA(nid)->first_deferred_pfn)
		return true;

	return false;
}

/*
 * Returns true when the remaining initialisation should be deferred until
 * later in the boot cycle when it can be parallelised.
 */
static bool __meminit
defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
{
	static unsigned long prev_end_pfn, nr_initialised;

	if (early_page_ext_enabled())
		return false;
	/*
	 * prev_end_pfn static that contains the end of previous zone
	 * No need to protect because called very early in boot before smp_init.
	 */
	if (prev_end_pfn != end_pfn) {
		prev_end_pfn = end_pfn;
		nr_initialised = 0;
	}

	/* Always populate low zones for address-constrained allocations */
	if (end_pfn < pgdat_end_pfn(NODE_DATA(nid)))
		return false;

	if (NODE_DATA(nid)->first_deferred_pfn != ULONG_MAX)
		return true;
	/*
	 * We start only with one section of pages, more pages are added as
	 * needed until the rest of deferred pages are initialized.
	 */
	nr_initialised++;
	if ((nr_initialised > PAGES_PER_SECTION) &&
	    (pfn & (PAGES_PER_SECTION - 1)) == 0) {
		NODE_DATA(nid)->first_deferred_pfn = pfn;
		return true;
	}
	return false;
}
#else
static inline bool deferred_pages_enabled(void)
{
	return false;
}

static inline bool early_page_uninitialised(unsigned long pfn)
{
	return false;
}

static inline bool defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
{
	return false;
}
#endif

/* Return a pointer to the bitmap storing bits affecting a block of pages */
static inline unsigned long *get_pageblock_bitmap(const struct page *page,
							unsigned long pfn)
{
#ifdef CONFIG_SPARSEMEM
	return section_to_usemap(__pfn_to_section(pfn));
#else
	return page_zone(page)->pageblock_flags;
#endif /* CONFIG_SPARSEMEM */
}

static inline int pfn_to_bitidx(const struct page *page, unsigned long pfn)
{
#ifdef CONFIG_SPARSEMEM
	pfn &= (PAGES_PER_SECTION-1);
#else
	pfn = pfn - pageblock_start_pfn(page_zone(page)->zone_start_pfn);
#endif /* CONFIG_SPARSEMEM */
	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
}

static __always_inline
unsigned long __get_pfnblock_flags_mask(const struct page *page,
					unsigned long pfn,
					unsigned long mask)
{
	unsigned long *bitmap;
	unsigned long bitidx, word_bitidx;
	unsigned long word;

	bitmap = get_pageblock_bitmap(page, pfn);
	bitidx = pfn_to_bitidx(page, pfn);
	word_bitidx = bitidx / BITS_PER_LONG;
	bitidx &= (BITS_PER_LONG-1);
	/*
	 * This races, without locks, with set_pfnblock_flags_mask(). Ensure
	 * a consistent read of the memory array, so that results, even though
	 * racy, are not corrupted.
	 */
	word = READ_ONCE(bitmap[word_bitidx]);
	return (word >> bitidx) & mask;
}

/**
 * get_pfnblock_flags_mask - Return the requested group of flags for the pageblock_nr_pages block of pages
 * @page: The page within the block of interest
 * @pfn: The target page frame number
 * @mask: mask of bits that the caller is interested in
 *
 * Return: pageblock_bits flags
 */
unsigned long get_pfnblock_flags_mask(const struct page *page,
					unsigned long pfn, unsigned long mask)
{
	return __get_pfnblock_flags_mask(page, pfn, mask);
}

static __always_inline int get_pfnblock_migratetype(const struct page *page,
						unsigned long pfn)
{
	return __get_pfnblock_flags_mask(page, pfn, MIGRATETYPE_MASK);
}

/**
 * set_pfnblock_flags_mask - Set the requested group of flags for a pageblock_nr_pages block of pages
 * @page: The page within the block of interest
 * @flags: The flags to set
 * @pfn: The target page frame number
 * @mask: mask of bits that the caller is interested in
 */
void set_pfnblock_flags_mask(struct page *page, unsigned long flags,
					unsigned long pfn,
					unsigned long mask)
{
	unsigned long *bitmap;
	unsigned long bitidx, word_bitidx;
	unsigned long word;

	BUILD_BUG_ON(NR_PAGEBLOCK_BITS != 4);
	BUILD_BUG_ON(MIGRATE_TYPES > (1 << PB_migratetype_bits));

	bitmap = get_pageblock_bitmap(page, pfn);
	bitidx = pfn_to_bitidx(page, pfn);
	word_bitidx = bitidx / BITS_PER_LONG;
	bitidx &= (BITS_PER_LONG-1);

	VM_BUG_ON_PAGE(!zone_spans_pfn(page_zone(page), pfn), page);

	mask <<= bitidx;
	flags <<= bitidx;

	word = READ_ONCE(bitmap[word_bitidx]);
	do {
	} while (!try_cmpxchg(&bitmap[word_bitidx], &word, (word & ~mask) | flags));
}
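
/*
 * Illustrative note (not from the original file): each pageblock owns
 * NR_PAGEBLOCK_BITS (4) consecutive bits in the bitmap, so with
 * pageblock_order == 9 and BITS_PER_LONG == 64 a lookup boils down to:
 *
 *	bitidx      = (pfn_in_section >> 9) * 4;
 *	word        = READ_ONCE(bitmap[bitidx / 64]);
 *	migratetype = (word >> (bitidx % 64)) & MIGRATETYPE_MASK;
 */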

void set_pageblock_migratetype(struct page *page, int migratetype)
{
	if (unlikely(page_group_by_mobility_disabled &&
		     migratetype < MIGRATE_PCPTYPES))
		migratetype = MIGRATE_UNMOVABLE;

	set_pfnblock_flags_mask(page, (unsigned long)migratetype,
				page_to_pfn(page), MIGRATETYPE_MASK);
}

#ifdef CONFIG_DEBUG_VM
static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
{
	int ret = 0;
	unsigned seq;
	unsigned long pfn = page_to_pfn(page);
	unsigned long sp, start_pfn;

	do {
		seq = zone_span_seqbegin(zone);
		start_pfn = zone->zone_start_pfn;
		sp = zone->spanned_pages;
		if (!zone_spans_pfn(zone, pfn))
			ret = 1;
	} while (zone_span_seqretry(zone, seq));

	if (ret)
		pr_err("page 0x%lx outside node %d zone %s [ 0x%lx - 0x%lx ]\n",
			pfn, zone_to_nid(zone), zone->name,
			start_pfn, start_pfn + sp);

	return ret;
}

static int page_is_consistent(struct zone *zone, struct page *page)
{
	if (zone != page_zone(page))
		return 0;

	return 1;
}
/*
 * Temporary debugging check for pages not lying within a given zone.
 */
static int __maybe_unused bad_range(struct zone *zone, struct page *page)
{
	if (page_outside_zone_boundaries(zone, page))
		return 1;
	if (!page_is_consistent(zone, page))
		return 1;

	return 0;
}
#else
static inline int __maybe_unused bad_range(struct zone *zone, struct page *page)
{
	return 0;
}
#endif

static void bad_page(struct page *page, const char *reason)
{
	static unsigned long resume;
	static unsigned long nr_shown;
	static unsigned long nr_unshown;

	/*
	 * Allow a burst of 60 reports, then keep quiet for that minute;
	 * or allow a steady drip of one report per second.
	 */
	if (nr_shown == 60) {
		if (time_before(jiffies, resume)) {
			nr_unshown++;
			goto out;
		}
		if (nr_unshown) {
			pr_alert(
			      "BUG: Bad page state: %lu messages suppressed\n",
				nr_unshown);
			nr_unshown = 0;
		}
		nr_shown = 0;
	}
	if (nr_shown++ == 0)
		resume = jiffies + 60 * HZ;

	pr_alert("BUG: Bad page state in process %s pfn:%05lx\n",
		current->comm, page_to_pfn(page));
	dump_page(page, reason);

	print_modules();
	dump_stack();
out:
	/* Leave bad fields for debug, except PageBuddy could make trouble */
	page_mapcount_reset(page);	/* remove PageBuddy */
	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
}

static inline unsigned int order_to_pindex(int migratetype, int order)
{
	int base = order;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (order > PAGE_ALLOC_COSTLY_ORDER) {
		VM_BUG_ON(order != pageblock_order);
		return NR_LOWORDER_PCP_LISTS;
	}
#else
	VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER);
#endif

	return (MIGRATE_PCPTYPES * base) + migratetype;
}

static inline int pindex_to_order(unsigned int pindex)
{
	int order = pindex / MIGRATE_PCPTYPES;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (pindex == NR_LOWORDER_PCP_LISTS)
		order = pageblock_order;
#else
	VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER);
#endif

	return order;
}

static inline bool pcp_allowed_order(unsigned int order)
{
	if (order <= PAGE_ALLOC_COSTLY_ORDER)
		return true;
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (order == pageblock_order)
		return true;
#endif
	return false;
}
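
/*
 * Illustrative mapping (not from the original file), assuming
 * MIGRATE_PCPTYPES == 3 and PAGE_ALLOC_COSTLY_ORDER == 3:
 *
 *	order 0, MIGRATE_UNMOVABLE   -> pindex 0
 *	order 0, MIGRATE_MOVABLE     -> pindex 1
 *	order 1, MIGRATE_UNMOVABLE   -> pindex 3
 *	order 3, MIGRATE_RECLAIMABLE -> pindex 11
 *	order == pageblock_order     -> pindex NR_LOWORDER_PCP_LISTS (THP case)
 */
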
static inline void free_the_page(struct page *page, unsigned int order)
{
	if (pcp_allowed_order(order))		/* Via pcp? */
		free_unref_page(page, order);
	else
		__free_pages_ok(page, order, FPI_NONE);
}

/*
 * Higher-order pages are called "compound pages". They are structured thusly:
 *
 * The first PAGE_SIZE page is called the "head page" and has PG_head set.
 *
 * The remaining PAGE_SIZE pages are called "tail pages". PageTail() is encoded
 * in bit 0 of page->compound_head. The rest of the bits is a pointer to the
 * head page.
 *
 * The first tail page's ->compound_dtor holds the offset in array of compound
 * page destructors. See compound_page_dtors.
 *
 * The first tail page's ->compound_order holds the order of allocation.
 * This usage means that zero-order pages may not be compound.
 */

void free_compound_page(struct page *page)
{
	mem_cgroup_uncharge(page_folio(page));
	free_the_page(page, compound_order(page));
}

static void prep_compound_head(struct page *page, unsigned int order)
{
	set_compound_page_dtor(page, COMPOUND_PAGE_DTOR);
	set_compound_order(page, order);
	atomic_set(compound_mapcount_ptr(page), -1);
	atomic_set(compound_pincount_ptr(page), 0);
}

static void prep_compound_tail(struct page *head, int tail_idx)
{
	struct page *p = head + tail_idx;

	p->mapping = TAIL_MAPPING;
	set_compound_head(p, head);
}

void prep_compound_page(struct page *page, unsigned int order)
{
	int i;
	int nr_pages = 1 << order;

	__SetPageHead(page);
	for (i = 1; i < nr_pages; i++)
		prep_compound_tail(page, i);

	prep_compound_head(page, order);
}
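
/*
 * Illustrative layout (not from the original file): after
 * prep_compound_page(page, 2) the four struct pages look roughly like:
 *
 *	page[0]:    PG_head set
 *	page[1]:    PageTail, compound_head -> page[0], mapping == TAIL_MAPPING,
 *	            and holds compound_dtor, compound_order (2) and the
 *	            compound mapcount/pincount
 *	page[2..3]: PageTail, compound_head -> page[0], mapping == TAIL_MAPPING
 */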

void destroy_large_folio(struct folio *folio)
{
	enum compound_dtor_id dtor = folio_page(folio, 1)->compound_dtor;

	VM_BUG_ON_FOLIO(dtor >= NR_COMPOUND_DTORS, folio);
	compound_page_dtors[dtor](&folio->page);
}

#ifdef CONFIG_DEBUG_PAGEALLOC
unsigned int _debug_guardpage_minorder;

bool _debug_pagealloc_enabled_early __read_mostly
			= IS_ENABLED(CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT);
EXPORT_SYMBOL(_debug_pagealloc_enabled_early);
DEFINE_STATIC_KEY_FALSE(_debug_pagealloc_enabled);
EXPORT_SYMBOL(_debug_pagealloc_enabled);

DEFINE_STATIC_KEY_FALSE(_debug_guardpage_enabled);

static int __init early_debug_pagealloc(char *buf)
{
	return kstrtobool(buf, &_debug_pagealloc_enabled_early);
}
early_param("debug_pagealloc", early_debug_pagealloc);

static int __init debug_guardpage_minorder_setup(char *buf)
{
	unsigned long res;

	if (kstrtoul(buf, 10, &res) < 0 || res > MAX_ORDER / 2) {
		pr_err("Bad debug_guardpage_minorder value\n");
		return 0;
	}
	_debug_guardpage_minorder = res;
	pr_info("Setting debug_guardpage_minorder to %lu\n", res);
	return 0;
}
early_param("debug_guardpage_minorder", debug_guardpage_minorder_setup);

static inline bool set_page_guard(struct zone *zone, struct page *page,
				unsigned int order, int migratetype)
{
	if (!debug_guardpage_enabled())
		return false;

	if (order >= debug_guardpage_minorder())
		return false;

	__SetPageGuard(page);
	INIT_LIST_HEAD(&page->buddy_list);
	set_page_private(page, order);
	/* Guard pages are not available for any usage */
	if (!is_migrate_isolate(migratetype))
		__mod_zone_freepage_state(zone, -(1 << order), migratetype);

	return true;
}

static inline void clear_page_guard(struct zone *zone, struct page *page,
				unsigned int order, int migratetype)
{
	if (!debug_guardpage_enabled())
		return;

	__ClearPageGuard(page);

	set_page_private(page, 0);
	if (!is_migrate_isolate(migratetype))
		__mod_zone_freepage_state(zone, (1 << order), migratetype);
}
#else
static inline bool set_page_guard(struct zone *zone, struct page *page,
			unsigned int order, int migratetype) { return false; }
static inline void clear_page_guard(struct zone *zone, struct page *page,
				unsigned int order, int migratetype) {}
#endif

/*
 * Enable static keys related to various memory debugging and hardening options.
 * Some override others, and depend on early params that are evaluated in the
 * order of appearance. So we need to first gather the full picture of what was
 * enabled, and then make decisions.
 */
void __init init_mem_debugging_and_hardening(void)
{
	bool page_poisoning_requested = false;

#ifdef CONFIG_PAGE_POISONING
	/*
	 * Page poisoning is debug page alloc for some arches. If
	 * either of those options are enabled, enable poisoning.
	 */
	if (page_poisoning_enabled() ||
	     (!IS_ENABLED(CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC) &&
	      debug_pagealloc_enabled())) {
		static_branch_enable(&_page_poisoning_enabled);
		page_poisoning_requested = true;
	}
#endif

	if ((_init_on_alloc_enabled_early || _init_on_free_enabled_early) &&
	    page_poisoning_requested) {
		pr_info("mem auto-init: CONFIG_PAGE_POISONING is on, "
			"will take precedence over init_on_alloc and init_on_free\n");
		_init_on_alloc_enabled_early = false;
		_init_on_free_enabled_early = false;
	}

	if (_init_on_alloc_enabled_early)
		static_branch_enable(&init_on_alloc);
	else
		static_branch_disable(&init_on_alloc);

	if (_init_on_free_enabled_early)
		static_branch_enable(&init_on_free);
	else
		static_branch_disable(&init_on_free);

	if (IS_ENABLED(CONFIG_KMSAN) &&
	    (_init_on_alloc_enabled_early || _init_on_free_enabled_early))
		pr_info("mem auto-init: please make sure init_on_alloc and init_on_free are disabled when running KMSAN\n");

#ifdef CONFIG_DEBUG_PAGEALLOC
	if (!debug_pagealloc_enabled())
		return;

	static_branch_enable(&_debug_pagealloc_enabled);

	if (!debug_guardpage_minorder())
		return;

	static_branch_enable(&_debug_guardpage_enabled);
#endif
}

static inline void set_buddy_order(struct page *page, unsigned int order)
{
	set_page_private(page, order);
	__SetPageBuddy(page);
}

#ifdef CONFIG_COMPACTION
static inline struct capture_control *task_capc(struct zone *zone)
{
	struct capture_control *capc = current->capture_control;

	return unlikely(capc) &&
		!(current->flags & PF_KTHREAD) &&
		!capc->page &&
		capc->cc->zone == zone ? capc : NULL;
}

static inline bool
compaction_capture(struct capture_control *capc, struct page *page,
		   int order, int migratetype)
{
	if (!capc || order != capc->cc->order)
		return false;

	/* Do not accidentally pollute CMA or isolated regions */
	if (is_migrate_cma(migratetype) ||
	    is_migrate_isolate(migratetype))
		return false;

	/*
	 * Do not let lower order allocations pollute a movable pageblock.
	 * This might let an unmovable request use a reclaimable pageblock
	 * and vice-versa but no more than normal fallback logic which can
	 * have trouble finding a high-order free page.
	 */
	if (order < pageblock_order && migratetype == MIGRATE_MOVABLE)
		return false;

	capc->page = page;
	return true;
}

#else
static inline struct capture_control *task_capc(struct zone *zone)
{
	return NULL;
}

static inline bool
compaction_capture(struct capture_control *capc, struct page *page,
		   int order, int migratetype)
{
	return false;
}
#endif /* CONFIG_COMPACTION */

/* Used for pages not on another list */
static inline void add_to_free_list(struct page *page, struct zone *zone,
				    unsigned int order, int migratetype)
{
	struct free_area *area = &zone->free_area[order];

	list_add(&page->buddy_list, &area->free_list[migratetype]);
	area->nr_free++;
}

/* Used for pages not on another list */
static inline void add_to_free_list_tail(struct page *page, struct zone *zone,
					 unsigned int order, int migratetype)
{
	struct free_area *area = &zone->free_area[order];

	list_add_tail(&page->buddy_list, &area->free_list[migratetype]);
	area->nr_free++;
}

/*
 * Used for pages which are on another list. Move the pages to the tail
 * of the list - so the moved pages won't immediately be considered for
 * allocation again (e.g., optimization for memory onlining).
 */
static inline void move_to_free_list(struct page *page, struct zone *zone,
				     unsigned int order, int migratetype)
{
	struct free_area *area = &zone->free_area[order];

	list_move_tail(&page->buddy_list, &area->free_list[migratetype]);
}

static inline void del_page_from_free_list(struct page *page, struct zone *zone,
					   unsigned int order)
{
	/* clear reported state and update reported page count */
	if (page_reported(page))
		__ClearPageReported(page);

	list_del(&page->buddy_list);
	__ClearPageBuddy(page);
	set_page_private(page, 0);
	zone->free_area[order].nr_free--;
}

/*
 * If this is not the largest possible page, check if the buddy
 * of the next-highest order is free. If it is, it's possible
 * that pages are being freed that will coalesce soon. In case that is
 * happening, add the free page to the tail of the list
 * so it's less likely to be used soon and more likely to be merged
 * as a higher order page
 */
static inline bool
buddy_merge_likely(unsigned long pfn, unsigned long buddy_pfn,
		   struct page *page, unsigned int order)
{
	unsigned long higher_page_pfn;
	struct page *higher_page;

	if (order >= MAX_ORDER - 2)
		return false;

	higher_page_pfn = buddy_pfn & pfn;
	higher_page = page + (higher_page_pfn - pfn);

	return find_buddy_page_pfn(higher_page, higher_page_pfn, order + 1,
			NULL) != NULL;
}

/*
 * Freeing function for a buddy system allocator.
 *
 * The concept of a buddy system is to maintain direct-mapped table
 * (containing bit values) for memory blocks of various "orders".
 * The bottom level table contains the map for the smallest allocatable
 * units of memory (here, pages), and each level above it describes
 * pairs of units from the levels below, hence, "buddies".
 * At a high level, all that happens here is marking the table entry
 * at the bottom level available, and propagating the changes upward
 * as necessary, plus some accounting needed to play nicely with other
 * parts of the VM system.
 * At each level, we keep a list of pages, which are heads of continuous
 * free pages of length of (1 << order) and marked with PageBuddy.
 * Page's order is recorded in page_private(page) field.
 * So when we are allocating or freeing one, we can derive the state of the
 * other. That is, if we allocate a small block, and both were
 * free, the remainder of the region must be split into blocks.
 * If a block is freed, and its buddy is also free, then this
 * triggers coalescing into a block of larger size.
 *
 * -- nyc
 */
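
/*
 * Worked example (illustrative, not from the original file): for an order-0
 * page at pfn 8, the buddy is pfn 8 ^ (1 << 0) = 9; if both are free they
 * merge into an order-1 block at pfn 8, whose buddy is then
 * 8 ^ (1 << 1) = 10, and so on while each successive buddy is found free.
 */
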
static inline void __free_one_page(struct page *page,
		unsigned long pfn,
		struct zone *zone, unsigned int order,
		int migratetype, fpi_t fpi_flags)
{
	struct capture_control *capc = task_capc(zone);
	unsigned long buddy_pfn = 0;
	unsigned long combined_pfn;
	struct page *buddy;
	bool to_tail;

	VM_BUG_ON(!zone_is_initialized(zone));
	VM_BUG_ON_PAGE(page->flags & PAGE_FLAGS_CHECK_AT_PREP, page);

	VM_BUG_ON(migratetype == -1);
	if (likely(!is_migrate_isolate(migratetype)))
		__mod_zone_freepage_state(zone, 1 << order, migratetype);

	VM_BUG_ON_PAGE(pfn & ((1 << order) - 1), page);
	VM_BUG_ON_PAGE(bad_range(zone, page), page);

	while (order < MAX_ORDER - 1) {
		if (compaction_capture(capc, page, order, migratetype)) {
			__mod_zone_freepage_state(zone, -(1 << order),
								migratetype);
			return;
		}

		buddy = find_buddy_page_pfn(page, pfn, order, &buddy_pfn);
		if (!buddy)
			goto done_merging;

		if (unlikely(order >= pageblock_order)) {
			/*
			 * We want to prevent merge between freepages on pageblock
			 * without fallbacks and normal pageblock. Without this,
			 * pageblock isolation could cause incorrect freepage or CMA
			 * accounting or HIGHATOMIC accounting.
			 */
			int buddy_mt = get_pageblock_migratetype(buddy);

			if (migratetype != buddy_mt
					&& (!migratetype_is_mergeable(migratetype) ||
						!migratetype_is_mergeable(buddy_mt)))
				goto done_merging;
		}

		/*
		 * Our buddy is free or it is CONFIG_DEBUG_PAGEALLOC guard page,
		 * merge with it and move up one order.
		 */
		if (page_is_guard(buddy))
			clear_page_guard(zone, buddy, order, migratetype);
		else
			del_page_from_free_list(buddy, zone, order);
		combined_pfn = buddy_pfn & pfn;
		page = page + (combined_pfn - pfn);
		pfn = combined_pfn;
		order++;
	}

done_merging:
	set_buddy_order(page, order);

	if (fpi_flags & FPI_TO_TAIL)
		to_tail = true;
	else if (is_shuffle_order(order))
		to_tail = shuffle_pick_tail();
	else
		to_tail = buddy_merge_likely(pfn, buddy_pfn, page, order);

	if (to_tail)
		add_to_free_list_tail(page, zone, order, migratetype);
	else
		add_to_free_list(page, zone, order, migratetype);

	/* Notify page reporting subsystem of freed page */
	if (!(fpi_flags & FPI_SKIP_REPORT_NOTIFY))
		page_reporting_notify_free(order);
}

/**
 * split_free_page() -- split a free page at split_pfn_offset
 * @free_page:		the original free page
 * @order:		the order of the page
 * @split_pfn_offset:	split offset within the page
 *
 * Return -ENOENT if the free page is changed, otherwise 0
 *
 * It is used when the free page crosses two pageblocks with different migratetypes
 * at split_pfn_offset within the page. The split free page will be put into
 * separate migratetype lists afterwards. Otherwise, the function achieves
 * nothing.
 */
int split_free_page(struct page *free_page,
			unsigned int order, unsigned long split_pfn_offset)
{
	struct zone *zone = page_zone(free_page);
	unsigned long free_page_pfn = page_to_pfn(free_page);
	unsigned long pfn;
	unsigned long flags;
	int free_page_order;
	int mt;
	int ret = 0;

	if (split_pfn_offset == 0)
		return ret;

	spin_lock_irqsave(&zone->lock, flags);

	if (!PageBuddy(free_page) || buddy_order(free_page) != order) {
		ret = -ENOENT;
		goto out;
	}

	mt = get_pageblock_migratetype(free_page);
	if (likely(!is_migrate_isolate(mt)))
		__mod_zone_freepage_state(zone, -(1UL << order), mt);

	del_page_from_free_list(free_page, zone, order);
	for (pfn = free_page_pfn;
	     pfn < free_page_pfn + (1UL << order);) {
		int mt = get_pfnblock_migratetype(pfn_to_page(pfn), pfn);

		free_page_order = min_t(unsigned int,
					pfn ? __ffs(pfn) : order,
					__fls(split_pfn_offset));
		__free_one_page(pfn_to_page(pfn), pfn, zone, free_page_order,
				mt, FPI_NONE);
		pfn += 1UL << free_page_order;
		split_pfn_offset -= (1UL << free_page_order);
		/* we have done the first part, now switch to second part */
		if (split_pfn_offset == 0)
			split_pfn_offset = (1UL << order) - (pfn - free_page_pfn);
	}
out:
	spin_unlock_irqrestore(&zone->lock, flags);
	return ret;
}
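
/*
 * Worked example (illustrative, not from the original file): splitting a free
 * order-9 page starting at pfn 0 with split_pfn_offset == 256 frees two
 * order-8 chunks - min(order, __fls(256)) == 8 for the first chunk and
 * min(__ffs(256), __fls(256)) == 8 for the remainder - so each side of the
 * split can land on its own migratetype freelist.
 */
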
/*
 * A bad page could be due to a number of fields. Instead of multiple branches,
 * try and check multiple fields with one check. The caller must do a detailed
 * check if necessary.
 */
static inline bool page_expected_state(struct page *page,
					unsigned long check_flags)
{
	if (unlikely(atomic_read(&page->_mapcount) != -1))
		return false;

	if (unlikely((unsigned long)page->mapping |
			page_ref_count(page) |
#ifdef CONFIG_MEMCG
			page->memcg_data |
#endif
			(page->flags & check_flags)))
		return false;

	return true;
}

static const char *page_bad_reason(struct page *page, unsigned long flags)
{
	const char *bad_reason = NULL;

	if (unlikely(atomic_read(&page->_mapcount) != -1))
		bad_reason = "nonzero mapcount";
	if (unlikely(page->mapping != NULL))
		bad_reason = "non-NULL mapping";
	if (unlikely(page_ref_count(page) != 0))
		bad_reason = "nonzero _refcount";
	if (unlikely(page->flags & flags)) {
		if (flags == PAGE_FLAGS_CHECK_AT_PREP)
			bad_reason = "PAGE_FLAGS_CHECK_AT_PREP flag(s) set";
		else
			bad_reason = "PAGE_FLAGS_CHECK_AT_FREE flag(s) set";
	}
#ifdef CONFIG_MEMCG
	if (unlikely(page->memcg_data))
		bad_reason = "page still charged to cgroup";
#endif
	return bad_reason;
}

static void free_page_is_bad_report(struct page *page)
{
	bad_page(page,
		 page_bad_reason(page, PAGE_FLAGS_CHECK_AT_FREE));
}

static inline bool free_page_is_bad(struct page *page)
{
	if (likely(page_expected_state(page, PAGE_FLAGS_CHECK_AT_FREE)))
		return false;

	/* Something has gone sideways, find it */
	free_page_is_bad_report(page);
	return true;
}

static int free_tail_pages_check(struct page *head_page, struct page *page)
{
	int ret = 1;

	/*
	 * We rely on page->lru.next never having bit 0 set, unless the page
	 * is PageTail(). Let's make sure that's true even for poisoned ->lru.
	 */
	BUILD_BUG_ON((unsigned long)LIST_POISON1 & 1);

	if (!IS_ENABLED(CONFIG_DEBUG_VM)) {
		ret = 0;
		goto out;
	}
	switch (page - head_page) {
	case 1:
		/* the first tail page: ->mapping may be compound_mapcount() */
		if (unlikely(compound_mapcount(page))) {
			bad_page(page, "nonzero compound_mapcount");
			goto out;
		}
		break;
	case 2:
		/*
		 * the second tail page: ->mapping is
		 * deferred_list.next -- ignore value.
		 */
		break;
	default:
		if (page->mapping != TAIL_MAPPING) {
			bad_page(page, "corrupted mapping in tail page");
			goto out;
		}
		break;
	}
	if (unlikely(!PageTail(page))) {
		bad_page(page, "PageTail not set");
		goto out;
	}
	if (unlikely(compound_head(page) != head_page)) {
		bad_page(page, "compound_head not consistent");
		goto out;
	}
	ret = 0;
out:
	page->mapping = NULL;
	clear_compound_head(page);
	return ret;
}

/*
 * Skip KASAN memory poisoning when either:
 *
 * 1. Deferred memory initialization has not yet completed,
 *    see the explanation below.
 * 2. Skipping poisoning is requested via FPI_SKIP_KASAN_POISON,
 *    see the comment next to it.
 * 3. Skipping poisoning is requested via __GFP_SKIP_KASAN_POISON,
 *    see the comment next to it.
 *
 * Poisoning pages during deferred memory init will greatly lengthen the
 * process and cause problems in large memory systems as the deferred pages
 * initialization is done with interrupts disabled.
 *
 * Assuming that there will be no reference to those newly initialized
 * pages before they are ever allocated, this should have no effect on
 * KASAN memory tracking as the poison will be properly inserted at page
 * allocation time. The only corner case is when pages are allocated by
 * on-demand allocation and then freed again before the deferred pages
 * initialization is done, but this is not likely to happen.
 */
static inline bool should_skip_kasan_poison(struct page *page, fpi_t fpi_flags)
{
	return deferred_pages_enabled() ||
	       (!IS_ENABLED(CONFIG_KASAN_GENERIC) &&
		(fpi_flags & FPI_SKIP_KASAN_POISON)) ||
	       PageSkipKASanPoison(page);
}

static void kernel_init_pages(struct page *page, int numpages)
{
	int i;

	/* s390's use of memset() could override KASAN redzones. */
	kasan_disable_current();
	for (i = 0; i < numpages; i++)
		clear_highpage_kasan_tagged(page + i);
	kasan_enable_current();
}

static __always_inline bool free_pages_prepare(struct page *page,
			unsigned int order, bool check_free, fpi_t fpi_flags)
{
	int bad = 0;
	bool init = want_init_on_free();

	VM_BUG_ON_PAGE(PageTail(page), page);

	trace_mm_page_free(page, order);
	kmsan_free_page(page, order);

	if (unlikely(PageHWPoison(page)) && !order) {
		/*
		 * Do not let hwpoison pages hit pcplists/buddy
		 * Untie memcg state and reset page's owner
		 */
		if (memcg_kmem_enabled() && PageMemcgKmem(page))
			__memcg_kmem_uncharge_page(page, order);
		reset_page_owner(page, order);
		page_table_check_free(page, order);
		return false;
	}

	/*
	 * Check tail pages before head page information is cleared to
	 * avoid checking PageCompound for order-0 pages.
	 */
	if (unlikely(order)) {
		bool compound = PageCompound(page);
		int i;

		VM_BUG_ON_PAGE(compound && compound_order(page) != order, page);

		if (compound) {
			ClearPageDoubleMap(page);
			ClearPageHasHWPoisoned(page);
		}
		for (i = 1; i < (1 << order); i++) {
			if (compound)
				bad += free_tail_pages_check(page, page + i);
			if (unlikely(free_page_is_bad(page + i))) {
				bad++;
				continue;
			}
			(page + i)->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
		}
	}
	if (PageMappingFlags(page))
		page->mapping = NULL;
	if (memcg_kmem_enabled() && PageMemcgKmem(page))
		__memcg_kmem_uncharge_page(page, order);
	if (check_free && free_page_is_bad(page))
		bad++;
	if (bad)
		return false;

	page_cpupid_reset_last(page);
	page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
	reset_page_owner(page, order);
	page_table_check_free(page, order);

	if (!PageHighMem(page)) {
		debug_check_no_locks_freed(page_address(page),
					   PAGE_SIZE << order);
		debug_check_no_obj_freed(page_address(page),
					   PAGE_SIZE << order);
	}

	kernel_poison_pages(page, 1 << order);

	/*
	 * As memory initialization might be integrated into KASAN,
	 * KASAN poisoning and memory initialization code must be
	 * kept together to avoid discrepancies in behavior.
	 *
	 * With hardware tag-based KASAN, memory tags must be set before the
	 * page becomes unavailable via debug_pagealloc or arch_free_page.
	 */
	if (!should_skip_kasan_poison(page, fpi_flags)) {
		kasan_poison_pages(page, order, init);

		/* Memory is already initialized if KASAN did it internally. */
		if (kasan_has_integrated_init())
			init = false;
	}
	if (init)
		kernel_init_pages(page, 1 << order);

	/*
	 * arch_free_page() can make the page's contents inaccessible. s390
	 * does this. So nothing which can access the page's contents should
	 * happen after this.
	 */
	arch_free_page(page, order);

	debug_pagealloc_unmap_pages(page, 1 << order);

	return true;
}

#ifdef CONFIG_DEBUG_VM
/*
 * With DEBUG_VM enabled, order-0 pages are checked immediately when being freed
 * to pcp lists. With debug_pagealloc also enabled, they are also rechecked when
 * moved from pcp lists to free lists.
 */
static bool free_pcp_prepare(struct page *page, unsigned int order)
{
	return free_pages_prepare(page, order, true, FPI_NONE);
}

/* return true if this page has an inappropriate state */
static bool bulkfree_pcp_prepare(struct page *page)
{
	if (debug_pagealloc_enabled_static())
		return free_page_is_bad(page);
	else
		return false;
}
#else
/*
 * With DEBUG_VM disabled, order-0 pages being freed are checked only when
 * moving from pcp lists to free list in order to reduce overhead. With
 * debug_pagealloc enabled, they are checked also immediately when being freed
 * to the pcp lists.
 */
static bool free_pcp_prepare(struct page *page, unsigned int order)
{
	if (debug_pagealloc_enabled_static())
		return free_pages_prepare(page, order, true, FPI_NONE);
	else
		return free_pages_prepare(page, order, false, FPI_NONE);
}

static bool bulkfree_pcp_prepare(struct page *page)
{
	return free_page_is_bad(page);
}
#endif /* CONFIG_DEBUG_VM */

/*
 * Frees a number of pages from the PCP lists
 * Assumes all pages on list are in same zone.
 * count is the number of pages to free.
 */
static void free_pcppages_bulk(struct zone *zone, int count,
					struct per_cpu_pages *pcp,
					int pindex)
{
	int min_pindex = 0;
	int max_pindex = NR_PCP_LISTS - 1;
	unsigned int order;
	bool isolated_pageblocks;
	struct page *page;

	/*
	 * Ensure proper count is passed which otherwise would get stuck in
	 * the below while (list_empty(list)) loop.
	 */
	count = min(pcp->count, count);

	/* Ensure requested pindex is drained first. */
	pindex = pindex - 1;

	/* Caller must hold IRQ-safe pcp->lock so IRQs are disabled. */
	spin_lock(&zone->lock);
	isolated_pageblocks = has_isolate_pageblock(zone);

	while (count > 0) {
		struct list_head *list;
		int nr_pages;

		/* Remove pages from lists in a round-robin fashion. */
		do {
			if (++pindex > max_pindex)
				pindex = min_pindex;
			list = &pcp->lists[pindex];
			if (!list_empty(list))
				break;

			if (pindex == max_pindex)
				max_pindex--;
			if (pindex == min_pindex)
				min_pindex++;
		} while (1);

		order = pindex_to_order(pindex);
		nr_pages = 1 << order;
		do {
			int mt;

			page = list_last_entry(list, struct page, pcp_list);
			mt = get_pcppage_migratetype(page);

			/* must delete to avoid corrupting pcp list */
			list_del(&page->pcp_list);
			count -= nr_pages;
			pcp->count -= nr_pages;

			if (bulkfree_pcp_prepare(page))
				continue;

			/* MIGRATE_ISOLATE page should not go to pcplists */
			VM_BUG_ON_PAGE(is_migrate_isolate(mt), page);
			/* Pageblock could have been isolated meanwhile */
			if (unlikely(isolated_pageblocks))
				mt = get_pageblock_migratetype(page);

			__free_one_page(page, page_to_pfn(page), zone, order, mt, FPI_NONE);
			trace_mm_page_pcpu_drain(page, order, mt);
		} while (count > 0 && !list_empty(list));
	}

	spin_unlock(&zone->lock);
}

static void free_one_page(struct zone *zone,
				struct page *page, unsigned long pfn,
				unsigned int order,
				int migratetype, fpi_t fpi_flags)
{
	unsigned long flags;

	spin_lock_irqsave(&zone->lock, flags);
	if (unlikely(has_isolate_pageblock(zone) ||
		is_migrate_isolate(migratetype))) {
		migratetype = get_pfnblock_migratetype(page, pfn);
	}
	__free_one_page(page, pfn, zone, order, migratetype, fpi_flags);
	spin_unlock_irqrestore(&zone->lock, flags);
}

static void __meminit __init_single_page(struct page *page, unsigned long pfn,
				unsigned long zone, int nid)
{
	mm_zero_struct_page(page);
	set_page_links(page, zone, nid, pfn);
	init_page_count(page);
	page_mapcount_reset(page);
	page_cpupid_reset_last(page);
	page_kasan_tag_reset(page);

	INIT_LIST_HEAD(&page->lru);
#ifdef WANT_PAGE_VIRTUAL
	/* The shift won't overflow because ZONE_NORMAL is below 4G. */
	if (!is_highmem_idx(zone))
		set_page_address(page, __va(pfn << PAGE_SHIFT));
#endif
}

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
static void __meminit init_reserved_page(unsigned long pfn)
{
	pg_data_t *pgdat;
	int nid, zid;

	if (!early_page_uninitialised(pfn))
		return;

	nid = early_pfn_to_nid(pfn);
	pgdat = NODE_DATA(nid);

	for (zid = 0; zid < MAX_NR_ZONES; zid++) {
		struct zone *zone = &pgdat->node_zones[zid];

		if (zone_spans_pfn(zone, pfn))
			break;
	}
	__init_single_page(pfn_to_page(pfn), pfn, zid, nid);
}
#else
static inline void init_reserved_page(unsigned long pfn)
{
}
#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */

/*
 * Initialised pages do not have PageReserved set. This function is
 * called for each range allocated by the bootmem allocator and
 * marks the pages PageReserved. The remaining valid pages are later
 * sent to the buddy page allocator.
 */
void __meminit reserve_bootmem_region(phys_addr_t start, phys_addr_t end)
{
	unsigned long start_pfn = PFN_DOWN(start);
	unsigned long end_pfn = PFN_UP(end);

	for (; start_pfn < end_pfn; start_pfn++) {
		if (pfn_valid(start_pfn)) {
			struct page *page = pfn_to_page(start_pfn);

			init_reserved_page(start_pfn);

			/* Avoid false-positive PageTail() */
			INIT_LIST_HEAD(&page->lru);

			/*
			 * no need for atomic set_bit because the struct
1698 * page is not visible yet so nobody should
1699 * access it yet.
1700 */
1701 __SetPageReserved(page);
7e18adb4
MG
1702 }
1703 }
92923ca3
NZ
1704}
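/*
 * Illustrative example (added comment, assuming 4 KiB pages; the numbers are
 * hypothetical): a memblock reservation of [0x2800, 0x5000) gives
 * start_pfn = PFN_DOWN(0x2800) = 2 and end_pfn = PFN_UP(0x5000) = 5, so the
 * struct pages for pfns 2, 3 and 4 are initialised (when deferred init is
 * enabled) and marked PageReserved.
 */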
1705
7fef431b
DH
1706static void __free_pages_ok(struct page *page, unsigned int order,
1707 fpi_t fpi_flags)
ec95f53a 1708{
d34b0733 1709 unsigned long flags;
95e34412 1710 int migratetype;
dc4b0caf 1711 unsigned long pfn = page_to_pfn(page);
56f0e661 1712 struct zone *zone = page_zone(page);
ec95f53a 1713
2c335680 1714 if (!free_pages_prepare(page, order, true, fpi_flags))
ec95f53a
KM
1715 return;
1716
cfc47a28 1717 migratetype = get_pfnblock_migratetype(page, pfn);
dbbee9d5 1718
56f0e661 1719 spin_lock_irqsave(&zone->lock, flags);
56f0e661
MG
1720 if (unlikely(has_isolate_pageblock(zone) ||
1721 is_migrate_isolate(migratetype))) {
1722 migratetype = get_pfnblock_migratetype(page, pfn);
1723 }
1724 __free_one_page(page, pfn, zone, order, migratetype, fpi_flags);
1725 spin_unlock_irqrestore(&zone->lock, flags);
90249993 1726
d34b0733 1727 __count_vm_events(PGFREE, 1 << order);
1da177e4
LT
1728}
1729
a9cd410a 1730void __free_pages_core(struct page *page, unsigned int order)
a226f6c8 1731{
c3993076 1732 unsigned int nr_pages = 1 << order;
e2d0bd2b 1733 struct page *p = page;
c3993076 1734 unsigned int loop;
a226f6c8 1735
7fef431b
DH
1736 /*
1737 * When initializing the memmap, __init_single_page() sets the refcount
1738 * of all pages to 1 ("allocated"/"not free"). We have to set the
1739 * refcount of all involved pages to 0.
1740 */
e2d0bd2b
YL
1741 prefetchw(p);
1742 for (loop = 0; loop < (nr_pages - 1); loop++, p++) {
1743 prefetchw(p + 1);
c3993076
JW
1744 __ClearPageReserved(p);
1745 set_page_count(p, 0);
a226f6c8 1746 }
e2d0bd2b
YL
1747 __ClearPageReserved(p);
1748 set_page_count(p, 0);
c3993076 1749
9705bea5 1750 atomic_long_add(nr_pages, &page_zone(page)->managed_pages);
7fef431b
DH
1751
1752 /*
1753 * Bypass PCP and place fresh pages right to the tail, primarily
1754 * relevant for memory onlining.
1755 */
2c335680 1756 __free_pages_ok(page, order, FPI_TO_TAIL | FPI_SKIP_KASAN_POISON);
a226f6c8
DH
1757}
1758
a9ee6cf5 1759#ifdef CONFIG_NUMA
7ace9917 1760
03e92a5e
MR
1761/*
 1762 * During memory init, memblock maps pfns to nids. The search is expensive and
1763 * this caches recent lookups. The implementation of __early_pfn_to_nid
1764 * treats start/end as pfns.
1765 */
1766struct mminit_pfnnid_cache {
1767 unsigned long last_start;
1768 unsigned long last_end;
1769 int last_nid;
1770};
75a592a4 1771
03e92a5e 1772static struct mminit_pfnnid_cache early_pfnnid_cache __meminitdata;
6f24fbd3
MR
1773
1774/*
1775 * Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
1776 */
03e92a5e 1777static int __meminit __early_pfn_to_nid(unsigned long pfn,
6f24fbd3 1778 struct mminit_pfnnid_cache *state)
75a592a4 1779{
6f24fbd3 1780 unsigned long start_pfn, end_pfn;
75a592a4
MG
1781 int nid;
1782
6f24fbd3
MR
1783 if (state->last_start <= pfn && pfn < state->last_end)
1784 return state->last_nid;
1785
1786 nid = memblock_search_pfn_nid(pfn, &start_pfn, &end_pfn);
1787 if (nid != NUMA_NO_NODE) {
1788 state->last_start = start_pfn;
1789 state->last_end = end_pfn;
1790 state->last_nid = nid;
1791 }
7ace9917
MG
1792
1793 return nid;
75a592a4 1794}
75a592a4 1795
75a592a4 1796int __meminit early_pfn_to_nid(unsigned long pfn)
75a592a4 1797{
7ace9917 1798 static DEFINE_SPINLOCK(early_pfn_lock);
75a592a4
MG
1799 int nid;
1800
7ace9917 1801 spin_lock(&early_pfn_lock);
56ec43d8 1802 nid = __early_pfn_to_nid(pfn, &early_pfnnid_cache);
7ace9917 1803 if (nid < 0)
e4568d38 1804 nid = first_online_node;
7ace9917 1805 spin_unlock(&early_pfn_lock);
75a592a4 1806
7ace9917 1807 return nid;
75a592a4 1808}
a9ee6cf5 1809#endif /* CONFIG_NUMA */
75a592a4 1810
7c2ee349 1811void __init memblock_free_pages(struct page *page, unsigned long pfn,
3a80a7fa
MG
1812 unsigned int order)
1813{
1814 if (early_page_uninitialised(pfn))
1815 return;
3c206509
AP
1816 if (!kmsan_memblock_free_pages(page, order)) {
1817 /* KMSAN will take care of these pages. */
1818 return;
1819 }
a9cd410a 1820 __free_pages_core(page, order);
3a80a7fa
MG
1821}
1822
7cf91a98
JK
1823/*
1824 * Check that the whole (or subset of) a pageblock given by the interval of
1825 * [start_pfn, end_pfn) is valid and within the same zone, before scanning it
859a85dd 1826 * with compaction's migration or free scanner.
7cf91a98
JK
1827 *
1828 * Return struct page pointer of start_pfn, or NULL if checks were not passed.
1829 *
1830 * It's possible on some configurations to have a setup like node0 node1 node0
1831 * i.e. it's possible that all pages within a zones range of pages do not
1832 * belong to a single zone. We assume that a border between node0 and node1
1833 * can occur within a single pageblock, but not a node0 node1 node0
1834 * interleaving within a single pageblock. It is therefore sufficient to check
1835 * the first and last page of a pageblock and avoid checking each individual
1836 * page in a pageblock.
1837 */
1838struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
1839 unsigned long end_pfn, struct zone *zone)
1840{
1841 struct page *start_page;
1842 struct page *end_page;
1843
1844 /* end_pfn is one past the range we are checking */
1845 end_pfn--;
1846
1847 if (!pfn_valid(start_pfn) || !pfn_valid(end_pfn))
1848 return NULL;
1849
2d070eab
MH
1850 start_page = pfn_to_online_page(start_pfn);
1851 if (!start_page)
1852 return NULL;
7cf91a98
JK
1853
1854 if (page_zone(start_page) != zone)
1855 return NULL;
1856
1857 end_page = pfn_to_page(end_pfn);
1858
1859 /* This gives a shorter code than deriving page_zone(end_page) */
1860 if (page_zone_id(start_page) != page_zone_id(end_page))
1861 return NULL;
1862
1863 return start_page;
1864}
1865
1866void set_zone_contiguous(struct zone *zone)
1867{
1868 unsigned long block_start_pfn = zone->zone_start_pfn;
1869 unsigned long block_end_pfn;
1870
4f9bc69a 1871 block_end_pfn = pageblock_end_pfn(block_start_pfn);
7cf91a98
JK
1872 for (; block_start_pfn < zone_end_pfn(zone);
1873 block_start_pfn = block_end_pfn,
1874 block_end_pfn += pageblock_nr_pages) {
1875
1876 block_end_pfn = min(block_end_pfn, zone_end_pfn(zone));
1877
1878 if (!__pageblock_pfn_to_page(block_start_pfn,
1879 block_end_pfn, zone))
1880 return;
e84fe99b 1881 cond_resched();
7cf91a98
JK
1882 }
1883
1884 /* We confirm that there is no hole */
1885 zone->contiguous = true;
1886}
1887
1888void clear_zone_contiguous(struct zone *zone)
1889{
1890 zone->contiguous = false;
1891}
1892
7e18adb4 1893#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
2f47a91f
PT
1894static void __init deferred_free_range(unsigned long pfn,
1895 unsigned long nr_pages)
a4de83dd 1896{
2f47a91f
PT
1897 struct page *page;
1898 unsigned long i;
a4de83dd 1899
2f47a91f 1900 if (!nr_pages)
a4de83dd
MG
1901 return;
1902
2f47a91f
PT
1903 page = pfn_to_page(pfn);
1904
a4de83dd 1905 /* Free a large naturally-aligned chunk if possible */
ee0913c4 1906 if (nr_pages == pageblock_nr_pages && pageblock_aligned(pfn)) {
ac5d2539 1907 set_pageblock_migratetype(page, MIGRATE_MOVABLE);
a9cd410a 1908 __free_pages_core(page, pageblock_order);
a4de83dd
MG
1909 return;
1910 }
1911
e780149b 1912 for (i = 0; i < nr_pages; i++, page++, pfn++) {
ee0913c4 1913 if (pageblock_aligned(pfn))
e780149b 1914 set_pageblock_migratetype(page, MIGRATE_MOVABLE);
a9cd410a 1915 __free_pages_core(page, 0);
e780149b 1916 }
a4de83dd
MG
1917}
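/*
 * Illustrative note (added comment, assuming pageblock_order == 9, i.e.
 * pageblock_nr_pages == 512): when the deferred-init loop hands this function
 * a fully aligned pageblock (512 pages starting on a 512-page boundary), the
 * whole block is freed with a single __free_pages_core(page, pageblock_order)
 * call; any other range is freed page by page at order 0.
 */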
1918
d3cd131d
NS
1919/* Completion tracking for deferred_init_memmap() threads */
1920static atomic_t pgdat_init_n_undone __initdata;
1921static __initdata DECLARE_COMPLETION(pgdat_init_all_done_comp);
1922
1923static inline void __init pgdat_init_report_one_done(void)
1924{
1925 if (atomic_dec_and_test(&pgdat_init_n_undone))
1926 complete(&pgdat_init_all_done_comp);
1927}
0e1cc95b 1928
2f47a91f 1929/*
80b1f41c
PT
1930 * Returns true if page needs to be initialized or freed to buddy allocator.
1931 *
c9b3637f 1932 * We check whether the current large page is valid by checking only the
80b1f41c 1933 * validity of its head pfn.
2f47a91f 1934 */
56ec43d8 1935static inline bool __init deferred_pfn_valid(unsigned long pfn)
2f47a91f 1936{
ee0913c4 1937 if (pageblock_aligned(pfn) && !pfn_valid(pfn))
80b1f41c 1938 return false;
80b1f41c
PT
1939 return true;
1940}
2f47a91f 1941
80b1f41c
PT
1942/*
1943 * Free pages to buddy allocator. Try to free aligned pages in
1944 * pageblock_nr_pages sizes.
1945 */
56ec43d8 1946static void __init deferred_free_pages(unsigned long pfn,
80b1f41c
PT
1947 unsigned long end_pfn)
1948{
80b1f41c 1949 unsigned long nr_free = 0;
2f47a91f 1950
80b1f41c 1951 for (; pfn < end_pfn; pfn++) {
56ec43d8 1952 if (!deferred_pfn_valid(pfn)) {
80b1f41c
PT
1953 deferred_free_range(pfn - nr_free, nr_free);
1954 nr_free = 0;
ee0913c4 1955 } else if (pageblock_aligned(pfn)) {
80b1f41c
PT
1956 deferred_free_range(pfn - nr_free, nr_free);
1957 nr_free = 1;
80b1f41c
PT
1958 } else {
1959 nr_free++;
1960 }
1961 }
1962 /* Free the last block of pages to allocator */
1963 deferred_free_range(pfn - nr_free, nr_free);
2f47a91f
PT
1964}
1965
80b1f41c
PT
1966/*
1967 * Initialize struct pages. We minimize pfn page lookups and scheduler checks
1968 * by performing it only once every pageblock_nr_pages.
1969 * Return number of pages initialized.
1970 */
56ec43d8 1971static unsigned long __init deferred_init_pages(struct zone *zone,
80b1f41c
PT
1972 unsigned long pfn,
1973 unsigned long end_pfn)
2f47a91f 1974{
56ec43d8 1975 int nid = zone_to_nid(zone);
2f47a91f 1976 unsigned long nr_pages = 0;
56ec43d8 1977 int zid = zone_idx(zone);
2f47a91f 1978 struct page *page = NULL;
2f47a91f 1979
80b1f41c 1980 for (; pfn < end_pfn; pfn++) {
56ec43d8 1981 if (!deferred_pfn_valid(pfn)) {
80b1f41c 1982 page = NULL;
2f47a91f 1983 continue;
ee0913c4 1984 } else if (!page || pageblock_aligned(pfn)) {
2f47a91f 1985 page = pfn_to_page(pfn);
80b1f41c
PT
1986 } else {
1987 page++;
2f47a91f 1988 }
d0dc12e8 1989 __init_single_page(page, pfn, zid, nid);
80b1f41c 1990 nr_pages++;
2f47a91f 1991 }
80b1f41c 1992 return (nr_pages);
2f47a91f
PT
1993}
1994
0e56acae
AD
1995/*
1996 * This function is meant to pre-load the iterator for the zone init.
1997 * Specifically it walks through the ranges until we are caught up to the
1998 * first_init_pfn value and exits there. If we never encounter the value we
1999 * return false indicating there are no valid ranges left.
2000 */
2001static bool __init
2002deferred_init_mem_pfn_range_in_zone(u64 *i, struct zone *zone,
2003 unsigned long *spfn, unsigned long *epfn,
2004 unsigned long first_init_pfn)
2005{
2006 u64 j;
2007
2008 /*
2009 * Start out by walking through the ranges in this zone that have
2010 * already been initialized. We don't need to do anything with them
2011 * so we just need to flush them out of the system.
2012 */
2013 for_each_free_mem_pfn_range_in_zone(j, zone, spfn, epfn) {
2014 if (*epfn <= first_init_pfn)
2015 continue;
2016 if (*spfn < first_init_pfn)
2017 *spfn = first_init_pfn;
2018 *i = j;
2019 return true;
2020 }
2021
2022 return false;
2023}
2024
2025/*
2026 * Initialize and free pages. We do it in two loops: first we initialize
2027 * struct page, then free to buddy allocator, because while we are
2028 * freeing pages we can access pages that are ahead (computing buddy
2029 * page in __free_one_page()).
2030 *
 2031 * In order to try to keep some memory in the cache, we have the loop
2032 * broken along max page order boundaries. This way we will not cause
2033 * any issues with the buddy page computation.
2034 */
2035static unsigned long __init
2036deferred_init_maxorder(u64 *i, struct zone *zone, unsigned long *start_pfn,
2037 unsigned long *end_pfn)
2038{
2039 unsigned long mo_pfn = ALIGN(*start_pfn + 1, MAX_ORDER_NR_PAGES);
2040 unsigned long spfn = *start_pfn, epfn = *end_pfn;
2041 unsigned long nr_pages = 0;
2042 u64 j = *i;
2043
2044 /* First we loop through and initialize the page values */
2045 for_each_free_mem_pfn_range_in_zone_from(j, zone, start_pfn, end_pfn) {
2046 unsigned long t;
2047
2048 if (mo_pfn <= *start_pfn)
2049 break;
2050
2051 t = min(mo_pfn, *end_pfn);
2052 nr_pages += deferred_init_pages(zone, *start_pfn, t);
2053
2054 if (mo_pfn < *end_pfn) {
2055 *start_pfn = mo_pfn;
2056 break;
2057 }
2058 }
2059
2060 /* Reset values and now loop through freeing pages as needed */
2061 swap(j, *i);
2062
2063 for_each_free_mem_pfn_range_in_zone_from(j, zone, &spfn, &epfn) {
2064 unsigned long t;
2065
2066 if (mo_pfn <= spfn)
2067 break;
2068
2069 t = min(mo_pfn, epfn);
2070 deferred_free_pages(spfn, t);
2071
2072 if (mo_pfn <= epfn)
2073 break;
2074 }
2075
2076 return nr_pages;
2077}
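/*
 * Illustrative note (added comment, assuming MAX_ORDER_NR_PAGES == 1024 and
 * hypothetical pfn values): deferred_init_maxorder() handles at most one
 * MAX_ORDER-aligned block per call. With *start_pfn == 5000, mo_pfn becomes
 * ALIGN(5001, 1024) == 5120, so the first loop initialises struct pages for
 * pfns [5000, 5120) (clamped to the zone's memblock ranges) and the second
 * loop frees the same pfns to the buddy allocator, leaving *start_pfn at the
 * boundary for the next call.
 */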
2078
e4443149
DJ
2079static void __init
2080deferred_init_memmap_chunk(unsigned long start_pfn, unsigned long end_pfn,
2081 void *arg)
2082{
2083 unsigned long spfn, epfn;
2084 struct zone *zone = arg;
2085 u64 i;
2086
2087 deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn, start_pfn);
2088
2089 /*
2090 * Initialize and free pages in MAX_ORDER sized increments so that we
2091 * can avoid introducing any issues with the buddy allocator.
2092 */
2093 while (spfn < end_pfn) {
2094 deferred_init_maxorder(&i, zone, &spfn, &epfn);
2095 cond_resched();
2096 }
2097}
2098
ecd09650
DJ
2099/* An arch may override for more concurrency. */
2100__weak int __init
2101deferred_page_init_max_threads(const struct cpumask *node_cpumask)
2102{
2103 return 1;
2104}
2105
7e18adb4 2106/* Initialise remaining memory on a node */
0e1cc95b 2107static int __init deferred_init_memmap(void *data)
7e18adb4 2108{
0e1cc95b 2109 pg_data_t *pgdat = data;
0e56acae 2110 const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
89c7c402 2111 unsigned long spfn = 0, epfn = 0;
0e56acae 2112 unsigned long first_init_pfn, flags;
7e18adb4 2113 unsigned long start = jiffies;
7e18adb4 2114 struct zone *zone;
e4443149 2115 int zid, max_threads;
2f47a91f 2116 u64 i;
7e18adb4 2117
3a2d7fa8
PT
2118 /* Bind memory initialisation thread to a local node if possible */
2119 if (!cpumask_empty(cpumask))
2120 set_cpus_allowed_ptr(current, cpumask);
2121
2122 pgdat_resize_lock(pgdat, &flags);
2123 first_init_pfn = pgdat->first_deferred_pfn;
0e1cc95b 2124 if (first_init_pfn == ULONG_MAX) {
3a2d7fa8 2125 pgdat_resize_unlock(pgdat, &flags);
d3cd131d 2126 pgdat_init_report_one_done();
0e1cc95b
MG
2127 return 0;
2128 }
2129
7e18adb4
MG
2130 /* Sanity check boundaries */
2131 BUG_ON(pgdat->first_deferred_pfn < pgdat->node_start_pfn);
2132 BUG_ON(pgdat->first_deferred_pfn > pgdat_end_pfn(pgdat));
2133 pgdat->first_deferred_pfn = ULONG_MAX;
2134
3d060856
PT
2135 /*
2136 * Once we unlock here, the zone cannot be grown anymore, thus if an
2137 * interrupt thread must allocate this early in boot, zone must be
2138 * pre-grown prior to start of deferred page initialization.
2139 */
2140 pgdat_resize_unlock(pgdat, &flags);
2141
7e18adb4
MG
2142 /* Only the highest zone is deferred so find it */
2143 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
2144 zone = pgdat->node_zones + zid;
2145 if (first_init_pfn < zone_end_pfn(zone))
2146 break;
2147 }
0e56acae
AD
2148
2149 /* If the zone is empty somebody else may have cleared out the zone */
2150 if (!deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn,
2151 first_init_pfn))
2152 goto zone_empty;
7e18adb4 2153
ecd09650 2154 max_threads = deferred_page_init_max_threads(cpumask);
7e18adb4 2155
117003c3 2156 while (spfn < epfn) {
e4443149
DJ
2157 unsigned long epfn_align = ALIGN(epfn, PAGES_PER_SECTION);
2158 struct padata_mt_job job = {
2159 .thread_fn = deferred_init_memmap_chunk,
2160 .fn_arg = zone,
2161 .start = spfn,
2162 .size = epfn_align - spfn,
2163 .align = PAGES_PER_SECTION,
2164 .min_chunk = PAGES_PER_SECTION,
2165 .max_threads = max_threads,
2166 };
2167
2168 padata_do_multithreaded(&job);
2169 deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn,
2170 epfn_align);
117003c3 2171 }
0e56acae 2172zone_empty:
7e18adb4
MG
2173 /* Sanity check that the next zone really is unpopulated */
2174 WARN_ON(++zid < MAX_NR_ZONES && populated_zone(++zone));
2175
89c7c402
DJ
2176 pr_info("node %d deferred pages initialised in %ums\n",
2177 pgdat->node_id, jiffies_to_msecs(jiffies - start));
d3cd131d
NS
2178
2179 pgdat_init_report_one_done();
0e1cc95b
MG
2180 return 0;
2181}
c9e97a19 2182
c9e97a19
PT
2183/*
2184 * If this zone has deferred pages, try to grow it by initializing enough
2185 * deferred pages to satisfy the allocation specified by order, rounded up to
2186 * the nearest PAGES_PER_SECTION boundary. So we're adding memory in increments
2187 * of SECTION_SIZE bytes by initializing struct pages in increments of
2188 * PAGES_PER_SECTION * sizeof(struct page) bytes.
2189 *
2190 * Return true when zone was grown, otherwise return false. We return true even
2191 * when we grow less than requested, to let the caller decide if there are
2192 * enough pages to satisfy the allocation.
2193 *
2194 * Note: We use noinline because this function is needed only during boot, and
2195 * it is called from a __ref function _deferred_grow_zone. This way we are
2196 * making sure that it is not inlined into permanent text section.
2197 */
2198static noinline bool __init
2199deferred_grow_zone(struct zone *zone, unsigned int order)
2200{
c9e97a19 2201 unsigned long nr_pages_needed = ALIGN(1 << order, PAGES_PER_SECTION);
837566e7 2202 pg_data_t *pgdat = zone->zone_pgdat;
c9e97a19 2203 unsigned long first_deferred_pfn = pgdat->first_deferred_pfn;
0e56acae
AD
2204 unsigned long spfn, epfn, flags;
2205 unsigned long nr_pages = 0;
c9e97a19
PT
2206 u64 i;
2207
2208 /* Only the last zone may have deferred pages */
2209 if (zone_end_pfn(zone) != pgdat_end_pfn(pgdat))
2210 return false;
2211
2212 pgdat_resize_lock(pgdat, &flags);
2213
c9e97a19
PT
2214 /*
2215 * If someone grew this zone while we were waiting for spinlock, return
2216 * true, as there might be enough pages already.
2217 */
2218 if (first_deferred_pfn != pgdat->first_deferred_pfn) {
2219 pgdat_resize_unlock(pgdat, &flags);
2220 return true;
2221 }
2222
0e56acae
AD
2223 /* If the zone is empty somebody else may have cleared out the zone */
2224 if (!deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn,
2225 first_deferred_pfn)) {
2226 pgdat->first_deferred_pfn = ULONG_MAX;
c9e97a19 2227 pgdat_resize_unlock(pgdat, &flags);
b9705d87
JG
2228 /* Retry only once. */
2229 return first_deferred_pfn != ULONG_MAX;
c9e97a19
PT
2230 }
2231
0e56acae
AD
2232 /*
2233 * Initialize and free pages in MAX_ORDER sized increments so
2234 * that we can avoid introducing any issues with the buddy
2235 * allocator.
2236 */
2237 while (spfn < epfn) {
2238 /* update our first deferred PFN for this section */
2239 first_deferred_pfn = spfn;
2240
2241 nr_pages += deferred_init_maxorder(&i, zone, &spfn, &epfn);
117003c3 2242 touch_nmi_watchdog();
c9e97a19 2243
0e56acae
AD
2244 /* We should only stop along section boundaries */
2245 if ((first_deferred_pfn ^ spfn) < PAGES_PER_SECTION)
2246 continue;
c9e97a19 2247
0e56acae 2248 /* If our quota has been met we can stop here */
c9e97a19
PT
2249 if (nr_pages >= nr_pages_needed)
2250 break;
2251 }
2252
0e56acae 2253 pgdat->first_deferred_pfn = spfn;
c9e97a19
PT
2254 pgdat_resize_unlock(pgdat, &flags);
2255
2256 return nr_pages > 0;
2257}
2258
2259/*
2260 * deferred_grow_zone() is __init, but it is called from
2261 * get_page_from_freelist() during early boot until deferred_pages permanently
 2262 * disables this call. This is why we have the __ref wrapper: to avoid a
 2263 * section mismatch warning, and to ensure that the function body gets unloaded.
2264 */
2265static bool __ref
2266_deferred_grow_zone(struct zone *zone, unsigned int order)
2267{
2268 return deferred_grow_zone(zone, order);
2269}
2270
7cf91a98 2271#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
0e1cc95b
MG
2272
2273void __init page_alloc_init_late(void)
2274{
7cf91a98 2275 struct zone *zone;
e900a918 2276 int nid;
7cf91a98
JK
2277
2278#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
0e1cc95b 2279
d3cd131d
NS
2280 /* There will be num_node_state(N_MEMORY) threads */
2281 atomic_set(&pgdat_init_n_undone, num_node_state(N_MEMORY));
0e1cc95b 2282 for_each_node_state(nid, N_MEMORY) {
0e1cc95b
MG
2283 kthread_run(deferred_init_memmap, NODE_DATA(nid), "pgdatinit%d", nid);
2284 }
2285
2286 /* Block until all are initialised */
d3cd131d 2287 wait_for_completion(&pgdat_init_all_done_comp);
4248b0da 2288
c9e97a19
PT
2289 /*
2290 * We initialized the rest of the deferred pages. Permanently disable
2291 * on-demand struct page initialization.
2292 */
2293 static_branch_disable(&deferred_pages);
2294
4248b0da
MG
2295 /* Reinit limits that are based on free pages after the kernel is up */
2296 files_maxfiles_init();
7cf91a98 2297#endif
350e88ba 2298
ba8f3587
LF
2299 buffer_init();
2300
3010f876
PT
2301 /* Discard memblock private memory */
2302 memblock_discard();
7cf91a98 2303
e900a918
DW
2304 for_each_node_state(nid, N_MEMORY)
2305 shuffle_free_memory(NODE_DATA(nid));
2306
7cf91a98
JK
2307 for_each_populated_zone(zone)
2308 set_zone_contiguous(zone);
7e18adb4 2309}
7e18adb4 2310
47118af0 2311#ifdef CONFIG_CMA
9cf510a5 2312/* Free whole pageblock and set its migration type to MIGRATE_CMA. */
47118af0
MN
2313void __init init_cma_reserved_pageblock(struct page *page)
2314{
2315 unsigned i = pageblock_nr_pages;
2316 struct page *p = page;
2317
2318 do {
2319 __ClearPageReserved(p);
2320 set_page_count(p, 0);
d883c6cf 2321 } while (++p, --i);
47118af0 2322
47118af0 2323 set_pageblock_migratetype(page, MIGRATE_CMA);
b3d40a2b
DH
2324 set_page_refcounted(page);
2325 __free_pages(page, pageblock_order);
dc78327c 2326
3dcc0571 2327 adjust_managed_page_count(page, pageblock_nr_pages);
3c381db1 2328 page_zone(page)->cma_pages += pageblock_nr_pages;
47118af0
MN
2329}
2330#endif
1da177e4
LT
2331
2332/*
2333 * The order of subdivision here is critical for the IO subsystem.
2334 * Please do not alter this order without good reasons and regression
2335 * testing. Specifically, as large blocks of memory are subdivided,
2336 * the order in which smaller blocks are delivered depends on the order
2337 * they're subdivided in this function. This is the primary factor
2338 * influencing the order in which pages are delivered to the IO
2339 * subsystem according to empirical testing, and this is also justified
2340 * by considering the behavior of a buddy system containing a single
2341 * large block of memory acted on by a series of small allocations.
2342 * This behavior is a critical factor in sglist merging's success.
2343 *
6d49e352 2344 * -- nyc
1da177e4 2345 */
085cc7d5 2346static inline void expand(struct zone *zone, struct page *page,
6ab01363 2347 int low, int high, int migratetype)
1da177e4
LT
2348{
2349 unsigned long size = 1 << high;
2350
2351 while (high > low) {
1da177e4
LT
2352 high--;
2353 size >>= 1;
309381fe 2354 VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]);
c0a32fc5 2355
acbc15a4
JK
2356 /*
 2357		 * Mark as guard pages (or page), so that they can be merged
 2358		 * back into the allocator when the buddy is freed.
 2359		 * Corresponding page table entries will not be touched;
 2360		 * the pages will remain not present in the virtual address space.
2361 */
2362 if (set_page_guard(zone, &page[size], high, migratetype))
c0a32fc5 2363 continue;
acbc15a4 2364
6ab01363 2365 add_to_free_list(&page[size], zone, high, migratetype);
ab130f91 2366 set_buddy_order(&page[size], high);
1da177e4 2367 }
1da177e4
LT
2368}
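/*
 * Worked example (added for illustration, not part of the original file):
 * expand(zone, page, low = 0, high = 3, mt) splits an order-3 block taken off
 * the free list to satisfy an order-0 request. The loop puts page[4] back at
 * order 2, page[2] at order 1 and page[1] at order 0, recording each buddy's
 * order via set_buddy_order(), and leaves page[0] for the caller.
 */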
2369
4e611801 2370static void check_new_page_bad(struct page *page)
1da177e4 2371{
f4c18e6f 2372 if (unlikely(page->flags & __PG_HWPOISON)) {
e570f56c
NH
2373 /* Don't complain about hwpoisoned pages */
2374 page_mapcount_reset(page); /* remove PageBuddy */
2375 return;
f4c18e6f 2376 }
58b7f119
WY
2377
2378 bad_page(page,
2379 page_bad_reason(page, PAGE_FLAGS_CHECK_AT_PREP));
4e611801
VB
2380}
2381
2382/*
2383 * This page is about to be returned from the page allocator
2384 */
2385static inline int check_new_page(struct page *page)
2386{
2387 if (likely(page_expected_state(page,
2388 PAGE_FLAGS_CHECK_AT_PREP|__PG_HWPOISON)))
2389 return 0;
2390
2391 check_new_page_bad(page);
2392 return 1;
2a7684a2
WF
2393}
2394
77fe7f13
MG
2395static bool check_new_pages(struct page *page, unsigned int order)
2396{
2397 int i;
2398 for (i = 0; i < (1 << order); i++) {
2399 struct page *p = page + i;
2400
2401 if (unlikely(check_new_page(p)))
2402 return true;
2403 }
2404
2405 return false;
2406}
2407
479f854a 2408#ifdef CONFIG_DEBUG_VM
4462b32c
VB
2409/*
2410 * With DEBUG_VM enabled, order-0 pages are checked for expected state when
2411 * being allocated from pcp lists. With debug_pagealloc also enabled, they are
2412 * also checked when pcp lists are refilled from the free lists.
2413 */
77fe7f13 2414static inline bool check_pcp_refill(struct page *page, unsigned int order)
479f854a 2415{
8e57f8ac 2416 if (debug_pagealloc_enabled_static())
77fe7f13 2417 return check_new_pages(page, order);
4462b32c
VB
2418 else
2419 return false;
479f854a
MG
2420}
2421
77fe7f13 2422static inline bool check_new_pcp(struct page *page, unsigned int order)
479f854a 2423{
77fe7f13 2424 return check_new_pages(page, order);
479f854a
MG
2425}
2426#else
4462b32c
VB
2427/*
2428 * With DEBUG_VM disabled, free order-0 pages are checked for expected state
2429 * when pcp lists are being refilled from the free lists. With debug_pagealloc
2430 * enabled, they are also checked when being allocated from the pcp lists.
2431 */
77fe7f13 2432static inline bool check_pcp_refill(struct page *page, unsigned int order)
479f854a 2433{
77fe7f13 2434 return check_new_pages(page, order);
479f854a 2435}
77fe7f13 2436static inline bool check_new_pcp(struct page *page, unsigned int order)
479f854a 2437{
8e57f8ac 2438 if (debug_pagealloc_enabled_static())
77fe7f13 2439 return check_new_pages(page, order);
4462b32c
VB
2440 else
2441 return false;
479f854a
MG
2442}
2443#endif /* CONFIG_DEBUG_VM */
2444
6d05141a 2445static inline bool should_skip_kasan_unpoison(gfp_t flags)
53ae233c
AK
2446{
2447 /* Don't skip if a software KASAN mode is enabled. */
2448 if (IS_ENABLED(CONFIG_KASAN_GENERIC) ||
2449 IS_ENABLED(CONFIG_KASAN_SW_TAGS))
2450 return false;
2451
2452 /* Skip, if hardware tag-based KASAN is not enabled. */
2453 if (!kasan_hw_tags_enabled())
2454 return true;
2455
2456 /*
6d05141a
CM
2457 * With hardware tag-based KASAN enabled, skip if this has been
2458 * requested via __GFP_SKIP_KASAN_UNPOISON.
53ae233c 2459 */
6d05141a 2460 return flags & __GFP_SKIP_KASAN_UNPOISON;
53ae233c
AK
2461}
2462
9353ffa6
AK
2463static inline bool should_skip_init(gfp_t flags)
2464{
2465 /* Don't skip, if hardware tag-based KASAN is not enabled. */
2466 if (!kasan_hw_tags_enabled())
2467 return false;
2468
2469 /* For hardware tag-based KASAN, skip if requested. */
2470 return (flags & __GFP_SKIP_ZERO);
2471}
2472
46f24fd8
JK
2473inline void post_alloc_hook(struct page *page, unsigned int order,
2474 gfp_t gfp_flags)
2475{
9353ffa6
AK
2476 bool init = !want_init_on_free() && want_init_on_alloc(gfp_flags) &&
2477 !should_skip_init(gfp_flags);
b42090ae 2478 bool init_tags = init && (gfp_flags & __GFP_ZEROTAGS);
70c248ac 2479 int i;
b42090ae 2480
46f24fd8
JK
2481 set_page_private(page, 0);
2482 set_page_refcounted(page);
2483
2484 arch_alloc_page(page, order);
77bc7fd6 2485 debug_pagealloc_map_pages(page, 1 << order);
1bb5eab3
AK
2486
2487 /*
2488 * Page unpoisoning must happen before memory initialization.
2489 * Otherwise, the poison pattern will be overwritten for __GFP_ZERO
2490 * allocations and the page unpoisoning code will complain.
2491 */
8db26a3d 2492 kernel_unpoison_pages(page, 1 << order);
862b6dee 2493
1bb5eab3
AK
2494 /*
2495 * As memory initialization might be integrated into KASAN,
b42090ae 2496	 * KASAN unpoisoning and memory initialization code must be
1bb5eab3
AK
2497 * kept together to avoid discrepancies in behavior.
2498 */
9294b128
AK
2499
2500 /*
 2501	 * Zero the memory tags if requested (this happens only when memory
 2502	 * should be initialized as well).
2503 */
2504 if (init_tags) {
9294b128
AK
2505 /* Initialize both memory and tags. */
2506 for (i = 0; i != 1 << order; ++i)
2507 tag_clear_highpage(page + i);
2508
2509 /* Note that memory is already initialized by the loop above. */
2510 init = false;
2511 }
6d05141a 2512 if (!should_skip_kasan_unpoison(gfp_flags)) {
53ae233c 2513 /* Unpoison shadow memory or set memory tags. */
e9d0ca92 2514 kasan_unpoison_pages(page, order, init);
7e3cbba6 2515
e9d0ca92
AK
2516 /* Note that memory is already initialized by KASAN. */
2517 if (kasan_has_integrated_init())
7e3cbba6 2518 init = false;
70c248ac
CM
2519 } else {
2520 /* Ensure page_address() dereferencing does not fault. */
2521 for (i = 0; i != 1 << order; ++i)
2522 page_kasan_tag_reset(page + i);
7a3b8353 2523 }
7e3cbba6
AK
2524 /* If memory is still not initialized, do it now. */
2525 if (init)
aeaec8e2 2526 kernel_init_pages(page, 1 << order);
89b27116
AK
2527 /* Propagate __GFP_SKIP_KASAN_POISON to page flags. */
2528 if (kasan_hw_tags_enabled() && (gfp_flags & __GFP_SKIP_KASAN_POISON))
2529 SetPageSkipKASanPoison(page);
1bb5eab3
AK
2530
2531 set_page_owner(page, order, gfp_flags);
df4e817b 2532 page_table_check_alloc(page, order);
46f24fd8
JK
2533}
2534
479f854a 2535static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
c603844b 2536 unsigned int alloc_flags)
2a7684a2 2537{
46f24fd8 2538 post_alloc_hook(page, order, gfp_flags);
17cf4406 2539
17cf4406
NP
2540 if (order && (gfp_flags & __GFP_COMP))
2541 prep_compound_page(page, order);
2542
75379191 2543 /*
2f064f34 2544 * page is set pfmemalloc when ALLOC_NO_WATERMARKS was necessary to
75379191
VB
2545 * allocate the page. The expectation is that the caller is taking
2546 * steps that will free more memory. The caller should avoid the page
2547 * being used for !PFMEMALLOC purposes.
2548 */
2f064f34
MH
2549 if (alloc_flags & ALLOC_NO_WATERMARKS)
2550 set_page_pfmemalloc(page);
2551 else
2552 clear_page_pfmemalloc(page);
1da177e4
LT
2553}
2554
56fd56b8
MG
2555/*
2556 * Go through the free lists for the given migratetype and remove
2557 * the smallest available page from the freelists
2558 */
85ccc8fa 2559static __always_inline
728ec980 2560struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
56fd56b8
MG
2561 int migratetype)
2562{
2563 unsigned int current_order;
b8af2941 2564 struct free_area *area;
56fd56b8
MG
2565 struct page *page;
2566
2567 /* Find a page of the appropriate size in the preferred list */
2568 for (current_order = order; current_order < MAX_ORDER; ++current_order) {
2569 area = &(zone->free_area[current_order]);
b03641af 2570 page = get_page_from_free_area(area, migratetype);
a16601c5
GT
2571 if (!page)
2572 continue;
6ab01363
AD
2573 del_page_from_free_list(page, zone, current_order);
2574 expand(zone, page, order, current_order, migratetype);
bb14c2c7 2575 set_pcppage_migratetype(page, migratetype);
10e0f753
WY
2576 trace_mm_page_alloc_zone_locked(page, order, migratetype,
2577 pcp_allowed_order(order) &&
2578 migratetype < MIGRATE_PCPTYPES);
56fd56b8
MG
2579 return page;
2580 }
2581
2582 return NULL;
2583}
2584
2585
b2a0ac88
MG
2586/*
 2587 * This array describes the order in which free lists are fallen back to
 2588 * when the free lists for the desired migratetype are depleted.
1dd214b8
ZY
2589 *
2590 * The other migratetypes do not have fallbacks.
b2a0ac88 2591 */
da415663 2592static int fallbacks[MIGRATE_TYPES][3] = {
974a786e 2593 [MIGRATE_UNMOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE, MIGRATE_TYPES },
974a786e 2594 [MIGRATE_MOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_TYPES },
7ead3342 2595 [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE, MIGRATE_TYPES },
b2a0ac88
MG
2596};
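/*
 * Example of the fallback order above (illustrative comment, not in the
 * original source): a MIGRATE_UNMOVABLE request whose own free lists are
 * empty first tries MIGRATE_RECLAIMABLE, then MIGRATE_MOVABLE; the
 * MIGRATE_TYPES sentinel terminates the search in find_suitable_fallback().
 */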
2597
dc67647b 2598#ifdef CONFIG_CMA
85ccc8fa 2599static __always_inline struct page *__rmqueue_cma_fallback(struct zone *zone,
dc67647b
JK
2600 unsigned int order)
2601{
2602 return __rmqueue_smallest(zone, order, MIGRATE_CMA);
2603}
2604#else
2605static inline struct page *__rmqueue_cma_fallback(struct zone *zone,
2606 unsigned int order) { return NULL; }
2607#endif
2608
c361be55 2609/*
293ffa5e 2610 * Move the free pages in a range to the freelist tail of the requested type.
d9c23400 2611 * Note that start_pfn and end_pfn are not necessarily aligned on a pageblock
c361be55
MG
2612 * boundary. If alignment is required, use move_freepages_block()
2613 */
02aa0cdd 2614static int move_freepages(struct zone *zone,
39ddb991 2615 unsigned long start_pfn, unsigned long end_pfn,
02aa0cdd 2616 int migratetype, int *num_movable)
c361be55
MG
2617{
2618 struct page *page;
39ddb991 2619 unsigned long pfn;
d00181b9 2620 unsigned int order;
d100313f 2621 int pages_moved = 0;
c361be55 2622
39ddb991 2623 for (pfn = start_pfn; pfn <= end_pfn;) {
39ddb991 2624 page = pfn_to_page(pfn);
c361be55 2625 if (!PageBuddy(page)) {
02aa0cdd
VB
2626 /*
2627 * We assume that pages that could be isolated for
2628 * migration are movable. But we don't actually try
2629 * isolating, as that would be expensive.
2630 */
2631 if (num_movable &&
2632 (PageLRU(page) || __PageMovable(page)))
2633 (*num_movable)++;
39ddb991 2634 pfn++;
c361be55
MG
2635 continue;
2636 }
2637
cd961038
DR
2638 /* Make sure we are not inadvertently changing nodes */
2639 VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
2640 VM_BUG_ON_PAGE(page_zone(page) != zone, page);
2641
ab130f91 2642 order = buddy_order(page);
6ab01363 2643 move_to_free_list(page, zone, order, migratetype);
39ddb991 2644 pfn += 1 << order;
d100313f 2645 pages_moved += 1 << order;
c361be55
MG
2646 }
2647
d100313f 2648 return pages_moved;
c361be55
MG
2649}
2650
ee6f509c 2651int move_freepages_block(struct zone *zone, struct page *page,
02aa0cdd 2652 int migratetype, int *num_movable)
c361be55 2653{
39ddb991 2654 unsigned long start_pfn, end_pfn, pfn;
c361be55 2655
4a222127
DR
2656 if (num_movable)
2657 *num_movable = 0;
2658
39ddb991 2659 pfn = page_to_pfn(page);
4f9bc69a
KW
2660 start_pfn = pageblock_start_pfn(pfn);
2661 end_pfn = pageblock_end_pfn(pfn) - 1;
c361be55
MG
2662
2663 /* Do not cross zone boundaries */
108bcc96 2664 if (!zone_spans_pfn(zone, start_pfn))
39ddb991 2665 start_pfn = pfn;
108bcc96 2666 if (!zone_spans_pfn(zone, end_pfn))
c361be55
MG
2667 return 0;
2668
39ddb991 2669 return move_freepages(zone, start_pfn, end_pfn, migratetype,
02aa0cdd 2670 num_movable);
c361be55
MG
2671}
2672
2f66a68f
MG
2673static void change_pageblock_range(struct page *pageblock_page,
2674 int start_order, int migratetype)
2675{
2676 int nr_pageblocks = 1 << (start_order - pageblock_order);
2677
2678 while (nr_pageblocks--) {
2679 set_pageblock_migratetype(pageblock_page, migratetype);
2680 pageblock_page += pageblock_nr_pages;
2681 }
2682}
2683
fef903ef 2684/*
9c0415eb
VB
2685 * When we are falling back to another migratetype during allocation, try to
2686 * steal extra free pages from the same pageblocks to satisfy further
2687 * allocations, instead of polluting multiple pageblocks.
2688 *
2689 * If we are stealing a relatively large buddy page, it is likely there will
2690 * be more free pages in the pageblock, so try to steal them all. For
2691 * reclaimable and unmovable allocations, we steal regardless of page size,
2692 * as fragmentation caused by those allocations polluting movable pageblocks
2693 * is worse than movable allocations stealing from unmovable and reclaimable
2694 * pageblocks.
fef903ef 2695 */
4eb7dce6
JK
2696static bool can_steal_fallback(unsigned int order, int start_mt)
2697{
2698 /*
 2699	 * This order check is intentionally kept even though the next
 2700	 * check is more relaxed. The reason is that we can steal a whole
 2701	 * pageblock if this condition is met, whereas the check below does
 2702	 * not guarantee it and is only a heuristic, so it could be changed
 2703	 * at any time.
2704 */
2705 if (order >= pageblock_order)
2706 return true;
2707
2708 if (order >= pageblock_order / 2 ||
2709 start_mt == MIGRATE_RECLAIMABLE ||
2710 start_mt == MIGRATE_UNMOVABLE ||
2711 page_group_by_mobility_disabled)
2712 return true;
2713
2714 return false;
2715}
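/*
 * Illustrative example (added comment, assuming pageblock_order == 9 as on
 * x86-64 with THP): stealing the whole pageblock is allowed for any request
 * of order >= 9, for orders >= 4 (pageblock_order / 2), and for all
 * MIGRATE_RECLAIMABLE and MIGRATE_UNMOVABLE requests; only small MOVABLE
 * requests are refused, since a few movable pages in a foreign pageblock are
 * the least harmful form of pollution.
 */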
2716
597c8920 2717static inline bool boost_watermark(struct zone *zone)
1c30844d
MG
2718{
2719 unsigned long max_boost;
2720
2721 if (!watermark_boost_factor)
597c8920 2722 return false;
14f69140
HW
2723 /*
2724 * Don't bother in zones that are unlikely to produce results.
2725 * On small machines, including kdump capture kernels running
2726 * in a small area, boosting the watermark can cause an out of
2727 * memory situation immediately.
2728 */
2729 if ((pageblock_nr_pages * 4) > zone_managed_pages(zone))
597c8920 2730 return false;
1c30844d
MG
2731
2732 max_boost = mult_frac(zone->_watermark[WMARK_HIGH],
2733 watermark_boost_factor, 10000);
94b3334c
MG
2734
2735 /*
2736 * high watermark may be uninitialised if fragmentation occurs
2737 * very early in boot so do not boost. We do not fall
2738 * through and boost by pageblock_nr_pages as failing
2739 * allocations that early means that reclaim is not going
2740 * to help and it may even be impossible to reclaim the
2741 * boosted watermark resulting in a hang.
2742 */
2743 if (!max_boost)
597c8920 2744 return false;
94b3334c 2745
1c30844d
MG
2746 max_boost = max(pageblock_nr_pages, max_boost);
2747
2748 zone->watermark_boost = min(zone->watermark_boost + pageblock_nr_pages,
2749 max_boost);
597c8920
JW
2750
2751 return true;
1c30844d
MG
2752}
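/*
 * Numeric sketch (illustrative comment, assuming the default
 * watermark_boost_factor of 15000): max_boost becomes 150% of the high
 * watermark, and each fallback event raises zone->watermark_boost by
 * pageblock_nr_pages until that cap is reached, temporarily making kswapd
 * reclaim more aggressively.
 */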
2753
4eb7dce6
JK
2754/*
2755 * This function implements actual steal behaviour. If order is large enough,
2756 * we can steal whole pageblock. If not, we first move freepages in this
02aa0cdd
VB
2757 * pageblock to our migratetype and determine how many already-allocated pages
2758 * are there in the pageblock with a compatible migratetype. If at least half
2759 * of pages are free or compatible, we can change migratetype of the pageblock
2760 * itself, so pages freed in the future will be put on the correct free list.
4eb7dce6
JK
2761 */
2762static void steal_suitable_fallback(struct zone *zone, struct page *page,
1c30844d 2763 unsigned int alloc_flags, int start_type, bool whole_block)
fef903ef 2764{
ab130f91 2765 unsigned int current_order = buddy_order(page);
02aa0cdd
VB
2766 int free_pages, movable_pages, alike_pages;
2767 int old_block_type;
2768
2769 old_block_type = get_pageblock_migratetype(page);
fef903ef 2770
3bc48f96
VB
2771 /*
2772 * This can happen due to races and we want to prevent broken
2773 * highatomic accounting.
2774 */
02aa0cdd 2775 if (is_migrate_highatomic(old_block_type))
3bc48f96
VB
2776 goto single_page;
2777
fef903ef
SB
2778 /* Take ownership for orders >= pageblock_order */
2779 if (current_order >= pageblock_order) {
2780 change_pageblock_range(page, current_order, start_type);
3bc48f96 2781 goto single_page;
fef903ef
SB
2782 }
2783
1c30844d
MG
2784 /*
2785 * Boost watermarks to increase reclaim pressure to reduce the
2786 * likelihood of future fallbacks. Wake kswapd now as the node
2787 * may be balanced overall and kswapd will not wake naturally.
2788 */
597c8920 2789 if (boost_watermark(zone) && (alloc_flags & ALLOC_KSWAPD))
73444bc4 2790 set_bit(ZONE_BOOSTED_WATERMARK, &zone->flags);
1c30844d 2791
3bc48f96
VB
2792 /* We are not allowed to try stealing from the whole block */
2793 if (!whole_block)
2794 goto single_page;
2795
02aa0cdd
VB
2796 free_pages = move_freepages_block(zone, page, start_type,
2797 &movable_pages);
2798 /*
2799 * Determine how many pages are compatible with our allocation.
2800 * For movable allocation, it's the number of movable pages which
2801 * we just obtained. For other types it's a bit more tricky.
2802 */
2803 if (start_type == MIGRATE_MOVABLE) {
2804 alike_pages = movable_pages;
2805 } else {
2806 /*
2807 * If we are falling back a RECLAIMABLE or UNMOVABLE allocation
2808 * to MOVABLE pageblock, consider all non-movable pages as
2809 * compatible. If it's UNMOVABLE falling back to RECLAIMABLE or
2810 * vice versa, be conservative since we can't distinguish the
2811 * exact migratetype of non-movable pages.
2812 */
2813 if (old_block_type == MIGRATE_MOVABLE)
2814 alike_pages = pageblock_nr_pages
2815 - (free_pages + movable_pages);
2816 else
2817 alike_pages = 0;
2818 }
2819
3bc48f96 2820 /* moving whole block can fail due to zone boundary conditions */
02aa0cdd 2821 if (!free_pages)
3bc48f96 2822 goto single_page;
fef903ef 2823
02aa0cdd
VB
2824 /*
2825 * If a sufficient number of pages in the block are either free or of
2826 * comparable migratability as our allocation, claim the whole block.
2827 */
2828 if (free_pages + alike_pages >= (1 << (pageblock_order-1)) ||
4eb7dce6
JK
2829 page_group_by_mobility_disabled)
2830 set_pageblock_migratetype(page, start_type);
3bc48f96
VB
2831
2832 return;
2833
2834single_page:
6ab01363 2835 move_to_free_list(page, zone, current_order, start_type);
4eb7dce6
JK
2836}
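/*
 * Worked example of the "at least half" test above (illustrative comment,
 * assuming pageblock_nr_pages == 512): an UNMOVABLE allocation falls back
 * into a MOVABLE pageblock holding 300 free pages and 100 movable pages.
 * Then alike_pages = 512 - (300 + 100) = 112, and since
 * 300 + 112 >= 256 (half a pageblock), the whole pageblock is converted to
 * MIGRATE_UNMOVABLE.
 */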
2837
2149cdae
JK
2838/*
2839 * Check whether there is a suitable fallback freepage with requested order.
 2840 * If only_stealable is true, this function returns fallback_mt only if
 2841 * we can steal the other freepages altogether. This helps to reduce
 2842 * fragmentation due to mixed-migratetype pages in one pageblock.
2843 */
2844int find_suitable_fallback(struct free_area *area, unsigned int order,
2845 int migratetype, bool only_stealable, bool *can_steal)
4eb7dce6
JK
2846{
2847 int i;
2848 int fallback_mt;
2849
2850 if (area->nr_free == 0)
2851 return -1;
2852
2853 *can_steal = false;
2854 for (i = 0;; i++) {
2855 fallback_mt = fallbacks[migratetype][i];
974a786e 2856 if (fallback_mt == MIGRATE_TYPES)
4eb7dce6
JK
2857 break;
2858
b03641af 2859 if (free_area_empty(area, fallback_mt))
4eb7dce6 2860 continue;
fef903ef 2861
4eb7dce6
JK
2862 if (can_steal_fallback(order, migratetype))
2863 *can_steal = true;
2864
2149cdae
JK
2865 if (!only_stealable)
2866 return fallback_mt;
2867
2868 if (*can_steal)
2869 return fallback_mt;
fef903ef 2870 }
4eb7dce6
JK
2871
2872 return -1;
fef903ef
SB
2873}
2874
0aaa29a5
MG
2875/*
2876 * Reserve a pageblock for exclusive use of high-order atomic allocations if
2877 * there are no empty page blocks that contain a page with a suitable order
2878 */
2879static void reserve_highatomic_pageblock(struct page *page, struct zone *zone,
2880 unsigned int alloc_order)
2881{
2882 int mt;
2883 unsigned long max_managed, flags;
2884
2885 /*
2886 * Limit the number reserved to 1 pageblock or roughly 1% of a zone.
2887 * Check is race-prone but harmless.
2888 */
9705bea5 2889 max_managed = (zone_managed_pages(zone) / 100) + pageblock_nr_pages;
0aaa29a5
MG
2890 if (zone->nr_reserved_highatomic >= max_managed)
2891 return;
2892
2893 spin_lock_irqsave(&zone->lock, flags);
2894
2895 /* Recheck the nr_reserved_highatomic limit under the lock */
2896 if (zone->nr_reserved_highatomic >= max_managed)
2897 goto out_unlock;
2898
2899 /* Yoink! */
2900 mt = get_pageblock_migratetype(page);
1dd214b8
ZY
2901 /* Only reserve normal pageblocks (i.e., they can merge with others) */
2902 if (migratetype_is_mergeable(mt)) {
0aaa29a5
MG
2903 zone->nr_reserved_highatomic += pageblock_nr_pages;
2904 set_pageblock_migratetype(page, MIGRATE_HIGHATOMIC);
02aa0cdd 2905 move_freepages_block(zone, page, MIGRATE_HIGHATOMIC, NULL);
0aaa29a5
MG
2906 }
2907
2908out_unlock:
2909 spin_unlock_irqrestore(&zone->lock, flags);
2910}
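/*
 * Rough sizing example (illustrative comment, assuming 4 KiB pages and
 * pageblock_nr_pages == 512): for a zone managing 1 GiB (262144 pages),
 * max_managed = 262144 / 100 + 512 = 3133 pages, i.e. a little over 12 MiB
 * may be held back as the MIGRATE_HIGHATOMIC reserve.
 */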
2911
2912/*
2913 * Used when an allocation is about to fail under memory pressure. This
2914 * potentially hurts the reliability of high-order allocations when under
2915 * intense memory pressure but failed atomic allocations should be easier
2916 * to recover from than an OOM.
29fac03b
MK
2917 *
2918 * If @force is true, try to unreserve a pageblock even though highatomic
2919 * pageblock is exhausted.
0aaa29a5 2920 */
29fac03b
MK
2921static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
2922 bool force)
0aaa29a5
MG
2923{
2924 struct zonelist *zonelist = ac->zonelist;
2925 unsigned long flags;
2926 struct zoneref *z;
2927 struct zone *zone;
2928 struct page *page;
2929 int order;
04c8716f 2930 bool ret;
0aaa29a5 2931
97a225e6 2932 for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->highest_zoneidx,
0aaa29a5 2933 ac->nodemask) {
29fac03b
MK
2934 /*
2935 * Preserve at least one pageblock unless memory pressure
2936 * is really high.
2937 */
2938 if (!force && zone->nr_reserved_highatomic <=
2939 pageblock_nr_pages)
0aaa29a5
MG
2940 continue;
2941
2942 spin_lock_irqsave(&zone->lock, flags);
2943 for (order = 0; order < MAX_ORDER; order++) {
2944 struct free_area *area = &(zone->free_area[order]);
2945
b03641af 2946 page = get_page_from_free_area(area, MIGRATE_HIGHATOMIC);
a16601c5 2947 if (!page)
0aaa29a5
MG
2948 continue;
2949
0aaa29a5 2950 /*
4855e4a7
MK
 2951			 * In the page freeing path, the migratetype change is racy, so
 2952			 * we can encounter several free pages in a pageblock
f0953a1b 2953 * in this loop although we changed the pageblock type
4855e4a7
MK
2954 * from highatomic to ac->migratetype. So we should
2955 * adjust the count once.
0aaa29a5 2956 */
a6ffdc07 2957 if (is_migrate_highatomic_page(page)) {
4855e4a7
MK
2958 /*
2959 * It should never happen but changes to
2960 * locking could inadvertently allow a per-cpu
2961 * drain to add pages to MIGRATE_HIGHATOMIC
2962 * while unreserving so be safe and watch for
2963 * underflows.
2964 */
2965 zone->nr_reserved_highatomic -= min(
2966 pageblock_nr_pages,
2967 zone->nr_reserved_highatomic);
2968 }
0aaa29a5
MG
2969
2970 /*
2971 * Convert to ac->migratetype and avoid the normal
2972 * pageblock stealing heuristics. Minimally, the caller
2973 * is doing the work and needs the pages. More
2974 * importantly, if the block was always converted to
2975 * MIGRATE_UNMOVABLE or another type then the number
2976 * of pageblocks that cannot be completely freed
2977 * may increase.
2978 */
2979 set_pageblock_migratetype(page, ac->migratetype);
02aa0cdd
VB
2980 ret = move_freepages_block(zone, page, ac->migratetype,
2981 NULL);
29fac03b
MK
2982 if (ret) {
2983 spin_unlock_irqrestore(&zone->lock, flags);
2984 return ret;
2985 }
0aaa29a5
MG
2986 }
2987 spin_unlock_irqrestore(&zone->lock, flags);
2988 }
04c8716f
MK
2989
2990 return false;
0aaa29a5
MG
2991}
2992
3bc48f96
VB
2993/*
2994 * Try finding a free buddy page on the fallback list and put it on the free
2995 * list of requested migratetype, possibly along with other pages from the same
2996 * block, depending on fragmentation avoidance heuristics. Returns true if
2997 * fallback was found so that __rmqueue_smallest() can grab it.
b002529d
RV
2998 *
2999 * The use of signed ints for order and current_order is a deliberate
3000 * deviation from the rest of this file, to make the for loop
3001 * condition simpler.
3bc48f96 3002 */
85ccc8fa 3003static __always_inline bool
6bb15450
MG
3004__rmqueue_fallback(struct zone *zone, int order, int start_migratetype,
3005 unsigned int alloc_flags)
b2a0ac88 3006{
b8af2941 3007 struct free_area *area;
b002529d 3008 int current_order;
6bb15450 3009 int min_order = order;
b2a0ac88 3010 struct page *page;
4eb7dce6
JK
3011 int fallback_mt;
3012 bool can_steal;
b2a0ac88 3013
6bb15450
MG
3014 /*
3015 * Do not steal pages from freelists belonging to other pageblocks
3016 * i.e. orders < pageblock_order. If there are no local zones free,
3017 * the zonelists will be reiterated without ALLOC_NOFRAGMENT.
3018 */
e933dc4a 3019 if (order < pageblock_order && alloc_flags & ALLOC_NOFRAGMENT)
6bb15450
MG
3020 min_order = pageblock_order;
3021
7a8f58f3
VB
3022 /*
3023 * Find the largest available free page in the other list. This roughly
3024 * approximates finding the pageblock with the most free pages, which
3025 * would be too costly to do exactly.
3026 */
6bb15450 3027 for (current_order = MAX_ORDER - 1; current_order >= min_order;
7aeb09f9 3028 --current_order) {
4eb7dce6
JK
3029 area = &(zone->free_area[current_order]);
3030 fallback_mt = find_suitable_fallback(area, current_order,
2149cdae 3031 start_migratetype, false, &can_steal);
4eb7dce6
JK
3032 if (fallback_mt == -1)
3033 continue;
b2a0ac88 3034
7a8f58f3
VB
3035 /*
3036 * We cannot steal all free pages from the pageblock and the
3037 * requested migratetype is movable. In that case it's better to
3038 * steal and split the smallest available page instead of the
3039 * largest available page, because even if the next movable
3040 * allocation falls back into a different pageblock than this
3041 * one, it won't cause permanent fragmentation.
3042 */
3043 if (!can_steal && start_migratetype == MIGRATE_MOVABLE
3044 && current_order > order)
3045 goto find_smallest;
b2a0ac88 3046
7a8f58f3
VB
3047 goto do_steal;
3048 }
e0fff1bd 3049
7a8f58f3 3050 return false;
e0fff1bd 3051
7a8f58f3
VB
3052find_smallest:
3053 for (current_order = order; current_order < MAX_ORDER;
3054 current_order++) {
3055 area = &(zone->free_area[current_order]);
3056 fallback_mt = find_suitable_fallback(area, current_order,
3057 start_migratetype, false, &can_steal);
3058 if (fallback_mt != -1)
3059 break;
b2a0ac88
MG
3060 }
3061
7a8f58f3
VB
3062 /*
3063 * This should not happen - we already found a suitable fallback
3064 * when looking for the largest page.
3065 */
3066 VM_BUG_ON(current_order == MAX_ORDER);
3067
3068do_steal:
b03641af 3069 page = get_page_from_free_area(area, fallback_mt);
7a8f58f3 3070
1c30844d
MG
3071 steal_suitable_fallback(zone, page, alloc_flags, start_migratetype,
3072 can_steal);
7a8f58f3
VB
3073
3074 trace_mm_page_alloc_extfrag(page, order, current_order,
3075 start_migratetype, fallback_mt);
3076
3077 return true;
3078
b2a0ac88
MG
3079}
3080
56fd56b8 3081/*
1da177e4
LT
3082 * Do the hard work of removing an element from the buddy allocator.
3083 * Call me with the zone->lock already held.
3084 */
85ccc8fa 3085static __always_inline struct page *
6bb15450
MG
3086__rmqueue(struct zone *zone, unsigned int order, int migratetype,
3087 unsigned int alloc_flags)
1da177e4 3088{
1da177e4
LT
3089 struct page *page;
3090
ce8f86ee
H
3091 if (IS_ENABLED(CONFIG_CMA)) {
3092 /*
3093 * Balance movable allocations between regular and CMA areas by
3094 * allocating from CMA when over half of the zone's free memory
3095 * is in the CMA area.
3096 */
3097 if (alloc_flags & ALLOC_CMA &&
3098 zone_page_state(zone, NR_FREE_CMA_PAGES) >
3099 zone_page_state(zone, NR_FREE_PAGES) / 2) {
3100 page = __rmqueue_cma_fallback(zone, order);
3101 if (page)
10e0f753 3102 return page;
ce8f86ee 3103 }
16867664 3104 }
3bc48f96 3105retry:
56fd56b8 3106 page = __rmqueue_smallest(zone, order, migratetype);
974a786e 3107 if (unlikely(!page)) {
8510e69c 3108 if (alloc_flags & ALLOC_CMA)
dc67647b
JK
3109 page = __rmqueue_cma_fallback(zone, order);
3110
6bb15450
MG
3111 if (!page && __rmqueue_fallback(zone, order, migratetype,
3112 alloc_flags))
3bc48f96 3113 goto retry;
728ec980 3114 }
b2a0ac88 3115 return page;
1da177e4
LT
3116}
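/*
 * Illustrative example of the CMA balancing above (added comment, with
 * hypothetical numbers): in a zone with 1000 free pages of which 600 sit in
 * the CMA area, a movable allocation that passes ALLOC_CMA is served from
 * MIGRATE_CMA first (600 > 1000 / 2); once the CMA share drops to half or
 * below, the regular free lists are tried first again, with CMA kept as the
 * fallback.
 */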
3117
5f63b720 3118/*
1da177e4
LT
3119 * Obtain a specified number of elements from the buddy allocator, all under
3120 * a single hold of the lock, for efficiency. Add them to the supplied list.
3121 * Returns the number of new pages which were placed at *list.
3122 */
5f63b720 3123static int rmqueue_bulk(struct zone *zone, unsigned int order,
b2a0ac88 3124 unsigned long count, struct list_head *list,
6bb15450 3125 int migratetype, unsigned int alloc_flags)
1da177e4 3126{
cb66bede 3127 int i, allocated = 0;
5f63b720 3128
01b44456 3129 /* Caller must hold IRQ-safe pcp->lock so IRQs are disabled. */
d34b0733 3130 spin_lock(&zone->lock);
1da177e4 3131 for (i = 0; i < count; ++i) {
6bb15450
MG
3132 struct page *page = __rmqueue(zone, order, migratetype,
3133 alloc_flags);
085cc7d5 3134 if (unlikely(page == NULL))
1da177e4 3135 break;
81eabcbe 3136
77fe7f13 3137 if (unlikely(check_pcp_refill(page, order)))
479f854a
MG
3138 continue;
3139
81eabcbe 3140 /*
0fac3ba5
VB
 3141		 * Split buddy pages returned by expand() are received here in
 3142		 * physical page order. The page is added to the tail of the
 3143		 * caller's list. From the caller's perspective, the linked list
 3144		 * is ordered by page number under some conditions. This is
 3145		 * useful for IO devices that can forward from the head of the
 3146		 * list, and thus also in physical page order, and for IO
 3147		 * devices that can merge IO requests if the physical pages are
 3148		 * ordered properly.
81eabcbe 3149 */
bf75f200 3150 list_add_tail(&page->pcp_list, list);
cb66bede 3151 allocated++;
bb14c2c7 3152 if (is_migrate_cma(get_pcppage_migratetype(page)))
d1ce749a
BZ
3153 __mod_zone_page_state(zone, NR_FREE_CMA_PAGES,
3154 -(1 << order));
1da177e4 3155 }
a6de734b
MG
3156
3157 /*
3158 * i pages were removed from the buddy list even if some leak due
3159 * to check_pcp_refill failing so adjust NR_FREE_PAGES based
cb66bede 3160 * on i. Do not confuse with 'allocated' which is the number of
a6de734b
MG
3161 * pages added to the pcp list.
3162 */
f2260e6b 3163 __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
d34b0733 3164 spin_unlock(&zone->lock);
cb66bede 3165 return allocated;
1da177e4
LT
3166}
3167
4ae7c039 3168#ifdef CONFIG_NUMA
8fce4d8e 3169/*
4037d452
CL
3170 * Called from the vmstat counter updater to drain pagesets of this
3171 * currently executing processor on remote nodes after they have
3172 * expired.
8fce4d8e 3173 */
4037d452 3174void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
4ae7c039 3175{
7be12fc9 3176 int to_drain, batch;
4ae7c039 3177
4db0c3c2 3178 batch = READ_ONCE(pcp->batch);
7be12fc9 3179 to_drain = min(pcp->count, batch);
4b23a68f
MG
3180 if (to_drain > 0) {
3181 unsigned long flags;
3182
3183 /*
3184 * free_pcppages_bulk expects IRQs disabled for zone->lock
3185 * so even though pcp->lock is not intended to be IRQ-safe,
3186 * it's needed in this context.
3187 */
3188 spin_lock_irqsave(&pcp->lock, flags);
fd56eef2 3189 free_pcppages_bulk(zone, to_drain, pcp, 0);
4b23a68f
MG
3190 spin_unlock_irqrestore(&pcp->lock, flags);
3191 }
4ae7c039
CL
3192}
3193#endif
3194
9f8f2172 3195/*
93481ff0 3196 * Drain pcplists of the indicated processor and zone.
9f8f2172 3197 */
93481ff0 3198static void drain_pages_zone(unsigned int cpu, struct zone *zone)
1da177e4 3199{
93481ff0 3200 struct per_cpu_pages *pcp;
1da177e4 3201
28f836b6 3202 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
4b23a68f
MG
3203 if (pcp->count) {
3204 unsigned long flags;
28f836b6 3205
4b23a68f
MG
3206 /* See drain_zone_pages on why this is disabling IRQs */
3207 spin_lock_irqsave(&pcp->lock, flags);
3208 free_pcppages_bulk(zone, pcp->count, pcp, 0);
3209 spin_unlock_irqrestore(&pcp->lock, flags);
3210 }
93481ff0 3211}
3dfa5721 3212
93481ff0
VB
3213/*
3214 * Drain pcplists of all zones on the indicated processor.
93481ff0
VB
3215 */
3216static void drain_pages(unsigned int cpu)
3217{
3218 struct zone *zone;
3219
3220 for_each_populated_zone(zone) {
3221 drain_pages_zone(cpu, zone);
1da177e4
LT
3222 }
3223}
1da177e4 3224
9f8f2172
CL
3225/*
3226 * Spill all of this CPU's per-cpu pages back into the buddy allocator.
3227 */
93481ff0 3228void drain_local_pages(struct zone *zone)
9f8f2172 3229{
93481ff0
VB
3230 int cpu = smp_processor_id();
3231
3232 if (zone)
3233 drain_pages_zone(cpu, zone);
3234 else
3235 drain_pages(cpu);
9f8f2172
CL
3236}
3237
3238/*
ec6e8c7e
VB
3239 * The implementation of drain_all_pages(), exposing an extra parameter to
3240 * drain on all cpus.
93481ff0 3241 *
ec6e8c7e
VB
3242 * drain_all_pages() is optimized to only execute on cpus where pcplists are
3243 * not empty. The check for non-emptiness can however race with a free to
3244 * pcplist that has not yet increased the pcp->count from 0 to 1. Callers
3245 * that need the guarantee that every CPU has drained can disable the
3246 * optimizing racy check.
9f8f2172 3247 */
3b1f3658 3248static void __drain_all_pages(struct zone *zone, bool force_all_cpus)
9f8f2172 3249{
74046494 3250 int cpu;
74046494
GBY
3251
3252 /*
041711ce 3253 * Allocate in the BSS so we won't require allocation in
74046494
GBY
3254 * direct reclaim path for CONFIG_CPUMASK_OFFSTACK=y
3255 */
3256 static cpumask_t cpus_with_pcps;
3257
bd233f53
MG
3258 /*
3259 * Do not drain if one is already in progress unless it's specific to
3260 * a zone. Such callers are primarily CMA and memory hotplug and need
3261 * the drain to be complete when the call returns.
3262 */
3263 if (unlikely(!mutex_trylock(&pcpu_drain_mutex))) {
3264 if (!zone)
3265 return;
3266 mutex_lock(&pcpu_drain_mutex);
3267 }
0ccce3b9 3268
74046494
GBY
3269 /*
3270 * We don't care about racing with CPU hotplug event
3271 * as offline notification will cause the notified
3272 * cpu to drain that CPU pcps and on_each_cpu_mask
3273 * disables preemption as part of its processing
3274 */
3275 for_each_online_cpu(cpu) {
28f836b6 3276 struct per_cpu_pages *pcp;
93481ff0 3277 struct zone *z;
74046494 3278 bool has_pcps = false;
93481ff0 3279
ec6e8c7e
VB
3280 if (force_all_cpus) {
3281 /*
3282 * The pcp.count check is racy, some callers need a
3283 * guarantee that no cpu is missed.
3284 */
3285 has_pcps = true;
3286 } else if (zone) {
28f836b6
MG
3287 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
3288 if (pcp->count)
74046494 3289 has_pcps = true;
93481ff0
VB
3290 } else {
3291 for_each_populated_zone(z) {
28f836b6
MG
3292 pcp = per_cpu_ptr(z->per_cpu_pageset, cpu);
3293 if (pcp->count) {
93481ff0
VB
3294 has_pcps = true;
3295 break;
3296 }
74046494
GBY
3297 }
3298 }
93481ff0 3299
74046494
GBY
3300 if (has_pcps)
3301 cpumask_set_cpu(cpu, &cpus_with_pcps);
3302 else
3303 cpumask_clear_cpu(cpu, &cpus_with_pcps);
3304 }
0ccce3b9 3305
bd233f53 3306 for_each_cpu(cpu, &cpus_with_pcps) {
443c2acc
NSJ
3307 if (zone)
3308 drain_pages_zone(cpu, zone);
3309 else
3310 drain_pages(cpu);
0ccce3b9 3311 }
bd233f53
MG
3312
3313 mutex_unlock(&pcpu_drain_mutex);
9f8f2172
CL
3314}
3315
ec6e8c7e
VB
3316/*
3317 * Spill all the per-cpu pages from all CPUs back into the buddy allocator.
3318 *
3319 * When zone parameter is non-NULL, spill just the single zone's pages.
ec6e8c7e
VB
3320 */
3321void drain_all_pages(struct zone *zone)
3322{
3323 __drain_all_pages(zone, false);
3324}
3325
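drain_all_pages() is the entry point used by callers such as memory offlining, CMA allocation and direct reclaim to spill per-cpu pages back to the buddy freelists before they re-check free counters. The helper below is a hedged, hypothetical sketch of that calling pattern; flush_and_read_free() does not exist in the kernel, and real callers carry considerably more context.

#include <linux/gfp.h>
#include <linux/mmzone.h>
#include <linux/vmstat.h>

/* Hypothetical: flush pcplists for one zone, then sample its free counter. */
static unsigned long flush_and_read_free(struct zone *zone)
{
	/* Spill this zone's per-cpu pages back to the buddy freelists. */
	drain_all_pages(zone);

	/* Passing NULL above would instead drain every populated zone. */
	return zone_page_state(zone, NR_FREE_PAGES);
}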
296699de 3326#ifdef CONFIG_HIBERNATION
1da177e4 3327
556b969a
CY
3328/*
3329 * Touch the watchdog for every WD_PAGE_COUNT pages.
3330 */
3331#define WD_PAGE_COUNT (128*1024)
3332
1da177e4
LT
3333void mark_free_pages(struct zone *zone)
3334{
556b969a 3335 unsigned long pfn, max_zone_pfn, page_count = WD_PAGE_COUNT;
f623f0db 3336 unsigned long flags;
7aeb09f9 3337 unsigned int order, t;
86760a2c 3338 struct page *page;
1da177e4 3339
8080fc03 3340 if (zone_is_empty(zone))
1da177e4
LT
3341 return;
3342
3343 spin_lock_irqsave(&zone->lock, flags);
f623f0db 3344
108bcc96 3345 max_zone_pfn = zone_end_pfn(zone);
f623f0db
RW
3346 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
3347 if (pfn_valid(pfn)) {
86760a2c 3348 page = pfn_to_page(pfn);
ba6b0979 3349
556b969a
CY
3350 if (!--page_count) {
3351 touch_nmi_watchdog();
3352 page_count = WD_PAGE_COUNT;
3353 }
3354
ba6b0979
JK
3355 if (page_zone(page) != zone)
3356 continue;
3357
7be98234
RW
3358 if (!swsusp_page_is_forbidden(page))
3359 swsusp_unset_page_free(page);
f623f0db 3360 }
1da177e4 3361
b2a0ac88 3362 for_each_migratetype_order(order, t) {
86760a2c 3363 list_for_each_entry(page,
bf75f200 3364 &zone->free_area[order].free_list[t], buddy_list) {
f623f0db 3365 unsigned long i;
1da177e4 3366
86760a2c 3367 pfn = page_to_pfn(page);
556b969a
CY
3368 for (i = 0; i < (1UL << order); i++) {
3369 if (!--page_count) {
3370 touch_nmi_watchdog();
3371 page_count = WD_PAGE_COUNT;
3372 }
7be98234 3373 swsusp_set_page_free(pfn_to_page(pfn + i));
556b969a 3374 }
f623f0db 3375 }
b2a0ac88 3376 }
1da177e4
LT
3377 spin_unlock_irqrestore(&zone->lock, flags);
3378}
e2c55dc8 3379#endif /* CONFIG_HIBERNATION */
1da177e4 3380
44042b44
MG
3381static bool free_unref_page_prepare(struct page *page, unsigned long pfn,
3382 unsigned int order)
1da177e4 3383{
5f8dcc21 3384 int migratetype;
1da177e4 3385
44042b44 3386 if (!free_pcp_prepare(page, order))
9cca35d4 3387 return false;
689bcebf 3388
dc4b0caf 3389 migratetype = get_pfnblock_migratetype(page, pfn);
bb14c2c7 3390 set_pcppage_migratetype(page, migratetype);
9cca35d4
MG
3391 return true;
3392}
3393
f26b3fa0
MG
3394static int nr_pcp_free(struct per_cpu_pages *pcp, int high, int batch,
3395 bool free_high)
3b12e7e9
MG
3396{
3397 int min_nr_free, max_nr_free;
3398
f26b3fa0
MG
3399 /* Free everything if batch freeing high-order pages. */
3400 if (unlikely(free_high))
3401 return pcp->count;
3402
3b12e7e9
MG
3403 /* Check for PCP disabled or boot pageset */
3404 if (unlikely(high < batch))
3405 return 1;
3406
3407 /* Leave at least pcp->batch pages on the list */
3408 min_nr_free = batch;
3409 max_nr_free = high - batch;
3410
3411 /*
3412 * Double the number of pages freed each time there is subsequent
3413 * freeing of pages without any allocation.
3414 */
3415 batch <<= pcp->free_factor;
3416 if (batch < max_nr_free)
3417 pcp->free_factor++;
3418 batch = clamp(batch, min_nr_free, max_nr_free);
3419
3420 return batch;
3421}
3422
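nr_pcp_free() always leaves at least batch pages on the list, caps a single bulk free at high - batch pages, and doubles the amount freed on each consecutive free-without-allocate by shifting with pcp->free_factor. The stand-alone program below models that progression with made-up high and batch values; it is an illustration of the arithmetic, not kernel code.

#include <stdio.h>

#define CLAMP(v, lo, hi) ((v) < (lo) ? (lo) : ((v) > (hi) ? (hi) : (v)))

int main(void)
{
	int high = 512, batch = 64;	/* hypothetical pcp->high and pcp->batch */
	int free_factor = 0;
	int min_nr_free = batch;
	int max_nr_free = high - batch;

	/* Successive frees with no intervening allocation. */
	for (int round = 0; round < 5; round++) {
		int nr = batch << free_factor;

		if (nr < max_nr_free)
			free_factor++;
		nr = CLAMP(nr, min_nr_free, max_nr_free);
		printf("round %d: free %d pages, free_factor now %d\n",
		       round, nr, free_factor);
	}
	return 0;
}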
f26b3fa0
MG
3423static int nr_pcp_high(struct per_cpu_pages *pcp, struct zone *zone,
3424 bool free_high)
c49c2c47
MG
3425{
3426 int high = READ_ONCE(pcp->high);
3427
f26b3fa0 3428 if (unlikely(!high || free_high))
c49c2c47
MG
3429 return 0;
3430
3431 if (!test_bit(ZONE_RECLAIM_ACTIVE, &zone->flags))
3432 return high;
3433
3434 /*
3435 * If reclaim is active, limit the number of pages that can be
3436 * stored on pcp lists
3437 */
3438 return min(READ_ONCE(pcp->batch) << 2, high);
3439}
3440
4b23a68f
MG
3441static void free_unref_page_commit(struct zone *zone, struct per_cpu_pages *pcp,
3442 struct page *page, int migratetype,
56651377 3443 unsigned int order)
9cca35d4 3444{
3b12e7e9 3445 int high;
44042b44 3446 int pindex;
f26b3fa0 3447 bool free_high;
9cca35d4 3448
d34b0733 3449 __count_vm_event(PGFREE);
44042b44 3450 pindex = order_to_pindex(migratetype, order);
bf75f200 3451 list_add(&page->pcp_list, &pcp->lists[pindex]);
44042b44 3452 pcp->count += 1 << order;
f26b3fa0
MG
3453
3454 /*
3455 * As high-order pages other than THP's stored on PCP can contribute
3456 * to fragmentation, limit the number stored when PCP is heavily
3457 * freeing without allocation. The remainder after bulk freeing
3458 * stops will be drained from vmstat refresh context.
3459 */
3460 free_high = (pcp->free_factor && order && order <= PAGE_ALLOC_COSTLY_ORDER);
3461
3462 high = nr_pcp_high(pcp, zone, free_high);
3b12e7e9
MG
3463 if (pcp->count >= high) {
3464 int batch = READ_ONCE(pcp->batch);
3465
f26b3fa0 3466 free_pcppages_bulk(zone, nr_pcp_free(pcp, high, batch, free_high), pcp, pindex);
3b12e7e9 3467 }
9cca35d4 3468}
5f8dcc21 3469
9cca35d4 3470/*
44042b44 3471 * Free a pcp page
9cca35d4 3472 */
44042b44 3473void free_unref_page(struct page *page, unsigned int order)
9cca35d4
MG
3474{
3475 unsigned long flags;
4b23a68f
MG
3476 unsigned long __maybe_unused UP_flags;
3477 struct per_cpu_pages *pcp;
3478 struct zone *zone;
9cca35d4 3479 unsigned long pfn = page_to_pfn(page);
df1acc85 3480 int migratetype;
9cca35d4 3481
44042b44 3482 if (!free_unref_page_prepare(page, pfn, order))
9cca35d4 3483 return;
da456f14 3484
5f8dcc21
MG
3485 /*
3486 * We only track unmovable, reclaimable and movable on pcp lists.
df1acc85 3487	 * Place ISOLATE pages on the isolated list because they are being
a6ffdc07 3488	 * offlined, but treat HIGHATOMIC as movable pages so we can get those
5f8dcc21
MG
 3489	 * areas back if necessary. Otherwise, we may have to free
 3490	 * excessively into the page allocator.
3491 */
df1acc85
MG
3492 migratetype = get_pcppage_migratetype(page);
3493 if (unlikely(migratetype >= MIGRATE_PCPTYPES)) {
194159fb 3494 if (unlikely(is_migrate_isolate(migratetype))) {
44042b44 3495 free_one_page(page_zone(page), page, pfn, order, migratetype, FPI_NONE);
9cca35d4 3496 return;
5f8dcc21
MG
3497 }
3498 migratetype = MIGRATE_MOVABLE;
3499 }
3500
4b23a68f
MG
3501 zone = page_zone(page);
3502 pcp_trylock_prepare(UP_flags);
01b44456
MG
3503 pcp = pcp_spin_trylock_irqsave(zone->per_cpu_pageset, flags);
3504 if (pcp) {
4b23a68f 3505 free_unref_page_commit(zone, pcp, page, migratetype, order);
01b44456 3506 pcp_spin_unlock_irqrestore(pcp, flags);
4b23a68f
MG
3507 } else {
3508 free_one_page(zone, page, pfn, order, migratetype, FPI_NONE);
3509 }
3510 pcp_trylock_finish(UP_flags);
1da177e4
LT
3511}
3512
cc59850e
KK
3513/*
3514 * Free a list of 0-order pages
3515 */
2d4894b5 3516void free_unref_page_list(struct list_head *list)
cc59850e
KK
3517{
3518 struct page *page, *next;
4b23a68f
MG
3519 struct per_cpu_pages *pcp = NULL;
3520 struct zone *locked_zone = NULL;
56651377 3521 unsigned long flags;
c24ad77d 3522 int batch_count = 0;
df1acc85 3523 int migratetype;
9cca35d4
MG
3524
3525 /* Prepare pages for freeing */
3526 list_for_each_entry_safe(page, next, list, lru) {
56651377 3527 unsigned long pfn = page_to_pfn(page);
053cfda1 3528 if (!free_unref_page_prepare(page, pfn, 0)) {
9cca35d4 3529 list_del(&page->lru);
053cfda1
ML
3530 continue;
3531 }
df1acc85
MG
3532
3533 /*
3534 * Free isolated pages directly to the allocator, see
3535 * comment in free_unref_page.
3536 */
3537 migratetype = get_pcppage_migratetype(page);
47aef601
DB
3538 if (unlikely(is_migrate_isolate(migratetype))) {
3539 list_del(&page->lru);
3540 free_one_page(page_zone(page), page, pfn, 0, migratetype, FPI_NONE);
3541 continue;
df1acc85 3542 }
9cca35d4 3543 }
cc59850e
KK
3544
3545 list_for_each_entry_safe(page, next, list, lru) {
4b23a68f
MG
3546 struct zone *zone = page_zone(page);
3547
3548 /* Different zone, different pcp lock. */
3549 if (zone != locked_zone) {
3550 if (pcp)
01b44456
MG
3551 pcp_spin_unlock_irqrestore(pcp, flags);
3552
4b23a68f 3553 locked_zone = zone;
01b44456 3554 pcp = pcp_spin_lock_irqsave(locked_zone->per_cpu_pageset, flags);
4b23a68f
MG
3555 }
3556
47aef601
DB
3557 /*
3558 * Non-isolated types over MIGRATE_PCPTYPES get added
3559 * to the MIGRATE_MOVABLE pcp list.
3560 */
df1acc85 3561 migratetype = get_pcppage_migratetype(page);
47aef601
DB
3562 if (unlikely(migratetype >= MIGRATE_PCPTYPES))
3563 migratetype = MIGRATE_MOVABLE;
3564
2d4894b5 3565 trace_mm_page_free_batched(page);
4b23a68f 3566 free_unref_page_commit(zone, pcp, page, migratetype, 0);
c24ad77d
LS
3567
3568 /*
3569 * Guard against excessive IRQ disabled times when we get
3570 * a large list of pages to free.
3571 */
3572 if (++batch_count == SWAP_CLUSTER_MAX) {
01b44456 3573 pcp_spin_unlock_irqrestore(pcp, flags);
c24ad77d 3574 batch_count = 0;
01b44456 3575 pcp = pcp_spin_lock_irqsave(locked_zone->per_cpu_pageset, flags);
c24ad77d 3576 }
cc59850e 3577 }
4b23a68f
MG
3578
3579 if (pcp)
01b44456 3580 pcp_spin_unlock_irqrestore(pcp, flags);
cc59850e
KK
3581}
3582
8dfcc9ba
NP
3583/*
3584 * split_page takes a non-compound higher-order page, and splits it into
 3585 * n (1<<order) sub-pages: page[0..n-1]
3586 * Each sub-page must be freed individually.
3587 *
3588 * Note: this is probably too low level an operation for use in drivers.
3589 * Please consult with lkml before using this in your driver.
3590 */
3591void split_page(struct page *page, unsigned int order)
3592{
3593 int i;
3594
309381fe
SL
3595 VM_BUG_ON_PAGE(PageCompound(page), page);
3596 VM_BUG_ON_PAGE(!page_count(page), page);
b1eeab67 3597
a9627bc5 3598 for (i = 1; i < (1 << order); i++)
7835e98b 3599 set_page_refcounted(page + i);
8fb156c9 3600 split_page_owner(page, 1 << order);
e1baddf8 3601 split_page_memcg(page, 1 << order);
8dfcc9ba 3602}
5853ff23 3603EXPORT_SYMBOL_GPL(split_page);
8dfcc9ba 3604
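As the comment above says, split_page() is a low-level interface for the rare driver that wants a physically contiguous run of order-0 pages it can later release piecemeal. The functions below are a hedged sketch of such a caller; grab_split_run() and release_split_run() are hypothetical names and the error handling is minimal.

#include <linux/gfp.h>
#include <linux/mm.h>

/* Hypothetical: grab four contiguous pages, then free them one by one. */
static struct page *grab_split_run(void)
{
	const unsigned int order = 2;
	struct page *page = alloc_pages(GFP_KERNEL, order);	/* non-compound */

	if (!page)
		return NULL;

	/* Turn the order-2 block into four independently refcounted pages. */
	split_page(page, order);
	return page;
}

static void release_split_run(struct page *first)
{
	int i;

	for (i = 0; i < 4; i++)
		__free_page(first + i);
}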
3c605096 3605int __isolate_free_page(struct page *page, unsigned int order)
748446bb 3606{
9a157dd8
KW
3607 struct zone *zone = page_zone(page);
3608 int mt = get_pageblock_migratetype(page);
748446bb 3609
194159fb 3610 if (!is_migrate_isolate(mt)) {
9a157dd8 3611 unsigned long watermark;
8348faf9
VB
3612 /*
3613 * Obey watermarks as if the page was being allocated. We can
3614 * emulate a high-order watermark check with a raised order-0
3615 * watermark, because we already know our high-order page
3616 * exists.
3617 */
fd1444b2 3618 watermark = zone->_watermark[WMARK_MIN] + (1UL << order);
d883c6cf 3619 if (!zone_watermark_ok(zone, 0, watermark, 0, ALLOC_CMA))
2e30abd1
MS
3620 return 0;
3621
8fb74b9f 3622 __mod_zone_freepage_state(zone, -(1UL << order), mt);
2e30abd1 3623 }
748446bb 3624
6ab01363 3625 del_page_from_free_list(page, zone, order);
2139cbe6 3626
400bc7fd 3627 /*
3628 * Set the pageblock if the isolated page is at least half of a
3629 * pageblock
3630 */
748446bb
MG
3631 if (order >= pageblock_order - 1) {
3632 struct page *endpage = page + (1 << order) - 1;
47118af0
MN
3633 for (; page < endpage; page += pageblock_nr_pages) {
3634 int mt = get_pageblock_migratetype(page);
1dd214b8
ZY
3635 /*
3636 * Only change normal pageblocks (i.e., they can merge
3637 * with others)
3638 */
3639 if (migratetype_is_mergeable(mt))
47118af0
MN
3640 set_pageblock_migratetype(page,
3641 MIGRATE_MOVABLE);
3642 }
748446bb
MG
3643 }
3644
8fb74b9f 3645 return 1UL << order;
1fb3f8ca
MG
3646}
3647
624f58d8
AD
3648/**
3649 * __putback_isolated_page - Return a now-isolated page back where we got it
3650 * @page: Page that was isolated
3651 * @order: Order of the isolated page
e6a0a7ad 3652 * @mt: The page's pageblock's migratetype
624f58d8
AD
3653 *
3654 * This function is meant to return a page pulled from the free lists via
3655 * __isolate_free_page back to the free lists they were pulled from.
3656 */
3657void __putback_isolated_page(struct page *page, unsigned int order, int mt)
3658{
3659 struct zone *zone = page_zone(page);
3660
3661 /* zone lock should be held when this function is called */
3662 lockdep_assert_held(&zone->lock);
3663
3664 /* Return isolated page to tail of freelist. */
f04a5d5d 3665 __free_one_page(page, page_to_pfn(page), zone, order, mt,
47b6a24a 3666 FPI_SKIP_REPORT_NOTIFY | FPI_TO_TAIL);
624f58d8
AD
3667}
3668
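__isolate_free_page() and __putback_isolated_page() are designed to be used as a pair under zone->lock: the former pulls a free page off the buddy freelists (charging the watermark), and the latter returns it to the tail of the freelist it came from. The sketch below only illustrates that pairing, loosely modelled on how page reporting uses it; peek_at_free_page() is a hypothetical name, and both helpers are declared in mm/internal.h rather than a public header.

#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/spinlock.h>
#include "internal.h"	/* __isolate_free_page(), __putback_isolated_page() */

/* Hypothetical: briefly take a free page off the buddy lists, then return it. */
static void peek_at_free_page(struct zone *zone, struct page *page,
			      unsigned int order)
{
	unsigned long flags;
	int mt = get_pageblock_migratetype(page);

	spin_lock_irqsave(&zone->lock, flags);
	if (__isolate_free_page(page, order)) {
		/* The page is now off every freelist and can be inspected. */
		__putback_isolated_page(page, order, mt);
	}
	spin_unlock_irqrestore(&zone->lock, flags);
}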
060e7417
MG
3669/*
3670 * Update NUMA hit/miss statistics
060e7417 3671 */
3e23060b
MG
3672static inline void zone_statistics(struct zone *preferred_zone, struct zone *z,
3673 long nr_account)
060e7417
MG
3674{
3675#ifdef CONFIG_NUMA
3a321d2a 3676 enum numa_stat_item local_stat = NUMA_LOCAL;
060e7417 3677
4518085e
KW
3678 /* skip numa counters update if numa stats is disabled */
3679 if (!static_branch_likely(&vm_numa_stat_key))
3680 return;
3681
c1093b74 3682 if (zone_to_nid(z) != numa_node_id())
060e7417 3683 local_stat = NUMA_OTHER;
060e7417 3684
c1093b74 3685 if (zone_to_nid(z) == zone_to_nid(preferred_zone))
3e23060b 3686 __count_numa_events(z, NUMA_HIT, nr_account);
2df26639 3687 else {
3e23060b
MG
3688 __count_numa_events(z, NUMA_MISS, nr_account);
3689 __count_numa_events(preferred_zone, NUMA_FOREIGN, nr_account);
060e7417 3690 }
3e23060b 3691 __count_numa_events(z, local_stat, nr_account);
060e7417
MG
3692#endif
3693}
3694
589d9973
MG
3695static __always_inline
3696struct page *rmqueue_buddy(struct zone *preferred_zone, struct zone *zone,
3697 unsigned int order, unsigned int alloc_flags,
3698 int migratetype)
3699{
3700 struct page *page;
3701 unsigned long flags;
3702
3703 do {
3704 page = NULL;
3705 spin_lock_irqsave(&zone->lock, flags);
3706 /*
3707 * order-0 request can reach here when the pcplist is skipped
3708 * due to non-CMA allocation context. HIGHATOMIC area is
3709 * reserved for high-order atomic allocation, so order-0
3710 * request should skip it.
3711 */
3712 if (order > 0 && alloc_flags & ALLOC_HARDER)
3713 page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC);
3714 if (!page) {
3715 page = __rmqueue(zone, order, migratetype, alloc_flags);
3716 if (!page) {
3717 spin_unlock_irqrestore(&zone->lock, flags);
3718 return NULL;
3719 }
3720 }
3721 __mod_zone_freepage_state(zone, -(1 << order),
3722 get_pcppage_migratetype(page));
3723 spin_unlock_irqrestore(&zone->lock, flags);
3724 } while (check_new_pages(page, order));
3725
3726 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
3727 zone_statistics(preferred_zone, zone, 1);
3728
3729 return page;
3730}
3731
066b2393 3732/* Remove page from the per-cpu list, caller must protect the list */
3b822017 3733static inline
44042b44
MG
3734struct page *__rmqueue_pcplist(struct zone *zone, unsigned int order,
3735 int migratetype,
6bb15450 3736 unsigned int alloc_flags,
453f85d4 3737 struct per_cpu_pages *pcp,
066b2393
MG
3738 struct list_head *list)
3739{
3740 struct page *page;
3741
3742 do {
3743 if (list_empty(list)) {
44042b44
MG
3744 int batch = READ_ONCE(pcp->batch);
3745 int alloced;
3746
3747 /*
3748 * Scale batch relative to order if batch implies
3749 * free pages can be stored on the PCP. Batch can
3750 * be 1 for small zones or for boot pagesets which
3751 * should never store free pages as the pages may
3752 * belong to arbitrary zones.
3753 */
3754 if (batch > 1)
3755 batch = max(batch >> order, 2);
3756 alloced = rmqueue_bulk(zone, order,
3757 batch, list,
6bb15450 3758 migratetype, alloc_flags);
44042b44
MG
3759
3760 pcp->count += alloced << order;
066b2393
MG
3761 if (unlikely(list_empty(list)))
3762 return NULL;
3763 }
3764
bf75f200
MG
3765 page = list_first_entry(list, struct page, pcp_list);
3766 list_del(&page->pcp_list);
44042b44 3767 pcp->count -= 1 << order;
77fe7f13 3768 } while (check_new_pcp(page, order));
066b2393
MG
3769
3770 return page;
3771}
3772
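When the requested pcp list is empty, the refill above scales the bulk count down with the order so that one refill brings in roughly the same amount of memory regardless of page size, with a floor of two so the list keeps a spare entry. A tiny stand-alone model of that scaling, using a hypothetical pcp->batch of 63:

#include <stdio.h>

static int refill_count(int batch, unsigned int order)
{
	if (batch > 1) {
		batch >>= order;
		if (batch < 2)
			batch = 2;
	}
	return batch;
}

int main(void)
{
	unsigned int order;

	/* order 0 -> 63 pages, order 1 -> 31, order 2 -> 15, order 3 -> 7 */
	for (order = 0; order <= 3; order++)
		printf("order %u: refill %d pages\n", order, refill_count(63, order));
	return 0;
}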
3773/* Lock and remove page from the per-cpu list */
3774static struct page *rmqueue_pcplist(struct zone *preferred_zone,
44042b44 3775 struct zone *zone, unsigned int order,
663d0cfd 3776 int migratetype, unsigned int alloc_flags)
066b2393
MG
3777{
3778 struct per_cpu_pages *pcp;
3779 struct list_head *list;
066b2393 3780 struct page *page;
d34b0733 3781 unsigned long flags;
4b23a68f 3782 unsigned long __maybe_unused UP_flags;
066b2393 3783
4b23a68f
MG
3784 /*
3785 * spin_trylock may fail due to a parallel drain. In the future, the
3786 * trylock will also protect against IRQ reentrancy.
3787 */
4b23a68f 3788 pcp_trylock_prepare(UP_flags);
01b44456
MG
3789 pcp = pcp_spin_trylock_irqsave(zone->per_cpu_pageset, flags);
3790 if (!pcp) {
4b23a68f 3791 pcp_trylock_finish(UP_flags);
4b23a68f
MG
3792 return NULL;
3793 }
3b12e7e9
MG
3794
3795 /*
3796 * On allocation, reduce the number of pages that are batch freed.
3797 * See nr_pcp_free() where free_factor is increased for subsequent
3798 * frees.
3799 */
3b12e7e9 3800 pcp->free_factor >>= 1;
44042b44
MG
3801 list = &pcp->lists[order_to_pindex(migratetype, order)];
3802 page = __rmqueue_pcplist(zone, order, migratetype, alloc_flags, pcp, list);
01b44456 3803 pcp_spin_unlock_irqrestore(pcp, flags);
4b23a68f 3804 pcp_trylock_finish(UP_flags);
066b2393 3805 if (page) {
1c52e6d0 3806 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1);
3e23060b 3807 zone_statistics(preferred_zone, zone, 1);
066b2393 3808 }
066b2393
MG
3809 return page;
3810}
3811
1da177e4 3812/*
75379191 3813 * Allocate a page from the given zone. Use pcplists for the orders allowed
 * by pcp_allowed_order(); fall back to the buddy lists otherwise.
1da177e4 3814 */
b073d7f8
AP
3815
3816/*
3817 * Do not instrument rmqueue() with KMSAN. This function may call
3818 * __msan_poison_alloca() through a call to set_pfnblock_flags_mask().
3819 * If __msan_poison_alloca() attempts to allocate pages for the stack depot, it
3820 * may call rmqueue() again, which will result in a deadlock.
3821 */
3822__no_sanitize_memory
0a15c3e9 3823static inline
066b2393 3824struct page *rmqueue(struct zone *preferred_zone,
7aeb09f9 3825 struct zone *zone, unsigned int order,
c603844b
MG
3826 gfp_t gfp_flags, unsigned int alloc_flags,
3827 int migratetype)
1da177e4 3828{
689bcebf 3829 struct page *page;
1da177e4 3830
589d9973
MG
3831 /*
3832 * We most definitely don't want callers attempting to
3833 * allocate greater than order-1 page units with __GFP_NOFAIL.
3834 */
3835 WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));
3836
44042b44 3837 if (likely(pcp_allowed_order(order))) {
1d91df85
JK
3838 /*
3839 * MIGRATE_MOVABLE pcplist could have the pages on CMA area and
3840 * we need to skip it when CMA area isn't allowed.
3841 */
3842 if (!IS_ENABLED(CONFIG_CMA) || alloc_flags & ALLOC_CMA ||
3843 migratetype != MIGRATE_MOVABLE) {
44042b44 3844 page = rmqueue_pcplist(preferred_zone, zone, order,
663d0cfd 3845 migratetype, alloc_flags);
4b23a68f
MG
3846 if (likely(page))
3847 goto out;
1d91df85 3848 }
066b2393 3849 }
83b9355b 3850
589d9973
MG
3851 page = rmqueue_buddy(preferred_zone, zone, order, alloc_flags,
3852 migratetype);
1da177e4 3853
066b2393 3854out:
73444bc4 3855 /* Separate test+clear to avoid unnecessary atomics */
e2a66c21 3856 if (unlikely(test_bit(ZONE_BOOSTED_WATERMARK, &zone->flags))) {
73444bc4
MG
3857 clear_bit(ZONE_BOOSTED_WATERMARK, &zone->flags);
3858 wakeup_kswapd(zone, 0, 0, zone_idx(zone));
3859 }
3860
066b2393 3861 VM_BUG_ON_PAGE(page && bad_range(zone, page), page);
1da177e4
LT
3862 return page;
3863}
3864
933e312e
AM
3865#ifdef CONFIG_FAIL_PAGE_ALLOC
3866
b2588c4b 3867static struct {
933e312e
AM
3868 struct fault_attr attr;
3869
621a5f7a 3870 bool ignore_gfp_highmem;
71baba4b 3871 bool ignore_gfp_reclaim;
54114994 3872 u32 min_order;
933e312e
AM
3873} fail_page_alloc = {
3874 .attr = FAULT_ATTR_INITIALIZER,
71baba4b 3875 .ignore_gfp_reclaim = true,
621a5f7a 3876 .ignore_gfp_highmem = true,
54114994 3877 .min_order = 1,
933e312e
AM
3878};
3879
3880static int __init setup_fail_page_alloc(char *str)
3881{
3882 return setup_fault_attr(&fail_page_alloc.attr, str);
3883}
3884__setup("fail_page_alloc=", setup_fail_page_alloc);
3885
af3b8544 3886static bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
933e312e 3887{
54114994 3888 if (order < fail_page_alloc.min_order)
deaf386e 3889 return false;
933e312e 3890 if (gfp_mask & __GFP_NOFAIL)
deaf386e 3891 return false;
933e312e 3892 if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))
deaf386e 3893 return false;
71baba4b
MG
3894 if (fail_page_alloc.ignore_gfp_reclaim &&
3895 (gfp_mask & __GFP_DIRECT_RECLAIM))
deaf386e 3896 return false;
933e312e 3897
3f913fc5
QZ
3898 if (gfp_mask & __GFP_NOWARN)
3899 fail_page_alloc.attr.no_warn = true;
3900
933e312e
AM
3901 return should_fail(&fail_page_alloc.attr, 1 << order);
3902}
3903
3904#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
3905
3906static int __init fail_page_alloc_debugfs(void)
3907{
0825a6f9 3908 umode_t mode = S_IFREG | 0600;
933e312e 3909 struct dentry *dir;
933e312e 3910
dd48c085
AM
3911 dir = fault_create_debugfs_attr("fail_page_alloc", NULL,
3912 &fail_page_alloc.attr);
b2588c4b 3913
d9f7979c
GKH
3914 debugfs_create_bool("ignore-gfp-wait", mode, dir,
3915 &fail_page_alloc.ignore_gfp_reclaim);
3916 debugfs_create_bool("ignore-gfp-highmem", mode, dir,
3917 &fail_page_alloc.ignore_gfp_highmem);
3918 debugfs_create_u32("min-order", mode, dir, &fail_page_alloc.min_order);
933e312e 3919
d9f7979c 3920 return 0;
933e312e
AM
3921}
3922
3923late_initcall(fail_page_alloc_debugfs);
3924
3925#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
3926
3927#else /* CONFIG_FAIL_PAGE_ALLOC */
3928
af3b8544 3929static inline bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
933e312e 3930{
deaf386e 3931 return false;
933e312e
AM
3932}
3933
3934#endif /* CONFIG_FAIL_PAGE_ALLOC */
3935
54aa3866 3936noinline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
af3b8544
BP
3937{
3938 return __should_fail_alloc_page(gfp_mask, order);
3939}
3940ALLOW_ERROR_INJECTION(should_fail_alloc_page, TRUE);
3941
f27ce0e1
JK
3942static inline long __zone_watermark_unusable_free(struct zone *z,
3943 unsigned int order, unsigned int alloc_flags)
3944{
3945 const bool alloc_harder = (alloc_flags & (ALLOC_HARDER|ALLOC_OOM));
3946 long unusable_free = (1 << order) - 1;
3947
3948 /*
3949 * If the caller does not have rights to ALLOC_HARDER then subtract
3950 * the high-atomic reserves. This will over-estimate the size of the
3951 * atomic reserve but it avoids a search.
3952 */
3953 if (likely(!alloc_harder))
3954 unusable_free += z->nr_reserved_highatomic;
3955
3956#ifdef CONFIG_CMA
3957 /* If allocation can't use CMA areas don't use free CMA pages */
3958 if (!(alloc_flags & ALLOC_CMA))
3959 unusable_free += zone_page_state(z, NR_FREE_CMA_PAGES);
3960#endif
3961
3962 return unusable_free;
3963}
3964
1da177e4 3965/*
97a16fc8
MG
3966 * Return true if free base pages are above 'mark'. For high-order checks it
 3967 * will return true if the order-0 watermark is reached and there is at least
3968 * one free page of a suitable size. Checking now avoids taking the zone lock
3969 * to check in the allocation paths if no pages are free.
1da177e4 3970 */
86a294a8 3971bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
97a225e6 3972 int highest_zoneidx, unsigned int alloc_flags,
86a294a8 3973 long free_pages)
1da177e4 3974{
d23ad423 3975 long min = mark;
1da177e4 3976 int o;
cd04ae1e 3977 const bool alloc_harder = (alloc_flags & (ALLOC_HARDER|ALLOC_OOM));
1da177e4 3978
0aaa29a5 3979 /* free_pages may go negative - that's OK */
f27ce0e1 3980 free_pages -= __zone_watermark_unusable_free(z, order, alloc_flags);
0aaa29a5 3981
7fb1d9fc 3982 if (alloc_flags & ALLOC_HIGH)
1da177e4 3983 min -= min / 2;
0aaa29a5 3984
f27ce0e1 3985 if (unlikely(alloc_harder)) {
cd04ae1e
MH
3986 /*
3987 * OOM victims can try even harder than normal ALLOC_HARDER
3988 * users on the grounds that it's definitely going to be in
3989 * the exit path shortly and free memory. Any allocation it
3990 * makes during the free path will be small and short-lived.
3991 */
3992 if (alloc_flags & ALLOC_OOM)
3993 min -= min / 2;
3994 else
3995 min -= min / 4;
3996 }
3997
97a16fc8
MG
3998 /*
3999 * Check watermarks for an order-0 allocation request. If these
4000 * are not met, then a high-order request also cannot go ahead
4001 * even if a suitable page happened to be free.
4002 */
97a225e6 4003 if (free_pages <= min + z->lowmem_reserve[highest_zoneidx])
88f5acf8 4004 return false;
1da177e4 4005
97a16fc8
MG
4006 /* If this is an order-0 request then the watermark is fine */
4007 if (!order)
4008 return true;
4009
4010 /* For a high-order request, check at least one suitable page is free */
4011 for (o = order; o < MAX_ORDER; o++) {
4012 struct free_area *area = &z->free_area[o];
4013 int mt;
4014
4015 if (!area->nr_free)
4016 continue;
4017
97a16fc8 4018 for (mt = 0; mt < MIGRATE_PCPTYPES; mt++) {
b03641af 4019 if (!free_area_empty(area, mt))
97a16fc8
MG
4020 return true;
4021 }
4022
4023#ifdef CONFIG_CMA
d883c6cf 4024 if ((alloc_flags & ALLOC_CMA) &&
b03641af 4025 !free_area_empty(area, MIGRATE_CMA)) {
97a16fc8 4026 return true;
d883c6cf 4027 }
97a16fc8 4028#endif
76089d00 4029 if (alloc_harder && !free_area_empty(area, MIGRATE_HIGHATOMIC))
b050e376 4030 return true;
1da177e4 4031 }
97a16fc8 4032 return false;
88f5acf8
MG
4033}
4034
7aeb09f9 4035bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
97a225e6 4036 int highest_zoneidx, unsigned int alloc_flags)
88f5acf8 4037{
97a225e6 4038 return __zone_watermark_ok(z, order, mark, highest_zoneidx, alloc_flags,
88f5acf8
MG
4039 zone_page_state(z, NR_FREE_PAGES));
4040}
4041
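Ignoring the ALLOC_HIGH/ALLOC_OOM discounts, the order-0 part of the check above is plain arithmetic: the request fails when the free pages the caller may actually use drop to or below mark plus the zone's lowmem reserve for the requesting zone index. The program below restates that comparison with invented numbers; it is a model, not the kernel function.

#include <stdbool.h>
#include <stdio.h>

static bool order0_watermark_ok(long free_pages, long unusable_free,
				long mark, long lowmem_reserve)
{
	return free_pages - unusable_free > mark + lowmem_reserve;
}

int main(void)
{
	/* Hypothetical zone: 10000 free pages, 1500 of them off limits. */
	long free_pages = 10000, unusable = 1500;
	long min_mark = 2000, lowmem_reserve = 4000;

	printf("order-0 request clears the min watermark: %s\n",
	       order0_watermark_ok(free_pages, unusable, min_mark,
				   lowmem_reserve) ? "yes" : "no");
	return 0;
}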
48ee5f36 4042static inline bool zone_watermark_fast(struct zone *z, unsigned int order,
97a225e6 4043 unsigned long mark, int highest_zoneidx,
f80b08fc 4044 unsigned int alloc_flags, gfp_t gfp_mask)
48ee5f36 4045{
f27ce0e1 4046 long free_pages;
d883c6cf 4047
f27ce0e1 4048 free_pages = zone_page_state(z, NR_FREE_PAGES);
48ee5f36
MG
4049
4050 /*
4051 * Fast check for order-0 only. If this fails then the reserves
f27ce0e1 4052 * need to be calculated.
48ee5f36 4053 */
f27ce0e1 4054 if (!order) {
9282012f
JK
4055 long usable_free;
4056 long reserved;
f27ce0e1 4057
9282012f
JK
4058 usable_free = free_pages;
4059 reserved = __zone_watermark_unusable_free(z, 0, alloc_flags);
4060
4061 /* reserved may over estimate high-atomic reserves. */
4062 usable_free -= min(usable_free, reserved);
4063 if (usable_free > mark + z->lowmem_reserve[highest_zoneidx])
f27ce0e1
JK
4064 return true;
4065 }
48ee5f36 4066
f80b08fc
CTR
4067 if (__zone_watermark_ok(z, order, mark, highest_zoneidx, alloc_flags,
4068 free_pages))
4069 return true;
4070 /*
4071 * Ignore watermark boosting for GFP_ATOMIC order-0 allocations
4072 * when checking the min watermark. The min watermark is the
4073 * point where boosting is ignored so that kswapd is woken up
4074 * when below the low watermark.
4075 */
4076 if (unlikely(!order && (gfp_mask & __GFP_ATOMIC) && z->watermark_boost
4077 && ((alloc_flags & ALLOC_WMARK_MASK) == WMARK_MIN))) {
4078 mark = z->_watermark[WMARK_MIN];
4079 return __zone_watermark_ok(z, order, mark, highest_zoneidx,
4080 alloc_flags, free_pages);
4081 }
4082
4083 return false;
48ee5f36
MG
4084}
4085
7aeb09f9 4086bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
97a225e6 4087 unsigned long mark, int highest_zoneidx)
88f5acf8
MG
4088{
4089 long free_pages = zone_page_state(z, NR_FREE_PAGES);
4090
4091 if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark)
4092 free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES);
4093
97a225e6 4094 return __zone_watermark_ok(z, order, mark, highest_zoneidx, 0,
88f5acf8 4095 free_pages);
1da177e4
LT
4096}
4097
9276b1bc 4098#ifdef CONFIG_NUMA
61bb6cd2
GU
4099int __read_mostly node_reclaim_distance = RECLAIM_DISTANCE;
4100
957f822a
DR
4101static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
4102{
e02dc017 4103 return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) <=
a55c7454 4104 node_reclaim_distance;
957f822a 4105}
9276b1bc 4106#else /* CONFIG_NUMA */
957f822a
DR
4107static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
4108{
4109 return true;
4110}
9276b1bc
PJ
4111#endif /* CONFIG_NUMA */
4112
6bb15450
MG
4113/*
4114 * The restriction on ZONE_DMA32 as being a suitable zone to use to avoid
4115 * fragmentation is subtle. If the preferred zone was HIGHMEM then
4116 * premature use of a lower zone may cause lowmem pressure problems that
4117 * are worse than fragmentation. If the next zone is ZONE_DMA then it is
4118 * probably too small. It only makes sense to spread allocations to avoid
4119 * fragmentation between the Normal and DMA32 zones.
4120 */
4121static inline unsigned int
0a79cdad 4122alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask)
6bb15450 4123{
736838e9 4124 unsigned int alloc_flags;
0a79cdad 4125
736838e9
MN
4126 /*
4127 * __GFP_KSWAPD_RECLAIM is assumed to be the same as ALLOC_KSWAPD
4128 * to save a branch.
4129 */
4130 alloc_flags = (__force int) (gfp_mask & __GFP_KSWAPD_RECLAIM);
0a79cdad
MG
4131
4132#ifdef CONFIG_ZONE_DMA32
8139ad04
AR
4133 if (!zone)
4134 return alloc_flags;
4135
6bb15450 4136 if (zone_idx(zone) != ZONE_NORMAL)
8118b82e 4137 return alloc_flags;
6bb15450
MG
4138
4139 /*
4140 * If ZONE_DMA32 exists, assume it is the one after ZONE_NORMAL and
4141 * the pointer is within zone->zone_pgdat->node_zones[]. Also assume
4142 * on UMA that if Normal is populated then so is DMA32.
4143 */
4144 BUILD_BUG_ON(ZONE_NORMAL - ZONE_DMA32 != 1);
4145 if (nr_online_nodes > 1 && !populated_zone(--zone))
8118b82e 4146 return alloc_flags;
6bb15450 4147
8118b82e 4148 alloc_flags |= ALLOC_NOFRAGMENT;
0a79cdad
MG
4149#endif /* CONFIG_ZONE_DMA32 */
4150 return alloc_flags;
6bb15450 4151}
6bb15450 4152
8e3560d9
PT
4153/* Must be called after current_gfp_context() which can change gfp_mask */
4154static inline unsigned int gfp_to_alloc_flags_cma(gfp_t gfp_mask,
4155 unsigned int alloc_flags)
8510e69c
JK
4156{
4157#ifdef CONFIG_CMA
8e3560d9 4158 if (gfp_migratetype(gfp_mask) == MIGRATE_MOVABLE)
8510e69c 4159 alloc_flags |= ALLOC_CMA;
8510e69c
JK
4160#endif
4161 return alloc_flags;
4162}
4163
7fb1d9fc 4164/*
0798e519 4165 * get_page_from_freelist goes through the zonelist trying to allocate
7fb1d9fc
RS
4166 * a page.
4167 */
4168static struct page *
a9263751
VB
4169get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
4170 const struct alloc_context *ac)
753ee728 4171{
6bb15450 4172 struct zoneref *z;
5117f45d 4173 struct zone *zone;
8a87d695
WY
4174 struct pglist_data *last_pgdat = NULL;
4175 bool last_pgdat_dirty_ok = false;
6bb15450 4176 bool no_fallback;
3b8c0be4 4177
6bb15450 4178retry:
7fb1d9fc 4179 /*
9276b1bc 4180 * Scan zonelist, looking for a zone with enough free.
189cdcfe 4181 * See also __cpuset_node_allowed() comment in kernel/cgroup/cpuset.c.
7fb1d9fc 4182 */
6bb15450
MG
4183 no_fallback = alloc_flags & ALLOC_NOFRAGMENT;
4184 z = ac->preferred_zoneref;
30d8ec73
MN
4185 for_next_zone_zonelist_nodemask(zone, z, ac->highest_zoneidx,
4186 ac->nodemask) {
be06af00 4187 struct page *page;
e085dbc5
JW
4188 unsigned long mark;
4189
664eedde
MG
4190 if (cpusets_enabled() &&
4191 (alloc_flags & ALLOC_CPUSET) &&
002f2906 4192 !__cpuset_zone_allowed(zone, gfp_mask))
cd38b115 4193 continue;
a756cf59
JW
4194 /*
4195 * When allocating a page cache page for writing, we
281e3726
MG
4196 * want to get it from a node that is within its dirty
4197 * limit, such that no single node holds more than its
a756cf59 4198 * proportional share of globally allowed dirty pages.
281e3726 4199 * The dirty limits take into account the node's
a756cf59
JW
4200 * lowmem reserves and high watermark so that kswapd
4201 * should be able to balance it without having to
4202 * write pages from its LRU list.
4203 *
a756cf59 4204 * XXX: For now, allow allocations to potentially
281e3726 4205 * exceed the per-node dirty limit in the slowpath
c9ab0c4f 4206 * (spread_dirty_pages unset) before going into reclaim,
a756cf59 4207 * which is important when on a NUMA setup the allowed
281e3726 4208 * nodes are together not big enough to reach the
a756cf59 4209 * global limit. The proper fix for these situations
281e3726 4210 * will require awareness of nodes in the
a756cf59
JW
4211 * dirty-throttling and the flusher threads.
4212 */
3b8c0be4 4213 if (ac->spread_dirty_pages) {
8a87d695
WY
4214 if (last_pgdat != zone->zone_pgdat) {
4215 last_pgdat = zone->zone_pgdat;
4216 last_pgdat_dirty_ok = node_dirty_ok(zone->zone_pgdat);
4217 }
3b8c0be4 4218
8a87d695 4219 if (!last_pgdat_dirty_ok)
3b8c0be4 4220 continue;
3b8c0be4 4221 }
7fb1d9fc 4222
6bb15450
MG
4223 if (no_fallback && nr_online_nodes > 1 &&
4224 zone != ac->preferred_zoneref->zone) {
4225 int local_nid;
4226
4227 /*
4228 * If moving to a remote node, retry but allow
4229 * fragmenting fallbacks. Locality is more important
4230 * than fragmentation avoidance.
4231 */
4232 local_nid = zone_to_nid(ac->preferred_zoneref->zone);
4233 if (zone_to_nid(zone) != local_nid) {
4234 alloc_flags &= ~ALLOC_NOFRAGMENT;
4235 goto retry;
4236 }
4237 }
4238
a9214443 4239 mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK);
48ee5f36 4240 if (!zone_watermark_fast(zone, order, mark,
f80b08fc
CTR
4241 ac->highest_zoneidx, alloc_flags,
4242 gfp_mask)) {
fa5e084e
MG
4243 int ret;
4244
c9e97a19
PT
4245#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
4246 /*
4247 * Watermark failed for this zone, but see if we can
4248 * grow this zone if it contains deferred pages.
4249 */
4250 if (static_branch_unlikely(&deferred_pages)) {
4251 if (_deferred_grow_zone(zone, order))
4252 goto try_this_zone;
4253 }
4254#endif
5dab2911
MG
4255 /* Checked here to keep the fast path fast */
4256 BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
4257 if (alloc_flags & ALLOC_NO_WATERMARKS)
4258 goto try_this_zone;
4259
202e35db 4260 if (!node_reclaim_enabled() ||
c33d6c06 4261 !zone_allows_reclaim(ac->preferred_zoneref->zone, zone))
cd38b115
MG
4262 continue;
4263
a5f5f91d 4264 ret = node_reclaim(zone->zone_pgdat, gfp_mask, order);
fa5e084e 4265 switch (ret) {
a5f5f91d 4266 case NODE_RECLAIM_NOSCAN:
fa5e084e 4267 /* did not scan */
cd38b115 4268 continue;
a5f5f91d 4269 case NODE_RECLAIM_FULL:
fa5e084e 4270 /* scanned but unreclaimable */
cd38b115 4271 continue;
fa5e084e
MG
4272 default:
4273 /* did we reclaim enough */
fed2719e 4274 if (zone_watermark_ok(zone, order, mark,
97a225e6 4275 ac->highest_zoneidx, alloc_flags))
fed2719e
MG
4276 goto try_this_zone;
4277
fed2719e 4278 continue;
0798e519 4279 }
7fb1d9fc
RS
4280 }
4281
fa5e084e 4282try_this_zone:
066b2393 4283 page = rmqueue(ac->preferred_zoneref->zone, zone, order,
0aaa29a5 4284 gfp_mask, alloc_flags, ac->migratetype);
75379191 4285 if (page) {
479f854a 4286 prep_new_page(page, order, gfp_mask, alloc_flags);
0aaa29a5
MG
4287
4288 /*
4289 * If this is a high-order atomic allocation then check
4290 * if the pageblock should be reserved for the future
4291 */
4292 if (unlikely(order && (alloc_flags & ALLOC_HARDER)))
4293 reserve_highatomic_pageblock(page, zone, order);
4294
75379191 4295 return page;
c9e97a19
PT
4296 } else {
4297#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
4298 /* Try again if zone has deferred pages */
4299 if (static_branch_unlikely(&deferred_pages)) {
4300 if (_deferred_grow_zone(zone, order))
4301 goto try_this_zone;
4302 }
4303#endif
75379191 4304 }
54a6eb5c 4305 }
9276b1bc 4306
6bb15450
MG
4307 /*
4308 * It's possible on a UMA machine to get through all zones that are
4309 * fragmented. If avoiding fragmentation, reset and try again.
4310 */
4311 if (no_fallback) {
4312 alloc_flags &= ~ALLOC_NOFRAGMENT;
4313 goto retry;
4314 }
4315
4ffeaf35 4316 return NULL;
753ee728
MH
4317}
4318
9af744d7 4319static void warn_alloc_show_mem(gfp_t gfp_mask, nodemask_t *nodemask)
a238ab5b 4320{
a238ab5b 4321 unsigned int filter = SHOW_MEM_FILTER_NODES;
a238ab5b
DH
4322
4323 /*
4324 * This documents exceptions given to allocations in certain
4325 * contexts that are allowed to allocate outside current's set
4326 * of allowed nodes.
4327 */
4328 if (!(gfp_mask & __GFP_NOMEMALLOC))
cd04ae1e 4329 if (tsk_is_oom_victim(current) ||
a238ab5b
DH
4330 (current->flags & (PF_MEMALLOC | PF_EXITING)))
4331 filter &= ~SHOW_MEM_FILTER_NODES;
88dc6f20 4332 if (!in_task() || !(gfp_mask & __GFP_DIRECT_RECLAIM))
a238ab5b
DH
4333 filter &= ~SHOW_MEM_FILTER_NODES;
4334
974f4367 4335 __show_mem(filter, nodemask, gfp_zone(gfp_mask));
aa187507
MH
4336}
4337
a8e99259 4338void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...)
aa187507
MH
4339{
4340 struct va_format vaf;
4341 va_list args;
1be334e5 4342 static DEFINE_RATELIMIT_STATE(nopage_rs, 10*HZ, 1);
aa187507 4343
c4dc63f0
BH
4344 if ((gfp_mask & __GFP_NOWARN) ||
4345 !__ratelimit(&nopage_rs) ||
4346 ((gfp_mask & __GFP_DMA) && !has_managed_dma()))
aa187507
MH
4347 return;
4348
7877cdcc
MH
4349 va_start(args, fmt);
4350 vaf.fmt = fmt;
4351 vaf.va = &args;
ef8444ea 4352 pr_warn("%s: %pV, mode:%#x(%pGg), nodemask=%*pbl",
0205f755
MH
4353 current->comm, &vaf, gfp_mask, &gfp_mask,
4354 nodemask_pr_args(nodemask));
7877cdcc 4355 va_end(args);
3ee9a4f0 4356
a8e99259 4357 cpuset_print_current_mems_allowed();
ef8444ea 4358 pr_cont("\n");
a238ab5b 4359 dump_stack();
685dbf6f 4360 warn_alloc_show_mem(gfp_mask, nodemask);
a238ab5b
DH
4361}
4362
6c18ba7a
MH
4363static inline struct page *
4364__alloc_pages_cpuset_fallback(gfp_t gfp_mask, unsigned int order,
4365 unsigned int alloc_flags,
4366 const struct alloc_context *ac)
4367{
4368 struct page *page;
4369
4370 page = get_page_from_freelist(gfp_mask, order,
4371 alloc_flags|ALLOC_CPUSET, ac);
4372 /*
4373 * fallback to ignore cpuset restriction if our nodes
4374 * are depleted
4375 */
4376 if (!page)
4377 page = get_page_from_freelist(gfp_mask, order,
4378 alloc_flags, ac);
4379
4380 return page;
4381}
4382
11e33f6a
MG
4383static inline struct page *
4384__alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
a9263751 4385 const struct alloc_context *ac, unsigned long *did_some_progress)
11e33f6a 4386{
6e0fc46d
DR
4387 struct oom_control oc = {
4388 .zonelist = ac->zonelist,
4389 .nodemask = ac->nodemask,
2a966b77 4390 .memcg = NULL,
6e0fc46d
DR
4391 .gfp_mask = gfp_mask,
4392 .order = order,
6e0fc46d 4393 };
11e33f6a
MG
4394 struct page *page;
4395
9879de73
JW
4396 *did_some_progress = 0;
4397
9879de73 4398 /*
dc56401f
JW
4399 * Acquire the oom lock. If that fails, somebody else is
4400 * making progress for us.
9879de73 4401 */
dc56401f 4402 if (!mutex_trylock(&oom_lock)) {
9879de73 4403 *did_some_progress = 1;
11e33f6a 4404 schedule_timeout_uninterruptible(1);
1da177e4
LT
4405 return NULL;
4406 }
6b1de916 4407
11e33f6a
MG
4408 /*
4409 * Go through the zonelist yet one more time, keep very high watermark
4410 * here, this is only to catch a parallel oom killing, we must fail if
e746bf73
TH
4411 * we're still under heavy pressure. But make sure that this reclaim
4412 * attempt shall not depend on __GFP_DIRECT_RECLAIM && !__GFP_NORETRY
4413 * allocation which will never fail due to oom_lock already held.
11e33f6a 4414 */
e746bf73
TH
4415 page = get_page_from_freelist((gfp_mask | __GFP_HARDWALL) &
4416 ~__GFP_DIRECT_RECLAIM, order,
4417 ALLOC_WMARK_HIGH|ALLOC_CPUSET, ac);
7fb1d9fc 4418 if (page)
11e33f6a
MG
4419 goto out;
4420
06ad276a
MH
4421 /* Coredumps can quickly deplete all memory reserves */
4422 if (current->flags & PF_DUMPCORE)
4423 goto out;
4424 /* The OOM killer will not help higher order allocs */
4425 if (order > PAGE_ALLOC_COSTLY_ORDER)
4426 goto out;
dcda9b04
MH
4427 /*
4428 * We have already exhausted all our reclaim opportunities without any
4429 * success so it is time to admit defeat. We will skip the OOM killer
4430 * because it is very likely that the caller has a more reasonable
4431 * fallback than shooting a random task.
cfb4a541
MN
4432 *
4433 * The OOM killer may not free memory on a specific node.
dcda9b04 4434 */
cfb4a541 4435 if (gfp_mask & (__GFP_RETRY_MAYFAIL | __GFP_THISNODE))
dcda9b04 4436 goto out;
06ad276a 4437 /* The OOM killer does not needlessly kill tasks for lowmem */
97a225e6 4438 if (ac->highest_zoneidx < ZONE_NORMAL)
06ad276a
MH
4439 goto out;
4440 if (pm_suspended_storage())
4441 goto out;
4442 /*
4443 * XXX: GFP_NOFS allocations should rather fail than rely on
4444 * other request to make a forward progress.
4445 * We are in an unfortunate situation where out_of_memory cannot
4446 * do much for this context but let's try it to at least get
4447 * access to memory reserved if the current task is killed (see
4448 * out_of_memory). Once filesystems are ready to handle allocation
4449 * failures more gracefully we should just bail out here.
4450 */
4451
3c2c6488 4452 /* Exhausted what can be done so it's blame time */
3f913fc5
QZ
4453 if (out_of_memory(&oc) ||
4454 WARN_ON_ONCE_GFP(gfp_mask & __GFP_NOFAIL, gfp_mask)) {
c32b3cbe 4455 *did_some_progress = 1;
5020e285 4456
6c18ba7a
MH
4457 /*
4458 * Help non-failing allocations by giving them access to memory
4459 * reserves
4460 */
4461 if (gfp_mask & __GFP_NOFAIL)
4462 page = __alloc_pages_cpuset_fallback(gfp_mask, order,
5020e285 4463 ALLOC_NO_WATERMARKS, ac);
5020e285 4464 }
11e33f6a 4465out:
dc56401f 4466 mutex_unlock(&oom_lock);
11e33f6a
MG
4467 return page;
4468}
4469
33c2d214 4470/*
baf2f90b 4471 * Maximum number of compaction retries with progress before the OOM
33c2d214
MH
 4472 * killer is considered the only way to move forward.
4473 */
4474#define MAX_COMPACT_RETRIES 16
4475
56de7263
MG
4476#ifdef CONFIG_COMPACTION
4477/* Try memory compaction for high-order allocations before reclaim */
4478static struct page *
4479__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
c603844b 4480 unsigned int alloc_flags, const struct alloc_context *ac,
a5508cd8 4481 enum compact_priority prio, enum compact_result *compact_result)
56de7263 4482{
5e1f0f09 4483 struct page *page = NULL;
eb414681 4484 unsigned long pflags;
499118e9 4485 unsigned int noreclaim_flag;
53853e2d
VB
4486
4487 if (!order)
66199712 4488 return NULL;
66199712 4489
eb414681 4490 psi_memstall_enter(&pflags);
5bf18281 4491 delayacct_compact_start();
499118e9 4492 noreclaim_flag = memalloc_noreclaim_save();
eb414681 4493
c5d01d0d 4494 *compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac,
5e1f0f09 4495 prio, &page);
eb414681 4496
499118e9 4497 memalloc_noreclaim_restore(noreclaim_flag);
eb414681 4498 psi_memstall_leave(&pflags);
5bf18281 4499 delayacct_compact_end();
56de7263 4500
06dac2f4
CTR
4501 if (*compact_result == COMPACT_SKIPPED)
4502 return NULL;
98dd3b48
VB
4503 /*
4504 * At least in one zone compaction wasn't deferred or skipped, so let's
4505 * count a compaction stall
4506 */
4507 count_vm_event(COMPACTSTALL);
8fb74b9f 4508
5e1f0f09
MG
4509 /* Prep a captured page if available */
4510 if (page)
4511 prep_new_page(page, order, gfp_mask, alloc_flags);
4512
4513 /* Try get a page from the freelist if available */
4514 if (!page)
4515 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
53853e2d 4516
98dd3b48
VB
4517 if (page) {
4518 struct zone *zone = page_zone(page);
53853e2d 4519
98dd3b48
VB
4520 zone->compact_blockskip_flush = false;
4521 compaction_defer_reset(zone, order, true);
4522 count_vm_event(COMPACTSUCCESS);
4523 return page;
4524 }
56de7263 4525
98dd3b48
VB
4526 /*
 4527	 * It's bad if a compaction run occurs and fails. The most likely reason
4528 * is that pages exist, but not enough to satisfy watermarks.
4529 */
4530 count_vm_event(COMPACTFAIL);
66199712 4531
98dd3b48 4532 cond_resched();
56de7263
MG
4533
4534 return NULL;
4535}
33c2d214 4536
3250845d
VB
4537static inline bool
4538should_compact_retry(struct alloc_context *ac, int order, int alloc_flags,
4539 enum compact_result compact_result,
4540 enum compact_priority *compact_priority,
d9436498 4541 int *compaction_retries)
3250845d
VB
4542{
4543 int max_retries = MAX_COMPACT_RETRIES;
c2033b00 4544 int min_priority;
65190cff
MH
4545 bool ret = false;
4546 int retries = *compaction_retries;
4547 enum compact_priority priority = *compact_priority;
3250845d
VB
4548
4549 if (!order)
4550 return false;
4551
691d9497
AT
4552 if (fatal_signal_pending(current))
4553 return false;
4554
d9436498
VB
4555 if (compaction_made_progress(compact_result))
4556 (*compaction_retries)++;
4557
3250845d
VB
4558 /*
 4559	 * compaction considers all the zones as desperately out of memory,
4560 * so it doesn't really make much sense to retry except when the
4561 * failure could be caused by insufficient priority
4562 */
d9436498
VB
4563 if (compaction_failed(compact_result))
4564 goto check_priority;
3250845d 4565
49433085
VB
4566 /*
4567 * compaction was skipped because there are not enough order-0 pages
4568 * to work with, so we retry only if it looks like reclaim can help.
4569 */
4570 if (compaction_needs_reclaim(compact_result)) {
4571 ret = compaction_zonelist_suitable(ac, order, alloc_flags);
4572 goto out;
4573 }
4574
3250845d
VB
4575 /*
4576 * make sure the compaction wasn't deferred or didn't bail out early
4577 * due to locks contention before we declare that we should give up.
49433085
VB
4578 * But the next retry should use a higher priority if allowed, so
4579 * we don't just keep bailing out endlessly.
3250845d 4580 */
65190cff 4581 if (compaction_withdrawn(compact_result)) {
49433085 4582 goto check_priority;
65190cff 4583 }
3250845d
VB
4584
4585 /*
dcda9b04 4586	 * !costly requests are much more important than __GFP_RETRY_MAYFAIL
3250845d
VB
 4587	 * costly ones, because they are de facto nofail and invoke the OOM
 4588	 * killer to move on, while costly requests can fail and their users
 4589	 * are ready to cope with that. Allowing 1/4 of the retries is rather
 4590	 * arbitrary, but we would need much more detailed feedback from
 4591	 * compaction to make a better decision.
4592 */
4593 if (order > PAGE_ALLOC_COSTLY_ORDER)
4594 max_retries /= 4;
65190cff
MH
4595 if (*compaction_retries <= max_retries) {
4596 ret = true;
4597 goto out;
4598 }
3250845d 4599
d9436498
VB
4600 /*
4601 * Make sure there are attempts at the highest priority if we exhausted
4602 * all retries or failed at the lower priorities.
4603 */
4604check_priority:
c2033b00
VB
4605 min_priority = (order > PAGE_ALLOC_COSTLY_ORDER) ?
4606 MIN_COMPACT_COSTLY_PRIORITY : MIN_COMPACT_PRIORITY;
65190cff 4607
c2033b00 4608 if (*compact_priority > min_priority) {
d9436498
VB
4609 (*compact_priority)--;
4610 *compaction_retries = 0;
65190cff 4611 ret = true;
d9436498 4612 }
65190cff
MH
4613out:
4614 trace_compact_retry(order, priority, compact_result, retries, max_retries, ret);
4615 return ret;
3250845d 4616}
56de7263
MG
4617#else
4618static inline struct page *
4619__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
c603844b 4620 unsigned int alloc_flags, const struct alloc_context *ac,
a5508cd8 4621 enum compact_priority prio, enum compact_result *compact_result)
56de7263 4622{
33c2d214 4623 *compact_result = COMPACT_SKIPPED;
56de7263
MG
4624 return NULL;
4625}
33c2d214
MH
4626
4627static inline bool
86a294a8
MH
4628should_compact_retry(struct alloc_context *ac, unsigned int order, int alloc_flags,
4629 enum compact_result compact_result,
a5508cd8 4630 enum compact_priority *compact_priority,
d9436498 4631 int *compaction_retries)
33c2d214 4632{
31e49bfd
MH
4633 struct zone *zone;
4634 struct zoneref *z;
4635
4636 if (!order || order > PAGE_ALLOC_COSTLY_ORDER)
4637 return false;
4638
4639 /*
4640 * There are setups with compaction disabled which would prefer to loop
4641 * inside the allocator rather than hit the oom killer prematurely.
4642 * Let's give them a good hope and keep retrying while the order-0
4643 * watermarks are OK.
4644 */
97a225e6
JK
4645 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
4646 ac->highest_zoneidx, ac->nodemask) {
31e49bfd 4647 if (zone_watermark_ok(zone, 0, min_wmark_pages(zone),
97a225e6 4648 ac->highest_zoneidx, alloc_flags))
31e49bfd
MH
4649 return true;
4650 }
33c2d214
MH
4651 return false;
4652}
3250845d 4653#endif /* CONFIG_COMPACTION */
56de7263 4654
d92a8cfc 4655#ifdef CONFIG_LOCKDEP
93781325 4656static struct lockdep_map __fs_reclaim_map =
d92a8cfc
PZ
4657 STATIC_LOCKDEP_MAP_INIT("fs_reclaim", &__fs_reclaim_map);
4658
f920e413 4659static bool __need_reclaim(gfp_t gfp_mask)
d92a8cfc 4660{
d92a8cfc
PZ
4661 /* no reclaim without waiting on it */
4662 if (!(gfp_mask & __GFP_DIRECT_RECLAIM))
4663 return false;
4664
4665 /* this guy won't enter reclaim */
2e517d68 4666 if (current->flags & PF_MEMALLOC)
d92a8cfc
PZ
4667 return false;
4668
d92a8cfc
PZ
4669 if (gfp_mask & __GFP_NOLOCKDEP)
4670 return false;
4671
4672 return true;
4673}
4674
4f3eaf45 4675void __fs_reclaim_acquire(unsigned long ip)
93781325 4676{
4f3eaf45 4677 lock_acquire_exclusive(&__fs_reclaim_map, 0, 0, NULL, ip);
93781325
OS
4678}
4679
4f3eaf45 4680void __fs_reclaim_release(unsigned long ip)
93781325 4681{
4f3eaf45 4682 lock_release(&__fs_reclaim_map, ip);
93781325
OS
4683}
4684
d92a8cfc
PZ
4685void fs_reclaim_acquire(gfp_t gfp_mask)
4686{
f920e413
DV
4687 gfp_mask = current_gfp_context(gfp_mask);
4688
4689 if (__need_reclaim(gfp_mask)) {
4690 if (gfp_mask & __GFP_FS)
4f3eaf45 4691 __fs_reclaim_acquire(_RET_IP_);
f920e413
DV
4692
4693#ifdef CONFIG_MMU_NOTIFIER
4694 lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
4695 lock_map_release(&__mmu_notifier_invalidate_range_start_map);
4696#endif
4697
4698 }
d92a8cfc
PZ
4699}
4700EXPORT_SYMBOL_GPL(fs_reclaim_acquire);
4701
4702void fs_reclaim_release(gfp_t gfp_mask)
4703{
f920e413
DV
4704 gfp_mask = current_gfp_context(gfp_mask);
4705
4706 if (__need_reclaim(gfp_mask)) {
4707 if (gfp_mask & __GFP_FS)
4f3eaf45 4708 __fs_reclaim_release(_RET_IP_);
f920e413 4709 }
d92a8cfc
PZ
4710}
4711EXPORT_SYMBOL_GPL(fs_reclaim_release);
4712#endif
4713
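The fs_reclaim map gives lockdep a way to flag "allocates under a lock that reclaim also takes" deadlocks, and fs_reclaim_acquire() applies current_gfp_context() first, so scoped gfp restrictions are honoured. Filesystem code that has to allocate in such a context therefore typically uses the memalloc_nofs_save()/memalloc_nofs_restore() scope API rather than passing GFP_NOFS at every call site; the helper below is a hedged, hypothetical sketch of that pattern, not code from any particular filesystem.

#include <linux/gfp.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>

/*
 * Hypothetical: allocate scratch memory while holding a lock that the
 * filesystem's reclaim path can also take.  Scoping the region as NOFS
 * keeps both lockdep (via fs_reclaim) and real reclaim out of the FS.
 */
static void *alloc_under_fs_lock(size_t size)
{
	unsigned int nofs_flags = memalloc_nofs_save();
	void *p = kmalloc(size, GFP_KERNEL);

	memalloc_nofs_restore(nofs_flags);
	return p;
}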
3d36424b
MG
4714/*
4715 * Zonelists may change due to hotplug during allocation. Detect when zonelists
4716 * have been rebuilt so allocation retries. Reader side does not lock and
4717 * retries the allocation if zonelist changes. Writer side is protected by the
4718 * embedded spin_lock.
4719 */
4720static DEFINE_SEQLOCK(zonelist_update_seq);
4721
4722static unsigned int zonelist_iter_begin(void)
4723{
4724 if (IS_ENABLED(CONFIG_MEMORY_HOTREMOVE))
4725 return read_seqbegin(&zonelist_update_seq);
4726
4727 return 0;
4728}
4729
4730static unsigned int check_retry_zonelist(unsigned int seq)
4731{
4732 if (IS_ENABLED(CONFIG_MEMORY_HOTREMOVE))
4733 return read_seqretry(&zonelist_update_seq, seq);
4734
4735 return seq;
4736}
4737
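The reader side referred to above is the allocation slowpath: it samples a cookie before walking the zonelists and, once every attempt has failed, retries if the cookie shows the zonelists were rebuilt underneath it. The function below is a stripped-down sketch of that pattern only; the real loop in __alloc_pages_slowpath() carries far more state and this function name is invented.

/* Illustrative reader-side pattern; not the real slowpath. */
static struct page *alloc_retry_on_zonelist_change(gfp_t gfp_mask,
						   unsigned int order,
						   const struct alloc_context *ac,
						   int alloc_flags)
{
	struct page *page;
	unsigned int zonelist_iter_cookie;

restart:
	zonelist_iter_cookie = zonelist_iter_begin();
	page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
	if (page)
		return page;

	/* The zonelists changed under us (e.g. due to hotplug): start over. */
	if (check_retry_zonelist(zonelist_iter_cookie))
		goto restart;

	return NULL;
}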
bba90710 4738/* Perform direct synchronous page reclaim */
2187e17b 4739static unsigned long
a9263751
VB
4740__perform_reclaim(gfp_t gfp_mask, unsigned int order,
4741 const struct alloc_context *ac)
11e33f6a 4742{
499118e9 4743 unsigned int noreclaim_flag;
fa7fc75f 4744 unsigned long progress;
11e33f6a
MG
4745
4746 cond_resched();
4747
4748 /* We now go into synchronous reclaim */
4749 cpuset_memory_pressure_bump();
d92a8cfc 4750 fs_reclaim_acquire(gfp_mask);
93781325 4751 noreclaim_flag = memalloc_noreclaim_save();
11e33f6a 4752
a9263751
VB
4753 progress = try_to_free_pages(ac->zonelist, order, gfp_mask,
4754 ac->nodemask);
11e33f6a 4755
499118e9 4756 memalloc_noreclaim_restore(noreclaim_flag);
93781325 4757 fs_reclaim_release(gfp_mask);
11e33f6a
MG
4758
4759 cond_resched();
4760
bba90710
MS
4761 return progress;
4762}
4763
4764/* The really slow allocator path where we enter direct reclaim */
4765static inline struct page *
4766__alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
c603844b 4767 unsigned int alloc_flags, const struct alloc_context *ac,
a9263751 4768 unsigned long *did_some_progress)
bba90710
MS
4769{
4770 struct page *page = NULL;
fa7fc75f 4771 unsigned long pflags;
bba90710
MS
4772 bool drained = false;
4773
fa7fc75f 4774 psi_memstall_enter(&pflags);
a9263751 4775 *did_some_progress = __perform_reclaim(gfp_mask, order, ac);
9ee493ce 4776 if (unlikely(!(*did_some_progress)))
fa7fc75f 4777 goto out;
11e33f6a 4778
9ee493ce 4779retry:
31a6c190 4780 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
9ee493ce
MG
4781
4782 /*
4783 * If an allocation failed after direct reclaim, it could be because
0aaa29a5 4784 * pages are pinned on the per-cpu lists or in high alloc reserves.
047b9967 4785 * Shrink them and try again
9ee493ce
MG
4786 */
4787 if (!page && !drained) {
29fac03b 4788 unreserve_highatomic_pageblock(ac, false);
93481ff0 4789 drain_all_pages(NULL);
9ee493ce
MG
4790 drained = true;
4791 goto retry;
4792 }
fa7fc75f
SB
4793out:
4794 psi_memstall_leave(&pflags);
9ee493ce 4795
11e33f6a
MG
4796 return page;
4797}
4798
5ecd9d40
DR
4799static void wake_all_kswapds(unsigned int order, gfp_t gfp_mask,
4800 const struct alloc_context *ac)
3a025760
JW
4801{
4802 struct zoneref *z;
4803 struct zone *zone;
e1a55637 4804 pg_data_t *last_pgdat = NULL;
97a225e6 4805 enum zone_type highest_zoneidx = ac->highest_zoneidx;
3a025760 4806
97a225e6 4807 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, highest_zoneidx,
5ecd9d40 4808 ac->nodemask) {
bc53008e
WY
4809 if (!managed_zone(zone))
4810 continue;
d137a7cb 4811 if (last_pgdat != zone->zone_pgdat) {
97a225e6 4812 wakeup_kswapd(zone, gfp_mask, order, highest_zoneidx);
d137a7cb
CW
4813 last_pgdat = zone->zone_pgdat;
4814 }
e1a55637 4815 }
3a025760
JW
4816}
4817
c603844b 4818static inline unsigned int
341ce06f
PZ
4819gfp_to_alloc_flags(gfp_t gfp_mask)
4820{
c603844b 4821 unsigned int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
1da177e4 4822
736838e9
MN
4823 /*
4824 * __GFP_HIGH is assumed to be the same as ALLOC_HIGH
4825 * and __GFP_KSWAPD_RECLAIM is assumed to be the same as ALLOC_KSWAPD
4826 * to save two branches.
4827 */
e6223a3b 4828 BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_HIGH);
736838e9 4829 BUILD_BUG_ON(__GFP_KSWAPD_RECLAIM != (__force gfp_t) ALLOC_KSWAPD);
933e312e 4830
341ce06f
PZ
4831 /*
4832 * The caller may dip into page reserves a bit more if the caller
4833 * cannot run direct reclaim, or if the caller has a realtime scheduling
4834 * policy or is asking for __GFP_HIGH memory. GFP_ATOMIC requests will
d0164adc 4835 * set both ALLOC_HARDER (__GFP_ATOMIC) and ALLOC_HIGH (__GFP_HIGH).
341ce06f 4836 */
736838e9
MN
4837 alloc_flags |= (__force int)
4838 (gfp_mask & (__GFP_HIGH | __GFP_KSWAPD_RECLAIM));
1da177e4 4839
d0164adc 4840 if (gfp_mask & __GFP_ATOMIC) {
5c3240d9 4841 /*
b104a35d
DR
4842 * Not worth trying to allocate harder for __GFP_NOMEMALLOC even
4843 * if it can't schedule.
5c3240d9 4844 */
b104a35d 4845 if (!(gfp_mask & __GFP_NOMEMALLOC))
5c3240d9 4846 alloc_flags |= ALLOC_HARDER;
523b9458 4847 /*
b104a35d 4848 * Ignore cpuset mems for GFP_ATOMIC rather than fail, see the
344736f2 4849 * comment for __cpuset_node_allowed().
523b9458 4850 */
341ce06f 4851 alloc_flags &= ~ALLOC_CPUSET;
88dc6f20 4852 } else if (unlikely(rt_task(current)) && in_task())
341ce06f
PZ
4853 alloc_flags |= ALLOC_HARDER;
4854
8e3560d9 4855 alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, alloc_flags);
8510e69c 4856
341ce06f
PZ
4857 return alloc_flags;
4858}
4859
cd04ae1e 4860static bool oom_reserves_allowed(struct task_struct *tsk)
072bb0aa 4861{
cd04ae1e
MH
4862 if (!tsk_is_oom_victim(tsk))
4863 return false;
4864
4865 /*
4866 * !MMU doesn't have an oom reaper, so give access to memory reserves
4867 * only to the thread with TIF_MEMDIE set
4868 */
4869 if (!IS_ENABLED(CONFIG_MMU) && !test_thread_flag(TIF_MEMDIE))
31a6c190
VB
4870 return false;
4871
cd04ae1e
MH
4872 return true;
4873}
4874
4875/*
4876 * Distinguish requests which really need access to full memory
4877 * reserves from oom victims which can live with a portion of it
4878 */
4879static inline int __gfp_pfmemalloc_flags(gfp_t gfp_mask)
4880{
4881 if (unlikely(gfp_mask & __GFP_NOMEMALLOC))
4882 return 0;
31a6c190 4883 if (gfp_mask & __GFP_MEMALLOC)
cd04ae1e 4884 return ALLOC_NO_WATERMARKS;
31a6c190 4885 if (in_serving_softirq() && (current->flags & PF_MEMALLOC))
cd04ae1e
MH
4886 return ALLOC_NO_WATERMARKS;
4887 if (!in_interrupt()) {
4888 if (current->flags & PF_MEMALLOC)
4889 return ALLOC_NO_WATERMARKS;
4890 else if (oom_reserves_allowed(current))
4891 return ALLOC_OOM;
4892 }
31a6c190 4893
cd04ae1e
MH
4894 return 0;
4895}
4896
4897bool gfp_pfmemalloc_allowed(gfp_t gfp_mask)
4898{
4899 return !!__gfp_pfmemalloc_flags(gfp_mask);
072bb0aa
MG
4900}
4901
0a0337e0
MH
4902/*
4903 * Checks whether it makes sense to retry the reclaim to make forward progress
4904 * for the given allocation request.
491d79ae
JW
4905 *
4906 * We give up when we either have tried MAX_RECLAIM_RETRIES in a row
4907 * without success, or when we couldn't even meet the watermark if we
4908 * reclaimed all remaining pages on the LRU lists.
0a0337e0
MH
4909 *
4910 * Returns true if a retry is viable or false to enter the oom path.
4911 */
4912static inline bool
4913should_reclaim_retry(gfp_t gfp_mask, unsigned order,
4914 struct alloc_context *ac, int alloc_flags,
423b452e 4915 bool did_some_progress, int *no_progress_loops)
0a0337e0
MH
4916{
4917 struct zone *zone;
4918 struct zoneref *z;
15f570bf 4919 bool ret = false;
0a0337e0 4920
423b452e
VB
4921 /*
4922 * Costly allocations might have made a progress but this doesn't mean
4923 * their order will become available due to high fragmentation so
4924 * always increment the no progress counter for them
4925 */
4926 if (did_some_progress && order <= PAGE_ALLOC_COSTLY_ORDER)
4927 *no_progress_loops = 0;
4928 else
4929 (*no_progress_loops)++;
4930
0a0337e0
MH
4931 /*
4932 * Make sure we converge to OOM if we cannot make any progress
4933 * several times in a row.
4934 */
04c8716f
MK
4935 if (*no_progress_loops > MAX_RECLAIM_RETRIES) {
4936 /* Before OOM, exhaust highatomic_reserve */
29fac03b 4937 return unreserve_highatomic_pageblock(ac, true);
04c8716f 4938 }
0a0337e0 4939
bca67592
MG
4940 /*
4941 * Keep reclaiming pages while there is a chance this will lead
4942 * somewhere. If none of the target zones can satisfy our allocation
4943 * request even if all reclaimable pages are considered then we are
4944 * screwed and have to go OOM.
0a0337e0 4945 */
97a225e6
JK
4946 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
4947 ac->highest_zoneidx, ac->nodemask) {
0a0337e0 4948 unsigned long available;
ede37713 4949 unsigned long reclaimable;
d379f01d
MH
4950 unsigned long min_wmark = min_wmark_pages(zone);
4951 bool wmark;
0a0337e0 4952
5a1c84b4 4953 available = reclaimable = zone_reclaimable_pages(zone);
5a1c84b4 4954 available += zone_page_state_snapshot(zone, NR_FREE_PAGES);
0a0337e0
MH
4955
4956 /*
491d79ae
JW
4957 * Would the allocation succeed if we reclaimed all
4958 * reclaimable pages?
0a0337e0 4959 */
d379f01d 4960 wmark = __zone_watermark_ok(zone, order, min_wmark,
97a225e6 4961 ac->highest_zoneidx, alloc_flags, available);
d379f01d
MH
4962 trace_reclaim_retry_zone(z, order, reclaimable,
4963 available, min_wmark, *no_progress_loops, wmark);
4964 if (wmark) {
15f570bf 4965 ret = true;
132b0d21 4966 break;
0a0337e0
MH
4967 }
4968 }
4969
15f570bf
MH
4970 /*
4971 * Memory allocation/reclaim might be called from a WQ context and the
4972 * current implementation of the WQ concurrency control doesn't
4973 * recognize that a particular WQ is congested if the worker thread is
4974 * looping without ever sleeping. Therefore we have to do a short sleep
4975 * here rather than calling cond_resched().
4976 */
4977 if (current->flags & PF_WQ_WORKER)
4978 schedule_timeout_uninterruptible(1);
4979 else
4980 cond_resched();
4981 return ret;
0a0337e0
MH
4982}
4983
902b6281
VB
4984static inline bool
4985check_retry_cpuset(int cpuset_mems_cookie, struct alloc_context *ac)
4986{
4987 /*
4988 * It's possible that cpuset's mems_allowed and the nodemask from
4989 * mempolicy don't intersect. This should normally be dealt with by
4990 * policy_nodemask(), but it's possible to race with a cpuset update in
4991 * such a way that the check therein was true, and then it became false
4992 * before we got our cpuset_mems_cookie here.
4993 * This assumes that for all allocations, ac->nodemask can come only
4994 * from MPOL_BIND mempolicy (whose documented semantics is to be ignored
4995 * when it does not intersect with the cpuset restrictions) or the
4996 * caller can deal with a violated nodemask.
4997 */
4998 if (cpusets_enabled() && ac->nodemask &&
4999 !cpuset_nodemask_valid_mems_allowed(ac->nodemask)) {
5000 ac->nodemask = NULL;
5001 return true;
5002 }
5003
5004 /*
5005 * When updating a task's mems_allowed or mempolicy nodemask, it is
5006 * possible to race with parallel threads in such a way that our
5007 * allocation can fail while the mask is being updated. If we are about
5008 * to fail, check if the cpuset changed during allocation and if so,
5009 * retry.
5010 */
5011 if (read_mems_allowed_retry(cpuset_mems_cookie))
5012 return true;
5013
5014 return false;
5015}
5016
11e33f6a
MG
5017static inline struct page *
5018__alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
a9263751 5019 struct alloc_context *ac)
11e33f6a 5020{
d0164adc 5021 bool can_direct_reclaim = gfp_mask & __GFP_DIRECT_RECLAIM;
282722b0 5022 const bool costly_order = order > PAGE_ALLOC_COSTLY_ORDER;
11e33f6a 5023 struct page *page = NULL;
c603844b 5024 unsigned int alloc_flags;
11e33f6a 5025 unsigned long did_some_progress;
5ce9bfef 5026 enum compact_priority compact_priority;
c5d01d0d 5027 enum compact_result compact_result;
5ce9bfef
VB
5028 int compaction_retries;
5029 int no_progress_loops;
5ce9bfef 5030 unsigned int cpuset_mems_cookie;
3d36424b 5031 unsigned int zonelist_iter_cookie;
cd04ae1e 5032 int reserve_flags;
1da177e4 5033
d0164adc
MG
5034 /*
5035 * We also sanity check to catch abuse of atomic reserves being used by
5036 * callers that are not in atomic context.
5037 */
5038 if (WARN_ON_ONCE((gfp_mask & (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)) ==
5039 (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)))
5040 gfp_mask &= ~__GFP_ATOMIC;
5041
3d36424b 5042restart:
5ce9bfef
VB
5043 compaction_retries = 0;
5044 no_progress_loops = 0;
5045 compact_priority = DEF_COMPACT_PRIORITY;
5046 cpuset_mems_cookie = read_mems_allowed_begin();
3d36424b 5047 zonelist_iter_cookie = zonelist_iter_begin();
9a67f648
MH
5048
5049 /*
5050 * The fast path uses conservative alloc_flags to succeed only until
5051 * kswapd needs to be woken up, and to avoid the cost of setting up
5052 * alloc_flags precisely. So we do that now.
5053 */
5054 alloc_flags = gfp_to_alloc_flags(gfp_mask);
5055
e47483bc
VB
5056 /*
5057 * We need to recalculate the starting point for the zonelist iterator
5058 * because we might have used different nodemask in the fast path, or
5059 * there was a cpuset modification and we are retrying - otherwise we
5060 * could end up iterating over non-eligible zones endlessly.
5061 */
5062 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
97a225e6 5063 ac->highest_zoneidx, ac->nodemask);
e47483bc
VB
5064 if (!ac->preferred_zoneref->zone)
5065 goto nopage;
5066
8ca1b5a4
FT
5067 /*
5068 * Check for insane configurations where the cpuset doesn't contain
5069 * any suitable zone to satisfy the request - e.g. non-movable
5070 * GFP_HIGHUSER allocations from MOVABLE nodes only.
5071 */
5072 if (cpusets_insane_config() && (gfp_mask & __GFP_HARDWALL)) {
5073 struct zoneref *z = first_zones_zonelist(ac->zonelist,
5074 ac->highest_zoneidx,
5075 &cpuset_current_mems_allowed);
5076 if (!z->zone)
5077 goto nopage;
5078 }
5079
0a79cdad 5080 if (alloc_flags & ALLOC_KSWAPD)
5ecd9d40 5081 wake_all_kswapds(order, gfp_mask, ac);
23771235
VB
5082
5083 /*
5084 * The adjusted alloc_flags might result in immediate success, so try
5085 * that first
5086 */
5087 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
5088 if (page)
5089 goto got_pg;
5090
a8161d1e
VB
5091 /*
5092 * For costly allocations, try direct compaction first, as it's likely
282722b0
VB
5093 * that we have enough base pages and don't need to reclaim. For non-
5094 * movable high-order allocations, do that as well, as compaction will
5095 * try to prevent permanent fragmentation by migrating from blocks of the
5096 * same migratetype.
5097 * Don't try this for allocations that are allowed to ignore
5098 * watermarks, as the ALLOC_NO_WATERMARKS attempt didn't yet happen.
a8161d1e 5099 */
282722b0
VB
5100 if (can_direct_reclaim &&
5101 (costly_order ||
5102 (order > 0 && ac->migratetype != MIGRATE_MOVABLE))
5103 && !gfp_pfmemalloc_allowed(gfp_mask)) {
a8161d1e
VB
5104 page = __alloc_pages_direct_compact(gfp_mask, order,
5105 alloc_flags, ac,
a5508cd8 5106 INIT_COMPACT_PRIORITY,
a8161d1e
VB
5107 &compact_result);
5108 if (page)
5109 goto got_pg;
5110
cc638f32
VB
5111 /*
5112 * Checks for costly allocations with __GFP_NORETRY, which
5113 * includes some THP page fault allocations
5114 */
5115 if (costly_order && (gfp_mask & __GFP_NORETRY)) {
b39d0ee2
DR
5116 /*
5117 * If allocating entire pageblock(s) and compaction
5118 * failed because all zones are below low watermarks
5119 * or is prohibited because it recently failed at this
3f36d866
DR
5120 * order, fail immediately unless the allocator has
5121 * requested compaction and reclaim retry.
b39d0ee2
DR
5122 *
5123 * Reclaim is
5124 * - potentially very expensive because zones are far
5125 * below their low watermarks or this is part of very
5126 * bursty high order allocations,
5127 * - not guaranteed to help because isolate_freepages()
5128 * may not iterate over freed pages as part of its
5129 * linear scan, and
5130 * - unlikely to make entire pageblocks free on its
5131 * own.
5132 */
5133 if (compact_result == COMPACT_SKIPPED ||
5134 compact_result == COMPACT_DEFERRED)
5135 goto nopage;
a8161d1e 5136
a8161d1e 5137 /*
3eb2771b
VB
5138 * Looks like reclaim/compaction is worth trying, but
5139 * sync compaction could be very expensive, so keep
25160354 5140 * using async compaction.
a8161d1e 5141 */
a5508cd8 5142 compact_priority = INIT_COMPACT_PRIORITY;
a8161d1e
VB
5143 }
5144 }
23771235 5145
31a6c190 5146retry:
23771235 5147 /* Ensure kswapd doesn't accidentally go to sleep as long as we loop */
0a79cdad 5148 if (alloc_flags & ALLOC_KSWAPD)
5ecd9d40 5149 wake_all_kswapds(order, gfp_mask, ac);
31a6c190 5150
cd04ae1e
MH
5151 reserve_flags = __gfp_pfmemalloc_flags(gfp_mask);
5152 if (reserve_flags)
ce96fa62
ML
5153 alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, reserve_flags) |
5154 (alloc_flags & ALLOC_KSWAPD);
23771235 5155
e46e7b77 5156 /*
d6a24df0
VB
5157 * Reset the nodemask and zonelist iterators if memory policies can be
5158 * ignored. These allocations are high priority and system rather than
5159 * user oriented.
e46e7b77 5160 */
cd04ae1e 5161 if (!(alloc_flags & ALLOC_CPUSET) || reserve_flags) {
d6a24df0 5162 ac->nodemask = NULL;
e46e7b77 5163 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
97a225e6 5164 ac->highest_zoneidx, ac->nodemask);
e46e7b77
MG
5165 }
5166
23771235 5167 /* Attempt with potentially adjusted zonelist and alloc_flags */
31a6c190 5168 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
7fb1d9fc
RS
5169 if (page)
5170 goto got_pg;
1da177e4 5171
d0164adc 5172 /* Caller is not willing to reclaim, we can't balance anything */
9a67f648 5173 if (!can_direct_reclaim)
1da177e4
LT
5174 goto nopage;
5175
9a67f648
MH
5176 /* Avoid recursion of direct reclaim */
5177 if (current->flags & PF_MEMALLOC)
6583bb64
DR
5178 goto nopage;
5179
a8161d1e
VB
5180 /* Try direct reclaim and then allocating */
5181 page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac,
5182 &did_some_progress);
5183 if (page)
5184 goto got_pg;
5185
5186 /* Try direct compaction and then allocating */
a9263751 5187 page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac,
a5508cd8 5188 compact_priority, &compact_result);
56de7263
MG
5189 if (page)
5190 goto got_pg;
75f30861 5191
9083905a
JW
5192 /* Do not loop if specifically requested */
5193 if (gfp_mask & __GFP_NORETRY)
a8161d1e 5194 goto nopage;
9083905a 5195
0a0337e0
MH
5196 /*
5197 * Do not retry costly high order allocations unless they are
dcda9b04 5198 * __GFP_RETRY_MAYFAIL
0a0337e0 5199 */
dcda9b04 5200 if (costly_order && !(gfp_mask & __GFP_RETRY_MAYFAIL))
a8161d1e 5201 goto nopage;
0a0337e0 5202
0a0337e0 5203 if (should_reclaim_retry(gfp_mask, order, ac, alloc_flags,
423b452e 5204 did_some_progress > 0, &no_progress_loops))
0a0337e0
MH
5205 goto retry;
5206
33c2d214
MH
5207 /*
5208 * It doesn't make any sense to retry for the compaction if the order-0
5209 * reclaim is not able to make any progress because the current
5210 * implementation of the compaction depends on the sufficient amount
5211 * of free memory (see __compaction_suitable)
5212 */
5213 if (did_some_progress > 0 &&
86a294a8 5214 should_compact_retry(ac, order, alloc_flags,
a5508cd8 5215 compact_result, &compact_priority,
d9436498 5216 &compaction_retries))
33c2d214
MH
5217 goto retry;
5218
902b6281 5219
3d36424b
MG
5220 /*
5221 * Deal with possible cpuset update races or zonelist updates to avoid
5222 * an unnecessary OOM kill.
5223 */
5224 if (check_retry_cpuset(cpuset_mems_cookie, ac) ||
5225 check_retry_zonelist(zonelist_iter_cookie))
5226 goto restart;
e47483bc 5227
9083905a
JW
5228 /* Reclaim has failed us, start killing things */
5229 page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress);
5230 if (page)
5231 goto got_pg;
5232
9a67f648 5233 /* Avoid allocations with no watermarks from looping endlessly */
cd04ae1e 5234 if (tsk_is_oom_victim(current) &&
8510e69c 5235 (alloc_flags & ALLOC_OOM ||
c288983d 5236 (gfp_mask & __GFP_NOMEMALLOC)))
9a67f648
MH
5237 goto nopage;
5238
9083905a 5239 /* Retry as long as the OOM killer is making progress */
0a0337e0
MH
5240 if (did_some_progress) {
5241 no_progress_loops = 0;
9083905a 5242 goto retry;
0a0337e0 5243 }
9083905a 5244
1da177e4 5245nopage:
3d36424b
MG
5246 /*
5247 * Deal with possible cpuset update races or zonelist updates to avoid
5248 * an unnecessary OOM kill.
5249 */
5250 if (check_retry_cpuset(cpuset_mems_cookie, ac) ||
5251 check_retry_zonelist(zonelist_iter_cookie))
5252 goto restart;
5ce9bfef 5253
9a67f648
MH
5254 /*
5255 * Make sure that __GFP_NOFAIL request doesn't leak out and make sure
5256 * we always retry
5257 */
5258 if (gfp_mask & __GFP_NOFAIL) {
5259 /*
5260 * All existing users of __GFP_NOFAIL are blockable, so warn
5261 * of any new users that actually require GFP_NOWAIT
5262 */
3f913fc5 5263 if (WARN_ON_ONCE_GFP(!can_direct_reclaim, gfp_mask))
9a67f648
MH
5264 goto fail;
5265
5266 /*
5267 * PF_MEMALLOC request from this context is rather bizarre
5268 * because we cannot reclaim anything and can only loop waiting
5269 * for somebody to do the work for us
5270 */
3f913fc5 5271 WARN_ON_ONCE_GFP(current->flags & PF_MEMALLOC, gfp_mask);
9a67f648
MH
5272
5273 /*
5274 * Non-failing costly orders are a hard requirement which we
5275 * are not well prepared for, so let's warn about these users
5276 * so that we can identify them and convert them to something
5277 * else.
5278 */
896c4d52 5279 WARN_ON_ONCE_GFP(costly_order, gfp_mask);
9a67f648 5280
6c18ba7a
MH
5281 /*
5282 * Help non-failing allocations by giving them access to memory
5283 * reserves but do not use ALLOC_NO_WATERMARKS because this
5284 * could deplete whole memory reserves which would just make
5285 * the situation worse
5286 */
5287 page = __alloc_pages_cpuset_fallback(gfp_mask, order, ALLOC_HARDER, ac);
5288 if (page)
5289 goto got_pg;
5290
9a67f648
MH
5291 cond_resched();
5292 goto retry;
5293 }
5294fail:
a8e99259 5295 warn_alloc(gfp_mask, ac->nodemask,
7877cdcc 5296 "page allocation failure: order:%u", order);
1da177e4 5297got_pg:
072bb0aa 5298 return page;
1da177e4 5299}
11e33f6a 5300
9cd75558 5301static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
04ec6264 5302 int preferred_nid, nodemask_t *nodemask,
8e6a930b 5303 struct alloc_context *ac, gfp_t *alloc_gfp,
9cd75558 5304 unsigned int *alloc_flags)
11e33f6a 5305{
97a225e6 5306 ac->highest_zoneidx = gfp_zone(gfp_mask);
04ec6264 5307 ac->zonelist = node_zonelist(preferred_nid, gfp_mask);
9cd75558 5308 ac->nodemask = nodemask;
01c0bfe0 5309 ac->migratetype = gfp_migratetype(gfp_mask);
11e33f6a 5310
682a3385 5311 if (cpusets_enabled()) {
8e6a930b 5312 *alloc_gfp |= __GFP_HARDWALL;
182f3d7a
MS
5313 /*
5314 * When we are in the interrupt context, it is irrelevant
5315 * to the current task context. It means that any node is ok.
5316 */
88dc6f20 5317 if (in_task() && !ac->nodemask)
9cd75558 5318 ac->nodemask = &cpuset_current_mems_allowed;
51047820
VB
5319 else
5320 *alloc_flags |= ALLOC_CPUSET;
682a3385
MG
5321 }
5322
446ec838 5323 might_alloc(gfp_mask);
11e33f6a
MG
5324
5325 if (should_fail_alloc_page(gfp_mask, order))
9cd75558 5326 return false;
11e33f6a 5327
8e3560d9 5328 *alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, *alloc_flags);
d883c6cf 5329
c9ab0c4f 5330 /* Dirty zone balancing only done in the fast path */
9cd75558 5331 ac->spread_dirty_pages = (gfp_mask & __GFP_WRITE);
c9ab0c4f 5332
e46e7b77
MG
5333 /*
5334 * The preferred zone is used for statistics but crucially it is
5335 * also used as the starting point for the zonelist iterator. It
5336 * may get reset for allocations that ignore memory policies.
5337 */
9cd75558 5338 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
97a225e6 5339 ac->highest_zoneidx, ac->nodemask);
a0622d05
MN
5340
5341 return true;
9cd75558
MG
5342}
5343
387ba26f 5344/*
0f87d9d3 5345 * __alloc_pages_bulk - Allocate a number of order-0 pages to a list or array
387ba26f
MG
5346 * @gfp: GFP flags for the allocation
5347 * @preferred_nid: The preferred NUMA node ID to allocate from
5348 * @nodemask: Set of nodes to allocate from, may be NULL
0f87d9d3
MG
5349 * @nr_pages: The number of pages desired on the list or array
5350 * @page_list: Optional list to store the allocated pages
5351 * @page_array: Optional array to store the pages
387ba26f
MG
5352 *
5353 * This is a batched version of the page allocator that attempts to
0f87d9d3
MG
5354 * allocate nr_pages quickly. Pages are added to page_list if page_list
5355 * is not NULL, otherwise it is assumed that the page_array is valid.
387ba26f 5356 *
0f87d9d3
MG
5357 * For lists, nr_pages is the number of pages that should be allocated.
5358 *
5359 * For arrays, only NULL elements are populated with pages and nr_pages
5360 * is the maximum number of pages that will be stored in the array.
5361 *
5362 * Returns the number of pages on the list or array.
387ba26f
MG
5363 */
5364unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
5365 nodemask_t *nodemask, int nr_pages,
0f87d9d3
MG
5366 struct list_head *page_list,
5367 struct page **page_array)
387ba26f
MG
5368{
5369 struct page *page;
5370 unsigned long flags;
4b23a68f 5371 unsigned long __maybe_unused UP_flags;
387ba26f
MG
5372 struct zone *zone;
5373 struct zoneref *z;
5374 struct per_cpu_pages *pcp;
5375 struct list_head *pcp_list;
5376 struct alloc_context ac;
5377 gfp_t alloc_gfp;
5378 unsigned int alloc_flags = ALLOC_WMARK_LOW;
3e23060b 5379 int nr_populated = 0, nr_account = 0;
387ba26f 5380
0f87d9d3
MG
5381 /*
5382 * Skip populated array elements to determine if any pages need
5383 * to be allocated before disabling IRQs.
5384 */
b08e50dd 5385 while (page_array && nr_populated < nr_pages && page_array[nr_populated])
0f87d9d3
MG
5386 nr_populated++;
5387
06147843
CL
5388 /* No pages requested? */
5389 if (unlikely(nr_pages <= 0))
5390 goto out;
5391
b3b64ebd
MG
5392 /* Already populated array? */
5393 if (unlikely(page_array && nr_pages - nr_populated == 0))
06147843 5394 goto out;
b3b64ebd 5395
8dcb3060
SB
5396 /* Bulk allocator does not support memcg accounting. */
5397 if (memcg_kmem_enabled() && (gfp & __GFP_ACCOUNT))
5398 goto failed;
5399
387ba26f 5400 /* Use the single page allocator for one page. */
0f87d9d3 5401 if (nr_pages - nr_populated == 1)
387ba26f
MG
5402 goto failed;
5403
187ad460
MG
5404#ifdef CONFIG_PAGE_OWNER
5405 /*
5406 * PAGE_OWNER may recurse into the allocator to allocate space to
5407 * save the stack with pagesets.lock held. Releasing/reacquiring
5408 * removes much of the performance benefit of bulk allocation so
5409 * force the caller to allocate one page at a time, as that will perform
5410 * similarly to adding the complexity to the bulk allocator.
5411 */
5412 if (static_branch_unlikely(&page_owner_inited))
5413 goto failed;
5414#endif
5415
387ba26f
MG
5416 /* May set ALLOC_NOFRAGMENT, fragmentation will return 1 page. */
5417 gfp &= gfp_allowed_mask;
5418 alloc_gfp = gfp;
5419 if (!prepare_alloc_pages(gfp, 0, preferred_nid, nodemask, &ac, &alloc_gfp, &alloc_flags))
06147843 5420 goto out;
387ba26f
MG
5421 gfp = alloc_gfp;
5422
5423 /* Find an allowed local zone that meets the low watermark. */
5424 for_each_zone_zonelist_nodemask(zone, z, ac.zonelist, ac.highest_zoneidx, ac.nodemask) {
5425 unsigned long mark;
5426
5427 if (cpusets_enabled() && (alloc_flags & ALLOC_CPUSET) &&
5428 !__cpuset_zone_allowed(zone, gfp)) {
5429 continue;
5430 }
5431
5432 if (nr_online_nodes > 1 && zone != ac.preferred_zoneref->zone &&
5433 zone_to_nid(zone) != zone_to_nid(ac.preferred_zoneref->zone)) {
5434 goto failed;
5435 }
5436
5437 mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK) + nr_pages;
5438 if (zone_watermark_fast(zone, 0, mark,
5439 zonelist_zone_idx(ac.preferred_zoneref),
5440 alloc_flags, gfp)) {
5441 break;
5442 }
5443 }
5444
5445 /*
5446 * If there are no allowed local zones that meets the watermarks then
5447 * try to allocate a single page and reclaim if necessary.
5448 */
ce76f9a1 5449 if (unlikely(!zone))
387ba26f
MG
5450 goto failed;
5451
4b23a68f 5452 /* Is a parallel drain in progress? */
4b23a68f 5453 pcp_trylock_prepare(UP_flags);
01b44456
MG
5454 pcp = pcp_spin_trylock_irqsave(zone->per_cpu_pageset, flags);
5455 if (!pcp)
4b23a68f 5456 goto failed_irq;
387ba26f 5457
387ba26f 5458 /* Attempt the batch allocation */
44042b44 5459 pcp_list = &pcp->lists[order_to_pindex(ac.migratetype, 0)];
0f87d9d3
MG
5460 while (nr_populated < nr_pages) {
5461
5462 /* Skip existing pages */
5463 if (page_array && page_array[nr_populated]) {
5464 nr_populated++;
5465 continue;
5466 }
5467
44042b44 5468 page = __rmqueue_pcplist(zone, 0, ac.migratetype, alloc_flags,
387ba26f 5469 pcp, pcp_list);
ce76f9a1 5470 if (unlikely(!page)) {
c572e488 5471 /* Try and allocate at least one page */
4b23a68f 5472 if (!nr_account) {
01b44456 5473 pcp_spin_unlock_irqrestore(pcp, flags);
387ba26f 5474 goto failed_irq;
4b23a68f 5475 }
387ba26f
MG
5476 break;
5477 }
3e23060b 5478 nr_account++;
387ba26f
MG
5479
5480 prep_new_page(page, 0, gfp, 0);
0f87d9d3
MG
5481 if (page_list)
5482 list_add(&page->lru, page_list);
5483 else
5484 page_array[nr_populated] = page;
5485 nr_populated++;
387ba26f
MG
5486 }
5487
01b44456 5488 pcp_spin_unlock_irqrestore(pcp, flags);
4b23a68f 5489 pcp_trylock_finish(UP_flags);
43c95bcc 5490
3e23060b
MG
5491 __count_zid_vm_events(PGALLOC, zone_idx(zone), nr_account);
5492 zone_statistics(ac.preferred_zoneref->zone, zone, nr_account);
387ba26f 5493
06147843 5494out:
0f87d9d3 5495 return nr_populated;
387ba26f
MG
5496
5497failed_irq:
4b23a68f 5498 pcp_trylock_finish(UP_flags);
387ba26f
MG
5499
5500failed:
5501 page = __alloc_pages(gfp, 0, preferred_nid, nodemask);
5502 if (page) {
0f87d9d3
MG
5503 if (page_list)
5504 list_add(&page->lru, page_list);
5505 else
5506 page_array[nr_populated] = page;
5507 nr_populated++;
387ba26f
MG
5508 }
5509
06147843 5510 goto out;
387ba26f
MG
5511}
5512EXPORT_SYMBOL_GPL(__alloc_pages_bulk);
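/*
 * Illustrative sketch (hypothetical caller, not part of the allocator):
 * filling an array of page pointers with the bulk API documented above.
 * Only NULL slots in the array are populated, and a partial result is
 * possible, so the caller checks the returned count.
 */
static int example_fill_page_array(struct page **pages, int nr)
{
        unsigned long filled;

        filled = __alloc_pages_bulk(GFP_KERNEL, numa_mem_id(), NULL, nr,
                                    NULL, pages);
        return filled < (unsigned long)nr ? -ENOMEM : 0;
}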
5513
9cd75558
MG
5514/*
5515 * This is the 'heart' of the zoned buddy allocator.
5516 */
84172f4b 5517struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid,
04ec6264 5518 nodemask_t *nodemask)
9cd75558
MG
5519{
5520 struct page *page;
5521 unsigned int alloc_flags = ALLOC_WMARK_LOW;
8e6a930b 5522 gfp_t alloc_gfp; /* The gfp_t that was actually used for allocation */
9cd75558
MG
5523 struct alloc_context ac = { };
5524
c63ae43b
MH
5525 /*
5526 * There are several places where we assume that the order value is sane
5527 * so bail out early if the request is out of bounds.
5528 */
3f913fc5 5529 if (WARN_ON_ONCE_GFP(order >= MAX_ORDER, gfp))
c63ae43b 5530 return NULL;
c63ae43b 5531
6e5e0f28 5532 gfp &= gfp_allowed_mask;
da6df1b0
PT
5533 /*
5534 * Apply scoped allocation constraints. This is mainly about GFP_NOFS
5535 * resp. GFP_NOIO which has to be inherited for all allocation requests
5536 * from a particular context which has been marked by
8e3560d9
PT
5537 * memalloc_no{fs,io}_{save,restore}. And PF_MEMALLOC_PIN which ensures
5538 * movable zones are not used during allocation.
da6df1b0
PT
5539 */
5540 gfp = current_gfp_context(gfp);
6e5e0f28
MWO
5541 alloc_gfp = gfp;
5542 if (!prepare_alloc_pages(gfp, order, preferred_nid, nodemask, &ac,
8e6a930b 5543 &alloc_gfp, &alloc_flags))
9cd75558
MG
5544 return NULL;
5545
6bb15450
MG
5546 /*
5547 * Forbid the first pass from falling back to types that fragment
5548 * memory until all local zones are considered.
5549 */
6e5e0f28 5550 alloc_flags |= alloc_flags_nofragment(ac.preferred_zoneref->zone, gfp);
6bb15450 5551
5117f45d 5552 /* First allocation attempt */
8e6a930b 5553 page = get_page_from_freelist(alloc_gfp, order, alloc_flags, &ac);
4fcb0971
MG
5554 if (likely(page))
5555 goto out;
11e33f6a 5556
da6df1b0 5557 alloc_gfp = gfp;
4fcb0971 5558 ac.spread_dirty_pages = false;
23f086f9 5559
4741526b
MG
5560 /*
5561 * Restore the original nodemask if it was potentially replaced with
5562 * &cpuset_current_mems_allowed to optimize the fast-path attempt.
5563 */
97ce86f9 5564 ac.nodemask = nodemask;
16096c25 5565
8e6a930b 5566 page = __alloc_pages_slowpath(alloc_gfp, order, &ac);
cc9a6c87 5567
4fcb0971 5568out:
6e5e0f28
MWO
5569 if (memcg_kmem_enabled() && (gfp & __GFP_ACCOUNT) && page &&
5570 unlikely(__memcg_kmem_charge_page(page, gfp, order) != 0)) {
c4159a75
VD
5571 __free_pages(page, order);
5572 page = NULL;
4949148a
VD
5573 }
5574
8e6a930b 5575 trace_mm_page_alloc(page, order, alloc_gfp, ac.migratetype);
b073d7f8 5576 kmsan_alloc_page(page, order, alloc_gfp);
4fcb0971 5577
11e33f6a 5578 return page;
1da177e4 5579}
84172f4b 5580EXPORT_SYMBOL(__alloc_pages);
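/*
 * Illustrative sketch (hypothetical helper): most callers reach this entry
 * point through the alloc_pages()/alloc_pages_node() wrappers. An order-1
 * request returns two physically contiguous, zeroed pages here.
 */
static void *example_alloc_two_zeroed_pages(void)
{
        struct page *page = alloc_pages(GFP_KERNEL | __GFP_ZERO, 1);

        return page ? page_address(page) : NULL;
}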
1da177e4 5581
cc09cb13
MWO
5582struct folio *__folio_alloc(gfp_t gfp, unsigned int order, int preferred_nid,
5583 nodemask_t *nodemask)
5584{
5585 struct page *page = __alloc_pages(gfp | __GFP_COMP, order,
5586 preferred_nid, nodemask);
5587
5588 if (page && order > 1)
5589 prep_transhuge_page(page);
5590 return (struct folio *)page;
5591}
5592EXPORT_SYMBOL(__folio_alloc);
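/*
 * Illustrative sketch (hypothetical helpers): the usual entry point is the
 * folio_alloc() wrapper, and a folio reference is dropped with folio_put().
 */
static struct folio *example_get_folio(void)
{
        return folio_alloc(GFP_KERNEL, 0);
}

static void example_put_folio(struct folio *folio)
{
        folio_put(folio);
}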
5593
1da177e4 5594/*
9ea9a680
MH
5595 * Common helper functions. Never use with __GFP_HIGHMEM because the returned
5596 * address cannot represent highmem pages. Use alloc_pages and then kmap if
5597 * you need to access high mem.
1da177e4 5598 */
920c7a5d 5599unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
1da177e4 5600{
945a1113
AM
5601 struct page *page;
5602
9ea9a680 5603 page = alloc_pages(gfp_mask & ~__GFP_HIGHMEM, order);
1da177e4
LT
5604 if (!page)
5605 return 0;
5606 return (unsigned long) page_address(page);
5607}
1da177e4
LT
5608EXPORT_SYMBOL(__get_free_pages);
5609
920c7a5d 5610unsigned long get_zeroed_page(gfp_t gfp_mask)
1da177e4 5611{
945a1113 5612 return __get_free_pages(gfp_mask | __GFP_ZERO, 0);
1da177e4 5613}
1da177e4
LT
5614EXPORT_SYMBOL(get_zeroed_page);
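/*
 * Illustrative sketch (hypothetical helper): a zeroed lowmem scratch page.
 * __GFP_HIGHMEM must not be used with these helpers because they return a
 * kernel virtual address, as noted above.
 */
static int example_use_scratch_page(void)
{
        unsigned long scratch = get_zeroed_page(GFP_KERNEL);

        if (!scratch)
                return -ENOMEM;
        /* ... use the PAGE_SIZE buffer at (void *)scratch ... */
        free_page(scratch);
        return 0;
}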
5615
7f194fbb
MWO
5616/**
5617 * __free_pages - Free pages allocated with alloc_pages().
5618 * @page: The page pointer returned from alloc_pages().
5619 * @order: The order of the allocation.
5620 *
5621 * This function can free multi-page allocations that are not compound
5622 * pages. It does not check that the @order passed in matches that of
5623 * the allocation, so it is easy to leak memory. Freeing more memory
5624 * than was allocated will probably emit a warning.
5625 *
5626 * If the last reference to this page is speculative, it will be released
5627 * by put_page() which only frees the first page of a non-compound
5628 * allocation. To prevent the remaining pages from being leaked, we free
5629 * the subsequent pages here. If you want to use the page's reference
5630 * count to decide when to free the allocation, you should allocate a
5631 * compound page, and use put_page() instead of __free_pages().
5632 *
5633 * Context: May be called in interrupt context or while holding a normal
5634 * spinlock, but not in NMI context or while holding a raw spinlock.
5635 */
742aa7fb
AL
5636void __free_pages(struct page *page, unsigned int order)
5637{
5638 if (put_page_testzero(page))
5639 free_the_page(page, order);
e320d301
MWO
5640 else if (!PageHead(page))
5641 while (order-- > 0)
5642 free_the_page(page + (1 << order), order);
742aa7fb 5643}
1da177e4
LT
5644EXPORT_SYMBOL(__free_pages);
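/*
 * Illustrative sketch (hypothetical helper) of the pairing described in the
 * comment above: a non-compound order-2 allocation is released with
 * __free_pages(), not put_page(), which would only free the head page.
 */
static void example_alloc_and_free_order2(void)
{
        struct page *page = alloc_pages(GFP_KERNEL, 2);

        if (page)
                __free_pages(page, 2);
}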
5645
920c7a5d 5646void free_pages(unsigned long addr, unsigned int order)
1da177e4
LT
5647{
5648 if (addr != 0) {
725d704e 5649 VM_BUG_ON(!virt_addr_valid((void *)addr));
1da177e4
LT
5650 __free_pages(virt_to_page((void *)addr), order);
5651 }
5652}
5653
5654EXPORT_SYMBOL(free_pages);
5655
b63ae8ca
AD
5656/*
5657 * Page Fragment:
5658 * An arbitrary-length arbitrary-offset area of memory which resides
5659 * within a 0 or higher order page. Multiple fragments within that page
5660 * are individually refcounted, in the page's reference counter.
5661 *
5662 * The page_frag functions below provide a simple allocation framework for
5663 * page fragments. This is used by the network stack and network device
5664 * drivers to provide a backing region of memory for use as either an
5665 * sk_buff->head, or to be used in the "frags" portion of skb_shared_info.
5666 */
2976db80
AD
5667static struct page *__page_frag_cache_refill(struct page_frag_cache *nc,
5668 gfp_t gfp_mask)
b63ae8ca
AD
5669{
5670 struct page *page = NULL;
5671 gfp_t gfp = gfp_mask;
5672
5673#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
5674 gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY |
5675 __GFP_NOMEMALLOC;
5676 page = alloc_pages_node(NUMA_NO_NODE, gfp_mask,
5677 PAGE_FRAG_CACHE_MAX_ORDER);
5678 nc->size = page ? PAGE_FRAG_CACHE_MAX_SIZE : PAGE_SIZE;
5679#endif
5680 if (unlikely(!page))
5681 page = alloc_pages_node(NUMA_NO_NODE, gfp, 0);
5682
5683 nc->va = page ? page_address(page) : NULL;
5684
5685 return page;
5686}
5687
2976db80 5688void __page_frag_cache_drain(struct page *page, unsigned int count)
44fdffd7
AD
5689{
5690 VM_BUG_ON_PAGE(page_ref_count(page) == 0, page);
5691
742aa7fb
AL
5692 if (page_ref_sub_and_test(page, count))
5693 free_the_page(page, compound_order(page));
44fdffd7 5694}
2976db80 5695EXPORT_SYMBOL(__page_frag_cache_drain);
44fdffd7 5696
b358e212
KH
5697void *page_frag_alloc_align(struct page_frag_cache *nc,
5698 unsigned int fragsz, gfp_t gfp_mask,
5699 unsigned int align_mask)
b63ae8ca
AD
5700{
5701 unsigned int size = PAGE_SIZE;
5702 struct page *page;
5703 int offset;
5704
5705 if (unlikely(!nc->va)) {
5706refill:
2976db80 5707 page = __page_frag_cache_refill(nc, gfp_mask);
b63ae8ca
AD
5708 if (!page)
5709 return NULL;
5710
5711#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
5712 /* if size can vary use size else just use PAGE_SIZE */
5713 size = nc->size;
5714#endif
5715 /* Even if we own the page, we do not use atomic_set().
5716 * This would break get_page_unless_zero() users.
5717 */
86447726 5718 page_ref_add(page, PAGE_FRAG_CACHE_MAX_SIZE);
b63ae8ca
AD
5719
5720 /* reset page count bias and offset to start of new frag */
2f064f34 5721 nc->pfmemalloc = page_is_pfmemalloc(page);
86447726 5722 nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
b63ae8ca
AD
5723 nc->offset = size;
5724 }
5725
5726 offset = nc->offset - fragsz;
5727 if (unlikely(offset < 0)) {
5728 page = virt_to_page(nc->va);
5729
fe896d18 5730 if (!page_ref_sub_and_test(page, nc->pagecnt_bias))
b63ae8ca
AD
5731 goto refill;
5732
d8c19014
DZ
5733 if (unlikely(nc->pfmemalloc)) {
5734 free_the_page(page, compound_order(page));
5735 goto refill;
5736 }
5737
b63ae8ca
AD
5738#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
5739 /* if size can vary use size else just use PAGE_SIZE */
5740 size = nc->size;
5741#endif
5742 /* OK, page count is 0, we can safely set it */
86447726 5743 set_page_count(page, PAGE_FRAG_CACHE_MAX_SIZE + 1);
b63ae8ca
AD
5744
5745 /* reset page count bias and offset to start of new frag */
86447726 5746 nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
b63ae8ca 5747 offset = size - fragsz;
dac22531
ML
5748 if (unlikely(offset < 0)) {
5749 /*
5750 * The caller is trying to allocate a fragment
5751 * with fragsz > PAGE_SIZE but the cache isn't big
5752 * enough to satisfy the request, this may
5753 * happen in low memory conditions.
5754 * We don't release the cache page because
5755 * it could make memory pressure worse
5756 * so we simply return NULL here.
5757 */
5758 return NULL;
5759 }
b63ae8ca
AD
5760 }
5761
5762 nc->pagecnt_bias--;
b358e212 5763 offset &= align_mask;
b63ae8ca
AD
5764 nc->offset = offset;
5765
5766 return nc->va + offset;
5767}
b358e212 5768EXPORT_SYMBOL(page_frag_alloc_align);
b63ae8ca
AD
5769
5770/*
5771 * Frees a page fragment allocated out of either a compound or order 0 page.
5772 */
8c2dd3e4 5773void page_frag_free(void *addr)
b63ae8ca
AD
5774{
5775 struct page *page = virt_to_head_page(addr);
5776
742aa7fb
AL
5777 if (unlikely(put_page_testzero(page)))
5778 free_the_page(page, compound_order(page));
b63ae8ca 5779}
8c2dd3e4 5780EXPORT_SYMBOL(page_frag_free);
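/*
 * Illustrative sketch (hypothetical cache and helpers) of the page_frag API
 * described above: small refcounted fragments are carved out of a cached
 * page and released individually with page_frag_free().
 */
static struct page_frag_cache example_frag_cache;

static void *example_get_frag(void)
{
        /* a 256-byte fragment carved out of the cache's current page */
        return page_frag_alloc(&example_frag_cache, 256, GFP_ATOMIC);
}

static void example_put_frag(void *frag)
{
        page_frag_free(frag);
}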
b63ae8ca 5781
d00181b9
KS
5782static void *make_alloc_exact(unsigned long addr, unsigned int order,
5783 size_t size)
ee85c2e1
AK
5784{
5785 if (addr) {
5786 unsigned long alloc_end = addr + (PAGE_SIZE << order);
5787 unsigned long used = addr + PAGE_ALIGN(size);
5788
5789 split_page(virt_to_page((void *)addr), order);
5790 while (used < alloc_end) {
5791 free_page(used);
5792 used += PAGE_SIZE;
5793 }
5794 }
5795 return (void *)addr;
5796}
5797
2be0ffe2
TT
5798/**
5799 * alloc_pages_exact - allocate an exact number physically-contiguous pages.
5800 * @size: the number of bytes to allocate
63931eb9 5801 * @gfp_mask: GFP flags for the allocation, must not contain __GFP_COMP
2be0ffe2
TT
5802 *
5803 * This function is similar to alloc_pages(), except that it allocates the
5804 * minimum number of pages to satisfy the request. alloc_pages() can only
5805 * allocate memory in power-of-two pages.
5806 *
5807 * This function is also limited by MAX_ORDER.
5808 *
5809 * Memory allocated by this function must be released by free_pages_exact().
a862f68a
MR
5810 *
5811 * Return: pointer to the allocated area or %NULL in case of error.
2be0ffe2
TT
5812 */
5813void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
5814{
5815 unsigned int order = get_order(size);
5816 unsigned long addr;
5817
ba7f1b9e
ML
5818 if (WARN_ON_ONCE(gfp_mask & (__GFP_COMP | __GFP_HIGHMEM)))
5819 gfp_mask &= ~(__GFP_COMP | __GFP_HIGHMEM);
63931eb9 5820
2be0ffe2 5821 addr = __get_free_pages(gfp_mask, order);
ee85c2e1 5822 return make_alloc_exact(addr, order, size);
2be0ffe2
TT
5823}
5824EXPORT_SYMBOL(alloc_pages_exact);
5825
ee85c2e1
AK
5826/**
5827 * alloc_pages_exact_nid - allocate an exact number of physically-contiguous
5828 * pages on a node.
b5e6ab58 5829 * @nid: the preferred node ID where memory should be allocated
ee85c2e1 5830 * @size: the number of bytes to allocate
63931eb9 5831 * @gfp_mask: GFP flags for the allocation, must not contain __GFP_COMP
ee85c2e1
AK
5832 *
5833 * Like alloc_pages_exact(), but try to allocate on node nid first before falling
5834 * back.
a862f68a
MR
5835 *
5836 * Return: pointer to the allocated area or %NULL in case of error.
ee85c2e1 5837 */
e1931811 5838void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask)
ee85c2e1 5839{
d00181b9 5840 unsigned int order = get_order(size);
63931eb9
VB
5841 struct page *p;
5842
ba7f1b9e
ML
5843 if (WARN_ON_ONCE(gfp_mask & (__GFP_COMP | __GFP_HIGHMEM)))
5844 gfp_mask &= ~(__GFP_COMP | __GFP_HIGHMEM);
63931eb9
VB
5845
5846 p = alloc_pages_node(nid, gfp_mask, order);
ee85c2e1
AK
5847 if (!p)
5848 return NULL;
5849 return make_alloc_exact((unsigned long)page_address(p), order, size);
5850}
ee85c2e1 5851
2be0ffe2
TT
5852/**
5853 * free_pages_exact - release memory allocated via alloc_pages_exact()
5854 * @virt: the value returned by alloc_pages_exact.
5855 * @size: size of allocation, same value as passed to alloc_pages_exact().
5856 *
5857 * Release the memory allocated by a previous call to alloc_pages_exact.
5858 */
5859void free_pages_exact(void *virt, size_t size)
5860{
5861 unsigned long addr = (unsigned long)virt;
5862 unsigned long end = addr + PAGE_ALIGN(size);
5863
5864 while (addr < end) {
5865 free_page(addr);
5866 addr += PAGE_SIZE;
5867 }
5868}
5869EXPORT_SYMBOL(free_pages_exact);
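/*
 * Illustrative sketch (hypothetical helpers): a 10 KiB buffer costs three
 * pages via alloc_pages_exact(), while an order-2 alloc_pages() call would
 * consume four. The allocation must be released with free_pages_exact().
 */
static void *example_alloc_10k(void)
{
        return alloc_pages_exact(10 * 1024, GFP_KERNEL | __GFP_ZERO);
}

static void example_free_10k(void *buf)
{
        free_pages_exact(buf, 10 * 1024);
}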
5870
e0fb5815
ZY
5871/**
5872 * nr_free_zone_pages - count number of pages beyond high watermark
5873 * @offset: The zone index of the highest zone
5874 *
a862f68a 5875 * nr_free_zone_pages() counts the number of pages which are beyond the
e0fb5815
ZY
5876 * high watermark within all zones at or below a given zone index. For each
5877 * zone, the number of pages is calculated as:
0e056eb5 5878 *
5879 * nr_free_zone_pages = managed_pages - high_pages
a862f68a
MR
5880 *
5881 * Return: number of pages beyond high watermark.
e0fb5815 5882 */
ebec3862 5883static unsigned long nr_free_zone_pages(int offset)
1da177e4 5884{
dd1a239f 5885 struct zoneref *z;
54a6eb5c
MG
5886 struct zone *zone;
5887
e310fd43 5888 /* Just pick one node, since fallback list is circular */
ebec3862 5889 unsigned long sum = 0;
1da177e4 5890
0e88460d 5891 struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);
1da177e4 5892
54a6eb5c 5893 for_each_zone_zonelist(zone, z, zonelist, offset) {
9705bea5 5894 unsigned long size = zone_managed_pages(zone);
41858966 5895 unsigned long high = high_wmark_pages(zone);
e310fd43
MB
5896 if (size > high)
5897 sum += size - high;
1da177e4
LT
5898 }
5899
5900 return sum;
5901}
5902
e0fb5815
ZY
5903/**
5904 * nr_free_buffer_pages - count number of pages beyond high watermark
5905 *
5906 * nr_free_buffer_pages() counts the number of pages which are beyond the high
5907 * watermark within ZONE_DMA and ZONE_NORMAL.
a862f68a
MR
5908 *
5909 * Return: number of pages beyond high watermark within ZONE_DMA and
5910 * ZONE_NORMAL.
1da177e4 5911 */
ebec3862 5912unsigned long nr_free_buffer_pages(void)
1da177e4 5913{
af4ca457 5914 return nr_free_zone_pages(gfp_zone(GFP_USER));
1da177e4 5915}
c2f1a551 5916EXPORT_SYMBOL_GPL(nr_free_buffer_pages);
1da177e4 5917
08e0f6a9 5918static inline void show_node(struct zone *zone)
1da177e4 5919{
e5adfffc 5920 if (IS_ENABLED(CONFIG_NUMA))
25ba77c1 5921 printk("Node %d ", zone_to_nid(zone));
1da177e4 5922}
1da177e4 5923
d02bd27b
IR
5924long si_mem_available(void)
5925{
5926 long available;
5927 unsigned long pagecache;
5928 unsigned long wmark_low = 0;
5929 unsigned long pages[NR_LRU_LISTS];
b29940c1 5930 unsigned long reclaimable;
d02bd27b
IR
5931 struct zone *zone;
5932 int lru;
5933
5934 for (lru = LRU_BASE; lru < NR_LRU_LISTS; lru++)
2f95ff90 5935 pages[lru] = global_node_page_state(NR_LRU_BASE + lru);
d02bd27b
IR
5936
5937 for_each_zone(zone)
a9214443 5938 wmark_low += low_wmark_pages(zone);
d02bd27b
IR
5939
5940 /*
5941 * Estimate the amount of memory available for userspace allocations,
ade63b41 5942 * without causing swapping or OOM.
d02bd27b 5943 */
c41f012a 5944 available = global_zone_page_state(NR_FREE_PAGES) - totalreserve_pages;
d02bd27b
IR
5945
5946 /*
5947 * Not all the page cache can be freed, otherwise the system will
ade63b41
YY
5948 * start swapping or thrashing. Assume at least half of the page
5949 * cache, or the low watermark worth of cache, needs to stay.
d02bd27b
IR
5950 */
5951 pagecache = pages[LRU_ACTIVE_FILE] + pages[LRU_INACTIVE_FILE];
5952 pagecache -= min(pagecache / 2, wmark_low);
5953 available += pagecache;
5954
5955 /*
b29940c1
VB
5956 * Part of the reclaimable slab and other kernel memory consists of
5957 * items that are in use, and cannot be freed. Cap this estimate at the
5958 * low watermark.
d02bd27b 5959 */
d42f3245
RG
5960 reclaimable = global_node_page_state_pages(NR_SLAB_RECLAIMABLE_B) +
5961 global_node_page_state(NR_KERNEL_MISC_RECLAIMABLE);
b29940c1 5962 available += reclaimable - min(reclaimable / 2, wmark_low);
034ebf65 5963
d02bd27b
IR
5964 if (available < 0)
5965 available = 0;
5966 return available;
5967}
5968EXPORT_SYMBOL_GPL(si_mem_available);
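/*
 * Illustrative sketch (hypothetical helper): si_mem_available() reports an
 * estimate in pages, so callers that want bytes scale by PAGE_SIZE.
 */
static u64 example_mem_available_bytes(void)
{
        return (u64)si_mem_available() << PAGE_SHIFT;
}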
5969
1da177e4
LT
5970void si_meminfo(struct sysinfo *val)
5971{
ca79b0c2 5972 val->totalram = totalram_pages();
11fb9989 5973 val->sharedram = global_node_page_state(NR_SHMEM);
c41f012a 5974 val->freeram = global_zone_page_state(NR_FREE_PAGES);
1da177e4 5975 val->bufferram = nr_blockdev_pages();
ca79b0c2 5976 val->totalhigh = totalhigh_pages();
1da177e4 5977 val->freehigh = nr_free_highpages();
1da177e4
LT
5978 val->mem_unit = PAGE_SIZE;
5979}
5980
5981EXPORT_SYMBOL(si_meminfo);
5982
5983#ifdef CONFIG_NUMA
5984void si_meminfo_node(struct sysinfo *val, int nid)
5985{
cdd91a77
JL
5986 int zone_type; /* needs to be signed */
5987 unsigned long managed_pages = 0;
fc2bd799
JK
5988 unsigned long managed_highpages = 0;
5989 unsigned long free_highpages = 0;
1da177e4
LT
5990 pg_data_t *pgdat = NODE_DATA(nid);
5991
cdd91a77 5992 for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++)
9705bea5 5993 managed_pages += zone_managed_pages(&pgdat->node_zones[zone_type]);
cdd91a77 5994 val->totalram = managed_pages;
11fb9989 5995 val->sharedram = node_page_state(pgdat, NR_SHMEM);
75ef7184 5996 val->freeram = sum_zone_node_page_state(nid, NR_FREE_PAGES);
98d2b0eb 5997#ifdef CONFIG_HIGHMEM
fc2bd799
JK
5998 for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
5999 struct zone *zone = &pgdat->node_zones[zone_type];
6000
6001 if (is_highmem(zone)) {
9705bea5 6002 managed_highpages += zone_managed_pages(zone);
fc2bd799
JK
6003 free_highpages += zone_page_state(zone, NR_FREE_PAGES);
6004 }
6005 }
6006 val->totalhigh = managed_highpages;
6007 val->freehigh = free_highpages;
98d2b0eb 6008#else
fc2bd799
JK
6009 val->totalhigh = managed_highpages;
6010 val->freehigh = free_highpages;
98d2b0eb 6011#endif
1da177e4
LT
6012 val->mem_unit = PAGE_SIZE;
6013}
6014#endif
6015
ddd588b5 6016/*
7bf02ea2
DR
6017 * Determine whether the node should be displayed or not, depending on whether
6018 * SHOW_MEM_FILTER_NODES was passed to show_free_areas().
ddd588b5 6019 */
9af744d7 6020static bool show_mem_node_skip(unsigned int flags, int nid, nodemask_t *nodemask)
ddd588b5 6021{
ddd588b5 6022 if (!(flags & SHOW_MEM_FILTER_NODES))
9af744d7 6023 return false;
ddd588b5 6024
9af744d7
MH
6025 /*
6026 * no node mask - aka implicit memory numa policy. Do not bother with
6027 * the synchronization - read_mems_allowed_begin - because we do not
6028 * have to be precise here.
6029 */
6030 if (!nodemask)
6031 nodemask = &cpuset_current_mems_allowed;
6032
6033 return !node_isset(nid, *nodemask);
ddd588b5
DR
6034}
6035
1da177e4
LT
6036#define K(x) ((x) << (PAGE_SHIFT-10))
6037
377e4f16
RV
6038static void show_migration_types(unsigned char type)
6039{
6040 static const char types[MIGRATE_TYPES] = {
6041 [MIGRATE_UNMOVABLE] = 'U',
377e4f16 6042 [MIGRATE_MOVABLE] = 'M',
475a2f90
VB
6043 [MIGRATE_RECLAIMABLE] = 'E',
6044 [MIGRATE_HIGHATOMIC] = 'H',
377e4f16
RV
6045#ifdef CONFIG_CMA
6046 [MIGRATE_CMA] = 'C',
6047#endif
194159fb 6048#ifdef CONFIG_MEMORY_ISOLATION
377e4f16 6049 [MIGRATE_ISOLATE] = 'I',
194159fb 6050#endif
377e4f16
RV
6051 };
6052 char tmp[MIGRATE_TYPES + 1];
6053 char *p = tmp;
6054 int i;
6055
6056 for (i = 0; i < MIGRATE_TYPES; i++) {
6057 if (type & (1 << i))
6058 *p++ = types[i];
6059 }
6060
6061 *p = '\0';
1f84a18f 6062 printk(KERN_CONT "(%s) ", tmp);
377e4f16
RV
6063}
6064
974f4367
MH
6065static bool node_has_managed_zones(pg_data_t *pgdat, int max_zone_idx)
6066{
6067 int zone_idx;
6068 for (zone_idx = 0; zone_idx <= max_zone_idx; zone_idx++)
6069 if (zone_managed_pages(pgdat->node_zones + zone_idx))
6070 return true;
6071 return false;
6072}
6073
1da177e4
LT
6074/*
6075 * Show free area list (used inside shift_scroll-lock stuff)
6076 * We also calculate the percentage fragmentation. We do this by counting the
6077 * memory on each free list with the exception of the first item on the list.
d1bfcdb8
KK
6078 *
6079 * Bits in @filter:
6080 * SHOW_MEM_FILTER_NODES: suppress nodes that are not allowed by current's
6081 * cpuset.
1da177e4 6082 */
974f4367 6083void __show_free_areas(unsigned int filter, nodemask_t *nodemask, int max_zone_idx)
1da177e4 6084{
d1bfcdb8 6085 unsigned long free_pcp = 0;
dcadcf1c 6086 int cpu, nid;
1da177e4 6087 struct zone *zone;
599d0c95 6088 pg_data_t *pgdat;
1da177e4 6089
ee99c71c 6090 for_each_populated_zone(zone) {
974f4367
MH
6091 if (zone_idx(zone) > max_zone_idx)
6092 continue;
9af744d7 6093 if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
ddd588b5 6094 continue;
d1bfcdb8 6095
761b0677 6096 for_each_online_cpu(cpu)
28f836b6 6097 free_pcp += per_cpu_ptr(zone->per_cpu_pageset, cpu)->count;
1da177e4
LT
6098 }
6099
a731286d
KM
6100 printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n"
6101 " active_file:%lu inactive_file:%lu isolated_file:%lu\n"
8d92890b 6102 " unevictable:%lu dirty:%lu writeback:%lu\n"
d1bfcdb8 6103 " slab_reclaimable:%lu slab_unreclaimable:%lu\n"
d1ce749a 6104 " mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n"
eb2169ce 6105 " kernel_misc_reclaimable:%lu\n"
d1bfcdb8 6106 " free:%lu free_pcp:%lu free_cma:%lu\n",
599d0c95
MG
6107 global_node_page_state(NR_ACTIVE_ANON),
6108 global_node_page_state(NR_INACTIVE_ANON),
6109 global_node_page_state(NR_ISOLATED_ANON),
6110 global_node_page_state(NR_ACTIVE_FILE),
6111 global_node_page_state(NR_INACTIVE_FILE),
6112 global_node_page_state(NR_ISOLATED_FILE),
6113 global_node_page_state(NR_UNEVICTABLE),
11fb9989
MG
6114 global_node_page_state(NR_FILE_DIRTY),
6115 global_node_page_state(NR_WRITEBACK),
d42f3245
RG
6116 global_node_page_state_pages(NR_SLAB_RECLAIMABLE_B),
6117 global_node_page_state_pages(NR_SLAB_UNRECLAIMABLE_B),
50658e2e 6118 global_node_page_state(NR_FILE_MAPPED),
11fb9989 6119 global_node_page_state(NR_SHMEM),
f0c0c115 6120 global_node_page_state(NR_PAGETABLE),
c41f012a 6121 global_zone_page_state(NR_BOUNCE),
eb2169ce 6122 global_node_page_state(NR_KERNEL_MISC_RECLAIMABLE),
c41f012a 6123 global_zone_page_state(NR_FREE_PAGES),
d1bfcdb8 6124 free_pcp,
c41f012a 6125 global_zone_page_state(NR_FREE_CMA_PAGES));
1da177e4 6126
599d0c95 6127 for_each_online_pgdat(pgdat) {
9af744d7 6128 if (show_mem_node_skip(filter, pgdat->node_id, nodemask))
c02e50bb 6129 continue;
974f4367
MH
6130 if (!node_has_managed_zones(pgdat, max_zone_idx))
6131 continue;
c02e50bb 6132
599d0c95
MG
6133 printk("Node %d"
6134 " active_anon:%lukB"
6135 " inactive_anon:%lukB"
6136 " active_file:%lukB"
6137 " inactive_file:%lukB"
6138 " unevictable:%lukB"
6139 " isolated(anon):%lukB"
6140 " isolated(file):%lukB"
50658e2e 6141 " mapped:%lukB"
11fb9989
MG
6142 " dirty:%lukB"
6143 " writeback:%lukB"
6144 " shmem:%lukB"
6145#ifdef CONFIG_TRANSPARENT_HUGEPAGE
6146 " shmem_thp: %lukB"
6147 " shmem_pmdmapped: %lukB"
6148 " anon_thp: %lukB"
6149#endif
6150 " writeback_tmp:%lukB"
991e7673
SB
6151 " kernel_stack:%lukB"
6152#ifdef CONFIG_SHADOW_CALL_STACK
6153 " shadow_call_stack:%lukB"
6154#endif
f0c0c115 6155 " pagetables:%lukB"
599d0c95
MG
6156 " all_unreclaimable? %s"
6157 "\n",
6158 pgdat->node_id,
6159 K(node_page_state(pgdat, NR_ACTIVE_ANON)),
6160 K(node_page_state(pgdat, NR_INACTIVE_ANON)),
6161 K(node_page_state(pgdat, NR_ACTIVE_FILE)),
6162 K(node_page_state(pgdat, NR_INACTIVE_FILE)),
6163 K(node_page_state(pgdat, NR_UNEVICTABLE)),
6164 K(node_page_state(pgdat, NR_ISOLATED_ANON)),
6165 K(node_page_state(pgdat, NR_ISOLATED_FILE)),
50658e2e 6166 K(node_page_state(pgdat, NR_FILE_MAPPED)),
11fb9989
MG
6167 K(node_page_state(pgdat, NR_FILE_DIRTY)),
6168 K(node_page_state(pgdat, NR_WRITEBACK)),
1f06b81a 6169 K(node_page_state(pgdat, NR_SHMEM)),
11fb9989 6170#ifdef CONFIG_TRANSPARENT_HUGEPAGE
57b2847d 6171 K(node_page_state(pgdat, NR_SHMEM_THPS)),
a1528e21 6172 K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED)),
69473e5d 6173 K(node_page_state(pgdat, NR_ANON_THPS)),
11fb9989 6174#endif
11fb9989 6175 K(node_page_state(pgdat, NR_WRITEBACK_TEMP)),
991e7673
SB
6176 node_page_state(pgdat, NR_KERNEL_STACK_KB),
6177#ifdef CONFIG_SHADOW_CALL_STACK
6178 node_page_state(pgdat, NR_KERNEL_SCS_KB),
6179#endif
f0c0c115 6180 K(node_page_state(pgdat, NR_PAGETABLE)),
c73322d0
JW
6181 pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES ?
6182 "yes" : "no");
599d0c95
MG
6183 }
6184
ee99c71c 6185 for_each_populated_zone(zone) {
1da177e4
LT
6186 int i;
6187
974f4367
MH
6188 if (zone_idx(zone) > max_zone_idx)
6189 continue;
9af744d7 6190 if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
ddd588b5 6191 continue;
d1bfcdb8
KK
6192
6193 free_pcp = 0;
6194 for_each_online_cpu(cpu)
28f836b6 6195 free_pcp += per_cpu_ptr(zone->per_cpu_pageset, cpu)->count;
d1bfcdb8 6196
1da177e4 6197 show_node(zone);
1f84a18f
JP
6198 printk(KERN_CONT
6199 "%s"
1da177e4 6200 " free:%lukB"
a6ea8b5b 6201 " boost:%lukB"
1da177e4
LT
6202 " min:%lukB"
6203 " low:%lukB"
6204 " high:%lukB"
e47b346a 6205 " reserved_highatomic:%luKB"
71c799f4
MK
6206 " active_anon:%lukB"
6207 " inactive_anon:%lukB"
6208 " active_file:%lukB"
6209 " inactive_file:%lukB"
6210 " unevictable:%lukB"
5a1c84b4 6211 " writepending:%lukB"
1da177e4 6212 " present:%lukB"
9feedc9d 6213 " managed:%lukB"
4a0aa73f 6214 " mlocked:%lukB"
4a0aa73f 6215 " bounce:%lukB"
d1bfcdb8
KK
6216 " free_pcp:%lukB"
6217 " local_pcp:%ukB"
d1ce749a 6218 " free_cma:%lukB"
1da177e4
LT
6219 "\n",
6220 zone->name,
88f5acf8 6221 K(zone_page_state(zone, NR_FREE_PAGES)),
a6ea8b5b 6222 K(zone->watermark_boost),
41858966
MG
6223 K(min_wmark_pages(zone)),
6224 K(low_wmark_pages(zone)),
6225 K(high_wmark_pages(zone)),
e47b346a 6226 K(zone->nr_reserved_highatomic),
71c799f4
MK
6227 K(zone_page_state(zone, NR_ZONE_ACTIVE_ANON)),
6228 K(zone_page_state(zone, NR_ZONE_INACTIVE_ANON)),
6229 K(zone_page_state(zone, NR_ZONE_ACTIVE_FILE)),
6230 K(zone_page_state(zone, NR_ZONE_INACTIVE_FILE)),
6231 K(zone_page_state(zone, NR_ZONE_UNEVICTABLE)),
5a1c84b4 6232 K(zone_page_state(zone, NR_ZONE_WRITE_PENDING)),
1da177e4 6233 K(zone->present_pages),
9705bea5 6234 K(zone_managed_pages(zone)),
4a0aa73f 6235 K(zone_page_state(zone, NR_MLOCK)),
4a0aa73f 6236 K(zone_page_state(zone, NR_BOUNCE)),
d1bfcdb8 6237 K(free_pcp),
28f836b6 6238 K(this_cpu_read(zone->per_cpu_pageset->count)),
33e077bd 6239 K(zone_page_state(zone, NR_FREE_CMA_PAGES)));
1da177e4
LT
6240 printk("lowmem_reserve[]:");
6241 for (i = 0; i < MAX_NR_ZONES; i++)
1f84a18f
JP
6242 printk(KERN_CONT " %ld", zone->lowmem_reserve[i]);
6243 printk(KERN_CONT "\n");
1da177e4
LT
6244 }
6245
ee99c71c 6246 for_each_populated_zone(zone) {
d00181b9
KS
6247 unsigned int order;
6248 unsigned long nr[MAX_ORDER], flags, total = 0;
377e4f16 6249 unsigned char types[MAX_ORDER];
1da177e4 6250
974f4367
MH
6251 if (zone_idx(zone) > max_zone_idx)
6252 continue;
9af744d7 6253 if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
ddd588b5 6254 continue;
1da177e4 6255 show_node(zone);
1f84a18f 6256 printk(KERN_CONT "%s: ", zone->name);
1da177e4
LT
6257
6258 spin_lock_irqsave(&zone->lock, flags);
6259 for (order = 0; order < MAX_ORDER; order++) {
377e4f16
RV
6260 struct free_area *area = &zone->free_area[order];
6261 int type;
6262
6263 nr[order] = area->nr_free;
8f9de51a 6264 total += nr[order] << order;
377e4f16
RV
6265
6266 types[order] = 0;
6267 for (type = 0; type < MIGRATE_TYPES; type++) {
b03641af 6268 if (!free_area_empty(area, type))
377e4f16
RV
6269 types[order] |= 1 << type;
6270 }
1da177e4
LT
6271 }
6272 spin_unlock_irqrestore(&zone->lock, flags);
377e4f16 6273 for (order = 0; order < MAX_ORDER; order++) {
1f84a18f
JP
6274 printk(KERN_CONT "%lu*%lukB ",
6275 nr[order], K(1UL) << order);
377e4f16
RV
6276 if (nr[order])
6277 show_migration_types(types[order]);
6278 }
1f84a18f 6279 printk(KERN_CONT "= %lukB\n", K(total));
1da177e4
LT
6280 }
6281
dcadcf1c
GL
6282 for_each_online_node(nid) {
6283 if (show_mem_node_skip(filter, nid, nodemask))
6284 continue;
6285 hugetlb_show_meminfo_node(nid);
6286 }
949f7ec5 6287
11fb9989 6288 printk("%ld total pagecache pages\n", global_node_page_state(NR_FILE_PAGES));
e6f3602d 6289
1da177e4
LT
6290 show_swap_cache_info();
6291}
6292
19770b32
MG
6293static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
6294{
6295 zoneref->zone = zone;
6296 zoneref->zone_idx = zone_idx(zone);
6297}
6298
1da177e4
LT
6299/*
6300 * Builds allocation fallback zone lists.
1a93205b
CL
6301 *
6302 * Add all populated zones of a node to the zonelist.
1da177e4 6303 */
9d3be21b 6304static int build_zonerefs_node(pg_data_t *pgdat, struct zoneref *zonerefs)
1da177e4 6305{
1a93205b 6306 struct zone *zone;
bc732f1d 6307 enum zone_type zone_type = MAX_NR_ZONES;
9d3be21b 6308 int nr_zones = 0;
02a68a5e
CL
6309
6310 do {
2f6726e5 6311 zone_type--;
070f8032 6312 zone = pgdat->node_zones + zone_type;
e553f62f 6313 if (populated_zone(zone)) {
9d3be21b 6314 zoneref_set_zone(zone, &zonerefs[nr_zones++]);
070f8032 6315 check_highest_zone(zone_type);
1da177e4 6316 }
2f6726e5 6317 } while (zone_type);
bc732f1d 6318
070f8032 6319 return nr_zones;
1da177e4
LT
6320}
6321
6322#ifdef CONFIG_NUMA
f0c0b2b8
KH
6323
6324static int __parse_numa_zonelist_order(char *s)
6325{
c9bff3ee 6326 /*
f0953a1b 6327 * We used to support different zonelist modes but they turned
c9bff3ee
MH
6328 * out to be just not useful. Let's keep the warning in place
6329 * if somebody still uses the cmd line parameter so that we do
6330 * not fail it silently
6331 */
6332 if (!(*s == 'd' || *s == 'D' || *s == 'n' || *s == 'N')) {
6333 pr_warn("Ignoring unsupported numa_zonelist_order value: %s\n", s);
f0c0b2b8
KH
6334 return -EINVAL;
6335 }
6336 return 0;
6337}
6338
c9bff3ee
MH
6339char numa_zonelist_order[] = "Node";
6340
f0c0b2b8
KH
6341/*
6342 * sysctl handler for numa_zonelist_order
6343 */
cccad5b9 6344int numa_zonelist_order_handler(struct ctl_table *table, int write,
32927393 6345 void *buffer, size_t *length, loff_t *ppos)
f0c0b2b8 6346{
32927393
CH
6347 if (write)
6348 return __parse_numa_zonelist_order(buffer);
6349 return proc_dostring(table, write, buffer, length, ppos);
f0c0b2b8
KH
6350}
6351
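/*
 * Illustrative usage (assumption: the handler above is wired to the
 * "numa_zonelist_order" sysctl, i.e. /proc/sys/vm/numa_zonelist_order):
 *
 *	echo Node > /proc/sys/vm/numa_zonelist_order	# accepted, effectively a no-op
 *	echo Zone > /proc/sys/vm/numa_zonelist_order	# rejected with -EINVAL plus the pr_warn() above
 *
 * Only values starting with 'd'/'D' or 'n'/'N' pass __parse_numa_zonelist_order().
 */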
6352
f0c0b2b8
KH
6353static int node_load[MAX_NUMNODES];
6354
1da177e4 6355/**
4dc3b16b 6356 * find_next_best_node - find the next node that should appear in a given node's fallback list
1da177e4
LT
6357 * @node: node whose fallback list we're appending
6358 * @used_node_mask: nodemask_t of already used nodes
6359 *
6360 * We use a number of factors to determine which is the next node that should
6361 * appear on a given node's fallback list. The node should not have appeared
6362 * already in @node's fallback list, and it should be the next closest node
6363 * according to the distance array (which contains arbitrary distance values
6364 * from each node to each node in the system), and should also prefer nodes
6365 * with no CPUs, since presumably they'll have very little allocation pressure
6366 * on them otherwise.
a862f68a
MR
6367 *
6368 * Return: node id of the found node or %NUMA_NO_NODE if no node is found.
1da177e4 6369 */
79c28a41 6370int find_next_best_node(int node, nodemask_t *used_node_mask)
1da177e4 6371{
4cf808eb 6372 int n, val;
1da177e4 6373 int min_val = INT_MAX;
00ef2d2f 6374 int best_node = NUMA_NO_NODE;
1da177e4 6375
4cf808eb
LT
6376 /* Use the local node if we haven't already */
6377 if (!node_isset(node, *used_node_mask)) {
6378 node_set(node, *used_node_mask);
6379 return node;
6380 }
1da177e4 6381
4b0ef1fe 6382 for_each_node_state(n, N_MEMORY) {
1da177e4
LT
6383
6384 /* Don't want a node to appear more than once */
6385 if (node_isset(n, *used_node_mask))
6386 continue;
6387
1da177e4
LT
6388 /* Use the distance array to find the distance */
6389 val = node_distance(node, n);
6390
4cf808eb
LT
6391 /* Penalize nodes under us ("prefer the next node") */
6392 val += (n < node);
6393
1da177e4 6394 /* Give preference to headless and unused nodes */
b630749f 6395 if (!cpumask_empty(cpumask_of_node(n)))
1da177e4
LT
6396 val += PENALTY_FOR_NODE_WITH_CPUS;
6397
6398 /* Slight preference for less loaded node */
37931324 6399 val *= MAX_NUMNODES;
1da177e4
LT
6400 val += node_load[n];
6401
6402 if (val < min_val) {
6403 min_val = val;
6404 best_node = n;
6405 }
6406 }
6407
6408 if (best_node >= 0)
6409 node_set(best_node, *used_node_mask);
6410
6411 return best_node;
6412}
6413
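/*
 * Worked example (illustrative only, not part of the kernel source): the loop
 * above effectively scores each candidate n as
 *
 *	val = (node_distance(node, n) + (n < node) + cpu_penalty) * MAX_NUMNODES
 *	      + node_load[n]
 *
 * where cpu_penalty is PENALTY_FOR_NODE_WITH_CPUS when n has CPUs. So for
 * node 0 choosing between two equally distant nodes 1 (with CPUs) and
 * 2 (memory-only), node 2 wins: the headless node is preferred, matching the
 * comment above.
 */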
f0c0b2b8
KH
6414
6415/*
6416 * Build zonelists ordered by node and zones within node.
6417 * This results in maximum locality--normal zone overflows into local
6418 * DMA zone, if any--but risks exhausting DMA zone.
6419 */
9d3be21b
MH
6420static void build_zonelists_in_node_order(pg_data_t *pgdat, int *node_order,
6421 unsigned nr_nodes)
1da177e4 6422{
9d3be21b
MH
6423 struct zoneref *zonerefs;
6424 int i;
6425
6426 zonerefs = pgdat->node_zonelists[ZONELIST_FALLBACK]._zonerefs;
6427
6428 for (i = 0; i < nr_nodes; i++) {
6429 int nr_zones;
6430
6431 pg_data_t *node = NODE_DATA(node_order[i]);
f0c0b2b8 6432
9d3be21b
MH
6433 nr_zones = build_zonerefs_node(node, zonerefs);
6434 zonerefs += nr_zones;
6435 }
6436 zonerefs->zone = NULL;
6437 zonerefs->zone_idx = 0;
f0c0b2b8
KH
6438}
6439
523b9458
CL
6440/*
6441 * Build gfp_thisnode zonelists
6442 */
6443static void build_thisnode_zonelists(pg_data_t *pgdat)
6444{
9d3be21b
MH
6445 struct zoneref *zonerefs;
6446 int nr_zones;
523b9458 6447
9d3be21b
MH
6448 zonerefs = pgdat->node_zonelists[ZONELIST_NOFALLBACK]._zonerefs;
6449 nr_zones = build_zonerefs_node(pgdat, zonerefs);
6450 zonerefs += nr_zones;
6451 zonerefs->zone = NULL;
6452 zonerefs->zone_idx = 0;
523b9458
CL
6453}
6454
f0c0b2b8
KH
6455/*
6456 * Build zonelists ordered by zone and nodes within zones.
6457 * This results in conserving DMA zone[s] until all Normal memory is
6458 * exhausted, but results in overflowing to remote node while memory
6459 * may still exist in local DMA zone.
6460 */
f0c0b2b8 6461
f0c0b2b8
KH
6462static void build_zonelists(pg_data_t *pgdat)
6463{
9d3be21b 6464 static int node_order[MAX_NUMNODES];
37931324 6465 int node, nr_nodes = 0;
d0ddf49b 6466 nodemask_t used_mask = NODE_MASK_NONE;
f0c0b2b8 6467 int local_node, prev_node;
1da177e4
LT
6468
6469 /* NUMA-aware ordering of nodes */
6470 local_node = pgdat->node_id;
1da177e4 6471 prev_node = local_node;
f0c0b2b8 6472
f0c0b2b8 6473 memset(node_order, 0, sizeof(node_order));
1da177e4
LT
6474 while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
6475 /*
6476 * We don't want to pressure a particular node.
6477 * So adding penalty to the first node in same
6478 * distance group to make it round-robin.
6479 */
957f822a
DR
6480 if (node_distance(local_node, node) !=
6481 node_distance(local_node, prev_node))
37931324 6482 node_load[node] += 1;
f0c0b2b8 6483
9d3be21b 6484 node_order[nr_nodes++] = node;
1da177e4 6485 prev_node = node;
1da177e4 6486 }
523b9458 6487
9d3be21b 6488 build_zonelists_in_node_order(pgdat, node_order, nr_nodes);
523b9458 6489 build_thisnode_zonelists(pgdat);
6cf25392
BR
6490 pr_info("Fallback order for Node %d: ", local_node);
6491 for (node = 0; node < nr_nodes; node++)
6492 pr_cont("%d ", node_order[node]);
6493 pr_cont("\n");
1da177e4
LT
6494}
6495
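/*
 * Illustrative example (not from a real log): on a two-node machine the loop
 * above would typically print
 *
 *	Fallback order for Node 0: 0 1
 *	Fallback order for Node 1: 1 0
 *
 * i.e. each node prefers itself and then falls back to the remaining nodes in
 * the order chosen by find_next_best_node().
 */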
7aac7898
LS
6496#ifdef CONFIG_HAVE_MEMORYLESS_NODES
6497/*
6498 * Return node id of node used for "local" allocations.
6499 * I.e., first node id of first zone in arg node's generic zonelist.
6500 * Used for initializing percpu 'numa_mem', which is used primarily
6501 * for kernel allocations, so use GFP_KERNEL flags to locate zonelist.
6502 */
6503int local_memory_node(int node)
6504{
c33d6c06 6505 struct zoneref *z;
7aac7898 6506
c33d6c06 6507 z = first_zones_zonelist(node_zonelist(node, GFP_KERNEL),
7aac7898 6508 gfp_zone(GFP_KERNEL),
c33d6c06 6509 NULL);
c1093b74 6510 return zone_to_nid(z->zone);
7aac7898
LS
6511}
6512#endif
f0c0b2b8 6513
6423aa81
JK
6514static void setup_min_unmapped_ratio(void);
6515static void setup_min_slab_ratio(void);
1da177e4
LT
6516#else /* CONFIG_NUMA */
6517
f0c0b2b8 6518static void build_zonelists(pg_data_t *pgdat)
1da177e4 6519{
19655d34 6520 int node, local_node;
9d3be21b
MH
6521 struct zoneref *zonerefs;
6522 int nr_zones;
1da177e4
LT
6523
6524 local_node = pgdat->node_id;
1da177e4 6525
9d3be21b
MH
6526 zonerefs = pgdat->node_zonelists[ZONELIST_FALLBACK]._zonerefs;
6527 nr_zones = build_zonerefs_node(pgdat, zonerefs);
6528 zonerefs += nr_zones;
1da177e4 6529
54a6eb5c
MG
6530 /*
6531 * Now we build the zonelist so that it contains the zones
6532 * of all the other nodes.
6533 * We don't want to pressure a particular node, so when
6534 * building the zones for node N, we make sure that the
6535 * zones coming right after the local ones are those from
6536 * node N+1 (modulo N)
6537 */
6538 for (node = local_node + 1; node < MAX_NUMNODES; node++) {
6539 if (!node_online(node))
6540 continue;
9d3be21b
MH
6541 nr_zones = build_zonerefs_node(NODE_DATA(node), zonerefs);
6542 zonerefs += nr_zones;
1da177e4 6543 }
54a6eb5c
MG
6544 for (node = 0; node < local_node; node++) {
6545 if (!node_online(node))
6546 continue;
9d3be21b
MH
6547 nr_zones = build_zonerefs_node(NODE_DATA(node), zonerefs);
6548 zonerefs += nr_zones;
54a6eb5c
MG
6549 }
6550
9d3be21b
MH
6551 zonerefs->zone = NULL;
6552 zonerefs->zone_idx = 0;
1da177e4
LT
6553}
6554
6555#endif /* CONFIG_NUMA */
6556
99dcc3e5
CL
6557/*
6558 * Boot pageset table. One per cpu which is going to be used for all
6559 * zones and all nodes. The parameters will be set in such a way
6560 * that an item put on a list will immediately be handed over to
6561 * the buddy list. This is safe since pageset manipulation is done
6562 * with interrupts disabled.
6563 *
6564 * The boot_pagesets must be kept even after bootup is complete for
6565 * unused processors and/or zones. They do play a role for bootstrapping
6566 * hotplugged processors.
6567 *
6568 * zoneinfo_show() and maybe other functions do
6569 * not check if the processor is online before following the pageset pointer.
6570 * Other parts of the kernel may not check if the zone is available.
6571 */
28f836b6 6572static void per_cpu_pages_init(struct per_cpu_pages *pcp, struct per_cpu_zonestat *pzstats);
952eaf81
VB
6573/* These effectively disable the pcplists in the boot pageset completely */
6574#define BOOT_PAGESET_HIGH 0
6575#define BOOT_PAGESET_BATCH 1
28f836b6
MG
6576static DEFINE_PER_CPU(struct per_cpu_pages, boot_pageset);
6577static DEFINE_PER_CPU(struct per_cpu_zonestat, boot_zonestats);
6dc2c87a 6578static DEFINE_PER_CPU(struct per_cpu_nodestat, boot_nodestats);
99dcc3e5 6579
11cd8638 6580static void __build_all_zonelists(void *data)
1da177e4 6581{
6811378e 6582 int nid;
afb6ebb3 6583 int __maybe_unused cpu;
9adb62a5 6584 pg_data_t *self = data;
b93e0f32 6585
3d36424b 6586 write_seqlock(&zonelist_update_seq);
9276b1bc 6587
7f9cfb31
BL
6588#ifdef CONFIG_NUMA
6589 memset(node_load, 0, sizeof(node_load));
6590#endif
9adb62a5 6591
c1152583
WY
6592 /*
6593 * This node is hotadded and no memory is yet present. So just
6594 * building zonelists is fine - no need to touch other nodes.
6595 */
9adb62a5
JL
6596 if (self && !node_online(self->node_id)) {
6597 build_zonelists(self);
c1152583 6598 } else {
09f49dca
MH
6599 /*
6600 * All possible nodes have pgdat preallocated
6601 * in free_area_init
6602 */
6603 for_each_node(nid) {
c1152583 6604 pg_data_t *pgdat = NODE_DATA(nid);
7ea1530a 6605
c1152583
WY
6606 build_zonelists(pgdat);
6607 }
99dcc3e5 6608
7aac7898
LS
6609#ifdef CONFIG_HAVE_MEMORYLESS_NODES
6610 /*
6611 * We now know the "local memory node" for each node--
6612 * i.e., the node of the first zone in the generic zonelist.
6613 * Set up numa_mem percpu variable for on-line cpus. During
6614 * boot, only the boot cpu should be on-line; we'll init the
6615 * secondary cpus' numa_mem as they come on-line. During
6616 * node/memory hotplug, we'll fixup all on-line cpus.
6617 */
d9c9a0b9 6618 for_each_online_cpu(cpu)
7aac7898 6619 set_cpu_numa_mem(cpu, local_memory_node(cpu_to_node(cpu)));
afb6ebb3 6620#endif
d9c9a0b9 6621 }
b93e0f32 6622
3d36424b 6623 write_sequnlock(&zonelist_update_seq);
6811378e
YG
6624}
6625
061f67bc
RV
6626static noinline void __init
6627build_all_zonelists_init(void)
6628{
afb6ebb3
MH
6629 int cpu;
6630
061f67bc 6631 __build_all_zonelists(NULL);
afb6ebb3
MH
6632
6633 /*
6634 * Initialize the boot_pagesets that are going to be used
6635 * for bootstrapping processors. The real pagesets for
6636 * each zone will be allocated later when the per cpu
6637 * allocator is available.
6638 *
6639 * boot_pagesets are used also for bootstrapping offline
6640 * cpus if the system is already booted because the pagesets
6641 * are needed to initialize allocators on a specific cpu too.
6642 * F.e. the percpu allocator needs the page allocator which
6643 * needs the percpu allocator in order to allocate its pagesets
6644 * (a chicken-egg dilemma).
6645 */
6646 for_each_possible_cpu(cpu)
28f836b6 6647 per_cpu_pages_init(&per_cpu(boot_pageset, cpu), &per_cpu(boot_zonestats, cpu));
afb6ebb3 6648
061f67bc
RV
6649 mminit_verify_zonelist();
6650 cpuset_init_current_mems_allowed();
6651}
6652
4eaf3f64 6653/*
4eaf3f64 6654 * unless system_state == SYSTEM_BOOTING.
061f67bc 6655 *
72675e13 6656 * __ref due to call of __init annotated helper build_all_zonelists_init
061f67bc 6657 * [protected by SYSTEM_BOOTING].
4eaf3f64 6658 */
72675e13 6659void __ref build_all_zonelists(pg_data_t *pgdat)
6811378e 6660{
0a18e607
DH
6661 unsigned long vm_total_pages;
6662
6811378e 6663 if (system_state == SYSTEM_BOOTING) {
061f67bc 6664 build_all_zonelists_init();
6811378e 6665 } else {
11cd8638 6666 __build_all_zonelists(pgdat);
6811378e
YG
6667 /* cpuset refresh routine should be here */
6668 }
56b9413b
DH
6669 /* Get the number of free pages beyond high watermark in all zones. */
6670 vm_total_pages = nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE));
9ef9acb0
MG
6671 /*
6672 * Disable grouping by mobility if the number of pages in the
6673 * system is too low to allow the mechanism to work. It would be
6674 * more accurate, but expensive to check per-zone. This check is
6675 * made on memory-hotadd so a system can start with mobility
6676 * disabled and enable it later
6677 */
d9c23400 6678 if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES))
9ef9acb0
MG
6679 page_group_by_mobility_disabled = 1;
6680 else
6681 page_group_by_mobility_disabled = 0;
6682
ce0725f7 6683 pr_info("Built %u zonelists, mobility grouping %s. Total pages: %ld\n",
756a025f 6684 nr_online_nodes,
756a025f
JP
6685 page_group_by_mobility_disabled ? "off" : "on",
6686 vm_total_pages);
f0c0b2b8 6687#ifdef CONFIG_NUMA
f88dfff5 6688 pr_info("Policy zone: %s\n", zone_names[policy_zone]);
f0c0b2b8 6689#endif
1da177e4
LT
6690}
6691
a9a9e77f
PT
6692/* If zone is ZONE_MOVABLE but memory is mirrored, it is an overlapped init */
6693static bool __meminit
6694overlap_memmap_init(unsigned long zone, unsigned long *pfn)
6695{
a9a9e77f
PT
6696 static struct memblock_region *r;
6697
6698 if (mirrored_kernelcore && zone == ZONE_MOVABLE) {
6699 if (!r || *pfn >= memblock_region_memory_end_pfn(r)) {
cc6de168 6700 for_each_mem_region(r) {
a9a9e77f
PT
6701 if (*pfn < memblock_region_memory_end_pfn(r))
6702 break;
6703 }
6704 }
6705 if (*pfn >= memblock_region_memory_base_pfn(r) &&
6706 memblock_is_mirror(r)) {
6707 *pfn = memblock_region_memory_end_pfn(r);
6708 return true;
6709 }
6710 }
a9a9e77f
PT
6711 return false;
6712}
6713
1da177e4
LT
6714/*
6715 * Initially all pages are reserved - free ones are freed
c6ffc5ca 6716 * up by memblock_free_all() once the early boot process is
1da177e4 6717 * done. Non-atomic initialization, single-pass.
d882c006
DH
6718 *
6719 * All aligned pageblocks are initialized to the specified migratetype
6720 * (usually MIGRATE_MOVABLE). Besides setting the migratetype, no related
6721 * zone stats (e.g., nr_isolate_pageblock) are touched.
1da177e4 6722 */
ab28cb6e 6723void __meminit memmap_init_range(unsigned long size, int nid, unsigned long zone,
dc2da7b4 6724 unsigned long start_pfn, unsigned long zone_end_pfn,
d882c006
DH
6725 enum meminit_context context,
6726 struct vmem_altmap *altmap, int migratetype)
1da177e4 6727{
a9a9e77f 6728 unsigned long pfn, end_pfn = start_pfn + size;
d0dc12e8 6729 struct page *page;
1da177e4 6730
22b31eec
HD
6731 if (highest_memmap_pfn < end_pfn - 1)
6732 highest_memmap_pfn = end_pfn - 1;
6733
966cf44f 6734#ifdef CONFIG_ZONE_DEVICE
4b94ffdc
DW
6735 /*
6736 * Honor reservation requested by the driver for this ZONE_DEVICE
966cf44f
AD
6737 * memory. We limit the total number of pages to initialize to just
6738 * those that might contain the memory mapping. We will defer the
6739 * ZONE_DEVICE page initialization until after we have released
6740 * the hotplug lock.
4b94ffdc 6741 */
966cf44f
AD
6742 if (zone == ZONE_DEVICE) {
6743 if (!altmap)
6744 return;
6745
6746 if (start_pfn == altmap->base_pfn)
6747 start_pfn += altmap->reserve;
6748 end_pfn = altmap->base_pfn + vmem_altmap_offset(altmap);
6749 }
6750#endif
4b94ffdc 6751
948c436e 6752 for (pfn = start_pfn; pfn < end_pfn; ) {
a2f3aa02 6753 /*
b72d0ffb
AM
6754 * There can be holes in boot-time mem_map[]s handed to this
6755 * function. They do not exist on hotplugged memory.
a2f3aa02 6756 */
c1d0da83 6757 if (context == MEMINIT_EARLY) {
a9a9e77f
PT
6758 if (overlap_memmap_init(zone, &pfn))
6759 continue;
dc2da7b4 6760 if (defer_init(nid, pfn, zone_end_pfn))
a9a9e77f 6761 break;
a2f3aa02 6762 }
ac5d2539 6763
d0dc12e8
PT
6764 page = pfn_to_page(pfn);
6765 __init_single_page(page, pfn, zone, nid);
c1d0da83 6766 if (context == MEMINIT_HOTPLUG)
d483da5b 6767 __SetPageReserved(page);
d0dc12e8 6768
ac5d2539 6769 /*
d882c006
DH
6770 * Usually, we want to mark the pageblock MIGRATE_MOVABLE,
6771 * such that unmovable allocations won't be scattered all
6772 * over the place during system boot.
ac5d2539 6773 */
ee0913c4 6774 if (pageblock_aligned(pfn)) {
d882c006 6775 set_pageblock_migratetype(page, migratetype);
9b6e63cb 6776 cond_resched();
ac5d2539 6777 }
948c436e 6778 pfn++;
1da177e4
LT
6779 }
6780}
6781
966cf44f 6782#ifdef CONFIG_ZONE_DEVICE
46487e00
JM
6783static void __ref __init_zone_device_page(struct page *page, unsigned long pfn,
6784 unsigned long zone_idx, int nid,
6785 struct dev_pagemap *pgmap)
6786{
6787
6788 __init_single_page(page, pfn, zone_idx, nid);
6789
6790 /*
6791 * Mark page reserved as it will need to wait for onlining
6792 * phase for it to be fully associated with a zone.
6793 *
6794 * We can use the non-atomic __set_bit operation for setting
6795 * the flag as we are still initializing the pages.
6796 */
6797 __SetPageReserved(page);
6798
6799 /*
6800 * ZONE_DEVICE pages union ->lru with a ->pgmap back pointer
6801 * and zone_device_data. It is a bug if a ZONE_DEVICE page is
6802 * ever freed or placed on a driver-private list.
6803 */
6804 page->pgmap = pgmap;
6805 page->zone_device_data = NULL;
6806
6807 /*
6808 * Mark the block movable so that blocks are reserved for
6809 * movable at startup. This will force kernel allocations
6810 * to reserve their blocks rather than leaking throughout
6811 * the address space during boot when many long-lived
6812 * kernel allocations are made.
6813 *
6814 * Please note that MEMINIT_HOTPLUG path doesn't clear memmap
6815 * because this is done early in section_activate()
6816 */
ee0913c4 6817 if (pageblock_aligned(pfn)) {
46487e00
JM
6818 set_pageblock_migratetype(page, MIGRATE_MOVABLE);
6819 cond_resched();
6820 }
6821}
6822
6fd3620b
JM
6823/*
6824 * With compound page geometry and when struct pages are stored in RAM, most
6825 * tail pages are reused. Consequently, the amount of unique struct pages to
6826 * initialize is a lot smaller than the total amount of struct pages being
6827 * mapped. This is a paired / mild layering violation with explicit knowledge
6828 * of how the sparse_vmemmap internals handle compound pages in the absence
6829 * of an altmap. See vmemmap_populate_compound_pages().
6830 */
6831static inline unsigned long compound_nr_pages(struct vmem_altmap *altmap,
6832 unsigned long nr_pages)
6833{
6834 return is_power_of_2(sizeof(struct page)) &&
6835 !altmap ? 2 * (PAGE_SIZE / sizeof(struct page)) : nr_pages;
6836}
6837
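/*
 * Worked example (illustrative, assuming a 64-byte struct page and 4KiB
 * PAGE_SIZE): without an altmap only
 *
 *	2 * (PAGE_SIZE / sizeof(struct page)) = 2 * (4096 / 64) = 128
 *
 * struct pages are initialized per compound device page, no matter how big the
 * compound page is; with an altmap (or a non-power-of-2 struct page) every one
 * of nr_pages is initialized.
 */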
c4386bd8
JM
6838static void __ref memmap_init_compound(struct page *head,
6839 unsigned long head_pfn,
6840 unsigned long zone_idx, int nid,
6841 struct dev_pagemap *pgmap,
6842 unsigned long nr_pages)
6843{
6844 unsigned long pfn, end_pfn = head_pfn + nr_pages;
6845 unsigned int order = pgmap->vmemmap_shift;
6846
6847 __SetPageHead(head);
6848 for (pfn = head_pfn + 1; pfn < end_pfn; pfn++) {
6849 struct page *page = pfn_to_page(pfn);
6850
6851 __init_zone_device_page(page, pfn, zone_idx, nid, pgmap);
6852 prep_compound_tail(head, pfn - head_pfn);
6853 set_page_count(page, 0);
6854
6855 /*
6856 * The first tail page stores compound_mapcount_ptr() and
6857 * compound_order() and the second tail page stores
6858 * compound_pincount_ptr(). Call prep_compound_head() after
6859 * the first and second tail pages have been initialized to
6860 * not have the data overwritten.
6861 */
6862 if (pfn == head_pfn + 2)
6863 prep_compound_head(head, order);
6864 }
6865}
6866
966cf44f
AD
6867void __ref memmap_init_zone_device(struct zone *zone,
6868 unsigned long start_pfn,
1f8d75c1 6869 unsigned long nr_pages,
966cf44f
AD
6870 struct dev_pagemap *pgmap)
6871{
1f8d75c1 6872 unsigned long pfn, end_pfn = start_pfn + nr_pages;
966cf44f 6873 struct pglist_data *pgdat = zone->zone_pgdat;
514caf23 6874 struct vmem_altmap *altmap = pgmap_altmap(pgmap);
c4386bd8 6875 unsigned int pfns_per_compound = pgmap_vmemmap_nr(pgmap);
966cf44f
AD
6876 unsigned long zone_idx = zone_idx(zone);
6877 unsigned long start = jiffies;
6878 int nid = pgdat->node_id;
6879
c0352904 6880 if (WARN_ON_ONCE(!pgmap || zone_idx != ZONE_DEVICE))
966cf44f
AD
6881 return;
6882
6883 /*
122e093c 6884 * The call to memmap_init should have already taken care
966cf44f
AD
6885 * of the pages reserved for the memmap, so we can just jump to
6886 * the end of that region and start processing the device pages.
6887 */
514caf23 6888 if (altmap) {
966cf44f 6889 start_pfn = altmap->base_pfn + vmem_altmap_offset(altmap);
1f8d75c1 6890 nr_pages = end_pfn - start_pfn;
966cf44f
AD
6891 }
6892
c4386bd8 6893 for (pfn = start_pfn; pfn < end_pfn; pfn += pfns_per_compound) {
966cf44f
AD
6894 struct page *page = pfn_to_page(pfn);
6895
46487e00 6896 __init_zone_device_page(page, pfn, zone_idx, nid, pgmap);
c4386bd8
JM
6897
6898 if (pfns_per_compound == 1)
6899 continue;
6900
6901 memmap_init_compound(page, pfn, zone_idx, nid, pgmap,
6fd3620b 6902 compound_nr_pages(altmap, pfns_per_compound));
966cf44f
AD
6903 }
6904
fdc029b1 6905 pr_info("%s initialised %lu pages in %ums\n", __func__,
1f8d75c1 6906 nr_pages, jiffies_to_msecs(jiffies - start));
966cf44f
AD
6907}
6908
6909#endif
1e548deb 6910static void __meminit zone_init_free_lists(struct zone *zone)
1da177e4 6911{
7aeb09f9 6912 unsigned int order, t;
b2a0ac88
MG
6913 for_each_migratetype_order(order, t) {
6914 INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
1da177e4
LT
6915 zone->free_area[order].nr_free = 0;
6916 }
6917}
6918
0740a50b
MR
6919/*
6920 * Only struct pages that correspond to ranges defined by memblock.memory
6921 * are zeroed and initialized by going through __init_single_page() during
122e093c 6922 * memmap_init_zone_range().
0740a50b
MR
6923 *
6924 * But, there could be struct pages that correspond to holes in
6925 * memblock.memory. This can happen because of the following reasons:
6926 * - physical memory bank size is not necessarily an exact multiple of the
6927 * arbitrary section size
6928 * - early reserved memory may not be listed in memblock.memory
6929 * - memory layouts defined with memmap= kernel parameter may not align
6930 * nicely with memmap sections
6931 *
6932 * Explicitly initialize those struct pages so that:
6933 * - PG_Reserved is set
6934 * - zone and node links point to zone and node that span the page if the
6935 * hole is in the middle of a zone
6936 * - zone and node links point to adjacent zone/node if the hole falls on
6937 * the zone boundary; the pages in such holes will be prepended to the
6938 * zone/node above the hole except for the trailing pages in the last
6939 * section that will be appended to the zone/node below.
6940 */
122e093c
MR
6941static void __init init_unavailable_range(unsigned long spfn,
6942 unsigned long epfn,
6943 int zone, int node)
0740a50b
MR
6944{
6945 unsigned long pfn;
6946 u64 pgcnt = 0;
6947
6948 for (pfn = spfn; pfn < epfn; pfn++) {
4f9bc69a
KW
6949 if (!pfn_valid(pageblock_start_pfn(pfn))) {
6950 pfn = pageblock_end_pfn(pfn) - 1;
0740a50b
MR
6951 continue;
6952 }
6953 __init_single_page(pfn_to_page(pfn), pfn, zone, node);
6954 __SetPageReserved(pfn_to_page(pfn));
6955 pgcnt++;
6956 }
6957
122e093c
MR
6958 if (pgcnt)
6959 pr_info("On node %d, zone %s: %lld pages in unavailable ranges",
6960 node, zone_names[zone], pgcnt);
0740a50b 6961}
0740a50b 6962
122e093c
MR
6963static void __init memmap_init_zone_range(struct zone *zone,
6964 unsigned long start_pfn,
6965 unsigned long end_pfn,
6966 unsigned long *hole_pfn)
dfb3ccd0 6967{
3256ff83
BH
6968 unsigned long zone_start_pfn = zone->zone_start_pfn;
6969 unsigned long zone_end_pfn = zone_start_pfn + zone->spanned_pages;
122e093c
MR
6970 int nid = zone_to_nid(zone), zone_id = zone_idx(zone);
6971
6972 start_pfn = clamp(start_pfn, zone_start_pfn, zone_end_pfn);
6973 end_pfn = clamp(end_pfn, zone_start_pfn, zone_end_pfn);
6974
6975 if (start_pfn >= end_pfn)
6976 return;
6977
6978 memmap_init_range(end_pfn - start_pfn, nid, zone_id, start_pfn,
6979 zone_end_pfn, MEMINIT_EARLY, NULL, MIGRATE_MOVABLE);
6980
6981 if (*hole_pfn < start_pfn)
6982 init_unavailable_range(*hole_pfn, start_pfn, zone_id, nid);
6983
6984 *hole_pfn = end_pfn;
6985}
6986
6987static void __init memmap_init(void)
6988{
73a6e474 6989 unsigned long start_pfn, end_pfn;
122e093c 6990 unsigned long hole_pfn = 0;
b346075f 6991 int i, j, zone_id = 0, nid;
73a6e474 6992
122e093c
MR
6993 for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
6994 struct pglist_data *node = NODE_DATA(nid);
73a6e474 6995
122e093c
MR
6996 for (j = 0; j < MAX_NR_ZONES; j++) {
6997 struct zone *zone = node->node_zones + j;
0740a50b 6998
122e093c
MR
6999 if (!populated_zone(zone))
7000 continue;
0740a50b 7001
122e093c
MR
7002 memmap_init_zone_range(zone, start_pfn, end_pfn,
7003 &hole_pfn);
7004 zone_id = j;
7005 }
73a6e474 7006 }
0740a50b
MR
7007
7008#ifdef CONFIG_SPARSEMEM
7009 /*
122e093c
MR
7010 * Initialize the memory map for the hole in the range [memory_end,
7011 * section_end].
7012 * Append the pages in this hole to the highest zone in the last
7013 * node.
7014 * The call to init_unavailable_range() is outside the ifdef to
7015 * silence the compiler warning about zone_id set but not used;
7016 * for FLATMEM it is a nop anyway
0740a50b 7017 */
122e093c 7018 end_pfn = round_up(end_pfn, PAGES_PER_SECTION);
0740a50b 7019 if (hole_pfn < end_pfn)
0740a50b 7020#endif
122e093c 7021 init_unavailable_range(hole_pfn, end_pfn, zone_id, nid);
dfb3ccd0 7022}
1da177e4 7023
c803b3c8
MR
7024void __init *memmap_alloc(phys_addr_t size, phys_addr_t align,
7025 phys_addr_t min_addr, int nid, bool exact_nid)
7026{
7027 void *ptr;
7028
7029 if (exact_nid)
7030 ptr = memblock_alloc_exact_nid_raw(size, align, min_addr,
7031 MEMBLOCK_ALLOC_ACCESSIBLE,
7032 nid);
7033 else
7034 ptr = memblock_alloc_try_nid_raw(size, align, min_addr,
7035 MEMBLOCK_ALLOC_ACCESSIBLE,
7036 nid);
7037
7038 if (ptr && size > 0)
7039 page_init_poison(ptr, size);
7040
7041 return ptr;
7042}
7043
7cd2b0a3 7044static int zone_batchsize(struct zone *zone)
e7c8d5c9 7045{
3a6be87f 7046#ifdef CONFIG_MMU
e7c8d5c9
CL
7047 int batch;
7048
7049 /*
b92ca18e
MG
7050 * The number of pages to batch allocate is either ~0.1%
7051 * of the zone or 1MB, whichever is smaller. The batch
7052 * size is striking a balance between allocation latency
7053 * and zone lock contention.
e7c8d5c9 7054 */
c940e020 7055 batch = min(zone_managed_pages(zone) >> 10, SZ_1M / PAGE_SIZE);
e7c8d5c9
CL
7056 batch /= 4; /* We effectively *= 4 below */
7057 if (batch < 1)
7058 batch = 1;
7059
7060 /*
0ceaacc9
NP
7061 * Clamp the batch to a 2^n - 1 value. Having a power
7062 * of 2 value was found to be more likely to have
7063 * suboptimal cache aliasing properties in some cases.
e7c8d5c9 7064 *
0ceaacc9
NP
7065 * For example if 2 tasks are alternately allocating
7066 * batches of pages, one task can end up with a lot
7067 * of pages of one half of the possible page colors
7068 * and the other with pages of the other colors.
e7c8d5c9 7069 */
9155203a 7070 batch = rounddown_pow_of_two(batch + batch/2) - 1;
ba56e91c 7071
e7c8d5c9 7072 return batch;
3a6be87f
DH
7073
7074#else
7075 /* The deferral and batching of frees should be suppressed under NOMMU
7076 * conditions.
7077 *
7078 * The problem is that NOMMU needs to be able to allocate large chunks
7079 * of contiguous memory as there's no hardware page translation to
7080 * assemble apparent contiguous memory from discontiguous pages.
7081 *
7082 * Queueing large contiguous runs of pages for batching, however,
7083 * causes the pages to actually be freed in smaller chunks. As there
7084 * can be a significant delay between the individual batches being
7085 * recycled, this leads to the once large chunks of space being
7086 * fragmented and becoming unavailable for high-order allocations.
7087 */
7088 return 0;
7089#endif
e7c8d5c9
CL
7090}
7091
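/*
 * Worked example (illustrative, assuming 4KiB pages): for a zone with 262144
 * managed pages (1GiB),
 *
 *	batch = min(262144 >> 10, SZ_1M / PAGE_SIZE) = min(256, 256) = 256
 *	batch /= 4                              -> 64
 *	rounddown_pow_of_two(64 + 32) - 1       -> 63
 *
 * so the pcp batch ends up at 63 pages.
 */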
04f8cfea 7092static int zone_highsize(struct zone *zone, int batch, int cpu_online)
b92ca18e
MG
7093{
7094#ifdef CONFIG_MMU
7095 int high;
203c06ee 7096 int nr_split_cpus;
74f44822
MG
7097 unsigned long total_pages;
7098
7099 if (!percpu_pagelist_high_fraction) {
7100 /*
7101 * By default, the high value of the pcp is based on the zone
7102 * low watermark so that if they are full then background
7103 * reclaim will not be started prematurely.
7104 */
7105 total_pages = low_wmark_pages(zone);
7106 } else {
7107 /*
7108 * If percpu_pagelist_high_fraction is configured, the high
7109 * value is based on a fraction of the managed pages in the
7110 * zone.
7111 */
7112 total_pages = zone_managed_pages(zone) / percpu_pagelist_high_fraction;
7113 }
b92ca18e
MG
7114
7115 /*
74f44822
MG
7116 * Split the high value across all online CPUs local to the zone. Note
7117 * that early in boot CPUs may not be online yet and that during
7118 * CPU hotplug the cpumask is not yet updated when a CPU is being
203c06ee
MG
7119 * onlined. For memory nodes that have no CPUs, split pcp->high across
7120 * all online CPUs to mitigate the risk that reclaim is triggered
7121 * prematurely due to pages stored on pcp lists.
b92ca18e 7122 */
203c06ee
MG
7123 nr_split_cpus = cpumask_weight(cpumask_of_node(zone_to_nid(zone))) + cpu_online;
7124 if (!nr_split_cpus)
7125 nr_split_cpus = num_online_cpus();
7126 high = total_pages / nr_split_cpus;
b92ca18e
MG
7127
7128 /*
7129 * Ensure high is at least batch*4. The multiple is based on the
7130 * historical relationship between high and batch.
7131 */
7132 high = max(high, batch << 2);
7133
7134 return high;
7135#else
7136 return 0;
7137#endif
7138}
7139
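/*
 * Worked example (illustrative, with percpu_pagelist_high_fraction unset): for
 * a zone whose low watermark is 16384 pages and whose node has 4 online CPUs,
 * total_pages = 16384 and nr_split_cpus = 4, so
 *
 *	high = 16384 / 4 = 4096
 *
 * which easily clears the max(high, batch << 2) floor for a typical batch
 * of 63.
 */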
8d7a8fa9 7140/*
5c3ad2eb
VB
7141 * pcp->high and pcp->batch values are related and generally batch is lower
7142 * than high. They are also related to pcp->count such that count is lower
7143 * than high, and as soon as it reaches high, the pcplist is flushed.
8d7a8fa9 7144 *
5c3ad2eb
VB
7145 * However, guaranteeing these relations at all times would require e.g. write
7146 * barriers here but also careful usage of read barriers at the read side, and
7147 * thus be prone to error and bad for performance. Thus the update only prevents
7148 * store tearing. Any new users of pcp->batch and pcp->high should ensure they
7149 * can cope with those fields changing asynchronously, and fully trust only the
7150 * pcp->count field on the local CPU with interrupts disabled.
8d7a8fa9
CS
7151 *
7152 * mutex_is_locked(&pcp_batch_high_lock) required when calling this function
7153 * outside of boot time (or some other assurance that no concurrent updaters
7154 * exist).
7155 */
7156static void pageset_update(struct per_cpu_pages *pcp, unsigned long high,
7157 unsigned long batch)
7158{
5c3ad2eb
VB
7159 WRITE_ONCE(pcp->batch, batch);
7160 WRITE_ONCE(pcp->high, high);
8d7a8fa9
CS
7161}
7162
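/*
 * Illustrative sketch (not part of this file): a reader that tolerates the
 * asynchronous updates described above would snapshot the fields once via
 * READ_ONCE() and work on the local copies, e.g.
 *
 *	unsigned long batch = READ_ONCE(pcp->batch);
 *	unsigned long high = READ_ONCE(pcp->high);
 *
 * and fully trust only pcp->count, read on the local CPU with interrupts
 * disabled.
 */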
28f836b6 7163static void per_cpu_pages_init(struct per_cpu_pages *pcp, struct per_cpu_zonestat *pzstats)
2caaad41 7164{
44042b44 7165 int pindex;
2caaad41 7166
28f836b6
MG
7167 memset(pcp, 0, sizeof(*pcp));
7168 memset(pzstats, 0, sizeof(*pzstats));
1c6fe946 7169
4b23a68f 7170 spin_lock_init(&pcp->lock);
44042b44
MG
7171 for (pindex = 0; pindex < NR_PCP_LISTS; pindex++)
7172 INIT_LIST_HEAD(&pcp->lists[pindex]);
2caaad41 7173
69a8396a
VB
7174 /*
7175 * Set batch and high values safe for a boot pageset. A true percpu
7176 * pageset's initialization will update them subsequently. Here we don't
7177 * need to be as careful as pageset_update() as nobody can access the
7178 * pageset yet.
7179 */
952eaf81
VB
7180 pcp->high = BOOT_PAGESET_HIGH;
7181 pcp->batch = BOOT_PAGESET_BATCH;
3b12e7e9 7182 pcp->free_factor = 0;
88c90dbc
CS
7183}
7184
3b1f3658 7185static void __zone_set_pageset_high_and_batch(struct zone *zone, unsigned long high,
ec6e8c7e
VB
7186 unsigned long batch)
7187{
28f836b6 7188 struct per_cpu_pages *pcp;
ec6e8c7e
VB
7189 int cpu;
7190
7191 for_each_possible_cpu(cpu) {
28f836b6
MG
7192 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
7193 pageset_update(pcp, high, batch);
ec6e8c7e
VB
7194 }
7195}
7196
8ad4b1fb 7197/*
0a8b4f1d 7198 * Calculate and set new high and batch values for all per-cpu pagesets of a
bbbecb35 7199 * zone based on the zone's size.
8ad4b1fb 7200 */
04f8cfea 7201static void zone_set_pageset_high_and_batch(struct zone *zone, int cpu_online)
56cef2b8 7202{
b92ca18e 7203 int new_high, new_batch;
7115ac6e 7204
b92ca18e 7205 new_batch = max(1, zone_batchsize(zone));
04f8cfea 7206 new_high = zone_highsize(zone, new_batch, cpu_online);
169f6c19 7207
952eaf81
VB
7208 if (zone->pageset_high == new_high &&
7209 zone->pageset_batch == new_batch)
7210 return;
7211
7212 zone->pageset_high = new_high;
7213 zone->pageset_batch = new_batch;
7214
ec6e8c7e 7215 __zone_set_pageset_high_and_batch(zone, new_high, new_batch);
169f6c19
CS
7216}
7217
72675e13 7218void __meminit setup_zone_pageset(struct zone *zone)
319774e2
WF
7219{
7220 int cpu;
0a8b4f1d 7221
28f836b6
MG
7222 /* Size may be 0 on !SMP && !NUMA */
7223 if (sizeof(struct per_cpu_zonestat) > 0)
7224 zone->per_cpu_zonestats = alloc_percpu(struct per_cpu_zonestat);
7225
7226 zone->per_cpu_pageset = alloc_percpu(struct per_cpu_pages);
0a8b4f1d 7227 for_each_possible_cpu(cpu) {
28f836b6
MG
7228 struct per_cpu_pages *pcp;
7229 struct per_cpu_zonestat *pzstats;
7230
7231 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
7232 pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu);
7233 per_cpu_pages_init(pcp, pzstats);
0a8b4f1d
VB
7234 }
7235
04f8cfea 7236 zone_set_pageset_high_and_batch(zone, 0);
319774e2
WF
7237}
7238
b89f1735
ML
7239/*
7240 * The zone indicated has a new number of managed_pages; batch sizes and percpu
7241 * page high values need to be recalculated.
7242 */
7243static void zone_pcp_update(struct zone *zone, int cpu_online)
7244{
7245 mutex_lock(&pcp_batch_high_lock);
7246 zone_set_pageset_high_and_batch(zone, cpu_online);
7247 mutex_unlock(&pcp_batch_high_lock);
7248}
7249
2caaad41 7250/*
99dcc3e5
CL
7251 * Allocate per cpu pagesets and initialize them.
7252 * Before this call only boot pagesets were available.
e7c8d5c9 7253 */
99dcc3e5 7254void __init setup_per_cpu_pageset(void)
e7c8d5c9 7255{
b4911ea2 7256 struct pglist_data *pgdat;
99dcc3e5 7257 struct zone *zone;
b418a0f9 7258 int __maybe_unused cpu;
e7c8d5c9 7259
319774e2
WF
7260 for_each_populated_zone(zone)
7261 setup_zone_pageset(zone);
b4911ea2 7262
b418a0f9
SD
7263#ifdef CONFIG_NUMA
7264 /*
7265 * Unpopulated zones continue using the boot pagesets.
7266 * The numa stats for these pagesets need to be reset.
7267 * Otherwise, they will end up skewing the stats of
7268 * the nodes these zones are associated with.
7269 */
7270 for_each_possible_cpu(cpu) {
28f836b6 7271 struct per_cpu_zonestat *pzstats = &per_cpu(boot_zonestats, cpu);
f19298b9
MG
7272 memset(pzstats->vm_numa_event, 0,
7273 sizeof(pzstats->vm_numa_event));
b418a0f9
SD
7274 }
7275#endif
7276
b4911ea2
MG
7277 for_each_online_pgdat(pgdat)
7278 pgdat->per_cpu_nodestats =
7279 alloc_percpu(struct per_cpu_nodestat);
e7c8d5c9
CL
7280}
7281
c09b4240 7282static __meminit void zone_pcp_init(struct zone *zone)
ed8ece2e 7283{
99dcc3e5
CL
7284 /*
7285 * per cpu subsystem is not up at this point. The following code
7286 * relies on the ability of the linker to provide the
7287 * offset of a (static) per cpu variable into the per cpu area.
7288 */
28f836b6
MG
7289 zone->per_cpu_pageset = &boot_pageset;
7290 zone->per_cpu_zonestats = &boot_zonestats;
952eaf81
VB
7291 zone->pageset_high = BOOT_PAGESET_HIGH;
7292 zone->pageset_batch = BOOT_PAGESET_BATCH;
ed8ece2e 7293
b38a8725 7294 if (populated_zone(zone))
9660ecaa
HK
7295 pr_debug(" %s zone: %lu pages, LIFO batch:%u\n", zone->name,
7296 zone->present_pages, zone_batchsize(zone));
ed8ece2e
DH
7297}
7298
dc0bbf3b 7299void __meminit init_currently_empty_zone(struct zone *zone,
718127cc 7300 unsigned long zone_start_pfn,
b171e409 7301 unsigned long size)
ed8ece2e
DH
7302{
7303 struct pglist_data *pgdat = zone->zone_pgdat;
8f416836 7304 int zone_idx = zone_idx(zone) + 1;
9dcb8b68 7305
8f416836
WY
7306 if (zone_idx > pgdat->nr_zones)
7307 pgdat->nr_zones = zone_idx;
ed8ece2e 7308
ed8ece2e
DH
7309 zone->zone_start_pfn = zone_start_pfn;
7310
708614e6
MG
7311 mminit_dprintk(MMINIT_TRACE, "memmap_init",
7312 "Initialising map node %d zone %lu pfns %lu -> %lu\n",
7313 pgdat->node_id,
7314 (unsigned long)zone_idx(zone),
7315 zone_start_pfn, (zone_start_pfn + size));
7316
1e548deb 7317 zone_init_free_lists(zone);
9dcb8b68 7318 zone->initialized = 1;
ed8ece2e
DH
7319}
7320
c713216d
MG
7321/**
7322 * get_pfn_range_for_nid - Return the start and end page frames for a node
88ca3b94
RD
7323 * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned.
7324 * @start_pfn: Passed by reference. On return, it will have the node start_pfn.
7325 * @end_pfn: Passed by reference. On return, it will have the node end_pfn.
c713216d
MG
7326 *
7327 * It returns the start and end page frame of a node based on information
7d018176 7328 * provided by memblock_set_node(). If called for a node
c713216d 7329 * with no available memory, a warning is printed and the start and end
88ca3b94 7330 * PFNs will be 0.
c713216d 7331 */
bbe5d993 7332void __init get_pfn_range_for_nid(unsigned int nid,
c713216d
MG
7333 unsigned long *start_pfn, unsigned long *end_pfn)
7334{
c13291a5 7335 unsigned long this_start_pfn, this_end_pfn;
c713216d 7336 int i;
c13291a5 7337
c713216d
MG
7338 *start_pfn = -1UL;
7339 *end_pfn = 0;
7340
c13291a5
TH
7341 for_each_mem_pfn_range(i, nid, &this_start_pfn, &this_end_pfn, NULL) {
7342 *start_pfn = min(*start_pfn, this_start_pfn);
7343 *end_pfn = max(*end_pfn, this_end_pfn);
c713216d
MG
7344 }
7345
633c0666 7346 if (*start_pfn == -1UL)
c713216d 7347 *start_pfn = 0;
c713216d
MG
7348}
7349
2a1e274a
MG
7350/*
7351 * This finds a zone that can be used for ZONE_MOVABLE pages. The
7352 * assumption is made that zones within a node are ordered in monotonic
7353 * increasing memory addresses so that the "highest" populated zone is used
7354 */
b69a7288 7355static void __init find_usable_zone_for_movable(void)
2a1e274a
MG
7356{
7357 int zone_index;
7358 for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
7359 if (zone_index == ZONE_MOVABLE)
7360 continue;
7361
7362 if (arch_zone_highest_possible_pfn[zone_index] >
7363 arch_zone_lowest_possible_pfn[zone_index])
7364 break;
7365 }
7366
7367 VM_BUG_ON(zone_index == -1);
7368 movable_zone = zone_index;
7369}
7370
7371/*
7372 * The zone ranges provided by the architecture do not include ZONE_MOVABLE
25985edc 7373 * because it is sized independently of the architecture. Unlike the other zones,
2a1e274a
MG
7374 * the starting point for ZONE_MOVABLE is not fixed. It may be different
7375 * in each node depending on the size of each node and how evenly kernelcore
7376 * is distributed. This helper function adjusts the zone ranges
7377 * provided by the architecture for a given node by using the end of the
7378 * highest usable zone for ZONE_MOVABLE. This preserves the assumption that
7379 * zones within a node are in order of monotonically increasing memory addresses
7380 */
bbe5d993 7381static void __init adjust_zone_range_for_zone_movable(int nid,
2a1e274a
MG
7382 unsigned long zone_type,
7383 unsigned long node_start_pfn,
7384 unsigned long node_end_pfn,
7385 unsigned long *zone_start_pfn,
7386 unsigned long *zone_end_pfn)
7387{
7388 /* Only adjust if ZONE_MOVABLE is on this node */
7389 if (zone_movable_pfn[nid]) {
7390 /* Size ZONE_MOVABLE */
7391 if (zone_type == ZONE_MOVABLE) {
7392 *zone_start_pfn = zone_movable_pfn[nid];
7393 *zone_end_pfn = min(node_end_pfn,
7394 arch_zone_highest_possible_pfn[movable_zone]);
7395
e506b996
XQ
7396 /* Adjust for ZONE_MOVABLE starting within this range */
7397 } else if (!mirrored_kernelcore &&
7398 *zone_start_pfn < zone_movable_pfn[nid] &&
7399 *zone_end_pfn > zone_movable_pfn[nid]) {
7400 *zone_end_pfn = zone_movable_pfn[nid];
7401
2a1e274a
MG
7402 /* Check if this whole range is within ZONE_MOVABLE */
7403 } else if (*zone_start_pfn >= zone_movable_pfn[nid])
7404 *zone_start_pfn = *zone_end_pfn;
7405 }
7406}
7407
c713216d
MG
7408/*
7409 * Return the number of pages a zone spans in a node, including holes
7410 * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
7411 */
bbe5d993 7412static unsigned long __init zone_spanned_pages_in_node(int nid,
c713216d 7413 unsigned long zone_type,
7960aedd
ZY
7414 unsigned long node_start_pfn,
7415 unsigned long node_end_pfn,
d91749c1 7416 unsigned long *zone_start_pfn,
854e8848 7417 unsigned long *zone_end_pfn)
c713216d 7418{
299c83dc
LF
7419 unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type];
7420 unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type];
b5685e92 7421 /* When hotadding a new node from cpu_up(), the node should be empty */
f9126ab9
XQ
7422 if (!node_start_pfn && !node_end_pfn)
7423 return 0;
7424
7960aedd 7425 /* Get the start and end of the zone */
299c83dc
LF
7426 *zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high);
7427 *zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high);
2a1e274a
MG
7428 adjust_zone_range_for_zone_movable(nid, zone_type,
7429 node_start_pfn, node_end_pfn,
d91749c1 7430 zone_start_pfn, zone_end_pfn);
c713216d
MG
7431
7432 /* Check that this node has pages within the zone's required range */
d91749c1 7433 if (*zone_end_pfn < node_start_pfn || *zone_start_pfn > node_end_pfn)
c713216d
MG
7434 return 0;
7435
7436 /* Move the zone boundaries inside the node if necessary */
d91749c1
TI
7437 *zone_end_pfn = min(*zone_end_pfn, node_end_pfn);
7438 *zone_start_pfn = max(*zone_start_pfn, node_start_pfn);
c713216d
MG
7439
7440 /* Return the spanned pages */
d91749c1 7441 return *zone_end_pfn - *zone_start_pfn;
c713216d
MG
7442}
7443
7444/*
7445 * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
88ca3b94 7446 * then all holes in the requested range will be accounted for.
c713216d 7447 */
bbe5d993 7448unsigned long __init __absent_pages_in_range(int nid,
c713216d
MG
7449 unsigned long range_start_pfn,
7450 unsigned long range_end_pfn)
7451{
96e907d1
TH
7452 unsigned long nr_absent = range_end_pfn - range_start_pfn;
7453 unsigned long start_pfn, end_pfn;
7454 int i;
c713216d 7455
96e907d1
TH
7456 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
7457 start_pfn = clamp(start_pfn, range_start_pfn, range_end_pfn);
7458 end_pfn = clamp(end_pfn, range_start_pfn, range_end_pfn);
7459 nr_absent -= end_pfn - start_pfn;
c713216d 7460 }
96e907d1 7461 return nr_absent;
c713216d
MG
7462}
7463
7464/**
7465 * absent_pages_in_range - Return number of page frames in holes within a range
7466 * @start_pfn: The start PFN to start searching for holes
7467 * @end_pfn: The end PFN to stop searching for holes
7468 *
a862f68a 7469 * Return: the number of page frames in memory holes within a range.
c713216d
MG
7470 */
7471unsigned long __init absent_pages_in_range(unsigned long start_pfn,
7472 unsigned long end_pfn)
7473{
7474 return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn);
7475}
7476
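/*
 * Worked example (illustrative): if memblock.memory for the queried nid
 * contains [0x0000, 0x8000) and [0x10000, 0x18000) and the requested range is
 * [0x0000, 0x18000), nr_absent starts at 0x18000 pages and the two clamped
 * intersections subtract 0x8000 pages each, leaving 0x8000 pages of holes.
 */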
7477/* Return the number of page frames in holes in a zone on a node */
bbe5d993 7478static unsigned long __init zone_absent_pages_in_node(int nid,
c713216d 7479 unsigned long zone_type,
7960aedd 7480 unsigned long node_start_pfn,
854e8848 7481 unsigned long node_end_pfn)
c713216d 7482{
96e907d1
TH
7483 unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type];
7484 unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type];
9c7cd687 7485 unsigned long zone_start_pfn, zone_end_pfn;
342332e6 7486 unsigned long nr_absent;
9c7cd687 7487
b5685e92 7488 /* When hotadding a new node from cpu_up(), the node should be empty */
f9126ab9
XQ
7489 if (!node_start_pfn && !node_end_pfn)
7490 return 0;
7491
96e907d1
TH
7492 zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high);
7493 zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high);
9c7cd687 7494
2a1e274a
MG
7495 adjust_zone_range_for_zone_movable(nid, zone_type,
7496 node_start_pfn, node_end_pfn,
7497 &zone_start_pfn, &zone_end_pfn);
342332e6
TI
7498 nr_absent = __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
7499
7500 /*
7501 * ZONE_MOVABLE handling.
7502 * Treat pages to be ZONE_MOVABLE in ZONE_NORMAL as absent pages
7503 * and vice versa.
7504 */
e506b996
XQ
7505 if (mirrored_kernelcore && zone_movable_pfn[nid]) {
7506 unsigned long start_pfn, end_pfn;
7507 struct memblock_region *r;
7508
cc6de168 7509 for_each_mem_region(r) {
e506b996
XQ
7510 start_pfn = clamp(memblock_region_memory_base_pfn(r),
7511 zone_start_pfn, zone_end_pfn);
7512 end_pfn = clamp(memblock_region_memory_end_pfn(r),
7513 zone_start_pfn, zone_end_pfn);
7514
7515 if (zone_type == ZONE_MOVABLE &&
7516 memblock_is_mirror(r))
7517 nr_absent += end_pfn - start_pfn;
7518
7519 if (zone_type == ZONE_NORMAL &&
7520 !memblock_is_mirror(r))
7521 nr_absent += end_pfn - start_pfn;
342332e6
TI
7522 }
7523 }
7524
7525 return nr_absent;
c713216d 7526}
0e0b864e 7527
bbe5d993 7528static void __init calculate_node_totalpages(struct pglist_data *pgdat,
7960aedd 7529 unsigned long node_start_pfn,
854e8848 7530 unsigned long node_end_pfn)
c713216d 7531{
febd5949 7532 unsigned long realtotalpages = 0, totalpages = 0;
c713216d
MG
7533 enum zone_type i;
7534
febd5949
GZ
7535 for (i = 0; i < MAX_NR_ZONES; i++) {
7536 struct zone *zone = pgdat->node_zones + i;
d91749c1 7537 unsigned long zone_start_pfn, zone_end_pfn;
3f08a302 7538 unsigned long spanned, absent;
febd5949 7539 unsigned long size, real_size;
c713216d 7540
854e8848
MR
7541 spanned = zone_spanned_pages_in_node(pgdat->node_id, i,
7542 node_start_pfn,
7543 node_end_pfn,
7544 &zone_start_pfn,
7545 &zone_end_pfn);
7546 absent = zone_absent_pages_in_node(pgdat->node_id, i,
7547 node_start_pfn,
7548 node_end_pfn);
3f08a302
MR
7549
7550 size = spanned;
7551 real_size = size - absent;
7552
d91749c1
TI
7553 if (size)
7554 zone->zone_start_pfn = zone_start_pfn;
7555 else
7556 zone->zone_start_pfn = 0;
febd5949
GZ
7557 zone->spanned_pages = size;
7558 zone->present_pages = real_size;
4b097002
DH
7559#if defined(CONFIG_MEMORY_HOTPLUG)
7560 zone->present_early_pages = real_size;
7561#endif
febd5949
GZ
7562
7563 totalpages += size;
7564 realtotalpages += real_size;
7565 }
7566
7567 pgdat->node_spanned_pages = totalpages;
c713216d 7568 pgdat->node_present_pages = realtotalpages;
9660ecaa 7569 pr_debug("On node %d totalpages: %lu\n", pgdat->node_id, realtotalpages);
c713216d
MG
7570}
7571
835c134e
MG
7572#ifndef CONFIG_SPARSEMEM
7573/*
7574 * Calculate the size of the zone->blockflags rounded to an unsigned long
d9c23400
MG
7575 * Start by making sure zonesize is a multiple of pageblock_order by rounding
7576 * up. Then use 1 NR_PAGEBLOCK_BITS worth of bits per pageblock, finally
835c134e
MG
7577 * round what is now in bits to nearest long in bits, then return it in
7578 * bytes.
7579 */
7c45512d 7580static unsigned long __init usemap_size(unsigned long zone_start_pfn, unsigned long zonesize)
835c134e
MG
7581{
7582 unsigned long usemapsize;
7583
7c45512d 7584 zonesize += zone_start_pfn & (pageblock_nr_pages-1);
d9c23400
MG
7585 usemapsize = roundup(zonesize, pageblock_nr_pages);
7586 usemapsize = usemapsize >> pageblock_order;
835c134e
MG
7587 usemapsize *= NR_PAGEBLOCK_BITS;
7588 usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long));
7589
7590 return usemapsize / 8;
7591}
7592
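/*
 * Worked example (illustrative, assuming pageblock_order == 9 and
 * NR_PAGEBLOCK_BITS == 4): a pageblock-aligned zone of 262144 pages has
 * 262144 >> 9 = 512 pageblocks, needing 512 * 4 = 2048 bits; 2048 is already a
 * multiple of 8 * sizeof(unsigned long), so usemap_size() returns
 * 2048 / 8 = 256 bytes.
 */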
7010a6ec 7593static void __ref setup_usemap(struct zone *zone)
835c134e 7594{
7010a6ec
BH
7595 unsigned long usemapsize = usemap_size(zone->zone_start_pfn,
7596 zone->spanned_pages);
835c134e 7597 zone->pageblock_flags = NULL;
23a7052a 7598 if (usemapsize) {
6782832e 7599 zone->pageblock_flags =
26fb3dae 7600 memblock_alloc_node(usemapsize, SMP_CACHE_BYTES,
7010a6ec 7601 zone_to_nid(zone));
23a7052a
MR
7602 if (!zone->pageblock_flags)
7603 panic("Failed to allocate %ld bytes for zone %s pageblock flags on node %d\n",
7010a6ec 7604 usemapsize, zone->name, zone_to_nid(zone));
23a7052a 7605 }
835c134e
MG
7606}
7607#else
7010a6ec 7608static inline void setup_usemap(struct zone *zone) {}
835c134e
MG
7609#endif /* CONFIG_SPARSEMEM */
7610
d9c23400 7611#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
ba72cb8c 7612
d9c23400 7613/* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */
03e85f9d 7614void __init set_pageblock_order(void)
d9c23400 7615{
b3d40a2b 7616 unsigned int order = MAX_ORDER - 1;
955c1cd7 7617
d9c23400
MG
7618 /* Check that pageblock_nr_pages has not already been setup */
7619 if (pageblock_order)
7620 return;
7621
b3d40a2b
DH
7622 /* Don't let pageblocks exceed the maximum allocation granularity. */
7623 if (HPAGE_SHIFT > PAGE_SHIFT && HUGETLB_PAGE_ORDER < order)
955c1cd7 7624 order = HUGETLB_PAGE_ORDER;
955c1cd7 7625
d9c23400
MG
7626 /*
7627 * Assume the largest contiguous order of interest is a huge page.
955c1cd7
AM
7628 * This value may be variable depending on boot parameters on IA64 and
7629 * powerpc.
d9c23400
MG
7630 */
7631 pageblock_order = order;
7632}
7633#else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
7634
ba72cb8c
MG
7635/*
7636 * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order()
955c1cd7
AM
7637 * is unused as pageblock_order is set at compile-time. See
7638 * include/linux/pageblock-flags.h for the values of pageblock_order based on
7639 * the kernel config
ba72cb8c 7640 */
03e85f9d 7641void __init set_pageblock_order(void)
ba72cb8c 7642{
ba72cb8c 7643}
d9c23400
MG
7644
7645#endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
7646
03e85f9d 7647static unsigned long __init calc_memmap_size(unsigned long spanned_pages,
7cc2a959 7648 unsigned long present_pages)
01cefaef
JL
7649{
7650 unsigned long pages = spanned_pages;
7651
7652 /*
7653 * Provide a more accurate estimation if there are holes within
7654 * the zone and SPARSEMEM is in use. If there are holes within the
7655 * zone, each populated memory region may cost us one or two extra
7656 * memmap pages due to alignment because memmap pages for each
89d790ab 7657 * populated region may not be naturally aligned on a page boundary.
01cefaef
JL
7658 * So the (present_pages >> 4) heuristic is a tradeoff for that.
7659 */
7660 if (spanned_pages > present_pages + (present_pages >> 4) &&
7661 IS_ENABLED(CONFIG_SPARSEMEM))
7662 pages = present_pages;
7663
7664 return PAGE_ALIGN(pages * sizeof(struct page)) >> PAGE_SHIFT;
7665}
7666
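/*
 * Worked example (illustrative, assuming a 64-byte struct page and 4KiB
 * pages): spanned_pages = 1048576, present_pages = 786432. Since
 * 1048576 > 786432 + (786432 >> 4) and SPARSEMEM is enabled, the estimate uses
 * present_pages: 786432 * 64 bytes = 12288 pages of memmap.
 */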
ace1db39
OS
7667#ifdef CONFIG_TRANSPARENT_HUGEPAGE
7668static void pgdat_init_split_queue(struct pglist_data *pgdat)
7669{
364c1eeb
YS
7670 struct deferred_split *ds_queue = &pgdat->deferred_split_queue;
7671
7672 spin_lock_init(&ds_queue->split_queue_lock);
7673 INIT_LIST_HEAD(&ds_queue->split_queue);
7674 ds_queue->split_queue_len = 0;
ace1db39
OS
7675}
7676#else
7677static void pgdat_init_split_queue(struct pglist_data *pgdat) {}
7678#endif
7679
7680#ifdef CONFIG_COMPACTION
7681static void pgdat_init_kcompactd(struct pglist_data *pgdat)
7682{
7683 init_waitqueue_head(&pgdat->kcompactd_wait);
7684}
7685#else
7686static void pgdat_init_kcompactd(struct pglist_data *pgdat) {}
7687#endif
7688
03e85f9d 7689static void __meminit pgdat_init_internals(struct pglist_data *pgdat)
1da177e4 7690{
8cd7c588
MG
7691 int i;
7692
208d54e5 7693 pgdat_resize_init(pgdat);
b4a0215e 7694 pgdat_kswapd_lock_init(pgdat);
ace1db39 7695
ace1db39
OS
7696 pgdat_init_split_queue(pgdat);
7697 pgdat_init_kcompactd(pgdat);
7698
1da177e4 7699 init_waitqueue_head(&pgdat->kswapd_wait);
5515061d 7700 init_waitqueue_head(&pgdat->pfmemalloc_wait);
ace1db39 7701
8cd7c588
MG
7702 for (i = 0; i < NR_VMSCAN_THROTTLE; i++)
7703 init_waitqueue_head(&pgdat->reclaim_wait[i]);
7704
eefa864b 7705 pgdat_page_ext_init(pgdat);
867e5e1d 7706 lruvec_init(&pgdat->__lruvec);
03e85f9d
OS
7707}
7708
7709static void __meminit zone_init_internals(struct zone *zone, enum zone_type idx, int nid,
7710 unsigned long remaining_pages)
7711{
9705bea5 7712 atomic_long_set(&zone->managed_pages, remaining_pages);
03e85f9d
OS
7713 zone_set_nid(zone, nid);
7714 zone->name = zone_names[idx];
7715 zone->zone_pgdat = NODE_DATA(nid);
7716 spin_lock_init(&zone->lock);
7717 zone_seqlock_init(zone);
7718 zone_pcp_init(zone);
7719}
7720
7721/*
7722 * Set up the zone data structures
7723 * - init pgdat internals
7724 * - init all zones belonging to this node
7725 *
7726 * NOTE: this function is only called during memory hotplug
7727 */
7728#ifdef CONFIG_MEMORY_HOTPLUG
70b5b46a 7729void __ref free_area_init_core_hotplug(struct pglist_data *pgdat)
03e85f9d 7730{
70b5b46a 7731 int nid = pgdat->node_id;
03e85f9d 7732 enum zone_type z;
70b5b46a 7733 int cpu;
03e85f9d
OS
7734
7735 pgdat_init_internals(pgdat);
70b5b46a
MH
7736
7737 if (pgdat->per_cpu_nodestats == &boot_nodestats)
7738 pgdat->per_cpu_nodestats = alloc_percpu(struct per_cpu_nodestat);
7739
7740 /*
7741 * Reset the nr_zones, order and highest_zoneidx before reuse.
7742 * Note that kswapd will init kswapd_highest_zoneidx properly
7743 * when it starts in the near future.
7744 */
7745 pgdat->nr_zones = 0;
7746 pgdat->kswapd_order = 0;
7747 pgdat->kswapd_highest_zoneidx = 0;
7748 pgdat->node_start_pfn = 0;
7749 for_each_online_cpu(cpu) {
7750 struct per_cpu_nodestat *p;
7751
7752 p = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu);
7753 memset(p, 0, sizeof(*p));
7754 }
7755
03e85f9d
OS
7756 for (z = 0; z < MAX_NR_ZONES; z++)
7757 zone_init_internals(&pgdat->node_zones[z], z, nid, 0);
7758}
7759#endif
7760
7761/*
7762 * Set up the zone data structures:
7763 * - mark all pages reserved
7764 * - mark all memory queues empty
7765 * - clear the memory bitmaps
7766 *
7767 * NOTE: pgdat should get zeroed by caller.
7768 * NOTE: this function is only called during early init.
7769 */
7770static void __init free_area_init_core(struct pglist_data *pgdat)
7771{
7772 enum zone_type j;
7773 int nid = pgdat->node_id;
5f63b720 7774
03e85f9d 7775 pgdat_init_internals(pgdat);
385386cf
JW
7776 pgdat->per_cpu_nodestats = &boot_nodestats;
7777
1da177e4
LT
7778 for (j = 0; j < MAX_NR_ZONES; j++) {
7779 struct zone *zone = pgdat->node_zones + j;
e6943859 7780 unsigned long size, freesize, memmap_pages;
1da177e4 7781
febd5949 7782 size = zone->spanned_pages;
e6943859 7783 freesize = zone->present_pages;
1da177e4 7784
0e0b864e 7785 /*
9feedc9d 7786 * Adjust freesize so that it accounts for how much memory
0e0b864e
MG
7787 * is used by this zone for memmap. This affects the watermark
7788 * and per-cpu initialisations
7789 */
e6943859 7790 memmap_pages = calc_memmap_size(size, freesize);
ba914f48
ZH
7791 if (!is_highmem_idx(j)) {
7792 if (freesize >= memmap_pages) {
7793 freesize -= memmap_pages;
7794 if (memmap_pages)
9660ecaa
HK
7795 pr_debug(" %s zone: %lu pages used for memmap\n",
7796 zone_names[j], memmap_pages);
ba914f48 7797 } else
e47aa905 7798 pr_warn(" %s zone: %lu memmap pages exceeds freesize %lu\n",
ba914f48
ZH
7799 zone_names[j], memmap_pages, freesize);
7800 }
0e0b864e 7801
6267276f 7802 /* Account for reserved pages */
9feedc9d
JL
7803 if (j == 0 && freesize > dma_reserve) {
7804 freesize -= dma_reserve;
9660ecaa 7805 pr_debug(" %s zone: %lu pages reserved\n", zone_names[0], dma_reserve);
0e0b864e
MG
7806 }
7807
98d2b0eb 7808 if (!is_highmem_idx(j))
9feedc9d 7809 nr_kernel_pages += freesize;
01cefaef
JL
7810 /* Charge for highmem memmap if there are enough kernel pages */
7811 else if (nr_kernel_pages > memmap_pages * 2)
7812 nr_kernel_pages -= memmap_pages;
9feedc9d 7813 nr_all_pages += freesize;
1da177e4 7814
9feedc9d
JL
7815 /*
7816 * Set an approximate value for lowmem here, it will be adjusted
7817 * when the bootmem allocator frees pages into the buddy system.
7818 * And all highmem pages will be managed by the buddy system.
7819 */
03e85f9d 7820 zone_init_internals(zone, j, nid, freesize);
81c0a2bb 7821
d883c6cf 7822 if (!size)
1da177e4
LT
7823 continue;
7824
955c1cd7 7825 set_pageblock_order();
7010a6ec 7826 setup_usemap(zone);
9699ee7b 7827 init_currently_empty_zone(zone, zone->zone_start_pfn, size);
1da177e4
LT
7828 }
7829}
7830
43b02ba9 7831#ifdef CONFIG_FLATMEM
3b446da6 7832static void __init alloc_node_mem_map(struct pglist_data *pgdat)
1da177e4 7833{
b0aeba74 7834 unsigned long __maybe_unused start = 0;
a1c34a3b
LA
7835 unsigned long __maybe_unused offset = 0;
7836
1da177e4
LT
7837 /* Skip empty nodes */
7838 if (!pgdat->node_spanned_pages)
7839 return;
7840
b0aeba74
TL
7841 start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
7842 offset = pgdat->node_start_pfn - start;
1da177e4
LT
7843 /* ia64 gets its own node_mem_map, before this, without bootmem */
7844 if (!pgdat->node_mem_map) {
b0aeba74 7845 unsigned long size, end;
d41dee36
AW
7846 struct page *map;
7847
e984bb43
BP
7848 /*
7849 * The zone's endpoints aren't required to be MAX_ORDER
7850 * aligned but the node_mem_map endpoints must be in order
7851 * for the buddy allocator to function correctly.
7852 */
108bcc96 7853 end = pgdat_end_pfn(pgdat);
e984bb43
BP
7854 end = ALIGN(end, MAX_ORDER_NR_PAGES);
7855 size = (end - start) * sizeof(struct page);
c803b3c8
MR
7856 map = memmap_alloc(size, SMP_CACHE_BYTES, MEMBLOCK_LOW_LIMIT,
7857 pgdat->node_id, false);
23a7052a
MR
7858 if (!map)
7859 panic("Failed to allocate %ld bytes for node %d memory map\n",
7860 size, pgdat->node_id);
a1c34a3b 7861 pgdat->node_mem_map = map + offset;
1da177e4 7862 }
0cd842f9
OS
7863 pr_debug("%s: node %d, pgdat %08lx, node_mem_map %08lx\n",
7864 __func__, pgdat->node_id, (unsigned long)pgdat,
7865 (unsigned long)pgdat->node_mem_map);
a9ee6cf5 7866#ifndef CONFIG_NUMA
1da177e4
LT
7867 /*
 7868 * Without NUMA, the global mem_map is just set as node 0's
7869 */
c713216d 7870 if (pgdat == NODE_DATA(0)) {
1da177e4 7871 mem_map = NODE_DATA(0)->node_mem_map;
c713216d 7872 if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
a1c34a3b 7873 mem_map -= offset;
c713216d 7874 }
1da177e4
LT
7875#endif
7876}
0cd842f9 7877#else
3b446da6 7878static inline void alloc_node_mem_map(struct pglist_data *pgdat) { }
43b02ba9 7879#endif /* CONFIG_FLATMEM */
1da177e4 7880
0188dc98
OS
7881#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
7882static inline void pgdat_set_deferred_range(pg_data_t *pgdat)
7883{
0188dc98
OS
7884 pgdat->first_deferred_pfn = ULONG_MAX;
7885}
7886#else
7887static inline void pgdat_set_deferred_range(pg_data_t *pgdat) {}
7888#endif
7889
854e8848 7890static void __init free_area_init_node(int nid)
1da177e4 7891{
9109fb7b 7892 pg_data_t *pgdat = NODE_DATA(nid);
7960aedd
ZY
7893 unsigned long start_pfn = 0;
7894 unsigned long end_pfn = 0;
9109fb7b 7895
88fdf75d 7896 /* pg_data_t should be reset to zero when it's allocated */
97a225e6 7897 WARN_ON(pgdat->nr_zones || pgdat->kswapd_highest_zoneidx);
88fdf75d 7898
854e8848 7899 get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
88fdf75d 7900
1da177e4 7901 pgdat->node_id = nid;
854e8848 7902 pgdat->node_start_pfn = start_pfn;
75ef7184 7903 pgdat->per_cpu_nodestats = NULL;
854e8848 7904
7c30daac
MH
7905 if (start_pfn != end_pfn) {
7906 pr_info("Initmem setup node %d [mem %#018Lx-%#018Lx]\n", nid,
7907 (u64)start_pfn << PAGE_SHIFT,
7908 end_pfn ? ((u64)end_pfn << PAGE_SHIFT) - 1 : 0);
7909 } else {
7910 pr_info("Initmem setup node %d as memoryless\n", nid);
7911 }
7912
854e8848 7913 calculate_node_totalpages(pgdat, start_pfn, end_pfn);
1da177e4
LT
7914
7915 alloc_node_mem_map(pgdat);
0188dc98 7916 pgdat_set_deferred_range(pgdat);
1da177e4 7917
7f3eb55b 7918 free_area_init_core(pgdat);
1da177e4
LT
7919}
7920
1ca75fa7 7921static void __init free_area_init_memoryless_node(int nid)
3f08a302 7922{
854e8848 7923 free_area_init_node(nid);
3f08a302
MR
7924}
7925
418508c1
MS
7926#if MAX_NUMNODES > 1
7927/*
7928 * Figure out the number of possible node ids.
7929 */
f9872caf 7930void __init setup_nr_node_ids(void)
418508c1 7931{
904a9553 7932 unsigned int highest;
418508c1 7933
904a9553 7934 highest = find_last_bit(node_possible_map.bits, MAX_NUMNODES);
418508c1
MS
7935 nr_node_ids = highest + 1;
7936}
418508c1
MS
7937#endif
7938
1e01979c
TH
7939/**
7940 * node_map_pfn_alignment - determine the maximum internode alignment
7941 *
7942 * This function should be called after node map is populated and sorted.
7943 * It calculates the maximum power of two alignment which can distinguish
7944 * all the nodes.
7945 *
7946 * For example, if all nodes are 1GiB and aligned to 1GiB, the return value
7947 * would indicate 1GiB alignment with (1 << (30 - PAGE_SHIFT)). If the
 7948 * nodes are shifted by 256MiB, the result is 256MiB. Note that if only the last node is
7949 * shifted, 1GiB is enough and this function will indicate so.
7950 *
7951 * This is used to test whether pfn -> nid mapping of the chosen memory
7952 * model has fine enough granularity to avoid incorrect mapping for the
7953 * populated node map.
7954 *
a862f68a 7955 * Return: the determined alignment in pfn's. 0 if there is no alignment
1e01979c
TH
7956 * requirement (single node).
7957 */
7958unsigned long __init node_map_pfn_alignment(void)
7959{
7960 unsigned long accl_mask = 0, last_end = 0;
c13291a5 7961 unsigned long start, end, mask;
98fa15f3 7962 int last_nid = NUMA_NO_NODE;
c13291a5 7963 int i, nid;
1e01979c 7964
c13291a5 7965 for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid) {
1e01979c
TH
7966 if (!start || last_nid < 0 || last_nid == nid) {
7967 last_nid = nid;
7968 last_end = end;
7969 continue;
7970 }
7971
7972 /*
7973 * Start with a mask granular enough to pin-point to the
7974 * start pfn and tick off bits one-by-one until it becomes
7975 * too coarse to separate the current node from the last.
7976 */
7977 mask = ~((1 << __ffs(start)) - 1);
7978 while (mask && last_end <= (start & (mask << 1)))
7979 mask <<= 1;
7980
7981 /* accumulate all internode masks */
7982 accl_mask |= mask;
7983 }
7984
7985 /* convert mask to number of pages */
7986 return ~accl_mask + 1;
7987}
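/*
 * A concrete sketch (assuming 4 KiB pages): two 1 GiB nodes spanning pfns
 * [0x10000, 0x50000) and [0x50000, 0x90000), i.e. both shifted up by
 * 256 MiB. For the second node __ffs(0x50000) is 16, and coarsening the
 * mask any further would no longer separate its start from the first
 * node's end, so accl_mask keeps the low 16 bits clear and the function
 * returns 0x10000 pfns == 256 MiB, matching the example above.
 */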
7988
37b07e41
LS
7989/*
7990 * early_calculate_totalpages()
7991 * Sum pages in active regions for movable zone.
4b0ef1fe 7992 * Populate N_MEMORY for calculating usable_nodes.
37b07e41 7993 */
484f51f8 7994static unsigned long __init early_calculate_totalpages(void)
7e63efef 7995{
7e63efef 7996 unsigned long totalpages = 0;
c13291a5
TH
7997 unsigned long start_pfn, end_pfn;
7998 int i, nid;
7999
8000 for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
8001 unsigned long pages = end_pfn - start_pfn;
7e63efef 8002
37b07e41
LS
8003 totalpages += pages;
8004 if (pages)
4b0ef1fe 8005 node_set_state(nid, N_MEMORY);
37b07e41 8006 }
b8af2941 8007 return totalpages;
7e63efef
MG
8008}
8009
2a1e274a
MG
8010/*
 8011 * Find the PFN at which the Movable zone begins in each node. Kernel memory
8012 * is spread evenly between nodes as long as the nodes have enough
8013 * memory. When they don't, some nodes will have more kernelcore than
8014 * others
8015 */
b224ef85 8016static void __init find_zone_movable_pfns_for_nodes(void)
2a1e274a
MG
8017{
8018 int i, nid;
8019 unsigned long usable_startpfn;
8020 unsigned long kernelcore_node, kernelcore_remaining;
66918dcd 8021 /* save the state before borrow the nodemask */
4b0ef1fe 8022 nodemask_t saved_node_state = node_states[N_MEMORY];
37b07e41 8023 unsigned long totalpages = early_calculate_totalpages();
4b0ef1fe 8024 int usable_nodes = nodes_weight(node_states[N_MEMORY]);
136199f0 8025 struct memblock_region *r;
b2f3eebe
TC
8026
8027 /* Need to find movable_zone earlier when movable_node is specified. */
8028 find_usable_zone_for_movable();
8029
8030 /*
8031 * If movable_node is specified, ignore kernelcore and movablecore
8032 * options.
8033 */
8034 if (movable_node_is_enabled()) {
cc6de168 8035 for_each_mem_region(r) {
136199f0 8036 if (!memblock_is_hotpluggable(r))
b2f3eebe
TC
8037 continue;
8038
d622abf7 8039 nid = memblock_get_region_node(r);
b2f3eebe 8040
136199f0 8041 usable_startpfn = PFN_DOWN(r->base);
b2f3eebe
TC
8042 zone_movable_pfn[nid] = zone_movable_pfn[nid] ?
8043 min(usable_startpfn, zone_movable_pfn[nid]) :
8044 usable_startpfn;
8045 }
8046
8047 goto out2;
8048 }
2a1e274a 8049
342332e6
TI
8050 /*
8051 * If kernelcore=mirror is specified, ignore movablecore option
8052 */
8053 if (mirrored_kernelcore) {
8054 bool mem_below_4gb_not_mirrored = false;
8055
cc6de168 8056 for_each_mem_region(r) {
342332e6
TI
8057 if (memblock_is_mirror(r))
8058 continue;
8059
d622abf7 8060 nid = memblock_get_region_node(r);
342332e6
TI
8061
8062 usable_startpfn = memblock_region_memory_base_pfn(r);
8063
aa282a15 8064 if (usable_startpfn < PHYS_PFN(SZ_4G)) {
342332e6
TI
8065 mem_below_4gb_not_mirrored = true;
8066 continue;
8067 }
8068
8069 zone_movable_pfn[nid] = zone_movable_pfn[nid] ?
8070 min(usable_startpfn, zone_movable_pfn[nid]) :
8071 usable_startpfn;
8072 }
8073
8074 if (mem_below_4gb_not_mirrored)
633bf2fe 8075 pr_warn("This configuration results in unmirrored kernel memory.\n");
342332e6
TI
8076
8077 goto out2;
8078 }
8079
7e63efef 8080 /*
a5c6d650
DR
8081 * If kernelcore=nn% or movablecore=nn% was specified, calculate the
8082 * amount of necessary memory.
8083 */
8084 if (required_kernelcore_percent)
8085 required_kernelcore = (totalpages * 100 * required_kernelcore_percent) /
8086 10000UL;
8087 if (required_movablecore_percent)
8088 required_movablecore = (totalpages * 100 * required_movablecore_percent) /
8089 10000UL;
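	/*
	 * For example (assuming 4 KiB pages), kernelcore=25% on a 16 GiB
	 * machine (totalpages == 4194304) gives required_kernelcore ==
	 * 4194304 * 100 * 25 / 10000 == 1048576 pages, i.e. 4 GiB.
	 */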
8090
8091 /*
8092 * If movablecore= was specified, calculate what size of
7e63efef
MG
8093 * kernelcore that corresponds so that memory usable for
8094 * any allocation type is evenly spread. If both kernelcore
8095 * and movablecore are specified, then the value of kernelcore
8096 * will be used for required_kernelcore if it's greater than
8097 * what movablecore would have allowed.
8098 */
8099 if (required_movablecore) {
7e63efef
MG
8100 unsigned long corepages;
8101
8102 /*
8103 * Round-up so that ZONE_MOVABLE is at least as large as what
8104 * was requested by the user
8105 */
8106 required_movablecore =
8107 roundup(required_movablecore, MAX_ORDER_NR_PAGES);
9fd745d4 8108 required_movablecore = min(totalpages, required_movablecore);
7e63efef
MG
8109 corepages = totalpages - required_movablecore;
8110
8111 required_kernelcore = max(required_kernelcore, corepages);
8112 }
8113
bde304bd
XQ
8114 /*
8115 * If kernelcore was not specified or kernelcore size is larger
8116 * than totalpages, there is no ZONE_MOVABLE.
8117 */
8118 if (!required_kernelcore || required_kernelcore >= totalpages)
66918dcd 8119 goto out;
2a1e274a
MG
8120
8121 /* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
2a1e274a
MG
8122 usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];
8123
8124restart:
8125 /* Spread kernelcore memory as evenly as possible throughout nodes */
8126 kernelcore_node = required_kernelcore / usable_nodes;
4b0ef1fe 8127 for_each_node_state(nid, N_MEMORY) {
c13291a5
TH
8128 unsigned long start_pfn, end_pfn;
8129
2a1e274a
MG
8130 /*
8131 * Recalculate kernelcore_node if the division per node
8132 * now exceeds what is necessary to satisfy the requested
8133 * amount of memory for the kernel
8134 */
8135 if (required_kernelcore < kernelcore_node)
8136 kernelcore_node = required_kernelcore / usable_nodes;
8137
8138 /*
8139 * As the map is walked, we track how much memory is usable
8140 * by the kernel using kernelcore_remaining. When it is
8141 * 0, the rest of the node is usable by ZONE_MOVABLE
8142 */
8143 kernelcore_remaining = kernelcore_node;
8144
8145 /* Go through each range of PFNs within this node */
c13291a5 8146 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
2a1e274a
MG
8147 unsigned long size_pages;
8148
c13291a5 8149 start_pfn = max(start_pfn, zone_movable_pfn[nid]);
2a1e274a
MG
8150 if (start_pfn >= end_pfn)
8151 continue;
8152
8153 /* Account for what is only usable for kernelcore */
8154 if (start_pfn < usable_startpfn) {
8155 unsigned long kernel_pages;
8156 kernel_pages = min(end_pfn, usable_startpfn)
8157 - start_pfn;
8158
8159 kernelcore_remaining -= min(kernel_pages,
8160 kernelcore_remaining);
8161 required_kernelcore -= min(kernel_pages,
8162 required_kernelcore);
8163
8164 /* Continue if range is now fully accounted */
8165 if (end_pfn <= usable_startpfn) {
8166
8167 /*
8168 * Push zone_movable_pfn to the end so
8169 * that if we have to rebalance
8170 * kernelcore across nodes, we will
8171 * not double account here
8172 */
8173 zone_movable_pfn[nid] = end_pfn;
8174 continue;
8175 }
8176 start_pfn = usable_startpfn;
8177 }
8178
8179 /*
8180 * The usable PFN range for ZONE_MOVABLE is from
8181 * start_pfn->end_pfn. Calculate size_pages as the
8182 * number of pages used as kernelcore
8183 */
8184 size_pages = end_pfn - start_pfn;
8185 if (size_pages > kernelcore_remaining)
8186 size_pages = kernelcore_remaining;
8187 zone_movable_pfn[nid] = start_pfn + size_pages;
8188
8189 /*
8190 * Some kernelcore has been met, update counts and
8191 * break if the kernelcore for this node has been
b8af2941 8192 * satisfied
2a1e274a
MG
8193 */
8194 required_kernelcore -= min(required_kernelcore,
8195 size_pages);
8196 kernelcore_remaining -= size_pages;
8197 if (!kernelcore_remaining)
8198 break;
8199 }
8200 }
8201
8202 /*
8203 * If there is still required_kernelcore, we do another pass with one
8204 * less node in the count. This will push zone_movable_pfn[nid] further
8205 * along on the nodes that still have memory until kernelcore is
b8af2941 8206 * satisfied
2a1e274a
MG
8207 */
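	/*
	 * For example, with two memory nodes of 4 GiB and 2 GiB and
	 * kernelcore=5G, the first pass asks each node for 2.5 GiB; the
	 * small node can only provide 2 GiB, so 0.5 GiB of kernelcore
	 * remains and a second pass (with usable_nodes reduced to 1)
	 * pushes zone_movable_pfn on the large node another 0.5 GiB
	 * along, leaving it with 3 GiB of kernelcore and 1 GiB of
	 * ZONE_MOVABLE.
	 */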
8208 usable_nodes--;
8209 if (usable_nodes && required_kernelcore > usable_nodes)
8210 goto restart;
8211
b2f3eebe 8212out2:
2a1e274a 8213 /* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
ddbc84f3
AP
8214 for (nid = 0; nid < MAX_NUMNODES; nid++) {
8215 unsigned long start_pfn, end_pfn;
8216
2a1e274a
MG
8217 zone_movable_pfn[nid] =
8218 roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);
66918dcd 8219
ddbc84f3
AP
8220 get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
8221 if (zone_movable_pfn[nid] >= end_pfn)
8222 zone_movable_pfn[nid] = 0;
8223 }
8224
20e6926d 8225out:
66918dcd 8226 /* restore the node_state */
4b0ef1fe 8227 node_states[N_MEMORY] = saved_node_state;
2a1e274a
MG
8228}
8229
4b0ef1fe
LJ
8230/* Any regular or high memory on that node? */
8231static void check_for_memory(pg_data_t *pgdat, int nid)
37b07e41 8232{
37b07e41
LS
8233 enum zone_type zone_type;
8234
4b0ef1fe 8235 for (zone_type = 0; zone_type <= ZONE_MOVABLE - 1; zone_type++) {
37b07e41 8236 struct zone *zone = &pgdat->node_zones[zone_type];
b38a8725 8237 if (populated_zone(zone)) {
7b0e0c0e
OS
8238 if (IS_ENABLED(CONFIG_HIGHMEM))
8239 node_set_state(nid, N_HIGH_MEMORY);
8240 if (zone_type <= ZONE_NORMAL)
4b0ef1fe 8241 node_set_state(nid, N_NORMAL_MEMORY);
d0048b0e
BL
8242 break;
8243 }
37b07e41 8244 }
37b07e41
LS
8245}
8246
51930df5 8247/*
f0953a1b 8248 * Some architectures, e.g. ARC, may have ZONE_HIGHMEM below ZONE_NORMAL. For
51930df5
MR
 8249 * such cases we allow max_zone_pfn to be sorted in descending order
8250 */
8251bool __weak arch_has_descending_max_zone_pfns(void)
8252{
8253 return false;
8254}
8255
c713216d 8256/**
9691a071 8257 * free_area_init - Initialise all pg_data_t and zone data
88ca3b94 8258 * @max_zone_pfn: an array of max PFNs for each zone
c713216d
MG
8259 *
8260 * This will call free_area_init_node() for each active node in the system.
7d018176 8261 * Using the page ranges provided by memblock_set_node(), the size of each
c713216d
MG
8262 * zone in each node and their holes is calculated. If the maximum PFN
8263 * between two adjacent zones match, it is assumed that the zone is empty.
8264 * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed
8265 * that arch_max_dma32_pfn has no pages. It is also assumed that a zone
8266 * starts where the previous one ended. For example, ZONE_DMA32 starts
8267 * at arch_max_dma_pfn.
8268 */
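/*
 * For example, an x86-64 kernel typically ends up passing (in pfns)
 * roughly max_zone_pfn[ZONE_DMA] ~= 16 MiB, max_zone_pfn[ZONE_DMA32]
 * ~= 4 GiB and max_zone_pfn[ZONE_NORMAL] == max_pfn, so ZONE_NORMAL
 * starts where ZONE_DMA32 ends and spans the rest of memory.
 */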
9691a071 8269void __init free_area_init(unsigned long *max_zone_pfn)
c713216d 8270{
c13291a5 8271 unsigned long start_pfn, end_pfn;
51930df5
MR
8272 int i, nid, zone;
8273 bool descending;
a6af2bc3 8274
c713216d
MG
8275 /* Record where the zone boundaries are */
8276 memset(arch_zone_lowest_possible_pfn, 0,
8277 sizeof(arch_zone_lowest_possible_pfn));
8278 memset(arch_zone_highest_possible_pfn, 0,
8279 sizeof(arch_zone_highest_possible_pfn));
90cae1fe 8280
fb70c487 8281 start_pfn = PHYS_PFN(memblock_start_of_DRAM());
51930df5 8282 descending = arch_has_descending_max_zone_pfns();
90cae1fe
OH
8283
8284 for (i = 0; i < MAX_NR_ZONES; i++) {
51930df5
MR
8285 if (descending)
8286 zone = MAX_NR_ZONES - i - 1;
8287 else
8288 zone = i;
8289
8290 if (zone == ZONE_MOVABLE)
2a1e274a 8291 continue;
90cae1fe 8292
51930df5
MR
8293 end_pfn = max(max_zone_pfn[zone], start_pfn);
8294 arch_zone_lowest_possible_pfn[zone] = start_pfn;
8295 arch_zone_highest_possible_pfn[zone] = end_pfn;
90cae1fe
OH
8296
8297 start_pfn = end_pfn;
c713216d 8298 }
2a1e274a
MG
8299
8300 /* Find the PFNs that ZONE_MOVABLE begins at in each node */
8301 memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn));
b224ef85 8302 find_zone_movable_pfns_for_nodes();
c713216d 8303
c713216d 8304 /* Print out the zone ranges */
f88dfff5 8305 pr_info("Zone ranges:\n");
2a1e274a
MG
8306 for (i = 0; i < MAX_NR_ZONES; i++) {
8307 if (i == ZONE_MOVABLE)
8308 continue;
f88dfff5 8309 pr_info(" %-8s ", zone_names[i]);
72f0ba02
DR
8310 if (arch_zone_lowest_possible_pfn[i] ==
8311 arch_zone_highest_possible_pfn[i])
f88dfff5 8312 pr_cont("empty\n");
72f0ba02 8313 else
8d29e18a
JG
8314 pr_cont("[mem %#018Lx-%#018Lx]\n",
8315 (u64)arch_zone_lowest_possible_pfn[i]
8316 << PAGE_SHIFT,
8317 ((u64)arch_zone_highest_possible_pfn[i]
a62e2f4f 8318 << PAGE_SHIFT) - 1);
2a1e274a
MG
8319 }
8320
8321 /* Print out the PFNs ZONE_MOVABLE begins at in each node */
f88dfff5 8322 pr_info("Movable zone start for each node\n");
2a1e274a
MG
8323 for (i = 0; i < MAX_NUMNODES; i++) {
8324 if (zone_movable_pfn[i])
8d29e18a
JG
8325 pr_info(" Node %d: %#018Lx\n", i,
8326 (u64)zone_movable_pfn[i] << PAGE_SHIFT);
2a1e274a 8327 }
c713216d 8328
f46edbd1
DW
8329 /*
8330 * Print out the early node map, and initialize the
8331 * subsection-map relative to active online memory ranges to
8332 * enable future "sub-section" extensions of the memory map.
8333 */
f88dfff5 8334 pr_info("Early memory node ranges\n");
f46edbd1 8335 for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
8d29e18a
JG
8336 pr_info(" node %3d: [mem %#018Lx-%#018Lx]\n", nid,
8337 (u64)start_pfn << PAGE_SHIFT,
8338 ((u64)end_pfn << PAGE_SHIFT) - 1);
f46edbd1
DW
8339 subsection_map_init(start_pfn, end_pfn - start_pfn);
8340 }
c713216d
MG
8341
8342 /* Initialise every node */
708614e6 8343 mminit_verify_pageflags_layout();
8ef82866 8344 setup_nr_node_ids();
09f49dca
MH
8345 for_each_node(nid) {
8346 pg_data_t *pgdat;
8347
8348 if (!node_online(nid)) {
8349 pr_info("Initializing node %d as memoryless\n", nid);
8350
8351 /* Allocator not initialized yet */
8352 pgdat = arch_alloc_nodedata(nid);
8353 if (!pgdat) {
8354 pr_err("Cannot allocate %zuB for node %d.\n",
8355 sizeof(*pgdat), nid);
8356 continue;
8357 }
8358 arch_refresh_nodedata(nid, pgdat);
8359 free_area_init_memoryless_node(nid);
8360
8361 /*
8362 * We do not want to confuse userspace by sysfs
8363 * files/directories for node without any memory
8364 * attached to it, so this node is not marked as
8365 * N_MEMORY and not marked online so that no sysfs
8366 * hierarchy will be created via register_one_node for
8367 * it. The pgdat will get fully initialized by
8368 * hotadd_init_pgdat() when memory is hotplugged into
8369 * this node.
8370 */
8371 continue;
8372 }
8373
8374 pgdat = NODE_DATA(nid);
854e8848 8375 free_area_init_node(nid);
37b07e41
LS
8376
8377 /* Any memory on that node */
8378 if (pgdat->node_present_pages)
4b0ef1fe
LJ
8379 node_set_state(nid, N_MEMORY);
8380 check_for_memory(pgdat, nid);
c713216d 8381 }
122e093c
MR
8382
8383 memmap_init();
c713216d 8384}
2a1e274a 8385
a5c6d650
DR
8386static int __init cmdline_parse_core(char *p, unsigned long *core,
8387 unsigned long *percent)
2a1e274a
MG
8388{
8389 unsigned long long coremem;
a5c6d650
DR
8390 char *endptr;
8391
2a1e274a
MG
8392 if (!p)
8393 return -EINVAL;
8394
a5c6d650
DR
8395 /* Value may be a percentage of total memory, otherwise bytes */
8396 coremem = simple_strtoull(p, &endptr, 0);
8397 if (*endptr == '%') {
8398 /* Paranoid check for percent values greater than 100 */
8399 WARN_ON(coremem > 100);
2a1e274a 8400
a5c6d650
DR
8401 *percent = coremem;
8402 } else {
8403 coremem = memparse(p, &p);
8404 /* Paranoid check that UL is enough for the coremem value */
8405 WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX);
2a1e274a 8406
a5c6d650
DR
8407 *core = coremem >> PAGE_SHIFT;
8408 *percent = 0UL;
8409 }
2a1e274a
MG
8410 return 0;
8411}
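/*
 * For example, booting with "kernelcore=512M" stores 131072 pages
 * (assuming 4 KiB pages) in *core, while "kernelcore=30%" stores 30 in
 * *percent and leaves the page count to be derived from totalpages
 * later in find_zone_movable_pfns_for_nodes().
 */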
ed7ed365 8412
7e63efef
MG
8413/*
 8414 * kernelcore=size sets the amount of memory to use for allocations that
8415 * cannot be reclaimed or migrated.
8416 */
8417static int __init cmdline_parse_kernelcore(char *p)
8418{
342332e6
TI
8419 /* parse kernelcore=mirror */
8420 if (parse_option_str(p, "mirror")) {
8421 mirrored_kernelcore = true;
8422 return 0;
8423 }
8424
a5c6d650
DR
8425 return cmdline_parse_core(p, &required_kernelcore,
8426 &required_kernelcore_percent);
7e63efef
MG
8427}
8428
8429/*
 8430 * movablecore=size sets the amount of memory to use for allocations that
8431 * can be reclaimed or migrated.
8432 */
8433static int __init cmdline_parse_movablecore(char *p)
8434{
a5c6d650
DR
8435 return cmdline_parse_core(p, &required_movablecore,
8436 &required_movablecore_percent);
7e63efef
MG
8437}
8438
ed7ed365 8439early_param("kernelcore", cmdline_parse_kernelcore);
7e63efef 8440early_param("movablecore", cmdline_parse_movablecore);
ed7ed365 8441
c3d5f5f0
JL
8442void adjust_managed_page_count(struct page *page, long count)
8443{
9705bea5 8444 atomic_long_add(count, &page_zone(page)->managed_pages);
ca79b0c2 8445 totalram_pages_add(count);
3dcc0571
JL
8446#ifdef CONFIG_HIGHMEM
8447 if (PageHighMem(page))
ca79b0c2 8448 totalhigh_pages_add(count);
3dcc0571 8449#endif
c3d5f5f0 8450}
3dcc0571 8451EXPORT_SYMBOL(adjust_managed_page_count);
c3d5f5f0 8452
e5cb113f 8453unsigned long free_reserved_area(void *start, void *end, int poison, const char *s)
69afade7 8454{
11199692
JL
8455 void *pos;
8456 unsigned long pages = 0;
69afade7 8457
11199692
JL
8458 start = (void *)PAGE_ALIGN((unsigned long)start);
8459 end = (void *)((unsigned long)end & PAGE_MASK);
8460 for (pos = start; pos < end; pos += PAGE_SIZE, pages++) {
0d834328
DH
8461 struct page *page = virt_to_page(pos);
8462 void *direct_map_addr;
8463
8464 /*
8465 * 'direct_map_addr' might be different from 'pos'
8466 * because some architectures' virt_to_page()
8467 * work with aliases. Getting the direct map
8468 * address ensures that we get a _writeable_
8469 * alias for the memset().
8470 */
8471 direct_map_addr = page_address(page);
c746170d
VF
8472 /*
8473 * Perform a kasan-unchecked memset() since this memory
8474 * has not been initialized.
8475 */
8476 direct_map_addr = kasan_reset_tag(direct_map_addr);
dbe67df4 8477 if ((unsigned int)poison <= 0xFF)
0d834328
DH
8478 memset(direct_map_addr, poison, PAGE_SIZE);
8479
8480 free_reserved_page(page);
69afade7
JL
8481 }
8482
8483 if (pages && s)
ff7ed9e4 8484 pr_info("Freeing %s memory: %ldK\n", s, K(pages));
69afade7
JL
8485
8486 return pages;
8487}
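/*
 * A typical caller is free_initmem_default(), which does roughly
 *
 *	free_reserved_area(&__init_begin, &__init_end, poison,
 *			   "unused kernel image");
 *
 * to hand the .init sections back to the buddy allocator at late boot.
 */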
8488
1f9d03c5 8489void __init mem_init_print_info(void)
7ee3d4e8
JL
8490{
8491 unsigned long physpages, codesize, datasize, rosize, bss_size;
8492 unsigned long init_code_size, init_data_size;
8493
8494 physpages = get_num_physpages();
8495 codesize = _etext - _stext;
8496 datasize = _edata - _sdata;
8497 rosize = __end_rodata - __start_rodata;
8498 bss_size = __bss_stop - __bss_start;
8499 init_data_size = __init_end - __init_begin;
8500 init_code_size = _einittext - _sinittext;
8501
8502 /*
8503 * Detect special cases and adjust section sizes accordingly:
8504 * 1) .init.* may be embedded into .data sections
8505 * 2) .init.text.* may be out of [__init_begin, __init_end],
8506 * please refer to arch/tile/kernel/vmlinux.lds.S.
8507 * 3) .rodata.* may be embedded into .text or .data sections.
8508 */
8509#define adj_init_size(start, end, size, pos, adj) \
b8af2941 8510 do { \
ca831f29 8511 if (&start[0] <= &pos[0] && &pos[0] < &end[0] && size > adj) \
b8af2941
PK
8512 size -= adj; \
8513 } while (0)
7ee3d4e8
JL
8514
8515 adj_init_size(__init_begin, __init_end, init_data_size,
8516 _sinittext, init_code_size);
8517 adj_init_size(_stext, _etext, codesize, _sinittext, init_code_size);
8518 adj_init_size(_sdata, _edata, datasize, __init_begin, init_data_size);
8519 adj_init_size(_stext, _etext, codesize, __start_rodata, rosize);
8520 adj_init_size(_sdata, _edata, datasize, __start_rodata, rosize);
8521
8522#undef adj_init_size
8523
756a025f 8524 pr_info("Memory: %luK/%luK available (%luK kernel code, %luK rwdata, %luK rodata, %luK init, %luK bss, %luK reserved, %luK cma-reserved"
7ee3d4e8 8525#ifdef CONFIG_HIGHMEM
756a025f 8526 ", %luK highmem"
7ee3d4e8 8527#endif
1f9d03c5 8528 ")\n",
ff7ed9e4 8529 K(nr_free_pages()), K(physpages),
c940e020
ML
8530 codesize / SZ_1K, datasize / SZ_1K, rosize / SZ_1K,
8531 (init_data_size + init_code_size) / SZ_1K, bss_size / SZ_1K,
ff7ed9e4
ML
8532 K(physpages - totalram_pages() - totalcma_pages),
8533 K(totalcma_pages)
7ee3d4e8 8534#ifdef CONFIG_HIGHMEM
ff7ed9e4 8535 , K(totalhigh_pages())
7ee3d4e8 8536#endif
1f9d03c5 8537 );
7ee3d4e8
JL
8538}
8539
0e0b864e 8540/**
88ca3b94
RD
8541 * set_dma_reserve - set the specified number of pages reserved in the first zone
8542 * @new_dma_reserve: The number of pages to mark reserved
0e0b864e 8543 *
013110a7 8544 * The per-cpu batchsize and zone watermarks are determined by managed_pages.
0e0b864e
MG
8545 * In the DMA zone, a significant percentage may be consumed by kernel image
8546 * and other unfreeable allocations which can skew the watermarks badly. This
88ca3b94
RD
8547 * function may optionally be used to account for unfreeable pages in the
8548 * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and
8549 * smaller per-cpu batchsize.
0e0b864e
MG
8550 */
8551void __init set_dma_reserve(unsigned long new_dma_reserve)
8552{
8553 dma_reserve = new_dma_reserve;
8554}
8555
005fd4bb 8556static int page_alloc_cpu_dead(unsigned int cpu)
1da177e4 8557{
04f8cfea 8558 struct zone *zone;
1da177e4 8559
005fd4bb 8560 lru_add_drain_cpu(cpu);
adb11e78 8561 mlock_page_drain_remote(cpu);
005fd4bb 8562 drain_pages(cpu);
9f8f2172 8563
005fd4bb
SAS
8564 /*
8565 * Spill the event counters of the dead processor
 8566 * into the current processor's event counters.
8567 * This artificially elevates the count of the current
8568 * processor.
8569 */
8570 vm_events_fold_cpu(cpu);
9f8f2172 8571
005fd4bb
SAS
8572 /*
8573 * Zero the differential counters of the dead processor
8574 * so that the vm statistics are consistent.
8575 *
8576 * This is only okay since the processor is dead and cannot
8577 * race with what we are doing.
8578 */
8579 cpu_vm_stats_fold(cpu);
04f8cfea
MG
8580
8581 for_each_populated_zone(zone)
8582 zone_pcp_update(zone, 0);
8583
8584 return 0;
8585}
8586
8587static int page_alloc_cpu_online(unsigned int cpu)
8588{
8589 struct zone *zone;
8590
8591 for_each_populated_zone(zone)
8592 zone_pcp_update(zone, 1);
005fd4bb 8593 return 0;
1da177e4 8594}
1da177e4 8595
e03a5125
NP
8596#ifdef CONFIG_NUMA
8597int hashdist = HASHDIST_DEFAULT;
8598
8599static int __init set_hashdist(char *str)
8600{
8601 if (!str)
8602 return 0;
8603 hashdist = simple_strtoul(str, &str, 0);
8604 return 1;
8605}
8606__setup("hashdist=", set_hashdist);
8607#endif
8608
1da177e4
LT
8609void __init page_alloc_init(void)
8610{
005fd4bb
SAS
8611 int ret;
8612
e03a5125
NP
8613#ifdef CONFIG_NUMA
8614 if (num_node_state(N_MEMORY) == 1)
8615 hashdist = 0;
8616#endif
8617
04f8cfea
MG
8618 ret = cpuhp_setup_state_nocalls(CPUHP_PAGE_ALLOC,
8619 "mm/page_alloc:pcp",
8620 page_alloc_cpu_online,
005fd4bb
SAS
8621 page_alloc_cpu_dead);
8622 WARN_ON(ret < 0);
1da177e4
LT
8623}
8624
cb45b0e9 8625/*
34b10060 8626 * calculate_totalreserve_pages - called when sysctl_lowmem_reserve_ratio
cb45b0e9
HA
8627 * or min_free_kbytes changes.
8628 */
8629static void calculate_totalreserve_pages(void)
8630{
8631 struct pglist_data *pgdat;
8632 unsigned long reserve_pages = 0;
2f6726e5 8633 enum zone_type i, j;
cb45b0e9
HA
8634
8635 for_each_online_pgdat(pgdat) {
281e3726
MG
8636
8637 pgdat->totalreserve_pages = 0;
8638
cb45b0e9
HA
8639 for (i = 0; i < MAX_NR_ZONES; i++) {
8640 struct zone *zone = pgdat->node_zones + i;
3484b2de 8641 long max = 0;
9705bea5 8642 unsigned long managed_pages = zone_managed_pages(zone);
cb45b0e9
HA
8643
8644 /* Find valid and maximum lowmem_reserve in the zone */
8645 for (j = i; j < MAX_NR_ZONES; j++) {
8646 if (zone->lowmem_reserve[j] > max)
8647 max = zone->lowmem_reserve[j];
8648 }
8649
41858966
MG
8650 /* we treat the high watermark as reserved pages. */
8651 max += high_wmark_pages(zone);
cb45b0e9 8652
3d6357de
AK
8653 if (max > managed_pages)
8654 max = managed_pages;
a8d01437 8655
281e3726 8656 pgdat->totalreserve_pages += max;
a8d01437 8657
cb45b0e9
HA
8658 reserve_pages += max;
8659 }
8660 }
8661 totalreserve_pages = reserve_pages;
8662}
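/*
 * For example, a zone whose high watermark is 10000 pages and whose
 * largest lowmem_reserve[] entry is 16384 contributes 26384 pages to
 * totalreserve_pages (capped at the zone's managed pages).
 */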
8663
1da177e4
LT
8664/*
8665 * setup_per_zone_lowmem_reserve - called whenever
34b10060 8666 * sysctl_lowmem_reserve_ratio changes. Ensures that each zone
1da177e4
LT
 8667 * has a correct number of reserved pages, so an adequate number of
8668 * pages are left in the zone after a successful __alloc_pages().
8669 */
8670static void setup_per_zone_lowmem_reserve(void)
8671{
8672 struct pglist_data *pgdat;
470c61d7 8673 enum zone_type i, j;
1da177e4 8674
ec936fc5 8675 for_each_online_pgdat(pgdat) {
470c61d7
LS
8676 for (i = 0; i < MAX_NR_ZONES - 1; i++) {
8677 struct zone *zone = &pgdat->node_zones[i];
8678 int ratio = sysctl_lowmem_reserve_ratio[i];
8679 bool clear = !ratio || !zone_managed_pages(zone);
8680 unsigned long managed_pages = 0;
8681
8682 for (j = i + 1; j < MAX_NR_ZONES; j++) {
f7ec1044
LS
8683 struct zone *upper_zone = &pgdat->node_zones[j];
8684
8685 managed_pages += zone_managed_pages(upper_zone);
470c61d7 8686
f7ec1044
LS
8687 if (clear)
8688 zone->lowmem_reserve[j] = 0;
8689 else
470c61d7 8690 zone->lowmem_reserve[j] = managed_pages / ratio;
1da177e4
LT
8691 }
8692 }
8693 }
cb45b0e9
HA
8694
8695 /* update totalreserve_pages */
8696 calculate_totalreserve_pages();
1da177e4
LT
8697}
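/*
 * For example, with the default ratio of 256 for ZONE_DMA32 and 16 GiB
 * (4194304 pages) of ZONE_NORMAL above it, ZONE_DMA32 ends up with
 * lowmem_reserve[ZONE_NORMAL] = 4194304 / 256 = 16384 pages (64 MiB)
 * of extra headroom that a ZONE_NORMAL-targeted allocation must leave
 * free before it is allowed to fall back into ZONE_DMA32.
 */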
8698
cfd3da1e 8699static void __setup_per_zone_wmarks(void)
1da177e4
LT
8700{
8701 unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
8702 unsigned long lowmem_pages = 0;
8703 struct zone *zone;
8704 unsigned long flags;
8705
8706 /* Calculate total number of !ZONE_HIGHMEM pages */
8707 for_each_zone(zone) {
8708 if (!is_highmem(zone))
9705bea5 8709 lowmem_pages += zone_managed_pages(zone);
1da177e4
LT
8710 }
8711
8712 for_each_zone(zone) {
ac924c60
AM
8713 u64 tmp;
8714
1125b4e3 8715 spin_lock_irqsave(&zone->lock, flags);
9705bea5 8716 tmp = (u64)pages_min * zone_managed_pages(zone);
ac924c60 8717 do_div(tmp, lowmem_pages);
1da177e4
LT
8718 if (is_highmem(zone)) {
8719 /*
669ed175
NP
8720 * __GFP_HIGH and PF_MEMALLOC allocations usually don't
8721 * need highmem pages, so cap pages_min to a small
8722 * value here.
8723 *
41858966 8724 * The WMARK_HIGH-WMARK_LOW and (WMARK_LOW-WMARK_MIN)
8bb4e7a2 8725 * deltas control async page reclaim, and so should
669ed175 8726 * not be capped for highmem.
1da177e4 8727 */
90ae8d67 8728 unsigned long min_pages;
1da177e4 8729
9705bea5 8730 min_pages = zone_managed_pages(zone) / 1024;
90ae8d67 8731 min_pages = clamp(min_pages, SWAP_CLUSTER_MAX, 128UL);
a9214443 8732 zone->_watermark[WMARK_MIN] = min_pages;
1da177e4 8733 } else {
669ed175
NP
8734 /*
8735 * If it's a lowmem zone, reserve a number of pages
1da177e4
LT
8736 * proportionate to the zone's size.
8737 */
a9214443 8738 zone->_watermark[WMARK_MIN] = tmp;
1da177e4
LT
8739 }
8740
795ae7a0
JW
8741 /*
8742 * Set the kswapd watermarks distance according to the
8743 * scale factor in proportion to available memory, but
8744 * ensure a minimum size on small systems.
8745 */
8746 tmp = max_t(u64, tmp >> 2,
9705bea5 8747 mult_frac(zone_managed_pages(zone),
795ae7a0
JW
8748 watermark_scale_factor, 10000));
8749
aa092591 8750 zone->watermark_boost = 0;
a9214443 8751 zone->_watermark[WMARK_LOW] = min_wmark_pages(zone) + tmp;
c574bbe9
HY
8752 zone->_watermark[WMARK_HIGH] = low_wmark_pages(zone) + tmp;
8753 zone->_watermark[WMARK_PROMO] = high_wmark_pages(zone) + tmp;
49f223a9 8754
1125b4e3 8755 spin_unlock_irqrestore(&zone->lock, flags);
1da177e4 8756 }
cb45b0e9
HA
8757
8758 /* update totalreserve_pages */
8759 calculate_totalreserve_pages();
1da177e4
LT
8760}
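/*
 * For example (4 KiB pages, a single 4 GiB lowmem zone, the default
 * watermark_scale_factor of 10): min_free_kbytes of about 8192 gives
 * pages_min = 2048, so WMARK_MIN = 2048 pages; the low/high delta is
 * max(2048 / 4, 1048576 * 10 / 10000) = 1048 pages, giving
 * WMARK_LOW ~= 3096 and WMARK_HIGH ~= 4144 pages.
 */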
8761
cfd3da1e
MG
8762/**
8763 * setup_per_zone_wmarks - called when min_free_kbytes changes
8764 * or when memory is hot-{added|removed}
8765 *
8766 * Ensures that the watermark[min,low,high] values for each zone are set
8767 * correctly with respect to min_free_kbytes.
8768 */
8769void setup_per_zone_wmarks(void)
8770{
b92ca18e 8771 struct zone *zone;
b93e0f32
MH
8772 static DEFINE_SPINLOCK(lock);
8773
8774 spin_lock(&lock);
cfd3da1e 8775 __setup_per_zone_wmarks();
b93e0f32 8776 spin_unlock(&lock);
b92ca18e
MG
8777
8778 /*
8779 * The watermark size have changed so update the pcpu batch
8780 * and high limits or the limits may be inappropriate.
8781 */
8782 for_each_zone(zone)
04f8cfea 8783 zone_pcp_update(zone, 0);
cfd3da1e
MG
8784}
8785
1da177e4
LT
8786/*
8787 * Initialise min_free_kbytes.
8788 *
8789 * For small machines we want it small (128k min). For large machines
8beeae86 8790 * we want it large (256MB max). But it is not linear, because network
1da177e4
LT
8791 * bandwidth does not increase linearly with machine size. We use
8792 *
b8af2941 8793 * min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
1da177e4
LT
8794 * min_free_kbytes = sqrt(lowmem_kbytes * 16)
8795 *
8796 * which yields
8797 *
8798 * 16MB: 512k
8799 * 32MB: 724k
8800 * 64MB: 1024k
8801 * 128MB: 1448k
8802 * 256MB: 2048k
8803 * 512MB: 2896k
8804 * 1024MB: 4096k
8805 * 2048MB: 5792k
8806 * 4096MB: 8192k
8807 * 8192MB: 11584k
8808 * 16384MB: 16384k
8809 */
bd3400ea 8810void calculate_min_free_kbytes(void)
1da177e4
LT
8811{
8812 unsigned long lowmem_kbytes;
5f12733e 8813 int new_min_free_kbytes;
1da177e4
LT
8814
8815 lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);
5f12733e
MH
8816 new_min_free_kbytes = int_sqrt(lowmem_kbytes * 16);
8817
59d336bd
WS
8818 if (new_min_free_kbytes > user_min_free_kbytes)
8819 min_free_kbytes = clamp(new_min_free_kbytes, 128, 262144);
8820 else
5f12733e
MH
8821 pr_warn("min_free_kbytes is not updated to %d because user defined value %d is preferred\n",
8822 new_min_free_kbytes, user_min_free_kbytes);
59d336bd 8823
bd3400ea
LF
8824}
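/*
 * Note the clamp rarely matters: int_sqrt(lowmem_kbytes * 16) only
 * reaches the 262144 kB (256 MiB) ceiling at roughly 4 TiB of lowmem,
 * and the 128 kB floor only applies below about 1 MiB, so nearly all
 * systems sit on the sqrt curve tabulated in the comment above.
 */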
8825
8826int __meminit init_per_zone_wmark_min(void)
8827{
8828 calculate_min_free_kbytes();
bc75d33f 8829 setup_per_zone_wmarks();
a6cccdc3 8830 refresh_zone_stat_thresholds();
1da177e4 8831 setup_per_zone_lowmem_reserve();
6423aa81
JK
8832
8833#ifdef CONFIG_NUMA
8834 setup_min_unmapped_ratio();
8835 setup_min_slab_ratio();
8836#endif
8837
4aab2be0
VB
8838 khugepaged_min_free_kbytes_update();
8839
1da177e4
LT
8840 return 0;
8841}
e08d3fdf 8842postcore_initcall(init_per_zone_wmark_min)
1da177e4
LT
8843
8844/*
b8af2941 8845 * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
1da177e4
LT
8846 * that we can call two helper functions whenever min_free_kbytes
8847 * changes.
8848 */
cccad5b9 8849int min_free_kbytes_sysctl_handler(struct ctl_table *table, int write,
32927393 8850 void *buffer, size_t *length, loff_t *ppos)
1da177e4 8851{
da8c757b
HP
8852 int rc;
8853
8854 rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
8855 if (rc)
8856 return rc;
8857
5f12733e
MH
8858 if (write) {
8859 user_min_free_kbytes = min_free_kbytes;
bc75d33f 8860 setup_per_zone_wmarks();
5f12733e 8861 }
1da177e4
LT
8862 return 0;
8863}
8864
795ae7a0 8865int watermark_scale_factor_sysctl_handler(struct ctl_table *table, int write,
32927393 8866 void *buffer, size_t *length, loff_t *ppos)
795ae7a0
JW
8867{
8868 int rc;
8869
8870 rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
8871 if (rc)
8872 return rc;
8873
8874 if (write)
8875 setup_per_zone_wmarks();
8876
8877 return 0;
8878}
8879
9614634f 8880#ifdef CONFIG_NUMA
6423aa81 8881static void setup_min_unmapped_ratio(void)
9614634f 8882{
6423aa81 8883 pg_data_t *pgdat;
9614634f 8884 struct zone *zone;
9614634f 8885
a5f5f91d 8886 for_each_online_pgdat(pgdat)
81cbcbc2 8887 pgdat->min_unmapped_pages = 0;
a5f5f91d 8888
9614634f 8889 for_each_zone(zone)
9705bea5
AK
8890 zone->zone_pgdat->min_unmapped_pages += (zone_managed_pages(zone) *
8891 sysctl_min_unmapped_ratio) / 100;
9614634f 8892}
0ff38490 8893
6423aa81
JK
8894
8895int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *table, int write,
32927393 8896 void *buffer, size_t *length, loff_t *ppos)
0ff38490 8897{
0ff38490
CL
8898 int rc;
8899
8d65af78 8900 rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
0ff38490
CL
8901 if (rc)
8902 return rc;
8903
6423aa81
JK
8904 setup_min_unmapped_ratio();
8905
8906 return 0;
8907}
8908
8909static void setup_min_slab_ratio(void)
8910{
8911 pg_data_t *pgdat;
8912 struct zone *zone;
8913
a5f5f91d
MG
8914 for_each_online_pgdat(pgdat)
8915 pgdat->min_slab_pages = 0;
8916
0ff38490 8917 for_each_zone(zone)
9705bea5
AK
8918 zone->zone_pgdat->min_slab_pages += (zone_managed_pages(zone) *
8919 sysctl_min_slab_ratio) / 100;
6423aa81
JK
8920}
8921
8922int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *table, int write,
32927393 8923 void *buffer, size_t *length, loff_t *ppos)
6423aa81
JK
8924{
8925 int rc;
8926
8927 rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
8928 if (rc)
8929 return rc;
8930
8931 setup_min_slab_ratio();
8932
0ff38490
CL
8933 return 0;
8934}
9614634f
CL
8935#endif
8936
1da177e4
LT
8937/*
8938 * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
8939 * proc_dointvec() so that we can call setup_per_zone_lowmem_reserve()
8940 * whenever sysctl_lowmem_reserve_ratio changes.
8941 *
8942 * The reserve ratio obviously has absolutely no relation with the
41858966 8943 * minimum watermarks. The lowmem reserve ratio can only make sense
1da177e4
LT
 8944 * as a function of the boot-time zone sizes.
8945 */
cccad5b9 8946int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *table, int write,
32927393 8947 void *buffer, size_t *length, loff_t *ppos)
1da177e4 8948{
86aaf255
BH
8949 int i;
8950
8d65af78 8951 proc_dointvec_minmax(table, write, buffer, length, ppos);
86aaf255
BH
8952
8953 for (i = 0; i < MAX_NR_ZONES; i++) {
8954 if (sysctl_lowmem_reserve_ratio[i] < 1)
8955 sysctl_lowmem_reserve_ratio[i] = 0;
8956 }
8957
1da177e4
LT
8958 setup_per_zone_lowmem_reserve();
8959 return 0;
8960}
8961
8ad4b1fb 8962/*
74f44822
MG
8963 * percpu_pagelist_high_fraction - changes the pcp->high for each zone on each
8964 * cpu. It is the fraction of total pages in each zone that a hot per cpu
b8af2941 8965 * pagelist can have before it gets flushed back to the buddy allocator.
8ad4b1fb 8966 */
74f44822
MG
8967int percpu_pagelist_high_fraction_sysctl_handler(struct ctl_table *table,
8968 int write, void *buffer, size_t *length, loff_t *ppos)
8ad4b1fb
RS
8969{
8970 struct zone *zone;
74f44822 8971 int old_percpu_pagelist_high_fraction;
8ad4b1fb
RS
8972 int ret;
8973
7cd2b0a3 8974 mutex_lock(&pcp_batch_high_lock);
74f44822 8975 old_percpu_pagelist_high_fraction = percpu_pagelist_high_fraction;
7cd2b0a3 8976
8d65af78 8977 ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
7cd2b0a3
DR
8978 if (!write || ret < 0)
8979 goto out;
8980
8981 /* Sanity checking to avoid pcp imbalance */
74f44822
MG
8982 if (percpu_pagelist_high_fraction &&
8983 percpu_pagelist_high_fraction < MIN_PERCPU_PAGELIST_HIGH_FRACTION) {
8984 percpu_pagelist_high_fraction = old_percpu_pagelist_high_fraction;
7cd2b0a3
DR
8985 ret = -EINVAL;
8986 goto out;
8987 }
8988
8989 /* No change? */
74f44822 8990 if (percpu_pagelist_high_fraction == old_percpu_pagelist_high_fraction)
7cd2b0a3 8991 goto out;
c8e251fa 8992
cb1ef534 8993 for_each_populated_zone(zone)
74f44822 8994 zone_set_pageset_high_and_batch(zone, 0);
7cd2b0a3 8995out:
c8e251fa 8996 mutex_unlock(&pcp_batch_high_lock);
7cd2b0a3 8997 return ret;
8ad4b1fb
RS
8998}
8999
f6f34b43
SD
9000#ifndef __HAVE_ARCH_RESERVED_KERNEL_PAGES
9001/*
9002 * Returns the number of pages that arch has reserved but
9003 * is not known to alloc_large_system_hash().
9004 */
9005static unsigned long __init arch_reserved_kernel_pages(void)
9006{
9007 return 0;
9008}
9009#endif
9010
9017217b
PT
9011/*
9012 * Adaptive scale is meant to reduce sizes of hash tables on large memory
 9013 * machines. As memory size is increased the scale is also increased, but at
 9014 * a slower pace. Starting from ADAPT_SCALE_BASE (64G), every time memory
 9015 * quadruples the scale is increased by one, which means the size of the hash
 9016 * table only doubles, instead of quadrupling as well.
9017 * Because 32-bit systems cannot have large physical memory, where this scaling
9018 * makes sense, it is disabled on such platforms.
9019 */
9020#if __BITS_PER_LONG > 32
9021#define ADAPT_SCALE_BASE (64ul << 30)
9022#define ADAPT_SCALE_SHIFT 2
9023#define ADAPT_SCALE_NPAGES (ADAPT_SCALE_BASE >> PAGE_SHIFT)
9024#endif
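/*
 * For example, on a 512 GiB machine with 4 KiB pages and no explicit
 * high_limit, adapt starts at 64 GiB worth of pages and is quadrupled
 * once to 256 GiB worth, both below the machine's ~134M pages, so scale
 * is bumped twice and the resulting hash table is 4x smaller than plain
 * linear scaling from 64 GiB would have produced.
 */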
9025
1da177e4
LT
9026/*
9027 * allocate a large system hash table from bootmem
9028 * - it is assumed that the hash table must contain an exact power-of-2
9029 * quantity of entries
9030 * - limit is the number of hash buckets, not the total allocation size
9031 */
9032void *__init alloc_large_system_hash(const char *tablename,
9033 unsigned long bucketsize,
9034 unsigned long numentries,
9035 int scale,
9036 int flags,
9037 unsigned int *_hash_shift,
9038 unsigned int *_hash_mask,
31fe62b9
TB
9039 unsigned long low_limit,
9040 unsigned long high_limit)
1da177e4 9041{
31fe62b9 9042 unsigned long long max = high_limit;
1da177e4 9043 unsigned long log2qty, size;
97bab178 9044 void *table;
3749a8f0 9045 gfp_t gfp_flags;
ec11408a 9046 bool virt;
121e6f32 9047 bool huge;
1da177e4
LT
9048
9049 /* allow the kernel cmdline to have a say */
9050 if (!numentries) {
9051 /* round applicable memory size up to nearest megabyte */
04903664 9052 numentries = nr_kernel_pages;
f6f34b43 9053 numentries -= arch_reserved_kernel_pages();
a7e83318
JZ
9054
9055 /* It isn't necessary when PAGE_SIZE >= 1MB */
c940e020
ML
9056 if (PAGE_SIZE < SZ_1M)
9057 numentries = round_up(numentries, SZ_1M / PAGE_SIZE);
1da177e4 9058
9017217b
PT
9059#if __BITS_PER_LONG > 32
9060 if (!high_limit) {
9061 unsigned long adapt;
9062
9063 for (adapt = ADAPT_SCALE_NPAGES; adapt < numentries;
9064 adapt <<= ADAPT_SCALE_SHIFT)
9065 scale++;
9066 }
9067#endif
9068
1da177e4
LT
9069 /* limit to 1 bucket per 2^scale bytes of low memory */
9070 if (scale > PAGE_SHIFT)
9071 numentries >>= (scale - PAGE_SHIFT);
9072 else
9073 numentries <<= (PAGE_SHIFT - scale);
9ab37b8f
PM
9074
9075 /* Make sure we've got at least a 0-order allocation.. */
2c85f51d
JB
9076 if (unlikely(flags & HASH_SMALL)) {
9077 /* Makes no sense without HASH_EARLY */
9078 WARN_ON(!(flags & HASH_EARLY));
9079 if (!(numentries >> *_hash_shift)) {
9080 numentries = 1UL << *_hash_shift;
9081 BUG_ON(!numentries);
9082 }
9083 } else if (unlikely((numentries * bucketsize) < PAGE_SIZE))
9ab37b8f 9084 numentries = PAGE_SIZE / bucketsize;
1da177e4 9085 }
6e692ed3 9086 numentries = roundup_pow_of_two(numentries);
1da177e4
LT
9087
9088 /* limit allocation size to 1/16 total memory by default */
9089 if (max == 0) {
9090 max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
9091 do_div(max, bucketsize);
9092 }
074b8517 9093 max = min(max, 0x80000000ULL);
1da177e4 9094
31fe62b9
TB
9095 if (numentries < low_limit)
9096 numentries = low_limit;
1da177e4
LT
9097 if (numentries > max)
9098 numentries = max;
9099
f0d1b0b3 9100 log2qty = ilog2(numentries);
1da177e4 9101
3749a8f0 9102 gfp_flags = (flags & HASH_ZERO) ? GFP_ATOMIC | __GFP_ZERO : GFP_ATOMIC;
1da177e4 9103 do {
ec11408a 9104 virt = false;
1da177e4 9105 size = bucketsize << log2qty;
ea1f5f37
PT
9106 if (flags & HASH_EARLY) {
9107 if (flags & HASH_ZERO)
26fb3dae 9108 table = memblock_alloc(size, SMP_CACHE_BYTES);
ea1f5f37 9109 else
7e1c4e27
MR
9110 table = memblock_alloc_raw(size,
9111 SMP_CACHE_BYTES);
ec11408a 9112 } else if (get_order(size) >= MAX_ORDER || hashdist) {
f2edd118 9113 table = vmalloc_huge(size, gfp_flags);
ec11408a 9114 virt = true;
084f7e23
ED
9115 if (table)
9116 huge = is_vm_area_hugepages(table);
ea1f5f37 9117 } else {
1037b83b
ED
9118 /*
9119 * If bucketsize is not a power-of-two, we may free
a1dd268c
MG
9120 * some pages at the end of hash table which
9121 * alloc_pages_exact() automatically does
1037b83b 9122 */
ec11408a
NP
9123 table = alloc_pages_exact(size, gfp_flags);
9124 kmemleak_alloc(table, size, 1, gfp_flags);
1da177e4
LT
9125 }
9126 } while (!table && size > PAGE_SIZE && --log2qty);
9127
9128 if (!table)
9129 panic("Failed to allocate %s hash table\n", tablename);
9130
ec11408a
NP
9131 pr_info("%s hash table entries: %ld (order: %d, %lu bytes, %s)\n",
9132 tablename, 1UL << log2qty, ilog2(size) - PAGE_SHIFT, size,
121e6f32 9133 virt ? (huge ? "vmalloc hugepage" : "vmalloc") : "linear");
1da177e4
LT
9134
9135 if (_hash_shift)
9136 *_hash_shift = log2qty;
9137 if (_hash_mask)
9138 *_hash_mask = (1 << log2qty) - 1;
9139
9140 return table;
9141}
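/*
 * A representative caller is the inode cache, which does something
 * like:
 *
 *	inode_hashtable = alloc_large_system_hash("Inode-cache",
 *					sizeof(struct hlist_head),
 *					ihash_entries, 14, HASH_ZERO,
 *					&i_hash_shift, &i_hash_mask,
 *					0, 0);
 *
 * i.e. one bucket per 2^14 bytes of low memory by default, zeroed,
 * with the resulting shift and mask reported back for hashing.
 */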
a117e66e 9142
8df995f6 9143#ifdef CONFIG_CONTIG_ALLOC
a1394bdd
MK
9144#if defined(CONFIG_DYNAMIC_DEBUG) || \
9145 (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
9146/* Usage: See admin-guide/dynamic-debug-howto.rst */
9147static void alloc_contig_dump_pages(struct list_head *page_list)
9148{
9149 DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, "migrate failure");
9150
9151 if (DYNAMIC_DEBUG_BRANCH(descriptor)) {
9152 struct page *page;
9153
9154 dump_stack();
9155 list_for_each_entry(page, page_list, lru)
9156 dump_page(page, "migration failure");
9157 }
9158}
9159#else
9160static inline void alloc_contig_dump_pages(struct list_head *page_list)
9161{
9162}
9163#endif
9164
041d3a8c 9165/* [start, end) must belong to a single zone. */
b2c9e2fb 9166int __alloc_contig_migrate_range(struct compact_control *cc,
bb13ffeb 9167 unsigned long start, unsigned long end)
041d3a8c
MN
9168{
9169 /* This function is based on compact_zone() from compaction.c. */
730ec8c0 9170 unsigned int nr_reclaimed;
041d3a8c
MN
9171 unsigned long pfn = start;
9172 unsigned int tries = 0;
9173 int ret = 0;
8b94e0b8
JK
9174 struct migration_target_control mtc = {
9175 .nid = zone_to_nid(cc->zone),
9176 .gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
9177 };
041d3a8c 9178
361a2a22 9179 lru_cache_disable();
041d3a8c 9180
bb13ffeb 9181 while (pfn < end || !list_empty(&cc->migratepages)) {
041d3a8c
MN
9182 if (fatal_signal_pending(current)) {
9183 ret = -EINTR;
9184 break;
9185 }
9186
bb13ffeb
MG
9187 if (list_empty(&cc->migratepages)) {
9188 cc->nr_migratepages = 0;
c2ad7a1f
OS
9189 ret = isolate_migratepages_range(cc, pfn, end);
9190 if (ret && ret != -EAGAIN)
041d3a8c 9191 break;
c2ad7a1f 9192 pfn = cc->migrate_pfn;
041d3a8c
MN
9193 tries = 0;
9194 } else if (++tries == 5) {
c8e28b47 9195 ret = -EBUSY;
041d3a8c
MN
9196 break;
9197 }
9198
beb51eaa
MK
9199 nr_reclaimed = reclaim_clean_pages_from_list(cc->zone,
9200 &cc->migratepages);
9201 cc->nr_migratepages -= nr_reclaimed;
02c6de8d 9202
8b94e0b8 9203 ret = migrate_pages(&cc->migratepages, alloc_migration_target,
5ac95884 9204 NULL, (unsigned long)&mtc, cc->mode, MR_CONTIG_RANGE, NULL);
c8e28b47
OS
9205
9206 /*
9207 * On -ENOMEM, migrate_pages() bails out right away. It is pointless
9208 * to retry again over this error, so do the same here.
9209 */
9210 if (ret == -ENOMEM)
9211 break;
041d3a8c 9212 }
d479960e 9213
361a2a22 9214 lru_cache_enable();
2a6f5124 9215 if (ret < 0) {
3f913fc5 9216 if (!(cc->gfp_mask & __GFP_NOWARN) && ret == -EBUSY)
151e084a 9217 alloc_contig_dump_pages(&cc->migratepages);
2a6f5124
SP
9218 putback_movable_pages(&cc->migratepages);
9219 return ret;
9220 }
9221 return 0;
041d3a8c
MN
9222}
9223
9224/**
9225 * alloc_contig_range() -- tries to allocate given range of pages
9226 * @start: start PFN to allocate
9227 * @end: one-past-the-last PFN to allocate
f0953a1b 9228 * @migratetype: migratetype of the underlying pageblocks (either
0815f3d8
MN
9229 * #MIGRATE_MOVABLE or #MIGRATE_CMA). All pageblocks
9230 * in range must have the same migratetype and it must
9231 * be either of the two.
ca96b625 9232 * @gfp_mask: GFP mask to use during compaction
041d3a8c 9233 *
11ac3e87
ZY
9234 * The PFN range does not have to be pageblock aligned. The PFN range must
9235 * belong to a single zone.
041d3a8c 9236 *
2c7452a0
MK
9237 * The first thing this routine does is attempt to MIGRATE_ISOLATE all
9238 * pageblocks in the range. Once isolated, the pageblocks should not
9239 * be modified by others.
041d3a8c 9240 *
a862f68a 9241 * Return: zero on success or negative error code. On success all
041d3a8c
MN
9242 * pages which PFN is in [start, end) are allocated for the caller and
9243 * need to be freed with free_contig_range().
9244 */
0815f3d8 9245int alloc_contig_range(unsigned long start, unsigned long end,
ca96b625 9246 unsigned migratetype, gfp_t gfp_mask)
041d3a8c 9247{
041d3a8c 9248 unsigned long outer_start, outer_end;
b2c9e2fb 9249 int order;
d00181b9 9250 int ret = 0;
041d3a8c 9251
bb13ffeb
MG
9252 struct compact_control cc = {
9253 .nr_migratepages = 0,
9254 .order = -1,
9255 .zone = page_zone(pfn_to_page(start)),
e0b9daeb 9256 .mode = MIGRATE_SYNC,
bb13ffeb 9257 .ignore_skip_hint = true,
2583d671 9258 .no_set_skip_hint = true,
7dea19f9 9259 .gfp_mask = current_gfp_context(gfp_mask),
b06eda09 9260 .alloc_contig = true,
bb13ffeb
MG
9261 };
9262 INIT_LIST_HEAD(&cc.migratepages);
9263
041d3a8c
MN
9264 /*
9265 * What we do here is we mark all pageblocks in range as
9266 * MIGRATE_ISOLATE. Because pageblock and max order pages may
 9267 * have different sizes, and due to the way the page allocator
b2c9e2fb 9268 * works, start_isolate_page_range() has special handling for this.
041d3a8c
MN
9269 *
9270 * Once the pageblocks are marked as MIGRATE_ISOLATE, we
 9271 * migrate the pages from an unaligned range (i.e. pages that
b2c9e2fb 9272 * we are interested in). This will put all the pages in
041d3a8c
MN
 9273 * range back to the page allocator as MIGRATE_ISOLATE.
9274 *
9275 * When this is done, we take the pages in range from page
9276 * allocator removing them from the buddy system. This way
9277 * page allocator will never consider using them.
9278 *
9279 * This lets us mark the pageblocks back as
9280 * MIGRATE_CMA/MIGRATE_MOVABLE so that free pages in the
9281 * aligned range but not in the unaligned, original range are
9282 * put back to page allocator so that buddy can use them.
9283 */
9284
6e263fff 9285 ret = start_isolate_page_range(start, end, migratetype, 0, gfp_mask);
3fa0c7c7 9286 if (ret)
b2c9e2fb 9287 goto done;
041d3a8c 9288
7612921f
VB
9289 drain_all_pages(cc.zone);
9290
8ef5849f
JK
9291 /*
9292 * In case of -EBUSY, we'd like to know which page causes problem.
63cd4489
MK
9293 * So, just fall through. test_pages_isolated() has a tracepoint
9294 * which will report the busy page.
9295 *
9296 * It is possible that busy pages could become available before
9297 * the call to test_pages_isolated, and the range will actually be
9298 * allocated. So, if we fall through be sure to clear ret so that
9299 * -EBUSY is not accidentally used or returned to caller.
8ef5849f 9300 */
bb13ffeb 9301 ret = __alloc_contig_migrate_range(&cc, start, end);
8ef5849f 9302 if (ret && ret != -EBUSY)
041d3a8c 9303 goto done;
68d68ff6 9304 ret = 0;
041d3a8c
MN
9305
9306 /*
b2c9e2fb 9307 * Pages from [start, end) are within a pageblock_nr_pages
041d3a8c
MN
9308 * aligned blocks that are marked as MIGRATE_ISOLATE. What's
9309 * more, all pages in [start, end) are free in page allocator.
9310 * What we are going to do is to allocate all pages from
9311 * [start, end) (that is remove them from page allocator).
9312 *
9313 * The only problem is that pages at the beginning and at the
9314 * end of interesting range may be not aligned with pages that
9315 * page allocator holds, ie. they can be part of higher order
9316 * pages. Because of this, we reserve the bigger range and
9317 * once this is done free the pages we are not interested in.
9318 *
9319 * We don't have to hold zone->lock here because the pages are
9320 * isolated thus they won't get removed from buddy.
9321 */
9322
041d3a8c
MN
9323 order = 0;
9324 outer_start = start;
9325 while (!PageBuddy(pfn_to_page(outer_start))) {
9326 if (++order >= MAX_ORDER) {
8ef5849f
JK
9327 outer_start = start;
9328 break;
041d3a8c
MN
9329 }
9330 outer_start &= ~0UL << order;
9331 }
9332
8ef5849f 9333 if (outer_start != start) {
ab130f91 9334 order = buddy_order(pfn_to_page(outer_start));
8ef5849f
JK
9335
9336 /*
9337 * outer_start page could be small order buddy page and
9338 * it doesn't include start page. Adjust outer_start
9339 * in this case to report failed page properly
9340 * on tracepoint in test_pages_isolated()
9341 */
9342 if (outer_start + (1UL << order) <= start)
9343 outer_start = start;
9344 }
9345
041d3a8c 9346 /* Make sure the range is really isolated. */
756d25be 9347 if (test_pages_isolated(outer_start, end, 0)) {
041d3a8c
MN
9348 ret = -EBUSY;
9349 goto done;
9350 }
9351
49f223a9 9352 /* Grab isolated pages from freelists. */
bb13ffeb 9353 outer_end = isolate_freepages_range(&cc, outer_start, end);
041d3a8c
MN
9354 if (!outer_end) {
9355 ret = -EBUSY;
9356 goto done;
9357 }
9358
9359 /* Free head and tail (if any) */
9360 if (start != outer_start)
9361 free_contig_range(outer_start, start - outer_start);
9362 if (end != outer_end)
9363 free_contig_range(end, outer_end - end);
9364
9365done:
6e263fff 9366 undo_isolate_page_range(start, end, migratetype);
041d3a8c
MN
9367 return ret;
9368}
255f5985 9369EXPORT_SYMBOL(alloc_contig_range);
5e27a2df
AK
9370
9371static int __alloc_contig_pages(unsigned long start_pfn,
9372 unsigned long nr_pages, gfp_t gfp_mask)
9373{
9374 unsigned long end_pfn = start_pfn + nr_pages;
9375
9376 return alloc_contig_range(start_pfn, end_pfn, MIGRATE_MOVABLE,
9377 gfp_mask);
9378}
9379
9380static bool pfn_range_valid_contig(struct zone *z, unsigned long start_pfn,
9381 unsigned long nr_pages)
9382{
9383 unsigned long i, end_pfn = start_pfn + nr_pages;
9384 struct page *page;
9385
9386 for (i = start_pfn; i < end_pfn; i++) {
9387 page = pfn_to_online_page(i);
9388 if (!page)
9389 return false;
9390
9391 if (page_zone(page) != z)
9392 return false;
9393
9394 if (PageReserved(page))
9395 return false;
5e27a2df
AK
9396 }
9397 return true;
9398}
9399
9400static bool zone_spans_last_pfn(const struct zone *zone,
9401 unsigned long start_pfn, unsigned long nr_pages)
9402{
9403 unsigned long last_pfn = start_pfn + nr_pages - 1;
9404
9405 return zone_spans_pfn(zone, last_pfn);
9406}
9407
9408/**
9409 * alloc_contig_pages() -- tries to find and allocate contiguous range of pages
9410 * @nr_pages: Number of contiguous pages to allocate
9411 * @gfp_mask: GFP mask to limit search and used during compaction
9412 * @nid: Target node
9413 * @nodemask: Mask for other possible nodes
9414 *
9415 * This routine is a wrapper around alloc_contig_range(). It scans over zones
9416 * on an applicable zonelist to find a contiguous pfn range which can then be
9417 * tried for allocation with alloc_contig_range(). This routine is intended
9418 * for allocation requests which can not be fulfilled with the buddy allocator.
9419 *
9420 * The allocated memory is always aligned to a page boundary. If nr_pages is a
eaab8e75
AK
 9421 * power of two, then the allocated range is also guaranteed to be aligned to
 9422 * the same nr_pages (e.g. a 1GB request would be aligned to 1GB).
5e27a2df
AK
9423 *
9424 * Allocated pages can be freed with free_contig_range() or by manually calling
9425 * __free_page() on each allocated page.
9426 *
9427 * Return: pointer to contiguous pages on success, or NULL if not successful.
9428 */
9429struct page *alloc_contig_pages(unsigned long nr_pages, gfp_t gfp_mask,
9430 int nid, nodemask_t *nodemask)
9431{
9432 unsigned long ret, pfn, flags;
9433 struct zonelist *zonelist;
9434 struct zone *zone;
9435 struct zoneref *z;
9436
9437 zonelist = node_zonelist(nid, gfp_mask);
9438 for_each_zone_zonelist_nodemask(zone, z, zonelist,
9439 gfp_zone(gfp_mask), nodemask) {
9440 spin_lock_irqsave(&zone->lock, flags);
9441
9442 pfn = ALIGN(zone->zone_start_pfn, nr_pages);
9443 while (zone_spans_last_pfn(zone, pfn, nr_pages)) {
9444 if (pfn_range_valid_contig(zone, pfn, nr_pages)) {
9445 /*
9446 * We release the zone lock here because
9447 * alloc_contig_range() will also lock the zone
9448 * at some point. If there's an allocation
9449 * spinning on this lock, it may win the race
9450 * and cause alloc_contig_range() to fail...
9451 */
9452 spin_unlock_irqrestore(&zone->lock, flags);
9453 ret = __alloc_contig_pages(pfn, nr_pages,
9454 gfp_mask);
9455 if (!ret)
9456 return pfn_to_page(pfn);
9457 spin_lock_irqsave(&zone->lock, flags);
9458 }
9459 pfn += nr_pages;
9460 }
9461 spin_unlock_irqrestore(&zone->lock, flags);
9462 }
9463 return NULL;
9464}
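/*
 * hugetlb's gigantic page allocation is the typical user, calling
 * roughly alloc_contig_pages(pages_per_huge_page(h), gfp_mask, nid,
 * nodemask), e.g. 262144 pages for a 1 GiB page with 4 KiB base pages,
 * and it frees the range again with free_contig_range() when the page
 * is later released.
 */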
4eb0716e 9465#endif /* CONFIG_CONTIG_ALLOC */
041d3a8c 9466
78fa5150 9467void free_contig_range(unsigned long pfn, unsigned long nr_pages)
041d3a8c 9468{
78fa5150 9469 unsigned long count = 0;
bcc2b02f
MS
9470
9471 for (; nr_pages--; pfn++) {
9472 struct page *page = pfn_to_page(pfn);
9473
9474 count += page_count(page) != 1;
9475 __free_page(page);
9476 }
78fa5150 9477 WARN(count != 0, "%lu pages are still in use!\n", count);
041d3a8c 9478}
255f5985 9479EXPORT_SYMBOL(free_contig_range);
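/*
 * Illustrative sketch only (not part of page_alloc.c): how a caller that
 * needs a large physically contiguous buffer might pair alloc_contig_pages()
 * with free_contig_range(). Requires CONFIG_CONTIG_ALLOC; the function and
 * variable names below are hypothetical.
 */
static struct page *example_grab_contig_block(unsigned long nr_pages, int nid)
{
	struct page *pages;

	/* GFP_KERNEL permits reclaim and compaction while a range is searched. */
	pages = alloc_contig_pages(nr_pages, GFP_KERNEL, nid, NULL);
	if (!pages)
		return NULL;	/* no suitable contiguous pfn range was found */

	/* The pages behave like ordinary allocations until released. */
	return pages;
}

static void example_release_contig_block(struct page *pages, unsigned long nr_pages)
{
	/* Pairs with example_grab_contig_block(); frees each page back to the buddy. */
	free_contig_range(page_to_pfn(pages), nr_pages);
}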
041d3a8c 9480
ec6e8c7e
VB
9481/*
9482 * Effectively disable pcplists for the zone by setting the high limit to 0
9483 * and draining all cpus. A concurrent page freeing on another CPU that's about
9484 * to put the page on the pcplist will either finish before the drain, in which
9485 * case the page will be drained, or observe the new high limit and skip the pcplist.
9486 *
9487 * Must be paired with a call to zone_pcp_enable().
9488 */
9489void zone_pcp_disable(struct zone *zone)
9490{
9491 mutex_lock(&pcp_batch_high_lock);
9492 __zone_set_pageset_high_and_batch(zone, 0, 1);
9493 __drain_all_pages(zone, true);
9494}
9495
9496void zone_pcp_enable(struct zone *zone)
9497{
9498 __zone_set_pageset_high_and_batch(zone, zone->pageset_high, zone->pageset_batch);
9499 mutex_unlock(&pcp_batch_high_lock);
9500}
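/*
 * Illustrative sketch only: the intended pairing of zone_pcp_disable() and
 * zone_pcp_enable(), roughly the pattern used by memory offlining. The work
 * done in between is a placeholder.
 */
static void example_with_pcplists_disabled(struct zone *zone)
{
	zone_pcp_disable(zone);		/* high = 0, batch = 1, all CPUs drained */

	/*
	 * While pcplists are disabled, freed pages go straight to the buddy
	 * free lists, so page isolation cannot race with stale pcplist entries.
	 */

	zone_pcp_enable(zone);		/* restore the zone's saved high/batch values */
}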
9501
340175b7
JL
9502void zone_pcp_reset(struct zone *zone)
9503{
5a883813 9504 int cpu;
28f836b6 9505 struct per_cpu_zonestat *pzstats;
340175b7 9506
28f836b6 9507 if (zone->per_cpu_pageset != &boot_pageset) {
5a883813 9508 for_each_online_cpu(cpu) {
28f836b6
MG
9509 pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu);
9510 drain_zonestat(zone, pzstats);
5a883813 9511 }
28f836b6 9512 free_percpu(zone->per_cpu_pageset);
28f836b6 9513 zone->per_cpu_pageset = &boot_pageset;
022e7fa0
ML
9514 if (zone->per_cpu_zonestats != &boot_zonestats) {
9515 free_percpu(zone->per_cpu_zonestats);
9516 zone->per_cpu_zonestats = &boot_zonestats;
9517 }
340175b7 9518 }
340175b7
JL
9519}
9520
6dcd73d7 9521#ifdef CONFIG_MEMORY_HOTREMOVE
0c0e6195 9522/*
257bea71
DH
9523 * The range must lie within a single zone, must not contain holes, must span
9524 * full sections, and all of its pages must be isolated before calling this function.
0c0e6195 9525 */
257bea71 9526void __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
0c0e6195 9527{
257bea71 9528 unsigned long pfn = start_pfn;
0c0e6195
KH
9529 struct page *page;
9530 struct zone *zone;
0ee5f4f3 9531 unsigned int order;
0c0e6195 9532 unsigned long flags;
5557c766 9533
2d070eab 9534 offline_mem_sections(pfn, end_pfn);
0c0e6195
KH
9535 zone = page_zone(pfn_to_page(pfn));
9536 spin_lock_irqsave(&zone->lock, flags);
0c0e6195 9537 while (pfn < end_pfn) {
0c0e6195 9538 page = pfn_to_page(pfn);
b023f468
WC
9539 /*
9540 * The HWPoisoned page may not be in the buddy system, and
9541 * its page_count() is not 0.
9542 */
9543 if (unlikely(!PageBuddy(page) && PageHWPoison(page))) {
9544 pfn++;
b023f468
WC
9545 continue;
9546 }
aa218795
DH
9547 /*
9548 * At this point all remaining PageOffline() pages have a
9549 * reference count of 0 and can simply be skipped.
9550 */
9551 if (PageOffline(page)) {
9552 BUG_ON(page_count(page));
9553 BUG_ON(PageBuddy(page));
9554 pfn++;
aa218795
DH
9555 continue;
9556 }
b023f468 9557
0c0e6195
KH
9558 BUG_ON(page_count(page));
9559 BUG_ON(!PageBuddy(page));
ab130f91 9560 order = buddy_order(page);
6ab01363 9561 del_page_from_free_list(page, zone, order);
0c0e6195
KH
9562 pfn += (1 << order);
9563 }
9564 spin_unlock_irqrestore(&zone->lock, flags);
9565}
9566#endif
8d22ba1b 9567
8446b59b
ED
9568/*
9569 * This function returns a stable result only if called under the zone lock.
9570 */
8d22ba1b
WF
9571bool is_free_buddy_page(struct page *page)
9572{
8d22ba1b 9573 unsigned long pfn = page_to_pfn(page);
7aeb09f9 9574 unsigned int order;
8d22ba1b 9575
8d22ba1b
WF
9576 for (order = 0; order < MAX_ORDER; order++) {
9577 struct page *page_head = page - (pfn & ((1 << order) - 1));
9578
8446b59b
ED
9579 if (PageBuddy(page_head) &&
9580 buddy_order_unsafe(page_head) >= order)
8d22ba1b
WF
9581 break;
9582 }
8d22ba1b
WF
9583
9584 return order < MAX_ORDER;
9585}
a581865e 9586EXPORT_SYMBOL(is_free_buddy_page);
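/*
 * Illustrative sketch only: because PageBuddy() and the buddy order can change
 * at any time, a caller that needs a reliable answer takes the zone lock
 * around the check. The wrapper name is hypothetical.
 */
static bool example_page_is_free_stable(struct page *page)
{
	struct zone *zone = page_zone(page);
	unsigned long flags;
	bool free;

	spin_lock_irqsave(&zone->lock, flags);
	free = is_free_buddy_page(page);	/* stable while zone->lock is held */
	spin_unlock_irqrestore(&zone->lock, flags);

	return free;
}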
d4ae9916
NH
9587
9588#ifdef CONFIG_MEMORY_FAILURE
9589/*
06be6ff3
OS
9590 * Break down a higher-order page into sub-pages, and keep our target page
9591 * out of the buddy allocator.
d4ae9916 9592 */
06be6ff3
OS
9593static void break_down_buddy_pages(struct zone *zone, struct page *page,
9594 struct page *target, int low, int high,
9595 int migratetype)
9596{
9597 unsigned long size = 1 << high;
9598 struct page *current_buddy, *next_page;
9599
9600 while (high > low) {
9601 high--;
9602 size >>= 1;
9603
9604 if (target >= &page[size]) {
9605 next_page = page + size;
9606 current_buddy = page;
9607 } else {
9608 next_page = page;
9609 current_buddy = page + size;
9610 }
9611
9612 if (set_page_guard(zone, current_buddy, high, migratetype))
9613 continue;
9614
9615 if (current_buddy != target) {
9616 add_to_free_list(current_buddy, zone, high, migratetype);
ab130f91 9617 set_buddy_order(current_buddy, high);
06be6ff3
OS
9618 page = next_page;
9619 }
9620 }
9621}
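/*
 * Worked example (illustrative): if @page is an order-3 buddy (pages 0-7) and
 * @target is page 6, the loop above peels off and frees the halves that do
 * not contain the target: pages 0-3 at order 2, then pages 4-5 at order 1,
 * then page 7 at order 0, leaving only the target page off the free lists.
 */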
9622
9623/*
9624 * Remove a page that is about to be marked as poisoned from the buddy
9625 * allocator's free lists.
9625 */
9626bool take_page_off_buddy(struct page *page)
d4ae9916
NH
9627{
9628 struct zone *zone = page_zone(page);
9629 unsigned long pfn = page_to_pfn(page);
9630 unsigned long flags;
9631 unsigned int order;
06be6ff3 9632 bool ret = false;
d4ae9916
NH
9633
9634 spin_lock_irqsave(&zone->lock, flags);
9635 for (order = 0; order < MAX_ORDER; order++) {
9636 struct page *page_head = page - (pfn & ((1 << order) - 1));
ab130f91 9637 int page_order = buddy_order(page_head);
d4ae9916 9638
ab130f91 9639 if (PageBuddy(page_head) && page_order >= order) {
06be6ff3
OS
9640 unsigned long pfn_head = page_to_pfn(page_head);
9641 int migratetype = get_pfnblock_migratetype(page_head,
9642 pfn_head);
9643
ab130f91 9644 del_page_from_free_list(page_head, zone, page_order);
06be6ff3 9645 break_down_buddy_pages(zone, page_head, page, 0,
ab130f91 9646 page_order, migratetype);
bf181c58 9647 SetPageHWPoisonTakenOff(page);
bac9c6fa
DH
9648 if (!is_migrate_isolate(migratetype))
9649 __mod_zone_freepage_state(zone, -1, migratetype);
06be6ff3 9650 ret = true;
d4ae9916
NH
9651 break;
9652 }
06be6ff3
OS
9653 if (page_count(page_head) > 0)
9654 break;
d4ae9916
NH
9655 }
9656 spin_unlock_irqrestore(&zone->lock, flags);
06be6ff3 9657 return ret;
d4ae9916 9658}
bf181c58
NH
9659
9660/*
9661 * Cancel a takeoff done by take_page_off_buddy().
9662 */
9663bool put_page_back_buddy(struct page *page)
9664{
9665 struct zone *zone = page_zone(page);
9666 unsigned long pfn = page_to_pfn(page);
9667 unsigned long flags;
9668 int migratetype = get_pfnblock_migratetype(page, pfn);
9669 bool ret = false;
9670
9671 spin_lock_irqsave(&zone->lock, flags);
9672 if (put_page_testzero(page)) {
9673 ClearPageHWPoisonTakenOff(page);
9674 __free_one_page(page, pfn, zone, 0, migratetype, FPI_NONE);
9675 if (TestClearPageHWPoison(page)) {
bf181c58
NH
9676 ret = true;
9677 }
9678 }
9679 spin_unlock_irqrestore(&zone->lock, flags);
9680
9681 return ret;
9682}
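/*
 * Illustrative sketch only: a rough pairing of take_page_off_buddy() and
 * put_page_back_buddy() as used for hardware poisoning. The helper names are
 * hypothetical, and the real callers in mm/memory-failure.c do additional
 * bookkeeping beyond what is shown here.
 */
static bool example_handle_hard_error(struct page *page)
{
	/* Hide the free page from the allocator; also sets HWPoisonTakenOff. */
	if (!take_page_off_buddy(page))
		return false;	/* page was not (or is no longer) a free buddy page */

	/* The page now has a zero refcount; take a reference and mark it poisoned. */
	page_ref_inc(page);
	SetPageHWPoison(page);
	return true;
}

static void example_cancel_hard_error(struct page *page)
{
	/*
	 * Drop the reference taken above; once it reaches zero the page is
	 * returned to the buddy free lists and its poison flags are cleared.
	 */
	put_page_back_buddy(page);
}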
d4ae9916 9683#endif
62b31070
BH
9684
9685#ifdef CONFIG_ZONE_DMA
9686bool has_managed_dma(void)
9687{
9688 struct pglist_data *pgdat;
9689
9690 for_each_online_pgdat(pgdat) {
9691 struct zone *zone = &pgdat->node_zones[ZONE_DMA];
9692
9693 if (managed_zone(zone))
9694 return true;
9695 }
9696 return false;
9697}
9698#endif /* CONFIG_ZONE_DMA */
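/*
 * Illustrative sketch only: a caller that would otherwise request ZONE_DMA
 * memory can use has_managed_dma() to fall back to normal memory when no
 * DMA zone pages are managed by the buddy allocator. The helper name is
 * hypothetical.
 */
static gfp_t example_dma_gfp_mask(void)
{
	return has_managed_dma() ? (GFP_KERNEL | __GFP_DMA) : GFP_KERNEL;
}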