mm: add prototype for __add_to_page_cache_locked()
[linux-block.git] / mm / page_alloc.c
CommitLineData
457c8996 1// SPDX-License-Identifier: GPL-2.0-only
1da177e4
LT
2/*
3 * linux/mm/page_alloc.c
4 *
5 * Manages the free list, the system allocates free pages here.
6 * Note that kmalloc() lives in slab.c
7 *
8 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
9 * Swap reorganised 29.12.95, Stephen Tweedie
10 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
11 * Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
12 * Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
13 * Zone balancing, Kanoj Sarcar, SGI, Jan 2000
14 * Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
15 * (lots of bits borrowed from Ingo Molnar & Andrew Morton)
16 */
17
1da177e4
LT
18#include <linux/stddef.h>
19#include <linux/mm.h>
ca79b0c2 20#include <linux/highmem.h>
1da177e4
LT
21#include <linux/swap.h>
22#include <linux/interrupt.h>
23#include <linux/pagemap.h>
10ed273f 24#include <linux/jiffies.h>
edbe7d23 25#include <linux/memblock.h>
1da177e4 26#include <linux/compiler.h>
9f158333 27#include <linux/kernel.h>
b8c73fc2 28#include <linux/kasan.h>
1da177e4
LT
29#include <linux/module.h>
30#include <linux/suspend.h>
31#include <linux/pagevec.h>
32#include <linux/blkdev.h>
33#include <linux/slab.h>
a238ab5b 34#include <linux/ratelimit.h>
5a3135c2 35#include <linux/oom.h>
1da177e4
LT
36#include <linux/topology.h>
37#include <linux/sysctl.h>
38#include <linux/cpu.h>
39#include <linux/cpuset.h>
bdc8cb98 40#include <linux/memory_hotplug.h>
1da177e4
LT
41#include <linux/nodemask.h>
42#include <linux/vmalloc.h>
a6cccdc3 43#include <linux/vmstat.h>
4be38e35 44#include <linux/mempolicy.h>
4b94ffdc 45#include <linux/memremap.h>
6811378e 46#include <linux/stop_machine.h>
97500a4a 47#include <linux/random.h>
c713216d
MG
48#include <linux/sort.h>
49#include <linux/pfn.h>
3fcfab16 50#include <linux/backing-dev.h>
933e312e 51#include <linux/fault-inject.h>
a5d76b54 52#include <linux/page-isolation.h>
3ac7fe5a 53#include <linux/debugobjects.h>
dbb1f81c 54#include <linux/kmemleak.h>
56de7263 55#include <linux/compaction.h>
0d3d062a 56#include <trace/events/kmem.h>
d379f01d 57#include <trace/events/oom.h>
268bb0ce 58#include <linux/prefetch.h>
6e543d57 59#include <linux/mm_inline.h>
f920e413 60#include <linux/mmu_notifier.h>
041d3a8c 61#include <linux/migrate.h>
949f7ec5 62#include <linux/hugetlb.h>
8bd75c77 63#include <linux/sched/rt.h>
5b3cc15a 64#include <linux/sched/mm.h>
48c96a36 65#include <linux/page_owner.h>
0e1cc95b 66#include <linux/kthread.h>
4949148a 67#include <linux/memcontrol.h>
42c269c8 68#include <linux/ftrace.h>
d92a8cfc 69#include <linux/lockdep.h>
556b969a 70#include <linux/nmi.h>
eb414681 71#include <linux/psi.h>
e4443149 72#include <linux/padata.h>
4aab2be0 73#include <linux/khugepaged.h>
ba8f3587 74#include <linux/buffer_head.h>
1da177e4 75
7ee3d4e8 76#include <asm/sections.h>
1da177e4 77#include <asm/tlbflush.h>
ac924c60 78#include <asm/div64.h>
1da177e4 79#include "internal.h"
e900a918 80#include "shuffle.h"
36e66c55 81#include "page_reporting.h"
1da177e4 82
f04a5d5d
DH
83/* Free Page Internal flags: for internal, non-pcp variants of free_pages(). */
84typedef int __bitwise fpi_t;
85
86/* No special request */
87#define FPI_NONE ((__force fpi_t)0)
88
89/*
90 * Skip free page reporting notification for the (possibly merged) page.
91 * This does not hinder free page reporting from grabbing the page,
92 * reporting it and marking it "reported" - it only skips notifying
93 * the free page reporting infrastructure about a newly freed page. For
94 * example, used when temporarily pulling a page from a freelist and
95 * putting it back unmodified.
96 */
97#define FPI_SKIP_REPORT_NOTIFY ((__force fpi_t)BIT(0))
98
47b6a24a
DH
99/*
100 * Place the (possibly merged) page to the tail of the freelist. Will ignore
101 * page shuffling (relevant code - e.g., memory onlining - is expected to
102 * shuffle the whole zone).
103 *
104 * Note: No code should rely on this flag for correctness - it's purely
105 * to allow for optimizations when handing back either fresh pages
106 * (memory onlining) or untouched pages (page isolation, free page
107 * reporting).
108 */
109#define FPI_TO_TAIL ((__force fpi_t)BIT(1))
110
c8e251fa
CS
111/* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */
112static DEFINE_MUTEX(pcp_batch_high_lock);
7cd2b0a3 113#define MIN_PERCPU_PAGELIST_FRACTION (8)
c8e251fa 114
72812019
LS
115#ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
116DEFINE_PER_CPU(int, numa_node);
117EXPORT_PER_CPU_SYMBOL(numa_node);
118#endif
119
4518085e
KW
120DEFINE_STATIC_KEY_TRUE(vm_numa_stat_key);
121
7aac7898
LS
122#ifdef CONFIG_HAVE_MEMORYLESS_NODES
123/*
124 * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly.
125 * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined.
126 * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem()
127 * defined in <linux/topology.h>.
128 */
129DEFINE_PER_CPU(int, _numa_mem_); /* Kernel "local memory" node */
130EXPORT_PER_CPU_SYMBOL(_numa_mem_);
131#endif
132
bd233f53 133/* work_structs for global per-cpu drains */
d9367bd0
WY
134struct pcpu_drain {
135 struct zone *zone;
136 struct work_struct work;
137};
8b885f53
JY
138static DEFINE_MUTEX(pcpu_drain_mutex);
139static DEFINE_PER_CPU(struct pcpu_drain, pcpu_drain);
bd233f53 140
38addce8 141#ifdef CONFIG_GCC_PLUGIN_LATENT_ENTROPY
58bea414 142volatile unsigned long latent_entropy __latent_entropy;
38addce8
ER
143EXPORT_SYMBOL(latent_entropy);
144#endif
145
1da177e4 146/*
13808910 147 * Array of node states.
1da177e4 148 */
13808910
CL
149nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
150 [N_POSSIBLE] = NODE_MASK_ALL,
151 [N_ONLINE] = { { [0] = 1UL } },
152#ifndef CONFIG_NUMA
153 [N_NORMAL_MEMORY] = { { [0] = 1UL } },
154#ifdef CONFIG_HIGHMEM
155 [N_HIGH_MEMORY] = { { [0] = 1UL } },
20b2f52b 156#endif
20b2f52b 157 [N_MEMORY] = { { [0] = 1UL } },
13808910
CL
158 [N_CPU] = { { [0] = 1UL } },
159#endif /* NUMA */
160};
161EXPORT_SYMBOL(node_states);
162
ca79b0c2
AK
163atomic_long_t _totalram_pages __read_mostly;
164EXPORT_SYMBOL(_totalram_pages);
cb45b0e9 165unsigned long totalreserve_pages __read_mostly;
e48322ab 166unsigned long totalcma_pages __read_mostly;
ab8fabd4 167
1b76b02f 168int percpu_pagelist_fraction;
dcce284a 169gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;
6471384a 170DEFINE_STATIC_KEY_FALSE(init_on_alloc);
6471384a
AP
171EXPORT_SYMBOL(init_on_alloc);
172
6471384a 173DEFINE_STATIC_KEY_FALSE(init_on_free);
6471384a
AP
174EXPORT_SYMBOL(init_on_free);
175
04013513
VB
176static bool _init_on_alloc_enabled_early __read_mostly
177 = IS_ENABLED(CONFIG_INIT_ON_ALLOC_DEFAULT_ON);
6471384a
AP
178static int __init early_init_on_alloc(char *buf)
179{
6471384a 180
04013513 181 return kstrtobool(buf, &_init_on_alloc_enabled_early);
6471384a
AP
182}
183early_param("init_on_alloc", early_init_on_alloc);
184
04013513
VB
185static bool _init_on_free_enabled_early __read_mostly
186 = IS_ENABLED(CONFIG_INIT_ON_FREE_DEFAULT_ON);
6471384a
AP
187static int __init early_init_on_free(char *buf)
188{
04013513 189 return kstrtobool(buf, &_init_on_free_enabled_early);
6471384a
AP
190}
191early_param("init_on_free", early_init_on_free);
1da177e4 192
bb14c2c7
VB
193/*
194 * A cached value of the page's pageblock's migratetype, used when the page is
195 * put on a pcplist. Used to avoid the pageblock migratetype lookup when
196 * freeing from pcplists in most cases, at the cost of possibly becoming stale.
197 * Also the migratetype set in the page does not necessarily match the pcplist
198 * index, e.g. page might have MIGRATE_CMA set but be on a pcplist with any
199 * other index - this ensures that it will be put on the correct CMA freelist.
200 */
201static inline int get_pcppage_migratetype(struct page *page)
202{
203 return page->index;
204}
205
206static inline void set_pcppage_migratetype(struct page *page, int migratetype)
207{
208 page->index = migratetype;
209}
210
452aa699
RW
211#ifdef CONFIG_PM_SLEEP
212/*
213 * The following functions are used by the suspend/hibernate code to temporarily
214 * change gfp_allowed_mask in order to avoid using I/O during memory allocations
215 * while devices are suspended. To avoid races with the suspend/hibernate code,
55f2503c
PL
216 * they should always be called with system_transition_mutex held
217 * (gfp_allowed_mask also should only be modified with system_transition_mutex
218 * held, unless the suspend/hibernate code is guaranteed not to run in parallel
219 * with that modification).
452aa699 220 */
c9e664f1
RW
221
222static gfp_t saved_gfp_mask;
223
224void pm_restore_gfp_mask(void)
452aa699 225{
55f2503c 226 WARN_ON(!mutex_is_locked(&system_transition_mutex));
c9e664f1
RW
227 if (saved_gfp_mask) {
228 gfp_allowed_mask = saved_gfp_mask;
229 saved_gfp_mask = 0;
230 }
452aa699
RW
231}
232
c9e664f1 233void pm_restrict_gfp_mask(void)
452aa699 234{
55f2503c 235 WARN_ON(!mutex_is_locked(&system_transition_mutex));
c9e664f1
RW
236 WARN_ON(saved_gfp_mask);
237 saved_gfp_mask = gfp_allowed_mask;
d0164adc 238 gfp_allowed_mask &= ~(__GFP_IO | __GFP_FS);
452aa699 239}
f90ac398
MG
240
241bool pm_suspended_storage(void)
242{
d0164adc 243 if ((gfp_allowed_mask & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS))
f90ac398
MG
244 return false;
245 return true;
246}
452aa699
RW
247#endif /* CONFIG_PM_SLEEP */
248
d9c23400 249#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
d00181b9 250unsigned int pageblock_order __read_mostly;
d9c23400
MG
251#endif
252
7fef431b
DH
253static void __free_pages_ok(struct page *page, unsigned int order,
254 fpi_t fpi_flags);
a226f6c8 255
1da177e4
LT
256/*
257 * results with 256, 32 in the lowmem_reserve sysctl:
258 * 1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
259 * 1G machine -> (16M dma, 784M normal, 224M high)
260 * NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
261 * HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
84109e15 262 * HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
a2f1b424
AK
263 *
264 * TBD: should special case ZONE_DMA32 machines here - in those we normally
265 * don't need any ZONE_NORMAL reservation
1da177e4 266 */
d3cda233 267int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES] = {
4b51d669 268#ifdef CONFIG_ZONE_DMA
d3cda233 269 [ZONE_DMA] = 256,
4b51d669 270#endif
fb0e7942 271#ifdef CONFIG_ZONE_DMA32
d3cda233 272 [ZONE_DMA32] = 256,
fb0e7942 273#endif
d3cda233 274 [ZONE_NORMAL] = 32,
e53ef38d 275#ifdef CONFIG_HIGHMEM
d3cda233 276 [ZONE_HIGHMEM] = 0,
e53ef38d 277#endif
d3cda233 278 [ZONE_MOVABLE] = 0,
2f1b6248 279};
1da177e4 280
15ad7cdc 281static char * const zone_names[MAX_NR_ZONES] = {
4b51d669 282#ifdef CONFIG_ZONE_DMA
2f1b6248 283 "DMA",
4b51d669 284#endif
fb0e7942 285#ifdef CONFIG_ZONE_DMA32
2f1b6248 286 "DMA32",
fb0e7942 287#endif
2f1b6248 288 "Normal",
e53ef38d 289#ifdef CONFIG_HIGHMEM
2a1e274a 290 "HighMem",
e53ef38d 291#endif
2a1e274a 292 "Movable",
033fbae9
DW
293#ifdef CONFIG_ZONE_DEVICE
294 "Device",
295#endif
2f1b6248
CL
296};
297
c999fbd3 298const char * const migratetype_names[MIGRATE_TYPES] = {
60f30350
VB
299 "Unmovable",
300 "Movable",
301 "Reclaimable",
302 "HighAtomic",
303#ifdef CONFIG_CMA
304 "CMA",
305#endif
306#ifdef CONFIG_MEMORY_ISOLATION
307 "Isolate",
308#endif
309};
310
ae70eddd
AK
311compound_page_dtor * const compound_page_dtors[NR_COMPOUND_DTORS] = {
312 [NULL_COMPOUND_DTOR] = NULL,
313 [COMPOUND_PAGE_DTOR] = free_compound_page,
f1e61557 314#ifdef CONFIG_HUGETLB_PAGE
ae70eddd 315 [HUGETLB_PAGE_DTOR] = free_huge_page,
f1e61557 316#endif
9a982250 317#ifdef CONFIG_TRANSPARENT_HUGEPAGE
ae70eddd 318 [TRANSHUGE_PAGE_DTOR] = free_transhuge_page,
9a982250 319#endif
f1e61557
KS
320};
321
1da177e4 322int min_free_kbytes = 1024;
42aa83cb 323int user_min_free_kbytes = -1;
24512228
MG
324#ifdef CONFIG_DISCONTIGMEM
325/*
326 * DiscontigMem defines memory ranges as separate pg_data_t even if the ranges
327 * are not on separate NUMA nodes. Functionally this works but with
328 * watermark_boost_factor, it can reclaim prematurely as the ranges can be
329 * quite small. By default, do not boost watermarks on discontigmem as in
330 * many cases very high-order allocations like THP are likely to be
331 * unsupported and the premature reclaim offsets the advantage of long-term
332 * fragmentation avoidance.
333 */
334int watermark_boost_factor __read_mostly;
335#else
1c30844d 336int watermark_boost_factor __read_mostly = 15000;
24512228 337#endif
795ae7a0 338int watermark_scale_factor = 10;
1da177e4 339
bbe5d993
OS
340static unsigned long nr_kernel_pages __initdata;
341static unsigned long nr_all_pages __initdata;
342static unsigned long dma_reserve __initdata;
1da177e4 343
bbe5d993
OS
344static unsigned long arch_zone_lowest_possible_pfn[MAX_NR_ZONES] __initdata;
345static unsigned long arch_zone_highest_possible_pfn[MAX_NR_ZONES] __initdata;
7f16f91f 346static unsigned long required_kernelcore __initdata;
a5c6d650 347static unsigned long required_kernelcore_percent __initdata;
7f16f91f 348static unsigned long required_movablecore __initdata;
a5c6d650 349static unsigned long required_movablecore_percent __initdata;
bbe5d993 350static unsigned long zone_movable_pfn[MAX_NUMNODES] __initdata;
7f16f91f 351static bool mirrored_kernelcore __meminitdata;
0ee332c1
TH
352
353/* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
354int movable_zone;
355EXPORT_SYMBOL(movable_zone);
c713216d 356
418508c1 357#if MAX_NUMNODES > 1
b9726c26 358unsigned int nr_node_ids __read_mostly = MAX_NUMNODES;
ce0725f7 359unsigned int nr_online_nodes __read_mostly = 1;
418508c1 360EXPORT_SYMBOL(nr_node_ids);
62bc62a8 361EXPORT_SYMBOL(nr_online_nodes);
418508c1
MS
362#endif
363
9ef9acb0
MG
364int page_group_by_mobility_disabled __read_mostly;
365
3a80a7fa 366#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
3c0c12cc
WL
367/*
368 * During boot we initialize deferred pages on-demand, as needed, but once
369 * page_alloc_init_late() has finished, the deferred pages are all initialized,
370 * and we can permanently disable that path.
371 */
372static DEFINE_STATIC_KEY_TRUE(deferred_pages);
373
374/*
375 * Calling kasan_free_pages() only after deferred memory initialization
376 * has completed. Poisoning pages during deferred memory init will greatly
377 * lengthen the process and cause problem in large memory systems as the
378 * deferred pages initialization is done with interrupt disabled.
379 *
380 * Assuming that there will be no reference to those newly initialized
381 * pages before they are ever allocated, this should have no effect on
382 * KASAN memory tracking as the poison will be properly inserted at page
383 * allocation time. The only corner case is when pages are allocated by
384 * on-demand allocation and then freed again before the deferred pages
385 * initialization is done, but this is not likely to happen.
386 */
387static inline void kasan_free_nondeferred_pages(struct page *page, int order)
388{
389 if (!static_branch_unlikely(&deferred_pages))
390 kasan_free_pages(page, order);
391}
392
3a80a7fa 393/* Returns true if the struct page for the pfn is uninitialised */
0e1cc95b 394static inline bool __meminit early_page_uninitialised(unsigned long pfn)
3a80a7fa 395{
ef70b6f4
MG
396 int nid = early_pfn_to_nid(pfn);
397
398 if (node_online(nid) && pfn >= NODE_DATA(nid)->first_deferred_pfn)
3a80a7fa
MG
399 return true;
400
401 return false;
402}
403
404/*
d3035be4 405 * Returns true when the remaining initialisation should be deferred until
3a80a7fa
MG
406 * later in the boot cycle when it can be parallelised.
407 */
d3035be4
PT
408static bool __meminit
409defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
3a80a7fa 410{
d3035be4
PT
411 static unsigned long prev_end_pfn, nr_initialised;
412
413 /*
414 * prev_end_pfn static that contains the end of previous zone
415 * No need to protect because called very early in boot before smp_init.
416 */
417 if (prev_end_pfn != end_pfn) {
418 prev_end_pfn = end_pfn;
419 nr_initialised = 0;
420 }
421
3c2c6488 422 /* Always populate low zones for address-constrained allocations */
d3035be4 423 if (end_pfn < pgdat_end_pfn(NODE_DATA(nid)))
3a80a7fa 424 return false;
23b68cfa
WY
425
426 /*
427 * We start only with one section of pages, more pages are added as
428 * needed until the rest of deferred pages are initialized.
429 */
d3035be4 430 nr_initialised++;
23b68cfa 431 if ((nr_initialised > PAGES_PER_SECTION) &&
d3035be4
PT
432 (pfn & (PAGES_PER_SECTION - 1)) == 0) {
433 NODE_DATA(nid)->first_deferred_pfn = pfn;
434 return true;
3a80a7fa 435 }
d3035be4 436 return false;
3a80a7fa
MG
437}
438#else
3c0c12cc
WL
439#define kasan_free_nondeferred_pages(p, o) kasan_free_pages(p, o)
440
3a80a7fa
MG
441static inline bool early_page_uninitialised(unsigned long pfn)
442{
443 return false;
444}
445
d3035be4 446static inline bool defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
3a80a7fa 447{
d3035be4 448 return false;
3a80a7fa
MG
449}
450#endif
451
0b423ca2
MG
452/* Return a pointer to the bitmap storing bits affecting a block of pages */
453static inline unsigned long *get_pageblock_bitmap(struct page *page,
454 unsigned long pfn)
455{
456#ifdef CONFIG_SPARSEMEM
f1eca35a 457 return section_to_usemap(__pfn_to_section(pfn));
0b423ca2
MG
458#else
459 return page_zone(page)->pageblock_flags;
460#endif /* CONFIG_SPARSEMEM */
461}
462
463static inline int pfn_to_bitidx(struct page *page, unsigned long pfn)
464{
465#ifdef CONFIG_SPARSEMEM
466 pfn &= (PAGES_PER_SECTION-1);
0b423ca2
MG
467#else
468 pfn = pfn - round_down(page_zone(page)->zone_start_pfn, pageblock_nr_pages);
0b423ca2 469#endif /* CONFIG_SPARSEMEM */
399b795b 470 return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
0b423ca2
MG
471}
472
535b81e2
WY
473static __always_inline
474unsigned long __get_pfnblock_flags_mask(struct page *page,
0b423ca2 475 unsigned long pfn,
0b423ca2
MG
476 unsigned long mask)
477{
478 unsigned long *bitmap;
479 unsigned long bitidx, word_bitidx;
480 unsigned long word;
481
482 bitmap = get_pageblock_bitmap(page, pfn);
483 bitidx = pfn_to_bitidx(page, pfn);
484 word_bitidx = bitidx / BITS_PER_LONG;
485 bitidx &= (BITS_PER_LONG-1);
486
487 word = bitmap[word_bitidx];
d93d5ab9 488 return (word >> bitidx) & mask;
0b423ca2
MG
489}
490
a00cda3f
MCC
491/**
492 * get_pfnblock_flags_mask - Return the requested group of flags for the pageblock_nr_pages block of pages
493 * @page: The page within the block of interest
494 * @pfn: The target page frame number
495 * @mask: mask of bits that the caller is interested in
496 *
497 * Return: pageblock_bits flags
498 */
0b423ca2 499unsigned long get_pfnblock_flags_mask(struct page *page, unsigned long pfn,
0b423ca2
MG
500 unsigned long mask)
501{
535b81e2 502 return __get_pfnblock_flags_mask(page, pfn, mask);
0b423ca2
MG
503}
504
505static __always_inline int get_pfnblock_migratetype(struct page *page, unsigned long pfn)
506{
535b81e2 507 return __get_pfnblock_flags_mask(page, pfn, MIGRATETYPE_MASK);
0b423ca2
MG
508}
509
510/**
511 * set_pfnblock_flags_mask - Set the requested group of flags for a pageblock_nr_pages block of pages
512 * @page: The page within the block of interest
513 * @flags: The flags to set
514 * @pfn: The target page frame number
0b423ca2
MG
515 * @mask: mask of bits that the caller is interested in
516 */
517void set_pfnblock_flags_mask(struct page *page, unsigned long flags,
518 unsigned long pfn,
0b423ca2
MG
519 unsigned long mask)
520{
521 unsigned long *bitmap;
522 unsigned long bitidx, word_bitidx;
523 unsigned long old_word, word;
524
525 BUILD_BUG_ON(NR_PAGEBLOCK_BITS != 4);
125b860b 526 BUILD_BUG_ON(MIGRATE_TYPES > (1 << PB_migratetype_bits));
0b423ca2
MG
527
528 bitmap = get_pageblock_bitmap(page, pfn);
529 bitidx = pfn_to_bitidx(page, pfn);
530 word_bitidx = bitidx / BITS_PER_LONG;
531 bitidx &= (BITS_PER_LONG-1);
532
533 VM_BUG_ON_PAGE(!zone_spans_pfn(page_zone(page), pfn), page);
534
d93d5ab9
WY
535 mask <<= bitidx;
536 flags <<= bitidx;
0b423ca2
MG
537
538 word = READ_ONCE(bitmap[word_bitidx]);
539 for (;;) {
540 old_word = cmpxchg(&bitmap[word_bitidx], word, (word & ~mask) | flags);
541 if (word == old_word)
542 break;
543 word = old_word;
544 }
545}
3a80a7fa 546
ee6f509c 547void set_pageblock_migratetype(struct page *page, int migratetype)
b2a0ac88 548{
5d0f3f72
KM
549 if (unlikely(page_group_by_mobility_disabled &&
550 migratetype < MIGRATE_PCPTYPES))
49255c61
MG
551 migratetype = MIGRATE_UNMOVABLE;
552
d93d5ab9 553 set_pfnblock_flags_mask(page, (unsigned long)migratetype,
535b81e2 554 page_to_pfn(page), MIGRATETYPE_MASK);
b2a0ac88
MG
555}
556
13e7444b 557#ifdef CONFIG_DEBUG_VM
c6a57e19 558static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
1da177e4 559{
bdc8cb98
DH
560 int ret = 0;
561 unsigned seq;
562 unsigned long pfn = page_to_pfn(page);
b5e6a5a2 563 unsigned long sp, start_pfn;
c6a57e19 564
bdc8cb98
DH
565 do {
566 seq = zone_span_seqbegin(zone);
b5e6a5a2
CS
567 start_pfn = zone->zone_start_pfn;
568 sp = zone->spanned_pages;
108bcc96 569 if (!zone_spans_pfn(zone, pfn))
bdc8cb98
DH
570 ret = 1;
571 } while (zone_span_seqretry(zone, seq));
572
b5e6a5a2 573 if (ret)
613813e8
DH
574 pr_err("page 0x%lx outside node %d zone %s [ 0x%lx - 0x%lx ]\n",
575 pfn, zone_to_nid(zone), zone->name,
576 start_pfn, start_pfn + sp);
b5e6a5a2 577
bdc8cb98 578 return ret;
c6a57e19
DH
579}
580
581static int page_is_consistent(struct zone *zone, struct page *page)
582{
14e07298 583 if (!pfn_valid_within(page_to_pfn(page)))
c6a57e19 584 return 0;
1da177e4 585 if (zone != page_zone(page))
c6a57e19
DH
586 return 0;
587
588 return 1;
589}
590/*
591 * Temporary debugging check for pages not lying within a given zone.
592 */
d73d3c9f 593static int __maybe_unused bad_range(struct zone *zone, struct page *page)
c6a57e19
DH
594{
595 if (page_outside_zone_boundaries(zone, page))
1da177e4 596 return 1;
c6a57e19
DH
597 if (!page_is_consistent(zone, page))
598 return 1;
599
1da177e4
LT
600 return 0;
601}
13e7444b 602#else
d73d3c9f 603static inline int __maybe_unused bad_range(struct zone *zone, struct page *page)
13e7444b
NP
604{
605 return 0;
606}
607#endif
608
82a3241a 609static void bad_page(struct page *page, const char *reason)
1da177e4 610{
d936cf9b
HD
611 static unsigned long resume;
612 static unsigned long nr_shown;
613 static unsigned long nr_unshown;
614
615 /*
616 * Allow a burst of 60 reports, then keep quiet for that minute;
617 * or allow a steady drip of one report per second.
618 */
619 if (nr_shown == 60) {
620 if (time_before(jiffies, resume)) {
621 nr_unshown++;
622 goto out;
623 }
624 if (nr_unshown) {
ff8e8116 625 pr_alert(
1e9e6365 626 "BUG: Bad page state: %lu messages suppressed\n",
d936cf9b
HD
627 nr_unshown);
628 nr_unshown = 0;
629 }
630 nr_shown = 0;
631 }
632 if (nr_shown++ == 0)
633 resume = jiffies + 60 * HZ;
634
ff8e8116 635 pr_alert("BUG: Bad page state in process %s pfn:%05lx\n",
3dc14741 636 current->comm, page_to_pfn(page));
ff8e8116 637 __dump_page(page, reason);
4e462112 638 dump_page_owner(page);
3dc14741 639
4f31888c 640 print_modules();
1da177e4 641 dump_stack();
d936cf9b 642out:
8cc3b392 643 /* Leave bad fields for debug, except PageBuddy could make trouble */
22b751c3 644 page_mapcount_reset(page); /* remove PageBuddy */
373d4d09 645 add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
1da177e4
LT
646}
647
1da177e4
LT
648/*
649 * Higher-order pages are called "compound pages". They are structured thusly:
650 *
1d798ca3 651 * The first PAGE_SIZE page is called the "head page" and have PG_head set.
1da177e4 652 *
1d798ca3
KS
653 * The remaining PAGE_SIZE pages are called "tail pages". PageTail() is encoded
654 * in bit 0 of page->compound_head. The rest of bits is pointer to head page.
1da177e4 655 *
1d798ca3
KS
656 * The first tail page's ->compound_dtor holds the offset in array of compound
657 * page destructors. See compound_page_dtors.
1da177e4 658 *
1d798ca3 659 * The first tail page's ->compound_order holds the order of allocation.
41d78ba5 660 * This usage means that zero-order pages may not be compound.
1da177e4 661 */
d98c7a09 662
9a982250 663void free_compound_page(struct page *page)
d98c7a09 664{
7ae88534 665 mem_cgroup_uncharge(page);
7fef431b 666 __free_pages_ok(page, compound_order(page), FPI_NONE);
d98c7a09
HD
667}
668
d00181b9 669void prep_compound_page(struct page *page, unsigned int order)
18229df5
AW
670{
671 int i;
672 int nr_pages = 1 << order;
673
18229df5
AW
674 __SetPageHead(page);
675 for (i = 1; i < nr_pages; i++) {
676 struct page *p = page + i;
58a84aa9 677 set_page_count(p, 0);
1c290f64 678 p->mapping = TAIL_MAPPING;
1d798ca3 679 set_compound_head(p, page);
18229df5 680 }
1378a5ee
MWO
681
682 set_compound_page_dtor(page, COMPOUND_PAGE_DTOR);
683 set_compound_order(page, order);
53f9263b 684 atomic_set(compound_mapcount_ptr(page), -1);
47e29d32
JH
685 if (hpage_pincount_available(page))
686 atomic_set(compound_pincount_ptr(page), 0);
18229df5
AW
687}
688
c0a32fc5
SG
689#ifdef CONFIG_DEBUG_PAGEALLOC
690unsigned int _debug_guardpage_minorder;
96a2b03f 691
8e57f8ac
VB
692bool _debug_pagealloc_enabled_early __read_mostly
693 = IS_ENABLED(CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT);
694EXPORT_SYMBOL(_debug_pagealloc_enabled_early);
96a2b03f 695DEFINE_STATIC_KEY_FALSE(_debug_pagealloc_enabled);
505f6d22 696EXPORT_SYMBOL(_debug_pagealloc_enabled);
96a2b03f
VB
697
698DEFINE_STATIC_KEY_FALSE(_debug_guardpage_enabled);
e30825f1 699
031bc574
JK
700static int __init early_debug_pagealloc(char *buf)
701{
8e57f8ac 702 return kstrtobool(buf, &_debug_pagealloc_enabled_early);
031bc574
JK
703}
704early_param("debug_pagealloc", early_debug_pagealloc);
705
c0a32fc5
SG
706static int __init debug_guardpage_minorder_setup(char *buf)
707{
708 unsigned long res;
709
710 if (kstrtoul(buf, 10, &res) < 0 || res > MAX_ORDER / 2) {
1170532b 711 pr_err("Bad debug_guardpage_minorder value\n");
c0a32fc5
SG
712 return 0;
713 }
714 _debug_guardpage_minorder = res;
1170532b 715 pr_info("Setting debug_guardpage_minorder to %lu\n", res);
c0a32fc5
SG
716 return 0;
717}
f1c1e9f7 718early_param("debug_guardpage_minorder", debug_guardpage_minorder_setup);
c0a32fc5 719
acbc15a4 720static inline bool set_page_guard(struct zone *zone, struct page *page,
2847cf95 721 unsigned int order, int migratetype)
c0a32fc5 722{
e30825f1 723 if (!debug_guardpage_enabled())
acbc15a4
JK
724 return false;
725
726 if (order >= debug_guardpage_minorder())
727 return false;
e30825f1 728
3972f6bb 729 __SetPageGuard(page);
2847cf95
JK
730 INIT_LIST_HEAD(&page->lru);
731 set_page_private(page, order);
732 /* Guard pages are not available for any usage */
733 __mod_zone_freepage_state(zone, -(1 << order), migratetype);
acbc15a4
JK
734
735 return true;
c0a32fc5
SG
736}
737
2847cf95
JK
738static inline void clear_page_guard(struct zone *zone, struct page *page,
739 unsigned int order, int migratetype)
c0a32fc5 740{
e30825f1
JK
741 if (!debug_guardpage_enabled())
742 return;
743
3972f6bb 744 __ClearPageGuard(page);
e30825f1 745
2847cf95
JK
746 set_page_private(page, 0);
747 if (!is_migrate_isolate(migratetype))
748 __mod_zone_freepage_state(zone, (1 << order), migratetype);
c0a32fc5
SG
749}
750#else
acbc15a4
JK
751static inline bool set_page_guard(struct zone *zone, struct page *page,
752 unsigned int order, int migratetype) { return false; }
2847cf95
JK
753static inline void clear_page_guard(struct zone *zone, struct page *page,
754 unsigned int order, int migratetype) {}
c0a32fc5
SG
755#endif
756
04013513
VB
757/*
758 * Enable static keys related to various memory debugging and hardening options.
759 * Some override others, and depend on early params that are evaluated in the
760 * order of appearance. So we need to first gather the full picture of what was
761 * enabled, and then make decisions.
762 */
763void init_mem_debugging_and_hardening(void)
764{
765 if (_init_on_alloc_enabled_early) {
766 if (page_poisoning_enabled())
767 pr_info("mem auto-init: CONFIG_PAGE_POISONING is on, "
768 "will take precedence over init_on_alloc\n");
769 else
770 static_branch_enable(&init_on_alloc);
771 }
772 if (_init_on_free_enabled_early) {
773 if (page_poisoning_enabled())
774 pr_info("mem auto-init: CONFIG_PAGE_POISONING is on, "
775 "will take precedence over init_on_free\n");
776 else
777 static_branch_enable(&init_on_free);
778 }
779
8db26a3d
VB
780#ifdef CONFIG_PAGE_POISONING
781 /*
782 * Page poisoning is debug page alloc for some arches. If
783 * either of those options are enabled, enable poisoning.
784 */
785 if (page_poisoning_enabled() ||
786 (!IS_ENABLED(CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC) &&
787 debug_pagealloc_enabled()))
788 static_branch_enable(&_page_poisoning_enabled);
789#endif
790
04013513
VB
791#ifdef CONFIG_DEBUG_PAGEALLOC
792 if (!debug_pagealloc_enabled())
793 return;
794
795 static_branch_enable(&_debug_pagealloc_enabled);
796
797 if (!debug_guardpage_minorder())
798 return;
799
800 static_branch_enable(&_debug_guardpage_enabled);
801#endif
802}
803
ab130f91 804static inline void set_buddy_order(struct page *page, unsigned int order)
6aa3001b 805{
4c21e2f2 806 set_page_private(page, order);
676165a8 807 __SetPageBuddy(page);
1da177e4
LT
808}
809
1da177e4
LT
810/*
811 * This function checks whether a page is free && is the buddy
6e292b9b 812 * we can coalesce a page and its buddy if
13ad59df 813 * (a) the buddy is not in a hole (check before calling!) &&
676165a8 814 * (b) the buddy is in the buddy system &&
cb2b95e1
AW
815 * (c) a page and its buddy have the same order &&
816 * (d) a page and its buddy are in the same zone.
676165a8 817 *
6e292b9b
MW
818 * For recording whether a page is in the buddy system, we set PageBuddy.
819 * Setting, clearing, and testing PageBuddy is serialized by zone->lock.
1da177e4 820 *
676165a8 821 * For recording page's order, we use page_private(page).
1da177e4 822 */
fe925c0c 823static inline bool page_is_buddy(struct page *page, struct page *buddy,
7aeb09f9 824 unsigned int order)
1da177e4 825{
fe925c0c 826 if (!page_is_guard(buddy) && !PageBuddy(buddy))
827 return false;
4c5018ce 828
ab130f91 829 if (buddy_order(buddy) != order)
fe925c0c 830 return false;
c0a32fc5 831
fe925c0c 832 /*
833 * zone check is done late to avoid uselessly calculating
834 * zone/node ids for pages that could never merge.
835 */
836 if (page_zone_id(page) != page_zone_id(buddy))
837 return false;
d34c5fa0 838
fe925c0c 839 VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);
4c5018ce 840
fe925c0c 841 return true;
1da177e4
LT
842}
843
5e1f0f09
MG
844#ifdef CONFIG_COMPACTION
845static inline struct capture_control *task_capc(struct zone *zone)
846{
847 struct capture_control *capc = current->capture_control;
848
deba0487 849 return unlikely(capc) &&
5e1f0f09
MG
850 !(current->flags & PF_KTHREAD) &&
851 !capc->page &&
deba0487 852 capc->cc->zone == zone ? capc : NULL;
5e1f0f09
MG
853}
854
855static inline bool
856compaction_capture(struct capture_control *capc, struct page *page,
857 int order, int migratetype)
858{
859 if (!capc || order != capc->cc->order)
860 return false;
861
862 /* Do not accidentally pollute CMA or isolated regions*/
863 if (is_migrate_cma(migratetype) ||
864 is_migrate_isolate(migratetype))
865 return false;
866
867 /*
868 * Do not let lower order allocations polluate a movable pageblock.
869 * This might let an unmovable request use a reclaimable pageblock
870 * and vice-versa but no more than normal fallback logic which can
871 * have trouble finding a high-order free page.
872 */
873 if (order < pageblock_order && migratetype == MIGRATE_MOVABLE)
874 return false;
875
876 capc->page = page;
877 return true;
878}
879
880#else
881static inline struct capture_control *task_capc(struct zone *zone)
882{
883 return NULL;
884}
885
886static inline bool
887compaction_capture(struct capture_control *capc, struct page *page,
888 int order, int migratetype)
889{
890 return false;
891}
892#endif /* CONFIG_COMPACTION */
893
6ab01363
AD
894/* Used for pages not on another list */
895static inline void add_to_free_list(struct page *page, struct zone *zone,
896 unsigned int order, int migratetype)
897{
898 struct free_area *area = &zone->free_area[order];
899
900 list_add(&page->lru, &area->free_list[migratetype]);
901 area->nr_free++;
902}
903
904/* Used for pages not on another list */
905static inline void add_to_free_list_tail(struct page *page, struct zone *zone,
906 unsigned int order, int migratetype)
907{
908 struct free_area *area = &zone->free_area[order];
909
910 list_add_tail(&page->lru, &area->free_list[migratetype]);
911 area->nr_free++;
912}
913
293ffa5e
DH
914/*
915 * Used for pages which are on another list. Move the pages to the tail
916 * of the list - so the moved pages won't immediately be considered for
917 * allocation again (e.g., optimization for memory onlining).
918 */
6ab01363
AD
919static inline void move_to_free_list(struct page *page, struct zone *zone,
920 unsigned int order, int migratetype)
921{
922 struct free_area *area = &zone->free_area[order];
923
293ffa5e 924 list_move_tail(&page->lru, &area->free_list[migratetype]);
6ab01363
AD
925}
926
927static inline void del_page_from_free_list(struct page *page, struct zone *zone,
928 unsigned int order)
929{
36e66c55
AD
930 /* clear reported state and update reported page count */
931 if (page_reported(page))
932 __ClearPageReported(page);
933
6ab01363
AD
934 list_del(&page->lru);
935 __ClearPageBuddy(page);
936 set_page_private(page, 0);
937 zone->free_area[order].nr_free--;
938}
939
a2129f24
AD
940/*
941 * If this is not the largest possible page, check if the buddy
942 * of the next-highest order is free. If it is, it's possible
943 * that pages are being freed that will coalesce soon. In case,
944 * that is happening, add the free page to the tail of the list
945 * so it's less likely to be used soon and more likely to be merged
946 * as a higher order page
947 */
948static inline bool
949buddy_merge_likely(unsigned long pfn, unsigned long buddy_pfn,
950 struct page *page, unsigned int order)
951{
952 struct page *higher_page, *higher_buddy;
953 unsigned long combined_pfn;
954
955 if (order >= MAX_ORDER - 2)
956 return false;
957
958 if (!pfn_valid_within(buddy_pfn))
959 return false;
960
961 combined_pfn = buddy_pfn & pfn;
962 higher_page = page + (combined_pfn - pfn);
963 buddy_pfn = __find_buddy_pfn(combined_pfn, order + 1);
964 higher_buddy = higher_page + (buddy_pfn - combined_pfn);
965
966 return pfn_valid_within(buddy_pfn) &&
967 page_is_buddy(higher_page, higher_buddy, order + 1);
968}
969
1da177e4
LT
970/*
971 * Freeing function for a buddy system allocator.
972 *
973 * The concept of a buddy system is to maintain direct-mapped table
974 * (containing bit values) for memory blocks of various "orders".
975 * The bottom level table contains the map for the smallest allocatable
976 * units of memory (here, pages), and each level above it describes
977 * pairs of units from the levels below, hence, "buddies".
978 * At a high level, all that happens here is marking the table entry
979 * at the bottom level available, and propagating the changes upward
980 * as necessary, plus some accounting needed to play nicely with other
981 * parts of the VM system.
982 * At each level, we keep a list of pages, which are heads of continuous
6e292b9b
MW
983 * free pages of length of (1 << order) and marked with PageBuddy.
984 * Page's order is recorded in page_private(page) field.
1da177e4 985 * So when we are allocating or freeing one, we can derive the state of the
5f63b720
MN
986 * other. That is, if we allocate a small block, and both were
987 * free, the remainder of the region must be split into blocks.
1da177e4 988 * If a block is freed, and its buddy is also free, then this
5f63b720 989 * triggers coalescing into a block of larger size.
1da177e4 990 *
6d49e352 991 * -- nyc
1da177e4
LT
992 */
993
48db57f8 994static inline void __free_one_page(struct page *page,
dc4b0caf 995 unsigned long pfn,
ed0ae21d 996 struct zone *zone, unsigned int order,
f04a5d5d 997 int migratetype, fpi_t fpi_flags)
1da177e4 998{
a2129f24 999 struct capture_control *capc = task_capc(zone);
3f649ab7 1000 unsigned long buddy_pfn;
a2129f24 1001 unsigned long combined_pfn;
d9dddbf5 1002 unsigned int max_order;
a2129f24
AD
1003 struct page *buddy;
1004 bool to_tail;
d9dddbf5 1005
7ad69832 1006 max_order = min_t(unsigned int, MAX_ORDER - 1, pageblock_order);
1da177e4 1007
d29bb978 1008 VM_BUG_ON(!zone_is_initialized(zone));
6e9f0d58 1009 VM_BUG_ON_PAGE(page->flags & PAGE_FLAGS_CHECK_AT_PREP, page);
1da177e4 1010
ed0ae21d 1011 VM_BUG_ON(migratetype == -1);
d9dddbf5 1012 if (likely(!is_migrate_isolate(migratetype)))
8f82b55d 1013 __mod_zone_freepage_state(zone, 1 << order, migratetype);
ed0ae21d 1014
76741e77 1015 VM_BUG_ON_PAGE(pfn & ((1 << order) - 1), page);
309381fe 1016 VM_BUG_ON_PAGE(bad_range(zone, page), page);
1da177e4 1017
d9dddbf5 1018continue_merging:
7ad69832 1019 while (order < max_order) {
5e1f0f09
MG
1020 if (compaction_capture(capc, page, order, migratetype)) {
1021 __mod_zone_freepage_state(zone, -(1 << order),
1022 migratetype);
1023 return;
1024 }
76741e77
VB
1025 buddy_pfn = __find_buddy_pfn(pfn, order);
1026 buddy = page + (buddy_pfn - pfn);
13ad59df
VB
1027
1028 if (!pfn_valid_within(buddy_pfn))
1029 goto done_merging;
cb2b95e1 1030 if (!page_is_buddy(page, buddy, order))
d9dddbf5 1031 goto done_merging;
c0a32fc5
SG
1032 /*
1033 * Our buddy is free or it is CONFIG_DEBUG_PAGEALLOC guard page,
1034 * merge with it and move up one order.
1035 */
b03641af 1036 if (page_is_guard(buddy))
2847cf95 1037 clear_page_guard(zone, buddy, order, migratetype);
b03641af 1038 else
6ab01363 1039 del_page_from_free_list(buddy, zone, order);
76741e77
VB
1040 combined_pfn = buddy_pfn & pfn;
1041 page = page + (combined_pfn - pfn);
1042 pfn = combined_pfn;
1da177e4
LT
1043 order++;
1044 }
7ad69832 1045 if (order < MAX_ORDER - 1) {
d9dddbf5
VB
1046 /* If we are here, it means order is >= pageblock_order.
1047 * We want to prevent merge between freepages on isolate
1048 * pageblock and normal pageblock. Without this, pageblock
1049 * isolation could cause incorrect freepage or CMA accounting.
1050 *
1051 * We don't want to hit this code for the more frequent
1052 * low-order merging.
1053 */
1054 if (unlikely(has_isolate_pageblock(zone))) {
1055 int buddy_mt;
1056
76741e77
VB
1057 buddy_pfn = __find_buddy_pfn(pfn, order);
1058 buddy = page + (buddy_pfn - pfn);
d9dddbf5
VB
1059 buddy_mt = get_pageblock_migratetype(buddy);
1060
1061 if (migratetype != buddy_mt
1062 && (is_migrate_isolate(migratetype) ||
1063 is_migrate_isolate(buddy_mt)))
1064 goto done_merging;
1065 }
7ad69832 1066 max_order = order + 1;
d9dddbf5
VB
1067 goto continue_merging;
1068 }
1069
1070done_merging:
ab130f91 1071 set_buddy_order(page, order);
6dda9d55 1072
47b6a24a
DH
1073 if (fpi_flags & FPI_TO_TAIL)
1074 to_tail = true;
1075 else if (is_shuffle_order(order))
a2129f24 1076 to_tail = shuffle_pick_tail();
97500a4a 1077 else
a2129f24 1078 to_tail = buddy_merge_likely(pfn, buddy_pfn, page, order);
97500a4a 1079
a2129f24 1080 if (to_tail)
6ab01363 1081 add_to_free_list_tail(page, zone, order, migratetype);
a2129f24 1082 else
6ab01363 1083 add_to_free_list(page, zone, order, migratetype);
36e66c55
AD
1084
1085 /* Notify page reporting subsystem of freed page */
f04a5d5d 1086 if (!(fpi_flags & FPI_SKIP_REPORT_NOTIFY))
36e66c55 1087 page_reporting_notify_free(order);
1da177e4
LT
1088}
1089
7bfec6f4
MG
1090/*
1091 * A bad page could be due to a number of fields. Instead of multiple branches,
1092 * try and check multiple fields with one check. The caller must do a detailed
1093 * check if necessary.
1094 */
1095static inline bool page_expected_state(struct page *page,
1096 unsigned long check_flags)
1097{
1098 if (unlikely(atomic_read(&page->_mapcount) != -1))
1099 return false;
1100
1101 if (unlikely((unsigned long)page->mapping |
1102 page_ref_count(page) |
1103#ifdef CONFIG_MEMCG
bcfe06bf 1104 (unsigned long)page_memcg(page) |
7bfec6f4
MG
1105#endif
1106 (page->flags & check_flags)))
1107 return false;
1108
1109 return true;
1110}
1111
58b7f119 1112static const char *page_bad_reason(struct page *page, unsigned long flags)
1da177e4 1113{
82a3241a 1114 const char *bad_reason = NULL;
f0b791a3 1115
53f9263b 1116 if (unlikely(atomic_read(&page->_mapcount) != -1))
f0b791a3
DH
1117 bad_reason = "nonzero mapcount";
1118 if (unlikely(page->mapping != NULL))
1119 bad_reason = "non-NULL mapping";
fe896d18 1120 if (unlikely(page_ref_count(page) != 0))
0139aa7b 1121 bad_reason = "nonzero _refcount";
58b7f119
WY
1122 if (unlikely(page->flags & flags)) {
1123 if (flags == PAGE_FLAGS_CHECK_AT_PREP)
1124 bad_reason = "PAGE_FLAGS_CHECK_AT_PREP flag(s) set";
1125 else
1126 bad_reason = "PAGE_FLAGS_CHECK_AT_FREE flag(s) set";
f0b791a3 1127 }
9edad6ea 1128#ifdef CONFIG_MEMCG
bcfe06bf 1129 if (unlikely(page_memcg(page)))
9edad6ea
JW
1130 bad_reason = "page still charged to cgroup";
1131#endif
58b7f119
WY
1132 return bad_reason;
1133}
1134
1135static void check_free_page_bad(struct page *page)
1136{
1137 bad_page(page,
1138 page_bad_reason(page, PAGE_FLAGS_CHECK_AT_FREE));
bb552ac6
MG
1139}
1140
534fe5e3 1141static inline int check_free_page(struct page *page)
bb552ac6 1142{
da838d4f 1143 if (likely(page_expected_state(page, PAGE_FLAGS_CHECK_AT_FREE)))
bb552ac6 1144 return 0;
bb552ac6
MG
1145
1146 /* Something has gone sideways, find it */
0d0c48a2 1147 check_free_page_bad(page);
7bfec6f4 1148 return 1;
1da177e4
LT
1149}
1150
4db7548c
MG
1151static int free_tail_pages_check(struct page *head_page, struct page *page)
1152{
1153 int ret = 1;
1154
1155 /*
1156 * We rely page->lru.next never has bit 0 set, unless the page
1157 * is PageTail(). Let's make sure that's true even for poisoned ->lru.
1158 */
1159 BUILD_BUG_ON((unsigned long)LIST_POISON1 & 1);
1160
1161 if (!IS_ENABLED(CONFIG_DEBUG_VM)) {
1162 ret = 0;
1163 goto out;
1164 }
1165 switch (page - head_page) {
1166 case 1:
4da1984e 1167 /* the first tail page: ->mapping may be compound_mapcount() */
4db7548c 1168 if (unlikely(compound_mapcount(page))) {
82a3241a 1169 bad_page(page, "nonzero compound_mapcount");
4db7548c
MG
1170 goto out;
1171 }
1172 break;
1173 case 2:
1174 /*
1175 * the second tail page: ->mapping is
fa3015b7 1176 * deferred_list.next -- ignore value.
4db7548c
MG
1177 */
1178 break;
1179 default:
1180 if (page->mapping != TAIL_MAPPING) {
82a3241a 1181 bad_page(page, "corrupted mapping in tail page");
4db7548c
MG
1182 goto out;
1183 }
1184 break;
1185 }
1186 if (unlikely(!PageTail(page))) {
82a3241a 1187 bad_page(page, "PageTail not set");
4db7548c
MG
1188 goto out;
1189 }
1190 if (unlikely(compound_head(page) != head_page)) {
82a3241a 1191 bad_page(page, "compound_head not consistent");
4db7548c
MG
1192 goto out;
1193 }
1194 ret = 0;
1195out:
1196 page->mapping = NULL;
1197 clear_compound_head(page);
1198 return ret;
1199}
1200
6471384a
AP
1201static void kernel_init_free_pages(struct page *page, int numpages)
1202{
1203 int i;
1204
9e15afa5
QC
1205 /* s390's use of memset() could override KASAN redzones. */
1206 kasan_disable_current();
aa1ef4d7
AK
1207 for (i = 0; i < numpages; i++) {
1208 page_kasan_tag_reset(page + i);
6471384a 1209 clear_highpage(page + i);
aa1ef4d7 1210 }
9e15afa5 1211 kasan_enable_current();
6471384a
AP
1212}
1213
e2769dbd
MG
1214static __always_inline bool free_pages_prepare(struct page *page,
1215 unsigned int order, bool check_free)
4db7548c 1216{
e2769dbd 1217 int bad = 0;
4db7548c 1218
4db7548c
MG
1219 VM_BUG_ON_PAGE(PageTail(page), page);
1220
e2769dbd 1221 trace_mm_page_free(page, order);
e2769dbd 1222
79f5f8fa
OS
1223 if (unlikely(PageHWPoison(page)) && !order) {
1224 /*
1225 * Do not let hwpoison pages hit pcplists/buddy
1226 * Untie memcg state and reset page's owner
1227 */
18b2db3b 1228 if (memcg_kmem_enabled() && PageMemcgKmem(page))
79f5f8fa
OS
1229 __memcg_kmem_uncharge_page(page, order);
1230 reset_page_owner(page, order);
1231 return false;
1232 }
1233
e2769dbd
MG
1234 /*
1235 * Check tail pages before head page information is cleared to
1236 * avoid checking PageCompound for order-0 pages.
1237 */
1238 if (unlikely(order)) {
1239 bool compound = PageCompound(page);
1240 int i;
1241
1242 VM_BUG_ON_PAGE(compound && compound_order(page) != order, page);
4db7548c 1243
9a73f61b
KS
1244 if (compound)
1245 ClearPageDoubleMap(page);
e2769dbd
MG
1246 for (i = 1; i < (1 << order); i++) {
1247 if (compound)
1248 bad += free_tail_pages_check(page, page + i);
534fe5e3 1249 if (unlikely(check_free_page(page + i))) {
e2769dbd
MG
1250 bad++;
1251 continue;
1252 }
1253 (page + i)->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
1254 }
1255 }
bda807d4 1256 if (PageMappingFlags(page))
4db7548c 1257 page->mapping = NULL;
18b2db3b 1258 if (memcg_kmem_enabled() && PageMemcgKmem(page))
f4b00eab 1259 __memcg_kmem_uncharge_page(page, order);
e2769dbd 1260 if (check_free)
534fe5e3 1261 bad += check_free_page(page);
e2769dbd
MG
1262 if (bad)
1263 return false;
4db7548c 1264
e2769dbd
MG
1265 page_cpupid_reset_last(page);
1266 page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
1267 reset_page_owner(page, order);
4db7548c
MG
1268
1269 if (!PageHighMem(page)) {
1270 debug_check_no_locks_freed(page_address(page),
e2769dbd 1271 PAGE_SIZE << order);
4db7548c 1272 debug_check_no_obj_freed(page_address(page),
e2769dbd 1273 PAGE_SIZE << order);
4db7548c 1274 }
6471384a
AP
1275 if (want_init_on_free())
1276 kernel_init_free_pages(page, 1 << order);
1277
8db26a3d
VB
1278 kernel_poison_pages(page, 1 << order);
1279
234fdce8
QC
1280 /*
1281 * arch_free_page() can make the page's contents inaccessible. s390
1282 * does this. So nothing which can access the page's contents should
1283 * happen after this.
1284 */
1285 arch_free_page(page, order);
1286
77bc7fd6 1287 debug_pagealloc_unmap_pages(page, 1 << order);
d6332692 1288
3c0c12cc 1289 kasan_free_nondeferred_pages(page, order);
4db7548c 1290
4db7548c
MG
1291 return true;
1292}
1293
e2769dbd 1294#ifdef CONFIG_DEBUG_VM
4462b32c
VB
1295/*
1296 * With DEBUG_VM enabled, order-0 pages are checked immediately when being freed
1297 * to pcp lists. With debug_pagealloc also enabled, they are also rechecked when
1298 * moved from pcp lists to free lists.
1299 */
1300static bool free_pcp_prepare(struct page *page)
e2769dbd
MG
1301{
1302 return free_pages_prepare(page, 0, true);
1303}
1304
4462b32c 1305static bool bulkfree_pcp_prepare(struct page *page)
e2769dbd 1306{
8e57f8ac 1307 if (debug_pagealloc_enabled_static())
534fe5e3 1308 return check_free_page(page);
4462b32c
VB
1309 else
1310 return false;
e2769dbd
MG
1311}
1312#else
4462b32c
VB
1313/*
1314 * With DEBUG_VM disabled, order-0 pages being freed are checked only when
1315 * moving from pcp lists to free list in order to reduce overhead. With
1316 * debug_pagealloc enabled, they are checked also immediately when being freed
1317 * to the pcp lists.
1318 */
e2769dbd
MG
1319static bool free_pcp_prepare(struct page *page)
1320{
8e57f8ac 1321 if (debug_pagealloc_enabled_static())
4462b32c
VB
1322 return free_pages_prepare(page, 0, true);
1323 else
1324 return free_pages_prepare(page, 0, false);
e2769dbd
MG
1325}
1326
4db7548c
MG
1327static bool bulkfree_pcp_prepare(struct page *page)
1328{
534fe5e3 1329 return check_free_page(page);
4db7548c
MG
1330}
1331#endif /* CONFIG_DEBUG_VM */
1332
97334162
AL
1333static inline void prefetch_buddy(struct page *page)
1334{
1335 unsigned long pfn = page_to_pfn(page);
1336 unsigned long buddy_pfn = __find_buddy_pfn(pfn, 0);
1337 struct page *buddy = page + (buddy_pfn - pfn);
1338
1339 prefetch(buddy);
1340}
1341
1da177e4 1342/*
5f8dcc21 1343 * Frees a number of pages from the PCP lists
1da177e4 1344 * Assumes all pages on list are in same zone, and of same order.
207f36ee 1345 * count is the number of pages to free.
1da177e4
LT
1346 *
1347 * If the zone was previously in an "all pages pinned" state then look to
1348 * see if this freeing clears that state.
1349 *
1350 * And clear the zone's pages_scanned counter, to hold off the "all pages are
1351 * pinned" detection logic.
1352 */
5f8dcc21
MG
1353static void free_pcppages_bulk(struct zone *zone, int count,
1354 struct per_cpu_pages *pcp)
1da177e4 1355{
5f8dcc21 1356 int migratetype = 0;
a6f9edd6 1357 int batch_free = 0;
5c3ad2eb 1358 int prefetch_nr = READ_ONCE(pcp->batch);
3777999d 1359 bool isolated_pageblocks;
0a5f4e5b
AL
1360 struct page *page, *tmp;
1361 LIST_HEAD(head);
f2260e6b 1362
88e8ac11
CTR
1363 /*
1364 * Ensure proper count is passed which otherwise would stuck in the
1365 * below while (list_empty(list)) loop.
1366 */
1367 count = min(pcp->count, count);
e5b31ac2 1368 while (count) {
5f8dcc21
MG
1369 struct list_head *list;
1370
1371 /*
a6f9edd6
MG
1372 * Remove pages from lists in a round-robin fashion. A
1373 * batch_free count is maintained that is incremented when an
1374 * empty list is encountered. This is so more pages are freed
1375 * off fuller lists instead of spinning excessively around empty
1376 * lists
5f8dcc21
MG
1377 */
1378 do {
a6f9edd6 1379 batch_free++;
5f8dcc21
MG
1380 if (++migratetype == MIGRATE_PCPTYPES)
1381 migratetype = 0;
1382 list = &pcp->lists[migratetype];
1383 } while (list_empty(list));
48db57f8 1384
1d16871d
NK
1385 /* This is the only non-empty list. Free them all. */
1386 if (batch_free == MIGRATE_PCPTYPES)
e5b31ac2 1387 batch_free = count;
1d16871d 1388
a6f9edd6 1389 do {
a16601c5 1390 page = list_last_entry(list, struct page, lru);
0a5f4e5b 1391 /* must delete to avoid corrupting pcp list */
a6f9edd6 1392 list_del(&page->lru);
77ba9062 1393 pcp->count--;
aa016d14 1394
4db7548c
MG
1395 if (bulkfree_pcp_prepare(page))
1396 continue;
1397
0a5f4e5b 1398 list_add_tail(&page->lru, &head);
97334162
AL
1399
1400 /*
1401 * We are going to put the page back to the global
1402 * pool, prefetch its buddy to speed up later access
1403 * under zone->lock. It is believed the overhead of
1404 * an additional test and calculating buddy_pfn here
1405 * can be offset by reduced memory latency later. To
1406 * avoid excessive prefetching due to large count, only
1407 * prefetch buddy for the first pcp->batch nr of pages.
1408 */
5c3ad2eb 1409 if (prefetch_nr) {
97334162 1410 prefetch_buddy(page);
5c3ad2eb
VB
1411 prefetch_nr--;
1412 }
e5b31ac2 1413 } while (--count && --batch_free && !list_empty(list));
1da177e4 1414 }
0a5f4e5b
AL
1415
1416 spin_lock(&zone->lock);
1417 isolated_pageblocks = has_isolate_pageblock(zone);
1418
1419 /*
1420 * Use safe version since after __free_one_page(),
1421 * page->lru.next will not point to original list.
1422 */
1423 list_for_each_entry_safe(page, tmp, &head, lru) {
1424 int mt = get_pcppage_migratetype(page);
1425 /* MIGRATE_ISOLATE page should not go to pcplists */
1426 VM_BUG_ON_PAGE(is_migrate_isolate(mt), page);
1427 /* Pageblock could have been isolated meanwhile */
1428 if (unlikely(isolated_pageblocks))
1429 mt = get_pageblock_migratetype(page);
1430
f04a5d5d 1431 __free_one_page(page, page_to_pfn(page), zone, 0, mt, FPI_NONE);
0a5f4e5b
AL
1432 trace_mm_page_pcpu_drain(page, 0, mt);
1433 }
d34b0733 1434 spin_unlock(&zone->lock);
1da177e4
LT
1435}
1436
dc4b0caf
MG
1437static void free_one_page(struct zone *zone,
1438 struct page *page, unsigned long pfn,
7aeb09f9 1439 unsigned int order,
7fef431b 1440 int migratetype, fpi_t fpi_flags)
1da177e4 1441{
d34b0733 1442 spin_lock(&zone->lock);
ad53f92e
JK
1443 if (unlikely(has_isolate_pageblock(zone) ||
1444 is_migrate_isolate(migratetype))) {
1445 migratetype = get_pfnblock_migratetype(page, pfn);
ad53f92e 1446 }
7fef431b 1447 __free_one_page(page, pfn, zone, order, migratetype, fpi_flags);
d34b0733 1448 spin_unlock(&zone->lock);
48db57f8
NP
1449}
1450
1e8ce83c 1451static void __meminit __init_single_page(struct page *page, unsigned long pfn,
d0dc12e8 1452 unsigned long zone, int nid)
1e8ce83c 1453{
d0dc12e8 1454 mm_zero_struct_page(page);
1e8ce83c 1455 set_page_links(page, zone, nid, pfn);
1e8ce83c
RH
1456 init_page_count(page);
1457 page_mapcount_reset(page);
1458 page_cpupid_reset_last(page);
2813b9c0 1459 page_kasan_tag_reset(page);
1e8ce83c 1460
1e8ce83c
RH
1461 INIT_LIST_HEAD(&page->lru);
1462#ifdef WANT_PAGE_VIRTUAL
1463 /* The shift won't overflow because ZONE_NORMAL is below 4G. */
1464 if (!is_highmem_idx(zone))
1465 set_page_address(page, __va(pfn << PAGE_SHIFT));
1466#endif
1467}
1468
7e18adb4 1469#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
57148a64 1470static void __meminit init_reserved_page(unsigned long pfn)
7e18adb4
MG
1471{
1472 pg_data_t *pgdat;
1473 int nid, zid;
1474
1475 if (!early_page_uninitialised(pfn))
1476 return;
1477
1478 nid = early_pfn_to_nid(pfn);
1479 pgdat = NODE_DATA(nid);
1480
1481 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
1482 struct zone *zone = &pgdat->node_zones[zid];
1483
1484 if (pfn >= zone->zone_start_pfn && pfn < zone_end_pfn(zone))
1485 break;
1486 }
d0dc12e8 1487 __init_single_page(pfn_to_page(pfn), pfn, zid, nid);
7e18adb4
MG
1488}
1489#else
1490static inline void init_reserved_page(unsigned long pfn)
1491{
1492}
1493#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
1494
92923ca3
NZ
1495/*
1496 * Initialised pages do not have PageReserved set. This function is
1497 * called for each range allocated by the bootmem allocator and
1498 * marks the pages PageReserved. The remaining valid pages are later
1499 * sent to the buddy page allocator.
1500 */
4b50bcc7 1501void __meminit reserve_bootmem_region(phys_addr_t start, phys_addr_t end)
92923ca3
NZ
1502{
1503 unsigned long start_pfn = PFN_DOWN(start);
1504 unsigned long end_pfn = PFN_UP(end);
1505
7e18adb4
MG
1506 for (; start_pfn < end_pfn; start_pfn++) {
1507 if (pfn_valid(start_pfn)) {
1508 struct page *page = pfn_to_page(start_pfn);
1509
1510 init_reserved_page(start_pfn);
1d798ca3
KS
1511
1512 /* Avoid false-positive PageTail() */
1513 INIT_LIST_HEAD(&page->lru);
1514
d483da5b
AD
1515 /*
1516 * no need for atomic set_bit because the struct
1517 * page is not visible yet so nobody should
1518 * access it yet.
1519 */
1520 __SetPageReserved(page);
7e18adb4
MG
1521 }
1522 }
92923ca3
NZ
1523}
1524
7fef431b
DH
1525static void __free_pages_ok(struct page *page, unsigned int order,
1526 fpi_t fpi_flags)
ec95f53a 1527{
d34b0733 1528 unsigned long flags;
95e34412 1529 int migratetype;
dc4b0caf 1530 unsigned long pfn = page_to_pfn(page);
ec95f53a 1531
e2769dbd 1532 if (!free_pages_prepare(page, order, true))
ec95f53a
KM
1533 return;
1534
cfc47a28 1535 migratetype = get_pfnblock_migratetype(page, pfn);
d34b0733
MG
1536 local_irq_save(flags);
1537 __count_vm_events(PGFREE, 1 << order);
7fef431b
DH
1538 free_one_page(page_zone(page), page, pfn, order, migratetype,
1539 fpi_flags);
d34b0733 1540 local_irq_restore(flags);
1da177e4
LT
1541}
1542
a9cd410a 1543void __free_pages_core(struct page *page, unsigned int order)
a226f6c8 1544{
c3993076 1545 unsigned int nr_pages = 1 << order;
e2d0bd2b 1546 struct page *p = page;
c3993076 1547 unsigned int loop;
a226f6c8 1548
7fef431b
DH
1549 /*
1550 * When initializing the memmap, __init_single_page() sets the refcount
1551 * of all pages to 1 ("allocated"/"not free"). We have to set the
1552 * refcount of all involved pages to 0.
1553 */
e2d0bd2b
YL
1554 prefetchw(p);
1555 for (loop = 0; loop < (nr_pages - 1); loop++, p++) {
1556 prefetchw(p + 1);
c3993076
JW
1557 __ClearPageReserved(p);
1558 set_page_count(p, 0);
a226f6c8 1559 }
e2d0bd2b
YL
1560 __ClearPageReserved(p);
1561 set_page_count(p, 0);
c3993076 1562
9705bea5 1563 atomic_long_add(nr_pages, &page_zone(page)->managed_pages);
7fef431b
DH
1564
1565 /*
1566 * Bypass PCP and place fresh pages right to the tail, primarily
1567 * relevant for memory onlining.
1568 */
1569 __free_pages_ok(page, order, FPI_TO_TAIL);
a226f6c8
DH
1570}
1571
3f08a302 1572#ifdef CONFIG_NEED_MULTIPLE_NODES
7ace9917 1573
03e92a5e
MR
1574/*
1575 * During memory init memblocks map pfns to nids. The search is expensive and
1576 * this caches recent lookups. The implementation of __early_pfn_to_nid
1577 * treats start/end as pfns.
1578 */
1579struct mminit_pfnnid_cache {
1580 unsigned long last_start;
1581 unsigned long last_end;
1582 int last_nid;
1583};
75a592a4 1584
03e92a5e 1585static struct mminit_pfnnid_cache early_pfnnid_cache __meminitdata;
6f24fbd3
MR
1586
1587/*
1588 * Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
1589 */
03e92a5e 1590static int __meminit __early_pfn_to_nid(unsigned long pfn,
6f24fbd3 1591 struct mminit_pfnnid_cache *state)
75a592a4 1592{
6f24fbd3 1593 unsigned long start_pfn, end_pfn;
75a592a4
MG
1594 int nid;
1595
6f24fbd3
MR
1596 if (state->last_start <= pfn && pfn < state->last_end)
1597 return state->last_nid;
1598
1599 nid = memblock_search_pfn_nid(pfn, &start_pfn, &end_pfn);
1600 if (nid != NUMA_NO_NODE) {
1601 state->last_start = start_pfn;
1602 state->last_end = end_pfn;
1603 state->last_nid = nid;
1604 }
7ace9917
MG
1605
1606 return nid;
75a592a4 1607}
75a592a4 1608
75a592a4 1609int __meminit early_pfn_to_nid(unsigned long pfn)
75a592a4 1610{
7ace9917 1611 static DEFINE_SPINLOCK(early_pfn_lock);
75a592a4
MG
1612 int nid;
1613
7ace9917 1614 spin_lock(&early_pfn_lock);
56ec43d8 1615 nid = __early_pfn_to_nid(pfn, &early_pfnnid_cache);
7ace9917 1616 if (nid < 0)
e4568d38 1617 nid = first_online_node;
7ace9917 1618 spin_unlock(&early_pfn_lock);
75a592a4 1619
7ace9917 1620 return nid;
75a592a4 1621}
3f08a302 1622#endif /* CONFIG_NEED_MULTIPLE_NODES */
75a592a4 1623
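The early_pfnnid_cache above memoizes the last memblock range that was hit, so repeated lookups of nearby pfns skip the expensive search. A rough standalone illustration of that caching pattern (hypothetical helper names, not kernel API):

#include <stdio.h>

struct range_cache {
	unsigned long last_start;	/* cached [start, end) pfn range */
	unsigned long last_end;
	int last_nid;			/* node id of the cached range */
};

/* Stand-in for the expensive memblock search: two fake 1024-pfn nodes. */
static int slow_lookup(unsigned long pfn, unsigned long *start, unsigned long *end)
{
	int nid = pfn < 1024 ? 0 : 1;

	*start = nid ? 1024 : 0;
	*end = nid ? 2048 : 1024;
	return nid;
}

static int cached_pfn_to_nid(unsigned long pfn, struct range_cache *c)
{
	unsigned long start, end;
	int nid;

	/* Fast path: pfn falls inside the previously found range. */
	if (c->last_start <= pfn && pfn < c->last_end)
		return c->last_nid;

	/* Slow path: search, then remember the whole range that was found. */
	nid = slow_lookup(pfn, &start, &end);
	if (nid >= 0) {
		c->last_start = start;
		c->last_end = end;
		c->last_nid = nid;
	}
	return nid;
}

int main(void)
{
	struct range_cache c = { 0, 0, -1 };

	printf("%d %d %d\n", cached_pfn_to_nid(5, &c),
	       cached_pfn_to_nid(6, &c),	/* served from the cache */
	       cached_pfn_to_nid(1500, &c));	/* cache miss, new range */
	return 0;
}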
7c2ee349 1624void __init memblock_free_pages(struct page *page, unsigned long pfn,
3a80a7fa
MG
1625 unsigned int order)
1626{
1627 if (early_page_uninitialised(pfn))
1628 return;
a9cd410a 1629 __free_pages_core(page, order);
3a80a7fa
MG
1630}
1631
7cf91a98
JK
1632/*
1633 * Check that the whole (or a subset of a) pageblock given by the interval of
1634 * [start_pfn, end_pfn) is valid and within the same zone, before scanning it
1635 * with the migration and free compaction scanners. The scanners then need to
1636 * use only pfn_valid_within() check for arches that allow holes within
1637 * pageblocks.
1638 *
1639 * Return struct page pointer of start_pfn, or NULL if checks were not passed.
1640 *
1641 * It's possible on some configurations to have a setup like node0 node1 node0
1642 * i.e. it's possible that all pages within a zone's range of pages do not
1643 * belong to a single zone. We assume that a border between node0 and node1
1644 * can occur within a single pageblock, but not a node0 node1 node0
1645 * interleaving within a single pageblock. It is therefore sufficient to check
1646 * the first and last page of a pageblock and avoid checking each individual
1647 * page in a pageblock.
1648 */
1649struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
1650 unsigned long end_pfn, struct zone *zone)
1651{
1652 struct page *start_page;
1653 struct page *end_page;
1654
1655 /* end_pfn is one past the range we are checking */
1656 end_pfn--;
1657
1658 if (!pfn_valid(start_pfn) || !pfn_valid(end_pfn))
1659 return NULL;
1660
2d070eab
MH
1661 start_page = pfn_to_online_page(start_pfn);
1662 if (!start_page)
1663 return NULL;
7cf91a98
JK
1664
1665 if (page_zone(start_page) != zone)
1666 return NULL;
1667
1668 end_page = pfn_to_page(end_pfn);
1669
1670 /* This gives a shorter code than deriving page_zone(end_page) */
1671 if (page_zone_id(start_page) != page_zone_id(end_page))
1672 return NULL;
1673
1674 return start_page;
1675}
1676
1677void set_zone_contiguous(struct zone *zone)
1678{
1679 unsigned long block_start_pfn = zone->zone_start_pfn;
1680 unsigned long block_end_pfn;
1681
1682 block_end_pfn = ALIGN(block_start_pfn + 1, pageblock_nr_pages);
1683 for (; block_start_pfn < zone_end_pfn(zone);
1684 block_start_pfn = block_end_pfn,
1685 block_end_pfn += pageblock_nr_pages) {
1686
1687 block_end_pfn = min(block_end_pfn, zone_end_pfn(zone));
1688
1689 if (!__pageblock_pfn_to_page(block_start_pfn,
1690 block_end_pfn, zone))
1691 return;
e84fe99b 1692 cond_resched();
7cf91a98
JK
1693 }
1694
1695 /* We confirm that there is no hole */
1696 zone->contiguous = true;
1697}
1698
1699void clear_zone_contiguous(struct zone *zone)
1700{
1701 zone->contiguous = false;
1702}
1703
7e18adb4 1704#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
2f47a91f
PT
1705static void __init deferred_free_range(unsigned long pfn,
1706 unsigned long nr_pages)
a4de83dd 1707{
2f47a91f
PT
1708 struct page *page;
1709 unsigned long i;
a4de83dd 1710
2f47a91f 1711 if (!nr_pages)
a4de83dd
MG
1712 return;
1713
2f47a91f
PT
1714 page = pfn_to_page(pfn);
1715
a4de83dd 1716 /* Free a large naturally-aligned chunk if possible */
e780149b
XQ
1717 if (nr_pages == pageblock_nr_pages &&
1718 (pfn & (pageblock_nr_pages - 1)) == 0) {
ac5d2539 1719 set_pageblock_migratetype(page, MIGRATE_MOVABLE);
a9cd410a 1720 __free_pages_core(page, pageblock_order);
a4de83dd
MG
1721 return;
1722 }
1723
e780149b
XQ
1724 for (i = 0; i < nr_pages; i++, page++, pfn++) {
1725 if ((pfn & (pageblock_nr_pages - 1)) == 0)
1726 set_pageblock_migratetype(page, MIGRATE_MOVABLE);
a9cd410a 1727 __free_pages_core(page, 0);
e780149b 1728 }
a4de83dd
MG
1729}
1730
d3cd131d
NS
1731/* Completion tracking for deferred_init_memmap() threads */
1732static atomic_t pgdat_init_n_undone __initdata;
1733static __initdata DECLARE_COMPLETION(pgdat_init_all_done_comp);
1734
1735static inline void __init pgdat_init_report_one_done(void)
1736{
1737 if (atomic_dec_and_test(&pgdat_init_n_undone))
1738 complete(&pgdat_init_all_done_comp);
1739}
0e1cc95b 1740
2f47a91f 1741/*
80b1f41c
PT
1742 * Returns true if page needs to be initialized or freed to buddy allocator.
1743 *
1744 * First we check if pfn is valid on architectures where it is possible to have
1745 * holes within pageblock_nr_pages. On systems where it is not possible, this
1746 * function is optimized out.
1747 *
1748 * Then, we check if a current large page is valid by only checking the validity
1749 * of the head pfn.
2f47a91f 1750 */
56ec43d8 1751static inline bool __init deferred_pfn_valid(unsigned long pfn)
2f47a91f 1752{
80b1f41c
PT
1753 if (!pfn_valid_within(pfn))
1754 return false;
1755 if (!(pfn & (pageblock_nr_pages - 1)) && !pfn_valid(pfn))
1756 return false;
80b1f41c
PT
1757 return true;
1758}
2f47a91f 1759
80b1f41c
PT
1760/*
1761 * Free pages to buddy allocator. Try to free aligned pages in
1762 * pageblock_nr_pages sizes.
1763 */
56ec43d8 1764static void __init deferred_free_pages(unsigned long pfn,
80b1f41c
PT
1765 unsigned long end_pfn)
1766{
80b1f41c
PT
1767 unsigned long nr_pgmask = pageblock_nr_pages - 1;
1768 unsigned long nr_free = 0;
2f47a91f 1769
80b1f41c 1770 for (; pfn < end_pfn; pfn++) {
56ec43d8 1771 if (!deferred_pfn_valid(pfn)) {
80b1f41c
PT
1772 deferred_free_range(pfn - nr_free, nr_free);
1773 nr_free = 0;
1774 } else if (!(pfn & nr_pgmask)) {
1775 deferred_free_range(pfn - nr_free, nr_free);
1776 nr_free = 1;
80b1f41c
PT
1777 } else {
1778 nr_free++;
1779 }
1780 }
1781 /* Free the last block of pages to allocator */
1782 deferred_free_range(pfn - nr_free, nr_free);
2f47a91f
PT
1783}
1784
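deferred_free_pages() above accumulates runs of contiguous valid pfns and flushes a run whenever it hits a hole or a pageblock boundary, so most frees happen as whole pageblocks. A toy sketch of that run-flushing pattern (hypothetical names, pageblock size hard-coded purely for illustration):

#include <stdio.h>
#include <stdbool.h>

#define PAGEBLOCK_NR_PAGES	512UL	/* illustrative value only */

static bool pfn_is_usable(unsigned long pfn)
{
	return pfn != 1000;		/* pretend pfn 1000 is a hole */
}

static void flush_run(unsigned long first_pfn, unsigned long nr)
{
	if (nr)
		printf("free [%lu, %lu)\n", first_pfn, first_pfn + nr);
}

static void free_range(unsigned long pfn, unsigned long end_pfn)
{
	unsigned long mask = PAGEBLOCK_NR_PAGES - 1;
	unsigned long nr_free = 0;

	for (; pfn < end_pfn; pfn++) {
		if (!pfn_is_usable(pfn)) {
			flush_run(pfn - nr_free, nr_free);	/* hole: flush and skip */
			nr_free = 0;
		} else if (!(pfn & mask)) {
			flush_run(pfn - nr_free, nr_free);	/* pageblock boundary */
			nr_free = 1;
		} else {
			nr_free++;
		}
	}
	flush_run(pfn - nr_free, nr_free);			/* trailing run */
}

int main(void)
{
	free_range(900, 1200);
	return 0;
}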
80b1f41c
PT
1785/*
1786 * Initialize struct pages. We minimize pfn page lookups and scheduler checks
1787 * by performing it only once every pageblock_nr_pages.
1788 * Return number of pages initialized.
1789 */
56ec43d8 1790static unsigned long __init deferred_init_pages(struct zone *zone,
80b1f41c
PT
1791 unsigned long pfn,
1792 unsigned long end_pfn)
2f47a91f 1793{
2f47a91f 1794 unsigned long nr_pgmask = pageblock_nr_pages - 1;
56ec43d8 1795 int nid = zone_to_nid(zone);
2f47a91f 1796 unsigned long nr_pages = 0;
56ec43d8 1797 int zid = zone_idx(zone);
2f47a91f 1798 struct page *page = NULL;
2f47a91f 1799
80b1f41c 1800 for (; pfn < end_pfn; pfn++) {
56ec43d8 1801 if (!deferred_pfn_valid(pfn)) {
80b1f41c 1802 page = NULL;
2f47a91f 1803 continue;
80b1f41c 1804 } else if (!page || !(pfn & nr_pgmask)) {
2f47a91f 1805 page = pfn_to_page(pfn);
80b1f41c
PT
1806 } else {
1807 page++;
2f47a91f 1808 }
d0dc12e8 1809 __init_single_page(page, pfn, zid, nid);
80b1f41c 1810 nr_pages++;
2f47a91f 1811 }
80b1f41c 1812 return (nr_pages);
2f47a91f
PT
1813}
1814
0e56acae
AD
1815/*
1816 * This function is meant to pre-load the iterator for the zone init.
1817 * Specifically it walks through the ranges until we are caught up to the
1818 * first_init_pfn value and exits there. If we never encounter the value we
1819 * return false indicating there are no valid ranges left.
1820 */
1821static bool __init
1822deferred_init_mem_pfn_range_in_zone(u64 *i, struct zone *zone,
1823 unsigned long *spfn, unsigned long *epfn,
1824 unsigned long first_init_pfn)
1825{
1826 u64 j;
1827
1828 /*
1829 * Start out by walking through the ranges in this zone that have
1830 * already been initialized. We don't need to do anything with them
1831 * so we just need to flush them out of the system.
1832 */
1833 for_each_free_mem_pfn_range_in_zone(j, zone, spfn, epfn) {
1834 if (*epfn <= first_init_pfn)
1835 continue;
1836 if (*spfn < first_init_pfn)
1837 *spfn = first_init_pfn;
1838 *i = j;
1839 return true;
1840 }
1841
1842 return false;
1843}
1844
1845/*
1846 * Initialize and free pages. We do it in two loops: first we initialize
1847 * struct page, then free to buddy allocator, because while we are
1848 * freeing pages we can access pages that are ahead (computing buddy
1849 * page in __free_one_page()).
1850 *
1851 * In order to try and keep some memory in the cache we have the loop
1852 * broken along max page order boundaries. This way we will not cause
1853 * any issues with the buddy page computation.
1854 */
1855static unsigned long __init
1856deferred_init_maxorder(u64 *i, struct zone *zone, unsigned long *start_pfn,
1857 unsigned long *end_pfn)
1858{
1859 unsigned long mo_pfn = ALIGN(*start_pfn + 1, MAX_ORDER_NR_PAGES);
1860 unsigned long spfn = *start_pfn, epfn = *end_pfn;
1861 unsigned long nr_pages = 0;
1862 u64 j = *i;
1863
1864 /* First we loop through and initialize the page values */
1865 for_each_free_mem_pfn_range_in_zone_from(j, zone, start_pfn, end_pfn) {
1866 unsigned long t;
1867
1868 if (mo_pfn <= *start_pfn)
1869 break;
1870
1871 t = min(mo_pfn, *end_pfn);
1872 nr_pages += deferred_init_pages(zone, *start_pfn, t);
1873
1874 if (mo_pfn < *end_pfn) {
1875 *start_pfn = mo_pfn;
1876 break;
1877 }
1878 }
1879
1880 /* Reset values and now loop through freeing pages as needed */
1881 swap(j, *i);
1882
1883 for_each_free_mem_pfn_range_in_zone_from(j, zone, &spfn, &epfn) {
1884 unsigned long t;
1885
1886 if (mo_pfn <= spfn)
1887 break;
1888
1889 t = min(mo_pfn, epfn);
1890 deferred_free_pages(spfn, t);
1891
1892 if (mo_pfn <= epfn)
1893 break;
1894 }
1895
1896 return nr_pages;
1897}
1898
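Each call to deferred_init_maxorder() above stops at the next MAX_ORDER boundary (mo_pfn), so the freeing loop never computes a buddy whose struct page has not been initialized yet. A small sketch of that boundary arithmetic, assuming MAX_ORDER_NR_PAGES is 1024 purely for illustration:

#include <stdio.h>

#define MAX_ORDER_NR_PAGES	1024UL	/* illustrative; depends on MAX_ORDER */
#define ALIGN_UP(x, a)		(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned long spfn = 3500;
	/* "+ 1" guarantees forward progress even when spfn is already aligned */
	unsigned long mo_pfn = ALIGN_UP(spfn + 1, MAX_ORDER_NR_PAGES);

	printf("this pass ends at pfn %lu\n", mo_pfn);	/* 4096 */

	spfn = 4096;
	mo_pfn = ALIGN_UP(spfn + 1, MAX_ORDER_NR_PAGES);
	printf("next pass ends at pfn %lu\n", mo_pfn);	/* 5120 */
	return 0;
}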
e4443149
DJ
1899static void __init
1900deferred_init_memmap_chunk(unsigned long start_pfn, unsigned long end_pfn,
1901 void *arg)
1902{
1903 unsigned long spfn, epfn;
1904 struct zone *zone = arg;
1905 u64 i;
1906
1907 deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn, start_pfn);
1908
1909 /*
1910 * Initialize and free pages in MAX_ORDER sized increments so that we
1911 * can avoid introducing any issues with the buddy allocator.
1912 */
1913 while (spfn < end_pfn) {
1914 deferred_init_maxorder(&i, zone, &spfn, &epfn);
1915 cond_resched();
1916 }
1917}
1918
ecd09650
DJ
1919/* An arch may override for more concurrency. */
1920__weak int __init
1921deferred_page_init_max_threads(const struct cpumask *node_cpumask)
1922{
1923 return 1;
1924}
1925
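The __weak default above caps deferred memmap initialisation at a single thread per node; an architecture can override it to use every CPU local to the node. A plausible override, roughly what x86 does, shown here only as a sketch:

#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/kernel.h>

/* Hypothetical arch override: one init thread per CPU on the node. */
int __init deferred_page_init_max_threads(const struct cpumask *node_cpumask)
{
	return max_t(int, cpumask_weight(node_cpumask), 1);
}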
7e18adb4 1926/* Initialise remaining memory on a node */
0e1cc95b 1927static int __init deferred_init_memmap(void *data)
7e18adb4 1928{
0e1cc95b 1929 pg_data_t *pgdat = data;
0e56acae 1930 const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
89c7c402 1931 unsigned long spfn = 0, epfn = 0;
0e56acae 1932 unsigned long first_init_pfn, flags;
7e18adb4 1933 unsigned long start = jiffies;
7e18adb4 1934 struct zone *zone;
e4443149 1935 int zid, max_threads;
2f47a91f 1936 u64 i;
7e18adb4 1937
3a2d7fa8
PT
1938 /* Bind memory initialisation thread to a local node if possible */
1939 if (!cpumask_empty(cpumask))
1940 set_cpus_allowed_ptr(current, cpumask);
1941
1942 pgdat_resize_lock(pgdat, &flags);
1943 first_init_pfn = pgdat->first_deferred_pfn;
0e1cc95b 1944 if (first_init_pfn == ULONG_MAX) {
3a2d7fa8 1945 pgdat_resize_unlock(pgdat, &flags);
d3cd131d 1946 pgdat_init_report_one_done();
0e1cc95b
MG
1947 return 0;
1948 }
1949
7e18adb4
MG
1950 /* Sanity check boundaries */
1951 BUG_ON(pgdat->first_deferred_pfn < pgdat->node_start_pfn);
1952 BUG_ON(pgdat->first_deferred_pfn > pgdat_end_pfn(pgdat));
1953 pgdat->first_deferred_pfn = ULONG_MAX;
1954
3d060856
PT
1955 /*
1956 * Once we unlock here, the zone cannot be grown anymore, thus if an
1957 * interrupt thread must allocate this early in boot, zone must be
1958 * pre-grown prior to start of deferred page initialization.
1959 */
1960 pgdat_resize_unlock(pgdat, &flags);
1961
7e18adb4
MG
1962 /* Only the highest zone is deferred so find it */
1963 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
1964 zone = pgdat->node_zones + zid;
1965 if (first_init_pfn < zone_end_pfn(zone))
1966 break;
1967 }
0e56acae
AD
1968
1969 /* If the zone is empty somebody else may have cleared out the zone */
1970 if (!deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn,
1971 first_init_pfn))
1972 goto zone_empty;
7e18adb4 1973
ecd09650 1974 max_threads = deferred_page_init_max_threads(cpumask);
7e18adb4 1975
117003c3 1976 while (spfn < epfn) {
e4443149
DJ
1977 unsigned long epfn_align = ALIGN(epfn, PAGES_PER_SECTION);
1978 struct padata_mt_job job = {
1979 .thread_fn = deferred_init_memmap_chunk,
1980 .fn_arg = zone,
1981 .start = spfn,
1982 .size = epfn_align - spfn,
1983 .align = PAGES_PER_SECTION,
1984 .min_chunk = PAGES_PER_SECTION,
1985 .max_threads = max_threads,
1986 };
1987
1988 padata_do_multithreaded(&job);
1989 deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn,
1990 epfn_align);
117003c3 1991 }
0e56acae 1992zone_empty:
7e18adb4
MG
1993 /* Sanity check that the next zone really is unpopulated */
1994 WARN_ON(++zid < MAX_NR_ZONES && populated_zone(++zone));
1995
89c7c402
DJ
1996 pr_info("node %d deferred pages initialised in %ums\n",
1997 pgdat->node_id, jiffies_to_msecs(jiffies - start));
d3cd131d
NS
1998
1999 pgdat_init_report_one_done();
0e1cc95b
MG
2000 return 0;
2001}
c9e97a19 2002
c9e97a19
PT
2003/*
2004 * If this zone has deferred pages, try to grow it by initializing enough
2005 * deferred pages to satisfy the allocation specified by order, rounded up to
2006 * the nearest PAGES_PER_SECTION boundary. So we're adding memory in increments
2007 * of SECTION_SIZE bytes by initializing struct pages in increments of
2008 * PAGES_PER_SECTION * sizeof(struct page) bytes.
2009 *
2010 * Return true when zone was grown, otherwise return false. We return true even
2011 * when we grow less than requested, to let the caller decide if there are
2012 * enough pages to satisfy the allocation.
2013 *
2014 * Note: We use noinline because this function is needed only during boot, and
2015 * it is called from a __ref function _deferred_grow_zone. This way we are
2016 * making sure that it is not inlined into permanent text section.
2017 */
2018static noinline bool __init
2019deferred_grow_zone(struct zone *zone, unsigned int order)
2020{
c9e97a19 2021 unsigned long nr_pages_needed = ALIGN(1 << order, PAGES_PER_SECTION);
837566e7 2022 pg_data_t *pgdat = zone->zone_pgdat;
c9e97a19 2023 unsigned long first_deferred_pfn = pgdat->first_deferred_pfn;
0e56acae
AD
2024 unsigned long spfn, epfn, flags;
2025 unsigned long nr_pages = 0;
c9e97a19
PT
2026 u64 i;
2027
2028 /* Only the last zone may have deferred pages */
2029 if (zone_end_pfn(zone) != pgdat_end_pfn(pgdat))
2030 return false;
2031
2032 pgdat_resize_lock(pgdat, &flags);
2033
c9e97a19
PT
2034 /*
2035 * If someone grew this zone while we were waiting for spinlock, return
2036 * true, as there might be enough pages already.
2037 */
2038 if (first_deferred_pfn != pgdat->first_deferred_pfn) {
2039 pgdat_resize_unlock(pgdat, &flags);
2040 return true;
2041 }
2042
0e56acae
AD
2043 /* If the zone is empty somebody else may have cleared out the zone */
2044 if (!deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn,
2045 first_deferred_pfn)) {
2046 pgdat->first_deferred_pfn = ULONG_MAX;
c9e97a19 2047 pgdat_resize_unlock(pgdat, &flags);
b9705d87
JG
2048 /* Retry only once. */
2049 return first_deferred_pfn != ULONG_MAX;
c9e97a19
PT
2050 }
2051
0e56acae
AD
2052 /*
2053 * Initialize and free pages in MAX_ORDER sized increments so
2054 * that we can avoid introducing any issues with the buddy
2055 * allocator.
2056 */
2057 while (spfn < epfn) {
2058 /* update our first deferred PFN for this section */
2059 first_deferred_pfn = spfn;
2060
2061 nr_pages += deferred_init_maxorder(&i, zone, &spfn, &epfn);
117003c3 2062 touch_nmi_watchdog();
c9e97a19 2063
0e56acae
AD
2064 /* We should only stop along section boundaries */
2065 if ((first_deferred_pfn ^ spfn) < PAGES_PER_SECTION)
2066 continue;
c9e97a19 2067
0e56acae 2068 /* If our quota has been met we can stop here */
c9e97a19
PT
2069 if (nr_pages >= nr_pages_needed)
2070 break;
2071 }
2072
0e56acae 2073 pgdat->first_deferred_pfn = spfn;
c9e97a19
PT
2074 pgdat_resize_unlock(pgdat, &flags);
2075
2076 return nr_pages > 0;
2077}
2078
2079/*
2080 * deferred_grow_zone() is __init, but it is called from
2081 * get_page_from_freelist() during early boot until deferred_pages permanently
2082 * disables this call. This is why we use a __ref wrapper: it avoids the
2083 * section mismatch warning and lets the __init function body be discarded after boot.
2084 */
2085static bool __ref
2086_deferred_grow_zone(struct zone *zone, unsigned int order)
2087{
2088 return deferred_grow_zone(zone, order);
2089}
2090
7cf91a98 2091#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
0e1cc95b
MG
2092
2093void __init page_alloc_init_late(void)
2094{
7cf91a98 2095 struct zone *zone;
e900a918 2096 int nid;
7cf91a98
JK
2097
2098#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
0e1cc95b 2099
d3cd131d
NS
2100 /* There will be num_node_state(N_MEMORY) threads */
2101 atomic_set(&pgdat_init_n_undone, num_node_state(N_MEMORY));
0e1cc95b 2102 for_each_node_state(nid, N_MEMORY) {
0e1cc95b
MG
2103 kthread_run(deferred_init_memmap, NODE_DATA(nid), "pgdatinit%d", nid);
2104 }
2105
2106 /* Block until all are initialised */
d3cd131d 2107 wait_for_completion(&pgdat_init_all_done_comp);
4248b0da 2108
3e8fc007
MG
2109 /*
2110 * The number of managed pages has changed due to the initialisation
2111 * so the pcpu batch and high limits needs to be updated or the limits
2112 * will be artificially small.
2113 */
2114 for_each_populated_zone(zone)
2115 zone_pcp_update(zone);
2116
c9e97a19
PT
2117 /*
2118 * We initialized the rest of the deferred pages. Permanently disable
2119 * on-demand struct page initialization.
2120 */
2121 static_branch_disable(&deferred_pages);
2122
4248b0da
MG
2123 /* Reinit limits that are based on free pages after the kernel is up */
2124 files_maxfiles_init();
7cf91a98 2125#endif
350e88ba 2126
ba8f3587
LF
2127 buffer_init();
2128
3010f876
PT
2129 /* Discard memblock private memory */
2130 memblock_discard();
7cf91a98 2131
e900a918
DW
2132 for_each_node_state(nid, N_MEMORY)
2133 shuffle_free_memory(NODE_DATA(nid));
2134
7cf91a98
JK
2135 for_each_populated_zone(zone)
2136 set_zone_contiguous(zone);
7e18adb4 2137}
7e18adb4 2138
47118af0 2139#ifdef CONFIG_CMA
9cf510a5 2140/* Free whole pageblock and set its migration type to MIGRATE_CMA. */
47118af0
MN
2141void __init init_cma_reserved_pageblock(struct page *page)
2142{
2143 unsigned i = pageblock_nr_pages;
2144 struct page *p = page;
2145
2146 do {
2147 __ClearPageReserved(p);
2148 set_page_count(p, 0);
d883c6cf 2149 } while (++p, --i);
47118af0 2150
47118af0 2151 set_pageblock_migratetype(page, MIGRATE_CMA);
dc78327c
MN
2152
2153 if (pageblock_order >= MAX_ORDER) {
2154 i = pageblock_nr_pages;
2155 p = page;
2156 do {
2157 set_page_refcounted(p);
2158 __free_pages(p, MAX_ORDER - 1);
2159 p += MAX_ORDER_NR_PAGES;
2160 } while (i -= MAX_ORDER_NR_PAGES);
2161 } else {
2162 set_page_refcounted(page);
2163 __free_pages(page, pageblock_order);
2164 }
2165
3dcc0571 2166 adjust_managed_page_count(page, pageblock_nr_pages);
47118af0
MN
2167}
2168#endif
1da177e4
LT
2169
2170/*
2171 * The order of subdivision here is critical for the IO subsystem.
2172 * Please do not alter this order without good reasons and regression
2173 * testing. Specifically, as large blocks of memory are subdivided,
2174 * the order in which smaller blocks are delivered depends on the order
2175 * they're subdivided in this function. This is the primary factor
2176 * influencing the order in which pages are delivered to the IO
2177 * subsystem according to empirical testing, and this is also justified
2178 * by considering the behavior of a buddy system containing a single
2179 * large block of memory acted on by a series of small allocations.
2180 * This behavior is a critical factor in sglist merging's success.
2181 *
6d49e352 2182 * -- nyc
1da177e4 2183 */
085cc7d5 2184static inline void expand(struct zone *zone, struct page *page,
6ab01363 2185 int low, int high, int migratetype)
1da177e4
LT
2186{
2187 unsigned long size = 1 << high;
2188
2189 while (high > low) {
1da177e4
LT
2190 high--;
2191 size >>= 1;
309381fe 2192 VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]);
c0a32fc5 2193
acbc15a4
JK
2194 /*
2195 * Mark as guard pages (or a guard page), which allows the block to
2196 * merge back into the allocator when the buddy is freed.
2197 * Corresponding page table entries will not be touched;
2198 * the pages stay not-present in the virtual address space.
2199 */
2200 if (set_page_guard(zone, &page[size], high, migratetype))
c0a32fc5 2201 continue;
acbc15a4 2202
6ab01363 2203 add_to_free_list(&page[size], zone, high, migratetype);
ab130f91 2204 set_buddy_order(&page[size], high);
1da177e4 2205 }
1da177e4
LT
2206}
2207
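expand() above repeatedly halves a larger block and returns the unused halves to the free lists. A standalone sketch (plain C, not part of this file) of the bookkeeping for one split, assuming an order-4 block is split down to satisfy an order-1 request:

#include <stdio.h>

int main(void)
{
	unsigned int low = 1, high = 4;		/* want order 1, found order 4 */
	unsigned long size = 1UL << high;	/* 16 pages */

	while (high > low) {
		high--;
		size >>= 1;
		/* The upper half at offset "size" stays free at order "high". */
		printf("free buddy: offset %lu pages, order %u\n", size, high);
	}
	printf("allocated block: %lu pages (order %u)\n", size, low);
	return 0;
}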
4e611801 2208static void check_new_page_bad(struct page *page)
1da177e4 2209{
f4c18e6f 2210 if (unlikely(page->flags & __PG_HWPOISON)) {
e570f56c
NH
2211 /* Don't complain about hwpoisoned pages */
2212 page_mapcount_reset(page); /* remove PageBuddy */
2213 return;
f4c18e6f 2214 }
58b7f119
WY
2215
2216 bad_page(page,
2217 page_bad_reason(page, PAGE_FLAGS_CHECK_AT_PREP));
4e611801
VB
2218}
2219
2220/*
2221 * This page is about to be returned from the page allocator
2222 */
2223static inline int check_new_page(struct page *page)
2224{
2225 if (likely(page_expected_state(page,
2226 PAGE_FLAGS_CHECK_AT_PREP|__PG_HWPOISON)))
2227 return 0;
2228
2229 check_new_page_bad(page);
2230 return 1;
2a7684a2
WF
2231}
2232
479f854a 2233#ifdef CONFIG_DEBUG_VM
4462b32c
VB
2234/*
2235 * With DEBUG_VM enabled, order-0 pages are checked for expected state when
2236 * being allocated from pcp lists. With debug_pagealloc also enabled, they are
2237 * also checked when pcp lists are refilled from the free lists.
2238 */
2239static inline bool check_pcp_refill(struct page *page)
479f854a 2240{
8e57f8ac 2241 if (debug_pagealloc_enabled_static())
4462b32c
VB
2242 return check_new_page(page);
2243 else
2244 return false;
479f854a
MG
2245}
2246
4462b32c 2247static inline bool check_new_pcp(struct page *page)
479f854a
MG
2248{
2249 return check_new_page(page);
2250}
2251#else
4462b32c
VB
2252/*
2253 * With DEBUG_VM disabled, free order-0 pages are checked for expected state
2254 * when pcp lists are being refilled from the free lists. With debug_pagealloc
2255 * enabled, they are also checked when being allocated from the pcp lists.
2256 */
2257static inline bool check_pcp_refill(struct page *page)
479f854a
MG
2258{
2259 return check_new_page(page);
2260}
4462b32c 2261static inline bool check_new_pcp(struct page *page)
479f854a 2262{
8e57f8ac 2263 if (debug_pagealloc_enabled_static())
4462b32c
VB
2264 return check_new_page(page);
2265 else
2266 return false;
479f854a
MG
2267}
2268#endif /* CONFIG_DEBUG_VM */
2269
2270static bool check_new_pages(struct page *page, unsigned int order)
2271{
2272 int i;
2273 for (i = 0; i < (1 << order); i++) {
2274 struct page *p = page + i;
2275
2276 if (unlikely(check_new_page(p)))
2277 return true;
2278 }
2279
2280 return false;
2281}
2282
46f24fd8
JK
2283inline void post_alloc_hook(struct page *page, unsigned int order,
2284 gfp_t gfp_flags)
2285{
2286 set_page_private(page, 0);
2287 set_page_refcounted(page);
2288
2289 arch_alloc_page(page, order);
77bc7fd6 2290 debug_pagealloc_map_pages(page, 1 << order);
46f24fd8 2291 kasan_alloc_pages(page, order);
8db26a3d 2292 kernel_unpoison_pages(page, 1 << order);
46f24fd8 2293 set_page_owner(page, order, gfp_flags);
862b6dee 2294
f289041e 2295 if (!want_init_on_free() && want_init_on_alloc(gfp_flags))
862b6dee 2296 kernel_init_free_pages(page, 1 << order);
46f24fd8
JK
2297}
2298
479f854a 2299static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
c603844b 2300 unsigned int alloc_flags)
2a7684a2 2301{
46f24fd8 2302 post_alloc_hook(page, order, gfp_flags);
17cf4406 2303
17cf4406
NP
2304 if (order && (gfp_flags & __GFP_COMP))
2305 prep_compound_page(page, order);
2306
75379191 2307 /*
2f064f34 2308 * page is set pfmemalloc when ALLOC_NO_WATERMARKS was necessary to
75379191
VB
2309 * allocate the page. The expectation is that the caller is taking
2310 * steps that will free more memory. The caller should avoid the page
2311 * being used for !PFMEMALLOC purposes.
2312 */
2f064f34
MH
2313 if (alloc_flags & ALLOC_NO_WATERMARKS)
2314 set_page_pfmemalloc(page);
2315 else
2316 clear_page_pfmemalloc(page);
1da177e4
LT
2317}
2318
56fd56b8
MG
2319/*
2320 * Go through the free lists for the given migratetype and remove
2321 * the smallest available page from the freelists
2322 */
85ccc8fa 2323static __always_inline
728ec980 2324struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
56fd56b8
MG
2325 int migratetype)
2326{
2327 unsigned int current_order;
b8af2941 2328 struct free_area *area;
56fd56b8
MG
2329 struct page *page;
2330
2331 /* Find a page of the appropriate size in the preferred list */
2332 for (current_order = order; current_order < MAX_ORDER; ++current_order) {
2333 area = &(zone->free_area[current_order]);
b03641af 2334 page = get_page_from_free_area(area, migratetype);
a16601c5
GT
2335 if (!page)
2336 continue;
6ab01363
AD
2337 del_page_from_free_list(page, zone, current_order);
2338 expand(zone, page, order, current_order, migratetype);
bb14c2c7 2339 set_pcppage_migratetype(page, migratetype);
56fd56b8
MG
2340 return page;
2341 }
2342
2343 return NULL;
2344}
2345
2346
b2a0ac88
MG
2347/*
2348 * This array describes the order in which other free lists are fallen back
2349 * to when the free lists for the desired migrate type are depleted
2350 */
da415663 2351static int fallbacks[MIGRATE_TYPES][3] = {
974a786e 2352 [MIGRATE_UNMOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE, MIGRATE_TYPES },
974a786e 2353 [MIGRATE_MOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_TYPES },
7ead3342 2354 [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE, MIGRATE_TYPES },
47118af0 2355#ifdef CONFIG_CMA
974a786e 2356 [MIGRATE_CMA] = { MIGRATE_TYPES }, /* Never used */
47118af0 2357#endif
194159fb 2358#ifdef CONFIG_MEMORY_ISOLATION
974a786e 2359 [MIGRATE_ISOLATE] = { MIGRATE_TYPES }, /* Never used */
194159fb 2360#endif
b2a0ac88
MG
2361};
2362
dc67647b 2363#ifdef CONFIG_CMA
85ccc8fa 2364static __always_inline struct page *__rmqueue_cma_fallback(struct zone *zone,
dc67647b
JK
2365 unsigned int order)
2366{
2367 return __rmqueue_smallest(zone, order, MIGRATE_CMA);
2368}
2369#else
2370static inline struct page *__rmqueue_cma_fallback(struct zone *zone,
2371 unsigned int order) { return NULL; }
2372#endif
2373
c361be55 2374/*
293ffa5e 2375 * Move the free pages in a range to the freelist tail of the requested type.
d9c23400 2376 * Note that start_page and end_pages are not aligned on a pageblock
c361be55
MG
2377 * boundary. If alignment is required, use move_freepages_block()
2378 */
02aa0cdd 2379static int move_freepages(struct zone *zone,
b69a7288 2380 struct page *start_page, struct page *end_page,
02aa0cdd 2381 int migratetype, int *num_movable)
c361be55
MG
2382{
2383 struct page *page;
d00181b9 2384 unsigned int order;
d100313f 2385 int pages_moved = 0;
c361be55 2386
c361be55
MG
2387 for (page = start_page; page <= end_page;) {
2388 if (!pfn_valid_within(page_to_pfn(page))) {
2389 page++;
2390 continue;
2391 }
2392
2393 if (!PageBuddy(page)) {
02aa0cdd
VB
2394 /*
2395 * We assume that pages that could be isolated for
2396 * migration are movable. But we don't actually try
2397 * isolating, as that would be expensive.
2398 */
2399 if (num_movable &&
2400 (PageLRU(page) || __PageMovable(page)))
2401 (*num_movable)++;
2402
c361be55
MG
2403 page++;
2404 continue;
2405 }
2406
cd961038
DR
2407 /* Make sure we are not inadvertently changing nodes */
2408 VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
2409 VM_BUG_ON_PAGE(page_zone(page) != zone, page);
2410
ab130f91 2411 order = buddy_order(page);
6ab01363 2412 move_to_free_list(page, zone, order, migratetype);
c361be55 2413 page += 1 << order;
d100313f 2414 pages_moved += 1 << order;
c361be55
MG
2415 }
2416
d100313f 2417 return pages_moved;
c361be55
MG
2418}
2419
ee6f509c 2420int move_freepages_block(struct zone *zone, struct page *page,
02aa0cdd 2421 int migratetype, int *num_movable)
c361be55
MG
2422{
2423 unsigned long start_pfn, end_pfn;
2424 struct page *start_page, *end_page;
2425
4a222127
DR
2426 if (num_movable)
2427 *num_movable = 0;
2428
c361be55 2429 start_pfn = page_to_pfn(page);
d9c23400 2430 start_pfn = start_pfn & ~(pageblock_nr_pages-1);
c361be55 2431 start_page = pfn_to_page(start_pfn);
d9c23400
MG
2432 end_page = start_page + pageblock_nr_pages - 1;
2433 end_pfn = start_pfn + pageblock_nr_pages - 1;
c361be55
MG
2434
2435 /* Do not cross zone boundaries */
108bcc96 2436 if (!zone_spans_pfn(zone, start_pfn))
c361be55 2437 start_page = page;
108bcc96 2438 if (!zone_spans_pfn(zone, end_pfn))
c361be55
MG
2439 return 0;
2440
02aa0cdd
VB
2441 return move_freepages(zone, start_page, end_page, migratetype,
2442 num_movable);
c361be55
MG
2443}
2444
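move_freepages_block() above rounds the given pfn down to its pageblock start and then operates on the whole block. A tiny sketch of that alignment arithmetic, with pageblock_nr_pages taken as 512 purely for illustration:

#include <stdio.h>

#define PAGEBLOCK_NR_PAGES	512UL	/* illustrative value only */

int main(void)
{
	unsigned long pfn = 131707;
	unsigned long start_pfn = pfn & ~(PAGEBLOCK_NR_PAGES - 1);
	unsigned long end_pfn = start_pfn + PAGEBLOCK_NR_PAGES - 1;

	/* 131707 lies in the pageblock [131584, 132095] */
	printf("pageblock of pfn %lu: [%lu, %lu]\n", pfn, start_pfn, end_pfn);
	return 0;
}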
2f66a68f
MG
2445static void change_pageblock_range(struct page *pageblock_page,
2446 int start_order, int migratetype)
2447{
2448 int nr_pageblocks = 1 << (start_order - pageblock_order);
2449
2450 while (nr_pageblocks--) {
2451 set_pageblock_migratetype(pageblock_page, migratetype);
2452 pageblock_page += pageblock_nr_pages;
2453 }
2454}
2455
fef903ef 2456/*
9c0415eb
VB
2457 * When we are falling back to another migratetype during allocation, try to
2458 * steal extra free pages from the same pageblocks to satisfy further
2459 * allocations, instead of polluting multiple pageblocks.
2460 *
2461 * If we are stealing a relatively large buddy page, it is likely there will
2462 * be more free pages in the pageblock, so try to steal them all. For
2463 * reclaimable and unmovable allocations, we steal regardless of page size,
2464 * as fragmentation caused by those allocations polluting movable pageblocks
2465 * is worse than movable allocations stealing from unmovable and reclaimable
2466 * pageblocks.
fef903ef 2467 */
4eb7dce6
JK
2468static bool can_steal_fallback(unsigned int order, int start_mt)
2469{
2470 /*
2471 * Leaving this order check is intended, although there is
2472 * relaxed order check in next check. The reason is that
2473 * we can actually steal whole pageblock if this condition met,
2474 * but, below check doesn't guarantee it and that is just heuristic
2475 * so could be changed anytime.
2476 */
2477 if (order >= pageblock_order)
2478 return true;
2479
2480 if (order >= pageblock_order / 2 ||
2481 start_mt == MIGRATE_RECLAIMABLE ||
2482 start_mt == MIGRATE_UNMOVABLE ||
2483 page_group_by_mobility_disabled)
2484 return true;
2485
2486 return false;
2487}
2488
597c8920 2489static inline bool boost_watermark(struct zone *zone)
1c30844d
MG
2490{
2491 unsigned long max_boost;
2492
2493 if (!watermark_boost_factor)
597c8920 2494 return false;
14f69140
HW
2495 /*
2496 * Don't bother in zones that are unlikely to produce results.
2497 * On small machines, including kdump capture kernels running
2498 * in a small area, boosting the watermark can cause an out of
2499 * memory situation immediately.
2500 */
2501 if ((pageblock_nr_pages * 4) > zone_managed_pages(zone))
597c8920 2502 return false;
1c30844d
MG
2503
2504 max_boost = mult_frac(zone->_watermark[WMARK_HIGH],
2505 watermark_boost_factor, 10000);
94b3334c
MG
2506
2507 /*
2508 * high watermark may be uninitialised if fragmentation occurs
2509 * very early in boot so do not boost. We do not fall
2510 * through and boost by pageblock_nr_pages as failing
2511 * allocations that early means that reclaim is not going
2512 * to help and it may even be impossible to reclaim the
2513 * boosted watermark resulting in a hang.
2514 */
2515 if (!max_boost)
597c8920 2516 return false;
94b3334c 2517
1c30844d
MG
2518 max_boost = max(pageblock_nr_pages, max_boost);
2519
2520 zone->watermark_boost = min(zone->watermark_boost + pageblock_nr_pages,
2521 max_boost);
597c8920
JW
2522
2523 return true;
1c30844d
MG
2524}
2525
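boost_watermark() above raises the boost by one pageblock per fallback event, clamped to a fraction of the high watermark given by watermark_boost_factor (in 1/10000 units). A worked sketch of that arithmetic with made-up zone numbers:

#include <stdio.h>

int main(void)
{
	unsigned long high_wmark = 12800;	/* pages, made-up zone value */
	unsigned long boost_factor = 15000;	/* default: 150% of the high wmark */
	unsigned long pageblock_nr_pages = 512;
	unsigned long boost = 0;

	/* mult_frac(high, factor, 10000) without intermediate overflow */
	unsigned long max_boost = high_wmark / 10000 * boost_factor +
				  high_wmark % 10000 * boost_factor / 10000;
	if (max_boost < pageblock_nr_pages)
		max_boost = pageblock_nr_pages;

	/* One fallback event: grow the boost by a pageblock, up to max_boost. */
	boost += pageblock_nr_pages;
	if (boost > max_boost)
		boost = max_boost;

	printf("max_boost=%lu boost=%lu\n", max_boost, boost);	/* 19200, 512 */
	return 0;
}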
4eb7dce6
JK
2526/*
2527 * This function implements actual steal behaviour. If order is large enough,
2528 * we can steal whole pageblock. If not, we first move freepages in this
02aa0cdd
VB
2529 * pageblock to our migratetype and determine how many already-allocated pages
2530 * are there in the pageblock with a compatible migratetype. If at least half
2531 * of pages are free or compatible, we can change migratetype of the pageblock
2532 * itself, so pages freed in the future will be put on the correct free list.
4eb7dce6
JK
2533 */
2534static void steal_suitable_fallback(struct zone *zone, struct page *page,
1c30844d 2535 unsigned int alloc_flags, int start_type, bool whole_block)
fef903ef 2536{
ab130f91 2537 unsigned int current_order = buddy_order(page);
02aa0cdd
VB
2538 int free_pages, movable_pages, alike_pages;
2539 int old_block_type;
2540
2541 old_block_type = get_pageblock_migratetype(page);
fef903ef 2542
3bc48f96
VB
2543 /*
2544 * This can happen due to races and we want to prevent broken
2545 * highatomic accounting.
2546 */
02aa0cdd 2547 if (is_migrate_highatomic(old_block_type))
3bc48f96
VB
2548 goto single_page;
2549
fef903ef
SB
2550 /* Take ownership for orders >= pageblock_order */
2551 if (current_order >= pageblock_order) {
2552 change_pageblock_range(page, current_order, start_type);
3bc48f96 2553 goto single_page;
fef903ef
SB
2554 }
2555
1c30844d
MG
2556 /*
2557 * Boost watermarks to increase reclaim pressure to reduce the
2558 * likelihood of future fallbacks. Wake kswapd now as the node
2559 * may be balanced overall and kswapd will not wake naturally.
2560 */
597c8920 2561 if (boost_watermark(zone) && (alloc_flags & ALLOC_KSWAPD))
73444bc4 2562 set_bit(ZONE_BOOSTED_WATERMARK, &zone->flags);
1c30844d 2563
3bc48f96
VB
2564 /* We are not allowed to try stealing from the whole block */
2565 if (!whole_block)
2566 goto single_page;
2567
02aa0cdd
VB
2568 free_pages = move_freepages_block(zone, page, start_type,
2569 &movable_pages);
2570 /*
2571 * Determine how many pages are compatible with our allocation.
2572 * For movable allocation, it's the number of movable pages which
2573 * we just obtained. For other types it's a bit more tricky.
2574 */
2575 if (start_type == MIGRATE_MOVABLE) {
2576 alike_pages = movable_pages;
2577 } else {
2578 /*
2579 * If we are falling back a RECLAIMABLE or UNMOVABLE allocation
2580 * to MOVABLE pageblock, consider all non-movable pages as
2581 * compatible. If it's UNMOVABLE falling back to RECLAIMABLE or
2582 * vice versa, be conservative since we can't distinguish the
2583 * exact migratetype of non-movable pages.
2584 */
2585 if (old_block_type == MIGRATE_MOVABLE)
2586 alike_pages = pageblock_nr_pages
2587 - (free_pages + movable_pages);
2588 else
2589 alike_pages = 0;
2590 }
2591
3bc48f96 2592 /* moving whole block can fail due to zone boundary conditions */
02aa0cdd 2593 if (!free_pages)
3bc48f96 2594 goto single_page;
fef903ef 2595
02aa0cdd
VB
2596 /*
2597 * If a sufficient number of pages in the block are either free or of
2598 * comparable migratability as our allocation, claim the whole block.
2599 */
2600 if (free_pages + alike_pages >= (1 << (pageblock_order-1)) ||
4eb7dce6
JK
2601 page_group_by_mobility_disabled)
2602 set_pageblock_migratetype(page, start_type);
3bc48f96
VB
2603
2604 return;
2605
2606single_page:
6ab01363 2607 move_to_free_list(page, zone, current_order, start_type);
4eb7dce6
JK
2608}
2609
2149cdae
JK
2610/*
2611 * Check whether there is a suitable fallback freepage with requested order.
2612 * If only_stealable is true, this function returns fallback_mt only if
2613 * we can steal other freepages all together. This would help to reduce
2614 * fragmentation due to mixed migratetype pages in one pageblock.
2615 */
2616int find_suitable_fallback(struct free_area *area, unsigned int order,
2617 int migratetype, bool only_stealable, bool *can_steal)
4eb7dce6
JK
2618{
2619 int i;
2620 int fallback_mt;
2621
2622 if (area->nr_free == 0)
2623 return -1;
2624
2625 *can_steal = false;
2626 for (i = 0;; i++) {
2627 fallback_mt = fallbacks[migratetype][i];
974a786e 2628 if (fallback_mt == MIGRATE_TYPES)
4eb7dce6
JK
2629 break;
2630
b03641af 2631 if (free_area_empty(area, fallback_mt))
4eb7dce6 2632 continue;
fef903ef 2633
4eb7dce6
JK
2634 if (can_steal_fallback(order, migratetype))
2635 *can_steal = true;
2636
2149cdae
JK
2637 if (!only_stealable)
2638 return fallback_mt;
2639
2640 if (*can_steal)
2641 return fallback_mt;
fef903ef 2642 }
4eb7dce6
JK
2643
2644 return -1;
fef903ef
SB
2645}
2646
0aaa29a5
MG
2647/*
2648 * Reserve a pageblock for exclusive use of high-order atomic allocations if
2649 * there are no empty page blocks that contain a page with a suitable order
2650 */
2651static void reserve_highatomic_pageblock(struct page *page, struct zone *zone,
2652 unsigned int alloc_order)
2653{
2654 int mt;
2655 unsigned long max_managed, flags;
2656
2657 /*
2658 * Limit the number reserved to 1 pageblock or roughly 1% of a zone.
2659 * Check is race-prone but harmless.
2660 */
9705bea5 2661 max_managed = (zone_managed_pages(zone) / 100) + pageblock_nr_pages;
0aaa29a5
MG
2662 if (zone->nr_reserved_highatomic >= max_managed)
2663 return;
2664
2665 spin_lock_irqsave(&zone->lock, flags);
2666
2667 /* Recheck the nr_reserved_highatomic limit under the lock */
2668 if (zone->nr_reserved_highatomic >= max_managed)
2669 goto out_unlock;
2670
2671 /* Yoink! */
2672 mt = get_pageblock_migratetype(page);
a6ffdc07
XQ
2673 if (!is_migrate_highatomic(mt) && !is_migrate_isolate(mt)
2674 && !is_migrate_cma(mt)) {
0aaa29a5
MG
2675 zone->nr_reserved_highatomic += pageblock_nr_pages;
2676 set_pageblock_migratetype(page, MIGRATE_HIGHATOMIC);
02aa0cdd 2677 move_freepages_block(zone, page, MIGRATE_HIGHATOMIC, NULL);
0aaa29a5
MG
2678 }
2679
2680out_unlock:
2681 spin_unlock_irqrestore(&zone->lock, flags);
2682}
2683
2684/*
2685 * Used when an allocation is about to fail under memory pressure. This
2686 * potentially hurts the reliability of high-order allocations when under
2687 * intense memory pressure but failed atomic allocations should be easier
2688 * to recover from than an OOM.
29fac03b
MK
2689 *
2690 * If @force is true, try to unreserve a pageblock even though highatomic
2691 * pageblock is exhausted.
0aaa29a5 2692 */
29fac03b
MK
2693static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
2694 bool force)
0aaa29a5
MG
2695{
2696 struct zonelist *zonelist = ac->zonelist;
2697 unsigned long flags;
2698 struct zoneref *z;
2699 struct zone *zone;
2700 struct page *page;
2701 int order;
04c8716f 2702 bool ret;
0aaa29a5 2703
97a225e6 2704 for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->highest_zoneidx,
0aaa29a5 2705 ac->nodemask) {
29fac03b
MK
2706 /*
2707 * Preserve at least one pageblock unless memory pressure
2708 * is really high.
2709 */
2710 if (!force && zone->nr_reserved_highatomic <=
2711 pageblock_nr_pages)
0aaa29a5
MG
2712 continue;
2713
2714 spin_lock_irqsave(&zone->lock, flags);
2715 for (order = 0; order < MAX_ORDER; order++) {
2716 struct free_area *area = &(zone->free_area[order]);
2717
b03641af 2718 page = get_page_from_free_area(area, MIGRATE_HIGHATOMIC);
a16601c5 2719 if (!page)
0aaa29a5
MG
2720 continue;
2721
0aaa29a5 2722 /*
4855e4a7
MK
2723 * In the page freeing path, the migratetype change is racy, so
2724 * we can encounter several free pages in a pageblock
2725 * in this loop although we already changed the pageblock type
2726 * from highatomic to ac->migratetype. So we should
2727 * adjust the count once.
0aaa29a5 2728 */
a6ffdc07 2729 if (is_migrate_highatomic_page(page)) {
4855e4a7
MK
2730 /*
2731 * It should never happen but changes to
2732 * locking could inadvertently allow a per-cpu
2733 * drain to add pages to MIGRATE_HIGHATOMIC
2734 * while unreserving so be safe and watch for
2735 * underflows.
2736 */
2737 zone->nr_reserved_highatomic -= min(
2738 pageblock_nr_pages,
2739 zone->nr_reserved_highatomic);
2740 }
0aaa29a5
MG
2741
2742 /*
2743 * Convert to ac->migratetype and avoid the normal
2744 * pageblock stealing heuristics. Minimally, the caller
2745 * is doing the work and needs the pages. More
2746 * importantly, if the block was always converted to
2747 * MIGRATE_UNMOVABLE or another type then the number
2748 * of pageblocks that cannot be completely freed
2749 * may increase.
2750 */
2751 set_pageblock_migratetype(page, ac->migratetype);
02aa0cdd
VB
2752 ret = move_freepages_block(zone, page, ac->migratetype,
2753 NULL);
29fac03b
MK
2754 if (ret) {
2755 spin_unlock_irqrestore(&zone->lock, flags);
2756 return ret;
2757 }
0aaa29a5
MG
2758 }
2759 spin_unlock_irqrestore(&zone->lock, flags);
2760 }
04c8716f
MK
2761
2762 return false;
0aaa29a5
MG
2763}
2764
3bc48f96
VB
2765/*
2766 * Try finding a free buddy page on the fallback list and put it on the free
2767 * list of requested migratetype, possibly along with other pages from the same
2768 * block, depending on fragmentation avoidance heuristics. Returns true if
2769 * fallback was found so that __rmqueue_smallest() can grab it.
b002529d
RV
2770 *
2771 * The use of signed ints for order and current_order is a deliberate
2772 * deviation from the rest of this file, to make the for loop
2773 * condition simpler.
3bc48f96 2774 */
85ccc8fa 2775static __always_inline bool
6bb15450
MG
2776__rmqueue_fallback(struct zone *zone, int order, int start_migratetype,
2777 unsigned int alloc_flags)
b2a0ac88 2778{
b8af2941 2779 struct free_area *area;
b002529d 2780 int current_order;
6bb15450 2781 int min_order = order;
b2a0ac88 2782 struct page *page;
4eb7dce6
JK
2783 int fallback_mt;
2784 bool can_steal;
b2a0ac88 2785
6bb15450
MG
2786 /*
2787 * Do not steal pages from freelists belonging to other pageblocks
2788 * i.e. orders < pageblock_order. If there are no local zones free,
2789 * the zonelists will be reiterated without ALLOC_NOFRAGMENT.
2790 */
2791 if (alloc_flags & ALLOC_NOFRAGMENT)
2792 min_order = pageblock_order;
2793
7a8f58f3
VB
2794 /*
2795 * Find the largest available free page in the other list. This roughly
2796 * approximates finding the pageblock with the most free pages, which
2797 * would be too costly to do exactly.
2798 */
6bb15450 2799 for (current_order = MAX_ORDER - 1; current_order >= min_order;
7aeb09f9 2800 --current_order) {
4eb7dce6
JK
2801 area = &(zone->free_area[current_order]);
2802 fallback_mt = find_suitable_fallback(area, current_order,
2149cdae 2803 start_migratetype, false, &can_steal);
4eb7dce6
JK
2804 if (fallback_mt == -1)
2805 continue;
b2a0ac88 2806
7a8f58f3
VB
2807 /*
2808 * We cannot steal all free pages from the pageblock and the
2809 * requested migratetype is movable. In that case it's better to
2810 * steal and split the smallest available page instead of the
2811 * largest available page, because even if the next movable
2812 * allocation falls back into a different pageblock than this
2813 * one, it won't cause permanent fragmentation.
2814 */
2815 if (!can_steal && start_migratetype == MIGRATE_MOVABLE
2816 && current_order > order)
2817 goto find_smallest;
b2a0ac88 2818
7a8f58f3
VB
2819 goto do_steal;
2820 }
e0fff1bd 2821
7a8f58f3 2822 return false;
e0fff1bd 2823
7a8f58f3
VB
2824find_smallest:
2825 for (current_order = order; current_order < MAX_ORDER;
2826 current_order++) {
2827 area = &(zone->free_area[current_order]);
2828 fallback_mt = find_suitable_fallback(area, current_order,
2829 start_migratetype, false, &can_steal);
2830 if (fallback_mt != -1)
2831 break;
b2a0ac88
MG
2832 }
2833
7a8f58f3
VB
2834 /*
2835 * This should not happen - we already found a suitable fallback
2836 * when looking for the largest page.
2837 */
2838 VM_BUG_ON(current_order == MAX_ORDER);
2839
2840do_steal:
b03641af 2841 page = get_page_from_free_area(area, fallback_mt);
7a8f58f3 2842
1c30844d
MG
2843 steal_suitable_fallback(zone, page, alloc_flags, start_migratetype,
2844 can_steal);
7a8f58f3
VB
2845
2846 trace_mm_page_alloc_extfrag(page, order, current_order,
2847 start_migratetype, fallback_mt);
2848
2849 return true;
2850
b2a0ac88
MG
2851}
2852
56fd56b8 2853/*
1da177e4
LT
2854 * Do the hard work of removing an element from the buddy allocator.
2855 * Call me with the zone->lock already held.
2856 */
85ccc8fa 2857static __always_inline struct page *
6bb15450
MG
2858__rmqueue(struct zone *zone, unsigned int order, int migratetype,
2859 unsigned int alloc_flags)
1da177e4 2860{
1da177e4
LT
2861 struct page *page;
2862
16867664
RG
2863#ifdef CONFIG_CMA
2864 /*
2865 * Balance movable allocations between regular and CMA areas by
2866 * allocating from CMA when over half of the zone's free memory
2867 * is in the CMA area.
2868 */
8510e69c 2869 if (alloc_flags & ALLOC_CMA &&
16867664
RG
2870 zone_page_state(zone, NR_FREE_CMA_PAGES) >
2871 zone_page_state(zone, NR_FREE_PAGES) / 2) {
2872 page = __rmqueue_cma_fallback(zone, order);
2873 if (page)
2874 return page;
2875 }
2876#endif
3bc48f96 2877retry:
56fd56b8 2878 page = __rmqueue_smallest(zone, order, migratetype);
974a786e 2879 if (unlikely(!page)) {
8510e69c 2880 if (alloc_flags & ALLOC_CMA)
dc67647b
JK
2881 page = __rmqueue_cma_fallback(zone, order);
2882
6bb15450
MG
2883 if (!page && __rmqueue_fallback(zone, order, migratetype,
2884 alloc_flags))
3bc48f96 2885 goto retry;
728ec980
MG
2886 }
2887
0d3d062a 2888 trace_mm_page_alloc_zone_locked(page, order, migratetype);
b2a0ac88 2889 return page;
1da177e4
LT
2890}
2891
5f63b720 2892/*
1da177e4
LT
2893 * Obtain a specified number of elements from the buddy allocator, all under
2894 * a single hold of the lock, for efficiency. Add them to the supplied list.
2895 * Returns the number of new pages which were placed at *list.
2896 */
5f63b720 2897static int rmqueue_bulk(struct zone *zone, unsigned int order,
b2a0ac88 2898 unsigned long count, struct list_head *list,
6bb15450 2899 int migratetype, unsigned int alloc_flags)
1da177e4 2900{
a6de734b 2901 int i, alloced = 0;
5f63b720 2902
d34b0733 2903 spin_lock(&zone->lock);
1da177e4 2904 for (i = 0; i < count; ++i) {
6bb15450
MG
2905 struct page *page = __rmqueue(zone, order, migratetype,
2906 alloc_flags);
085cc7d5 2907 if (unlikely(page == NULL))
1da177e4 2908 break;
81eabcbe 2909
479f854a
MG
2910 if (unlikely(check_pcp_refill(page)))
2911 continue;
2912
81eabcbe 2913 /*
0fac3ba5
VB
2914 * Split buddy pages returned by expand() are received here in
2915 * physical page order. Each page is added to the tail of the
2916 * caller's list. From the caller's perspective, the linked list
2917 * is therefore ordered by page number under some conditions,
2918 * which is useful for IO devices that can forward from the
2919 * head of the list in physical page order, and for IO devices
2920 * that can merge IO requests when the physical pages are
2921 * ordered properly.
81eabcbe 2922 */
0fac3ba5 2923 list_add_tail(&page->lru, list);
a6de734b 2924 alloced++;
bb14c2c7 2925 if (is_migrate_cma(get_pcppage_migratetype(page)))
d1ce749a
BZ
2926 __mod_zone_page_state(zone, NR_FREE_CMA_PAGES,
2927 -(1 << order));
1da177e4 2928 }
a6de734b
MG
2929
2930 /*
2931 * i pages were removed from the buddy list even if some leak due
2932 * to check_pcp_refill failing so adjust NR_FREE_PAGES based
2933 * on i. Do not confuse with 'alloced' which is the number of
2934 * pages added to the pcp list.
2935 */
f2260e6b 2936 __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
d34b0733 2937 spin_unlock(&zone->lock);
a6de734b 2938 return alloced;
1da177e4
LT
2939}
2940
4ae7c039 2941#ifdef CONFIG_NUMA
8fce4d8e 2942/*
4037d452
CL
2943 * Called from the vmstat counter updater to drain pagesets of this
2944 * currently executing processor on remote nodes after they have
2945 * expired.
2946 *
879336c3
CL
2947 * Note that this function must be called with the thread pinned to
2948 * a single processor.
8fce4d8e 2949 */
4037d452 2950void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
4ae7c039 2951{
4ae7c039 2952 unsigned long flags;
7be12fc9 2953 int to_drain, batch;
4ae7c039 2954
4037d452 2955 local_irq_save(flags);
4db0c3c2 2956 batch = READ_ONCE(pcp->batch);
7be12fc9 2957 to_drain = min(pcp->count, batch);
77ba9062 2958 if (to_drain > 0)
2a13515c 2959 free_pcppages_bulk(zone, to_drain, pcp);
4037d452 2960 local_irq_restore(flags);
4ae7c039
CL
2961}
2962#endif
2963
9f8f2172 2964/*
93481ff0 2965 * Drain pcplists of the indicated processor and zone.
9f8f2172
CL
2966 *
2967 * The processor must either be the current processor and the
2968 * thread pinned to the current processor or a processor that
2969 * is not online.
2970 */
93481ff0 2971static void drain_pages_zone(unsigned int cpu, struct zone *zone)
1da177e4 2972{
c54ad30c 2973 unsigned long flags;
93481ff0
VB
2974 struct per_cpu_pageset *pset;
2975 struct per_cpu_pages *pcp;
1da177e4 2976
93481ff0
VB
2977 local_irq_save(flags);
2978 pset = per_cpu_ptr(zone->pageset, cpu);
1da177e4 2979
93481ff0 2980 pcp = &pset->pcp;
77ba9062 2981 if (pcp->count)
93481ff0 2982 free_pcppages_bulk(zone, pcp->count, pcp);
93481ff0
VB
2983 local_irq_restore(flags);
2984}
3dfa5721 2985
93481ff0
VB
2986/*
2987 * Drain pcplists of all zones on the indicated processor.
2988 *
2989 * The processor must either be the current processor and the
2990 * thread pinned to the current processor or a processor that
2991 * is not online.
2992 */
2993static void drain_pages(unsigned int cpu)
2994{
2995 struct zone *zone;
2996
2997 for_each_populated_zone(zone) {
2998 drain_pages_zone(cpu, zone);
1da177e4
LT
2999 }
3000}
1da177e4 3001
9f8f2172
CL
3002/*
3003 * Spill all of this CPU's per-cpu pages back into the buddy allocator.
93481ff0
VB
3004 *
3005 * The CPU has to be pinned. When zone parameter is non-NULL, spill just
3006 * the single zone's pages.
9f8f2172 3007 */
93481ff0 3008void drain_local_pages(struct zone *zone)
9f8f2172 3009{
93481ff0
VB
3010 int cpu = smp_processor_id();
3011
3012 if (zone)
3013 drain_pages_zone(cpu, zone);
3014 else
3015 drain_pages(cpu);
9f8f2172
CL
3016}
3017
0ccce3b9
MG
3018static void drain_local_pages_wq(struct work_struct *work)
3019{
d9367bd0
WY
3020 struct pcpu_drain *drain;
3021
3022 drain = container_of(work, struct pcpu_drain, work);
3023
a459eeb7
MH
3024 /*
3025 * drain_all_pages doesn't use proper cpu hotplug protection so
3026 * we can race with cpu offline when the WQ can move this from
3027 * a cpu pinned worker to an unbound one. We can operate on a different
3028 * cpu which is allright but we also have to make sure to not move to
3029 * a different one.
3030 */
3031 preempt_disable();
d9367bd0 3032 drain_local_pages(drain->zone);
a459eeb7 3033 preempt_enable();
0ccce3b9
MG
3034}
3035
9f8f2172 3036/*
ec6e8c7e
VB
3037 * The implementation of drain_all_pages(), exposing an extra parameter to
3038 * drain on all cpus.
93481ff0 3039 *
ec6e8c7e
VB
3040 * drain_all_pages() is optimized to only execute on cpus where pcplists are
3041 * not empty. The check for non-emptiness can however race with a free to
3042 * pcplist that has not yet increased the pcp->count from 0 to 1. Callers
3043 * that need the guarantee that every CPU has drained can disable the
3044 * optimizing racy check.
9f8f2172 3045 */
3b1f3658 3046static void __drain_all_pages(struct zone *zone, bool force_all_cpus)
9f8f2172 3047{
74046494 3048 int cpu;
74046494
GBY
3049
3050 /*
3051 * Allocate in the BSS so we wont require allocation in
3052 * direct reclaim path for CONFIG_CPUMASK_OFFSTACK=y
3053 */
3054 static cpumask_t cpus_with_pcps;
3055
ce612879
MH
3056 /*
3057 * Make sure nobody triggers this path before mm_percpu_wq is fully
3058 * initialized.
3059 */
3060 if (WARN_ON_ONCE(!mm_percpu_wq))
3061 return;
3062
bd233f53
MG
3063 /*
3064 * Do not drain if one is already in progress unless it's specific to
3065 * a zone. Such callers are primarily CMA and memory hotplug and need
3066 * the drain to be complete when the call returns.
3067 */
3068 if (unlikely(!mutex_trylock(&pcpu_drain_mutex))) {
3069 if (!zone)
3070 return;
3071 mutex_lock(&pcpu_drain_mutex);
3072 }
0ccce3b9 3073
74046494
GBY
3074 /*
3075 * We don't care about racing with CPU hotplug event
3076 * as offline notification will cause the notified
3077 * cpu to drain that CPU pcps and on_each_cpu_mask
3078 * disables preemption as part of its processing
3079 */
3080 for_each_online_cpu(cpu) {
93481ff0
VB
3081 struct per_cpu_pageset *pcp;
3082 struct zone *z;
74046494 3083 bool has_pcps = false;
93481ff0 3084
ec6e8c7e
VB
3085 if (force_all_cpus) {
3086 /*
3087 * The pcp.count check is racy, some callers need a
3088 * guarantee that no cpu is missed.
3089 */
3090 has_pcps = true;
3091 } else if (zone) {
74046494 3092 pcp = per_cpu_ptr(zone->pageset, cpu);
93481ff0 3093 if (pcp->pcp.count)
74046494 3094 has_pcps = true;
93481ff0
VB
3095 } else {
3096 for_each_populated_zone(z) {
3097 pcp = per_cpu_ptr(z->pageset, cpu);
3098 if (pcp->pcp.count) {
3099 has_pcps = true;
3100 break;
3101 }
74046494
GBY
3102 }
3103 }
93481ff0 3104
74046494
GBY
3105 if (has_pcps)
3106 cpumask_set_cpu(cpu, &cpus_with_pcps);
3107 else
3108 cpumask_clear_cpu(cpu, &cpus_with_pcps);
3109 }
0ccce3b9 3110
bd233f53 3111 for_each_cpu(cpu, &cpus_with_pcps) {
d9367bd0
WY
3112 struct pcpu_drain *drain = per_cpu_ptr(&pcpu_drain, cpu);
3113
3114 drain->zone = zone;
3115 INIT_WORK(&drain->work, drain_local_pages_wq);
3116 queue_work_on(cpu, mm_percpu_wq, &drain->work);
0ccce3b9 3117 }
bd233f53 3118 for_each_cpu(cpu, &cpus_with_pcps)
d9367bd0 3119 flush_work(&per_cpu_ptr(&pcpu_drain, cpu)->work);
bd233f53
MG
3120
3121 mutex_unlock(&pcpu_drain_mutex);
9f8f2172
CL
3122}
3123
ec6e8c7e
VB
3124/*
3125 * Spill all the per-cpu pages from all CPUs back into the buddy allocator.
3126 *
3127 * When zone parameter is non-NULL, spill just the single zone's pages.
3128 *
3129 * Note that this can be extremely slow as the draining happens in a workqueue.
3130 */
3131void drain_all_pages(struct zone *zone)
3132{
3133 __drain_all_pages(zone, false);
3134}
3135
296699de 3136#ifdef CONFIG_HIBERNATION
1da177e4 3137
556b969a
CY
3138/*
3139 * Touch the watchdog for every WD_PAGE_COUNT pages.
3140 */
3141#define WD_PAGE_COUNT (128*1024)
3142
1da177e4
LT
3143void mark_free_pages(struct zone *zone)
3144{
556b969a 3145 unsigned long pfn, max_zone_pfn, page_count = WD_PAGE_COUNT;
f623f0db 3146 unsigned long flags;
7aeb09f9 3147 unsigned int order, t;
86760a2c 3148 struct page *page;
1da177e4 3149
8080fc03 3150 if (zone_is_empty(zone))
1da177e4
LT
3151 return;
3152
3153 spin_lock_irqsave(&zone->lock, flags);
f623f0db 3154
108bcc96 3155 max_zone_pfn = zone_end_pfn(zone);
f623f0db
RW
3156 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
3157 if (pfn_valid(pfn)) {
86760a2c 3158 page = pfn_to_page(pfn);
ba6b0979 3159
556b969a
CY
3160 if (!--page_count) {
3161 touch_nmi_watchdog();
3162 page_count = WD_PAGE_COUNT;
3163 }
3164
ba6b0979
JK
3165 if (page_zone(page) != zone)
3166 continue;
3167
7be98234
RW
3168 if (!swsusp_page_is_forbidden(page))
3169 swsusp_unset_page_free(page);
f623f0db 3170 }
1da177e4 3171
b2a0ac88 3172 for_each_migratetype_order(order, t) {
86760a2c
GT
3173 list_for_each_entry(page,
3174 &zone->free_area[order].free_list[t], lru) {
f623f0db 3175 unsigned long i;
1da177e4 3176
86760a2c 3177 pfn = page_to_pfn(page);
556b969a
CY
3178 for (i = 0; i < (1UL << order); i++) {
3179 if (!--page_count) {
3180 touch_nmi_watchdog();
3181 page_count = WD_PAGE_COUNT;
3182 }
7be98234 3183 swsusp_set_page_free(pfn_to_page(pfn + i));
556b969a 3184 }
f623f0db 3185 }
b2a0ac88 3186 }
1da177e4
LT
3187 spin_unlock_irqrestore(&zone->lock, flags);
3188}
e2c55dc8 3189#endif /* CONFIG_PM */
1da177e4 3190
2d4894b5 3191static bool free_unref_page_prepare(struct page *page, unsigned long pfn)
1da177e4 3192{
5f8dcc21 3193 int migratetype;
1da177e4 3194
4db7548c 3195 if (!free_pcp_prepare(page))
9cca35d4 3196 return false;
689bcebf 3197
dc4b0caf 3198 migratetype = get_pfnblock_migratetype(page, pfn);
bb14c2c7 3199 set_pcppage_migratetype(page, migratetype);
9cca35d4
MG
3200 return true;
3201}
3202
2d4894b5 3203static void free_unref_page_commit(struct page *page, unsigned long pfn)
9cca35d4
MG
3204{
3205 struct zone *zone = page_zone(page);
3206 struct per_cpu_pages *pcp;
3207 int migratetype;
3208
3209 migratetype = get_pcppage_migratetype(page);
d34b0733 3210 __count_vm_event(PGFREE);
da456f14 3211
5f8dcc21
MG
3212 /*
3213 * We only track unmovable, reclaimable and movable on pcp lists.
3214 * Free ISOLATE pages back to the allocator because they are being
a6ffdc07 3215 * offlined but treat HIGHATOMIC as movable pages so we can get those
5f8dcc21
MG
3216 * areas back if necessary. Otherwise, we may have to free
 3217 * excessively into the page allocator.
3218 */
3219 if (migratetype >= MIGRATE_PCPTYPES) {
194159fb 3220 if (unlikely(is_migrate_isolate(migratetype))) {
7fef431b
DH
3221 free_one_page(zone, page, pfn, 0, migratetype,
3222 FPI_NONE);
9cca35d4 3223 return;
5f8dcc21
MG
3224 }
3225 migratetype = MIGRATE_MOVABLE;
3226 }
3227
99dcc3e5 3228 pcp = &this_cpu_ptr(zone->pageset)->pcp;
2d4894b5 3229 list_add(&page->lru, &pcp->lists[migratetype]);
1da177e4 3230 pcp->count++;
5c3ad2eb
VB
3231 if (pcp->count >= READ_ONCE(pcp->high))
3232 free_pcppages_bulk(zone, READ_ONCE(pcp->batch), pcp);
9cca35d4 3233}
5f8dcc21 3234
9cca35d4
MG
3235/*
3236 * Free a 0-order page
9cca35d4 3237 */
2d4894b5 3238void free_unref_page(struct page *page)
9cca35d4
MG
3239{
3240 unsigned long flags;
3241 unsigned long pfn = page_to_pfn(page);
3242
2d4894b5 3243 if (!free_unref_page_prepare(page, pfn))
9cca35d4
MG
3244 return;
3245
3246 local_irq_save(flags);
2d4894b5 3247 free_unref_page_commit(page, pfn);
d34b0733 3248 local_irq_restore(flags);
1da177e4
LT
3249}
3250
cc59850e
KK
3251/*
3252 * Free a list of 0-order pages
3253 */
2d4894b5 3254void free_unref_page_list(struct list_head *list)
cc59850e
KK
3255{
3256 struct page *page, *next;
9cca35d4 3257 unsigned long flags, pfn;
c24ad77d 3258 int batch_count = 0;
9cca35d4
MG
3259
3260 /* Prepare pages for freeing */
3261 list_for_each_entry_safe(page, next, list, lru) {
3262 pfn = page_to_pfn(page);
2d4894b5 3263 if (!free_unref_page_prepare(page, pfn))
9cca35d4
MG
3264 list_del(&page->lru);
3265 set_page_private(page, pfn);
3266 }
cc59850e 3267
9cca35d4 3268 local_irq_save(flags);
cc59850e 3269 list_for_each_entry_safe(page, next, list, lru) {
9cca35d4
MG
3270 unsigned long pfn = page_private(page);
3271
3272 set_page_private(page, 0);
2d4894b5
MG
3273 trace_mm_page_free_batched(page);
3274 free_unref_page_commit(page, pfn);
c24ad77d
LS
3275
3276 /*
3277 * Guard against excessive IRQ disabled times when we get
3278 * a large list of pages to free.
3279 */
3280 if (++batch_count == SWAP_CLUSTER_MAX) {
3281 local_irq_restore(flags);
3282 batch_count = 0;
3283 local_irq_save(flags);
3284 }
cc59850e 3285 }
9cca35d4 3286 local_irq_restore(flags);
cc59850e
KK
3287}
3288
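/*
 * Illustrative sketch (not part of the original file): batch freeing in the
 * style of release_pages() - gather order-0 pages whose refcount has just
 * dropped to zero on a private list and hand them to free_unref_page_list()
 * in one go. The helper name is hypothetical and compound pages are ignored.
 */
static __maybe_unused void example_batch_free(struct page **pages, int nr)
{
	LIST_HEAD(pages_to_free);
	int i;

	for (i = 0; i < nr; i++) {
		if (put_page_testzero(pages[i]))
			list_add(&pages[i]->lru, &pages_to_free);
	}

	free_unref_page_list(&pages_to_free);
}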
8dfcc9ba
NP
3289/*
3290 * split_page takes a non-compound higher-order page, and splits it into
 3291 * n (1<<order) sub-pages: page[0..n-1]
3292 * Each sub-page must be freed individually.
3293 *
3294 * Note: this is probably too low level an operation for use in drivers.
3295 * Please consult with lkml before using this in your driver.
3296 */
3297void split_page(struct page *page, unsigned int order)
3298{
3299 int i;
3300
309381fe
SL
3301 VM_BUG_ON_PAGE(PageCompound(page), page);
3302 VM_BUG_ON_PAGE(!page_count(page), page);
b1eeab67 3303
a9627bc5 3304 for (i = 1; i < (1 << order); i++)
7835e98b 3305 set_page_refcounted(page + i);
8fb156c9 3306 split_page_owner(page, 1 << order);
8dfcc9ba 3307}
5853ff23 3308EXPORT_SYMBOL_GPL(split_page);
8dfcc9ba 3309
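/*
 * Illustrative sketch (not part of the original file): the typical driver
 * pattern around split_page() is to allocate one high-order block, split it,
 * and then release the resulting order-0 pages individually. The function
 * name is hypothetical.
 */
static __maybe_unused int example_split_page_usage(void)
{
	unsigned int order = 2;		/* four contiguous pages */
	struct page *page = alloc_pages(GFP_KERNEL, order);
	int i;

	if (!page)
		return -ENOMEM;

	split_page(page, order);	/* page[0..3] are now independent pages */

	for (i = 0; i < (1 << order); i++)
		__free_page(page + i);	/* each sub-page must be freed on its own */

	return 0;
}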
3c605096 3310int __isolate_free_page(struct page *page, unsigned int order)
748446bb 3311{
748446bb
MG
3312 unsigned long watermark;
3313 struct zone *zone;
2139cbe6 3314 int mt;
748446bb
MG
3315
3316 BUG_ON(!PageBuddy(page));
3317
3318 zone = page_zone(page);
2e30abd1 3319 mt = get_pageblock_migratetype(page);
748446bb 3320
194159fb 3321 if (!is_migrate_isolate(mt)) {
8348faf9
VB
3322 /*
3323 * Obey watermarks as if the page was being allocated. We can
3324 * emulate a high-order watermark check with a raised order-0
3325 * watermark, because we already know our high-order page
3326 * exists.
3327 */
fd1444b2 3328 watermark = zone->_watermark[WMARK_MIN] + (1UL << order);
d883c6cf 3329 if (!zone_watermark_ok(zone, 0, watermark, 0, ALLOC_CMA))
2e30abd1
MS
3330 return 0;
3331
8fb74b9f 3332 __mod_zone_freepage_state(zone, -(1UL << order), mt);
2e30abd1 3333 }
748446bb
MG
3334
3335 /* Remove page from free list */
b03641af 3336
6ab01363 3337 del_page_from_free_list(page, zone, order);
2139cbe6 3338
400bc7fd 3339 /*
3340 * Set the pageblock if the isolated page is at least half of a
3341 * pageblock
3342 */
748446bb
MG
3343 if (order >= pageblock_order - 1) {
3344 struct page *endpage = page + (1 << order) - 1;
47118af0
MN
3345 for (; page < endpage; page += pageblock_nr_pages) {
3346 int mt = get_pageblock_migratetype(page);
88ed365e 3347 if (!is_migrate_isolate(mt) && !is_migrate_cma(mt)
a6ffdc07 3348 && !is_migrate_highatomic(mt))
47118af0
MN
3349 set_pageblock_migratetype(page,
3350 MIGRATE_MOVABLE);
3351 }
748446bb
MG
3352 }
3353
f3a14ced 3354
8fb74b9f 3355 return 1UL << order;
1fb3f8ca
MG
3356}
3357
624f58d8
AD
3358/**
3359 * __putback_isolated_page - Return a now-isolated page back where we got it
3360 * @page: Page that was isolated
3361 * @order: Order of the isolated page
e6a0a7ad 3362 * @mt: The page's pageblock's migratetype
624f58d8
AD
3363 *
3364 * This function is meant to return a page pulled from the free lists via
 3365 * __isolate_free_page() back to the free list it was pulled from.
3366 */
3367void __putback_isolated_page(struct page *page, unsigned int order, int mt)
3368{
3369 struct zone *zone = page_zone(page);
3370
3371 /* zone lock should be held when this function is called */
3372 lockdep_assert_held(&zone->lock);
3373
3374 /* Return isolated page to tail of freelist. */
f04a5d5d 3375 __free_one_page(page, page_to_pfn(page), zone, order, mt,
47b6a24a 3376 FPI_SKIP_REPORT_NOTIFY | FPI_TO_TAIL);
624f58d8
AD
3377}
3378
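/*
 * Illustrative sketch (not part of the original file): how a compaction-style
 * caller pairs __isolate_free_page() with __putback_isolated_page() when it
 * decides not to use the page after all. Both calls expect zone->lock to be
 * held and @page to be a buddy page; the helper name is hypothetical.
 */
static __maybe_unused void example_isolate_then_putback(struct zone *zone,
							struct page *page,
							unsigned int order)
{
	unsigned long flags;

	spin_lock_irqsave(&zone->lock, flags);
	if (__isolate_free_page(page, order)) {
		/* changed our mind: give it back, at the tail of the freelist */
		__putback_isolated_page(page, order,
					get_pageblock_migratetype(page));
	}
	spin_unlock_irqrestore(&zone->lock, flags);
}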
060e7417
MG
3379/*
3380 * Update NUMA hit/miss statistics
3381 *
3382 * Must be called with interrupts disabled.
060e7417 3383 */
41b6167e 3384static inline void zone_statistics(struct zone *preferred_zone, struct zone *z)
060e7417
MG
3385{
3386#ifdef CONFIG_NUMA
3a321d2a 3387 enum numa_stat_item local_stat = NUMA_LOCAL;
060e7417 3388
4518085e
KW
3389 /* skip numa counters update if numa stats is disabled */
3390 if (!static_branch_likely(&vm_numa_stat_key))
3391 return;
3392
c1093b74 3393 if (zone_to_nid(z) != numa_node_id())
060e7417 3394 local_stat = NUMA_OTHER;
060e7417 3395
c1093b74 3396 if (zone_to_nid(z) == zone_to_nid(preferred_zone))
3a321d2a 3397 __inc_numa_state(z, NUMA_HIT);
2df26639 3398 else {
3a321d2a
KW
3399 __inc_numa_state(z, NUMA_MISS);
3400 __inc_numa_state(preferred_zone, NUMA_FOREIGN);
060e7417 3401 }
3a321d2a 3402 __inc_numa_state(z, local_stat);
060e7417
MG
3403#endif
3404}
3405
066b2393
MG
3406/* Remove page from the per-cpu list, caller must protect the list */
3407static struct page *__rmqueue_pcplist(struct zone *zone, int migratetype,
6bb15450 3408 unsigned int alloc_flags,
453f85d4 3409 struct per_cpu_pages *pcp,
066b2393
MG
3410 struct list_head *list)
3411{
3412 struct page *page;
3413
3414 do {
3415 if (list_empty(list)) {
3416 pcp->count += rmqueue_bulk(zone, 0,
5c3ad2eb 3417 READ_ONCE(pcp->batch), list,
6bb15450 3418 migratetype, alloc_flags);
066b2393
MG
3419 if (unlikely(list_empty(list)))
3420 return NULL;
3421 }
3422
453f85d4 3423 page = list_first_entry(list, struct page, lru);
066b2393
MG
3424 list_del(&page->lru);
3425 pcp->count--;
3426 } while (check_new_pcp(page));
3427
3428 return page;
3429}
3430
3431/* Lock and remove page from the per-cpu list */
3432static struct page *rmqueue_pcplist(struct zone *preferred_zone,
1c52e6d0
YS
3433 struct zone *zone, gfp_t gfp_flags,
3434 int migratetype, unsigned int alloc_flags)
066b2393
MG
3435{
3436 struct per_cpu_pages *pcp;
3437 struct list_head *list;
066b2393 3438 struct page *page;
d34b0733 3439 unsigned long flags;
066b2393 3440
d34b0733 3441 local_irq_save(flags);
066b2393
MG
3442 pcp = &this_cpu_ptr(zone->pageset)->pcp;
3443 list = &pcp->lists[migratetype];
6bb15450 3444 page = __rmqueue_pcplist(zone, migratetype, alloc_flags, pcp, list);
066b2393 3445 if (page) {
1c52e6d0 3446 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1);
066b2393
MG
3447 zone_statistics(preferred_zone, zone);
3448 }
d34b0733 3449 local_irq_restore(flags);
066b2393
MG
3450 return page;
3451}
3452
1da177e4 3453/*
75379191 3454 * Allocate a page from the given zone. Use pcplists for order-0 allocations.
1da177e4 3455 */
0a15c3e9 3456static inline
066b2393 3457struct page *rmqueue(struct zone *preferred_zone,
7aeb09f9 3458 struct zone *zone, unsigned int order,
c603844b
MG
3459 gfp_t gfp_flags, unsigned int alloc_flags,
3460 int migratetype)
1da177e4
LT
3461{
3462 unsigned long flags;
689bcebf 3463 struct page *page;
1da177e4 3464
d34b0733 3465 if (likely(order == 0)) {
1d91df85
JK
3466 /*
3467 * MIGRATE_MOVABLE pcplist could have the pages on CMA area and
3468 * we need to skip it when CMA area isn't allowed.
3469 */
3470 if (!IS_ENABLED(CONFIG_CMA) || alloc_flags & ALLOC_CMA ||
3471 migratetype != MIGRATE_MOVABLE) {
3472 page = rmqueue_pcplist(preferred_zone, zone, gfp_flags,
1c52e6d0 3473 migratetype, alloc_flags);
1d91df85
JK
3474 goto out;
3475 }
066b2393 3476 }
83b9355b 3477
066b2393
MG
3478 /*
3479 * We most definitely don't want callers attempting to
3480 * allocate greater than order-1 page units with __GFP_NOFAIL.
3481 */
3482 WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));
3483 spin_lock_irqsave(&zone->lock, flags);
0aaa29a5 3484
066b2393
MG
3485 do {
3486 page = NULL;
1d91df85
JK
3487 /*
3488 * order-0 request can reach here when the pcplist is skipped
3489 * due to non-CMA allocation context. HIGHATOMIC area is
3490 * reserved for high-order atomic allocation, so order-0
3491 * request should skip it.
3492 */
3493 if (order > 0 && alloc_flags & ALLOC_HARDER) {
066b2393
MG
3494 page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC);
3495 if (page)
3496 trace_mm_page_alloc_zone_locked(page, order, migratetype);
3497 }
a74609fa 3498 if (!page)
6bb15450 3499 page = __rmqueue(zone, order, migratetype, alloc_flags);
066b2393
MG
3500 } while (page && check_new_pages(page, order));
3501 spin_unlock(&zone->lock);
3502 if (!page)
3503 goto failed;
3504 __mod_zone_freepage_state(zone, -(1 << order),
3505 get_pcppage_migratetype(page));
1da177e4 3506
16709d1d 3507 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
41b6167e 3508 zone_statistics(preferred_zone, zone);
a74609fa 3509 local_irq_restore(flags);
1da177e4 3510
066b2393 3511out:
73444bc4
MG
3512 /* Separate test+clear to avoid unnecessary atomics */
3513 if (test_bit(ZONE_BOOSTED_WATERMARK, &zone->flags)) {
3514 clear_bit(ZONE_BOOSTED_WATERMARK, &zone->flags);
3515 wakeup_kswapd(zone, 0, 0, zone_idx(zone));
3516 }
3517
066b2393 3518 VM_BUG_ON_PAGE(page && bad_range(zone, page), page);
1da177e4 3519 return page;
a74609fa
NP
3520
3521failed:
3522 local_irq_restore(flags);
a74609fa 3523 return NULL;
1da177e4
LT
3524}
3525
933e312e
AM
3526#ifdef CONFIG_FAIL_PAGE_ALLOC
3527
b2588c4b 3528static struct {
933e312e
AM
3529 struct fault_attr attr;
3530
621a5f7a 3531 bool ignore_gfp_highmem;
71baba4b 3532 bool ignore_gfp_reclaim;
54114994 3533 u32 min_order;
933e312e
AM
3534} fail_page_alloc = {
3535 .attr = FAULT_ATTR_INITIALIZER,
71baba4b 3536 .ignore_gfp_reclaim = true,
621a5f7a 3537 .ignore_gfp_highmem = true,
54114994 3538 .min_order = 1,
933e312e
AM
3539};
3540
3541static int __init setup_fail_page_alloc(char *str)
3542{
3543 return setup_fault_attr(&fail_page_alloc.attr, str);
3544}
3545__setup("fail_page_alloc=", setup_fail_page_alloc);
3546
af3b8544 3547static bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
933e312e 3548{
54114994 3549 if (order < fail_page_alloc.min_order)
deaf386e 3550 return false;
933e312e 3551 if (gfp_mask & __GFP_NOFAIL)
deaf386e 3552 return false;
933e312e 3553 if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))
deaf386e 3554 return false;
71baba4b
MG
3555 if (fail_page_alloc.ignore_gfp_reclaim &&
3556 (gfp_mask & __GFP_DIRECT_RECLAIM))
deaf386e 3557 return false;
933e312e
AM
3558
3559 return should_fail(&fail_page_alloc.attr, 1 << order);
3560}
3561
3562#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
3563
3564static int __init fail_page_alloc_debugfs(void)
3565{
0825a6f9 3566 umode_t mode = S_IFREG | 0600;
933e312e 3567 struct dentry *dir;
933e312e 3568
dd48c085
AM
3569 dir = fault_create_debugfs_attr("fail_page_alloc", NULL,
3570 &fail_page_alloc.attr);
b2588c4b 3571
d9f7979c
GKH
3572 debugfs_create_bool("ignore-gfp-wait", mode, dir,
3573 &fail_page_alloc.ignore_gfp_reclaim);
3574 debugfs_create_bool("ignore-gfp-highmem", mode, dir,
3575 &fail_page_alloc.ignore_gfp_highmem);
3576 debugfs_create_u32("min-order", mode, dir, &fail_page_alloc.min_order);
933e312e 3577
d9f7979c 3578 return 0;
933e312e
AM
3579}
3580
3581late_initcall(fail_page_alloc_debugfs);
3582
3583#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
3584
3585#else /* CONFIG_FAIL_PAGE_ALLOC */
3586
af3b8544 3587static inline bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
933e312e 3588{
deaf386e 3589 return false;
933e312e
AM
3590}
3591
3592#endif /* CONFIG_FAIL_PAGE_ALLOC */
3593
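/*
 * Illustrative usage note (not part of the original file): with
 * CONFIG_FAIL_PAGE_ALLOC the knobs above are normally driven either through
 * the "fail_page_alloc=<interval>,<probability>,<space>,<times>" boot
 * parameter parsed by setup_fault_attr(), or at run time via debugfs, e.g.
 *
 *	echo 10  > /sys/kernel/debug/fail_page_alloc/probability
 *	echo 100 > /sys/kernel/debug/fail_page_alloc/times
 *	echo 1   > /sys/kernel/debug/fail_page_alloc/min-order
 *
 * (paths assume debugfs is mounted at /sys/kernel/debug).
 */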
76cd6173 3594noinline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
af3b8544
BP
3595{
3596 return __should_fail_alloc_page(gfp_mask, order);
3597}
3598ALLOW_ERROR_INJECTION(should_fail_alloc_page, TRUE);
3599
f27ce0e1
JK
3600static inline long __zone_watermark_unusable_free(struct zone *z,
3601 unsigned int order, unsigned int alloc_flags)
3602{
3603 const bool alloc_harder = (alloc_flags & (ALLOC_HARDER|ALLOC_OOM));
3604 long unusable_free = (1 << order) - 1;
3605
3606 /*
3607 * If the caller does not have rights to ALLOC_HARDER then subtract
3608 * the high-atomic reserves. This will over-estimate the size of the
3609 * atomic reserve but it avoids a search.
3610 */
3611 if (likely(!alloc_harder))
3612 unusable_free += z->nr_reserved_highatomic;
3613
3614#ifdef CONFIG_CMA
3615 /* If allocation can't use CMA areas don't use free CMA pages */
3616 if (!(alloc_flags & ALLOC_CMA))
3617 unusable_free += zone_page_state(z, NR_FREE_CMA_PAGES);
3618#endif
3619
3620 return unusable_free;
3621}
3622
1da177e4 3623/*
97a16fc8
MG
3624 * Return true if free base pages are above 'mark'. For high-order checks it
 3625 * will return true if the order-0 watermark is reached and there is at least
3626 * one free page of a suitable size. Checking now avoids taking the zone lock
3627 * to check in the allocation paths if no pages are free.
1da177e4 3628 */
86a294a8 3629bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
97a225e6 3630 int highest_zoneidx, unsigned int alloc_flags,
86a294a8 3631 long free_pages)
1da177e4 3632{
d23ad423 3633 long min = mark;
1da177e4 3634 int o;
cd04ae1e 3635 const bool alloc_harder = (alloc_flags & (ALLOC_HARDER|ALLOC_OOM));
1da177e4 3636
0aaa29a5 3637 /* free_pages may go negative - that's OK */
f27ce0e1 3638 free_pages -= __zone_watermark_unusable_free(z, order, alloc_flags);
0aaa29a5 3639
7fb1d9fc 3640 if (alloc_flags & ALLOC_HIGH)
1da177e4 3641 min -= min / 2;
0aaa29a5 3642
f27ce0e1 3643 if (unlikely(alloc_harder)) {
cd04ae1e
MH
3644 /*
3645 * OOM victims can try even harder than normal ALLOC_HARDER
3646 * users on the grounds that it's definitely going to be in
3647 * the exit path shortly and free memory. Any allocation it
3648 * makes during the free path will be small and short-lived.
3649 */
3650 if (alloc_flags & ALLOC_OOM)
3651 min -= min / 2;
3652 else
3653 min -= min / 4;
3654 }
3655
97a16fc8
MG
3656 /*
3657 * Check watermarks for an order-0 allocation request. If these
3658 * are not met, then a high-order request also cannot go ahead
3659 * even if a suitable page happened to be free.
3660 */
97a225e6 3661 if (free_pages <= min + z->lowmem_reserve[highest_zoneidx])
88f5acf8 3662 return false;
1da177e4 3663
97a16fc8
MG
3664 /* If this is an order-0 request then the watermark is fine */
3665 if (!order)
3666 return true;
3667
3668 /* For a high-order request, check at least one suitable page is free */
3669 for (o = order; o < MAX_ORDER; o++) {
3670 struct free_area *area = &z->free_area[o];
3671 int mt;
3672
3673 if (!area->nr_free)
3674 continue;
3675
97a16fc8 3676 for (mt = 0; mt < MIGRATE_PCPTYPES; mt++) {
b03641af 3677 if (!free_area_empty(area, mt))
97a16fc8
MG
3678 return true;
3679 }
3680
3681#ifdef CONFIG_CMA
d883c6cf 3682 if ((alloc_flags & ALLOC_CMA) &&
b03641af 3683 !free_area_empty(area, MIGRATE_CMA)) {
97a16fc8 3684 return true;
d883c6cf 3685 }
97a16fc8 3686#endif
76089d00 3687 if (alloc_harder && !free_area_empty(area, MIGRATE_HIGHATOMIC))
b050e376 3688 return true;
1da177e4 3689 }
97a16fc8 3690 return false;
88f5acf8
MG
3691}
3692
7aeb09f9 3693bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
97a225e6 3694 int highest_zoneidx, unsigned int alloc_flags)
88f5acf8 3695{
97a225e6 3696 return __zone_watermark_ok(z, order, mark, highest_zoneidx, alloc_flags,
88f5acf8
MG
3697 zone_page_state(z, NR_FREE_PAGES));
3698}
3699
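/*
 * Worked example (illustrative, not part of the original file), ignoring
 * CMA and highatomic adjustments: an order-2 GFP_ATOMIC request against a
 * min watermark of 1024 pages with 1500 free pages.
 *
 *	unusable_free = (1 << 2) - 1 = 3	->  free_pages = 1497
 *	ALLOC_HIGH:    min = 1024 / 2 = 512
 *	ALLOC_HARDER:  min = 512 - 512 / 4 = 384
 *
 * The order-0 part passes when 1497 > 384 + lowmem_reserve[highest_zoneidx];
 * the request then still needs at least one non-empty free_area[o], o >= 2,
 * for an allowed migratetype before the check as a whole returns true.
 */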
48ee5f36 3700static inline bool zone_watermark_fast(struct zone *z, unsigned int order,
97a225e6 3701 unsigned long mark, int highest_zoneidx,
f80b08fc 3702 unsigned int alloc_flags, gfp_t gfp_mask)
48ee5f36 3703{
f27ce0e1 3704 long free_pages;
d883c6cf 3705
f27ce0e1 3706 free_pages = zone_page_state(z, NR_FREE_PAGES);
48ee5f36
MG
3707
3708 /*
3709 * Fast check for order-0 only. If this fails then the reserves
f27ce0e1 3710 * need to be calculated.
48ee5f36 3711 */
f27ce0e1
JK
3712 if (!order) {
3713 long fast_free;
3714
3715 fast_free = free_pages;
3716 fast_free -= __zone_watermark_unusable_free(z, 0, alloc_flags);
3717 if (fast_free > mark + z->lowmem_reserve[highest_zoneidx])
3718 return true;
3719 }
48ee5f36 3720
f80b08fc
CTR
3721 if (__zone_watermark_ok(z, order, mark, highest_zoneidx, alloc_flags,
3722 free_pages))
3723 return true;
3724 /*
3725 * Ignore watermark boosting for GFP_ATOMIC order-0 allocations
3726 * when checking the min watermark. The min watermark is the
3727 * point where boosting is ignored so that kswapd is woken up
3728 * when below the low watermark.
3729 */
3730 if (unlikely(!order && (gfp_mask & __GFP_ATOMIC) && z->watermark_boost
3731 && ((alloc_flags & ALLOC_WMARK_MASK) == WMARK_MIN))) {
3732 mark = z->_watermark[WMARK_MIN];
3733 return __zone_watermark_ok(z, order, mark, highest_zoneidx,
3734 alloc_flags, free_pages);
3735 }
3736
3737 return false;
48ee5f36
MG
3738}
3739
7aeb09f9 3740bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
97a225e6 3741 unsigned long mark, int highest_zoneidx)
88f5acf8
MG
3742{
3743 long free_pages = zone_page_state(z, NR_FREE_PAGES);
3744
3745 if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark)
3746 free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES);
3747
97a225e6 3748 return __zone_watermark_ok(z, order, mark, highest_zoneidx, 0,
88f5acf8 3749 free_pages);
1da177e4
LT
3750}
3751
9276b1bc 3752#ifdef CONFIG_NUMA
957f822a
DR
3753static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
3754{
e02dc017 3755 return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) <=
a55c7454 3756 node_reclaim_distance;
957f822a 3757}
9276b1bc 3758#else /* CONFIG_NUMA */
957f822a
DR
3759static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
3760{
3761 return true;
3762}
9276b1bc
PJ
3763#endif /* CONFIG_NUMA */
3764
6bb15450
MG
3765/*
3766 * The restriction on ZONE_DMA32 as being a suitable zone to use to avoid
3767 * fragmentation is subtle. If the preferred zone was HIGHMEM then
3768 * premature use of a lower zone may cause lowmem pressure problems that
3769 * are worse than fragmentation. If the next zone is ZONE_DMA then it is
3770 * probably too small. It only makes sense to spread allocations to avoid
3771 * fragmentation between the Normal and DMA32 zones.
3772 */
3773static inline unsigned int
0a79cdad 3774alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask)
6bb15450 3775{
736838e9 3776 unsigned int alloc_flags;
0a79cdad 3777
736838e9
MN
3778 /*
3779 * __GFP_KSWAPD_RECLAIM is assumed to be the same as ALLOC_KSWAPD
3780 * to save a branch.
3781 */
3782 alloc_flags = (__force int) (gfp_mask & __GFP_KSWAPD_RECLAIM);
0a79cdad
MG
3783
3784#ifdef CONFIG_ZONE_DMA32
8139ad04
AR
3785 if (!zone)
3786 return alloc_flags;
3787
6bb15450 3788 if (zone_idx(zone) != ZONE_NORMAL)
8118b82e 3789 return alloc_flags;
6bb15450
MG
3790
3791 /*
3792 * If ZONE_DMA32 exists, assume it is the one after ZONE_NORMAL and
3793 * the pointer is within zone->zone_pgdat->node_zones[]. Also assume
3794 * on UMA that if Normal is populated then so is DMA32.
3795 */
3796 BUILD_BUG_ON(ZONE_NORMAL - ZONE_DMA32 != 1);
3797 if (nr_online_nodes > 1 && !populated_zone(--zone))
8118b82e 3798 return alloc_flags;
6bb15450 3799
8118b82e 3800 alloc_flags |= ALLOC_NOFRAGMENT;
0a79cdad
MG
3801#endif /* CONFIG_ZONE_DMA32 */
3802 return alloc_flags;
6bb15450 3803}
6bb15450 3804
8510e69c
JK
3805static inline unsigned int current_alloc_flags(gfp_t gfp_mask,
3806 unsigned int alloc_flags)
3807{
3808#ifdef CONFIG_CMA
3809 unsigned int pflags = current->flags;
3810
3811 if (!(pflags & PF_MEMALLOC_NOCMA) &&
3812 gfp_migratetype(gfp_mask) == MIGRATE_MOVABLE)
3813 alloc_flags |= ALLOC_CMA;
3814
3815#endif
3816 return alloc_flags;
3817}
3818
7fb1d9fc 3819/*
0798e519 3820 * get_page_from_freelist goes through the zonelist trying to allocate
7fb1d9fc
RS
3821 * a page.
3822 */
3823static struct page *
a9263751
VB
3824get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
3825 const struct alloc_context *ac)
753ee728 3826{
6bb15450 3827 struct zoneref *z;
5117f45d 3828 struct zone *zone;
3b8c0be4 3829 struct pglist_data *last_pgdat_dirty_limit = NULL;
6bb15450 3830 bool no_fallback;
3b8c0be4 3831
6bb15450 3832retry:
7fb1d9fc 3833 /*
9276b1bc 3834 * Scan zonelist, looking for a zone with enough free.
344736f2 3835 * See also __cpuset_node_allowed() comment in kernel/cpuset.c.
7fb1d9fc 3836 */
6bb15450
MG
3837 no_fallback = alloc_flags & ALLOC_NOFRAGMENT;
3838 z = ac->preferred_zoneref;
30d8ec73
MN
3839 for_next_zone_zonelist_nodemask(zone, z, ac->highest_zoneidx,
3840 ac->nodemask) {
be06af00 3841 struct page *page;
e085dbc5
JW
3842 unsigned long mark;
3843
664eedde
MG
3844 if (cpusets_enabled() &&
3845 (alloc_flags & ALLOC_CPUSET) &&
002f2906 3846 !__cpuset_zone_allowed(zone, gfp_mask))
cd38b115 3847 continue;
a756cf59
JW
3848 /*
3849 * When allocating a page cache page for writing, we
281e3726
MG
3850 * want to get it from a node that is within its dirty
3851 * limit, such that no single node holds more than its
a756cf59 3852 * proportional share of globally allowed dirty pages.
281e3726 3853 * The dirty limits take into account the node's
a756cf59
JW
3854 * lowmem reserves and high watermark so that kswapd
3855 * should be able to balance it without having to
3856 * write pages from its LRU list.
3857 *
a756cf59 3858 * XXX: For now, allow allocations to potentially
281e3726 3859 * exceed the per-node dirty limit in the slowpath
c9ab0c4f 3860 * (spread_dirty_pages unset) before going into reclaim,
a756cf59 3861 * which is important when on a NUMA setup the allowed
281e3726 3862 * nodes are together not big enough to reach the
a756cf59 3863 * global limit. The proper fix for these situations
281e3726 3864 * will require awareness of nodes in the
a756cf59
JW
3865 * dirty-throttling and the flusher threads.
3866 */
3b8c0be4
MG
3867 if (ac->spread_dirty_pages) {
3868 if (last_pgdat_dirty_limit == zone->zone_pgdat)
3869 continue;
3870
3871 if (!node_dirty_ok(zone->zone_pgdat)) {
3872 last_pgdat_dirty_limit = zone->zone_pgdat;
3873 continue;
3874 }
3875 }
7fb1d9fc 3876
6bb15450
MG
3877 if (no_fallback && nr_online_nodes > 1 &&
3878 zone != ac->preferred_zoneref->zone) {
3879 int local_nid;
3880
3881 /*
3882 * If moving to a remote node, retry but allow
3883 * fragmenting fallbacks. Locality is more important
3884 * than fragmentation avoidance.
3885 */
3886 local_nid = zone_to_nid(ac->preferred_zoneref->zone);
3887 if (zone_to_nid(zone) != local_nid) {
3888 alloc_flags &= ~ALLOC_NOFRAGMENT;
3889 goto retry;
3890 }
3891 }
3892
a9214443 3893 mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK);
48ee5f36 3894 if (!zone_watermark_fast(zone, order, mark,
f80b08fc
CTR
3895 ac->highest_zoneidx, alloc_flags,
3896 gfp_mask)) {
fa5e084e
MG
3897 int ret;
3898
c9e97a19
PT
3899#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
3900 /*
3901 * Watermark failed for this zone, but see if we can
3902 * grow this zone if it contains deferred pages.
3903 */
3904 if (static_branch_unlikely(&deferred_pages)) {
3905 if (_deferred_grow_zone(zone, order))
3906 goto try_this_zone;
3907 }
3908#endif
5dab2911
MG
3909 /* Checked here to keep the fast path fast */
3910 BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
3911 if (alloc_flags & ALLOC_NO_WATERMARKS)
3912 goto try_this_zone;
3913
a5f5f91d 3914 if (node_reclaim_mode == 0 ||
c33d6c06 3915 !zone_allows_reclaim(ac->preferred_zoneref->zone, zone))
cd38b115
MG
3916 continue;
3917
a5f5f91d 3918 ret = node_reclaim(zone->zone_pgdat, gfp_mask, order);
fa5e084e 3919 switch (ret) {
a5f5f91d 3920 case NODE_RECLAIM_NOSCAN:
fa5e084e 3921 /* did not scan */
cd38b115 3922 continue;
a5f5f91d 3923 case NODE_RECLAIM_FULL:
fa5e084e 3924 /* scanned but unreclaimable */
cd38b115 3925 continue;
fa5e084e
MG
3926 default:
3927 /* did we reclaim enough */
fed2719e 3928 if (zone_watermark_ok(zone, order, mark,
97a225e6 3929 ac->highest_zoneidx, alloc_flags))
fed2719e
MG
3930 goto try_this_zone;
3931
fed2719e 3932 continue;
0798e519 3933 }
7fb1d9fc
RS
3934 }
3935
fa5e084e 3936try_this_zone:
066b2393 3937 page = rmqueue(ac->preferred_zoneref->zone, zone, order,
0aaa29a5 3938 gfp_mask, alloc_flags, ac->migratetype);
75379191 3939 if (page) {
479f854a 3940 prep_new_page(page, order, gfp_mask, alloc_flags);
0aaa29a5
MG
3941
3942 /*
3943 * If this is a high-order atomic allocation then check
3944 * if the pageblock should be reserved for the future
3945 */
3946 if (unlikely(order && (alloc_flags & ALLOC_HARDER)))
3947 reserve_highatomic_pageblock(page, zone, order);
3948
75379191 3949 return page;
c9e97a19
PT
3950 } else {
3951#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
3952 /* Try again if zone has deferred pages */
3953 if (static_branch_unlikely(&deferred_pages)) {
3954 if (_deferred_grow_zone(zone, order))
3955 goto try_this_zone;
3956 }
3957#endif
75379191 3958 }
54a6eb5c 3959 }
9276b1bc 3960
6bb15450
MG
3961 /*
3962 * It's possible on a UMA machine to get through all zones that are
3963 * fragmented. If avoiding fragmentation, reset and try again.
3964 */
3965 if (no_fallback) {
3966 alloc_flags &= ~ALLOC_NOFRAGMENT;
3967 goto retry;
3968 }
3969
4ffeaf35 3970 return NULL;
753ee728
MH
3971}
3972
9af744d7 3973static void warn_alloc_show_mem(gfp_t gfp_mask, nodemask_t *nodemask)
a238ab5b 3974{
a238ab5b 3975 unsigned int filter = SHOW_MEM_FILTER_NODES;
a238ab5b
DH
3976
3977 /*
3978 * This documents exceptions given to allocations in certain
3979 * contexts that are allowed to allocate outside current's set
3980 * of allowed nodes.
3981 */
3982 if (!(gfp_mask & __GFP_NOMEMALLOC))
cd04ae1e 3983 if (tsk_is_oom_victim(current) ||
a238ab5b
DH
3984 (current->flags & (PF_MEMALLOC | PF_EXITING)))
3985 filter &= ~SHOW_MEM_FILTER_NODES;
d0164adc 3986 if (in_interrupt() || !(gfp_mask & __GFP_DIRECT_RECLAIM))
a238ab5b
DH
3987 filter &= ~SHOW_MEM_FILTER_NODES;
3988
9af744d7 3989 show_mem(filter, nodemask);
aa187507
MH
3990}
3991
a8e99259 3992void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...)
aa187507
MH
3993{
3994 struct va_format vaf;
3995 va_list args;
1be334e5 3996 static DEFINE_RATELIMIT_STATE(nopage_rs, 10*HZ, 1);
aa187507 3997
0f7896f1 3998 if ((gfp_mask & __GFP_NOWARN) || !__ratelimit(&nopage_rs))
aa187507
MH
3999 return;
4000
7877cdcc
MH
4001 va_start(args, fmt);
4002 vaf.fmt = fmt;
4003 vaf.va = &args;
ef8444ea 4004 pr_warn("%s: %pV, mode:%#x(%pGg), nodemask=%*pbl",
0205f755
MH
4005 current->comm, &vaf, gfp_mask, &gfp_mask,
4006 nodemask_pr_args(nodemask));
7877cdcc 4007 va_end(args);
3ee9a4f0 4008
a8e99259 4009 cpuset_print_current_mems_allowed();
ef8444ea 4010 pr_cont("\n");
a238ab5b 4011 dump_stack();
685dbf6f 4012 warn_alloc_show_mem(gfp_mask, nodemask);
a238ab5b
DH
4013}
4014
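/*
 * Illustrative usage (not part of the original file): this is the style of
 * report emitted from the allocator's failure path, e.g.
 *
 *	warn_alloc(gfp_mask, ac->nodemask,
 *		   "page allocation failure: order:%u", order);
 *
 * The __GFP_NOWARN test and the ratelimit above keep such reports from
 * flooding the log.
 */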
6c18ba7a
MH
4015static inline struct page *
4016__alloc_pages_cpuset_fallback(gfp_t gfp_mask, unsigned int order,
4017 unsigned int alloc_flags,
4018 const struct alloc_context *ac)
4019{
4020 struct page *page;
4021
4022 page = get_page_from_freelist(gfp_mask, order,
4023 alloc_flags|ALLOC_CPUSET, ac);
4024 /*
4025 * fallback to ignore cpuset restriction if our nodes
4026 * are depleted
4027 */
4028 if (!page)
4029 page = get_page_from_freelist(gfp_mask, order,
4030 alloc_flags, ac);
4031
4032 return page;
4033}
4034
11e33f6a
MG
4035static inline struct page *
4036__alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
a9263751 4037 const struct alloc_context *ac, unsigned long *did_some_progress)
11e33f6a 4038{
6e0fc46d
DR
4039 struct oom_control oc = {
4040 .zonelist = ac->zonelist,
4041 .nodemask = ac->nodemask,
2a966b77 4042 .memcg = NULL,
6e0fc46d
DR
4043 .gfp_mask = gfp_mask,
4044 .order = order,
6e0fc46d 4045 };
11e33f6a
MG
4046 struct page *page;
4047
9879de73
JW
4048 *did_some_progress = 0;
4049
9879de73 4050 /*
dc56401f
JW
4051 * Acquire the oom lock. If that fails, somebody else is
4052 * making progress for us.
9879de73 4053 */
dc56401f 4054 if (!mutex_trylock(&oom_lock)) {
9879de73 4055 *did_some_progress = 1;
11e33f6a 4056 schedule_timeout_uninterruptible(1);
1da177e4
LT
4057 return NULL;
4058 }
6b1de916 4059
11e33f6a
MG
4060 /*
 4061 * Go through the zonelist yet one more time, keeping a very high watermark
 4062 * here. This is only to catch a parallel oom killing; we must fail if
e746bf73
TH
4063 * we're still under heavy pressure. But make sure that this reclaim
4064 * attempt shall not depend on __GFP_DIRECT_RECLAIM && !__GFP_NORETRY
4065 * allocation which will never fail due to oom_lock already held.
11e33f6a 4066 */
e746bf73
TH
4067 page = get_page_from_freelist((gfp_mask | __GFP_HARDWALL) &
4068 ~__GFP_DIRECT_RECLAIM, order,
4069 ALLOC_WMARK_HIGH|ALLOC_CPUSET, ac);
7fb1d9fc 4070 if (page)
11e33f6a
MG
4071 goto out;
4072
06ad276a
MH
4073 /* Coredumps can quickly deplete all memory reserves */
4074 if (current->flags & PF_DUMPCORE)
4075 goto out;
4076 /* The OOM killer will not help higher order allocs */
4077 if (order > PAGE_ALLOC_COSTLY_ORDER)
4078 goto out;
dcda9b04
MH
4079 /*
4080 * We have already exhausted all our reclaim opportunities without any
4081 * success so it is time to admit defeat. We will skip the OOM killer
4082 * because it is very likely that the caller has a more reasonable
4083 * fallback than shooting a random task.
cfb4a541
MN
4084 *
4085 * The OOM killer may not free memory on a specific node.
dcda9b04 4086 */
cfb4a541 4087 if (gfp_mask & (__GFP_RETRY_MAYFAIL | __GFP_THISNODE))
dcda9b04 4088 goto out;
06ad276a 4089 /* The OOM killer does not needlessly kill tasks for lowmem */
97a225e6 4090 if (ac->highest_zoneidx < ZONE_NORMAL)
06ad276a
MH
4091 goto out;
4092 if (pm_suspended_storage())
4093 goto out;
4094 /*
4095 * XXX: GFP_NOFS allocations should rather fail than rely on
4096 * other request to make a forward progress.
4097 * We are in an unfortunate situation where out_of_memory cannot
4098 * do much for this context but let's try it to at least get
4099 * access to memory reserved if the current task is killed (see
4100 * out_of_memory). Once filesystems are ready to handle allocation
4101 * failures more gracefully we should just bail out here.
4102 */
4103
3c2c6488 4104 /* Exhausted what can be done so it's blame time */
5020e285 4105 if (out_of_memory(&oc) || WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL)) {
c32b3cbe 4106 *did_some_progress = 1;
5020e285 4107
6c18ba7a
MH
4108 /*
4109 * Help non-failing allocations by giving them access to memory
4110 * reserves
4111 */
4112 if (gfp_mask & __GFP_NOFAIL)
4113 page = __alloc_pages_cpuset_fallback(gfp_mask, order,
5020e285 4114 ALLOC_NO_WATERMARKS, ac);
5020e285 4115 }
11e33f6a 4116out:
dc56401f 4117 mutex_unlock(&oom_lock);
11e33f6a
MG
4118 return page;
4119}
4120
33c2d214
MH
4121/*
4122 * Maximum number of compaction retries wit a progress before OOM
4123 * killer is consider as the only way to move forward.
4124 */
4125#define MAX_COMPACT_RETRIES 16
4126
56de7263
MG
4127#ifdef CONFIG_COMPACTION
4128/* Try memory compaction for high-order allocations before reclaim */
4129static struct page *
4130__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
c603844b 4131 unsigned int alloc_flags, const struct alloc_context *ac,
a5508cd8 4132 enum compact_priority prio, enum compact_result *compact_result)
56de7263 4133{
5e1f0f09 4134 struct page *page = NULL;
eb414681 4135 unsigned long pflags;
499118e9 4136 unsigned int noreclaim_flag;
53853e2d
VB
4137
4138 if (!order)
66199712 4139 return NULL;
66199712 4140
eb414681 4141 psi_memstall_enter(&pflags);
499118e9 4142 noreclaim_flag = memalloc_noreclaim_save();
eb414681 4143
c5d01d0d 4144 *compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac,
5e1f0f09 4145 prio, &page);
eb414681 4146
499118e9 4147 memalloc_noreclaim_restore(noreclaim_flag);
eb414681 4148 psi_memstall_leave(&pflags);
56de7263 4149
98dd3b48
VB
4150 /*
4151 * At least in one zone compaction wasn't deferred or skipped, so let's
4152 * count a compaction stall
4153 */
4154 count_vm_event(COMPACTSTALL);
8fb74b9f 4155
5e1f0f09
MG
4156 /* Prep a captured page if available */
4157 if (page)
4158 prep_new_page(page, order, gfp_mask, alloc_flags);
4159
4160 /* Try get a page from the freelist if available */
4161 if (!page)
4162 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
53853e2d 4163
98dd3b48
VB
4164 if (page) {
4165 struct zone *zone = page_zone(page);
53853e2d 4166
98dd3b48
VB
4167 zone->compact_blockskip_flush = false;
4168 compaction_defer_reset(zone, order, true);
4169 count_vm_event(COMPACTSUCCESS);
4170 return page;
4171 }
56de7263 4172
98dd3b48
VB
4173 /*
 4174 * It's bad if a compaction run occurs and fails. The most likely reason
4175 * is that pages exist, but not enough to satisfy watermarks.
4176 */
4177 count_vm_event(COMPACTFAIL);
66199712 4178
98dd3b48 4179 cond_resched();
56de7263
MG
4180
4181 return NULL;
4182}
33c2d214 4183
3250845d
VB
4184static inline bool
4185should_compact_retry(struct alloc_context *ac, int order, int alloc_flags,
4186 enum compact_result compact_result,
4187 enum compact_priority *compact_priority,
d9436498 4188 int *compaction_retries)
3250845d
VB
4189{
4190 int max_retries = MAX_COMPACT_RETRIES;
c2033b00 4191 int min_priority;
65190cff
MH
4192 bool ret = false;
4193 int retries = *compaction_retries;
4194 enum compact_priority priority = *compact_priority;
3250845d
VB
4195
4196 if (!order)
4197 return false;
4198
d9436498
VB
4199 if (compaction_made_progress(compact_result))
4200 (*compaction_retries)++;
4201
3250845d
VB
4202 /*
 4203 * compaction considers all the zones as desperately out of memory
4204 * so it doesn't really make much sense to retry except when the
4205 * failure could be caused by insufficient priority
4206 */
d9436498
VB
4207 if (compaction_failed(compact_result))
4208 goto check_priority;
3250845d 4209
49433085
VB
4210 /*
4211 * compaction was skipped because there are not enough order-0 pages
4212 * to work with, so we retry only if it looks like reclaim can help.
4213 */
4214 if (compaction_needs_reclaim(compact_result)) {
4215 ret = compaction_zonelist_suitable(ac, order, alloc_flags);
4216 goto out;
4217 }
4218
3250845d
VB
4219 /*
4220 * make sure the compaction wasn't deferred or didn't bail out early
4221 * due to locks contention before we declare that we should give up.
49433085
VB
4222 * But the next retry should use a higher priority if allowed, so
4223 * we don't just keep bailing out endlessly.
3250845d 4224 */
65190cff 4225 if (compaction_withdrawn(compact_result)) {
49433085 4226 goto check_priority;
65190cff 4227 }
3250845d
VB
4228
4229 /*
dcda9b04 4230 * !costly requests are much more important than __GFP_RETRY_MAYFAIL
3250845d
VB
4231 * costly ones because they are de facto nofail and invoke OOM
4232 * killer to move on while costly can fail and users are ready
4233 * to cope with that. 1/4 retries is rather arbitrary but we
4234 * would need much more detailed feedback from compaction to
4235 * make a better decision.
4236 */
4237 if (order > PAGE_ALLOC_COSTLY_ORDER)
4238 max_retries /= 4;
65190cff
MH
4239 if (*compaction_retries <= max_retries) {
4240 ret = true;
4241 goto out;
4242 }
3250845d 4243
d9436498
VB
4244 /*
4245 * Make sure there are attempts at the highest priority if we exhausted
4246 * all retries or failed at the lower priorities.
4247 */
4248check_priority:
c2033b00
VB
4249 min_priority = (order > PAGE_ALLOC_COSTLY_ORDER) ?
4250 MIN_COMPACT_COSTLY_PRIORITY : MIN_COMPACT_PRIORITY;
65190cff 4251
c2033b00 4252 if (*compact_priority > min_priority) {
d9436498
VB
4253 (*compact_priority)--;
4254 *compaction_retries = 0;
65190cff 4255 ret = true;
d9436498 4256 }
65190cff
MH
4257out:
4258 trace_compact_retry(order, priority, compact_result, retries, max_retries, ret);
4259 return ret;
3250845d 4260}
56de7263
MG
4261#else
4262static inline struct page *
4263__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
c603844b 4264 unsigned int alloc_flags, const struct alloc_context *ac,
a5508cd8 4265 enum compact_priority prio, enum compact_result *compact_result)
56de7263 4266{
33c2d214 4267 *compact_result = COMPACT_SKIPPED;
56de7263
MG
4268 return NULL;
4269}
33c2d214
MH
4270
4271static inline bool
86a294a8
MH
4272should_compact_retry(struct alloc_context *ac, unsigned int order, int alloc_flags,
4273 enum compact_result compact_result,
a5508cd8 4274 enum compact_priority *compact_priority,
d9436498 4275 int *compaction_retries)
33c2d214 4276{
31e49bfd
MH
4277 struct zone *zone;
4278 struct zoneref *z;
4279
4280 if (!order || order > PAGE_ALLOC_COSTLY_ORDER)
4281 return false;
4282
4283 /*
4284 * There are setups with compaction disabled which would prefer to loop
4285 * inside the allocator rather than hit the oom killer prematurely.
4286 * Let's give them a good hope and keep retrying while the order-0
4287 * watermarks are OK.
4288 */
97a225e6
JK
4289 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
4290 ac->highest_zoneidx, ac->nodemask) {
31e49bfd 4291 if (zone_watermark_ok(zone, 0, min_wmark_pages(zone),
97a225e6 4292 ac->highest_zoneidx, alloc_flags))
31e49bfd
MH
4293 return true;
4294 }
33c2d214
MH
4295 return false;
4296}
3250845d 4297#endif /* CONFIG_COMPACTION */
56de7263 4298
d92a8cfc 4299#ifdef CONFIG_LOCKDEP
93781325 4300static struct lockdep_map __fs_reclaim_map =
d92a8cfc
PZ
4301 STATIC_LOCKDEP_MAP_INIT("fs_reclaim", &__fs_reclaim_map);
4302
f920e413 4303static bool __need_reclaim(gfp_t gfp_mask)
d92a8cfc 4304{
d92a8cfc
PZ
4305 /* no reclaim without waiting on it */
4306 if (!(gfp_mask & __GFP_DIRECT_RECLAIM))
4307 return false;
4308
4309 /* this guy won't enter reclaim */
2e517d68 4310 if (current->flags & PF_MEMALLOC)
d92a8cfc
PZ
4311 return false;
4312
d92a8cfc
PZ
4313 if (gfp_mask & __GFP_NOLOCKDEP)
4314 return false;
4315
4316 return true;
4317}
4318
93781325
OS
4319void __fs_reclaim_acquire(void)
4320{
4321 lock_map_acquire(&__fs_reclaim_map);
4322}
4323
4324void __fs_reclaim_release(void)
4325{
4326 lock_map_release(&__fs_reclaim_map);
4327}
4328
d92a8cfc
PZ
4329void fs_reclaim_acquire(gfp_t gfp_mask)
4330{
f920e413
DV
4331 gfp_mask = current_gfp_context(gfp_mask);
4332
4333 if (__need_reclaim(gfp_mask)) {
4334 if (gfp_mask & __GFP_FS)
4335 __fs_reclaim_acquire();
4336
4337#ifdef CONFIG_MMU_NOTIFIER
4338 lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
4339 lock_map_release(&__mmu_notifier_invalidate_range_start_map);
4340#endif
4341
4342 }
d92a8cfc
PZ
4343}
4344EXPORT_SYMBOL_GPL(fs_reclaim_acquire);
4345
4346void fs_reclaim_release(gfp_t gfp_mask)
4347{
f920e413
DV
4348 gfp_mask = current_gfp_context(gfp_mask);
4349
4350 if (__need_reclaim(gfp_mask)) {
4351 if (gfp_mask & __GFP_FS)
4352 __fs_reclaim_release();
4353 }
d92a8cfc
PZ
4354}
4355EXPORT_SYMBOL_GPL(fs_reclaim_release);
4356#endif
4357
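/*
 * Illustrative sketch (not part of the original file): allocator entry points
 * (the slab allocators, for instance) pair these annotations around the spot
 * where direct reclaim could legally happen, so lockdep can complain about
 * reclaim-unsafe locks held by the caller even on allocations that never
 * actually enter reclaim. The helper name is hypothetical.
 */
static __maybe_unused void example_might_reclaim(gfp_t gfp_mask)
{
	fs_reclaim_acquire(gfp_mask);
	fs_reclaim_release(gfp_mask);
	might_sleep_if(gfp_mask & __GFP_DIRECT_RECLAIM);
}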
bba90710 4358/* Perform direct synchronous page reclaim */
2187e17b 4359static unsigned long
a9263751
VB
4360__perform_reclaim(gfp_t gfp_mask, unsigned int order,
4361 const struct alloc_context *ac)
11e33f6a 4362{
499118e9 4363 unsigned int noreclaim_flag;
2187e17b 4364 unsigned long pflags, progress;
11e33f6a
MG
4365
4366 cond_resched();
4367
4368 /* We now go into synchronous reclaim */
4369 cpuset_memory_pressure_bump();
eb414681 4370 psi_memstall_enter(&pflags);
d92a8cfc 4371 fs_reclaim_acquire(gfp_mask);
93781325 4372 noreclaim_flag = memalloc_noreclaim_save();
11e33f6a 4373
a9263751
VB
4374 progress = try_to_free_pages(ac->zonelist, order, gfp_mask,
4375 ac->nodemask);
11e33f6a 4376
499118e9 4377 memalloc_noreclaim_restore(noreclaim_flag);
93781325 4378 fs_reclaim_release(gfp_mask);
eb414681 4379 psi_memstall_leave(&pflags);
11e33f6a
MG
4380
4381 cond_resched();
4382
bba90710
MS
4383 return progress;
4384}
4385
4386/* The really slow allocator path where we enter direct reclaim */
4387static inline struct page *
4388__alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
c603844b 4389 unsigned int alloc_flags, const struct alloc_context *ac,
a9263751 4390 unsigned long *did_some_progress)
bba90710
MS
4391{
4392 struct page *page = NULL;
4393 bool drained = false;
4394
a9263751 4395 *did_some_progress = __perform_reclaim(gfp_mask, order, ac);
9ee493ce
MG
4396 if (unlikely(!(*did_some_progress)))
4397 return NULL;
11e33f6a 4398
9ee493ce 4399retry:
31a6c190 4400 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
9ee493ce
MG
4401
4402 /*
4403 * If an allocation failed after direct reclaim, it could be because
0aaa29a5 4404 * pages are pinned on the per-cpu lists or in high alloc reserves.
047b9967 4405 * Shrink them and try again
9ee493ce
MG
4406 */
4407 if (!page && !drained) {
29fac03b 4408 unreserve_highatomic_pageblock(ac, false);
93481ff0 4409 drain_all_pages(NULL);
9ee493ce
MG
4410 drained = true;
4411 goto retry;
4412 }
4413
11e33f6a
MG
4414 return page;
4415}
4416
5ecd9d40
DR
4417static void wake_all_kswapds(unsigned int order, gfp_t gfp_mask,
4418 const struct alloc_context *ac)
3a025760
JW
4419{
4420 struct zoneref *z;
4421 struct zone *zone;
e1a55637 4422 pg_data_t *last_pgdat = NULL;
97a225e6 4423 enum zone_type highest_zoneidx = ac->highest_zoneidx;
3a025760 4424
97a225e6 4425 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, highest_zoneidx,
5ecd9d40 4426 ac->nodemask) {
e1a55637 4427 if (last_pgdat != zone->zone_pgdat)
97a225e6 4428 wakeup_kswapd(zone, gfp_mask, order, highest_zoneidx);
e1a55637
MG
4429 last_pgdat = zone->zone_pgdat;
4430 }
3a025760
JW
4431}
4432
c603844b 4433static inline unsigned int
341ce06f
PZ
4434gfp_to_alloc_flags(gfp_t gfp_mask)
4435{
c603844b 4436 unsigned int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
1da177e4 4437
736838e9
MN
4438 /*
4439 * __GFP_HIGH is assumed to be the same as ALLOC_HIGH
4440 * and __GFP_KSWAPD_RECLAIM is assumed to be the same as ALLOC_KSWAPD
4441 * to save two branches.
4442 */
e6223a3b 4443 BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_HIGH);
736838e9 4444 BUILD_BUG_ON(__GFP_KSWAPD_RECLAIM != (__force gfp_t) ALLOC_KSWAPD);
933e312e 4445
341ce06f
PZ
4446 /*
4447 * The caller may dip into page reserves a bit more if the caller
4448 * cannot run direct reclaim, or if the caller has realtime scheduling
4449 * policy or is asking for __GFP_HIGH memory. GFP_ATOMIC requests will
d0164adc 4450 * set both ALLOC_HARDER (__GFP_ATOMIC) and ALLOC_HIGH (__GFP_HIGH).
341ce06f 4451 */
736838e9
MN
4452 alloc_flags |= (__force int)
4453 (gfp_mask & (__GFP_HIGH | __GFP_KSWAPD_RECLAIM));
1da177e4 4454
d0164adc 4455 if (gfp_mask & __GFP_ATOMIC) {
5c3240d9 4456 /*
b104a35d
DR
4457 * Not worth trying to allocate harder for __GFP_NOMEMALLOC even
4458 * if it can't schedule.
5c3240d9 4459 */
b104a35d 4460 if (!(gfp_mask & __GFP_NOMEMALLOC))
5c3240d9 4461 alloc_flags |= ALLOC_HARDER;
523b9458 4462 /*
b104a35d 4463 * Ignore cpuset mems for GFP_ATOMIC rather than fail, see the
344736f2 4464 * comment for __cpuset_node_allowed().
523b9458 4465 */
341ce06f 4466 alloc_flags &= ~ALLOC_CPUSET;
c06b1fca 4467 } else if (unlikely(rt_task(current)) && !in_interrupt())
341ce06f
PZ
4468 alloc_flags |= ALLOC_HARDER;
4469
8510e69c
JK
4470 alloc_flags = current_alloc_flags(gfp_mask, alloc_flags);
4471
341ce06f
PZ
4472 return alloc_flags;
4473}
4474
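/*
 * Worked example (illustrative, not part of the original file): for a
 * GFP_ATOMIC request (__GFP_HIGH | __GFP_ATOMIC | __GFP_KSWAPD_RECLAIM)
 * from a non-realtime task the translation above produces
 *
 *	ALLOC_WMARK_MIN | ALLOC_HIGH | ALLOC_KSWAPD | ALLOC_HARDER
 *
 * with ALLOC_CPUSET dropped, whereas plain GFP_KERNEL keeps
 * ALLOC_WMARK_MIN | ALLOC_CPUSET | ALLOC_KSWAPD (current_alloc_flags() may
 * additionally add ALLOC_CMA for movable requests).
 */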
cd04ae1e 4475static bool oom_reserves_allowed(struct task_struct *tsk)
072bb0aa 4476{
cd04ae1e
MH
4477 if (!tsk_is_oom_victim(tsk))
4478 return false;
4479
4480 /*
4481 * !MMU doesn't have oom reaper so give access to memory reserves
4482 * only to the thread with TIF_MEMDIE set
4483 */
4484 if (!IS_ENABLED(CONFIG_MMU) && !test_thread_flag(TIF_MEMDIE))
31a6c190
VB
4485 return false;
4486
cd04ae1e
MH
4487 return true;
4488}
4489
4490/*
4491 * Distinguish requests which really need access to full memory
4492 * reserves from oom victims which can live with a portion of it
4493 */
4494static inline int __gfp_pfmemalloc_flags(gfp_t gfp_mask)
4495{
4496 if (unlikely(gfp_mask & __GFP_NOMEMALLOC))
4497 return 0;
31a6c190 4498 if (gfp_mask & __GFP_MEMALLOC)
cd04ae1e 4499 return ALLOC_NO_WATERMARKS;
31a6c190 4500 if (in_serving_softirq() && (current->flags & PF_MEMALLOC))
cd04ae1e
MH
4501 return ALLOC_NO_WATERMARKS;
4502 if (!in_interrupt()) {
4503 if (current->flags & PF_MEMALLOC)
4504 return ALLOC_NO_WATERMARKS;
4505 else if (oom_reserves_allowed(current))
4506 return ALLOC_OOM;
4507 }
31a6c190 4508
cd04ae1e
MH
4509 return 0;
4510}
4511
4512bool gfp_pfmemalloc_allowed(gfp_t gfp_mask)
4513{
4514 return !!__gfp_pfmemalloc_flags(gfp_mask);
072bb0aa
MG
4515}
4516
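/*
 * Quick reference (illustrative, not part of the original file) for the
 * decision made by __gfp_pfmemalloc_flags() above:
 *
 *	__GFP_NOMEMALLOC			-> 0
 *	__GFP_MEMALLOC				-> ALLOC_NO_WATERMARKS
 *	serving softirq with PF_MEMALLOC	-> ALLOC_NO_WATERMARKS
 *	task context with PF_MEMALLOC		-> ALLOC_NO_WATERMARKS
 *	task context, oom victim (MMU, or TIF_MEMDIE on !MMU)
 *						-> ALLOC_OOM
 *	everything else				-> 0
 */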
0a0337e0
MH
4517/*
 4518 * Checks whether it makes sense to retry the reclaim to make forward progress
4519 * for the given allocation request.
491d79ae
JW
4520 *
4521 * We give up when we either have tried MAX_RECLAIM_RETRIES in a row
4522 * without success, or when we couldn't even meet the watermark if we
4523 * reclaimed all remaining pages on the LRU lists.
0a0337e0
MH
4524 *
4525 * Returns true if a retry is viable or false to enter the oom path.
4526 */
4527static inline bool
4528should_reclaim_retry(gfp_t gfp_mask, unsigned order,
4529 struct alloc_context *ac, int alloc_flags,
423b452e 4530 bool did_some_progress, int *no_progress_loops)
0a0337e0
MH
4531{
4532 struct zone *zone;
4533 struct zoneref *z;
15f570bf 4534 bool ret = false;
0a0337e0 4535
423b452e
VB
4536 /*
 4537 * Costly allocations might have made some progress but this doesn't mean
4538 * their order will become available due to high fragmentation so
4539 * always increment the no progress counter for them
4540 */
4541 if (did_some_progress && order <= PAGE_ALLOC_COSTLY_ORDER)
4542 *no_progress_loops = 0;
4543 else
4544 (*no_progress_loops)++;
4545
0a0337e0
MH
4546 /*
4547 * Make sure we converge to OOM if we cannot make any progress
4548 * several times in the row.
4549 */
04c8716f
MK
4550 if (*no_progress_loops > MAX_RECLAIM_RETRIES) {
4551 /* Before OOM, exhaust highatomic_reserve */
29fac03b 4552 return unreserve_highatomic_pageblock(ac, true);
04c8716f 4553 }
0a0337e0 4554
bca67592
MG
4555 /*
4556 * Keep reclaiming pages while there is a chance this will lead
4557 * somewhere. If none of the target zones can satisfy our allocation
4558 * request even if all reclaimable pages are considered then we are
4559 * screwed and have to go OOM.
0a0337e0 4560 */
97a225e6
JK
4561 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
4562 ac->highest_zoneidx, ac->nodemask) {
0a0337e0 4563 unsigned long available;
ede37713 4564 unsigned long reclaimable;
d379f01d
MH
4565 unsigned long min_wmark = min_wmark_pages(zone);
4566 bool wmark;
0a0337e0 4567
5a1c84b4 4568 available = reclaimable = zone_reclaimable_pages(zone);
5a1c84b4 4569 available += zone_page_state_snapshot(zone, NR_FREE_PAGES);
0a0337e0
MH
4570
4571 /*
491d79ae
JW
4572 * Would the allocation succeed if we reclaimed all
4573 * reclaimable pages?
0a0337e0 4574 */
d379f01d 4575 wmark = __zone_watermark_ok(zone, order, min_wmark,
97a225e6 4576 ac->highest_zoneidx, alloc_flags, available);
d379f01d
MH
4577 trace_reclaim_retry_zone(z, order, reclaimable,
4578 available, min_wmark, *no_progress_loops, wmark);
4579 if (wmark) {
ede37713
MH
4580 /*
4581 * If we didn't make any progress and have a lot of
4582 * dirty + writeback pages then we should wait for
4583 * an IO to complete to slow down the reclaim and
 4584 * prevent a premature OOM
4585 */
4586 if (!did_some_progress) {
11fb9989 4587 unsigned long write_pending;
ede37713 4588
5a1c84b4
MG
4589 write_pending = zone_page_state_snapshot(zone,
4590 NR_ZONE_WRITE_PENDING);
ede37713 4591
11fb9989 4592 if (2 * write_pending > reclaimable) {
ede37713
MH
4593 congestion_wait(BLK_RW_ASYNC, HZ/10);
4594 return true;
4595 }
4596 }
5a1c84b4 4597
15f570bf
MH
4598 ret = true;
4599 goto out;
0a0337e0
MH
4600 }
4601 }
4602
15f570bf
MH
4603out:
4604 /*
4605 * Memory allocation/reclaim might be called from a WQ context and the
4606 * current implementation of the WQ concurrency control doesn't
4607 * recognize that a particular WQ is congested if the worker thread is
4608 * looping without ever sleeping. Therefore we have to do a short sleep
4609 * here rather than calling cond_resched().
4610 */
4611 if (current->flags & PF_WQ_WORKER)
4612 schedule_timeout_uninterruptible(1);
4613 else
4614 cond_resched();
4615 return ret;
0a0337e0
MH
4616}
4617
902b6281
VB
4618static inline bool
4619check_retry_cpuset(int cpuset_mems_cookie, struct alloc_context *ac)
4620{
4621 /*
4622 * It's possible that cpuset's mems_allowed and the nodemask from
4623 * mempolicy don't intersect. This should be normally dealt with by
4624 * policy_nodemask(), but it's possible to race with cpuset update in
4625 * such a way the check therein was true, and then it became false
4626 * before we got our cpuset_mems_cookie here.
4627 * This assumes that for all allocations, ac->nodemask can come only
4628 * from MPOL_BIND mempolicy (whose documented semantics is to be ignored
4629 * when it does not intersect with the cpuset restrictions) or the
4630 * caller can deal with a violated nodemask.
4631 */
4632 if (cpusets_enabled() && ac->nodemask &&
4633 !cpuset_nodemask_valid_mems_allowed(ac->nodemask)) {
4634 ac->nodemask = NULL;
4635 return true;
4636 }
4637
4638 /*
4639 * When updating a task's mems_allowed or mempolicy nodemask, it is
4640 * possible to race with parallel threads in such a way that our
4641 * allocation can fail while the mask is being updated. If we are about
4642 * to fail, check if the cpuset changed during allocation and if so,
4643 * retry.
4644 */
4645 if (read_mems_allowed_retry(cpuset_mems_cookie))
4646 return true;
4647
4648 return false;
4649}
4650
11e33f6a
MG
4651static inline struct page *
4652__alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
a9263751 4653 struct alloc_context *ac)
11e33f6a 4654{
d0164adc 4655 bool can_direct_reclaim = gfp_mask & __GFP_DIRECT_RECLAIM;
282722b0 4656 const bool costly_order = order > PAGE_ALLOC_COSTLY_ORDER;
11e33f6a 4657 struct page *page = NULL;
c603844b 4658 unsigned int alloc_flags;
11e33f6a 4659 unsigned long did_some_progress;
5ce9bfef 4660 enum compact_priority compact_priority;
c5d01d0d 4661 enum compact_result compact_result;
5ce9bfef
VB
4662 int compaction_retries;
4663 int no_progress_loops;
5ce9bfef 4664 unsigned int cpuset_mems_cookie;
cd04ae1e 4665 int reserve_flags;
1da177e4 4666
d0164adc
MG
4667 /*
4668 * We also sanity check to catch abuse of atomic reserves being used by
4669 * callers that are not in atomic context.
4670 */
4671 if (WARN_ON_ONCE((gfp_mask & (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)) ==
4672 (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)))
4673 gfp_mask &= ~__GFP_ATOMIC;
4674
5ce9bfef
VB
4675retry_cpuset:
4676 compaction_retries = 0;
4677 no_progress_loops = 0;
4678 compact_priority = DEF_COMPACT_PRIORITY;
4679 cpuset_mems_cookie = read_mems_allowed_begin();
9a67f648
MH
4680
4681 /*
4682 * The fast path uses conservative alloc_flags to succeed only until
4683 * kswapd needs to be woken up, and to avoid the cost of setting up
4684 * alloc_flags precisely. So we do that now.
4685 */
4686 alloc_flags = gfp_to_alloc_flags(gfp_mask);
4687
e47483bc
VB
4688 /*
4689 * We need to recalculate the starting point for the zonelist iterator
4690 * because we might have used different nodemask in the fast path, or
4691 * there was a cpuset modification and we are retrying - otherwise we
4692 * could end up iterating over non-eligible zones endlessly.
4693 */
4694 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
97a225e6 4695 ac->highest_zoneidx, ac->nodemask);
e47483bc
VB
4696 if (!ac->preferred_zoneref->zone)
4697 goto nopage;
4698
0a79cdad 4699 if (alloc_flags & ALLOC_KSWAPD)
5ecd9d40 4700 wake_all_kswapds(order, gfp_mask, ac);
23771235
VB
4701
4702 /*
4703 * The adjusted alloc_flags might result in immediate success, so try
4704 * that first
4705 */
4706 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
4707 if (page)
4708 goto got_pg;
4709
a8161d1e
VB
4710 /*
4711 * For costly allocations, try direct compaction first, as it's likely
282722b0
VB
4712 * that we have enough base pages and don't need to reclaim. For non-
4713 * movable high-order allocations, do that as well, as compaction will
4714 * try prevent permanent fragmentation by migrating from blocks of the
4715 * same migratetype.
4716 * Don't try this for allocations that are allowed to ignore
4717 * watermarks, as the ALLOC_NO_WATERMARKS attempt didn't yet happen.
a8161d1e 4718 */
282722b0
VB
4719 if (can_direct_reclaim &&
4720 (costly_order ||
4721 (order > 0 && ac->migratetype != MIGRATE_MOVABLE))
4722 && !gfp_pfmemalloc_allowed(gfp_mask)) {
a8161d1e
VB
4723 page = __alloc_pages_direct_compact(gfp_mask, order,
4724 alloc_flags, ac,
a5508cd8 4725 INIT_COMPACT_PRIORITY,
a8161d1e
VB
4726 &compact_result);
4727 if (page)
4728 goto got_pg;
4729
cc638f32
VB
4730 /*
4731 * Checks for costly allocations with __GFP_NORETRY, which
4732 * includes some THP page fault allocations
4733 */
4734 if (costly_order && (gfp_mask & __GFP_NORETRY)) {
b39d0ee2
DR
4735 /*
4736 * If allocating entire pageblock(s) and compaction
4737 * failed because all zones are below low watermarks
4738 * or is prohibited because it recently failed at this
3f36d866
DR
4739 * order, fail immediately unless the allocator has
4740 * requested compaction and reclaim retry.
b39d0ee2
DR
4741 *
4742 * Reclaim is
4743 * - potentially very expensive because zones are far
4744 * below their low watermarks or this is part of very
4745 * bursty high order allocations,
4746 * - not guaranteed to help because isolate_freepages()
4747 * may not iterate over freed pages as part of its
4748 * linear scan, and
4749 * - unlikely to make entire pageblocks free on its
4750 * own.
4751 */
4752 if (compact_result == COMPACT_SKIPPED ||
4753 compact_result == COMPACT_DEFERRED)
4754 goto nopage;
a8161d1e 4755
a8161d1e 4756 /*
3eb2771b
VB
4757 * Looks like reclaim/compaction is worth trying, but
4758 * sync compaction could be very expensive, so keep
25160354 4759 * using async compaction.
a8161d1e 4760 */
a5508cd8 4761 compact_priority = INIT_COMPACT_PRIORITY;
a8161d1e
VB
4762 }
4763 }
23771235 4764
31a6c190 4765retry:
23771235 4766 /* Ensure kswapd doesn't accidentally go to sleep as long as we loop */
0a79cdad 4767 if (alloc_flags & ALLOC_KSWAPD)
5ecd9d40 4768 wake_all_kswapds(order, gfp_mask, ac);
31a6c190 4769
cd04ae1e
MH
4770 reserve_flags = __gfp_pfmemalloc_flags(gfp_mask);
4771 if (reserve_flags)
8510e69c 4772 alloc_flags = current_alloc_flags(gfp_mask, reserve_flags);
23771235 4773
e46e7b77 4774 /*
d6a24df0
VB
4775 * Reset the nodemask and zonelist iterators if memory policies can be
4776 * ignored. These allocations are high priority and system rather than
4777 * user oriented.
e46e7b77 4778 */
cd04ae1e 4779 if (!(alloc_flags & ALLOC_CPUSET) || reserve_flags) {
d6a24df0 4780 ac->nodemask = NULL;
e46e7b77 4781 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
97a225e6 4782 ac->highest_zoneidx, ac->nodemask);
e46e7b77
MG
4783 }
4784
23771235 4785 /* Attempt with potentially adjusted zonelist and alloc_flags */
31a6c190 4786 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
7fb1d9fc
RS
4787 if (page)
4788 goto got_pg;
1da177e4 4789
d0164adc 4790 /* Caller is not willing to reclaim, we can't balance anything */
9a67f648 4791 if (!can_direct_reclaim)
1da177e4
LT
4792 goto nopage;
4793
9a67f648
MH
4794 /* Avoid recursion of direct reclaim */
4795 if (current->flags & PF_MEMALLOC)
6583bb64
DR
4796 goto nopage;
4797
a8161d1e
VB
4798 /* Try direct reclaim and then allocating */
4799 page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac,
4800 &did_some_progress);
4801 if (page)
4802 goto got_pg;
4803
4804 /* Try direct compaction and then allocating */
a9263751 4805 page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac,
a5508cd8 4806 compact_priority, &compact_result);
56de7263
MG
4807 if (page)
4808 goto got_pg;
75f30861 4809
9083905a
JW
4810 /* Do not loop if specifically requested */
4811 if (gfp_mask & __GFP_NORETRY)
a8161d1e 4812 goto nopage;
9083905a 4813
0a0337e0
MH
4814 /*
4815 * Do not retry costly high order allocations unless they are
dcda9b04 4816 * __GFP_RETRY_MAYFAIL
0a0337e0 4817 */
dcda9b04 4818 if (costly_order && !(gfp_mask & __GFP_RETRY_MAYFAIL))
a8161d1e 4819 goto nopage;
0a0337e0 4820
0a0337e0 4821 if (should_reclaim_retry(gfp_mask, order, ac, alloc_flags,
423b452e 4822 did_some_progress > 0, &no_progress_loops))
0a0337e0
MH
4823 goto retry;
4824
33c2d214
MH
4825 /*
4826 * It doesn't make any sense to retry compaction if order-0
4827 * reclaim is not able to make any progress because the current
4828 * implementation of compaction depends on a sufficient amount
4829 * of free memory (see __compaction_suitable)
4830 */
4831 if (did_some_progress > 0 &&
86a294a8 4832 should_compact_retry(ac, order, alloc_flags,
a5508cd8 4833 compact_result, &compact_priority,
d9436498 4834 &compaction_retries))
33c2d214
MH
4835 goto retry;
4836
902b6281
VB
4837
4838 /* Deal with possible cpuset update races before we start OOM killing */
4839 if (check_retry_cpuset(cpuset_mems_cookie, ac))
e47483bc
VB
4840 goto retry_cpuset;
4841
9083905a
JW
4842 /* Reclaim has failed us, start killing things */
4843 page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress);
4844 if (page)
4845 goto got_pg;
4846
9a67f648 4847 /* Avoid allocations with no watermarks from looping endlessly */
cd04ae1e 4848 if (tsk_is_oom_victim(current) &&
8510e69c 4849 (alloc_flags & ALLOC_OOM ||
c288983d 4850 (gfp_mask & __GFP_NOMEMALLOC)))
9a67f648
MH
4851 goto nopage;
4852
9083905a 4853 /* Retry as long as the OOM killer is making progress */
0a0337e0
MH
4854 if (did_some_progress) {
4855 no_progress_loops = 0;
9083905a 4856 goto retry;
0a0337e0 4857 }
9083905a 4858
1da177e4 4859nopage:
902b6281
VB
4860 /* Deal with possible cpuset update races before we fail */
4861 if (check_retry_cpuset(cpuset_mems_cookie, ac))
5ce9bfef
VB
4862 goto retry_cpuset;
4863
9a67f648
MH
4864 /*
4865 * Make sure that __GFP_NOFAIL request doesn't leak out and make sure
4866 * we always retry
4867 */
4868 if (gfp_mask & __GFP_NOFAIL) {
4869 /*
4870 * All existing users of __GFP_NOFAIL are blockable, so warn
4871 * about any new users that actually require GFP_NOWAIT
4872 */
4873 if (WARN_ON_ONCE(!can_direct_reclaim))
4874 goto fail;
4875
4876 /*
4877 * A PF_MEMALLOC request from this context is rather bizarre
4878 * because we cannot reclaim anything and can only loop waiting
4879 * for somebody else to do the work for us
4880 */
4881 WARN_ON_ONCE(current->flags & PF_MEMALLOC);
4882
4883 /*
4884 * Non-failing costly orders are a hard requirement which we
4885 * are not really prepared for, so warn about these users
4886 * so that we can identify them and convert them to something
4887 * else.
4888 */
4889 WARN_ON_ONCE(order > PAGE_ALLOC_COSTLY_ORDER);
4890
6c18ba7a
MH
4891 /*
4892 * Help non-failing allocations by giving them access to memory
4893 * reserves but do not use ALLOC_NO_WATERMARKS because this
4894 * could deplete the whole memory reserves, which would just make
4895 * the situation worse
4896 */
4897 page = __alloc_pages_cpuset_fallback(gfp_mask, order, ALLOC_HARDER, ac);
4898 if (page)
4899 goto got_pg;
4900
9a67f648
MH
4901 cond_resched();
4902 goto retry;
4903 }
4904fail:
a8e99259 4905 warn_alloc(gfp_mask, ac->nodemask,
7877cdcc 4906 "page allocation failure: order:%u", order);
1da177e4 4907got_pg:
072bb0aa 4908 return page;
1da177e4 4909}
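
The slow path above is driven almost entirely by the caller's gfp flags: __GFP_NORETRY gives up after a single reclaim/compaction pass, __GFP_RETRY_MAYFAIL keeps retrying costly orders but (per its documentation) does not invoke the OOM killer, and __GFP_NOFAIL may loop indefinitely. A minimal sketch of how a caller might choose between them; the function names and order values are illustrative, not part of this file.

#include <linux/gfp.h>

/* Opportunistic high-order buffer: give up quickly rather than thrash. */
static struct page *grab_optional_buffer(void)
{
	return alloc_pages(GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN, 2);
}

/* Worth retrying hard, but the caller can still cope with failure. */
static struct page *grab_important_buffer(void)
{
	return alloc_pages(GFP_KERNEL | __GFP_RETRY_MAYFAIL, 2);
}

/* Must not fail: keep it order-0 and accept that the slow path may loop. */
static struct page *grab_mandatory_page(void)
{
	return alloc_pages(GFP_KERNEL | __GFP_NOFAIL, 0);
}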
11e33f6a 4910
9cd75558 4911static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
04ec6264 4912 int preferred_nid, nodemask_t *nodemask,
9cd75558
MG
4913 struct alloc_context *ac, gfp_t *alloc_mask,
4914 unsigned int *alloc_flags)
11e33f6a 4915{
97a225e6 4916 ac->highest_zoneidx = gfp_zone(gfp_mask);
04ec6264 4917 ac->zonelist = node_zonelist(preferred_nid, gfp_mask);
9cd75558 4918 ac->nodemask = nodemask;
01c0bfe0 4919 ac->migratetype = gfp_migratetype(gfp_mask);
11e33f6a 4920
682a3385 4921 if (cpusets_enabled()) {
9cd75558 4922 *alloc_mask |= __GFP_HARDWALL;
182f3d7a
MS
4923 /*
4924 * When we are in interrupt context, the cpuset of the current
4925 * task is irrelevant, which means that any node is OK.
4926 */
4927 if (!in_interrupt() && !ac->nodemask)
9cd75558 4928 ac->nodemask = &cpuset_current_mems_allowed;
51047820
VB
4929 else
4930 *alloc_flags |= ALLOC_CPUSET;
682a3385
MG
4931 }
4932
d92a8cfc
PZ
4933 fs_reclaim_acquire(gfp_mask);
4934 fs_reclaim_release(gfp_mask);
11e33f6a 4935
d0164adc 4936 might_sleep_if(gfp_mask & __GFP_DIRECT_RECLAIM);
11e33f6a
MG
4937
4938 if (should_fail_alloc_page(gfp_mask, order))
9cd75558 4939 return false;
11e33f6a 4940
8510e69c 4941 *alloc_flags = current_alloc_flags(gfp_mask, *alloc_flags);
d883c6cf 4942
c9ab0c4f 4943 /* Dirty zone balancing only done in the fast path */
9cd75558 4944 ac->spread_dirty_pages = (gfp_mask & __GFP_WRITE);
c9ab0c4f 4945
e46e7b77
MG
4946 /*
4947 * The preferred zone is used for statistics but crucially it is
4948 * also used as the starting point for the zonelist iterator. It
4949 * may get reset for allocations that ignore memory policies.
4950 */
9cd75558 4951 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
97a225e6 4952 ac->highest_zoneidx, ac->nodemask);
a0622d05
MN
4953
4954 return true;
9cd75558
MG
4955}
4956
4957/*
4958 * This is the 'heart' of the zoned buddy allocator.
4959 */
4960struct page *
04ec6264
VB
4961__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
4962 nodemask_t *nodemask)
9cd75558
MG
4963{
4964 struct page *page;
4965 unsigned int alloc_flags = ALLOC_WMARK_LOW;
f19360f0 4966 gfp_t alloc_mask; /* The gfp_t that was actually used for allocation */
9cd75558
MG
4967 struct alloc_context ac = { };
4968
c63ae43b
MH
4969 /*
4970 * There are several places where we assume that the order value is sane
4971 * so bail out early if the request is out of bounds.
4972 */
4973 if (unlikely(order >= MAX_ORDER)) {
4974 WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN));
4975 return NULL;
4976 }
4977
9cd75558 4978 gfp_mask &= gfp_allowed_mask;
f19360f0 4979 alloc_mask = gfp_mask;
04ec6264 4980 if (!prepare_alloc_pages(gfp_mask, order, preferred_nid, nodemask, &ac, &alloc_mask, &alloc_flags))
9cd75558
MG
4981 return NULL;
4982
6bb15450
MG
4983 /*
4984 * Forbid the first pass from falling back to types that fragment
4985 * memory until all local zones are considered.
4986 */
0a79cdad 4987 alloc_flags |= alloc_flags_nofragment(ac.preferred_zoneref->zone, gfp_mask);
6bb15450 4988
5117f45d 4989 /* First allocation attempt */
a9263751 4990 page = get_page_from_freelist(alloc_mask, order, alloc_flags, &ac);
4fcb0971
MG
4991 if (likely(page))
4992 goto out;
11e33f6a 4993
4fcb0971 4994 /*
7dea19f9
MH
4995 * Apply scoped allocation constraints. This is mainly about GFP_NOFS
4996 * and GFP_NOIO, which have to be inherited by all allocation requests
4997 * from a particular context that has been marked by
4998 * memalloc_no{fs,io}_{save,restore}.
4fcb0971 4999 */
7dea19f9 5000 alloc_mask = current_gfp_context(gfp_mask);
4fcb0971 5001 ac.spread_dirty_pages = false;
23f086f9 5002
4741526b
MG
5003 /*
5004 * Restore the original nodemask if it was potentially replaced with
5005 * &cpuset_current_mems_allowed to optimize the fast-path attempt.
5006 */
97ce86f9 5007 ac.nodemask = nodemask;
16096c25 5008
4fcb0971 5009 page = __alloc_pages_slowpath(alloc_mask, order, &ac);
cc9a6c87 5010
4fcb0971 5011out:
c4159a75 5012 if (memcg_kmem_enabled() && (gfp_mask & __GFP_ACCOUNT) && page &&
f4b00eab 5013 unlikely(__memcg_kmem_charge_page(page, gfp_mask, order) != 0)) {
c4159a75
VD
5014 __free_pages(page, order);
5015 page = NULL;
4949148a
VD
5016 }
5017
4fcb0971
MG
5018 trace_mm_page_alloc(page, order, alloc_mask, ac.migratetype);
5019
11e33f6a 5020 return page;
1da177e4 5021}
d239171e 5022EXPORT_SYMBOL(__alloc_pages_nodemask);
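
Callers rarely reach __alloc_pages_nodemask() directly; the usual entry points are the alloc_pages()/alloc_pages_node() wrappers in include/linux/gfp.h, which supply the preferred node and a NULL nodemask. A hedged sketch of the common pattern (names are illustrative, error handling trimmed):

#include <linux/gfp.h>
#include <linux/mm.h>

static void *grab_two_pages_on_node(int nid)
{
	/* Order-1 allocation, preferring node nid but allowed to fall back. */
	struct page *page = alloc_pages_node(nid, GFP_KERNEL, 1);

	if (!page)
		return NULL;

	return page_address(page);	/* lowmem only, so no kmap() is needed */
}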
1da177e4
LT
5023
5024/*
9ea9a680
MH
5025 * Common helper functions. Never use with __GFP_HIGHMEM because the returned
5026 * address cannot represent highmem pages. Use alloc_pages and then kmap if
5027 * you need to access high mem.
1da177e4 5028 */
920c7a5d 5029unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
1da177e4 5030{
945a1113
AM
5031 struct page *page;
5032
9ea9a680 5033 page = alloc_pages(gfp_mask & ~__GFP_HIGHMEM, order);
1da177e4
LT
5034 if (!page)
5035 return 0;
5036 return (unsigned long) page_address(page);
5037}
1da177e4
LT
5038EXPORT_SYMBOL(__get_free_pages);
5039
920c7a5d 5040unsigned long get_zeroed_page(gfp_t gfp_mask)
1da177e4 5041{
945a1113 5042 return __get_free_pages(gfp_mask | __GFP_ZERO, 0);
1da177e4 5043}
1da177e4
LT
5044EXPORT_SYMBOL(get_zeroed_page);
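
Both helpers hand back a kernel virtual address rather than a struct page, which is convenient for short-lived lowmem scratch buffers. A small usage sketch, assuming a sleepable GFP_KERNEL context; the function itself is hypothetical:

#include <linux/errno.h>
#include <linux/gfp.h>

static int use_scratch_page(void)
{
	unsigned long buf = get_zeroed_page(GFP_KERNEL);	/* one zeroed page */

	if (!buf)
		return -ENOMEM;

	/* ... use (void *)buf as a PAGE_SIZE scratch area ... */

	free_pages(buf, 0);	/* the order must match the allocation */
	return 0;
}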
5045
742aa7fb 5046static inline void free_the_page(struct page *page, unsigned int order)
1da177e4 5047{
742aa7fb
AL
5048 if (order == 0) /* Via pcp? */
5049 free_unref_page(page);
5050 else
7fef431b 5051 __free_pages_ok(page, order, FPI_NONE);
1da177e4
LT
5052}
5053
7f194fbb
MWO
5054/**
5055 * __free_pages - Free pages allocated with alloc_pages().
5056 * @page: The page pointer returned from alloc_pages().
5057 * @order: The order of the allocation.
5058 *
5059 * This function can free multi-page allocations that are not compound
5060 * pages. It does not check that the @order passed in matches that of
5061 * the allocation, so it is easy to leak memory. Freeing more memory
5062 * than was allocated will probably emit a warning.
5063 *
5064 * If the last reference to this page is speculative, it will be released
5065 * by put_page() which only frees the first page of a non-compound
5066 * allocation. To prevent the remaining pages from being leaked, we free
5067 * the subsequent pages here. If you want to use the page's reference
5068 * count to decide when to free the allocation, you should allocate a
5069 * compound page, and use put_page() instead of __free_pages().
5070 *
5071 * Context: May be called in interrupt context or while holding a normal
5072 * spinlock, but not in NMI context or while holding a raw spinlock.
5073 */
742aa7fb
AL
5074void __free_pages(struct page *page, unsigned int order)
5075{
5076 if (put_page_testzero(page))
5077 free_the_page(page, order);
e320d301
MWO
5078 else if (!PageHead(page))
5079 while (order-- > 0)
5080 free_the_page(page + (1 << order), order);
742aa7fb 5081}
1da177e4
LT
5082EXPORT_SYMBOL(__free_pages);
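
The kerneldoc above matters in practice: a non-compound high-order allocation must be freed with the same order it was allocated with, while a compound allocation can simply be dropped with put_page(). A brief sketch of the two idioms (illustrative only):

#include <linux/gfp.h>
#include <linux/mm.h>

static void free_page_idioms(void)
{
	struct page *plain, *comp;

	/* Non-compound order-3 block: the caller must remember the order. */
	plain = alloc_pages(GFP_KERNEL, 3);
	if (plain)
		__free_pages(plain, 3);

	/* Compound order-3 block: the refcount covers the whole allocation. */
	comp = alloc_pages(GFP_KERNEL | __GFP_COMP, 3);
	if (comp)
		put_page(comp);
}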
5083
920c7a5d 5084void free_pages(unsigned long addr, unsigned int order)
1da177e4
LT
5085{
5086 if (addr != 0) {
725d704e 5087 VM_BUG_ON(!virt_addr_valid((void *)addr));
1da177e4
LT
5088 __free_pages(virt_to_page((void *)addr), order);
5089 }
5090}
5091
5092EXPORT_SYMBOL(free_pages);
5093
b63ae8ca
AD
5094/*
5095 * Page Fragment:
5096 * An arbitrary-length arbitrary-offset area of memory which resides
5097 * within a 0 or higher order page. Multiple fragments within that page
5098 * are individually refcounted, in the page's reference counter.
5099 *
5100 * The page_frag functions below provide a simple allocation framework for
5101 * page fragments. This is used by the network stack and network device
5102 * drivers to provide a backing region of memory for use as either an
5103 * sk_buff->head, or to be used in the "frags" portion of skb_shared_info.
5104 */
2976db80
AD
5105static struct page *__page_frag_cache_refill(struct page_frag_cache *nc,
5106 gfp_t gfp_mask)
b63ae8ca
AD
5107{
5108 struct page *page = NULL;
5109 gfp_t gfp = gfp_mask;
5110
5111#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
5112 gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY |
5113 __GFP_NOMEMALLOC;
5114 page = alloc_pages_node(NUMA_NO_NODE, gfp_mask,
5115 PAGE_FRAG_CACHE_MAX_ORDER);
5116 nc->size = page ? PAGE_FRAG_CACHE_MAX_SIZE : PAGE_SIZE;
5117#endif
5118 if (unlikely(!page))
5119 page = alloc_pages_node(NUMA_NO_NODE, gfp, 0);
5120
5121 nc->va = page ? page_address(page) : NULL;
5122
5123 return page;
5124}
5125
2976db80 5126void __page_frag_cache_drain(struct page *page, unsigned int count)
44fdffd7
AD
5127{
5128 VM_BUG_ON_PAGE(page_ref_count(page) == 0, page);
5129
742aa7fb
AL
5130 if (page_ref_sub_and_test(page, count))
5131 free_the_page(page, compound_order(page));
44fdffd7 5132}
2976db80 5133EXPORT_SYMBOL(__page_frag_cache_drain);
44fdffd7 5134
8c2dd3e4
AD
5135void *page_frag_alloc(struct page_frag_cache *nc,
5136 unsigned int fragsz, gfp_t gfp_mask)
b63ae8ca
AD
5137{
5138 unsigned int size = PAGE_SIZE;
5139 struct page *page;
5140 int offset;
5141
5142 if (unlikely(!nc->va)) {
5143refill:
2976db80 5144 page = __page_frag_cache_refill(nc, gfp_mask);
b63ae8ca
AD
5145 if (!page)
5146 return NULL;
5147
5148#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
5149 /* if size can vary use size else just use PAGE_SIZE */
5150 size = nc->size;
5151#endif
5152 /* Even if we own the page, we do not use atomic_set().
5153 * This would break get_page_unless_zero() users.
5154 */
86447726 5155 page_ref_add(page, PAGE_FRAG_CACHE_MAX_SIZE);
b63ae8ca
AD
5156
5157 /* reset page count bias and offset to start of new frag */
2f064f34 5158 nc->pfmemalloc = page_is_pfmemalloc(page);
86447726 5159 nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
b63ae8ca
AD
5160 nc->offset = size;
5161 }
5162
5163 offset = nc->offset - fragsz;
5164 if (unlikely(offset < 0)) {
5165 page = virt_to_page(nc->va);
5166
fe896d18 5167 if (!page_ref_sub_and_test(page, nc->pagecnt_bias))
b63ae8ca
AD
5168 goto refill;
5169
d8c19014
DZ
5170 if (unlikely(nc->pfmemalloc)) {
5171 free_the_page(page, compound_order(page));
5172 goto refill;
5173 }
5174
b63ae8ca
AD
5175#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
5176 /* if size can vary use size else just use PAGE_SIZE */
5177 size = nc->size;
5178#endif
5179 /* OK, page count is 0, we can safely set it */
86447726 5180 set_page_count(page, PAGE_FRAG_CACHE_MAX_SIZE + 1);
b63ae8ca
AD
5181
5182 /* reset page count bias and offset to start of new frag */
86447726 5183 nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
b63ae8ca
AD
5184 offset = size - fragsz;
5185 }
5186
5187 nc->pagecnt_bias--;
5188 nc->offset = offset;
5189
5190 return nc->va + offset;
5191}
8c2dd3e4 5192EXPORT_SYMBOL(page_frag_alloc);
b63ae8ca
AD
5193
5194/*
5195 * Frees a page fragment allocated out of either a compound or order 0 page.
5196 */
8c2dd3e4 5197void page_frag_free(void *addr)
b63ae8ca
AD
5198{
5199 struct page *page = virt_to_head_page(addr);
5200
742aa7fb
AL
5201 if (unlikely(put_page_testzero(page)))
5202 free_the_page(page, compound_order(page));
b63ae8ca 5203}
8c2dd3e4 5204EXPORT_SYMBOL(page_frag_free);
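
A rough sketch of how a consumer such as a network driver might use the fragment cache; the per-CPU cache, the GFP flags and the assumption that callers stay on one CPU are illustrative rather than taken from this file:

#include <linux/gfp.h>
#include <linux/mm_types.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(struct page_frag_cache, demo_frag_cache);

static void *grab_frag(unsigned int len)
{
	/* Assumes the caller keeps us on one CPU (e.g. softirq context). */
	return page_frag_alloc(this_cpu_ptr(&demo_frag_cache), len,
			       GFP_ATOMIC | __GFP_NOWARN);
}

static void drop_frag(void *data)
{
	page_frag_free(data);	/* works for compound and order-0 backing pages */
}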
b63ae8ca 5205
d00181b9
KS
5206static void *make_alloc_exact(unsigned long addr, unsigned int order,
5207 size_t size)
ee85c2e1
AK
5208{
5209 if (addr) {
5210 unsigned long alloc_end = addr + (PAGE_SIZE << order);
5211 unsigned long used = addr + PAGE_ALIGN(size);
5212
5213 split_page(virt_to_page((void *)addr), order);
5214 while (used < alloc_end) {
5215 free_page(used);
5216 used += PAGE_SIZE;
5217 }
5218 }
5219 return (void *)addr;
5220}
5221
2be0ffe2
TT
5222/**
5223 * alloc_pages_exact - allocate an exact number of physically-contiguous pages.
5224 * @size: the number of bytes to allocate
63931eb9 5225 * @gfp_mask: GFP flags for the allocation, must not contain __GFP_COMP
2be0ffe2
TT
5226 *
5227 * This function is similar to alloc_pages(), except that it allocates the
5228 * minimum number of pages to satisfy the request. alloc_pages() can only
5229 * allocate memory in power-of-two pages.
5230 *
5231 * This function is also limited by MAX_ORDER.
5232 *
5233 * Memory allocated by this function must be released by free_pages_exact().
a862f68a
MR
5234 *
5235 * Return: pointer to the allocated area or %NULL in case of error.
2be0ffe2
TT
5236 */
5237void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
5238{
5239 unsigned int order = get_order(size);
5240 unsigned long addr;
5241
63931eb9
VB
5242 if (WARN_ON_ONCE(gfp_mask & __GFP_COMP))
5243 gfp_mask &= ~__GFP_COMP;
5244
2be0ffe2 5245 addr = __get_free_pages(gfp_mask, order);
ee85c2e1 5246 return make_alloc_exact(addr, order, size);
2be0ffe2
TT
5247}
5248EXPORT_SYMBOL(alloc_pages_exact);
5249
ee85c2e1
AK
5250/**
5251 * alloc_pages_exact_nid - allocate an exact number of physically-contiguous
5252 * pages on a node.
b5e6ab58 5253 * @nid: the preferred node ID where memory should be allocated
ee85c2e1 5254 * @size: the number of bytes to allocate
63931eb9 5255 * @gfp_mask: GFP flags for the allocation, must not contain __GFP_COMP
ee85c2e1
AK
5256 *
5257 * Like alloc_pages_exact(), but try to allocate on node nid first before falling
5258 * back.
a862f68a
MR
5259 *
5260 * Return: pointer to the allocated area or %NULL in case of error.
ee85c2e1 5261 */
e1931811 5262void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask)
ee85c2e1 5263{
d00181b9 5264 unsigned int order = get_order(size);
63931eb9
VB
5265 struct page *p;
5266
5267 if (WARN_ON_ONCE(gfp_mask & __GFP_COMP))
5268 gfp_mask &= ~__GFP_COMP;
5269
5270 p = alloc_pages_node(nid, gfp_mask, order);
ee85c2e1
AK
5271 if (!p)
5272 return NULL;
5273 return make_alloc_exact((unsigned long)page_address(p), order, size);
5274}
ee85c2e1 5275
2be0ffe2
TT
5276/**
5277 * free_pages_exact - release memory allocated via alloc_pages_exact()
5278 * @virt: the value returned by alloc_pages_exact.
5279 * @size: size of allocation, same value as passed to alloc_pages_exact().
5280 *
5281 * Release the memory allocated by a previous call to alloc_pages_exact.
5282 */
5283void free_pages_exact(void *virt, size_t size)
5284{
5285 unsigned long addr = (unsigned long)virt;
5286 unsigned long end = addr + PAGE_ALIGN(size);
5287
5288 while (addr < end) {
5289 free_page(addr);
5290 addr += PAGE_SIZE;
5291 }
5292}
5293EXPORT_SYMBOL(free_pages_exact);
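
Because alloc_pages_exact() trims the allocation back down to PAGE_ALIGN(size), the caller only has to remember the byte size rather than an order. A minimal sketch under that assumption; the buffer name and size are illustrative:

#include <linux/errno.h>
#include <linux/gfp.h>

#define DEMO_RING_BYTES	(24 * 1024)	/* 6 pages: not a power-of-two order */

static void *demo_ring;

static int demo_ring_setup(void)
{
	/* __GFP_COMP would be warned about and stripped, so it is not used. */
	demo_ring = alloc_pages_exact(DEMO_RING_BYTES, GFP_KERNEL | __GFP_ZERO);
	return demo_ring ? 0 : -ENOMEM;
}

static void demo_ring_teardown(void)
{
	if (demo_ring)
		free_pages_exact(demo_ring, DEMO_RING_BYTES);
	demo_ring = NULL;
}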
5294
e0fb5815
ZY
5295/**
5296 * nr_free_zone_pages - count number of pages beyond high watermark
5297 * @offset: The zone index of the highest zone
5298 *
a862f68a 5299 * nr_free_zone_pages() counts the number of pages which are beyond the
e0fb5815
ZY
5300 * high watermark within all zones at or below a given zone index. For each
5301 * zone, the number of pages is calculated as:
0e056eb5 5302 *
5303 * nr_free_zone_pages = managed_pages - high_pages
a862f68a
MR
5304 *
5305 * Return: number of pages beyond high watermark.
e0fb5815 5306 */
ebec3862 5307static unsigned long nr_free_zone_pages(int offset)
1da177e4 5308{
dd1a239f 5309 struct zoneref *z;
54a6eb5c
MG
5310 struct zone *zone;
5311
e310fd43 5312 /* Just pick one node, since fallback list is circular */
ebec3862 5313 unsigned long sum = 0;
1da177e4 5314
0e88460d 5315 struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);
1da177e4 5316
54a6eb5c 5317 for_each_zone_zonelist(zone, z, zonelist, offset) {
9705bea5 5318 unsigned long size = zone_managed_pages(zone);
41858966 5319 unsigned long high = high_wmark_pages(zone);
e310fd43
MB
5320 if (size > high)
5321 sum += size - high;
1da177e4
LT
5322 }
5323
5324 return sum;
5325}
5326
e0fb5815
ZY
5327/**
5328 * nr_free_buffer_pages - count number of pages beyond high watermark
5329 *
5330 * nr_free_buffer_pages() counts the number of pages which are beyond the high
5331 * watermark within ZONE_DMA and ZONE_NORMAL.
a862f68a
MR
5332 *
5333 * Return: number of pages beyond high watermark within ZONE_DMA and
5334 * ZONE_NORMAL.
1da177e4 5335 */
ebec3862 5336unsigned long nr_free_buffer_pages(void)
1da177e4 5337{
af4ca457 5338 return nr_free_zone_pages(gfp_zone(GFP_USER));
1da177e4 5339}
c2f1a551 5340EXPORT_SYMBOL_GPL(nr_free_buffer_pages);
1da177e4 5341
08e0f6a9 5342static inline void show_node(struct zone *zone)
1da177e4 5343{
e5adfffc 5344 if (IS_ENABLED(CONFIG_NUMA))
25ba77c1 5345 printk("Node %d ", zone_to_nid(zone));
1da177e4 5346}
1da177e4 5347
d02bd27b
IR
5348long si_mem_available(void)
5349{
5350 long available;
5351 unsigned long pagecache;
5352 unsigned long wmark_low = 0;
5353 unsigned long pages[NR_LRU_LISTS];
b29940c1 5354 unsigned long reclaimable;
d02bd27b
IR
5355 struct zone *zone;
5356 int lru;
5357
5358 for (lru = LRU_BASE; lru < NR_LRU_LISTS; lru++)
2f95ff90 5359 pages[lru] = global_node_page_state(NR_LRU_BASE + lru);
d02bd27b
IR
5360
5361 for_each_zone(zone)
a9214443 5362 wmark_low += low_wmark_pages(zone);
d02bd27b
IR
5363
5364 /*
5365 * Estimate the amount of memory available for userspace allocations,
5366 * without causing swapping.
5367 */
c41f012a 5368 available = global_zone_page_state(NR_FREE_PAGES) - totalreserve_pages;
d02bd27b
IR
5369
5370 /*
5371 * Not all the page cache can be freed, otherwise the system will
5372 * start swapping. Assume at least half of the page cache, or the
5373 * low watermark worth of cache, needs to stay.
5374 */
5375 pagecache = pages[LRU_ACTIVE_FILE] + pages[LRU_INACTIVE_FILE];
5376 pagecache -= min(pagecache / 2, wmark_low);
5377 available += pagecache;
5378
5379 /*
b29940c1
VB
5380 * Part of the reclaimable slab and other kernel memory consists of
5381 * items that are in use, and cannot be freed. Cap this estimate at the
5382 * low watermark.
d02bd27b 5383 */
d42f3245
RG
5384 reclaimable = global_node_page_state_pages(NR_SLAB_RECLAIMABLE_B) +
5385 global_node_page_state(NR_KERNEL_MISC_RECLAIMABLE);
b29940c1 5386 available += reclaimable - min(reclaimable / 2, wmark_low);
034ebf65 5387
d02bd27b
IR
5388 if (available < 0)
5389 available = 0;
5390 return available;
5391}
5392EXPORT_SYMBOL_GPL(si_mem_available);
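
si_mem_available() is the estimate behind the MemAvailable field of /proc/meminfo; it is a heuristic in pages, not a guarantee. A sketch of the kind of sizing decision it can inform (the helper and the 50% threshold are arbitrary examples):

#include <linux/mm.h>
#include <linux/types.h>

static bool demo_can_afford(unsigned long bytes_wanted)
{
	/* Convert the page-based estimate to bytes before comparing. */
	u64 avail = (u64)si_mem_available() << PAGE_SHIFT;

	return bytes_wanted < avail / 2;	/* keep at least half in reserve */
}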
5393
1da177e4
LT
5394void si_meminfo(struct sysinfo *val)
5395{
ca79b0c2 5396 val->totalram = totalram_pages();
11fb9989 5397 val->sharedram = global_node_page_state(NR_SHMEM);
c41f012a 5398 val->freeram = global_zone_page_state(NR_FREE_PAGES);
1da177e4 5399 val->bufferram = nr_blockdev_pages();
ca79b0c2 5400 val->totalhigh = totalhigh_pages();
1da177e4 5401 val->freehigh = nr_free_highpages();
1da177e4
LT
5402 val->mem_unit = PAGE_SIZE;
5403}
5404
5405EXPORT_SYMBOL(si_meminfo);
5406
5407#ifdef CONFIG_NUMA
5408void si_meminfo_node(struct sysinfo *val, int nid)
5409{
cdd91a77
JL
5410 int zone_type; /* needs to be signed */
5411 unsigned long managed_pages = 0;
fc2bd799
JK
5412 unsigned long managed_highpages = 0;
5413 unsigned long free_highpages = 0;
1da177e4
LT
5414 pg_data_t *pgdat = NODE_DATA(nid);
5415
cdd91a77 5416 for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++)
9705bea5 5417 managed_pages += zone_managed_pages(&pgdat->node_zones[zone_type]);
cdd91a77 5418 val->totalram = managed_pages;
11fb9989 5419 val->sharedram = node_page_state(pgdat, NR_SHMEM);
75ef7184 5420 val->freeram = sum_zone_node_page_state(nid, NR_FREE_PAGES);
98d2b0eb 5421#ifdef CONFIG_HIGHMEM
fc2bd799
JK
5422 for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
5423 struct zone *zone = &pgdat->node_zones[zone_type];
5424
5425 if (is_highmem(zone)) {
9705bea5 5426 managed_highpages += zone_managed_pages(zone);
fc2bd799
JK
5427 free_highpages += zone_page_state(zone, NR_FREE_PAGES);
5428 }
5429 }
5430 val->totalhigh = managed_highpages;
5431 val->freehigh = free_highpages;
98d2b0eb 5432#else
fc2bd799
JK
5433 val->totalhigh = managed_highpages;
5434 val->freehigh = free_highpages;
98d2b0eb 5435#endif
1da177e4
LT
5436 val->mem_unit = PAGE_SIZE;
5437}
5438#endif
5439
ddd588b5 5440/*
7bf02ea2
DR
5441 * Determine whether the node should be displayed or not, depending on whether
5442 * SHOW_MEM_FILTER_NODES was passed to show_free_areas().
ddd588b5 5443 */
9af744d7 5444static bool show_mem_node_skip(unsigned int flags, int nid, nodemask_t *nodemask)
ddd588b5 5445{
ddd588b5 5446 if (!(flags & SHOW_MEM_FILTER_NODES))
9af744d7 5447 return false;
ddd588b5 5448
9af744d7
MH
5449 /*
5450 * no node mask - aka implicit memory numa policy. Do not bother with
5451 * the synchronization - read_mems_allowed_begin - because we do not
5452 * have to be precise here.
5453 */
5454 if (!nodemask)
5455 nodemask = &cpuset_current_mems_allowed;
5456
5457 return !node_isset(nid, *nodemask);
ddd588b5
DR
5458}
5459
1da177e4
LT
5460#define K(x) ((x) << (PAGE_SHIFT-10))
5461
377e4f16
RV
5462static void show_migration_types(unsigned char type)
5463{
5464 static const char types[MIGRATE_TYPES] = {
5465 [MIGRATE_UNMOVABLE] = 'U',
377e4f16 5466 [MIGRATE_MOVABLE] = 'M',
475a2f90
VB
5467 [MIGRATE_RECLAIMABLE] = 'E',
5468 [MIGRATE_HIGHATOMIC] = 'H',
377e4f16
RV
5469#ifdef CONFIG_CMA
5470 [MIGRATE_CMA] = 'C',
5471#endif
194159fb 5472#ifdef CONFIG_MEMORY_ISOLATION
377e4f16 5473 [MIGRATE_ISOLATE] = 'I',
194159fb 5474#endif
377e4f16
RV
5475 };
5476 char tmp[MIGRATE_TYPES + 1];
5477 char *p = tmp;
5478 int i;
5479
5480 for (i = 0; i < MIGRATE_TYPES; i++) {
5481 if (type & (1 << i))
5482 *p++ = types[i];
5483 }
5484
5485 *p = '\0';
1f84a18f 5486 printk(KERN_CONT "(%s) ", tmp);
377e4f16
RV
5487}
5488
1da177e4
LT
5489/*
5490 * Show free area list (used inside shift_scroll-lock stuff)
5491 * We also calculate the percentage fragmentation. We do this by counting the
5492 * memory on each free list with the exception of the first item on the list.
d1bfcdb8
KK
5493 *
5494 * Bits in @filter:
5495 * SHOW_MEM_FILTER_NODES: suppress nodes that are not allowed by current's
5496 * cpuset.
1da177e4 5497 */
9af744d7 5498void show_free_areas(unsigned int filter, nodemask_t *nodemask)
1da177e4 5499{
d1bfcdb8 5500 unsigned long free_pcp = 0;
c7241913 5501 int cpu;
1da177e4 5502 struct zone *zone;
599d0c95 5503 pg_data_t *pgdat;
1da177e4 5504
ee99c71c 5505 for_each_populated_zone(zone) {
9af744d7 5506 if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
ddd588b5 5507 continue;
d1bfcdb8 5508
761b0677
KK
5509 for_each_online_cpu(cpu)
5510 free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count;
1da177e4
LT
5511 }
5512
a731286d
KM
5513 printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n"
5514 " active_file:%lu inactive_file:%lu isolated_file:%lu\n"
8d92890b 5515 " unevictable:%lu dirty:%lu writeback:%lu\n"
d1bfcdb8 5516 " slab_reclaimable:%lu slab_unreclaimable:%lu\n"
d1ce749a 5517 " mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n"
d1bfcdb8 5518 " free:%lu free_pcp:%lu free_cma:%lu\n",
599d0c95
MG
5519 global_node_page_state(NR_ACTIVE_ANON),
5520 global_node_page_state(NR_INACTIVE_ANON),
5521 global_node_page_state(NR_ISOLATED_ANON),
5522 global_node_page_state(NR_ACTIVE_FILE),
5523 global_node_page_state(NR_INACTIVE_FILE),
5524 global_node_page_state(NR_ISOLATED_FILE),
5525 global_node_page_state(NR_UNEVICTABLE),
11fb9989
MG
5526 global_node_page_state(NR_FILE_DIRTY),
5527 global_node_page_state(NR_WRITEBACK),
d42f3245
RG
5528 global_node_page_state_pages(NR_SLAB_RECLAIMABLE_B),
5529 global_node_page_state_pages(NR_SLAB_UNRECLAIMABLE_B),
50658e2e 5530 global_node_page_state(NR_FILE_MAPPED),
11fb9989 5531 global_node_page_state(NR_SHMEM),
f0c0c115 5532 global_node_page_state(NR_PAGETABLE),
c41f012a
MH
5533 global_zone_page_state(NR_BOUNCE),
5534 global_zone_page_state(NR_FREE_PAGES),
d1bfcdb8 5535 free_pcp,
c41f012a 5536 global_zone_page_state(NR_FREE_CMA_PAGES));
1da177e4 5537
599d0c95 5538 for_each_online_pgdat(pgdat) {
9af744d7 5539 if (show_mem_node_skip(filter, pgdat->node_id, nodemask))
c02e50bb
MH
5540 continue;
5541
599d0c95
MG
5542 printk("Node %d"
5543 " active_anon:%lukB"
5544 " inactive_anon:%lukB"
5545 " active_file:%lukB"
5546 " inactive_file:%lukB"
5547 " unevictable:%lukB"
5548 " isolated(anon):%lukB"
5549 " isolated(file):%lukB"
50658e2e 5550 " mapped:%lukB"
11fb9989
MG
5551 " dirty:%lukB"
5552 " writeback:%lukB"
5553 " shmem:%lukB"
5554#ifdef CONFIG_TRANSPARENT_HUGEPAGE
5555 " shmem_thp: %lukB"
5556 " shmem_pmdmapped: %lukB"
5557 " anon_thp: %lukB"
5558#endif
5559 " writeback_tmp:%lukB"
991e7673
SB
5560 " kernel_stack:%lukB"
5561#ifdef CONFIG_SHADOW_CALL_STACK
5562 " shadow_call_stack:%lukB"
5563#endif
f0c0c115 5564 " pagetables:%lukB"
599d0c95
MG
5565 " all_unreclaimable? %s"
5566 "\n",
5567 pgdat->node_id,
5568 K(node_page_state(pgdat, NR_ACTIVE_ANON)),
5569 K(node_page_state(pgdat, NR_INACTIVE_ANON)),
5570 K(node_page_state(pgdat, NR_ACTIVE_FILE)),
5571 K(node_page_state(pgdat, NR_INACTIVE_FILE)),
5572 K(node_page_state(pgdat, NR_UNEVICTABLE)),
5573 K(node_page_state(pgdat, NR_ISOLATED_ANON)),
5574 K(node_page_state(pgdat, NR_ISOLATED_FILE)),
50658e2e 5575 K(node_page_state(pgdat, NR_FILE_MAPPED)),
11fb9989
MG
5576 K(node_page_state(pgdat, NR_FILE_DIRTY)),
5577 K(node_page_state(pgdat, NR_WRITEBACK)),
1f06b81a 5578 K(node_page_state(pgdat, NR_SHMEM)),
11fb9989
MG
5579#ifdef CONFIG_TRANSPARENT_HUGEPAGE
5580 K(node_page_state(pgdat, NR_SHMEM_THPS) * HPAGE_PMD_NR),
5581 K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED)
5582 * HPAGE_PMD_NR),
5583 K(node_page_state(pgdat, NR_ANON_THPS) * HPAGE_PMD_NR),
5584#endif
11fb9989 5585 K(node_page_state(pgdat, NR_WRITEBACK_TEMP)),
991e7673
SB
5586 node_page_state(pgdat, NR_KERNEL_STACK_KB),
5587#ifdef CONFIG_SHADOW_CALL_STACK
5588 node_page_state(pgdat, NR_KERNEL_SCS_KB),
5589#endif
f0c0c115 5590 K(node_page_state(pgdat, NR_PAGETABLE)),
c73322d0
JW
5591 pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES ?
5592 "yes" : "no");
599d0c95
MG
5593 }
5594
ee99c71c 5595 for_each_populated_zone(zone) {
1da177e4
LT
5596 int i;
5597
9af744d7 5598 if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
ddd588b5 5599 continue;
d1bfcdb8
KK
5600
5601 free_pcp = 0;
5602 for_each_online_cpu(cpu)
5603 free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count;
5604
1da177e4 5605 show_node(zone);
1f84a18f
JP
5606 printk(KERN_CONT
5607 "%s"
1da177e4
LT
5608 " free:%lukB"
5609 " min:%lukB"
5610 " low:%lukB"
5611 " high:%lukB"
e47b346a 5612 " reserved_highatomic:%luKB"
71c799f4
MK
5613 " active_anon:%lukB"
5614 " inactive_anon:%lukB"
5615 " active_file:%lukB"
5616 " inactive_file:%lukB"
5617 " unevictable:%lukB"
5a1c84b4 5618 " writepending:%lukB"
1da177e4 5619 " present:%lukB"
9feedc9d 5620 " managed:%lukB"
4a0aa73f 5621 " mlocked:%lukB"
4a0aa73f 5622 " bounce:%lukB"
d1bfcdb8
KK
5623 " free_pcp:%lukB"
5624 " local_pcp:%ukB"
d1ce749a 5625 " free_cma:%lukB"
1da177e4
LT
5626 "\n",
5627 zone->name,
88f5acf8 5628 K(zone_page_state(zone, NR_FREE_PAGES)),
41858966
MG
5629 K(min_wmark_pages(zone)),
5630 K(low_wmark_pages(zone)),
5631 K(high_wmark_pages(zone)),
e47b346a 5632 K(zone->nr_reserved_highatomic),
71c799f4
MK
5633 K(zone_page_state(zone, NR_ZONE_ACTIVE_ANON)),
5634 K(zone_page_state(zone, NR_ZONE_INACTIVE_ANON)),
5635 K(zone_page_state(zone, NR_ZONE_ACTIVE_FILE)),
5636 K(zone_page_state(zone, NR_ZONE_INACTIVE_FILE)),
5637 K(zone_page_state(zone, NR_ZONE_UNEVICTABLE)),
5a1c84b4 5638 K(zone_page_state(zone, NR_ZONE_WRITE_PENDING)),
1da177e4 5639 K(zone->present_pages),
9705bea5 5640 K(zone_managed_pages(zone)),
4a0aa73f 5641 K(zone_page_state(zone, NR_MLOCK)),
4a0aa73f 5642 K(zone_page_state(zone, NR_BOUNCE)),
d1bfcdb8
KK
5643 K(free_pcp),
5644 K(this_cpu_read(zone->pageset->pcp.count)),
33e077bd 5645 K(zone_page_state(zone, NR_FREE_CMA_PAGES)));
1da177e4
LT
5646 printk("lowmem_reserve[]:");
5647 for (i = 0; i < MAX_NR_ZONES; i++)
1f84a18f
JP
5648 printk(KERN_CONT " %ld", zone->lowmem_reserve[i]);
5649 printk(KERN_CONT "\n");
1da177e4
LT
5650 }
5651
ee99c71c 5652 for_each_populated_zone(zone) {
d00181b9
KS
5653 unsigned int order;
5654 unsigned long nr[MAX_ORDER], flags, total = 0;
377e4f16 5655 unsigned char types[MAX_ORDER];
1da177e4 5656
9af744d7 5657 if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
ddd588b5 5658 continue;
1da177e4 5659 show_node(zone);
1f84a18f 5660 printk(KERN_CONT "%s: ", zone->name);
1da177e4
LT
5661
5662 spin_lock_irqsave(&zone->lock, flags);
5663 for (order = 0; order < MAX_ORDER; order++) {
377e4f16
RV
5664 struct free_area *area = &zone->free_area[order];
5665 int type;
5666
5667 nr[order] = area->nr_free;
8f9de51a 5668 total += nr[order] << order;
377e4f16
RV
5669
5670 types[order] = 0;
5671 for (type = 0; type < MIGRATE_TYPES; type++) {
b03641af 5672 if (!free_area_empty(area, type))
377e4f16
RV
5673 types[order] |= 1 << type;
5674 }
1da177e4
LT
5675 }
5676 spin_unlock_irqrestore(&zone->lock, flags);
377e4f16 5677 for (order = 0; order < MAX_ORDER; order++) {
1f84a18f
JP
5678 printk(KERN_CONT "%lu*%lukB ",
5679 nr[order], K(1UL) << order);
377e4f16
RV
5680 if (nr[order])
5681 show_migration_types(types[order]);
5682 }
1f84a18f 5683 printk(KERN_CONT "= %lukB\n", K(total));
1da177e4
LT
5684 }
5685
949f7ec5
DR
5686 hugetlb_show_meminfo();
5687
11fb9989 5688 printk("%ld total pagecache pages\n", global_node_page_state(NR_FILE_PAGES));
e6f3602d 5689
1da177e4
LT
5690 show_swap_cache_info();
5691}
5692
19770b32
MG
5693static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
5694{
5695 zoneref->zone = zone;
5696 zoneref->zone_idx = zone_idx(zone);
5697}
5698
1da177e4
LT
5699/*
5700 * Builds allocation fallback zone lists.
1a93205b
CL
5701 *
5702 * Add all populated zones of a node to the zonelist.
1da177e4 5703 */
9d3be21b 5704static int build_zonerefs_node(pg_data_t *pgdat, struct zoneref *zonerefs)
1da177e4 5705{
1a93205b 5706 struct zone *zone;
bc732f1d 5707 enum zone_type zone_type = MAX_NR_ZONES;
9d3be21b 5708 int nr_zones = 0;
02a68a5e
CL
5709
5710 do {
2f6726e5 5711 zone_type--;
070f8032 5712 zone = pgdat->node_zones + zone_type;
6aa303de 5713 if (managed_zone(zone)) {
9d3be21b 5714 zoneref_set_zone(zone, &zonerefs[nr_zones++]);
070f8032 5715 check_highest_zone(zone_type);
1da177e4 5716 }
2f6726e5 5717 } while (zone_type);
bc732f1d 5718
070f8032 5719 return nr_zones;
1da177e4
LT
5720}
5721
5722#ifdef CONFIG_NUMA
f0c0b2b8
KH
5723
5724static int __parse_numa_zonelist_order(char *s)
5725{
c9bff3ee
MH
5726 /*
5727 * We used to support different zonelist modes but they turned
5728 * out to be just not useful. Let's keep the warning in place
5729 * if somebody still uses the cmd line parameter so that we do
5730 * not fail it silently
5731 */
5732 if (!(*s == 'd' || *s == 'D' || *s == 'n' || *s == 'N')) {
5733 pr_warn("Ignoring unsupported numa_zonelist_order value: %s\n", s);
f0c0b2b8
KH
5734 return -EINVAL;
5735 }
5736 return 0;
5737}
5738
c9bff3ee
MH
5739char numa_zonelist_order[] = "Node";
5740
f0c0b2b8
KH
5741/*
5742 * sysctl handler for numa_zonelist_order
5743 */
cccad5b9 5744int numa_zonelist_order_handler(struct ctl_table *table, int write,
32927393 5745 void *buffer, size_t *length, loff_t *ppos)
f0c0b2b8 5746{
32927393
CH
5747 if (write)
5748 return __parse_numa_zonelist_order(buffer);
5749 return proc_dostring(table, write, buffer, length, ppos);
f0c0b2b8
KH
5750}
5751
5752
62bc62a8 5753#define MAX_NODE_LOAD (nr_online_nodes)
f0c0b2b8
KH
5754static int node_load[MAX_NUMNODES];
5755
1da177e4 5756/**
4dc3b16b 5757 * find_next_best_node - find the next node that should appear in a given node's fallback list
1da177e4
LT
5758 * @node: node whose fallback list we're appending
5759 * @used_node_mask: nodemask_t of already used nodes
5760 *
5761 * We use a number of factors to determine which is the next node that should
5762 * appear on a given node's fallback list. The node should not have appeared
5763 * already in @node's fallback list, and it should be the next closest node
5764 * according to the distance array (which contains arbitrary distance values
5765 * from each node to each node in the system), and should also prefer nodes
5766 * with no CPUs, since presumably they'll have very little allocation pressure
5767 * on them otherwise.
a862f68a
MR
5768 *
5769 * Return: node id of the found node or %NUMA_NO_NODE if no node is found.
1da177e4 5770 */
f0c0b2b8 5771static int find_next_best_node(int node, nodemask_t *used_node_mask)
1da177e4 5772{
4cf808eb 5773 int n, val;
1da177e4 5774 int min_val = INT_MAX;
00ef2d2f 5775 int best_node = NUMA_NO_NODE;
1da177e4 5776
4cf808eb
LT
5777 /* Use the local node if we haven't already */
5778 if (!node_isset(node, *used_node_mask)) {
5779 node_set(node, *used_node_mask);
5780 return node;
5781 }
1da177e4 5782
4b0ef1fe 5783 for_each_node_state(n, N_MEMORY) {
1da177e4
LT
5784
5785 /* Don't want a node to appear more than once */
5786 if (node_isset(n, *used_node_mask))
5787 continue;
5788
1da177e4
LT
5789 /* Use the distance array to find the distance */
5790 val = node_distance(node, n);
5791
4cf808eb
LT
5792 /* Penalize nodes under us ("prefer the next node") */
5793 val += (n < node);
5794
1da177e4 5795 /* Give preference to headless and unused nodes */
b630749f 5796 if (!cpumask_empty(cpumask_of_node(n)))
1da177e4
LT
5797 val += PENALTY_FOR_NODE_WITH_CPUS;
5798
5799 /* Slight preference for less loaded node */
5800 val *= (MAX_NODE_LOAD*MAX_NUMNODES);
5801 val += node_load[n];
5802
5803 if (val < min_val) {
5804 min_val = val;
5805 best_node = n;
5806 }
5807 }
5808
5809 if (best_node >= 0)
5810 node_set(best_node, *used_node_mask);
5811
5812 return best_node;
5813}
5814
f0c0b2b8
KH
5815
5816/*
5817 * Build zonelists ordered by node and zones within node.
5818 * This results in maximum locality--normal zone overflows into local
5819 * DMA zone, if any--but risks exhausting DMA zone.
5820 */
9d3be21b
MH
5821static void build_zonelists_in_node_order(pg_data_t *pgdat, int *node_order,
5822 unsigned nr_nodes)
1da177e4 5823{
9d3be21b
MH
5824 struct zoneref *zonerefs;
5825 int i;
5826
5827 zonerefs = pgdat->node_zonelists[ZONELIST_FALLBACK]._zonerefs;
5828
5829 for (i = 0; i < nr_nodes; i++) {
5830 int nr_zones;
5831
5832 pg_data_t *node = NODE_DATA(node_order[i]);
f0c0b2b8 5833
9d3be21b
MH
5834 nr_zones = build_zonerefs_node(node, zonerefs);
5835 zonerefs += nr_zones;
5836 }
5837 zonerefs->zone = NULL;
5838 zonerefs->zone_idx = 0;
f0c0b2b8
KH
5839}
5840
523b9458
CL
5841/*
5842 * Build gfp_thisnode zonelists
5843 */
5844static void build_thisnode_zonelists(pg_data_t *pgdat)
5845{
9d3be21b
MH
5846 struct zoneref *zonerefs;
5847 int nr_zones;
523b9458 5848
9d3be21b
MH
5849 zonerefs = pgdat->node_zonelists[ZONELIST_NOFALLBACK]._zonerefs;
5850 nr_zones = build_zonerefs_node(pgdat, zonerefs);
5851 zonerefs += nr_zones;
5852 zonerefs->zone = NULL;
5853 zonerefs->zone_idx = 0;
523b9458
CL
5854}
5855
f0c0b2b8
KH
5856/*
5857 * Build zonelists ordered by zone and nodes within zones.
5858 * This results in conserving DMA zone[s] until all Normal memory is
5859 * exhausted, but results in overflowing to remote node while memory
5860 * may still exist in local DMA zone.
5861 */
f0c0b2b8 5862
f0c0b2b8
KH
5863static void build_zonelists(pg_data_t *pgdat)
5864{
9d3be21b
MH
5865 static int node_order[MAX_NUMNODES];
5866 int node, load, nr_nodes = 0;
d0ddf49b 5867 nodemask_t used_mask = NODE_MASK_NONE;
f0c0b2b8 5868 int local_node, prev_node;
1da177e4
LT
5869
5870 /* NUMA-aware ordering of nodes */
5871 local_node = pgdat->node_id;
62bc62a8 5872 load = nr_online_nodes;
1da177e4 5873 prev_node = local_node;
f0c0b2b8 5874
f0c0b2b8 5875 memset(node_order, 0, sizeof(node_order));
1da177e4
LT
5876 while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
5877 /*
5878 * We don't want to pressure a particular node.
5879 * So add a penalty to the first node in the same
5880 * distance group to make it round-robin.
5881 */
957f822a
DR
5882 if (node_distance(local_node, node) !=
5883 node_distance(local_node, prev_node))
f0c0b2b8
KH
5884 node_load[node] = load;
5885
9d3be21b 5886 node_order[nr_nodes++] = node;
1da177e4
LT
5887 prev_node = node;
5888 load--;
1da177e4 5889 }
523b9458 5890
9d3be21b 5891 build_zonelists_in_node_order(pgdat, node_order, nr_nodes);
523b9458 5892 build_thisnode_zonelists(pgdat);
1da177e4
LT
5893}
5894
7aac7898
LS
5895#ifdef CONFIG_HAVE_MEMORYLESS_NODES
5896/*
5897 * Return node id of node used for "local" allocations.
5898 * I.e., first node id of first zone in arg node's generic zonelist.
5899 * Used for initializing percpu 'numa_mem', which is used primarily
5900 * for kernel allocations, so use GFP_KERNEL flags to locate zonelist.
5901 */
5902int local_memory_node(int node)
5903{
c33d6c06 5904 struct zoneref *z;
7aac7898 5905
c33d6c06 5906 z = first_zones_zonelist(node_zonelist(node, GFP_KERNEL),
7aac7898 5907 gfp_zone(GFP_KERNEL),
c33d6c06 5908 NULL);
c1093b74 5909 return zone_to_nid(z->zone);
7aac7898
LS
5910}
5911#endif
f0c0b2b8 5912
6423aa81
JK
5913static void setup_min_unmapped_ratio(void);
5914static void setup_min_slab_ratio(void);
1da177e4
LT
5915#else /* CONFIG_NUMA */
5916
f0c0b2b8 5917static void build_zonelists(pg_data_t *pgdat)
1da177e4 5918{
19655d34 5919 int node, local_node;
9d3be21b
MH
5920 struct zoneref *zonerefs;
5921 int nr_zones;
1da177e4
LT
5922
5923 local_node = pgdat->node_id;
1da177e4 5924
9d3be21b
MH
5925 zonerefs = pgdat->node_zonelists[ZONELIST_FALLBACK]._zonerefs;
5926 nr_zones = build_zonerefs_node(pgdat, zonerefs);
5927 zonerefs += nr_zones;
1da177e4 5928
54a6eb5c
MG
5929 /*
5930 * Now we build the zonelist so that it contains the zones
5931 * of all the other nodes.
5932 * We don't want to pressure a particular node, so when
5933 * building the zones for node N, we make sure that the
5934 * zones coming right after the local ones are those from
5935 * node N+1 (modulo N)
5936 */
5937 for (node = local_node + 1; node < MAX_NUMNODES; node++) {
5938 if (!node_online(node))
5939 continue;
9d3be21b
MH
5940 nr_zones = build_zonerefs_node(NODE_DATA(node), zonerefs);
5941 zonerefs += nr_zones;
1da177e4 5942 }
54a6eb5c
MG
5943 for (node = 0; node < local_node; node++) {
5944 if (!node_online(node))
5945 continue;
9d3be21b
MH
5946 nr_zones = build_zonerefs_node(NODE_DATA(node), zonerefs);
5947 zonerefs += nr_zones;
54a6eb5c
MG
5948 }
5949
9d3be21b
MH
5950 zonerefs->zone = NULL;
5951 zonerefs->zone_idx = 0;
1da177e4
LT
5952}
5953
5954#endif /* CONFIG_NUMA */
5955
99dcc3e5
CL
5956/*
5957 * Boot pageset table. One per cpu which is going to be used for all
5958 * zones and all nodes. The parameters will be set in such a way
5959 * that an item put on a list will immediately be handed over to
5960 * the buddy list. This is safe since pageset manipulation is done
5961 * with interrupts disabled.
5962 *
5963 * The boot_pagesets must be kept even after bootup is complete for
5964 * unused processors and/or zones. They do play a role for bootstrapping
5965 * hotplugged processors.
5966 *
5967 * zoneinfo_show() and maybe other functions do
5968 * not check if the processor is online before following the pageset pointer.
5969 * Other parts of the kernel may not check if the zone is available.
5970 */
69a8396a 5971static void pageset_init(struct per_cpu_pageset *p);
952eaf81
VB
5972/* These effectively disable the pcplists in the boot pageset completely */
5973#define BOOT_PAGESET_HIGH 0
5974#define BOOT_PAGESET_BATCH 1
99dcc3e5 5975static DEFINE_PER_CPU(struct per_cpu_pageset, boot_pageset);
385386cf 5976static DEFINE_PER_CPU(struct per_cpu_nodestat, boot_nodestats);
99dcc3e5 5977
11cd8638 5978static void __build_all_zonelists(void *data)
1da177e4 5979{
6811378e 5980 int nid;
afb6ebb3 5981 int __maybe_unused cpu;
9adb62a5 5982 pg_data_t *self = data;
b93e0f32
MH
5983 static DEFINE_SPINLOCK(lock);
5984
5985 spin_lock(&lock);
9276b1bc 5986
7f9cfb31
BL
5987#ifdef CONFIG_NUMA
5988 memset(node_load, 0, sizeof(node_load));
5989#endif
9adb62a5 5990
c1152583
WY
5991 /*
5992 * This node is hotadded and no memory is yet present. So just
5993 * building zonelists is fine - no need to touch other nodes.
5994 */
9adb62a5
JL
5995 if (self && !node_online(self->node_id)) {
5996 build_zonelists(self);
c1152583
WY
5997 } else {
5998 for_each_online_node(nid) {
5999 pg_data_t *pgdat = NODE_DATA(nid);
7ea1530a 6000
c1152583
WY
6001 build_zonelists(pgdat);
6002 }
99dcc3e5 6003
7aac7898
LS
6004#ifdef CONFIG_HAVE_MEMORYLESS_NODES
6005 /*
6006 * We now know the "local memory node" for each node--
6007 * i.e., the node of the first zone in the generic zonelist.
6008 * Set up numa_mem percpu variable for on-line cpus. During
6009 * boot, only the boot cpu should be on-line; we'll init the
6010 * secondary cpus' numa_mem as they come on-line. During
6011 * node/memory hotplug, we'll fixup all on-line cpus.
6012 */
d9c9a0b9 6013 for_each_online_cpu(cpu)
7aac7898 6014 set_cpu_numa_mem(cpu, local_memory_node(cpu_to_node(cpu)));
afb6ebb3 6015#endif
d9c9a0b9 6016 }
b93e0f32
MH
6017
6018 spin_unlock(&lock);
6811378e
YG
6019}
6020
061f67bc
RV
6021static noinline void __init
6022build_all_zonelists_init(void)
6023{
afb6ebb3
MH
6024 int cpu;
6025
061f67bc 6026 __build_all_zonelists(NULL);
afb6ebb3
MH
6027
6028 /*
6029 * Initialize the boot_pagesets that are going to be used
6030 * for bootstrapping processors. The real pagesets for
6031 * each zone will be allocated later when the per cpu
6032 * allocator is available.
6033 *
6034 * boot_pagesets are used also for bootstrapping offline
6035 * cpus if the system is already booted because the pagesets
6036 * are needed to initialize allocators on a specific cpu too.
6037 * F.e. the percpu allocator needs the page allocator which
6038 * needs the percpu allocator in order to allocate its pagesets
6039 * (a chicken-egg dilemma).
6040 */
6041 for_each_possible_cpu(cpu)
69a8396a 6042 pageset_init(&per_cpu(boot_pageset, cpu));
afb6ebb3 6043
061f67bc
RV
6044 mminit_verify_zonelist();
6045 cpuset_init_current_mems_allowed();
6046}
6047
4eaf3f64 6048/*
4eaf3f64 6049 * unless system_state == SYSTEM_BOOTING.
061f67bc 6050 *
72675e13 6051 * __ref due to call of __init annotated helper build_all_zonelists_init
061f67bc 6052 * [protected by SYSTEM_BOOTING].
4eaf3f64 6053 */
72675e13 6054void __ref build_all_zonelists(pg_data_t *pgdat)
6811378e 6055{
0a18e607
DH
6056 unsigned long vm_total_pages;
6057
6811378e 6058 if (system_state == SYSTEM_BOOTING) {
061f67bc 6059 build_all_zonelists_init();
6811378e 6060 } else {
11cd8638 6061 __build_all_zonelists(pgdat);
6811378e
YG
6062 /* cpuset refresh routine should be here */
6063 }
56b9413b
DH
6064 /* Get the number of free pages beyond high watermark in all zones. */
6065 vm_total_pages = nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE));
9ef9acb0
MG
6066 /*
6067 * Disable grouping by mobility if the number of pages in the
6068 * system is too low to allow the mechanism to work. It would be
6069 * more accurate, but expensive to check per-zone. This check is
6070 * made on memory-hotadd so a system can start with mobility
6071 * disabled and enable it later
6072 */
d9c23400 6073 if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES))
9ef9acb0
MG
6074 page_group_by_mobility_disabled = 1;
6075 else
6076 page_group_by_mobility_disabled = 0;
6077
ce0725f7 6078 pr_info("Built %u zonelists, mobility grouping %s. Total pages: %ld\n",
756a025f 6079 nr_online_nodes,
756a025f
JP
6080 page_group_by_mobility_disabled ? "off" : "on",
6081 vm_total_pages);
f0c0b2b8 6082#ifdef CONFIG_NUMA
f88dfff5 6083 pr_info("Policy zone: %s\n", zone_names[policy_zone]);
f0c0b2b8 6084#endif
1da177e4
LT
6085}
6086
a9a9e77f
PT
6087/* If zone is ZONE_MOVABLE but memory is mirrored, it is an overlapped init */
6088static bool __meminit
6089overlap_memmap_init(unsigned long zone, unsigned long *pfn)
6090{
a9a9e77f
PT
6091 static struct memblock_region *r;
6092
6093 if (mirrored_kernelcore && zone == ZONE_MOVABLE) {
6094 if (!r || *pfn >= memblock_region_memory_end_pfn(r)) {
cc6de168 6095 for_each_mem_region(r) {
a9a9e77f
PT
6096 if (*pfn < memblock_region_memory_end_pfn(r))
6097 break;
6098 }
6099 }
6100 if (*pfn >= memblock_region_memory_base_pfn(r) &&
6101 memblock_is_mirror(r)) {
6102 *pfn = memblock_region_memory_end_pfn(r);
6103 return true;
6104 }
6105 }
a9a9e77f
PT
6106 return false;
6107}
6108
1da177e4
LT
6109/*
6110 * Initially all pages are reserved - free ones are freed
c6ffc5ca 6111 * up by memblock_free_all() once the early boot process is
1da177e4 6112 * done. Non-atomic initialization, single-pass.
d882c006
DH
6113 *
6114 * All aligned pageblocks are initialized to the specified migratetype
6115 * (usually MIGRATE_MOVABLE). Besides setting the migratetype, no related
6116 * zone stats (e.g., nr_isolate_pageblock) are touched.
1da177e4 6117 */
c09b4240 6118void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
d882c006
DH
6119 unsigned long start_pfn,
6120 enum meminit_context context,
6121 struct vmem_altmap *altmap, int migratetype)
1da177e4 6122{
a9a9e77f 6123 unsigned long pfn, end_pfn = start_pfn + size;
d0dc12e8 6124 struct page *page;
1da177e4 6125
22b31eec
HD
6126 if (highest_memmap_pfn < end_pfn - 1)
6127 highest_memmap_pfn = end_pfn - 1;
6128
966cf44f 6129#ifdef CONFIG_ZONE_DEVICE
4b94ffdc
DW
6130 /*
6131 * Honor reservation requested by the driver for this ZONE_DEVICE
966cf44f
AD
6132 * memory. We limit the total number of pages to initialize to just
6133 * those that might contain the memory mapping. We will defer the
6134 * ZONE_DEVICE page initialization until after we have released
6135 * the hotplug lock.
4b94ffdc 6136 */
966cf44f
AD
6137 if (zone == ZONE_DEVICE) {
6138 if (!altmap)
6139 return;
6140
6141 if (start_pfn == altmap->base_pfn)
6142 start_pfn += altmap->reserve;
6143 end_pfn = altmap->base_pfn + vmem_altmap_offset(altmap);
6144 }
6145#endif
4b94ffdc 6146
948c436e 6147 for (pfn = start_pfn; pfn < end_pfn; ) {
a2f3aa02 6148 /*
b72d0ffb
AM
6149 * There can be holes in boot-time mem_map[]s handed to this
6150 * function. They do not exist on hotplugged memory.
a2f3aa02 6151 */
c1d0da83 6152 if (context == MEMINIT_EARLY) {
a9a9e77f
PT
6153 if (overlap_memmap_init(zone, &pfn))
6154 continue;
6155 if (defer_init(nid, pfn, end_pfn))
6156 break;
a2f3aa02 6157 }
ac5d2539 6158
d0dc12e8
PT
6159 page = pfn_to_page(pfn);
6160 __init_single_page(page, pfn, zone, nid);
c1d0da83 6161 if (context == MEMINIT_HOTPLUG)
d483da5b 6162 __SetPageReserved(page);
d0dc12e8 6163
ac5d2539 6164 /*
d882c006
DH
6165 * Usually, we want to mark the pageblock MIGRATE_MOVABLE,
6166 * such that unmovable allocations won't be scattered all
6167 * over the place during system boot.
ac5d2539 6168 */
4eb29bd9 6169 if (IS_ALIGNED(pfn, pageblock_nr_pages)) {
d882c006 6170 set_pageblock_migratetype(page, migratetype);
9b6e63cb 6171 cond_resched();
ac5d2539 6172 }
948c436e 6173 pfn++;
1da177e4
LT
6174 }
6175}
6176
966cf44f
AD
6177#ifdef CONFIG_ZONE_DEVICE
6178void __ref memmap_init_zone_device(struct zone *zone,
6179 unsigned long start_pfn,
1f8d75c1 6180 unsigned long nr_pages,
966cf44f
AD
6181 struct dev_pagemap *pgmap)
6182{
1f8d75c1 6183 unsigned long pfn, end_pfn = start_pfn + nr_pages;
966cf44f 6184 struct pglist_data *pgdat = zone->zone_pgdat;
514caf23 6185 struct vmem_altmap *altmap = pgmap_altmap(pgmap);
966cf44f
AD
6186 unsigned long zone_idx = zone_idx(zone);
6187 unsigned long start = jiffies;
6188 int nid = pgdat->node_id;
6189
46d945ae 6190 if (WARN_ON_ONCE(!pgmap || zone_idx(zone) != ZONE_DEVICE))
966cf44f
AD
6191 return;
6192
6193 /*
6194 * The call to memmap_init_zone should have already taken care
6195 * of the pages reserved for the memmap, so we can just jump to
6196 * the end of that region and start processing the device pages.
6197 */
514caf23 6198 if (altmap) {
966cf44f 6199 start_pfn = altmap->base_pfn + vmem_altmap_offset(altmap);
1f8d75c1 6200 nr_pages = end_pfn - start_pfn;
966cf44f
AD
6201 }
6202
6203 for (pfn = start_pfn; pfn < end_pfn; pfn++) {
6204 struct page *page = pfn_to_page(pfn);
6205
6206 __init_single_page(page, pfn, zone_idx, nid);
6207
6208 /*
6209 * Mark page reserved as it will need to wait for onlining
6210 * phase for it to be fully associated with a zone.
6211 *
6212 * We can use the non-atomic __set_bit operation for setting
6213 * the flag as we are still initializing the pages.
6214 */
6215 __SetPageReserved(page);
6216
6217 /*
8a164fef
CH
6218 * ZONE_DEVICE pages union ->lru with a ->pgmap back pointer
6219 * and zone_device_data. It is a bug if a ZONE_DEVICE page is
6220 * ever freed or placed on a driver-private list.
966cf44f
AD
6221 */
6222 page->pgmap = pgmap;
8a164fef 6223 page->zone_device_data = NULL;
966cf44f
AD
6224
6225 /*
6226 * Mark the block movable so that blocks are reserved for
6227 * movable at startup. This will force kernel allocations
6228 * to reserve their blocks rather than leaking throughout
6229 * the address space during boot when many long-lived
6230 * kernel allocations are made.
6231 *
c1d0da83 6232 * Please note that MEMINIT_HOTPLUG path doesn't clear memmap
ba72b4c8 6233 * because this is done early in section_activate()
966cf44f 6234 */
4eb29bd9 6235 if (IS_ALIGNED(pfn, pageblock_nr_pages)) {
966cf44f
AD
6236 set_pageblock_migratetype(page, MIGRATE_MOVABLE);
6237 cond_resched();
6238 }
6239 }
6240
fdc029b1 6241 pr_info("%s initialised %lu pages in %ums\n", __func__,
1f8d75c1 6242 nr_pages, jiffies_to_msecs(jiffies - start));
966cf44f
AD
6243}
6244
6245#endif
1e548deb 6246static void __meminit zone_init_free_lists(struct zone *zone)
1da177e4 6247{
7aeb09f9 6248 unsigned int order, t;
b2a0ac88
MG
6249 for_each_migratetype_order(order, t) {
6250 INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
1da177e4
LT
6251 zone->free_area[order].nr_free = 0;
6252 }
6253}
6254
dfb3ccd0 6255void __meminit __weak memmap_init(unsigned long size, int nid,
73a6e474
BH
6256 unsigned long zone,
6257 unsigned long range_start_pfn)
dfb3ccd0 6258{
73a6e474
BH
6259 unsigned long start_pfn, end_pfn;
6260 unsigned long range_end_pfn = range_start_pfn + size;
6261 int i;
6262
6263 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
6264 start_pfn = clamp(start_pfn, range_start_pfn, range_end_pfn);
6265 end_pfn = clamp(end_pfn, range_start_pfn, range_end_pfn);
6266
6267 if (end_pfn > start_pfn) {
6268 size = end_pfn - start_pfn;
6269 memmap_init_zone(size, nid, zone, start_pfn,
d882c006 6270 MEMINIT_EARLY, NULL, MIGRATE_MOVABLE);
73a6e474
BH
6271 }
6272 }
dfb3ccd0 6273}
1da177e4 6274
7cd2b0a3 6275static int zone_batchsize(struct zone *zone)
e7c8d5c9 6276{
3a6be87f 6277#ifdef CONFIG_MMU
e7c8d5c9
CL
6278 int batch;
6279
6280 /*
6281 * The per-cpu-pages pools are set to around 1/1000th of the
d8a759b5 6282 * size of the zone.
e7c8d5c9 6283 */
9705bea5 6284 batch = zone_managed_pages(zone) / 1024;
d8a759b5
AL
6285 /* But no more than a meg. */
6286 if (batch * PAGE_SIZE > 1024 * 1024)
6287 batch = (1024 * 1024) / PAGE_SIZE;
e7c8d5c9
CL
6288 batch /= 4; /* We effectively *= 4 below */
6289 if (batch < 1)
6290 batch = 1;
6291
6292 /*
0ceaacc9
NP
6293 * Clamp the batch to a 2^n - 1 value. Having a power
6294 * of 2 value was found to be more likely to have
6295 * suboptimal cache aliasing properties in some cases.
e7c8d5c9 6296 *
0ceaacc9
NP
6297 * For example if 2 tasks are alternately allocating
6298 * batches of pages, one task can end up with a lot
6299 * of pages of one half of the possible page colors
6300 * and the other with pages of the other colors.
e7c8d5c9 6301 */
9155203a 6302 batch = rounddown_pow_of_two(batch + batch/2) - 1;
ba56e91c 6303
e7c8d5c9 6304 return batch;
3a6be87f
DH
6305
6306#else
6307 /* The deferral and batching of frees should be suppressed under NOMMU
6308 * conditions.
6309 *
6310 * The problem is that NOMMU needs to be able to allocate large chunks
6311 * of contiguous memory as there's no hardware page translation to
6312 * assemble apparent contiguous memory from discontiguous pages.
6313 *
6314 * Queueing large contiguous runs of pages for batching, however,
6315 * causes the pages to actually be freed in smaller chunks. As there
6316 * can be a significant delay between the individual batches being
6317 * recycled, this leads to the once large chunks of space being
6318 * fragmented and becoming unavailable for high-order allocations.
6319 */
6320 return 0;
6321#endif
e7c8d5c9
CL
6322}
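
As a worked example of the MMU branch, assume 4 KiB pages and a zone with 1 GiB of managed memory (262144 pages): 262144 / 1024 = 256, which sits exactly at the 1 MiB cap; dividing by 4 gives 64; and rounddown_pow_of_two(64 + 32) - 1 = 64 - 1 = 63, so such a zone ends up with a batch of 63 pages.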
6323
8d7a8fa9 6324/*
5c3ad2eb
VB
6325 * pcp->high and pcp->batch values are related and generally batch is lower
6326 * than high. They are also related to pcp->count such that count is lower
6327 * than high, and as soon as it reaches high, the pcplist is flushed.
8d7a8fa9 6328 *
5c3ad2eb
VB
6329 * However, guaranteeing these relations at all times would require e.g. write
6330 * barriers here but also careful usage of read barriers at the read side, and
6331 * thus be prone to error and bad for performance. Thus the update only prevents
6332 * store tearing. Any new users of pcp->batch and pcp->high should ensure they
6333 * can cope with those fields changing asynchronously, and fully trust only the
6334 * pcp->count field on the local CPU with interrupts disabled.
8d7a8fa9
CS
6335 *
6336 * mutex_is_locked(&pcp_batch_high_lock) required when calling this function
6337 * outside of boot time (or some other assurance that no concurrent updaters
6338 * exist).
6339 */
6340static void pageset_update(struct per_cpu_pages *pcp, unsigned long high,
6341 unsigned long batch)
6342{
5c3ad2eb
VB
6343 WRITE_ONCE(pcp->batch, batch);
6344 WRITE_ONCE(pcp->high, high);
8d7a8fa9
CS
6345}
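
The asymmetry described above means readers must sample these fields once and tolerate stale values. A sketch of the matching read-side pattern, with a hypothetical helper name:

#include <linux/compiler.h>
#include <linux/mmzone.h>

/*
 * Read pcp->batch exactly once so a concurrent pageset_update() cannot
 * hand us a torn value; a stale value is fine, a torn one is not.
 */
static int demo_read_batch(struct per_cpu_pages *pcp)
{
	return READ_ONCE(pcp->batch);
}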
6346
88c90dbc 6347static void pageset_init(struct per_cpu_pageset *p)
2caaad41
CL
6348{
6349 struct per_cpu_pages *pcp;
5f8dcc21 6350 int migratetype;
2caaad41 6351
1c6fe946
MD
6352 memset(p, 0, sizeof(*p));
6353
3dfa5721 6354 pcp = &p->pcp;
5f8dcc21
MG
6355 for (migratetype = 0; migratetype < MIGRATE_PCPTYPES; migratetype++)
6356 INIT_LIST_HEAD(&pcp->lists[migratetype]);
2caaad41 6357
69a8396a
VB
6358 /*
6359 * Set batch and high values safe for a boot pageset. A true percpu
6360 * pageset's initialization will update them subsequently. Here we don't
6361 * need to be as careful as pageset_update() as nobody can access the
6362 * pageset yet.
6363 */
952eaf81
VB
6364 pcp->high = BOOT_PAGESET_HIGH;
6365 pcp->batch = BOOT_PAGESET_BATCH;
88c90dbc
CS
6366}
6367
3b1f3658 6368static void __zone_set_pageset_high_and_batch(struct zone *zone, unsigned long high,
ec6e8c7e
VB
6369 unsigned long batch)
6370{
6371 struct per_cpu_pageset *p;
6372 int cpu;
6373
6374 for_each_possible_cpu(cpu) {
6375 p = per_cpu_ptr(zone->pageset, cpu);
6376 pageset_update(&p->pcp, high, batch);
6377 }
6378}
6379
8ad4b1fb 6380/*
0a8b4f1d 6381 * Calculate and set new high and batch values for all per-cpu pagesets of a
7115ac6e 6382 * zone, based on the zone's size and the percpu_pagelist_fraction sysctl.
8ad4b1fb 6383 */
0a8b4f1d 6384static void zone_set_pageset_high_and_batch(struct zone *zone)
56cef2b8 6385{
7115ac6e
VB
6386 unsigned long new_high, new_batch;
6387
6388 if (percpu_pagelist_fraction) {
6389 new_high = zone_managed_pages(zone) / percpu_pagelist_fraction;
6390 new_batch = max(1UL, new_high / 4);
6391 if ((new_high / 4) > (PAGE_SHIFT * 8))
6392 new_batch = PAGE_SHIFT * 8;
6393 } else {
6394 new_batch = zone_batchsize(zone);
6395 new_high = 6 * new_batch;
6396 new_batch = max(1UL, 1 * new_batch);
6397 }
169f6c19 6398
952eaf81
VB
6399 if (zone->pageset_high == new_high &&
6400 zone->pageset_batch == new_batch)
6401 return;
6402
6403 zone->pageset_high = new_high;
6404 zone->pageset_batch = new_batch;
6405
ec6e8c7e 6406 __zone_set_pageset_high_and_batch(zone, new_high, new_batch);
169f6c19
CS
6407}
6408
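/*
 * Worked example (illustrative): on the default path a batch of 63 gives
 * pcp->high = 6 * 63 = 378 pages. With percpu_pagelist_fraction = 8 on a
 * 1 GiB zone (262144 managed pages, 4 KiB pages, so PAGE_SHIFT = 12),
 * new_high = 262144 / 8 = 32768 and new_high / 4 = 8192 exceeds
 * PAGE_SHIFT * 8 = 96, so the batch is clamped to 96.
 */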
72675e13 6409void __meminit setup_zone_pageset(struct zone *zone)
319774e2 6410{
0a8b4f1d 6411 struct per_cpu_pageset *p;
319774e2 6412 int cpu;
0a8b4f1d 6413
319774e2 6414 zone->pageset = alloc_percpu(struct per_cpu_pageset);
0a8b4f1d
VB
6415 for_each_possible_cpu(cpu) {
6416 p = per_cpu_ptr(zone->pageset, cpu);
6417 pageset_init(p);
6418 }
6419
6420 zone_set_pageset_high_and_batch(zone);
319774e2
WF
6421}
6422
2caaad41 6423/*
99dcc3e5
CL
6424 * Allocate per cpu pagesets and initialize them.
6425 * Before this call only boot pagesets were available.
e7c8d5c9 6426 */
99dcc3e5 6427void __init setup_per_cpu_pageset(void)
e7c8d5c9 6428{
b4911ea2 6429 struct pglist_data *pgdat;
99dcc3e5 6430 struct zone *zone;
b418a0f9 6431 int __maybe_unused cpu;
e7c8d5c9 6432
319774e2
WF
6433 for_each_populated_zone(zone)
6434 setup_zone_pageset(zone);
b4911ea2 6435
b418a0f9
SD
6436#ifdef CONFIG_NUMA
6437 /*
6438 * Unpopulated zones continue using the boot pagesets.
6439 * The numa stats for these pagesets need to be reset.
6440 * Otherwise, they will end up skewing the stats of
6441 * the nodes these zones are associated with.
6442 */
6443 for_each_possible_cpu(cpu) {
6444 struct per_cpu_pageset *pcp = &per_cpu(boot_pageset, cpu);
6445 memset(pcp->vm_numa_stat_diff, 0,
6446 sizeof(pcp->vm_numa_stat_diff));
6447 }
6448#endif
6449
b4911ea2
MG
6450 for_each_online_pgdat(pgdat)
6451 pgdat->per_cpu_nodestats =
6452 alloc_percpu(struct per_cpu_nodestat);
e7c8d5c9
CL
6453}
6454
c09b4240 6455static __meminit void zone_pcp_init(struct zone *zone)
ed8ece2e 6456{
99dcc3e5
CL
6457 /*
6458 * per cpu subsystem is not up at this point. The following code
6459 * relies on the ability of the linker to provide the
6460 * offset of a (static) per cpu variable into the per cpu area.
6461 */
6462 zone->pageset = &boot_pageset;
952eaf81
VB
6463 zone->pageset_high = BOOT_PAGESET_HIGH;
6464 zone->pageset_batch = BOOT_PAGESET_BATCH;
ed8ece2e 6465
b38a8725 6466 if (populated_zone(zone))
99dcc3e5
CL
6467 printk(KERN_DEBUG " %s zone: %lu pages, LIFO batch:%u\n",
6468 zone->name, zone->present_pages,
6469 zone_batchsize(zone));
ed8ece2e
DH
6470}
6471
dc0bbf3b 6472void __meminit init_currently_empty_zone(struct zone *zone,
718127cc 6473 unsigned long zone_start_pfn,
b171e409 6474 unsigned long size)
ed8ece2e
DH
6475{
6476 struct pglist_data *pgdat = zone->zone_pgdat;
8f416836 6477 int zone_idx = zone_idx(zone) + 1;
9dcb8b68 6478
8f416836
WY
6479 if (zone_idx > pgdat->nr_zones)
6480 pgdat->nr_zones = zone_idx;
ed8ece2e 6481
ed8ece2e
DH
6482 zone->zone_start_pfn = zone_start_pfn;
6483
708614e6
MG
6484 mminit_dprintk(MMINIT_TRACE, "memmap_init",
6485 "Initialising map node %d zone %lu pfns %lu -> %lu\n",
6486 pgdat->node_id,
6487 (unsigned long)zone_idx(zone),
6488 zone_start_pfn, (zone_start_pfn + size));
6489
1e548deb 6490 zone_init_free_lists(zone);
9dcb8b68 6491 zone->initialized = 1;
ed8ece2e
DH
6492}
6493
c713216d
MG
6494/**
6495 * get_pfn_range_for_nid - Return the start and end page frames for a node
88ca3b94
RD
6496 * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned.
6497 * @start_pfn: Passed by reference. On return, it will have the node start_pfn.
6498 * @end_pfn: Passed by reference. On return, it will have the node end_pfn.
c713216d
MG
6499 *
6500 * It returns the start and end page frame of a node based on information
7d018176 6501 * provided by memblock_set_node(). If called for a node
c713216d 6502 * with no available memory, a warning is printed and the start and end
88ca3b94 6503 * PFNs will be 0.
c713216d 6504 */
bbe5d993 6505void __init get_pfn_range_for_nid(unsigned int nid,
c713216d
MG
6506 unsigned long *start_pfn, unsigned long *end_pfn)
6507{
c13291a5 6508 unsigned long this_start_pfn, this_end_pfn;
c713216d 6509 int i;
c13291a5 6510
c713216d
MG
6511 *start_pfn = -1UL;
6512 *end_pfn = 0;
6513
c13291a5
TH
6514 for_each_mem_pfn_range(i, nid, &this_start_pfn, &this_end_pfn, NULL) {
6515 *start_pfn = min(*start_pfn, this_start_pfn);
6516 *end_pfn = max(*end_pfn, this_end_pfn);
c713216d
MG
6517 }
6518
633c0666 6519 if (*start_pfn == -1UL)
c713216d 6520 *start_pfn = 0;
c713216d
MG
6521}
6522
2a1e274a
MG
6523/*
6524 * This finds a zone that can be used for ZONE_MOVABLE pages. The
6525 * assumption is made that zones within a node are ordered in monotonic
6526 * increasing memory addresses so that the "highest" populated zone is used
6527 */
b69a7288 6528static void __init find_usable_zone_for_movable(void)
2a1e274a
MG
6529{
6530 int zone_index;
6531 for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
6532 if (zone_index == ZONE_MOVABLE)
6533 continue;
6534
6535 if (arch_zone_highest_possible_pfn[zone_index] >
6536 arch_zone_lowest_possible_pfn[zone_index])
6537 break;
6538 }
6539
6540 VM_BUG_ON(zone_index == -1);
6541 movable_zone = zone_index;
6542}
6543
6544/*
6545 * The zone ranges provided by the architecture do not include ZONE_MOVABLE
25985edc 6546 * because it is sized independently of the architecture. Unlike the other zones,
2a1e274a
MG
6547 * the starting point for ZONE_MOVABLE is not fixed. It may be different
6548 * in each node depending on the size of each node and how evenly kernelcore
6549 * is distributed. This helper function adjusts the zone ranges
6550 * provided by the architecture for a given node by using the end of the
6551 * highest usable zone for ZONE_MOVABLE. This preserves the assumption that
 6552 * zones within a node are in order of monotonically increasing memory addresses
6553 */
bbe5d993 6554static void __init adjust_zone_range_for_zone_movable(int nid,
2a1e274a
MG
6555 unsigned long zone_type,
6556 unsigned long node_start_pfn,
6557 unsigned long node_end_pfn,
6558 unsigned long *zone_start_pfn,
6559 unsigned long *zone_end_pfn)
6560{
6561 /* Only adjust if ZONE_MOVABLE is on this node */
6562 if (zone_movable_pfn[nid]) {
6563 /* Size ZONE_MOVABLE */
6564 if (zone_type == ZONE_MOVABLE) {
6565 *zone_start_pfn = zone_movable_pfn[nid];
6566 *zone_end_pfn = min(node_end_pfn,
6567 arch_zone_highest_possible_pfn[movable_zone]);
6568
e506b996
XQ
6569 /* Adjust for ZONE_MOVABLE starting within this range */
6570 } else if (!mirrored_kernelcore &&
6571 *zone_start_pfn < zone_movable_pfn[nid] &&
6572 *zone_end_pfn > zone_movable_pfn[nid]) {
6573 *zone_end_pfn = zone_movable_pfn[nid];
6574
2a1e274a
MG
6575 /* Check if this whole range is within ZONE_MOVABLE */
6576 } else if (*zone_start_pfn >= zone_movable_pfn[nid])
6577 *zone_start_pfn = *zone_end_pfn;
6578 }
6579}
6580
c713216d
MG
6581/*
6582 * Return the number of pages a zone spans in a node, including holes
6583 * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
6584 */
bbe5d993 6585static unsigned long __init zone_spanned_pages_in_node(int nid,
c713216d 6586 unsigned long zone_type,
7960aedd
ZY
6587 unsigned long node_start_pfn,
6588 unsigned long node_end_pfn,
d91749c1 6589 unsigned long *zone_start_pfn,
854e8848 6590 unsigned long *zone_end_pfn)
c713216d 6591{
299c83dc
LF
6592 unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type];
6593 unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type];
b5685e92 6594 /* When hot-adding a new node from cpu_up(), the node should be empty */
f9126ab9
XQ
6595 if (!node_start_pfn && !node_end_pfn)
6596 return 0;
6597
7960aedd 6598 /* Get the start and end of the zone */
299c83dc
LF
6599 *zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high);
6600 *zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high);
2a1e274a
MG
6601 adjust_zone_range_for_zone_movable(nid, zone_type,
6602 node_start_pfn, node_end_pfn,
d91749c1 6603 zone_start_pfn, zone_end_pfn);
c713216d
MG
6604
6605 /* Check that this node has pages within the zone's required range */
d91749c1 6606 if (*zone_end_pfn < node_start_pfn || *zone_start_pfn > node_end_pfn)
c713216d
MG
6607 return 0;
6608
6609 /* Move the zone boundaries inside the node if necessary */
d91749c1
TI
6610 *zone_end_pfn = min(*zone_end_pfn, node_end_pfn);
6611 *zone_start_pfn = max(*zone_start_pfn, node_start_pfn);
c713216d
MG
6612
6613 /* Return the spanned pages */
d91749c1 6614 return *zone_end_pfn - *zone_start_pfn;
c713216d
MG
6615}
6616
6617/*
6618 * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
88ca3b94 6619 * then all holes in the requested range will be accounted for.
c713216d 6620 */
bbe5d993 6621unsigned long __init __absent_pages_in_range(int nid,
c713216d
MG
6622 unsigned long range_start_pfn,
6623 unsigned long range_end_pfn)
6624{
96e907d1
TH
6625 unsigned long nr_absent = range_end_pfn - range_start_pfn;
6626 unsigned long start_pfn, end_pfn;
6627 int i;
c713216d 6628
96e907d1
TH
6629 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
6630 start_pfn = clamp(start_pfn, range_start_pfn, range_end_pfn);
6631 end_pfn = clamp(end_pfn, range_start_pfn, range_end_pfn);
6632 nr_absent -= end_pfn - start_pfn;
c713216d 6633 }
96e907d1 6634 return nr_absent;
c713216d
MG
6635}
6636
6637/**
6638 * absent_pages_in_range - Return number of page frames in holes within a range
6639 * @start_pfn: The start PFN to start searching for holes
6640 * @end_pfn: The end PFN to stop searching for holes
6641 *
a862f68a 6642 * Return: the number of page frames in memory holes within a range.
c713216d
MG
6643 */
6644unsigned long __init absent_pages_in_range(unsigned long start_pfn,
6645 unsigned long end_pfn)
6646{
6647 return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn);
6648}
6649
6650/* Return the number of page frames in holes in a zone on a node */
bbe5d993 6651static unsigned long __init zone_absent_pages_in_node(int nid,
c713216d 6652 unsigned long zone_type,
7960aedd 6653 unsigned long node_start_pfn,
854e8848 6654 unsigned long node_end_pfn)
c713216d 6655{
96e907d1
TH
6656 unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type];
6657 unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type];
9c7cd687 6658 unsigned long zone_start_pfn, zone_end_pfn;
342332e6 6659 unsigned long nr_absent;
9c7cd687 6660
b5685e92 6661 /* When hot-adding a new node from cpu_up(), the node should be empty */
f9126ab9
XQ
6662 if (!node_start_pfn && !node_end_pfn)
6663 return 0;
6664
96e907d1
TH
6665 zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high);
6666 zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high);
9c7cd687 6667
2a1e274a
MG
6668 adjust_zone_range_for_zone_movable(nid, zone_type,
6669 node_start_pfn, node_end_pfn,
6670 &zone_start_pfn, &zone_end_pfn);
342332e6
TI
6671 nr_absent = __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
6672
6673 /*
6674 * ZONE_MOVABLE handling.
6675 * Treat pages to be ZONE_MOVABLE in ZONE_NORMAL as absent pages
6676 * and vice versa.
6677 */
e506b996
XQ
6678 if (mirrored_kernelcore && zone_movable_pfn[nid]) {
6679 unsigned long start_pfn, end_pfn;
6680 struct memblock_region *r;
6681
cc6de168 6682 for_each_mem_region(r) {
e506b996
XQ
6683 start_pfn = clamp(memblock_region_memory_base_pfn(r),
6684 zone_start_pfn, zone_end_pfn);
6685 end_pfn = clamp(memblock_region_memory_end_pfn(r),
6686 zone_start_pfn, zone_end_pfn);
6687
6688 if (zone_type == ZONE_MOVABLE &&
6689 memblock_is_mirror(r))
6690 nr_absent += end_pfn - start_pfn;
6691
6692 if (zone_type == ZONE_NORMAL &&
6693 !memblock_is_mirror(r))
6694 nr_absent += end_pfn - start_pfn;
342332e6
TI
6695 }
6696 }
6697
6698 return nr_absent;
c713216d 6699}
0e0b864e 6700
bbe5d993 6701static void __init calculate_node_totalpages(struct pglist_data *pgdat,
7960aedd 6702 unsigned long node_start_pfn,
854e8848 6703 unsigned long node_end_pfn)
c713216d 6704{
febd5949 6705 unsigned long realtotalpages = 0, totalpages = 0;
c713216d
MG
6706 enum zone_type i;
6707
febd5949
GZ
6708 for (i = 0; i < MAX_NR_ZONES; i++) {
6709 struct zone *zone = pgdat->node_zones + i;
d91749c1 6710 unsigned long zone_start_pfn, zone_end_pfn;
3f08a302 6711 unsigned long spanned, absent;
febd5949 6712 unsigned long size, real_size;
c713216d 6713
854e8848
MR
6714 spanned = zone_spanned_pages_in_node(pgdat->node_id, i,
6715 node_start_pfn,
6716 node_end_pfn,
6717 &zone_start_pfn,
6718 &zone_end_pfn);
6719 absent = zone_absent_pages_in_node(pgdat->node_id, i,
6720 node_start_pfn,
6721 node_end_pfn);
3f08a302
MR
6722
6723 size = spanned;
6724 real_size = size - absent;
6725
d91749c1
TI
6726 if (size)
6727 zone->zone_start_pfn = zone_start_pfn;
6728 else
6729 zone->zone_start_pfn = 0;
febd5949
GZ
6730 zone->spanned_pages = size;
6731 zone->present_pages = real_size;
6732
6733 totalpages += size;
6734 realtotalpages += real_size;
6735 }
6736
6737 pgdat->node_spanned_pages = totalpages;
c713216d
MG
6738 pgdat->node_present_pages = realtotalpages;
6739 printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id,
6740 realtotalpages);
6741}
6742
835c134e
MG
6743#ifndef CONFIG_SPARSEMEM
6744/*
6745 * Calculate the size of the zone->blockflags rounded to an unsigned long
d9c23400
MG
6746 * Start by making sure zonesize is a multiple of pageblock_order by rounding
6747 * up. Then use 1 NR_PAGEBLOCK_BITS worth of bits per pageblock, finally
835c134e
MG
6748 * round what is now in bits to nearest long in bits, then return it in
6749 * bytes.
6750 */
7c45512d 6751static unsigned long __init usemap_size(unsigned long zone_start_pfn, unsigned long zonesize)
835c134e
MG
6752{
6753 unsigned long usemapsize;
6754
7c45512d 6755 zonesize += zone_start_pfn & (pageblock_nr_pages-1);
d9c23400
MG
6756 usemapsize = roundup(zonesize, pageblock_nr_pages);
6757 usemapsize = usemapsize >> pageblock_order;
835c134e
MG
6758 usemapsize *= NR_PAGEBLOCK_BITS;
6759 usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long));
6760
6761 return usemapsize / 8;
6762}
6763
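/*
 * Worked example (illustrative, assuming 4 KiB pages, pageblock_order = 9
 * and NR_PAGEBLOCK_BITS = 4): a pageblock-aligned 1 GiB zone spans 262144
 * pages, i.e. 512 pageblocks, which need 512 * 4 = 2048 bits of flags;
 * rounded up to whole longs and converted to bytes, that is 256 bytes of
 * pageblock_flags for the zone.
 */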
7cc2a959 6764static void __ref setup_usemap(struct pglist_data *pgdat,
7c45512d
LT
6765 struct zone *zone,
6766 unsigned long zone_start_pfn,
6767 unsigned long zonesize)
835c134e 6768{
7c45512d 6769 unsigned long usemapsize = usemap_size(zone_start_pfn, zonesize);
835c134e 6770 zone->pageblock_flags = NULL;
23a7052a 6771 if (usemapsize) {
6782832e 6772 zone->pageblock_flags =
26fb3dae
MR
6773 memblock_alloc_node(usemapsize, SMP_CACHE_BYTES,
6774 pgdat->node_id);
23a7052a
MR
6775 if (!zone->pageblock_flags)
6776 panic("Failed to allocate %ld bytes for zone %s pageblock flags on node %d\n",
6777 usemapsize, zone->name, pgdat->node_id);
6778 }
835c134e
MG
6779}
6780#else
7c45512d
LT
6781static inline void setup_usemap(struct pglist_data *pgdat, struct zone *zone,
6782 unsigned long zone_start_pfn, unsigned long zonesize) {}
835c134e
MG
6783#endif /* CONFIG_SPARSEMEM */
6784
d9c23400 6785#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
ba72cb8c 6786
d9c23400 6787/* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */
03e85f9d 6788void __init set_pageblock_order(void)
d9c23400 6789{
955c1cd7
AM
6790 unsigned int order;
6791
d9c23400
MG
6792 /* Check that pageblock_nr_pages has not already been setup */
6793 if (pageblock_order)
6794 return;
6795
955c1cd7
AM
6796 if (HPAGE_SHIFT > PAGE_SHIFT)
6797 order = HUGETLB_PAGE_ORDER;
6798 else
6799 order = MAX_ORDER - 1;
6800
d9c23400
MG
6801 /*
6802 * Assume the largest contiguous order of interest is a huge page.
955c1cd7
AM
6803 * This value may be variable depending on boot parameters on IA64 and
6804 * powerpc.
d9c23400
MG
6805 */
6806 pageblock_order = order;
6807}
6808#else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
6809
ba72cb8c
MG
6810/*
6811 * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order()
955c1cd7
AM
6812 * is unused as pageblock_order is set at compile-time. See
6813 * include/linux/pageblock-flags.h for the values of pageblock_order based on
6814 * the kernel config
ba72cb8c 6815 */
03e85f9d 6816void __init set_pageblock_order(void)
ba72cb8c 6817{
ba72cb8c 6818}
d9c23400
MG
6819
6820#endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
6821
03e85f9d 6822static unsigned long __init calc_memmap_size(unsigned long spanned_pages,
7cc2a959 6823 unsigned long present_pages)
01cefaef
JL
6824{
6825 unsigned long pages = spanned_pages;
6826
6827 /*
6828 * Provide a more accurate estimation if there are holes within
6829 * the zone and SPARSEMEM is in use. If there are holes within the
6830 * zone, each populated memory region may cost us one or two extra
6831 * memmap pages due to alignment because memmap pages for each
89d790ab 6832 * populated region may not be naturally aligned on a page boundary.
01cefaef
JL
6833 * So the (present_pages >> 4) heuristic is a tradeoff for that.
6834 */
6835 if (spanned_pages > present_pages + (present_pages >> 4) &&
6836 IS_ENABLED(CONFIG_SPARSEMEM))
6837 pages = present_pages;
6838
6839 return PAGE_ALIGN(pages * sizeof(struct page)) >> PAGE_SHIFT;
6840}
6841
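/*
 * Worked example (illustrative, assuming a 64-byte struct page and 4 KiB
 * pages): a fully populated 1 GiB zone spans 262144 pages, so its memmap
 * needs 262144 * 64 bytes = 16 MiB, i.e. 4096 pages, roughly 1.6% of the
 * zone.
 */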
ace1db39
OS
6842#ifdef CONFIG_TRANSPARENT_HUGEPAGE
6843static void pgdat_init_split_queue(struct pglist_data *pgdat)
6844{
364c1eeb
YS
6845 struct deferred_split *ds_queue = &pgdat->deferred_split_queue;
6846
6847 spin_lock_init(&ds_queue->split_queue_lock);
6848 INIT_LIST_HEAD(&ds_queue->split_queue);
6849 ds_queue->split_queue_len = 0;
ace1db39
OS
6850}
6851#else
6852static void pgdat_init_split_queue(struct pglist_data *pgdat) {}
6853#endif
6854
6855#ifdef CONFIG_COMPACTION
6856static void pgdat_init_kcompactd(struct pglist_data *pgdat)
6857{
6858 init_waitqueue_head(&pgdat->kcompactd_wait);
6859}
6860#else
6861static void pgdat_init_kcompactd(struct pglist_data *pgdat) {}
6862#endif
6863
03e85f9d 6864static void __meminit pgdat_init_internals(struct pglist_data *pgdat)
1da177e4 6865{
208d54e5 6866 pgdat_resize_init(pgdat);
ace1db39 6867
ace1db39
OS
6868 pgdat_init_split_queue(pgdat);
6869 pgdat_init_kcompactd(pgdat);
6870
1da177e4 6871 init_waitqueue_head(&pgdat->kswapd_wait);
5515061d 6872 init_waitqueue_head(&pgdat->pfmemalloc_wait);
ace1db39 6873
eefa864b 6874 pgdat_page_ext_init(pgdat);
867e5e1d 6875 lruvec_init(&pgdat->__lruvec);
03e85f9d
OS
6876}
6877
6878static void __meminit zone_init_internals(struct zone *zone, enum zone_type idx, int nid,
6879 unsigned long remaining_pages)
6880{
9705bea5 6881 atomic_long_set(&zone->managed_pages, remaining_pages);
03e85f9d
OS
6882 zone_set_nid(zone, nid);
6883 zone->name = zone_names[idx];
6884 zone->zone_pgdat = NODE_DATA(nid);
6885 spin_lock_init(&zone->lock);
6886 zone_seqlock_init(zone);
6887 zone_pcp_init(zone);
6888}
6889
6890/*
6891 * Set up the zone data structures
6892 * - init pgdat internals
6893 * - init all zones belonging to this node
6894 *
6895 * NOTE: this function is only called during memory hotplug
6896 */
6897#ifdef CONFIG_MEMORY_HOTPLUG
6898void __ref free_area_init_core_hotplug(int nid)
6899{
6900 enum zone_type z;
6901 pg_data_t *pgdat = NODE_DATA(nid);
6902
6903 pgdat_init_internals(pgdat);
6904 for (z = 0; z < MAX_NR_ZONES; z++)
6905 zone_init_internals(&pgdat->node_zones[z], z, nid, 0);
6906}
6907#endif
6908
6909/*
6910 * Set up the zone data structures:
6911 * - mark all pages reserved
6912 * - mark all memory queues empty
6913 * - clear the memory bitmaps
6914 *
6915 * NOTE: pgdat should get zeroed by caller.
6916 * NOTE: this function is only called during early init.
6917 */
6918static void __init free_area_init_core(struct pglist_data *pgdat)
6919{
6920 enum zone_type j;
6921 int nid = pgdat->node_id;
5f63b720 6922
03e85f9d 6923 pgdat_init_internals(pgdat);
385386cf
JW
6924 pgdat->per_cpu_nodestats = &boot_nodestats;
6925
1da177e4
LT
6926 for (j = 0; j < MAX_NR_ZONES; j++) {
6927 struct zone *zone = pgdat->node_zones + j;
e6943859 6928 unsigned long size, freesize, memmap_pages;
d91749c1 6929 unsigned long zone_start_pfn = zone->zone_start_pfn;
1da177e4 6930
febd5949 6931 size = zone->spanned_pages;
e6943859 6932 freesize = zone->present_pages;
1da177e4 6933
0e0b864e 6934 /*
9feedc9d 6935 * Adjust freesize so that it accounts for how much memory
0e0b864e
MG
6936 * is used by this zone for memmap. This affects the watermark
6937 * and per-cpu initialisations
6938 */
e6943859 6939 memmap_pages = calc_memmap_size(size, freesize);
ba914f48
ZH
6940 if (!is_highmem_idx(j)) {
6941 if (freesize >= memmap_pages) {
6942 freesize -= memmap_pages;
6943 if (memmap_pages)
6944 printk(KERN_DEBUG
6945 " %s zone: %lu pages used for memmap\n",
6946 zone_names[j], memmap_pages);
6947 } else
1170532b 6948 pr_warn(" %s zone: %lu pages exceeds freesize %lu\n",
ba914f48
ZH
6949 zone_names[j], memmap_pages, freesize);
6950 }
0e0b864e 6951
6267276f 6952 /* Account for reserved pages */
9feedc9d
JL
6953 if (j == 0 && freesize > dma_reserve) {
6954 freesize -= dma_reserve;
d903ef9f 6955 printk(KERN_DEBUG " %s zone: %lu pages reserved\n",
6267276f 6956 zone_names[0], dma_reserve);
0e0b864e
MG
6957 }
6958
98d2b0eb 6959 if (!is_highmem_idx(j))
9feedc9d 6960 nr_kernel_pages += freesize;
01cefaef
JL
6961 /* Charge for highmem memmap if there are enough kernel pages */
6962 else if (nr_kernel_pages > memmap_pages * 2)
6963 nr_kernel_pages -= memmap_pages;
9feedc9d 6964 nr_all_pages += freesize;
1da177e4 6965
9feedc9d
JL
6966 /*
6967 * Set an approximate value for lowmem here, it will be adjusted
6968 * when the bootmem allocator frees pages into the buddy system.
6969 * And all highmem pages will be managed by the buddy system.
6970 */
03e85f9d 6971 zone_init_internals(zone, j, nid, freesize);
81c0a2bb 6972
d883c6cf 6973 if (!size)
1da177e4
LT
6974 continue;
6975
955c1cd7 6976 set_pageblock_order();
d883c6cf
JK
6977 setup_usemap(pgdat, zone, zone_start_pfn, size);
6978 init_currently_empty_zone(zone, zone_start_pfn, size);
76cdd58e 6979 memmap_init(size, nid, j, zone_start_pfn);
1da177e4
LT
6980 }
6981}
6982
0cd842f9 6983#ifdef CONFIG_FLAT_NODE_MEM_MAP
bd721ea7 6984static void __ref alloc_node_mem_map(struct pglist_data *pgdat)
1da177e4 6985{
b0aeba74 6986 unsigned long __maybe_unused start = 0;
a1c34a3b
LA
6987 unsigned long __maybe_unused offset = 0;
6988
1da177e4
LT
6989 /* Skip empty nodes */
6990 if (!pgdat->node_spanned_pages)
6991 return;
6992
b0aeba74
TL
6993 start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
6994 offset = pgdat->node_start_pfn - start;
1da177e4
LT
6995 /* ia64 gets its own node_mem_map, before this, without bootmem */
6996 if (!pgdat->node_mem_map) {
b0aeba74 6997 unsigned long size, end;
d41dee36
AW
6998 struct page *map;
6999
e984bb43
BP
7000 /*
7001 * The zone's endpoints aren't required to be MAX_ORDER
7002 * aligned but the node_mem_map endpoints must be in order
7003 * for the buddy allocator to function correctly.
7004 */
108bcc96 7005 end = pgdat_end_pfn(pgdat);
e984bb43
BP
7006 end = ALIGN(end, MAX_ORDER_NR_PAGES);
7007 size = (end - start) * sizeof(struct page);
26fb3dae
MR
7008 map = memblock_alloc_node(size, SMP_CACHE_BYTES,
7009 pgdat->node_id);
23a7052a
MR
7010 if (!map)
7011 panic("Failed to allocate %ld bytes for node %d memory map\n",
7012 size, pgdat->node_id);
a1c34a3b 7013 pgdat->node_mem_map = map + offset;
1da177e4 7014 }
0cd842f9
OS
7015 pr_debug("%s: node %d, pgdat %08lx, node_mem_map %08lx\n",
7016 __func__, pgdat->node_id, (unsigned long)pgdat,
7017 (unsigned long)pgdat->node_mem_map);
12d810c1 7018#ifndef CONFIG_NEED_MULTIPLE_NODES
1da177e4
LT
7019 /*
7020 * With no DISCONTIG, the global mem_map is just set as node 0's
7021 */
c713216d 7022 if (pgdat == NODE_DATA(0)) {
1da177e4 7023 mem_map = NODE_DATA(0)->node_mem_map;
c713216d 7024 if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
a1c34a3b 7025 mem_map -= offset;
c713216d 7026 }
1da177e4
LT
7027#endif
7028}
0cd842f9
OS
7029#else
7030static void __ref alloc_node_mem_map(struct pglist_data *pgdat) { }
7031#endif /* CONFIG_FLAT_NODE_MEM_MAP */
1da177e4 7032
0188dc98
OS
7033#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
7034static inline void pgdat_set_deferred_range(pg_data_t *pgdat)
7035{
0188dc98
OS
7036 pgdat->first_deferred_pfn = ULONG_MAX;
7037}
7038#else
7039static inline void pgdat_set_deferred_range(pg_data_t *pgdat) {}
7040#endif
7041
854e8848 7042static void __init free_area_init_node(int nid)
1da177e4 7043{
9109fb7b 7044 pg_data_t *pgdat = NODE_DATA(nid);
7960aedd
ZY
7045 unsigned long start_pfn = 0;
7046 unsigned long end_pfn = 0;
9109fb7b 7047
88fdf75d 7048 /* pg_data_t should be reset to zero when it's allocated */
97a225e6 7049 WARN_ON(pgdat->nr_zones || pgdat->kswapd_highest_zoneidx);
88fdf75d 7050
854e8848 7051 get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
88fdf75d 7052
1da177e4 7053 pgdat->node_id = nid;
854e8848 7054 pgdat->node_start_pfn = start_pfn;
75ef7184 7055 pgdat->per_cpu_nodestats = NULL;
854e8848 7056
8d29e18a 7057 pr_info("Initmem setup node %d [mem %#018Lx-%#018Lx]\n", nid,
4ada0c5a
ZL
7058 (u64)start_pfn << PAGE_SHIFT,
7059 end_pfn ? ((u64)end_pfn << PAGE_SHIFT) - 1 : 0);
854e8848 7060 calculate_node_totalpages(pgdat, start_pfn, end_pfn);
1da177e4
LT
7061
7062 alloc_node_mem_map(pgdat);
0188dc98 7063 pgdat_set_deferred_range(pgdat);
1da177e4 7064
7f3eb55b 7065 free_area_init_core(pgdat);
1da177e4
LT
7066}
7067
bc9331a1 7068void __init free_area_init_memoryless_node(int nid)
3f08a302 7069{
854e8848 7070 free_area_init_node(nid);
3f08a302
MR
7071}
7072
aca52c39 7073#if !defined(CONFIG_FLAT_NODE_MEM_MAP)
ec393a0f 7074/*
4b094b78
DH
7075 * Initialize all valid struct pages in the range [spfn, epfn) and mark them
7076 * PageReserved(). Return the number of struct pages that were initialized.
ec393a0f 7077 */
4b094b78 7078static u64 __init init_unavailable_range(unsigned long spfn, unsigned long epfn)
ec393a0f
PT
7079{
7080 unsigned long pfn;
7081 u64 pgcnt = 0;
7082
7083 for (pfn = spfn; pfn < epfn; pfn++) {
7084 if (!pfn_valid(ALIGN_DOWN(pfn, pageblock_nr_pages))) {
7085 pfn = ALIGN_DOWN(pfn, pageblock_nr_pages)
7086 + pageblock_nr_pages - 1;
7087 continue;
7088 }
4b094b78
DH
7089 /*
7090 * Use a fake node/zone (0) for now. Some of these pages
7091 * (in memblock.reserved but not in memblock.memory) will
7092 * get re-initialized via reserve_bootmem_region() later.
7093 */
7094 __init_single_page(pfn_to_page(pfn), pfn, 0, 0);
7095 __SetPageReserved(pfn_to_page(pfn));
ec393a0f
PT
7096 pgcnt++;
7097 }
7098
7099 return pgcnt;
7100}
7101
a4a3ede2
PT
7102/*
7103 * Only struct pages that are backed by physical memory are zeroed and
7104 * initialized by going through __init_single_page(). But, there are some
7105 * struct pages which are reserved in memblock allocator and their fields
7106 * may be accessed (for example page_to_pfn() on some configuration accesses
4b094b78 7107 * flags). We must explicitly initialize those struct pages.
907ec5fc
NH
7108 *
7109 * This function also addresses a similar issue where struct pages are left
7110 * uninitialized because the physical address range is not covered by
7111 * memblock.memory or memblock.reserved. That could happen when memblock
e822969c
DH
7112 * layout is manually configured via memmap=, or when the highest physical
7113 * address (max_pfn) does not end on a section boundary.
a4a3ede2 7114 */
4b094b78 7115static void __init init_unavailable_mem(void)
a4a3ede2
PT
7116{
7117 phys_addr_t start, end;
a4a3ede2 7118 u64 i, pgcnt;
907ec5fc 7119 phys_addr_t next = 0;
a4a3ede2
PT
7120
7121 /*
907ec5fc 7122 * Loop through unavailable ranges not covered by memblock.memory.
a4a3ede2
PT
7123 */
7124 pgcnt = 0;
6e245ad4 7125 for_each_mem_range(i, &start, &end) {
ec393a0f 7126 if (next < start)
4b094b78
DH
7127 pgcnt += init_unavailable_range(PFN_DOWN(next),
7128 PFN_UP(start));
907ec5fc
NH
7129 next = end;
7130 }
e822969c
DH
7131
7132 /*
7133 * Early sections always have a fully populated memmap for the whole
7134 * section - see pfn_valid(). If the last section has holes at the
7135 * end and that section is marked "online", the memmap will be
7136 * considered initialized. Make sure that memmap has a well defined
7137 * state.
7138 */
4b094b78
DH
7139 pgcnt += init_unavailable_range(PFN_DOWN(next),
7140 round_up(max_pfn, PAGES_PER_SECTION));
907ec5fc 7141
a4a3ede2
PT
7142 /*
7143 * Struct pages that do not have backing memory. This could be because
7144 * firmware is using some of this memory, or for some other reasons.
a4a3ede2
PT
7145 */
7146 if (pgcnt)
907ec5fc 7147 pr_info("Zeroed struct page in unavailable ranges: %lld pages", pgcnt);
a4a3ede2 7148}
4b094b78
DH
7149#else
7150static inline void __init init_unavailable_mem(void)
7151{
7152}
aca52c39 7153#endif /* !CONFIG_FLAT_NODE_MEM_MAP */
a4a3ede2 7154
418508c1
MS
7155#if MAX_NUMNODES > 1
7156/*
7157 * Figure out the number of possible node ids.
7158 */
f9872caf 7159void __init setup_nr_node_ids(void)
418508c1 7160{
904a9553 7161 unsigned int highest;
418508c1 7162
904a9553 7163 highest = find_last_bit(node_possible_map.bits, MAX_NUMNODES);
418508c1
MS
7164 nr_node_ids = highest + 1;
7165}
418508c1
MS
7166#endif
7167
1e01979c
TH
7168/**
7169 * node_map_pfn_alignment - determine the maximum internode alignment
7170 *
7171 * This function should be called after node map is populated and sorted.
7172 * It calculates the maximum power of two alignment which can distinguish
7173 * all the nodes.
7174 *
7175 * For example, if all nodes are 1GiB and aligned to 1GiB, the return value
7176 * would indicate 1GiB alignment with (1 << (30 - PAGE_SHIFT)). If the
7177 * nodes are shifted by 256MiB, 256MiB. Note that if only the last node is
7178 * shifted, 1GiB is enough and this function will indicate so.
7179 *
7180 * This is used to test whether pfn -> nid mapping of the chosen memory
7181 * model has fine enough granularity to avoid incorrect mapping for the
7182 * populated node map.
7183 *
a862f68a 7184 * Return: the determined alignment in pfn's. 0 if there is no alignment
1e01979c
TH
7185 * requirement (single node).
7186 */
7187unsigned long __init node_map_pfn_alignment(void)
7188{
7189 unsigned long accl_mask = 0, last_end = 0;
c13291a5 7190 unsigned long start, end, mask;
98fa15f3 7191 int last_nid = NUMA_NO_NODE;
c13291a5 7192 int i, nid;
1e01979c 7193
c13291a5 7194 for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid) {
1e01979c
TH
7195 if (!start || last_nid < 0 || last_nid == nid) {
7196 last_nid = nid;
7197 last_end = end;
7198 continue;
7199 }
7200
7201 /*
7202 * Start with a mask granular enough to pin-point to the
7203 * start pfn and tick off bits one-by-one until it becomes
7204 * too coarse to separate the current node from the last.
7205 */
7206 mask = ~((1 << __ffs(start)) - 1);
7207 while (mask && last_end <= (start & (mask << 1)))
7208 mask <<= 1;
7209
7210 /* accumulate all internode masks */
7211 accl_mask |= mask;
7212 }
7213
7214 /* convert mask to number of pages */
7215 return ~accl_mask + 1;
7216}
7217
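/*
 * Illustrative sketch (not part of the kernel source): the guarded
 * standalone program below runs the mask-accumulation loop above on two
 * hypothetical nodes -- node 0 covering pfns [0, 0x40000) and node 1
 * covering [0x50000, 0x90000) with 4 KiB pages -- and reports a 1 GiB
 * internode alignment (0x40000 pfns). The helper name is invented for the
 * example.
 */
#if 0	/* standalone userspace sketch, shown for illustration only */
#include <stdio.h>

/* Index of the lowest set bit, a userspace stand-in for __ffs(). */
static unsigned long ffs_example(unsigned long x)
{
	unsigned long i = 0;

	while (!(x & 1UL)) {
		x >>= 1;
		i++;
	}
	return i;
}

int main(void)
{
	unsigned long starts[] = { 0x00000, 0x50000 };
	unsigned long ends[]   = { 0x40000, 0x90000 };
	unsigned long accl_mask = 0, last_end = 0, mask;
	int last_nid = -1, i;

	for (i = 0; i < 2; i++) {
		unsigned long start = starts[i], end = ends[i];

		if (!start || last_nid < 0 || last_nid == i) {
			last_nid = i;
			last_end = end;
			continue;
		}
		/* Same tick-off-bits loop as node_map_pfn_alignment(). */
		mask = ~((1UL << ffs_example(start)) - 1);
		while (mask && last_end <= (start & (mask << 1)))
			mask <<= 1;
		accl_mask |= mask;
	}
	printf("alignment = %#lx pfns\n", ~accl_mask + 1);	/* 0x40000 */
	return 0;
}
#endif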
c713216d
MG
7218/**
7219 * find_min_pfn_with_active_regions - Find the minimum PFN registered
7220 *
a862f68a 7221 * Return: the minimum PFN based on information provided via
7d018176 7222 * memblock_set_node().
c713216d
MG
7223 */
7224unsigned long __init find_min_pfn_with_active_regions(void)
7225{
8a1b25fe 7226 return PHYS_PFN(memblock_start_of_DRAM());
c713216d
MG
7227}
7228
37b07e41
LS
7229/*
7230 * early_calculate_totalpages()
7231 * Sum pages in active regions for movable zone.
4b0ef1fe 7232 * Populate N_MEMORY for calculating usable_nodes.
37b07e41 7233 */
484f51f8 7234static unsigned long __init early_calculate_totalpages(void)
7e63efef 7235{
7e63efef 7236 unsigned long totalpages = 0;
c13291a5
TH
7237 unsigned long start_pfn, end_pfn;
7238 int i, nid;
7239
7240 for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
7241 unsigned long pages = end_pfn - start_pfn;
7e63efef 7242
37b07e41
LS
7243 totalpages += pages;
7244 if (pages)
4b0ef1fe 7245 node_set_state(nid, N_MEMORY);
37b07e41 7246 }
b8af2941 7247 return totalpages;
7e63efef
MG
7248}
7249
2a1e274a
MG
7250/*
7251 * Find the PFN the Movable zone begins in each node. Kernel memory
7252 * is spread evenly between nodes as long as the nodes have enough
7253 * memory. When they don't, some nodes will have more kernelcore than
7254 * others
7255 */
b224ef85 7256static void __init find_zone_movable_pfns_for_nodes(void)
2a1e274a
MG
7257{
7258 int i, nid;
7259 unsigned long usable_startpfn;
7260 unsigned long kernelcore_node, kernelcore_remaining;
66918dcd 7261 /* save the state before borrow the nodemask */
4b0ef1fe 7262 nodemask_t saved_node_state = node_states[N_MEMORY];
37b07e41 7263 unsigned long totalpages = early_calculate_totalpages();
4b0ef1fe 7264 int usable_nodes = nodes_weight(node_states[N_MEMORY]);
136199f0 7265 struct memblock_region *r;
b2f3eebe
TC
7266
7267 /* Need to find movable_zone earlier when movable_node is specified. */
7268 find_usable_zone_for_movable();
7269
7270 /*
7271 * If movable_node is specified, ignore kernelcore and movablecore
7272 * options.
7273 */
7274 if (movable_node_is_enabled()) {
cc6de168 7275 for_each_mem_region(r) {
136199f0 7276 if (!memblock_is_hotpluggable(r))
b2f3eebe
TC
7277 continue;
7278
d622abf7 7279 nid = memblock_get_region_node(r);
b2f3eebe 7280
136199f0 7281 usable_startpfn = PFN_DOWN(r->base);
b2f3eebe
TC
7282 zone_movable_pfn[nid] = zone_movable_pfn[nid] ?
7283 min(usable_startpfn, zone_movable_pfn[nid]) :
7284 usable_startpfn;
7285 }
7286
7287 goto out2;
7288 }
2a1e274a 7289
342332e6
TI
7290 /*
7291 * If kernelcore=mirror is specified, ignore movablecore option
7292 */
7293 if (mirrored_kernelcore) {
7294 bool mem_below_4gb_not_mirrored = false;
7295
cc6de168 7296 for_each_mem_region(r) {
342332e6
TI
7297 if (memblock_is_mirror(r))
7298 continue;
7299
d622abf7 7300 nid = memblock_get_region_node(r);
342332e6
TI
7301
7302 usable_startpfn = memblock_region_memory_base_pfn(r);
7303
7304 if (usable_startpfn < 0x100000) {
7305 mem_below_4gb_not_mirrored = true;
7306 continue;
7307 }
7308
7309 zone_movable_pfn[nid] = zone_movable_pfn[nid] ?
7310 min(usable_startpfn, zone_movable_pfn[nid]) :
7311 usable_startpfn;
7312 }
7313
7314 if (mem_below_4gb_not_mirrored)
633bf2fe 7315 pr_warn("This configuration results in unmirrored kernel memory.\n");
342332e6
TI
7316
7317 goto out2;
7318 }
7319
7e63efef 7320 /*
a5c6d650
DR
7321 * If kernelcore=nn% or movablecore=nn% was specified, calculate the
7322 * amount of necessary memory.
7323 */
7324 if (required_kernelcore_percent)
7325 required_kernelcore = (totalpages * 100 * required_kernelcore_percent) /
7326 10000UL;
7327 if (required_movablecore_percent)
7328 required_movablecore = (totalpages * 100 * required_movablecore_percent) /
7329 10000UL;
7330
7331 /*
 7332 * If movablecore= was specified, calculate the corresponding size of
7e63efef
MG
 7333 * kernelcore so that memory usable for
7334 * any allocation type is evenly spread. If both kernelcore
7335 * and movablecore are specified, then the value of kernelcore
7336 * will be used for required_kernelcore if it's greater than
7337 * what movablecore would have allowed.
7338 */
7339 if (required_movablecore) {
7e63efef
MG
7340 unsigned long corepages;
7341
7342 /*
7343 * Round-up so that ZONE_MOVABLE is at least as large as what
7344 * was requested by the user
7345 */
7346 required_movablecore =
7347 roundup(required_movablecore, MAX_ORDER_NR_PAGES);
9fd745d4 7348 required_movablecore = min(totalpages, required_movablecore);
7e63efef
MG
7349 corepages = totalpages - required_movablecore;
7350
7351 required_kernelcore = max(required_kernelcore, corepages);
7352 }
7353
bde304bd
XQ
7354 /*
7355 * If kernelcore was not specified or kernelcore size is larger
7356 * than totalpages, there is no ZONE_MOVABLE.
7357 */
7358 if (!required_kernelcore || required_kernelcore >= totalpages)
66918dcd 7359 goto out;
2a1e274a
MG
7360
7361 /* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
2a1e274a
MG
7362 usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];
7363
7364restart:
7365 /* Spread kernelcore memory as evenly as possible throughout nodes */
7366 kernelcore_node = required_kernelcore / usable_nodes;
4b0ef1fe 7367 for_each_node_state(nid, N_MEMORY) {
c13291a5
TH
7368 unsigned long start_pfn, end_pfn;
7369
2a1e274a
MG
7370 /*
7371 * Recalculate kernelcore_node if the division per node
7372 * now exceeds what is necessary to satisfy the requested
7373 * amount of memory for the kernel
7374 */
7375 if (required_kernelcore < kernelcore_node)
7376 kernelcore_node = required_kernelcore / usable_nodes;
7377
7378 /*
7379 * As the map is walked, we track how much memory is usable
7380 * by the kernel using kernelcore_remaining. When it is
7381 * 0, the rest of the node is usable by ZONE_MOVABLE
7382 */
7383 kernelcore_remaining = kernelcore_node;
7384
7385 /* Go through each range of PFNs within this node */
c13291a5 7386 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
2a1e274a
MG
7387 unsigned long size_pages;
7388
c13291a5 7389 start_pfn = max(start_pfn, zone_movable_pfn[nid]);
2a1e274a
MG
7390 if (start_pfn >= end_pfn)
7391 continue;
7392
7393 /* Account for what is only usable for kernelcore */
7394 if (start_pfn < usable_startpfn) {
7395 unsigned long kernel_pages;
7396 kernel_pages = min(end_pfn, usable_startpfn)
7397 - start_pfn;
7398
7399 kernelcore_remaining -= min(kernel_pages,
7400 kernelcore_remaining);
7401 required_kernelcore -= min(kernel_pages,
7402 required_kernelcore);
7403
7404 /* Continue if range is now fully accounted */
7405 if (end_pfn <= usable_startpfn) {
7406
7407 /*
7408 * Push zone_movable_pfn to the end so
7409 * that if we have to rebalance
7410 * kernelcore across nodes, we will
7411 * not double account here
7412 */
7413 zone_movable_pfn[nid] = end_pfn;
7414 continue;
7415 }
7416 start_pfn = usable_startpfn;
7417 }
7418
7419 /*
7420 * The usable PFN range for ZONE_MOVABLE is from
7421 * start_pfn->end_pfn. Calculate size_pages as the
7422 * number of pages used as kernelcore
7423 */
7424 size_pages = end_pfn - start_pfn;
7425 if (size_pages > kernelcore_remaining)
7426 size_pages = kernelcore_remaining;
7427 zone_movable_pfn[nid] = start_pfn + size_pages;
7428
7429 /*
7430 * Some kernelcore has been met, update counts and
7431 * break if the kernelcore for this node has been
b8af2941 7432 * satisfied
2a1e274a
MG
7433 */
7434 required_kernelcore -= min(required_kernelcore,
7435 size_pages);
7436 kernelcore_remaining -= size_pages;
7437 if (!kernelcore_remaining)
7438 break;
7439 }
7440 }
7441
7442 /*
7443 * If there is still required_kernelcore, we do another pass with one
7444 * less node in the count. This will push zone_movable_pfn[nid] further
7445 * along on the nodes that still have memory until kernelcore is
b8af2941 7446 * satisfied
2a1e274a
MG
7447 */
7448 usable_nodes--;
7449 if (usable_nodes && required_kernelcore > usable_nodes)
7450 goto restart;
7451
b2f3eebe 7452out2:
2a1e274a
MG
7453 /* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
7454 for (nid = 0; nid < MAX_NUMNODES; nid++)
7455 zone_movable_pfn[nid] =
7456 roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);
66918dcd 7457
20e6926d 7458out:
66918dcd 7459 /* restore the node_state */
4b0ef1fe 7460 node_states[N_MEMORY] = saved_node_state;
2a1e274a
MG
7461}
7462
4b0ef1fe
LJ
7463/* Any regular or high memory on that node ? */
7464static void check_for_memory(pg_data_t *pgdat, int nid)
37b07e41 7465{
37b07e41
LS
7466 enum zone_type zone_type;
7467
4b0ef1fe 7468 for (zone_type = 0; zone_type <= ZONE_MOVABLE - 1; zone_type++) {
37b07e41 7469 struct zone *zone = &pgdat->node_zones[zone_type];
b38a8725 7470 if (populated_zone(zone)) {
7b0e0c0e
OS
7471 if (IS_ENABLED(CONFIG_HIGHMEM))
7472 node_set_state(nid, N_HIGH_MEMORY);
7473 if (zone_type <= ZONE_NORMAL)
4b0ef1fe 7474 node_set_state(nid, N_NORMAL_MEMORY);
d0048b0e
BL
7475 break;
7476 }
37b07e41 7477 }
37b07e41
LS
7478}
7479
51930df5
MR
7480/*
 7481 * Some architectures, e.g. ARC, may have ZONE_HIGHMEM below ZONE_NORMAL. For
 7482 * such cases we allow max_zone_pfn to be sorted in descending order
7483 */
7484bool __weak arch_has_descending_max_zone_pfns(void)
7485{
7486 return false;
7487}
7488
c713216d 7489/**
9691a071 7490 * free_area_init - Initialise all pg_data_t and zone data
88ca3b94 7491 * @max_zone_pfn: an array of max PFNs for each zone
c713216d
MG
7492 *
7493 * This will call free_area_init_node() for each active node in the system.
7d018176 7494 * Using the page ranges provided by memblock_set_node(), the size of each
c713216d
MG
7495 * zone in each node and their holes is calculated. If the maximum PFN
7496 * between two adjacent zones match, it is assumed that the zone is empty.
7497 * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed
7498 * that arch_max_dma32_pfn has no pages. It is also assumed that a zone
7499 * starts where the previous one ended. For example, ZONE_DMA32 starts
7500 * at arch_max_dma_pfn.
7501 */
9691a071 7502void __init free_area_init(unsigned long *max_zone_pfn)
c713216d 7503{
c13291a5 7504 unsigned long start_pfn, end_pfn;
51930df5
MR
7505 int i, nid, zone;
7506 bool descending;
a6af2bc3 7507
c713216d
MG
7508 /* Record where the zone boundaries are */
7509 memset(arch_zone_lowest_possible_pfn, 0,
7510 sizeof(arch_zone_lowest_possible_pfn));
7511 memset(arch_zone_highest_possible_pfn, 0,
7512 sizeof(arch_zone_highest_possible_pfn));
90cae1fe
OH
7513
7514 start_pfn = find_min_pfn_with_active_regions();
51930df5 7515 descending = arch_has_descending_max_zone_pfns();
90cae1fe
OH
7516
7517 for (i = 0; i < MAX_NR_ZONES; i++) {
51930df5
MR
7518 if (descending)
7519 zone = MAX_NR_ZONES - i - 1;
7520 else
7521 zone = i;
7522
7523 if (zone == ZONE_MOVABLE)
2a1e274a 7524 continue;
90cae1fe 7525
51930df5
MR
7526 end_pfn = max(max_zone_pfn[zone], start_pfn);
7527 arch_zone_lowest_possible_pfn[zone] = start_pfn;
7528 arch_zone_highest_possible_pfn[zone] = end_pfn;
90cae1fe
OH
7529
7530 start_pfn = end_pfn;
c713216d 7531 }
2a1e274a
MG
7532
7533 /* Find the PFNs that ZONE_MOVABLE begins at in each node */
7534 memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn));
b224ef85 7535 find_zone_movable_pfns_for_nodes();
c713216d 7536
c713216d 7537 /* Print out the zone ranges */
f88dfff5 7538 pr_info("Zone ranges:\n");
2a1e274a
MG
7539 for (i = 0; i < MAX_NR_ZONES; i++) {
7540 if (i == ZONE_MOVABLE)
7541 continue;
f88dfff5 7542 pr_info(" %-8s ", zone_names[i]);
72f0ba02
DR
7543 if (arch_zone_lowest_possible_pfn[i] ==
7544 arch_zone_highest_possible_pfn[i])
f88dfff5 7545 pr_cont("empty\n");
72f0ba02 7546 else
8d29e18a
JG
7547 pr_cont("[mem %#018Lx-%#018Lx]\n",
7548 (u64)arch_zone_lowest_possible_pfn[i]
7549 << PAGE_SHIFT,
7550 ((u64)arch_zone_highest_possible_pfn[i]
a62e2f4f 7551 << PAGE_SHIFT) - 1);
2a1e274a
MG
7552 }
7553
7554 /* Print out the PFNs ZONE_MOVABLE begins at in each node */
f88dfff5 7555 pr_info("Movable zone start for each node\n");
2a1e274a
MG
7556 for (i = 0; i < MAX_NUMNODES; i++) {
7557 if (zone_movable_pfn[i])
8d29e18a
JG
7558 pr_info(" Node %d: %#018Lx\n", i,
7559 (u64)zone_movable_pfn[i] << PAGE_SHIFT);
2a1e274a 7560 }
c713216d 7561
f46edbd1
DW
7562 /*
7563 * Print out the early node map, and initialize the
7564 * subsection-map relative to active online memory ranges to
7565 * enable future "sub-section" extensions of the memory map.
7566 */
f88dfff5 7567 pr_info("Early memory node ranges\n");
f46edbd1 7568 for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
8d29e18a
JG
7569 pr_info(" node %3d: [mem %#018Lx-%#018Lx]\n", nid,
7570 (u64)start_pfn << PAGE_SHIFT,
7571 ((u64)end_pfn << PAGE_SHIFT) - 1);
f46edbd1
DW
7572 subsection_map_init(start_pfn, end_pfn - start_pfn);
7573 }
c713216d
MG
7574
7575 /* Initialise every node */
708614e6 7576 mminit_verify_pageflags_layout();
8ef82866 7577 setup_nr_node_ids();
4b094b78 7578 init_unavailable_mem();
c713216d
MG
7579 for_each_online_node(nid) {
7580 pg_data_t *pgdat = NODE_DATA(nid);
854e8848 7581 free_area_init_node(nid);
37b07e41
LS
7582
7583 /* Any memory on that node */
7584 if (pgdat->node_present_pages)
4b0ef1fe
LJ
7585 node_set_state(nid, N_MEMORY);
7586 check_for_memory(pgdat, nid);
c713216d
MG
7587 }
7588}
2a1e274a 7589
a5c6d650
DR
7590static int __init cmdline_parse_core(char *p, unsigned long *core,
7591 unsigned long *percent)
2a1e274a
MG
7592{
7593 unsigned long long coremem;
a5c6d650
DR
7594 char *endptr;
7595
2a1e274a
MG
7596 if (!p)
7597 return -EINVAL;
7598
a5c6d650
DR
7599 /* Value may be a percentage of total memory, otherwise bytes */
7600 coremem = simple_strtoull(p, &endptr, 0);
7601 if (*endptr == '%') {
7602 /* Paranoid check for percent values greater than 100 */
7603 WARN_ON(coremem > 100);
2a1e274a 7604
a5c6d650
DR
7605 *percent = coremem;
7606 } else {
7607 coremem = memparse(p, &p);
7608 /* Paranoid check that UL is enough for the coremem value */
7609 WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX);
2a1e274a 7610
a5c6d650
DR
7611 *core = coremem >> PAGE_SHIFT;
7612 *percent = 0UL;
7613 }
2a1e274a
MG
7614 return 0;
7615}
ed7ed365 7616
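/*
 * Worked example (illustrative): "kernelcore=512M" is parsed by memparse()
 * into 536870912 bytes, i.e. required_kernelcore = 131072 pages with 4 KiB
 * pages, while "kernelcore=30%" stores required_kernelcore_percent = 30 and
 * is later converted to totalpages * 100 * 30 / 10000, i.e. 30% of
 * totalpages, in find_zone_movable_pfns_for_nodes().
 */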
7e63efef
MG
7617/*
7618 * kernelcore=size sets the amount of memory for use for allocations that
7619 * cannot be reclaimed or migrated.
7620 */
7621static int __init cmdline_parse_kernelcore(char *p)
7622{
342332e6
TI
7623 /* parse kernelcore=mirror */
7624 if (parse_option_str(p, "mirror")) {
7625 mirrored_kernelcore = true;
7626 return 0;
7627 }
7628
a5c6d650
DR
7629 return cmdline_parse_core(p, &required_kernelcore,
7630 &required_kernelcore_percent);
7e63efef
MG
7631}
7632
7633/*
7634 * movablecore=size sets the amount of memory for use for allocations that
7635 * can be reclaimed or migrated.
7636 */
7637static int __init cmdline_parse_movablecore(char *p)
7638{
a5c6d650
DR
7639 return cmdline_parse_core(p, &required_movablecore,
7640 &required_movablecore_percent);
7e63efef
MG
7641}
7642
ed7ed365 7643early_param("kernelcore", cmdline_parse_kernelcore);
7e63efef 7644early_param("movablecore", cmdline_parse_movablecore);
ed7ed365 7645
c3d5f5f0
JL
7646void adjust_managed_page_count(struct page *page, long count)
7647{
9705bea5 7648 atomic_long_add(count, &page_zone(page)->managed_pages);
ca79b0c2 7649 totalram_pages_add(count);
3dcc0571
JL
7650#ifdef CONFIG_HIGHMEM
7651 if (PageHighMem(page))
ca79b0c2 7652 totalhigh_pages_add(count);
3dcc0571 7653#endif
c3d5f5f0 7654}
3dcc0571 7655EXPORT_SYMBOL(adjust_managed_page_count);
c3d5f5f0 7656
e5cb113f 7657unsigned long free_reserved_area(void *start, void *end, int poison, const char *s)
69afade7 7658{
11199692
JL
7659 void *pos;
7660 unsigned long pages = 0;
69afade7 7661
11199692
JL
7662 start = (void *)PAGE_ALIGN((unsigned long)start);
7663 end = (void *)((unsigned long)end & PAGE_MASK);
7664 for (pos = start; pos < end; pos += PAGE_SIZE, pages++) {
0d834328
DH
7665 struct page *page = virt_to_page(pos);
7666 void *direct_map_addr;
7667
7668 /*
7669 * 'direct_map_addr' might be different from 'pos'
7670 * because some architectures' virt_to_page()
7671 * work with aliases. Getting the direct map
7672 * address ensures that we get a _writeable_
7673 * alias for the memset().
7674 */
7675 direct_map_addr = page_address(page);
c746170d
VF
7676 /*
7677 * Perform a kasan-unchecked memset() since this memory
7678 * has not been initialized.
7679 */
7680 direct_map_addr = kasan_reset_tag(direct_map_addr);
dbe67df4 7681 if ((unsigned int)poison <= 0xFF)
0d834328
DH
7682 memset(direct_map_addr, poison, PAGE_SIZE);
7683
7684 free_reserved_page(page);
69afade7
JL
7685 }
7686
7687 if (pages && s)
adb1fe9a
JP
7688 pr_info("Freeing %s memory: %ldK\n",
7689 s, pages << (PAGE_SHIFT - 10));
69afade7
JL
7690
7691 return pages;
7692}
7693
cfa11e08
JL
7694#ifdef CONFIG_HIGHMEM
7695void free_highmem_page(struct page *page)
7696{
7697 __free_reserved_page(page);
ca79b0c2 7698 totalram_pages_inc();
9705bea5 7699 atomic_long_inc(&page_zone(page)->managed_pages);
ca79b0c2 7700 totalhigh_pages_inc();
cfa11e08
JL
7701}
7702#endif
7703
7ee3d4e8
JL
7704
7705void __init mem_init_print_info(const char *str)
7706{
7707 unsigned long physpages, codesize, datasize, rosize, bss_size;
7708 unsigned long init_code_size, init_data_size;
7709
7710 physpages = get_num_physpages();
7711 codesize = _etext - _stext;
7712 datasize = _edata - _sdata;
7713 rosize = __end_rodata - __start_rodata;
7714 bss_size = __bss_stop - __bss_start;
7715 init_data_size = __init_end - __init_begin;
7716 init_code_size = _einittext - _sinittext;
7717
7718 /*
7719 * Detect special cases and adjust section sizes accordingly:
7720 * 1) .init.* may be embedded into .data sections
7721 * 2) .init.text.* may be out of [__init_begin, __init_end],
7722 * please refer to arch/tile/kernel/vmlinux.lds.S.
7723 * 3) .rodata.* may be embedded into .text or .data sections.
7724 */
7725#define adj_init_size(start, end, size, pos, adj) \
b8af2941
PK
7726 do { \
7727 if (start <= pos && pos < end && size > adj) \
7728 size -= adj; \
7729 } while (0)
7ee3d4e8
JL
7730
7731 adj_init_size(__init_begin, __init_end, init_data_size,
7732 _sinittext, init_code_size);
7733 adj_init_size(_stext, _etext, codesize, _sinittext, init_code_size);
7734 adj_init_size(_sdata, _edata, datasize, __init_begin, init_data_size);
7735 adj_init_size(_stext, _etext, codesize, __start_rodata, rosize);
7736 adj_init_size(_sdata, _edata, datasize, __start_rodata, rosize);
7737
7738#undef adj_init_size
7739
756a025f 7740 pr_info("Memory: %luK/%luK available (%luK kernel code, %luK rwdata, %luK rodata, %luK init, %luK bss, %luK reserved, %luK cma-reserved"
7ee3d4e8 7741#ifdef CONFIG_HIGHMEM
756a025f 7742 ", %luK highmem"
7ee3d4e8 7743#endif
756a025f
JP
7744 "%s%s)\n",
7745 nr_free_pages() << (PAGE_SHIFT - 10),
7746 physpages << (PAGE_SHIFT - 10),
7747 codesize >> 10, datasize >> 10, rosize >> 10,
7748 (init_data_size + init_code_size) >> 10, bss_size >> 10,
ca79b0c2 7749 (physpages - totalram_pages() - totalcma_pages) << (PAGE_SHIFT - 10),
756a025f 7750 totalcma_pages << (PAGE_SHIFT - 10),
7ee3d4e8 7751#ifdef CONFIG_HIGHMEM
ca79b0c2 7752 totalhigh_pages() << (PAGE_SHIFT - 10),
7ee3d4e8 7753#endif
756a025f 7754 str ? ", " : "", str ? str : "");
7ee3d4e8
JL
7755}
7756
0e0b864e 7757/**
88ca3b94
RD
7758 * set_dma_reserve - set the specified number of pages reserved in the first zone
7759 * @new_dma_reserve: The number of pages to mark reserved
0e0b864e 7760 *
013110a7 7761 * The per-cpu batchsize and zone watermarks are determined by managed_pages.
0e0b864e
MG
7762 * In the DMA zone, a significant percentage may be consumed by kernel image
7763 * and other unfreeable allocations which can skew the watermarks badly. This
88ca3b94
RD
7764 * function may optionally be used to account for unfreeable pages in the
7765 * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and
7766 * smaller per-cpu batchsize.
0e0b864e
MG
7767 */
7768void __init set_dma_reserve(unsigned long new_dma_reserve)
7769{
7770 dma_reserve = new_dma_reserve;
7771}
7772
005fd4bb 7773static int page_alloc_cpu_dead(unsigned int cpu)
1da177e4 7774{
1da177e4 7775
005fd4bb
SAS
7776 lru_add_drain_cpu(cpu);
7777 drain_pages(cpu);
9f8f2172 7778
005fd4bb
SAS
7779 /*
7780 * Spill the event counters of the dead processor
7781 * into the current processors event counters.
7782 * This artificially elevates the count of the current
7783 * processor.
7784 */
7785 vm_events_fold_cpu(cpu);
9f8f2172 7786
005fd4bb
SAS
7787 /*
7788 * Zero the differential counters of the dead processor
7789 * so that the vm statistics are consistent.
7790 *
7791 * This is only okay since the processor is dead and cannot
7792 * race with what we are doing.
7793 */
7794 cpu_vm_stats_fold(cpu);
7795 return 0;
1da177e4 7796}
1da177e4 7797
e03a5125
NP
7798#ifdef CONFIG_NUMA
7799int hashdist = HASHDIST_DEFAULT;
7800
7801static int __init set_hashdist(char *str)
7802{
7803 if (!str)
7804 return 0;
7805 hashdist = simple_strtoul(str, &str, 0);
7806 return 1;
7807}
7808__setup("hashdist=", set_hashdist);
7809#endif
7810
1da177e4
LT
7811void __init page_alloc_init(void)
7812{
005fd4bb
SAS
7813 int ret;
7814
e03a5125
NP
7815#ifdef CONFIG_NUMA
7816 if (num_node_state(N_MEMORY) == 1)
7817 hashdist = 0;
7818#endif
7819
005fd4bb
SAS
7820 ret = cpuhp_setup_state_nocalls(CPUHP_PAGE_ALLOC_DEAD,
7821 "mm/page_alloc:dead", NULL,
7822 page_alloc_cpu_dead);
7823 WARN_ON(ret < 0);
1da177e4
LT
7824}
7825
cb45b0e9 7826/*
34b10060 7827 * calculate_totalreserve_pages - called when sysctl_lowmem_reserve_ratio
cb45b0e9
HA
7828 * or min_free_kbytes changes.
7829 */
7830static void calculate_totalreserve_pages(void)
7831{
7832 struct pglist_data *pgdat;
7833 unsigned long reserve_pages = 0;
2f6726e5 7834 enum zone_type i, j;
cb45b0e9
HA
7835
7836 for_each_online_pgdat(pgdat) {
281e3726
MG
7837
7838 pgdat->totalreserve_pages = 0;
7839
cb45b0e9
HA
7840 for (i = 0; i < MAX_NR_ZONES; i++) {
7841 struct zone *zone = pgdat->node_zones + i;
3484b2de 7842 long max = 0;
9705bea5 7843 unsigned long managed_pages = zone_managed_pages(zone);
cb45b0e9
HA
7844
7845 /* Find valid and maximum lowmem_reserve in the zone */
7846 for (j = i; j < MAX_NR_ZONES; j++) {
7847 if (zone->lowmem_reserve[j] > max)
7848 max = zone->lowmem_reserve[j];
7849 }
7850
41858966
MG
7851 /* we treat the high watermark as reserved pages. */
7852 max += high_wmark_pages(zone);
cb45b0e9 7853
3d6357de
AK
7854 if (max > managed_pages)
7855 max = managed_pages;
a8d01437 7856
281e3726 7857 pgdat->totalreserve_pages += max;
a8d01437 7858
cb45b0e9
HA
7859 reserve_pages += max;
7860 }
7861 }
7862 totalreserve_pages = reserve_pages;
7863}
7864
1da177e4
LT
7865/*
7866 * setup_per_zone_lowmem_reserve - called whenever
34b10060 7867 * sysctl_lowmem_reserve_ratio changes. Ensures that each zone
1da177e4
LT
7868 * has a correct pages reserved value, so an adequate number of
7869 * pages are left in the zone after a successful __alloc_pages().
7870 */
7871static void setup_per_zone_lowmem_reserve(void)
7872{
7873 struct pglist_data *pgdat;
470c61d7 7874 enum zone_type i, j;
1da177e4 7875
ec936fc5 7876 for_each_online_pgdat(pgdat) {
470c61d7
LS
7877 for (i = 0; i < MAX_NR_ZONES - 1; i++) {
7878 struct zone *zone = &pgdat->node_zones[i];
7879 int ratio = sysctl_lowmem_reserve_ratio[i];
7880 bool clear = !ratio || !zone_managed_pages(zone);
7881 unsigned long managed_pages = 0;
7882
7883 for (j = i + 1; j < MAX_NR_ZONES; j++) {
7884 if (clear) {
7885 zone->lowmem_reserve[j] = 0;
d3cda233 7886 } else {
470c61d7
LS
7887 struct zone *upper_zone = &pgdat->node_zones[j];
7888
7889 managed_pages += zone_managed_pages(upper_zone);
7890 zone->lowmem_reserve[j] = managed_pages / ratio;
d3cda233 7891 }
1da177e4
LT
7892 }
7893 }
7894 }
cb45b0e9
HA
7895
7896 /* update totalreserve_pages */
7897 calculate_totalreserve_pages();
1da177e4
LT
7898}
7899
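/*
 * Worked example (illustrative, assuming the default ratio of 256 for
 * DMA32 and 4 KiB pages): with a 16 GiB ZONE_NORMAL above it (4194304
 * managed pages), DMA32 gets lowmem_reserve[ZONE_NORMAL] =
 * 4194304 / 256 = 16384 pages, i.e. 64 MiB that a ZONE_NORMAL-capable
 * allocation must leave untouched in DMA32.
 */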
cfd3da1e 7900static void __setup_per_zone_wmarks(void)
1da177e4
LT
7901{
7902 unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
7903 unsigned long lowmem_pages = 0;
7904 struct zone *zone;
7905 unsigned long flags;
7906
7907 /* Calculate total number of !ZONE_HIGHMEM pages */
7908 for_each_zone(zone) {
7909 if (!is_highmem(zone))
9705bea5 7910 lowmem_pages += zone_managed_pages(zone);
1da177e4
LT
7911 }
7912
7913 for_each_zone(zone) {
ac924c60
AM
7914 u64 tmp;
7915
1125b4e3 7916 spin_lock_irqsave(&zone->lock, flags);
9705bea5 7917 tmp = (u64)pages_min * zone_managed_pages(zone);
ac924c60 7918 do_div(tmp, lowmem_pages);
1da177e4
LT
7919 if (is_highmem(zone)) {
7920 /*
669ed175
NP
7921 * __GFP_HIGH and PF_MEMALLOC allocations usually don't
7922 * need highmem pages, so cap pages_min to a small
7923 * value here.
7924 *
41858966 7925 * The WMARK_HIGH-WMARK_LOW and (WMARK_LOW-WMARK_MIN)
8bb4e7a2 7926 * deltas control async page reclaim, and so should
669ed175 7927 * not be capped for highmem.
1da177e4 7928 */
90ae8d67 7929 unsigned long min_pages;
1da177e4 7930
9705bea5 7931 min_pages = zone_managed_pages(zone) / 1024;
90ae8d67 7932 min_pages = clamp(min_pages, SWAP_CLUSTER_MAX, 128UL);
a9214443 7933 zone->_watermark[WMARK_MIN] = min_pages;
1da177e4 7934 } else {
669ed175
NP
7935 /*
7936 * If it's a lowmem zone, reserve a number of pages
1da177e4
LT
7937 * proportionate to the zone's size.
7938 */
a9214443 7939 zone->_watermark[WMARK_MIN] = tmp;
1da177e4
LT
7940 }
7941
795ae7a0
JW
7942 /*
7943 * Set the kswapd watermarks distance according to the
7944 * scale factor in proportion to available memory, but
7945 * ensure a minimum size on small systems.
7946 */
7947 tmp = max_t(u64, tmp >> 2,
9705bea5 7948 mult_frac(zone_managed_pages(zone),
795ae7a0
JW
7949 watermark_scale_factor, 10000));
7950
aa092591 7951 zone->watermark_boost = 0;
a9214443
MG
7952 zone->_watermark[WMARK_LOW] = min_wmark_pages(zone) + tmp;
7953 zone->_watermark[WMARK_HIGH] = min_wmark_pages(zone) + tmp * 2;
49f223a9 7954
1125b4e3 7955 spin_unlock_irqrestore(&zone->lock, flags);
1da177e4 7956 }
cb45b0e9
HA
7957
7958 /* update totalreserve_pages */
7959 calculate_totalreserve_pages();
1da177e4
LT
7960}
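
/*
 * Illustrative example (not part of the kernel source): assume a single
 * lowmem zone of 4,194,304 managed pages (16GiB with 4KiB pages),
 * min_free_kbytes = 65536 and watermark_scale_factor = 10.  Then
 *
 *	pages_min  = 65536 >> (12 - 10)                    = 16384 pages
 *	WMARK_MIN  = 16384 (the whole machine is one zone)
 *	tmp        = max(16384 / 4, 4194304 * 10 / 10000)  = 4194
 *	WMARK_LOW  = 16384 + 4194                          = 20578 pages
 *	WMARK_HIGH = 16384 + 2 * 4194                      = 24772 pages
 *
 * The zone size and sysctl values above are made up for illustration only.
 */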

/**
 * setup_per_zone_wmarks - called when min_free_kbytes changes
 * or when memory is hot-{added|removed}
 *
 * Ensures that the watermark[min,low,high] values for each zone are set
 * correctly with respect to min_free_kbytes.
 */
void setup_per_zone_wmarks(void)
{
	static DEFINE_SPINLOCK(lock);

	spin_lock(&lock);
	__setup_per_zone_wmarks();
	spin_unlock(&lock);
}

/*
 * Initialise min_free_kbytes.
 *
 * For small machines we want it small (128k min).  For large machines
 * we want it large (256MB max).  But it is not linear, because network
 * bandwidth does not increase linearly with machine size.  We use
 *
 *	min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
 *	min_free_kbytes = sqrt(lowmem_kbytes * 16)
 *
 * which yields
 *
 * 16MB:	512k
 * 32MB:	724k
 * 64MB:	1024k
 * 128MB:	1448k
 * 256MB:	2048k
 * 512MB:	2896k
 * 1024MB:	4096k
 * 2048MB:	5792k
 * 4096MB:	8192k
 * 8192MB:	11584k
 * 16384MB:	16384k
 */
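
/*
 * Worked example (illustration only): for a 128MB machine,
 * lowmem_kbytes = 131072, so
 *
 *	min_free_kbytes = sqrt(131072 * 16) = sqrt(2097152) ~= 1448k
 *
 * which matches the 128MB row of the table above.
 */
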
int __meminit init_per_zone_wmark_min(void)
{
	unsigned long lowmem_kbytes;
	int new_min_free_kbytes;

	lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);
	new_min_free_kbytes = int_sqrt(lowmem_kbytes * 16);

	if (new_min_free_kbytes > user_min_free_kbytes) {
		min_free_kbytes = new_min_free_kbytes;
		if (min_free_kbytes < 128)
			min_free_kbytes = 128;
		if (min_free_kbytes > 262144)
			min_free_kbytes = 262144;
	} else {
		pr_warn("min_free_kbytes is not updated to %d because user defined value %d is preferred\n",
				new_min_free_kbytes, user_min_free_kbytes);
	}
	setup_per_zone_wmarks();
	refresh_zone_stat_thresholds();
	setup_per_zone_lowmem_reserve();

#ifdef CONFIG_NUMA
	setup_min_unmapped_ratio();
	setup_min_slab_ratio();
#endif

	khugepaged_min_free_kbytes_update();

	return 0;
}
postcore_initcall(init_per_zone_wmark_min)

/*
 * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
 *	that we can call two helper functions whenever min_free_kbytes
 *	changes.
 */
int min_free_kbytes_sysctl_handler(struct ctl_table *table, int write,
		void *buffer, size_t *length, loff_t *ppos)
{
	int rc;

	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
	if (rc)
		return rc;

	if (write) {
		user_min_free_kbytes = min_free_kbytes;
		setup_per_zone_wmarks();
	}
	return 0;
}
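
/*
 * Usage sketch (illustration only): this handler is what runs when an
 * administrator tunes the watermarks from userspace, e.g.
 *
 *	# cat /proc/sys/vm/min_free_kbytes
 *	67584
 *	# echo 131072 > /proc/sys/vm/min_free_kbytes
 *
 * The write path above records the value in user_min_free_kbytes so that
 * a later init_per_zone_wmark_min() run will not override it.  The numbers
 * shown are illustrative.
 */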

int watermark_scale_factor_sysctl_handler(struct ctl_table *table, int write,
		void *buffer, size_t *length, loff_t *ppos)
{
	int rc;

	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
	if (rc)
		return rc;

	if (write)
		setup_per_zone_wmarks();

	return 0;
}

#ifdef CONFIG_NUMA
static void setup_min_unmapped_ratio(void)
{
	pg_data_t *pgdat;
	struct zone *zone;

	for_each_online_pgdat(pgdat)
		pgdat->min_unmapped_pages = 0;

	for_each_zone(zone)
		zone->zone_pgdat->min_unmapped_pages += (zone_managed_pages(zone) *
				sysctl_min_unmapped_ratio) / 100;
}

int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *table, int write,
		void *buffer, size_t *length, loff_t *ppos)
{
	int rc;

	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
	if (rc)
		return rc;

	setup_min_unmapped_ratio();

	return 0;
}

static void setup_min_slab_ratio(void)
{
	pg_data_t *pgdat;
	struct zone *zone;

	for_each_online_pgdat(pgdat)
		pgdat->min_slab_pages = 0;

	for_each_zone(zone)
		zone->zone_pgdat->min_slab_pages += (zone_managed_pages(zone) *
				sysctl_min_slab_ratio) / 100;
}

int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *table, int write,
		void *buffer, size_t *length, loff_t *ppos)
{
	int rc;

	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
	if (rc)
		return rc;

	setup_min_slab_ratio();

	return 0;
}
#endif

/*
 * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
 *	proc_dointvec() so that we can call setup_per_zone_lowmem_reserve()
 *	whenever sysctl_lowmem_reserve_ratio changes.
 *
 * The reserve ratio has no relation to the minimum watermarks; it only
 * makes sense as a function of the boot-time zone sizes.
 */
int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *table, int write,
		void *buffer, size_t *length, loff_t *ppos)
{
	int i;

	proc_dointvec_minmax(table, write, buffer, length, ppos);

	for (i = 0; i < MAX_NR_ZONES; i++) {
		if (sysctl_lowmem_reserve_ratio[i] < 1)
			sysctl_lowmem_reserve_ratio[i] = 0;
	}

	setup_per_zone_lowmem_reserve();
	return 0;
}

/*
 * percpu_pagelist_fraction - changes the pcp->high for each zone on each
 * cpu. It is the fraction of total pages in each zone that a hot per-cpu
 * pagelist can have before it gets flushed back to the buddy allocator.
 */
int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *table, int write,
		void *buffer, size_t *length, loff_t *ppos)
{
	struct zone *zone;
	int old_percpu_pagelist_fraction;
	int ret;

	mutex_lock(&pcp_batch_high_lock);
	old_percpu_pagelist_fraction = percpu_pagelist_fraction;

	ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
	if (!write || ret < 0)
		goto out;

	/* Sanity checking to avoid pcp imbalance */
	if (percpu_pagelist_fraction &&
	    percpu_pagelist_fraction < MIN_PERCPU_PAGELIST_FRACTION) {
		percpu_pagelist_fraction = old_percpu_pagelist_fraction;
		ret = -EINVAL;
		goto out;
	}

	/* No change? */
	if (percpu_pagelist_fraction == old_percpu_pagelist_fraction)
		goto out;

	for_each_populated_zone(zone)
		zone_set_pageset_high_and_batch(zone);
out:
	mutex_unlock(&pcp_batch_high_lock);
	return ret;
}

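/*
 * Usage sketch (illustration only): with the default value of 0 the kernel
 * sizes pcp->high automatically; writing a non-zero fraction caps it, e.g.
 *
 *	# echo 8 > /proc/sys/vm/percpu_pagelist_fraction
 *
 * limits each hot per-cpu pagelist to roughly 1/8th of the pages in its
 * zone, consistent with the comment above the handler.  The value 8 is
 * just an example.
 */
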
#ifndef __HAVE_ARCH_RESERVED_KERNEL_PAGES
/*
 * Returns the number of pages that arch has reserved but
 * is not known to alloc_large_system_hash().
 */
static unsigned long __init arch_reserved_kernel_pages(void)
{
	return 0;
}
#endif

/*
 * Adaptive scale is meant to reduce sizes of hash tables on large memory
 * machines. As memory size is increased the scale is also increased, but at
 * a slower pace.  Starting from ADAPT_SCALE_BASE (64G), every time memory
 * quadruples the scale is increased by one, which means the size of hash table
 * only doubles, instead of quadrupling as well.
 * Because 32-bit systems cannot have large physical memory, where this scaling
 * makes sense, it is disabled on such platforms.
 */
#if __BITS_PER_LONG > 32
#define ADAPT_SCALE_BASE	(64ul << 30)
#define ADAPT_SCALE_SHIFT	2
#define ADAPT_SCALE_NPAGES	(ADAPT_SCALE_BASE >> PAGE_SHIFT)
#endif

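/*
 * Worked example (illustration only): on a 512GB machine the adaptation
 * loop in alloc_large_system_hash() below walks adapt = 64G, 256G, 1024G
 * worth of pages; the first two steps are below 512GB, so scale is bumped
 * by 2.  Each extra scale step halves the number of hash buckets, so the
 * table ends up 4x smaller than a purely linear scaling would give.
 */
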
/*
 * allocate a large system hash table from bootmem
 * - it is assumed that the hash table must contain an exact power-of-2
 *   quantity of entries
 * - limit is the number of hash buckets, not the total allocation size
 */
void *__init alloc_large_system_hash(const char *tablename,
				     unsigned long bucketsize,
				     unsigned long numentries,
				     int scale,
				     int flags,
				     unsigned int *_hash_shift,
				     unsigned int *_hash_mask,
				     unsigned long low_limit,
				     unsigned long high_limit)
{
	unsigned long long max = high_limit;
	unsigned long log2qty, size;
	void *table = NULL;
	gfp_t gfp_flags;
	bool virt;

	/* allow the kernel cmdline to have a say */
	if (!numentries) {
		/* round applicable memory size up to nearest megabyte */
		numentries = nr_kernel_pages;
		numentries -= arch_reserved_kernel_pages();

		/* It isn't necessary when PAGE_SIZE >= 1MB */
		if (PAGE_SHIFT < 20)
			numentries = round_up(numentries, (1<<20)/PAGE_SIZE);

#if __BITS_PER_LONG > 32
		if (!high_limit) {
			unsigned long adapt;

			for (adapt = ADAPT_SCALE_NPAGES; adapt < numentries;
			     adapt <<= ADAPT_SCALE_SHIFT)
				scale++;
		}
#endif

		/* limit to 1 bucket per 2^scale bytes of low memory */
		if (scale > PAGE_SHIFT)
			numentries >>= (scale - PAGE_SHIFT);
		else
			numentries <<= (PAGE_SHIFT - scale);

		/* Make sure we've got at least a 0-order allocation.. */
		if (unlikely(flags & HASH_SMALL)) {
			/* Makes no sense without HASH_EARLY */
			WARN_ON(!(flags & HASH_EARLY));
			if (!(numentries >> *_hash_shift)) {
				numentries = 1UL << *_hash_shift;
				BUG_ON(!numentries);
			}
		} else if (unlikely((numentries * bucketsize) < PAGE_SIZE))
			numentries = PAGE_SIZE / bucketsize;
	}
	numentries = roundup_pow_of_two(numentries);

	/* limit allocation size to 1/16 total memory by default */
	if (max == 0) {
		max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
		do_div(max, bucketsize);
	}
	max = min(max, 0x80000000ULL);

	if (numentries < low_limit)
		numentries = low_limit;
	if (numentries > max)
		numentries = max;

	log2qty = ilog2(numentries);

	gfp_flags = (flags & HASH_ZERO) ? GFP_ATOMIC | __GFP_ZERO : GFP_ATOMIC;
	do {
		virt = false;
		size = bucketsize << log2qty;
		if (flags & HASH_EARLY) {
			if (flags & HASH_ZERO)
				table = memblock_alloc(size, SMP_CACHE_BYTES);
			else
				table = memblock_alloc_raw(size,
							   SMP_CACHE_BYTES);
		} else if (get_order(size) >= MAX_ORDER || hashdist) {
			table = __vmalloc(size, gfp_flags);
			virt = true;
		} else {
			/*
			 * If bucketsize is not a power-of-two, we may free
			 * some pages at the end of hash table which
			 * alloc_pages_exact() automatically does
			 */
			table = alloc_pages_exact(size, gfp_flags);
			kmemleak_alloc(table, size, 1, gfp_flags);
		}
	} while (!table && size > PAGE_SIZE && --log2qty);

	if (!table)
		panic("Failed to allocate %s hash table\n", tablename);

	pr_info("%s hash table entries: %ld (order: %d, %lu bytes, %s)\n",
		tablename, 1UL << log2qty, ilog2(size) - PAGE_SHIFT, size,
		virt ? "vmalloc" : "linear");

	if (_hash_shift)
		*_hash_shift = log2qty;
	if (_hash_mask)
		*_hash_mask = (1 << log2qty) - 1;

	return table;
}

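/*
 * Usage sketch (illustration only, not a real caller): a subsystem sizing
 * a boot-time hash table typically does something like
 *
 *	static struct hlist_head *example_hashtable __ro_after_init;
 *	static unsigned int example_hash_shift __ro_after_init;
 *	static unsigned int example_hash_mask __ro_after_init;
 *
 *	example_hashtable = alloc_large_system_hash("example-cache",
 *					sizeof(struct hlist_head),
 *					0,		/* size from memory */
 *					14,		/* 1 bucket per 16KB */
 *					HASH_ZERO,
 *					&example_hash_shift,
 *					&example_hash_mask,
 *					0, 0);
 *
 * The "example-*" names are hypothetical; see fs/inode.c or fs/dcache.c
 * for real callers of this interface.
 */
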
/*
 * This function checks whether the pageblock includes unmovable pages or not.
 *
 * PageLRU check without isolation or lru_lock could race so that
 * MIGRATE_MOVABLE block might include unmovable pages. And __PageMovable
 * check without lock_page also may miss some movable non-lru pages under
 * racy conditions. So you can't expect this function to be exact.
 *
 * Returns a page without holding a reference. If the caller wants to
 * dereference that page (e.g., dumping), it has to make sure that it
 * cannot get removed (e.g., via memory unplug) concurrently.
 */
struct page *has_unmovable_pages(struct zone *zone, struct page *page,
				 int migratetype, int flags)
{
	unsigned long iter = 0;
	unsigned long pfn = page_to_pfn(page);
	unsigned long offset = pfn % pageblock_nr_pages;

	if (is_migrate_cma_page(page)) {
		/*
		 * CMA allocations (alloc_contig_range) really need to mark
		 * CMA pageblocks isolated even when they are not in fact
		 * movable, so consider them movable here.
		 */
		if (is_migrate_cma(migratetype))
			return NULL;

		return page;
	}

	for (; iter < pageblock_nr_pages - offset; iter++) {
		if (!pfn_valid_within(pfn + iter))
			continue;

		page = pfn_to_page(pfn + iter);

		/*
		 * Both bootmem allocations and memory holes are marked
		 * PG_reserved and are unmovable. We can even have unmovable
		 * allocations inside ZONE_MOVABLE, for example when
		 * specifying "movablecore".
		 */
		if (PageReserved(page))
			return page;

		/*
		 * If the zone is movable and we have ruled out all reserved
		 * pages then it should be reasonably safe to assume the rest
		 * is movable.
		 */
		if (zone_idx(zone) == ZONE_MOVABLE)
			continue;

		/*
		 * Hugepages are not in LRU lists, but they're movable.
		 * THPs are on the LRU, but need to be counted as #small pages.
		 * We need not scan over tail pages because we don't
		 * handle each tail page individually in migration.
		 */
		if (PageHuge(page) || PageTransCompound(page)) {
			struct page *head = compound_head(page);
			unsigned int skip_pages;

			if (PageHuge(page)) {
				if (!hugepage_migration_supported(page_hstate(head)))
					return page;
			} else if (!PageLRU(head) && !__PageMovable(head)) {
				return page;
			}

			skip_pages = compound_nr(head) - (page - head);
			iter += skip_pages - 1;
			continue;
		}

		/*
		 * We can't use page_count without pinning a page
		 * because another CPU can free the compound page.
		 * This check already skips compound tails of THP
		 * because their page->_refcount is zero at all times.
		 */
		if (!page_ref_count(page)) {
			if (PageBuddy(page))
				iter += (1 << buddy_order(page)) - 1;
			continue;
		}

		/*
		 * The HWPoisoned page may not be in the buddy system, and
		 * page_count() is not 0.
		 */
		if ((flags & MEMORY_OFFLINE) && PageHWPoison(page))
			continue;

		/*
		 * We treat all PageOffline() pages as movable when offlining
		 * to give drivers a chance to decrement their reference count
		 * in MEM_GOING_OFFLINE in order to indicate that these pages
		 * can be offlined as there are no direct references anymore.
		 * For actually unmovable PageOffline() where the driver does
		 * not support this, we will fail later when trying to actually
		 * move these pages that still have a reference count > 0.
		 * (false negatives in this function only)
		 */
		if ((flags & MEMORY_OFFLINE) && PageOffline(page))
			continue;

		if (__PageMovable(page) || PageLRU(page))
			continue;

		/*
		 * If there are RECLAIMABLE pages, we need to check
		 * them. But right now memory offline itself doesn't call
		 * shrink_node_slabs(), and this still needs to be fixed.
		 */
		return page;
	}
	return NULL;
}

#ifdef CONFIG_CONTIG_ALLOC
static unsigned long pfn_max_align_down(unsigned long pfn)
{
	return pfn & ~(max_t(unsigned long, MAX_ORDER_NR_PAGES,
			     pageblock_nr_pages) - 1);
}

static unsigned long pfn_max_align_up(unsigned long pfn)
{
	return ALIGN(pfn, max_t(unsigned long, MAX_ORDER_NR_PAGES,
				pageblock_nr_pages));
}

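/*
 * Worked example (illustration only): with 4KiB pages, MAX_ORDER_NR_PAGES
 * of 1024 and pageblock_nr_pages of 512 (typical x86-64 defaults), the
 * larger of the two is 1024, so
 *
 *	pfn_max_align_down(5000) = 5000 & ~1023       = 4096
 *	pfn_max_align_up(5000)   = ALIGN(5000, 1024)  = 5120
 *
 * i.e. the range handed to start_isolate_page_range() below is widened to
 * whole MAX_ORDER blocks.  The PFN 5000 is an arbitrary example value.
 */
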
/* [start, end) must belong to a single zone. */
static int __alloc_contig_migrate_range(struct compact_control *cc,
					unsigned long start, unsigned long end)
{
	/* This function is based on compact_zone() from compaction.c. */
	unsigned int nr_reclaimed;
	unsigned long pfn = start;
	unsigned int tries = 0;
	int ret = 0;
	struct migration_target_control mtc = {
		.nid = zone_to_nid(cc->zone),
		.gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
	};

	migrate_prep();

	while (pfn < end || !list_empty(&cc->migratepages)) {
		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		if (list_empty(&cc->migratepages)) {
			cc->nr_migratepages = 0;
			pfn = isolate_migratepages_range(cc, pfn, end);
			if (!pfn) {
				ret = -EINTR;
				break;
			}
			tries = 0;
		} else if (++tries == 5) {
			ret = ret < 0 ? ret : -EBUSY;
			break;
		}

		nr_reclaimed = reclaim_clean_pages_from_list(cc->zone,
							     &cc->migratepages);
		cc->nr_migratepages -= nr_reclaimed;

		ret = migrate_pages(&cc->migratepages, alloc_migration_target,
				NULL, (unsigned long)&mtc, cc->mode, MR_CONTIG_RANGE);
	}
	if (ret < 0) {
		putback_movable_pages(&cc->migratepages);
		return ret;
	}
	return 0;
}

/**
 * alloc_contig_range() -- tries to allocate given range of pages
 * @start:	start PFN to allocate
 * @end:	one-past-the-last PFN to allocate
 * @migratetype:	migratetype of the underlying pageblocks (either
 *			#MIGRATE_MOVABLE or #MIGRATE_CMA).  All pageblocks
 *			in range must have the same migratetype and it must
 *			be either of the two.
 * @gfp_mask:	GFP mask to use during compaction
 *
 * The PFN range does not have to be pageblock or MAX_ORDER_NR_PAGES
 * aligned.  The PFN range must belong to a single zone.
 *
 * The first thing this routine does is attempt to MIGRATE_ISOLATE all
 * pageblocks in the range.  Once isolated, the pageblocks should not
 * be modified by others.
 *
 * Return: zero on success or negative error code.  On success all
 * pages whose PFN is in [start, end) are allocated for the caller and
 * need to be freed with free_contig_range().
 */
int alloc_contig_range(unsigned long start, unsigned long end,
		       unsigned migratetype, gfp_t gfp_mask)
{
	unsigned long outer_start, outer_end;
	unsigned int order;
	int ret = 0;

	struct compact_control cc = {
		.nr_migratepages = 0,
		.order = -1,
		.zone = page_zone(pfn_to_page(start)),
		.mode = MIGRATE_SYNC,
		.ignore_skip_hint = true,
		.no_set_skip_hint = true,
		.gfp_mask = current_gfp_context(gfp_mask),
		.alloc_contig = true,
	};
	INIT_LIST_HEAD(&cc.migratepages);

	/*
	 * What we do here is we mark all pageblocks in range as
	 * MIGRATE_ISOLATE.  Because pageblock and max order pages may
	 * have different sizes, and due to the way the page allocator
	 * works, we align the range to the biggest of the two so
	 * that the page allocator won't try to merge buddies from
	 * different pageblocks and change MIGRATE_ISOLATE to some
	 * other migration type.
	 *
	 * Once the pageblocks are marked as MIGRATE_ISOLATE, we
	 * migrate the pages from an unaligned range (ie. pages that
	 * we are interested in).  This will put all the pages in
	 * range back to page allocator as MIGRATE_ISOLATE.
	 *
	 * When this is done, we take the pages in range from page
	 * allocator removing them from the buddy system.  This way
	 * page allocator will never consider using them.
	 *
	 * This lets us mark the pageblocks back as
	 * MIGRATE_CMA/MIGRATE_MOVABLE so that free pages in the
	 * aligned range but not in the unaligned, original range are
	 * put back to page allocator so that buddy can use them.
	 */

	ret = start_isolate_page_range(pfn_max_align_down(start),
				       pfn_max_align_up(end), migratetype, 0);
	if (ret)
		return ret;

	drain_all_pages(cc.zone);

	/*
	 * In case of -EBUSY, we'd like to know which page causes the problem.
	 * So, just fall through. test_pages_isolated() has a tracepoint
	 * which will report the busy page.
	 *
	 * It is possible that busy pages could become available before
	 * the call to test_pages_isolated, and the range will actually be
	 * allocated.  So, if we fall through be sure to clear ret so that
	 * -EBUSY is not accidentally used or returned to caller.
	 */
	ret = __alloc_contig_migrate_range(&cc, start, end);
	if (ret && ret != -EBUSY)
		goto done;
	ret = 0;

	/*
	 * Pages from [start, end) are within a MAX_ORDER_NR_PAGES
	 * aligned blocks that are marked as MIGRATE_ISOLATE.  What's
	 * more, all pages in [start, end) are free in page allocator.
	 * What we are going to do is to allocate all pages from
	 * [start, end) (that is remove them from page allocator).
	 *
	 * The only problem is that pages at the beginning and at the
	 * end of interesting range may be not aligned with pages that
	 * page allocator holds, ie. they can be part of higher order
	 * pages.  Because of this, we reserve the bigger range and
	 * once this is done free the pages we are not interested in.
	 *
	 * We don't have to hold zone->lock here because the pages are
	 * isolated thus they won't get removed from buddy.
	 */

	lru_add_drain_all();

	order = 0;
	outer_start = start;
	while (!PageBuddy(pfn_to_page(outer_start))) {
		if (++order >= MAX_ORDER) {
			outer_start = start;
			break;
		}
		outer_start &= ~0UL << order;
	}

	if (outer_start != start) {
		order = buddy_order(pfn_to_page(outer_start));

		/*
		 * outer_start page could be small order buddy page and
		 * it doesn't include start page. Adjust outer_start
		 * in this case to report failed page properly
		 * on tracepoint in test_pages_isolated()
		 */
		if (outer_start + (1UL << order) <= start)
			outer_start = start;
	}

	/* Make sure the range is really isolated. */
	if (test_pages_isolated(outer_start, end, 0)) {
		pr_info_ratelimited("%s: [%lx, %lx) PFNs busy\n",
			__func__, outer_start, end);
		ret = -EBUSY;
		goto done;
	}

	/* Grab isolated pages from freelists. */
	outer_end = isolate_freepages_range(&cc, outer_start, end);
	if (!outer_end) {
		ret = -EBUSY;
		goto done;
	}

	/* Free head and tail (if any) */
	if (start != outer_start)
		free_contig_range(outer_start, start - outer_start);
	if (end != outer_end)
		free_contig_range(end, outer_end - end);

done:
	undo_isolate_page_range(pfn_max_align_down(start),
				pfn_max_align_up(end), migratetype);
	return ret;
}
EXPORT_SYMBOL(alloc_contig_range);

static int __alloc_contig_pages(unsigned long start_pfn,
				unsigned long nr_pages, gfp_t gfp_mask)
{
	unsigned long end_pfn = start_pfn + nr_pages;

	return alloc_contig_range(start_pfn, end_pfn, MIGRATE_MOVABLE,
				  gfp_mask);
}

static bool pfn_range_valid_contig(struct zone *z, unsigned long start_pfn,
				   unsigned long nr_pages)
{
	unsigned long i, end_pfn = start_pfn + nr_pages;
	struct page *page;

	for (i = start_pfn; i < end_pfn; i++) {
		page = pfn_to_online_page(i);
		if (!page)
			return false;

		if (page_zone(page) != z)
			return false;

		if (PageReserved(page))
			return false;

		if (page_count(page) > 0)
			return false;

		if (PageHuge(page))
			return false;
	}
	return true;
}

static bool zone_spans_last_pfn(const struct zone *zone,
				unsigned long start_pfn, unsigned long nr_pages)
{
	unsigned long last_pfn = start_pfn + nr_pages - 1;

	return zone_spans_pfn(zone, last_pfn);
}

/**
 * alloc_contig_pages() -- tries to find and allocate contiguous range of pages
 * @nr_pages:	Number of contiguous pages to allocate
 * @gfp_mask:	GFP mask to limit search and used during compaction
 * @nid:	Target node
 * @nodemask:	Mask for other possible nodes
 *
 * This routine is a wrapper around alloc_contig_range(). It scans over zones
 * on an applicable zonelist to find a contiguous pfn range which can then be
 * tried for allocation with alloc_contig_range(). This routine is intended
 * for allocation requests which can not be fulfilled with the buddy allocator.
 *
 * The allocated memory is always aligned to a page boundary. If nr_pages is a
 * power of two then the alignment is guaranteed to be to the given nr_pages
 * (e.g. 1GB request would be aligned to 1GB).
 *
 * Allocated pages can be freed with free_contig_range() or by manually calling
 * __free_page() on each allocated page.
 *
 * Return: pointer to contiguous pages on success, or NULL if not successful.
 */
struct page *alloc_contig_pages(unsigned long nr_pages, gfp_t gfp_mask,
				int nid, nodemask_t *nodemask)
{
	unsigned long ret, pfn, flags;
	struct zonelist *zonelist;
	struct zone *zone;
	struct zoneref *z;

	zonelist = node_zonelist(nid, gfp_mask);
	for_each_zone_zonelist_nodemask(zone, z, zonelist,
					gfp_zone(gfp_mask), nodemask) {
		spin_lock_irqsave(&zone->lock, flags);

		pfn = ALIGN(zone->zone_start_pfn, nr_pages);
		while (zone_spans_last_pfn(zone, pfn, nr_pages)) {
			if (pfn_range_valid_contig(zone, pfn, nr_pages)) {
				/*
				 * We release the zone lock here because
				 * alloc_contig_range() will also lock the zone
				 * at some point. If there's an allocation
				 * spinning on this lock, it may win the race
				 * and cause alloc_contig_range() to fail...
				 */
				spin_unlock_irqrestore(&zone->lock, flags);
				ret = __alloc_contig_pages(pfn, nr_pages,
							   gfp_mask);
				if (!ret)
					return pfn_to_page(pfn);
				spin_lock_irqsave(&zone->lock, flags);
			}
			pfn += nr_pages;
		}
		spin_unlock_irqrestore(&zone->lock, flags);
	}
	return NULL;
}
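
/*
 * Usage sketch (illustration only, not a real caller): a driver needing a
 * physically contiguous 16MB buffer on node 0 might do
 *
 *	struct page *pages;
 *	unsigned long nr = SZ_16M >> PAGE_SHIFT;
 *
 *	pages = alloc_contig_pages(nr, GFP_KERNEL, 0, NULL);
 *	if (pages) {
 *		... use page_address(pages) / page_to_phys(pages) ...
 *		free_contig_range(page_to_pfn(pages), nr);
 *	}
 *
 * Real users of this interface include gigantic hugetlb page allocation;
 * the buffer size above is made up for illustration.
 */
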
#endif /* CONFIG_CONTIG_ALLOC */

void free_contig_range(unsigned long pfn, unsigned int nr_pages)
{
	unsigned int count = 0;

	for (; nr_pages--; pfn++) {
		struct page *page = pfn_to_page(pfn);

		count += page_count(page) != 1;
		__free_page(page);
	}
	WARN(count != 0, "%d pages are still in use!\n", count);
}
EXPORT_SYMBOL(free_contig_range);

/*
 * The zone indicated has a new number of managed_pages; batch sizes and percpu
 * page high values need to be recalculated.
 */
void __meminit zone_pcp_update(struct zone *zone)
{
	mutex_lock(&pcp_batch_high_lock);
	zone_set_pageset_high_and_batch(zone);
	mutex_unlock(&pcp_batch_high_lock);
}

/*
 * Effectively disable pcplists for the zone by setting the high limit to 0
 * and draining all cpus. A concurrent page freeing on another CPU that's about
 * to put the page on pcplist will either finish before the drain and the page
 * will be drained, or observe the new high limit and skip the pcplist.
 *
 * Must be paired with a call to zone_pcp_enable().
 */
void zone_pcp_disable(struct zone *zone)
{
	mutex_lock(&pcp_batch_high_lock);
	__zone_set_pageset_high_and_batch(zone, 0, 1);
	__drain_all_pages(zone, true);
}

void zone_pcp_enable(struct zone *zone)
{
	__zone_set_pageset_high_and_batch(zone, zone->pageset_high, zone->pageset_batch);
	mutex_unlock(&pcp_batch_high_lock);
}

void zone_pcp_reset(struct zone *zone)
{
	unsigned long flags;
	int cpu;
	struct per_cpu_pageset *pset;

	/* avoid races with drain_pages() */
	local_irq_save(flags);
	if (zone->pageset != &boot_pageset) {
		for_each_online_cpu(cpu) {
			pset = per_cpu_ptr(zone->pageset, cpu);
			drain_zonestat(zone, pset);
		}
		free_percpu(zone->pageset);
		zone->pageset = &boot_pageset;
	}
	local_irq_restore(flags);
}

#ifdef CONFIG_MEMORY_HOTREMOVE
/*
 * All pages in the range must be in a single zone, must not contain holes,
 * must span full sections, and must be isolated before calling this function.
 */
void __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn = start_pfn;
	struct page *page;
	struct zone *zone;
	unsigned int order;
	unsigned long flags;

	offline_mem_sections(pfn, end_pfn);
	zone = page_zone(pfn_to_page(pfn));
	spin_lock_irqsave(&zone->lock, flags);
	while (pfn < end_pfn) {
		page = pfn_to_page(pfn);
		/*
		 * The HWPoisoned page may not be in the buddy system, and
		 * page_count() is not 0.
		 */
		if (unlikely(!PageBuddy(page) && PageHWPoison(page))) {
			pfn++;
			continue;
		}
		/*
		 * At this point all remaining PageOffline() pages have a
		 * reference count of 0 and can simply be skipped.
		 */
		if (PageOffline(page)) {
			BUG_ON(page_count(page));
			BUG_ON(PageBuddy(page));
			pfn++;
			continue;
		}

		BUG_ON(page_count(page));
		BUG_ON(!PageBuddy(page));
		order = buddy_order(page);
		del_page_from_free_list(page, zone, order);
		pfn += (1 << order);
	}
	spin_unlock_irqrestore(&zone->lock, flags);
}
#endif

bool is_free_buddy_page(struct page *page)
{
	struct zone *zone = page_zone(page);
	unsigned long pfn = page_to_pfn(page);
	unsigned long flags;
	unsigned int order;

	spin_lock_irqsave(&zone->lock, flags);
	for (order = 0; order < MAX_ORDER; order++) {
		struct page *page_head = page - (pfn & ((1 << order) - 1));

		if (PageBuddy(page_head) && buddy_order(page_head) >= order)
			break;
	}
	spin_unlock_irqrestore(&zone->lock, flags);

	return order < MAX_ORDER;
}

#ifdef CONFIG_MEMORY_FAILURE
/*
 * Break down a higher-order page into sub-pages, and keep our target out of
 * the buddy allocator.
 */
static void break_down_buddy_pages(struct zone *zone, struct page *page,
				   struct page *target, int low, int high,
				   int migratetype)
{
	unsigned long size = 1 << high;
	struct page *current_buddy, *next_page;

	while (high > low) {
		high--;
		size >>= 1;

		if (target >= &page[size]) {
			next_page = page + size;
			current_buddy = page;
		} else {
			next_page = page;
			current_buddy = page + size;
		}

		if (set_page_guard(zone, current_buddy, high, migratetype))
			continue;

		if (current_buddy != target) {
			add_to_free_list(current_buddy, zone, high, migratetype);
			set_buddy_order(current_buddy, high);
			page = next_page;
		}
	}
}
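
/*
 * Worked example (illustration only): breaking an order-2 buddy page
 * {p0,p1,p2,p3} around target p1 with low = 0 proceeds as follows:
 *
 *	high=1: p1 lies in the lower half, so {p2,p3} goes back on the
 *	        order-1 free list and we keep splitting {p0,p1};
 *	high=0: p1 is the upper page of the pair, so p0 goes back on the
 *	        order-0 free list.
 *
 * Only the target page p1 ends up outside the buddy free lists.
 */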

/*
 * Take a page that will be marked as poisoned off the buddy allocator.
 */
bool take_page_off_buddy(struct page *page)
{
	struct zone *zone = page_zone(page);
	unsigned long pfn = page_to_pfn(page);
	unsigned long flags;
	unsigned int order;
	bool ret = false;

	spin_lock_irqsave(&zone->lock, flags);
	for (order = 0; order < MAX_ORDER; order++) {
		struct page *page_head = page - (pfn & ((1 << order) - 1));
		int page_order = buddy_order(page_head);

		if (PageBuddy(page_head) && page_order >= order) {
			unsigned long pfn_head = page_to_pfn(page_head);
			int migratetype = get_pfnblock_migratetype(page_head,
								   pfn_head);

			del_page_from_free_list(page_head, zone, page_order);
			break_down_buddy_pages(zone, page_head, page, 0,
					       page_order, migratetype);
			ret = true;
			break;
		}
		if (page_count(page_head) > 0)
			break;
	}
	spin_unlock_irqrestore(&zone->lock, flags);
	return ret;
}
#endif