[linux-block.git] / include / linux / mmzone.h
b2441318 1/* SPDX-License-Identifier: GPL-2.0 */
1da177e4
LT
2#ifndef _LINUX_MMZONE_H
3#define _LINUX_MMZONE_H
4
1da177e4 5#ifndef __ASSEMBLY__
97965478 6#ifndef __GENERATING_BOUNDS_H
1da177e4 7
1da177e4
LT
8#include <linux/spinlock.h>
9#include <linux/list.h>
10#include <linux/wait.h>
e815af95 11#include <linux/bitops.h>
1da177e4
LT
12#include <linux/cache.h>
13#include <linux/threads.h>
14#include <linux/numa.h>
15#include <linux/init.h>
bdc8cb98 16#include <linux/seqlock.h>
8357f869 17#include <linux/nodemask.h>
835c134e 18#include <linux/pageblock-flags.h>
bbeae5b0 19#include <linux/page-flags-layout.h>
60063497 20#include <linux/atomic.h>
b03641af
DW
21#include <linux/mm_types.h>
22#include <linux/page-flags.h>
dbbee9d5 23#include <linux/local_lock.h>
93ff66bf 24#include <asm/page.h>
1da177e4
LT
25
26/* Free memory management - zoned buddy allocator. */
27#ifndef CONFIG_FORCE_MAX_ZONEORDER
28#define MAX_ORDER 11
29#else
30#define MAX_ORDER CONFIG_FORCE_MAX_ZONEORDER
31#endif
e984bb43 32#define MAX_ORDER_NR_PAGES (1 << (MAX_ORDER - 1))
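
/*
 * Illustrative arithmetic (not part of the header): with the default
 * MAX_ORDER of 11 and 4 KiB pages, allocation orders run from 0 to 10 and
 * the largest buddy block is MAX_ORDER_NR_PAGES = 1 << 10 = 1024 pages,
 * i.e. 4 MiB of physically contiguous memory.
 */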
1da177e4 33
5ad333eb
AW
34/*
35 * PAGE_ALLOC_COSTLY_ORDER is the order at which allocations are deemed
36 * costly to service. That is between allocation orders which should
35fca53e 37 * coalesce naturally under reasonable reclaim pressure and those which
5ad333eb
AW
38 * will not.
39 */
40#define PAGE_ALLOC_COSTLY_ORDER 3
41
a6ffdc07 42enum migratetype {
47118af0 43 MIGRATE_UNMOVABLE,
47118af0 44 MIGRATE_MOVABLE,
016c13da 45 MIGRATE_RECLAIMABLE,
0aaa29a5
MG
46 MIGRATE_PCPTYPES, /* the number of types on the pcp lists */
47 MIGRATE_HIGHATOMIC = MIGRATE_PCPTYPES,
47118af0
MN
48#ifdef CONFIG_CMA
49 /*
50 * MIGRATE_CMA migration type is designed to mimic the way
51 * ZONE_MOVABLE works. Only movable pages can be allocated
52 * from MIGRATE_CMA pageblocks and the page allocator never
53 * implicitly changes the migration type of a MIGRATE_CMA pageblock.
54 *
55 * The way to use it is to change migratetype of a range of
56 * pageblocks to MIGRATE_CMA which can be done by
11ac3e87 57 * __free_pageblock_cma() function.
47118af0
MN
58 */
59 MIGRATE_CMA,
60#endif
194159fb 61#ifdef CONFIG_MEMORY_ISOLATION
47118af0 62 MIGRATE_ISOLATE, /* can't allocate from here */
194159fb 63#endif
47118af0
MN
64 MIGRATE_TYPES
65};
66
60f30350 67/* In mm/page_alloc.c; keep in sync also with show_migration_types() there */
c999fbd3 68extern const char * const migratetype_names[MIGRATE_TYPES];
60f30350 69
47118af0
MN
70#ifdef CONFIG_CMA
71# define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
7c15d9bb 72# define is_migrate_cma_page(_page) (get_pageblock_migratetype(_page) == MIGRATE_CMA)
47118af0
MN
73#else
74# define is_migrate_cma(migratetype) false
7c15d9bb 75# define is_migrate_cma_page(_page) false
47118af0 76#endif
b2a0ac88 77
b682debd
VB
78static inline bool is_migrate_movable(int mt)
79{
80 return is_migrate_cma(mt) || mt == MIGRATE_MOVABLE;
81}
82
1dd214b8
ZY
83/*
84 * Check whether a migratetype can be merged with another migratetype.
85 *
86 * It is only mergeable when it can fall back to other migratetypes for
87 * allocation. See fallbacks[MIGRATE_TYPES][3] in page_alloc.c.
88 */
89static inline bool migratetype_is_mergeable(int mt)
90{
91 return mt < MIGRATE_PCPTYPES;
92}
93
b2a0ac88
MG
94#define for_each_migratetype_order(order, type) \
95 for (order = 0; order < MAX_ORDER; order++) \
96 for (type = 0; type < MIGRATE_TYPES; type++)
97
467c996c
MG
98extern int page_group_by_mobility_disabled;
99
d38ac97f 100#define MIGRATETYPE_MASK ((1UL << PB_migratetype_bits) - 1)
e58469ba 101
dc4b0caf 102#define get_pageblock_migratetype(page) \
535b81e2 103 get_pfnblock_flags_mask(page, page_to_pfn(page), MIGRATETYPE_MASK)
dc4b0caf 104
1da177e4 105struct free_area {
b2a0ac88 106 struct list_head free_list[MIGRATE_TYPES];
1da177e4
LT
107 unsigned long nr_free;
108};
109
b03641af
DW
110static inline struct page *get_page_from_free_area(struct free_area *area,
111 int migratetype)
112{
113 return list_first_entry_or_null(&area->free_list[migratetype],
114 struct page, lru);
115}
116
b03641af
DW
117static inline bool free_area_empty(struct free_area *area, int migratetype)
118{
119 return list_empty(&area->free_list[migratetype]);
120}
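
/*
 * Illustrative sketch (assumes a struct zone *zone, defined later in this
 * header, with zone->lock held): walk every (order, migratetype) free list
 * with for_each_migratetype_order() and peek at the first free page using
 * the helpers above.
 *
 *	unsigned int order;
 *	int type;
 *	struct page *page;
 *
 *	for_each_migratetype_order(order, type) {
 *		struct free_area *area = &zone->free_area[order];
 *
 *		if (free_area_empty(area, type))
 *			continue;
 *		page = get_page_from_free_area(area, type);
 *	}
 */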
121
1da177e4
LT
122struct pglist_data;
123
124/*
041711ce 125 * Add a wild amount of padding here to ensure data fall into separate
1da177e4
LT
126 * cachelines. There are very few zone structures in the machine, so space
127 * consumption is not a concern here.
128 */
129#if defined(CONFIG_SMP)
130struct zone_padding {
131 char x[0];
22fc6ecc 132} ____cacheline_internodealigned_in_smp;
1da177e4
LT
133#define ZONE_PADDING(name) struct zone_padding name;
134#else
135#define ZONE_PADDING(name)
136#endif
137
3a321d2a
KW
138#ifdef CONFIG_NUMA
139enum numa_stat_item {
140 NUMA_HIT, /* allocated in intended node */
141 NUMA_MISS, /* allocated in non intended node */
142 NUMA_FOREIGN, /* was intended here, hit elsewhere */
143 NUMA_INTERLEAVE_HIT, /* interleaver preferred this zone */
144 NUMA_LOCAL, /* allocation from local node */
145 NUMA_OTHER, /* allocation from other node */
f19298b9 146 NR_VM_NUMA_EVENT_ITEMS
3a321d2a
KW
147};
148#else
f19298b9 149#define NR_VM_NUMA_EVENT_ITEMS 0
3a321d2a
KW
150#endif
151
2244b95a 152enum zone_stat_item {
51ed4491 153 /* First 128 byte cacheline (assuming 64 bit words) */
d23ad423 154 NR_FREE_PAGES,
71c799f4
MK
155 NR_ZONE_LRU_BASE, /* Used only for compaction and reclaim retry */
156 NR_ZONE_INACTIVE_ANON = NR_ZONE_LRU_BASE,
157 NR_ZONE_ACTIVE_ANON,
158 NR_ZONE_INACTIVE_FILE,
159 NR_ZONE_ACTIVE_FILE,
160 NR_ZONE_UNEVICTABLE,
5a1c84b4 161 NR_ZONE_WRITE_PENDING, /* Count of dirty, writeback and unstable pages */
5344b7e6 162 NR_MLOCK, /* mlock()ed pages found and moved off LRU */
c6a7f572 163 /* Second 128 byte cacheline */
d2c5e30c 164 NR_BOUNCE,
91537fee
MK
165#if IS_ENABLED(CONFIG_ZSMALLOC)
166 NR_ZSPAGES, /* allocated in zsmalloc */
ca889e6c 167#endif
d1ce749a 168 NR_FREE_CMA_PAGES,
2244b95a
CL
169 NR_VM_ZONE_STAT_ITEMS };
170
75ef7184 171enum node_stat_item {
599d0c95
MG
172 NR_LRU_BASE,
173 NR_INACTIVE_ANON = NR_LRU_BASE, /* must match order of LRU_[IN]ACTIVE */
174 NR_ACTIVE_ANON, /* " " " " " */
175 NR_INACTIVE_FILE, /* " " " " " */
176 NR_ACTIVE_FILE, /* " " " " " */
177 NR_UNEVICTABLE, /* " " " " " */
d42f3245
RG
178 NR_SLAB_RECLAIMABLE_B,
179 NR_SLAB_UNRECLAIMABLE_B,
599d0c95
MG
180 NR_ISOLATED_ANON, /* Temporary isolated pages from anon lru */
181 NR_ISOLATED_FILE, /* Temporary isolated pages from file lru */
68d48e6a 182 WORKINGSET_NODES,
170b04b7
JK
183 WORKINGSET_REFAULT_BASE,
184 WORKINGSET_REFAULT_ANON = WORKINGSET_REFAULT_BASE,
185 WORKINGSET_REFAULT_FILE,
186 WORKINGSET_ACTIVATE_BASE,
187 WORKINGSET_ACTIVATE_ANON = WORKINGSET_ACTIVATE_BASE,
188 WORKINGSET_ACTIVATE_FILE,
189 WORKINGSET_RESTORE_BASE,
190 WORKINGSET_RESTORE_ANON = WORKINGSET_RESTORE_BASE,
191 WORKINGSET_RESTORE_FILE,
1e6b1085 192 WORKINGSET_NODERECLAIM,
4b9d0fab 193 NR_ANON_MAPPED, /* Mapped anonymous pages */
50658e2e
MG
194 NR_FILE_MAPPED, /* pagecache pages mapped into pagetables.
195 only modified from process context */
11fb9989
MG
196 NR_FILE_PAGES,
197 NR_FILE_DIRTY,
198 NR_WRITEBACK,
199 NR_WRITEBACK_TEMP, /* Writeback using temporary buffers */
200 NR_SHMEM, /* shmem pages (includes tmpfs/GEM pages) */
201 NR_SHMEM_THPS,
202 NR_SHMEM_PMDMAPPED,
60fbf0ab
SL
203 NR_FILE_THPS,
204 NR_FILE_PMDMAPPED,
11fb9989 205 NR_ANON_THPS,
c4a25635
MG
206 NR_VMSCAN_WRITE,
207 NR_VMSCAN_IMMEDIATE, /* Prioritise for reclaim when writeback ends */
208 NR_DIRTIED, /* page dirtyings since bootup */
209 NR_WRITTEN, /* page writings since bootup */
8cd7c588 210 NR_THROTTLED_WRITTEN, /* NR_WRITTEN while reclaim throttled */
b29940c1 211 NR_KERNEL_MISC_RECLAIMABLE, /* reclaimable non-slab kernel pages */
1970dc6f
JH
212 NR_FOLL_PIN_ACQUIRED, /* via: pin_user_page(), gup flag: FOLL_PIN */
213 NR_FOLL_PIN_RELEASED, /* pages returned via unpin_user_page() */
991e7673
SB
214 NR_KERNEL_STACK_KB, /* measured in KiB */
215#if IS_ENABLED(CONFIG_SHADOW_CALL_STACK)
216 NR_KERNEL_SCS_KB, /* measured in KiB */
217#endif
f0c0c115 218 NR_PAGETABLE, /* used for pagetables */
b6038942
SB
219#ifdef CONFIG_SWAP
220 NR_SWAPCACHE,
e39bb6be
HY
221#endif
222#ifdef CONFIG_NUMA_BALANCING
223 PGPROMOTE_SUCCESS, /* promote successfully */
b6038942 224#endif
75ef7184
MG
225 NR_VM_NODE_STAT_ITEMS
226};
227
69473e5d
MS
228/*
229 * Returns true if the item should be printed in THPs (/proc/vmstat
230 * currently prints the number of anon, file and shmem THPs, but the
231 * item is charged in pages).
232 */
233static __always_inline bool vmstat_item_print_in_thp(enum node_stat_item item)
234{
235 if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
236 return false;
237
bf9ecead 238 return item == NR_ANON_THPS ||
57b2847d 239 item == NR_FILE_THPS ||
a1528e21 240 item == NR_SHMEM_THPS ||
380780e7
MS
241 item == NR_SHMEM_PMDMAPPED ||
242 item == NR_FILE_PMDMAPPED;
69473e5d
MS
243}
244
ea426c2a
RG
245/*
246 * Returns true if the value is measured in bytes (most vmstat values are
247 * measured in pages). This defines the API part, the internal representation
248 * might be different.
249 */
250static __always_inline bool vmstat_item_in_bytes(int idx)
251{
d42f3245
RG
252 /*
253 * Global and per-node slab counters track slab pages.
254 * It's expected that changes are multiples of PAGE_SIZE.
255 * Internally values are stored in pages.
256 *
257 * Per-memcg and per-lruvec counters track memory consumed
258 * by individual slab objects. These counters are actually
259 * byte-precise.
260 */
261 return (idx == NR_SLAB_RECLAIMABLE_B ||
262 idx == NR_SLAB_UNRECLAIMABLE_B);
ea426c2a
RG
263}
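
/*
 * Illustrative sketch (stat_value_in_pages() is a hypothetical helper, not
 * part of this header): callers wanting a page count must scale byte-based
 * items down; everything else is already in pages.
 *
 *	static unsigned long stat_value_in_pages(int idx, unsigned long val)
 *	{
 *		if (vmstat_item_in_bytes(idx))
 *			return val >> PAGE_SHIFT;
 *		return val;
 *	}
 */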
264
4f98a2fe
RR
265/*
266 * We do arithmetic on the LRU lists in various places in the code,
267 * so it is important to keep the active lists LRU_ACTIVE higher in
268 * the array than the corresponding inactive lists, and to keep
269 * the *_FILE lists LRU_FILE higher than the corresponding _ANON lists.
270 *
271 * This has to be kept in sync with the statistics in zone_stat_item
272 * above and the descriptions in vmstat_text in mm/vmstat.c
273 */
274#define LRU_BASE 0
275#define LRU_ACTIVE 1
276#define LRU_FILE 2
277
b69408e8 278enum lru_list {
4f98a2fe
RR
279 LRU_INACTIVE_ANON = LRU_BASE,
280 LRU_ACTIVE_ANON = LRU_BASE + LRU_ACTIVE,
281 LRU_INACTIVE_FILE = LRU_BASE + LRU_FILE,
282 LRU_ACTIVE_FILE = LRU_BASE + LRU_FILE + LRU_ACTIVE,
894bc310 283 LRU_UNEVICTABLE,
894bc310
LS
284 NR_LRU_LISTS
285};
b69408e8 286
8cd7c588
MG
287enum vmscan_throttle_state {
288 VMSCAN_THROTTLE_WRITEBACK,
d818fca1 289 VMSCAN_THROTTLE_ISOLATED,
69392a40 290 VMSCAN_THROTTLE_NOPROGRESS,
1b4e3f26 291 VMSCAN_THROTTLE_CONGESTED,
8cd7c588
MG
292 NR_VMSCAN_THROTTLE,
293};
294
4111304d 295#define for_each_lru(lru) for (lru = 0; lru < NR_LRU_LISTS; lru++)
b69408e8 296
4111304d 297#define for_each_evictable_lru(lru) for (lru = 0; lru <= LRU_ACTIVE_FILE; lru++)
894bc310 298
b91ac374 299static inline bool is_file_lru(enum lru_list lru)
4f98a2fe 300{
4111304d 301 return (lru == LRU_INACTIVE_FILE || lru == LRU_ACTIVE_FILE);
4f98a2fe
RR
302}
303
b91ac374 304static inline bool is_active_lru(enum lru_list lru)
b69408e8 305{
4111304d 306 return (lru == LRU_ACTIVE_ANON || lru == LRU_ACTIVE_FILE);
b69408e8
CL
307}
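
/*
 * Illustrative sketch (scan_file_list() and scan_anon_list() are
 * hypothetical callbacks; struct lruvec is defined below): visit every
 * evictable LRU list and split the handling between file and anon lists.
 *
 *	enum lru_list lru;
 *
 *	for_each_evictable_lru(lru) {
 *		if (is_file_lru(lru))
 *			scan_file_list(lruvec, lru);
 *		else
 *			scan_anon_list(lruvec, lru);
 *	}
 */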
308
ed017373
YZ
309#define ANON_AND_FILE 2
310
1b05117d
JW
311enum lruvec_flags {
312 LRUVEC_CONGESTED, /* lruvec has many dirty pages
313 * backed by a congested BDI
314 */
315};
316
6290df54 317struct lruvec {
23047a96 318 struct list_head lists[NR_LRU_LISTS];
6168d0da
AS
319 /* per lruvec lru_lock for memcg */
320 spinlock_t lru_lock;
1431d4d1
JW
321 /*
322 * These track the cost of reclaiming one LRU - file or anon -
323 * over the other. As the observed cost of reclaiming one LRU
324 * increases, the reclaim scan balance tips toward the other.
325 */
326 unsigned long anon_cost;
327 unsigned long file_cost;
31d8fcac
JW
328 /* Non-resident age, driven by LRU movement */
329 atomic_long_t nonresident_age;
ed017373
YZ
330 /* Refaults at the time of last reclaim cycle */
331 unsigned long refaults[ANON_AND_FILE];
1b05117d
JW
332 /* Various lruvec state flags (enum lruvec_flags) */
333 unsigned long flags;
c255a458 334#ifdef CONFIG_MEMCG
599d0c95 335 struct pglist_data *pgdat;
7f5e86c2 336#endif
6290df54
JW
337};
338
653e003d 339/* Isolate unmapped pages */
f3fd4a61 340#define ISOLATE_UNMAPPED ((__force isolate_mode_t)0x2)
c8244935 341/* Isolate for asynchronous migration */
f3fd4a61 342#define ISOLATE_ASYNC_MIGRATE ((__force isolate_mode_t)0x4)
e46a2879
MK
343/* Isolate unevictable pages */
344#define ISOLATE_UNEVICTABLE ((__force isolate_mode_t)0x8)
4356f21d
MK
345
346/* LRU Isolation modes. */
9efeccac 347typedef unsigned __bitwise isolate_mode_t;
4356f21d 348
41858966
MG
349enum zone_watermarks {
350 WMARK_MIN,
351 WMARK_LOW,
352 WMARK_HIGH,
c574bbe9 353 WMARK_PROMO,
41858966
MG
354 NR_WMARK
355};
356
44042b44
MG
357/*
358 * One per migratetype for each PAGE_ALLOC_COSTLY_ORDER plus one additional
359 * for pageblock size for THP if configured.
360 */
361#ifdef CONFIG_TRANSPARENT_HUGEPAGE
362#define NR_PCP_THP 1
363#else
364#define NR_PCP_THP 0
365#endif
366#define NR_PCP_LISTS (MIGRATE_PCPTYPES * (PAGE_ALLOC_COSTLY_ORDER + 1 + NR_PCP_THP))
367
368/*
369 * Shift to encode migratetype and order in the same integer, with order
370 * in the least significant bits.
371 */
372#define NR_PCP_ORDER_WIDTH 8
373#define NR_PCP_ORDER_MASK ((1<<NR_PCP_ORDER_WIDTH) - 1)
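
/*
 * Illustrative sketch (mt and order are ints as used by the page
 * allocator): the encoding described above packs the order into the low
 * NR_PCP_ORDER_WIDTH bits so one integer can carry both values.
 *
 *	combined = (mt << NR_PCP_ORDER_WIDTH) | order;
 *	order	 = combined & NR_PCP_ORDER_MASK;
 *	mt	 = combined >> NR_PCP_ORDER_WIDTH;
 */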
374
1c30844d
MG
375#define min_wmark_pages(z) (z->_watermark[WMARK_MIN] + z->watermark_boost)
376#define low_wmark_pages(z) (z->_watermark[WMARK_LOW] + z->watermark_boost)
377#define high_wmark_pages(z) (z->_watermark[WMARK_HIGH] + z->watermark_boost)
378#define wmark_pages(z, i) (z->_watermark[i] + z->watermark_boost)
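
/*
 * Illustrative sketch (assumes a struct zone *z; zone_page_state() comes
 * from <linux/vmstat.h>, start_reclaim() is a hypothetical callback): a
 * simplified check of free pages against the boosted low watermark.
 *
 *	unsigned long free = zone_page_state(z, NR_FREE_PAGES);
 *
 *	if (free < low_wmark_pages(z))
 *		start_reclaim(z);
 */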
41858966 379
dbbee9d5 380/* Fields and list protected by pagesets local_lock in page_alloc.c */
1da177e4
LT
381struct per_cpu_pages {
382 int count; /* number of pages in the list */
1da177e4
LT
383 int high; /* high watermark, emptying needed */
384 int batch; /* chunk size for buddy add/remove */
3b12e7e9 385 short free_factor; /* batch scaling factor during free */
28f836b6 386#ifdef CONFIG_NUMA
3b12e7e9 387 short expire; /* When 0, remote pagesets are drained */
28f836b6 388#endif
5f8dcc21
MG
389
390 /* Lists of pages, one per migrate type stored on the pcp-lists */
44042b44 391 struct list_head lists[NR_PCP_LISTS];
1da177e4
LT
392};
393
28f836b6 394struct per_cpu_zonestat {
2244b95a
CL
395#ifdef CONFIG_SMP
396 s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS];
28f836b6
MG
397 s8 stat_threshold;
398#endif
399#ifdef CONFIG_NUMA
f19298b9
MG
400 /*
401 * Low priority inaccurate counters that are only folded
402 * on demand. Use a large type to avoid the overhead of
403 * folding during refresh_cpu_vm_stats.
404 */
405 unsigned long vm_numa_event[NR_VM_NUMA_EVENT_ITEMS];
2244b95a 406#endif
99dcc3e5 407};
e7c8d5c9 408
75ef7184
MG
409struct per_cpu_nodestat {
410 s8 stat_threshold;
411 s8 vm_node_stat_diff[NR_VM_NODE_STAT_ITEMS];
412};
413
97965478
CL
414#endif /* !__GENERATING_BOUNDS_H */
415
2f1b6248
CL
416enum zone_type {
417 /*
734f9246
NSJ
418 * ZONE_DMA and ZONE_DMA32 are used when there are peripherals not able
419 * to DMA to all of the addressable memory (ZONE_NORMAL).
420 * On architectures where this area covers the whole 32 bit address
421 * space ZONE_DMA32 is used. ZONE_DMA is left for the ones with smaller
422 * DMA addressing constraints. This distinction is important as a 32bit
423 * DMA mask is assumed when ZONE_DMA32 is defined. Some 64-bit
424 * platforms may need both zones as they support peripherals with
425 * different DMA addressing limitations.
2f1b6248 426 */
734f9246 427#ifdef CONFIG_ZONE_DMA
2f1b6248 428 ZONE_DMA,
4b51d669 429#endif
fb0e7942 430#ifdef CONFIG_ZONE_DMA32
2f1b6248 431 ZONE_DMA32,
fb0e7942 432#endif
2f1b6248
CL
433 /*
434 * Normal addressable memory is in ZONE_NORMAL. DMA operations can be
435 * performed on pages in ZONE_NORMAL if the DMA devices support
436 * transfers to all addressable memory.
437 */
438 ZONE_NORMAL,
e53ef38d 439#ifdef CONFIG_HIGHMEM
2f1b6248
CL
440 /*
441 * A memory area that is only addressable by the kernel through
442 * mapping portions into its own address space. This is for example
443 * used by i386 to allow the kernel to address the memory beyond
444 * 900MB. The kernel will set up special mappings (page
445 * table entries on i386) for each page that the kernel needs to
446 * access.
447 */
448 ZONE_HIGHMEM,
e53ef38d 449#endif
9181a980
DH
450 /*
451 * ZONE_MOVABLE is similar to ZONE_NORMAL, except that it contains
452 * movable pages with few exceptional cases described below. Main use
453 * cases for ZONE_MOVABLE are to make memory offlining/unplug more
454 * likely to succeed, and to locally limit unmovable allocations - e.g.,
455 * to increase the number of THP/huge pages. Notable special cases are:
456 *
457 * 1. Pinned pages: (long-term) pinning of movable pages might
d1e153fe
PT
458 * essentially turn such pages unmovable. Therefore, we do not allow
459 * pinning long-term pages in ZONE_MOVABLE. When pages are pinned and
460 * faulted, they come from the right zone right away. However, it is
461 * still possible that the address space already has pages in
462 * ZONE_MOVABLE at the time when pages are pinned (i.e. user has
463 * touched that memory before pinning). In such a case we migrate them
464 * to a different zone. When migration fails - pinning fails.
9181a980
DH
465 * 2. memblock allocations: kernelcore/movablecore setups might create
466 * situations where ZONE_MOVABLE contains unmovable allocations
467 * after boot. Memory offlining and allocations fail early.
468 * 3. Memory holes: kernelcore/movablecore setups might create very rare
469 * situations where ZONE_MOVABLE contains memory holes after boot,
470 * for example, if we have sections that are only partially
471 * populated. Memory offlining and allocations fail early.
472 * 4. PG_hwpoison pages: while poisoned pages can be skipped during
473 * memory offlining, such pages cannot be allocated.
474 * 5. Unmovable PG_offline pages: in paravirtualized environments,
475 * hotplugged memory blocks might only partially be managed by the
476 * buddy (e.g., via XEN-balloon, Hyper-V balloon, virtio-mem). The
477 * parts not managed by the buddy are unmovable PG_offline pages. In
478 * some cases (virtio-mem), such pages can be skipped during
479 * memory offlining, however, cannot be moved/allocated. These
480 * techniques might use alloc_contig_range() to hide previously
481 * exposed pages from the buddy again (e.g., to implement some sort
482 * of memory unplug in virtio-mem).
9afaf30f
PT
483 * 6. ZERO_PAGE(0), kernelcore/movablecore setups might create
484 * situations where ZERO_PAGE(0) which is allocated differently
485 * on different platforms may end up in a movable zone. ZERO_PAGE(0)
486 * cannot be migrated.
a08a2ae3
OS
487 * 7. Memory-hotplug: when using memmap_on_memory and onlining the
488 * memory to the MOVABLE zone, the vmemmap pages are also placed in
489 * such zone. Such pages cannot be really moved around as they are
490 * self-stored in the range, but they are treated as movable when
491 * the range they describe is about to be offlined.
9181a980
DH
492 *
493 * In general, no unmovable allocations that degrade memory offlining
494 * should end up in ZONE_MOVABLE. Allocators (like alloc_contig_range())
495 * have to expect that migrating pages in ZONE_MOVABLE can fail (even
496 * if has_unmovable_pages() states that there are no unmovable pages,
497 * there can be false negatives).
498 */
2a1e274a 499 ZONE_MOVABLE,
033fbae9
DW
500#ifdef CONFIG_ZONE_DEVICE
501 ZONE_DEVICE,
502#endif
97965478 503 __MAX_NR_ZONES
033fbae9 504
2f1b6248 505};
1da177e4 506
97965478
CL
507#ifndef __GENERATING_BOUNDS_H
508
ed017373
YZ
509#define ASYNC_AND_SYNC 2
510
1da177e4 511struct zone {
3484b2de 512 /* Read-mostly fields */
41858966
MG
513
514 /* zone watermarks, access with *_wmark_pages(zone) macros */
a9214443 515 unsigned long _watermark[NR_WMARK];
1c30844d 516 unsigned long watermark_boost;
41858966 517
0aaa29a5
MG
518 unsigned long nr_reserved_highatomic;
519
1da177e4 520 /*
89903327
AM
521 * We don't know if the memory that we're going to allocate will be
522 * freeable and/or it will be released eventually, so to avoid totally
523 * wasting several GB of ram we must reserve some of the lower zone
524 * memory (otherwise we risk running OOM on the lower zones despite
525 * there being tons of freeable ram on the higher zones). This array is
526 * recalculated at runtime if the sysctl_lowmem_reserve_ratio sysctl
527 * changes.
1da177e4 528 */
3484b2de 529 long lowmem_reserve[MAX_NR_ZONES];
ab8fabd4 530
e7c8d5c9 531#ifdef CONFIG_NUMA
d5f541ed 532 int node;
3484b2de 533#endif
3484b2de 534 struct pglist_data *zone_pgdat;
28f836b6
MG
535 struct per_cpu_pages __percpu *per_cpu_pageset;
536 struct per_cpu_zonestat __percpu *per_cpu_zonestats;
952eaf81
VB
537 /*
538 * the high and batch values are copied to individual pagesets for
539 * faster access
540 */
541 int pageset_high;
542 int pageset_batch;
3484b2de 543
835c134e
MG
544#ifndef CONFIG_SPARSEMEM
545 /*
d9c23400 546 * Flags for a pageblock_nr_pages block. See pageblock-flags.h.
835c134e
MG
547 * In SPARSEMEM, this map is stored in struct mem_section
548 */
549 unsigned long *pageblock_flags;
550#endif /* CONFIG_SPARSEMEM */
551
1da177e4
LT
552 /* zone_start_pfn == zone_start_paddr >> PAGE_SHIFT */
553 unsigned long zone_start_pfn;
554
bdc8cb98 555 /*
9feedc9d
JL
556 * spanned_pages is the total pages spanned by the zone, including
557 * holes, which is calculated as:
558 * spanned_pages = zone_end_pfn - zone_start_pfn;
bdc8cb98 559 *
9feedc9d
JL
560 * present_pages is physical pages existing within the zone, which
561 * is calculated as:
8761e31c 562 * present_pages = spanned_pages - absent_pages(pages in holes);
9feedc9d 563 *
4b097002
DH
564 * present_early_pages is present pages existing within the zone
565 * located on memory available since early boot, excluding hotplugged
566 * memory.
567 *
9feedc9d
JL
568 * managed_pages is present pages managed by the buddy system, which
569 * is calculated as (reserved_pages includes pages allocated by the
570 * bootmem allocator):
571 * managed_pages = present_pages - reserved_pages;
572 *
3c381db1
DH
573 * cma pages is present pages that are assigned for CMA use
574 * (MIGRATE_CMA).
575 *
9feedc9d
JL
576 * So present_pages may be used by memory hotplug or memory power
577 * management logic to figure out unmanaged pages by checking
578 * (present_pages - managed_pages). And managed_pages should be used
579 * by page allocator and vm scanner to calculate all kinds of watermarks
580 * and thresholds.
581 *
582 * Locking rules:
583 *
584 * zone_start_pfn and spanned_pages are protected by span_seqlock.
585 * It is a seqlock because it has to be read outside of zone->lock,
586 * and it is done in the main allocator path. But, it is written
587 * quite infrequently.
588 *
589 * The span_seq lock is declared along with zone->lock because it is
bdc8cb98
DH
590 * frequently read in proximity to zone->lock. It's good to
591 * give them a chance of being in the same cacheline.
9feedc9d 592 *
c3d5f5f0 593 * Write access to present_pages at runtime should be protected by
bfc8c901
VD
594 * mem_hotplug_begin/end(). Any reader who can't tolerate drift of
595 * present_pages should use get_online_mems() to get a stable value.
bdc8cb98 596 */
9705bea5 597 atomic_long_t managed_pages;
9feedc9d
JL
598 unsigned long spanned_pages;
599 unsigned long present_pages;
4b097002
DH
600#if defined(CONFIG_MEMORY_HOTPLUG)
601 unsigned long present_early_pages;
602#endif
3c381db1
DH
603#ifdef CONFIG_CMA
604 unsigned long cma_pages;
605#endif
3484b2de
MG
606
607 const char *name;
1da177e4 608
ad53f92e
JK
609#ifdef CONFIG_MEMORY_ISOLATION
610 /*
611 * Number of isolated pageblock. It is used to solve incorrect
612 * freepage counting problem due to racy retrieving migratetype
613 * of pageblock. Protected by zone->lock.
614 */
615 unsigned long nr_isolate_pageblock;
616#endif
617
3484b2de
MG
618#ifdef CONFIG_MEMORY_HOTPLUG
619 /* see spanned/present_pages for more description */
620 seqlock_t span_seqlock;
621#endif
622
9dcb8b68 623 int initialized;
3484b2de 624
0f661148 625 /* Write-intensive fields used from the page allocator */
3484b2de 626 ZONE_PADDING(_pad1_)
0f661148 627
3484b2de
MG
628 /* free areas of different sizes */
629 struct free_area free_area[MAX_ORDER];
630
631 /* zone flags, see below */
632 unsigned long flags;
633
0f661148 634 /* Primarily protects free_area */
a368ab67
MG
635 spinlock_t lock;
636
0f661148 637 /* Write-intensive fields used by compaction and vmstats. */
3484b2de
MG
638 ZONE_PADDING(_pad2_)
639
3484b2de
MG
640 /*
641 * When free pages are below this point, additional steps are taken
642 * when reading the number of free pages to avoid per-cpu counter
643 * drift allowing watermarks to be breached
644 */
645 unsigned long percpu_drift_mark;
646
647#if defined CONFIG_COMPACTION || defined CONFIG_CMA
648 /* pfn where compaction free scanner should start */
649 unsigned long compact_cached_free_pfn;
ed017373
YZ
650 /* pfn where compaction migration scanner should start */
651 unsigned long compact_cached_migrate_pfn[ASYNC_AND_SYNC];
e332f741
MG
652 unsigned long compact_init_migrate_pfn;
653 unsigned long compact_init_free_pfn;
3484b2de
MG
654#endif
655
656#ifdef CONFIG_COMPACTION
657 /*
658 * On compaction failure, 1<<compact_defer_shift compactions
659 * are skipped before trying again. The number attempted since
660 * last failure is tracked with compact_considered.
860b3272 661 * compact_order_failed is the minimum compaction failed order.
3484b2de
MG
662 */
663 unsigned int compact_considered;
664 unsigned int compact_defer_shift;
665 int compact_order_failed;
666#endif
667
668#if defined CONFIG_COMPACTION || defined CONFIG_CMA
669 /* Set to true when the PG_migrate_skip bits should be cleared */
670 bool compact_blockskip_flush;
671#endif
672
7cf91a98
JK
673 bool contiguous;
674
3484b2de
MG
675 ZONE_PADDING(_pad3_)
676 /* Zone statistics */
677 atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
f19298b9 678 atomic_long_t vm_numa_event[NR_VM_NUMA_EVENT_ITEMS];
22fc6ecc 679} ____cacheline_internodealigned_in_smp;
1da177e4 680
599d0c95 681enum pgdat_flags {
599d0c95 682 PGDAT_DIRTY, /* reclaim scanning has recently found
d43006d5
MG
683 * many dirty file pages at the tail
684 * of the LRU.
685 */
599d0c95 686 PGDAT_WRITEBACK, /* reclaim scanning has recently found
283aba9f
MG
687 * many pages under writeback
688 */
a5f5f91d 689 PGDAT_RECLAIM_LOCKED, /* prevents concurrent reclaim */
57054651 690};
e815af95 691
73444bc4
MG
692enum zone_flags {
693 ZONE_BOOSTED_WATERMARK, /* zone recently boosted watermarks.
694 * Cleared when kswapd is woken.
695 */
c49c2c47 696 ZONE_RECLAIM_ACTIVE, /* kswapd may be scanning the zone. */
73444bc4
MG
697};
698
9705bea5
AK
699static inline unsigned long zone_managed_pages(struct zone *zone)
700{
701 return (unsigned long)atomic_long_read(&zone->managed_pages);
702}
703
3c381db1
DH
704static inline unsigned long zone_cma_pages(struct zone *zone)
705{
706#ifdef CONFIG_CMA
707 return zone->cma_pages;
708#else
709 return 0;
710#endif
711}
712
f9228b20 713static inline unsigned long zone_end_pfn(const struct zone *zone)
108bcc96
CS
714{
715 return zone->zone_start_pfn + zone->spanned_pages;
716}
717
718static inline bool zone_spans_pfn(const struct zone *zone, unsigned long pfn)
719{
720 return zone->zone_start_pfn <= pfn && pfn < zone_end_pfn(zone);
721}
722
2a6e3ebe
CS
723static inline bool zone_is_initialized(struct zone *zone)
724{
9dcb8b68 725 return zone->initialized;
2a6e3ebe
CS
726}
727
728static inline bool zone_is_empty(struct zone *zone)
729{
730 return zone->spanned_pages == 0;
731}
732
f1dd2cd1
MH
733/*
734 * Return true if [start_pfn, start_pfn + nr_pages) range has a non-empty
735 * intersection with the given zone
736 */
737static inline bool zone_intersects(struct zone *zone,
738 unsigned long start_pfn, unsigned long nr_pages)
739{
740 if (zone_is_empty(zone))
741 return false;
742 if (start_pfn >= zone_end_pfn(zone) ||
743 start_pfn + nr_pages <= zone->zone_start_pfn)
744 return false;
745
746 return true;
747}
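
/*
 * Illustrative sketch (assumes a struct zone *zone and a pfn range about
 * to be hot-added): zone_intersects() answers "does any part of the range
 * overlap this zone's span?", while zone_spans_pfn() checks a single pfn
 * (a spanned pfn may still sit in a hole).
 *
 *	if (zone_intersects(zone, start_pfn, nr_pages))
 *		overlap = true;
 *	if (zone_spans_pfn(zone, pfn))
 *		in_span = true;
 */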
748
1da177e4
LT
749/*
750 * The "priority" of VM scanning is how much of the queues we will scan in one
751 * go. A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of the
752 * queues ("queue_length >> 12") during an aging round.
753 */
754#define DEF_PRIORITY 12
755
9276b1bc
PJ
756/* Maximum number of zones on a zonelist */
757#define MAX_ZONES_PER_ZONELIST (MAX_NUMNODES * MAX_NR_ZONES)
758
c00eb15a
YB
759enum {
760 ZONELIST_FALLBACK, /* zonelist with fallback */
9276b1bc 761#ifdef CONFIG_NUMA
c00eb15a
YB
762 /*
763 * The NUMA zonelists are doubled because we need zonelists that
764 * restrict the allocations to a single node for __GFP_THISNODE.
765 */
766 ZONELIST_NOFALLBACK, /* zonelist without fallback (__GFP_THISNODE) */
9276b1bc 767#endif
c00eb15a
YB
768 MAX_ZONELISTS
769};
9276b1bc 770
dd1a239f
MG
771/*
772 * This struct contains information about a zone in a zonelist. It is stored
773 * here to avoid dereferences into large structures and lookups of tables
774 */
775struct zoneref {
776 struct zone *zone; /* Pointer to actual zone */
777 int zone_idx; /* zone_idx(zoneref->zone) */
778};
779
1da177e4
LT
780/*
781 * One allocation request operates on a zonelist. A zonelist
782 * is a list of zones, the first one is the 'goal' of the
783 * allocation, the other zones are fallback zones, in decreasing
784 * priority.
785 *
dd1a239f
MG
786 * To speed the reading of the zonelist, the zonerefs contain the zone index
787 * of the entry being read. Helper functions to access information given
788 * a struct zoneref are
789 *
790 * zonelist_zone() - Return the struct zone * for an entry in _zonerefs
791 * zonelist_zone_idx() - Return the index of the zone for an entry
792 * zonelist_node_idx() - Return the index of the node for an entry
1da177e4
LT
793 */
794struct zonelist {
dd1a239f 795 struct zoneref _zonerefs[MAX_ZONES_PER_ZONELIST + 1];
1da177e4
LT
796};
797
bb1c50d3
MR
798/*
799 * The array of struct pages for flatmem.
800 * It must be declared for SPARSEMEM as well because there are configurations
801 * that rely on that.
802 */
5b99cd0e 803extern struct page *mem_map;
5b99cd0e 804
364c1eeb
YS
805#ifdef CONFIG_TRANSPARENT_HUGEPAGE
806struct deferred_split {
807 spinlock_t split_queue_lock;
808 struct list_head split_queue;
809 unsigned long split_queue_len;
810};
811#endif
812
1da177e4 813/*
1da177e4 814 * On NUMA machines, each NUMA node would have a pg_data_t to describe
618b8c20
NB
815 * its memory layout. On UMA machines there is a single pglist_data which
816 * describes the whole memory.
1da177e4
LT
817 *
818 * Memory statistics and page replacement data structures are maintained on a
819 * per-zone basis.
820 */
1da177e4 821typedef struct pglist_data {
496df3d3
BW
822 /*
823 * node_zones contains just the zones for THIS node. Not all of the
824 * zones may be populated, but it is the full list. It is referenced by
825 * this node's node_zonelists as well as other node's node_zonelists.
826 */
1da177e4 827 struct zone node_zones[MAX_NR_ZONES];
496df3d3
BW
828
829 /*
830 * node_zonelists contains references to all zones in all nodes.
831 * Generally the first zones will be references to this node's
832 * node_zones.
833 */
523b9458 834 struct zonelist node_zonelists[MAX_ZONELISTS];
496df3d3
BW
835
836 int nr_zones; /* number of populated zones in this node */
43b02ba9 837#ifdef CONFIG_FLATMEM /* means !SPARSEMEM */
1da177e4 838 struct page *node_mem_map;
eefa864b
JK
839#ifdef CONFIG_PAGE_EXTENSION
840 struct page_ext *node_page_ext;
841#endif
d41dee36 842#endif
3a2d7fa8 843#if defined(CONFIG_MEMORY_HOTPLUG) || defined(CONFIG_DEFERRED_STRUCT_PAGE_INIT)
208d54e5 844 /*
fa004ab7
WY
845 * Must be held any time you expect node_start_pfn,
846 * node_present_pages, node_spanned_pages or nr_zones to stay constant.
3d060856
PT
847 * Also synchronizes pgdat->first_deferred_pfn during deferred page
848 * init.
208d54e5 849 *
114d4b79 850 * pgdat_resize_lock() and pgdat_resize_unlock() are provided to
3a2d7fa8
PT
851 * manipulate node_size_lock without checking for CONFIG_MEMORY_HOTPLUG
852 * or CONFIG_DEFERRED_STRUCT_PAGE_INIT.
114d4b79 853 *
72c3b51b 854 * Nests above zone->lock and zone->span_seqlock
208d54e5
DH
855 */
856 spinlock_t node_size_lock;
857#endif
1da177e4
LT
858 unsigned long node_start_pfn;
859 unsigned long node_present_pages; /* total number of physical pages */
860 unsigned long node_spanned_pages; /* total size of physical page
861 range, including holes */
862 int node_id;
1da177e4 863 wait_queue_head_t kswapd_wait;
5515061d 864 wait_queue_head_t pfmemalloc_wait;
8cd7c588
MG
865
866 /* workqueues for throttling reclaim for different reasons. */
867 wait_queue_head_t reclaim_wait[NR_VMSCAN_THROTTLE];
868
869 atomic_t nr_writeback_throttled;/* nr of writeback-throttled tasks */
870 unsigned long nr_reclaim_start; /* nr pages written while throttled
871 * when throttling started. */
bfc8c901
VD
872 struct task_struct *kswapd; /* Protected by
873 mem_hotplug_begin/end() */
38087d9b 874 int kswapd_order;
97a225e6 875 enum zone_type kswapd_highest_zoneidx;
38087d9b 876
c73322d0
JW
877 int kswapd_failures; /* Number of 'reclaimed == 0' runs */
878
698b1b30
VB
879#ifdef CONFIG_COMPACTION
880 int kcompactd_max_order;
97a225e6 881 enum zone_type kcompactd_highest_zoneidx;
698b1b30
VB
882 wait_queue_head_t kcompactd_wait;
883 struct task_struct *kcompactd;
65d759c8 884 bool proactive_compact_trigger;
8177a420 885#endif
281e3726
MG
886 /*
887 * This is a per-node reserve of pages that are not available
888 * to userspace allocations.
889 */
890 unsigned long totalreserve_pages;
891
a5f5f91d
MG
892#ifdef CONFIG_NUMA
893 /*
0a3c5772 894 * Node reclaim becomes active if more unmapped pages than min_unmapped_pages exist.
a5f5f91d
MG
895 */
896 unsigned long min_unmapped_pages;
897 unsigned long min_slab_pages;
898#endif /* CONFIG_NUMA */
899
a52633d8
MG
900 /* Write-intensive fields used by page reclaim */
901 ZONE_PADDING(_pad1_)
3a80a7fa
MG
902
903#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
904 /*
905 * If memory initialisation on large machines is deferred then this
906 * is the first PFN that needs to be initialised.
907 */
908 unsigned long first_deferred_pfn;
909#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
a3d0a918
KS
910
911#ifdef CONFIG_TRANSPARENT_HUGEPAGE
364c1eeb 912 struct deferred_split deferred_split_queue;
a3d0a918 913#endif
75ef7184 914
599d0c95 915 /* Fields commonly accessed by the page reclaim scanner */
867e5e1d
JW
916
917 /*
918 * NOTE: THIS IS UNUSED IF MEMCG IS ENABLED.
919 *
920 * Use mem_cgroup_lruvec() to look up lruvecs.
921 */
922 struct lruvec __lruvec;
599d0c95 923
599d0c95
MG
924 unsigned long flags;
925
926 ZONE_PADDING(_pad2_)
927
75ef7184
MG
928 /* Per-node vmstats */
929 struct per_cpu_nodestat __percpu *per_cpu_nodestats;
930 atomic_long_t vm_stat[NR_VM_NODE_STAT_ITEMS];
1da177e4
LT
931} pg_data_t;
932
933#define node_present_pages(nid) (NODE_DATA(nid)->node_present_pages)
934#define node_spanned_pages(nid) (NODE_DATA(nid)->node_spanned_pages)
935
c6830c22 936#define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn)
da3649e1 937#define node_end_pfn(nid) pgdat_end_pfn(NODE_DATA(nid))
c6830c22 938
da3649e1
CS
939static inline unsigned long pgdat_end_pfn(pg_data_t *pgdat)
940{
941 return pgdat->node_start_pfn + pgdat->node_spanned_pages;
942}
943
944static inline bool pgdat_is_empty(pg_data_t *pgdat)
945{
946 return !pgdat->node_start_pfn && !pgdat->node_spanned_pages;
947}
c6830c22 948
208d54e5
DH
949#include <linux/memory_hotplug.h>
950
72675e13 951void build_all_zonelists(pg_data_t *pgdat);
5ecd9d40 952void wakeup_kswapd(struct zone *zone, gfp_t gfp_mask, int order,
97a225e6 953 enum zone_type highest_zoneidx);
86a294a8 954bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
97a225e6 955 int highest_zoneidx, unsigned int alloc_flags,
86a294a8 956 long free_pages);
7aeb09f9 957bool zone_watermark_ok(struct zone *z, unsigned int order,
97a225e6 958 unsigned long mark, int highest_zoneidx,
c603844b 959 unsigned int alloc_flags);
7aeb09f9 960bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
97a225e6 961 unsigned long mark, int highest_zoneidx);
c1d0da83
LD
962/*
963 * Memory initialization context, used to differentiate memory added by
964 * the platform statically or via the memory hotplug interface.
965 */
966enum meminit_context {
967 MEMINIT_EARLY,
968 MEMINIT_HOTPLUG,
a2f3aa02 969};
c1d0da83 970
dc0bbf3b 971extern void init_currently_empty_zone(struct zone *zone, unsigned long start_pfn,
b171e409 972 unsigned long size);
718127cc 973
bea8c150 974extern void lruvec_init(struct lruvec *lruvec);
7f5e86c2 975
599d0c95 976static inline struct pglist_data *lruvec_pgdat(struct lruvec *lruvec)
7f5e86c2 977{
c255a458 978#ifdef CONFIG_MEMCG
599d0c95 979 return lruvec->pgdat;
7f5e86c2 980#else
867e5e1d 981 return container_of(lruvec, struct pglist_data, __lruvec);
7f5e86c2
KK
982#endif
983}
984
7aac7898
LS
985#ifdef CONFIG_HAVE_MEMORYLESS_NODES
986int local_memory_node(int node_id);
987#else
988static inline int local_memory_node(int node_id) { return node_id; };
989#endif
990
1da177e4
LT
991/*
992 * zone_idx() returns 0 for the ZONE_DMA zone, 1 for the ZONE_NORMAL zone, etc.
993 */
994#define zone_idx(zone) ((zone) - (zone)->zone_pgdat->node_zones)
995
1f90a347
DW
996#ifdef CONFIG_ZONE_DEVICE
997static inline bool zone_is_zone_device(struct zone *zone)
998{
999 return zone_idx(zone) == ZONE_DEVICE;
1000}
1001#else
1002static inline bool zone_is_zone_device(struct zone *zone)
1003{
1004 return false;
1005}
1006#endif
1007
6aa303de
MG
1008/*
1009 * Returns true if a zone has pages managed by the buddy allocator.
1010 * All the reclaim decisions have to use this function rather than
1011 * populated_zone(). If the whole zone is reserved then we can easily
1012 * end up with populated_zone() && !managed_zone().
1013 */
1014static inline bool managed_zone(struct zone *zone)
1015{
9705bea5 1016 return zone_managed_pages(zone);
6aa303de
MG
1017}
1018
1019/* Returns true if a zone has memory */
1020static inline bool populated_zone(struct zone *zone)
f3fe6512 1021{
6aa303de 1022 return zone->present_pages;
f3fe6512
CK
1023}
1024
c1093b74
PT
1025#ifdef CONFIG_NUMA
1026static inline int zone_to_nid(struct zone *zone)
1027{
1028 return zone->node;
1029}
1030
1031static inline void zone_set_nid(struct zone *zone, int nid)
1032{
1033 zone->node = nid;
1034}
1035#else
1036static inline int zone_to_nid(struct zone *zone)
1037{
1038 return 0;
1039}
1040
1041static inline void zone_set_nid(struct zone *zone, int nid) {}
1042#endif
1043
2a1e274a
MG
1044extern int movable_zone;
1045
2f1b6248 1046static inline int is_highmem_idx(enum zone_type idx)
1da177e4 1047{
e53ef38d 1048#ifdef CONFIG_HIGHMEM
2a1e274a 1049 return (idx == ZONE_HIGHMEM ||
b19bd1c9 1050 (idx == ZONE_MOVABLE && movable_zone == ZONE_HIGHMEM));
e53ef38d
CL
1051#else
1052 return 0;
1053#endif
1da177e4
LT
1054}
1055
62b31070
BH
1056#ifdef CONFIG_ZONE_DMA
1057bool has_managed_dma(void);
1058#else
1059static inline bool has_managed_dma(void)
1060{
1061 return false;
1062}
1063#endif
1064
1da177e4 1065/**
b4a991ec 1066 * is_highmem - helper function to quickly check if a struct zone is a
1da177e4
LT
1067 * highmem zone or not. This is an attempt to keep references
1068 * to ZONE_{DMA/NORMAL/HIGHMEM/etc} in general code to a minimum.
198fba41
MR
1069 * @zone: pointer to struct zone variable
1070 * Return: 1 for a highmem zone, 0 otherwise
1da177e4
LT
1071 */
1072static inline int is_highmem(struct zone *zone)
1073{
e53ef38d 1074#ifdef CONFIG_HIGHMEM
29f9cb53 1075 return is_highmem_idx(zone_idx(zone));
e53ef38d
CL
1076#else
1077 return 0;
1078#endif
1da177e4
LT
1079}
1080
1da177e4
LT
1081/* These two functions are used to setup the per zone pages min values */
1082struct ctl_table;
2374c09b 1083
32927393
CH
1084int min_free_kbytes_sysctl_handler(struct ctl_table *, int, void *, size_t *,
1085 loff_t *);
1086int watermark_scale_factor_sysctl_handler(struct ctl_table *, int, void *,
1087 size_t *, loff_t *);
d3cda233 1088extern int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES];
32927393
CH
1089int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int, void *,
1090 size_t *, loff_t *);
74f44822
MG
1091int percpu_pagelist_high_fraction_sysctl_handler(struct ctl_table *, int,
1092 void *, size_t *, loff_t *);
9614634f 1093int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *, int,
32927393 1094 void *, size_t *, loff_t *);
0ff38490 1095int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *, int,
32927393
CH
1096 void *, size_t *, loff_t *);
1097int numa_zonelist_order_handler(struct ctl_table *, int,
1098 void *, size_t *, loff_t *);
74f44822 1099extern int percpu_pagelist_high_fraction;
f0c0b2b8 1100extern char numa_zonelist_order[];
c9bff3ee 1101#define NUMA_ZONELIST_ORDER_LEN 16
f0c0b2b8 1102
a9ee6cf5 1103#ifndef CONFIG_NUMA
1da177e4
LT
1104
1105extern struct pglist_data contig_page_data;
351de44f
MG
1106static inline struct pglist_data *NODE_DATA(int nid)
1107{
1108 return &contig_page_data;
1109}
1da177e4 1110
a9ee6cf5 1111#else /* CONFIG_NUMA */
1da177e4
LT
1112
1113#include <asm/mmzone.h>
1114
a9ee6cf5 1115#endif /* !CONFIG_NUMA */
348f8b6c 1116
95144c78
KH
1117extern struct pglist_data *first_online_pgdat(void);
1118extern struct pglist_data *next_online_pgdat(struct pglist_data *pgdat);
1119extern struct zone *next_zone(struct zone *zone);
8357f869
KH
1120
1121/**
12d15f0d 1122 * for_each_online_pgdat - helper macro to iterate over all online nodes
198fba41 1123 * @pgdat: pointer to a pg_data_t variable
8357f869
KH
1124 */
1125#define for_each_online_pgdat(pgdat) \
1126 for (pgdat = first_online_pgdat(); \
1127 pgdat; \
1128 pgdat = next_online_pgdat(pgdat))
8357f869
KH
1129/**
1130 * for_each_zone - helper macro to iterate over all memory zones
198fba41 1131 * @zone: pointer to struct zone variable
8357f869
KH
1132 *
1133 * The user only needs to declare the zone variable, for_each_zone
1134 * fills it in.
1135 */
1136#define for_each_zone(zone) \
1137 for (zone = (first_online_pgdat())->node_zones; \
1138 zone; \
1139 zone = next_zone(zone))
1140
ee99c71c
KM
1141#define for_each_populated_zone(zone) \
1142 for (zone = (first_online_pgdat())->node_zones; \
1143 zone; \
1144 zone = next_zone(zone)) \
1145 if (!populated_zone(zone)) \
1146 ; /* do nothing */ \
1147 else
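
/*
 * Illustrative sketch (no setup needed beyond the declarations): sum the
 * buddy-managed pages of every populated zone in the system.
 *
 *	struct zone *zone;
 *	unsigned long managed = 0;
 *
 *	for_each_populated_zone(zone)
 *		managed += zone_managed_pages(zone);
 */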
1148
dd1a239f
MG
1149static inline struct zone *zonelist_zone(struct zoneref *zoneref)
1150{
1151 return zoneref->zone;
1152}
1153
1154static inline int zonelist_zone_idx(struct zoneref *zoneref)
1155{
1156 return zoneref->zone_idx;
1157}
1158
1159static inline int zonelist_node_idx(struct zoneref *zoneref)
1160{
c1093b74 1161 return zone_to_nid(zoneref->zone);
dd1a239f
MG
1162}
1163
682a3385
MG
1164struct zoneref *__next_zones_zonelist(struct zoneref *z,
1165 enum zone_type highest_zoneidx,
1166 nodemask_t *nodes);
1167
19770b32
MG
1168/**
1169 * next_zones_zonelist - Returns the next zone at or below highest_zoneidx within the allowed nodemask using a cursor within a zonelist as a starting point
198fba41
MR
1170 * @z: The cursor used as a starting point for the search
1171 * @highest_zoneidx: The zone index of the highest zone to return
1172 * @nodes: An optional nodemask to filter the zonelist with
19770b32
MG
1173 *
1174 * This function returns the next zone at or below a given zone index that is
1175 * within the allowed nodemask using a cursor as the starting point for the
5bead2a0
MG
1176 * search. The zoneref returned is a cursor that represents the current zone
1177 * being examined. It should be advanced by one before calling
1178 * next_zones_zonelist again.
198fba41
MR
1179 *
1180 * Return: the next zone at or below highest_zoneidx within the allowed
1181 * nodemask using a cursor within a zonelist as a starting point
19770b32 1182 */
682a3385 1183static __always_inline struct zoneref *next_zones_zonelist(struct zoneref *z,
19770b32 1184 enum zone_type highest_zoneidx,
682a3385
MG
1185 nodemask_t *nodes)
1186{
1187 if (likely(!nodes && zonelist_zone_idx(z) <= highest_zoneidx))
1188 return z;
1189 return __next_zones_zonelist(z, highest_zoneidx, nodes);
1190}
dd1a239f 1191
19770b32
MG
1192/**
1193 * first_zones_zonelist - Returns the first zone at or below highest_zoneidx within the allowed nodemask in a zonelist
198fba41
MR
1194 * @zonelist: The zonelist to search for a suitable zone
1195 * @highest_zoneidx: The zone index of the highest zone to return
1196 * @nodes: An optional nodemask to filter the zonelist with
19770b32
MG
1197 *
1198 * This function returns the first zone at or below a given zone index that is
1199 * within the allowed nodemask. The zoneref returned is a cursor that can be
5bead2a0
MG
1200 * used to iterate the zonelist with next_zones_zonelist by advancing it by
1201 * one before calling.
ea57485a
VB
1202 *
1203 * When no eligible zone is found, zoneref->zone is NULL (zoneref itself is
1204 * never NULL). This may happen either genuinely, or due to concurrent nodemask
1205 * update due to cpuset modification.
198fba41
MR
1206 *
1207 * Return: Zoneref pointer for the first suitable zone found
19770b32 1208 */
dd1a239f 1209static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
19770b32 1210 enum zone_type highest_zoneidx,
c33d6c06 1211 nodemask_t *nodes)
54a6eb5c 1212{
c33d6c06 1213 return next_zones_zonelist(zonelist->_zonerefs,
05891fb0 1214 highest_zoneidx, nodes);
54a6eb5c
MG
1215}
1216
19770b32
MG
1217/**
1218 * for_each_zone_zonelist_nodemask - helper macro to iterate over valid zones in a zonelist at or below a given zone index and within a nodemask
198fba41
MR
1219 * @zone: The current zone in the iterator
1220 * @z: The current pointer within zonelist->_zonerefs being iterated
1221 * @zlist: The zonelist being iterated
1222 * @highidx: The zone index of the highest zone to return
1223 * @nodemask: Nodemask allowed by the allocator
19770b32
MG
1224 *
1225 * This iterator iterates through all zones at or below a given zone index and
1226 * within a given nodemask
1227 */
1228#define for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \
c33d6c06 1229 for (z = first_zones_zonelist(zlist, highidx, nodemask), zone = zonelist_zone(z); \
19770b32 1230 zone; \
05891fb0 1231 z = next_zones_zonelist(++z, highidx, nodemask), \
c33d6c06
MG
1232 zone = zonelist_zone(z))
1233
30d8ec73 1234#define for_next_zone_zonelist_nodemask(zone, z, highidx, nodemask) \
c33d6c06
MG
1235 for (zone = z->zone; \
1236 zone; \
1237 z = next_zones_zonelist(++z, highidx, nodemask), \
1238 zone = zonelist_zone(z))
1239
54a6eb5c
MG
1240
1241/**
1242 * for_each_zone_zonelist - helper macro to iterate over valid zones in a zonelist at or below a given zone index
198fba41
MR
1243 * @zone: The current zone in the iterator
1244 * @z: The current pointer within zonelist->zones being iterated
1245 * @zlist: The zonelist being iterated
1246 * @highidx: The zone index of the highest zone to return
54a6eb5c
MG
1247 *
1248 * This iterator iterates through all zones at or below a given zone index.
1249 */
1250#define for_each_zone_zonelist(zone, z, zlist, highidx) \
19770b32 1251 for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, NULL)
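
/*
 * Illustrative sketch (assumes a valid node id "nid"): walk the fallback
 * zonelist of a node, visiting only zones at or below ZONE_NORMAL, much
 * like the allocator scans candidate zones.
 *
 *	struct zonelist *zl = &NODE_DATA(nid)->node_zonelists[ZONELIST_FALLBACK];
 *	struct zoneref *z;
 *	struct zone *zone;
 *
 *	for_each_zone_zonelist(zone, z, zl, ZONE_NORMAL)
 *		pr_info("candidate zone %s on node %d\n",
 *			zone->name, zone_to_nid(zone));
 */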
54a6eb5c 1252
8ca1b5a4
FT
1253/* Whether the 'nodes' are all movable nodes */
1254static inline bool movable_only_nodes(nodemask_t *nodes)
1255{
1256 struct zonelist *zonelist;
1257 struct zoneref *z;
1258 int nid;
1259
1260 if (nodes_empty(*nodes))
1261 return false;
1262
1263 /*
1264 * We can choose an arbitrary node from the nodemask to get a
1265 * zonelist as they are interlinked. We just need to find
1266 * at least one zone that can satisfy kernel allocations.
1267 */
1268 nid = first_node(*nodes);
1269 zonelist = &NODE_DATA(nid)->node_zonelists[ZONELIST_FALLBACK];
1270 z = first_zones_zonelist(zonelist, ZONE_NORMAL, nodes);
1271 return (!z->zone) ? true : false;
1272}
1273
1274
d41dee36
AW
1275#ifdef CONFIG_SPARSEMEM
1276#include <asm/sparsemem.h>
1277#endif
1278
2bdaf115
AW
1279#ifdef CONFIG_FLATMEM
1280#define pfn_to_nid(pfn) (0)
1281#endif
1282
d41dee36
AW
1283#ifdef CONFIG_SPARSEMEM
1284
1285/*
d41dee36
AW
1286 * PA_SECTION_SHIFT physical address to/from section number
1287 * PFN_SECTION_SHIFT pfn to/from section number
1288 */
d41dee36
AW
1289#define PA_SECTION_SHIFT (SECTION_SIZE_BITS)
1290#define PFN_SECTION_SHIFT (SECTION_SIZE_BITS - PAGE_SHIFT)
1291
1292#define NR_MEM_SECTIONS (1UL << SECTIONS_SHIFT)
1293
1294#define PAGES_PER_SECTION (1UL << PFN_SECTION_SHIFT)
1295#define PAGE_SECTION_MASK (~(PAGES_PER_SECTION-1))
1296
835c134e 1297#define SECTION_BLOCKFLAGS_BITS \
d9c23400 1298 ((1UL << (PFN_SECTION_SHIFT - pageblock_order)) * NR_PAGEBLOCK_BITS)
835c134e 1299
d41dee36
AW
1300#if (MAX_ORDER - 1 + PAGE_SHIFT) > SECTION_SIZE_BITS
1301#error Allocator MAX_ORDER exceeds SECTION_SIZE
1302#endif
1303
1dd2bfc8
YI
1304static inline unsigned long pfn_to_section_nr(unsigned long pfn)
1305{
1306 return pfn >> PFN_SECTION_SHIFT;
1307}
1308static inline unsigned long section_nr_to_pfn(unsigned long sec)
1309{
1310 return sec << PFN_SECTION_SHIFT;
1311}
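
/*
 * Illustrative arithmetic (assuming SECTION_SIZE_BITS == 27, i.e. 128 MiB
 * sections, and 4 KiB pages): PFN_SECTION_SHIFT is 27 - 12 = 15, so
 * PAGES_PER_SECTION is 1 << 15 = 32768. pfn 74565 (0x12345) then belongs
 * to section 74565 >> 15 = 2, and section_nr_to_pfn(2) returns that
 * section's first pfn, 65536.
 */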
e3c40f37 1312
a539f353
DK
1313#define SECTION_ALIGN_UP(pfn) (((pfn) + PAGES_PER_SECTION - 1) & PAGE_SECTION_MASK)
1314#define SECTION_ALIGN_DOWN(pfn) ((pfn) & PAGE_SECTION_MASK)
1315
f1eca35a 1316#define SUBSECTION_SHIFT 21
9ffc1d19 1317#define SUBSECTION_SIZE (1UL << SUBSECTION_SHIFT)
f1eca35a
DW
1318
1319#define PFN_SUBSECTION_SHIFT (SUBSECTION_SHIFT - PAGE_SHIFT)
1320#define PAGES_PER_SUBSECTION (1UL << PFN_SUBSECTION_SHIFT)
1321#define PAGE_SUBSECTION_MASK (~(PAGES_PER_SUBSECTION-1))
1322
1323#if SUBSECTION_SHIFT > SECTION_SIZE_BITS
1324#error Subsection size exceeds section size
1325#else
1326#define SUBSECTIONS_PER_SECTION (1UL << (SECTION_SIZE_BITS - SUBSECTION_SHIFT))
1327#endif
1328
a3619190
DW
1329#define SUBSECTION_ALIGN_UP(pfn) ALIGN((pfn), PAGES_PER_SUBSECTION)
1330#define SUBSECTION_ALIGN_DOWN(pfn) ((pfn) & PAGE_SUBSECTION_MASK)
1331
f1eca35a 1332struct mem_section_usage {
0a9f9f62 1333#ifdef CONFIG_SPARSEMEM_VMEMMAP
f1eca35a 1334 DECLARE_BITMAP(subsection_map, SUBSECTIONS_PER_SECTION);
0a9f9f62 1335#endif
f1eca35a
DW
1336 /* See declaration of similar field in struct zone */
1337 unsigned long pageblock_flags[0];
1338};
1339
f46edbd1
DW
1340void subsection_map_init(unsigned long pfn, unsigned long nr_pages);
1341
d41dee36 1342struct page;
eefa864b 1343struct page_ext;
d41dee36 1344struct mem_section {
29751f69
AW
1345 /*
1346 * This is, logically, a pointer to an array of struct
1347 * pages. However, it is stored with some other magic.
1348 * (see sparse.c::sparse_init_one_section())
1349 *
30c253e6
AW
1350 * Additionally during early boot we encode node id of
1351 * the location of the section here to guide allocation.
1352 * (see sparse.c::memory_present())
1353 *
29751f69
AW
1354 * Making it a UL at least makes someone do a cast
1355 * before using it wrong.
1356 */
1357 unsigned long section_mem_map;
5c0e3066 1358
f1eca35a 1359 struct mem_section_usage *usage;
eefa864b
JK
1360#ifdef CONFIG_PAGE_EXTENSION
1361 /*
0c9ad804 1362 * If SPARSEMEM, pgdat doesn't have a page_ext pointer. We store it in the
eefa864b
JK
1363 * mem_section instead. (see page_ext.h about this.)
1364 */
1365 struct page_ext *page_ext;
1366 unsigned long pad;
1367#endif
55878e88
CS
1368 /*
1369 * WARNING: mem_section must be a power-of-2 in size for the
1370 * calculation and use of SECTION_ROOT_MASK to make sense.
1371 */
d41dee36
AW
1372};
1373
3e347261
BP
1374#ifdef CONFIG_SPARSEMEM_EXTREME
1375#define SECTIONS_PER_ROOT (PAGE_SIZE / sizeof (struct mem_section))
1376#else
1377#define SECTIONS_PER_ROOT 1
1378#endif
802f192e 1379
3e347261 1380#define SECTION_NR_TO_ROOT(sec) ((sec) / SECTIONS_PER_ROOT)
0faa5638 1381#define NR_SECTION_ROOTS DIV_ROUND_UP(NR_MEM_SECTIONS, SECTIONS_PER_ROOT)
3e347261 1382#define SECTION_ROOT_MASK (SECTIONS_PER_ROOT - 1)
802f192e 1383
3e347261 1384#ifdef CONFIG_SPARSEMEM_EXTREME
83e3c487 1385extern struct mem_section **mem_section;
802f192e 1386#else
3e347261
BP
1387extern struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT];
1388#endif
d41dee36 1389
f1eca35a
DW
1390static inline unsigned long *section_to_usemap(struct mem_section *ms)
1391{
1392 return ms->usage->pageblock_flags;
1393}
1394
29751f69
AW
1395static inline struct mem_section *__nr_to_section(unsigned long nr)
1396{
a431dbbc
WL
1397 unsigned long root = SECTION_NR_TO_ROOT(nr);
1398
1399 if (unlikely(root >= NR_SECTION_ROOTS))
1400 return NULL;
1401
83e3c487 1402#ifdef CONFIG_SPARSEMEM_EXTREME
a431dbbc 1403 if (!mem_section || !mem_section[root])
83e3c487
KS
1404 return NULL;
1405#endif
a431dbbc 1406 return &mem_section[root][nr & SECTION_ROOT_MASK];
29751f69 1407}
f1eca35a 1408extern size_t mem_section_usage_size(void);
29751f69
AW
1409
1410/*
1411 * We use the lower bits of the mem_map pointer to store
def9b71e
PT
1412 * a little bit of information. The pointer is calculated
1413 * as mem_map - section_nr_to_pfn(pnum). The result is
1414 * aligned to the minimum alignment of the two values:
1415 * 1. All mem_map arrays are page-aligned.
1416 * 2. section_nr_to_pfn() always clears PFN_SECTION_SHIFT
1417 * lowest bits. PFN_SECTION_SHIFT is arch-specific
1418 * (equal SECTION_SIZE_BITS - PAGE_SHIFT), and the
1419 * worst combination is powerpc with 256k pages,
1420 * which results in PFN_SECTION_SHIFT equal 6.
1421 * To sum it up, at least 6 bits are available.
29751f69 1422 */
1f90a347
DW
1423#define SECTION_MARKED_PRESENT (1UL<<0)
1424#define SECTION_HAS_MEM_MAP (1UL<<1)
1425#define SECTION_IS_ONLINE (1UL<<2)
1426#define SECTION_IS_EARLY (1UL<<3)
1427#define SECTION_TAINT_ZONE_DEVICE (1UL<<4)
1428#define SECTION_MAP_LAST_BIT (1UL<<5)
1429#define SECTION_MAP_MASK (~(SECTION_MAP_LAST_BIT-1))
01c8d337 1430#define SECTION_NID_SHIFT 6
29751f69
AW
1431
1432static inline struct page *__section_mem_map_addr(struct mem_section *section)
1433{
1434 unsigned long map = section->section_mem_map;
1435 map &= SECTION_MAP_MASK;
1436 return (struct page *)map;
1437}
1438
540557b9 1439static inline int present_section(struct mem_section *section)
29751f69 1440{
802f192e 1441 return (section && (section->section_mem_map & SECTION_MARKED_PRESENT));
29751f69
AW
1442}
1443
540557b9
AW
1444static inline int present_section_nr(unsigned long nr)
1445{
1446 return present_section(__nr_to_section(nr));
1447}
1448
1449static inline int valid_section(struct mem_section *section)
29751f69 1450{
802f192e 1451 return (section && (section->section_mem_map & SECTION_HAS_MEM_MAP));
29751f69
AW
1452}
1453
326e1b8f
DW
1454static inline int early_section(struct mem_section *section)
1455{
1456 return (section && (section->section_mem_map & SECTION_IS_EARLY));
1457}
1458
29751f69
AW
1459static inline int valid_section_nr(unsigned long nr)
1460{
1461 return valid_section(__nr_to_section(nr));
1462}
1463
2d070eab
MH
1464static inline int online_section(struct mem_section *section)
1465{
1466 return (section && (section->section_mem_map & SECTION_IS_ONLINE));
1467}
1468
1f90a347
DW
1469static inline int online_device_section(struct mem_section *section)
1470{
1471 unsigned long flags = SECTION_IS_ONLINE | SECTION_TAINT_ZONE_DEVICE;
1472
1473 return section && ((section->section_mem_map & flags) == flags);
1474}
1475
2d070eab
MH
1476static inline int online_section_nr(unsigned long nr)
1477{
1478 return online_section(__nr_to_section(nr));
1479}
1480
1481#ifdef CONFIG_MEMORY_HOTPLUG
1482void online_mem_sections(unsigned long start_pfn, unsigned long end_pfn);
2d070eab
MH
1483void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn);
1484#endif
2d070eab 1485
d41dee36
AW
1486static inline struct mem_section *__pfn_to_section(unsigned long pfn)
1487{
29751f69 1488 return __nr_to_section(pfn_to_section_nr(pfn));
d41dee36
AW
1489}
1490
2491f0a2 1491extern unsigned long __highest_present_section_nr;
c4e1be9e 1492
f46edbd1
DW
1493static inline int subsection_map_index(unsigned long pfn)
1494{
1495 return (pfn & ~(PAGE_SECTION_MASK)) / PAGES_PER_SUBSECTION;
1496}
1497
1498#ifdef CONFIG_SPARSEMEM_VMEMMAP
1499static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn)
1500{
1501 int idx = subsection_map_index(pfn);
1502
1503 return test_bit(idx, ms->usage->subsection_map);
1504}
1505#else
1506static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn)
1507{
1508 return 1;
1509}
1510#endif
1511
7b7bf499 1512#ifndef CONFIG_HAVE_ARCH_PFN_VALID
51c656ae
MR
1513/**
1514 * pfn_valid - check if there is a valid memory map entry for a PFN
1515 * @pfn: the page frame number to check
1516 *
1517 * Check if there is a valid memory map entry aka struct page for the @pfn.
1518 * Note, that availability of the memory map entry does not imply that
1519 * there is actual usable memory at that @pfn. The struct page may
1520 * represent a hole or an unusable page frame.
1521 *
1522 * Return: 1 for PFNs that have memory map entries and 0 otherwise
1523 */
d41dee36
AW
1524static inline int pfn_valid(unsigned long pfn)
1525{
f46edbd1
DW
1526 struct mem_section *ms;
1527
16c9afc7
AK
1528 /*
1529 * Ensure the upper PAGE_SHIFT bits are clear in the
1530 * pfn. Else it might lead to false positives when
1531 * some of the upper bits are set, but the lower bits
1532 * match a valid pfn.
1533 */
1534 if (PHYS_PFN(PFN_PHYS(pfn)) != pfn)
1535 return 0;
1536
d41dee36
AW
1537 if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
1538 return 0;
f1dc0db2 1539 ms = __pfn_to_section(pfn);
f46edbd1
DW
1540 if (!valid_section(ms))
1541 return 0;
1542 /*
1543 * Traditionally early sections always returned pfn_valid() for
1544 * the entire section-sized span.
1545 */
1546 return early_section(ms) || pfn_section_valid(ms, pfn);
d41dee36 1547}
7b7bf499 1548#endif
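
/*
 * Illustrative sketch (pfn_to_page() comes from asm-generic/memory_model.h):
 * validate a raw pfn before dereferencing its memory map entry; a valid
 * entry may still describe a hole or an unusable frame.
 *
 *	if (!pfn_valid(pfn))
 *		return NULL;
 *	return pfn_to_page(pfn);
 */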
d41dee36 1549
e03d1f78 1550static inline int pfn_in_present_section(unsigned long pfn)
540557b9
AW
1551{
1552 if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
1553 return 0;
f1dc0db2 1554 return present_section(__pfn_to_section(pfn));
540557b9
AW
1555}
1556
4c605881
DH
1557static inline unsigned long next_present_section_nr(unsigned long section_nr)
1558{
1559 while (++section_nr <= __highest_present_section_nr) {
1560 if (present_section_nr(section_nr))
1561 return section_nr;
1562 }
1563
1564 return -1;
1565}
1566
d41dee36
AW
1567/*
1568 * These are _only_ used during initialisation, therefore they
1569 * can use __initdata ... They could have names to indicate
1570 * this restriction.
1571 */
1572#ifdef CONFIG_NUMA
161599ff
AW
1573#define pfn_to_nid(pfn) \
1574({ \
1575 unsigned long __pfn_to_nid_pfn = (pfn); \
1576 page_to_nid(pfn_to_page(__pfn_to_nid_pfn)); \
1577})
2bdaf115
AW
1578#else
1579#define pfn_to_nid(pfn) (0)
d41dee36
AW
1580#endif
1581
d41dee36
AW
1582void sparse_init(void);
1583#else
1584#define sparse_init() do {} while (0)
28ae55c9 1585#define sparse_index_init(_sec, _nid) do {} while (0)
e03d1f78 1586#define pfn_in_present_section pfn_valid
f46edbd1 1587#define subsection_map_init(_pfn, _nr_pages) do {} while (0)
d41dee36
AW
1588#endif /* CONFIG_SPARSEMEM */
1589
97965478 1590 #endif /* !__GENERATING_BOUNDS_H */
1da177e4 1591#endif /* !__ASSEMBLY__ */
1da177e4 1592#endif /* _LINUX_MMZONE_H */