/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MMZONE_H
#define _LINUX_MMZONE_H

#ifndef __ASSEMBLY__
#ifndef __GENERATING_BOUNDS_H

#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/bitops.h>
#include <linux/cache.h>
#include <linux/threads.h>
#include <linux/numa.h>
#include <linux/init.h>
#include <linux/seqlock.h>
#include <linux/nodemask.h>
#include <linux/pageblock-flags.h>
#include <linux/page-flags-layout.h>
#include <linux/atomic.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/local_lock.h>
#include <asm/page.h>
/* Free memory management - zoned buddy allocator. */
#ifndef CONFIG_FORCE_MAX_ZONEORDER
#define MAX_ORDER 11
#else
#define MAX_ORDER CONFIG_FORCE_MAX_ZONEORDER
#endif
#define MAX_ORDER_NR_PAGES (1 << (MAX_ORDER - 1))
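/*
 * Illustrative note (not part of the original header): with the default
 * MAX_ORDER of 11 the largest buddy block has order MAX_ORDER - 1 = 10,
 * so MAX_ORDER_NR_PAGES = 1 << 10 = 1024 pages, i.e. 4 MiB with a
 * 4 KiB PAGE_SIZE.
 */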
/*
 * PAGE_ALLOC_COSTLY_ORDER is the order at which allocations are deemed
 * costly to service. That is between allocation orders which should
 * coalesce naturally under reasonable reclaim pressure and those which
 * will not.
 */
#define PAGE_ALLOC_COSTLY_ORDER 3
enum migratetype {
	MIGRATE_UNMOVABLE,
	MIGRATE_MOVABLE,
	MIGRATE_RECLAIMABLE,
	MIGRATE_PCPTYPES,	/* the number of types on the pcp lists */
	MIGRATE_HIGHATOMIC = MIGRATE_PCPTYPES,
#ifdef CONFIG_CMA
	/*
	 * MIGRATE_CMA migration type is designed to mimic the way
	 * ZONE_MOVABLE works.  Only movable pages can be allocated
	 * from MIGRATE_CMA pageblocks and the page allocator never
	 * implicitly changes the migration type of a MIGRATE_CMA pageblock.
	 *
	 * The way to use it is to change the migratetype of a range of
	 * pageblocks to MIGRATE_CMA, which can be done by the
	 * __free_pageblock_cma() function. What is important though is
	 * that a range of pageblocks must be aligned to MAX_ORDER_NR_PAGES
	 * should the biggest page be bigger than a single pageblock.
	 */
	MIGRATE_CMA,
#endif
#ifdef CONFIG_MEMORY_ISOLATION
	MIGRATE_ISOLATE,	/* can't allocate from here */
#endif
	MIGRATE_TYPES
};
/* In mm/page_alloc.c; keep in sync also with show_migration_types() there */
extern const char * const migratetype_names[MIGRATE_TYPES];

#ifdef CONFIG_CMA
# define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
# define is_migrate_cma_page(_page) (get_pageblock_migratetype(_page) == MIGRATE_CMA)
#else
# define is_migrate_cma(migratetype) false
# define is_migrate_cma_page(_page) false
#endif

static inline bool is_migrate_movable(int mt)
{
	return is_migrate_cma(mt) || mt == MIGRATE_MOVABLE;
}

#define for_each_migratetype_order(order, type) \
	for (order = 0; order < MAX_ORDER; order++) \
		for (type = 0; type < MIGRATE_TYPES; type++)
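/*
 * Usage sketch (illustrative, not taken from this header): walking every
 * (order, migratetype) free list of a zone, e.g. to count free pages.
 * Assumes the caller already holds zone->lock; free pages are linked
 * through page->lru, as the free_area helpers below rely on.
 *
 *	unsigned int order;
 *	int type;
 *	unsigned long nr_free = 0;
 *
 *	for_each_migratetype_order(order, type) {
 *		struct page *page;
 *
 *		list_for_each_entry(page,
 *				&zone->free_area[order].free_list[type], lru)
 *			nr_free += 1UL << order;
 *	}
 */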
extern int page_group_by_mobility_disabled;

#define MIGRATETYPE_MASK ((1UL << PB_migratetype_bits) - 1)

#define get_pageblock_migratetype(page) \
	get_pfnblock_flags_mask(page, page_to_pfn(page), MIGRATETYPE_MASK)

struct free_area {
	struct list_head	free_list[MIGRATE_TYPES];
	unsigned long		nr_free;
};

static inline struct page *get_page_from_free_area(struct free_area *area,
					    int migratetype)
{
	return list_first_entry_or_null(&area->free_list[migratetype],
					struct page, lru);
}

static inline bool free_area_empty(struct free_area *area, int migratetype)
{
	return list_empty(&area->free_list[migratetype]);
}
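/*
 * Illustrative sketch (not part of the original header): a caller holding
 * zone->lock might pull the first free page of a given order and
 * migratetype like this, getting NULL back when the list is empty.
 *
 *	struct free_area *area = &zone->free_area[order];
 *	struct page *page = NULL;
 *
 *	if (!free_area_empty(area, MIGRATE_MOVABLE))
 *		page = get_page_from_free_area(area, MIGRATE_MOVABLE);
 */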
struct pglist_data;

/*
 * Add a wild amount of padding here to ensure data fall into separate
 * cachelines. There are very few zone structures in the machine, so space
 * consumption is not a concern here.
 */
#if defined(CONFIG_SMP)
struct zone_padding {
	char x[0];
} ____cacheline_internodealigned_in_smp;
#define ZONE_PADDING(name)	struct zone_padding name;
#else
#define ZONE_PADDING(name)
#endif

#ifdef CONFIG_NUMA
enum numa_stat_item {
	NUMA_HIT,		/* allocated in intended node */
	NUMA_MISS,		/* allocated in non intended node */
	NUMA_FOREIGN,		/* was intended here, hit elsewhere */
	NUMA_INTERLEAVE_HIT,	/* interleaver preferred this zone */
	NUMA_LOCAL,		/* allocation from local node */
	NUMA_OTHER,		/* allocation from other node */
	NR_VM_NUMA_EVENT_ITEMS
};
#else
#define NR_VM_NUMA_EVENT_ITEMS 0
#endif

enum zone_stat_item {
	/* First 128 byte cacheline (assuming 64 bit words) */
	NR_FREE_PAGES,
	NR_ZONE_LRU_BASE, /* Used only for compaction and reclaim retry */
	NR_ZONE_INACTIVE_ANON = NR_ZONE_LRU_BASE,
	NR_ZONE_ACTIVE_ANON,
	NR_ZONE_INACTIVE_FILE,
	NR_ZONE_ACTIVE_FILE,
	NR_ZONE_UNEVICTABLE,
	NR_ZONE_WRITE_PENDING,	/* Count of dirty, writeback and unstable pages */
	NR_MLOCK,		/* mlock()ed pages found and moved off LRU */
	/* Second 128 byte cacheline */
	NR_BOUNCE,
#if IS_ENABLED(CONFIG_ZSMALLOC)
	NR_ZSPAGES,		/* allocated in zsmalloc */
#endif
	NR_FREE_CMA_PAGES,
	NR_VM_ZONE_STAT_ITEMS };
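/*
 * Illustrative sketch (an assumption about the caller, not from this
 * header): the per-zone counters above are read with the vmstat helpers
 * from <linux/vmstat.h>, for example
 *
 *	unsigned long free = zone_page_state(zone, NR_FREE_PAGES);
 *	unsigned long free_cma = zone_page_state(zone, NR_FREE_CMA_PAGES);
 */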
enum node_stat_item {
	NR_LRU_BASE,
	NR_INACTIVE_ANON = NR_LRU_BASE, /* must match order of LRU_[IN]ACTIVE */
	NR_ACTIVE_ANON,		/*  "     "     "   "       "         */
	NR_INACTIVE_FILE,	/*  "     "     "   "       "         */
	NR_ACTIVE_FILE,		/*  "     "     "   "       "         */
	NR_UNEVICTABLE,		/*  "     "     "   "       "         */
	NR_SLAB_RECLAIMABLE_B,
	NR_SLAB_UNRECLAIMABLE_B,
	NR_ISOLATED_ANON,	/* Temporary isolated pages from anon lru */
	NR_ISOLATED_FILE,	/* Temporary isolated pages from file lru */
	WORKINGSET_NODES,
	WORKINGSET_REFAULT_BASE,
	WORKINGSET_REFAULT_ANON = WORKINGSET_REFAULT_BASE,
	WORKINGSET_REFAULT_FILE,
	WORKINGSET_ACTIVATE_BASE,
	WORKINGSET_ACTIVATE_ANON = WORKINGSET_ACTIVATE_BASE,
	WORKINGSET_ACTIVATE_FILE,
	WORKINGSET_RESTORE_BASE,
	WORKINGSET_RESTORE_ANON = WORKINGSET_RESTORE_BASE,
	WORKINGSET_RESTORE_FILE,
	WORKINGSET_NODERECLAIM,
	NR_ANON_MAPPED,		/* Mapped anonymous pages */
	NR_FILE_MAPPED,		/* pagecache pages mapped into pagetables.
				   only modified from process context */
	NR_FILE_PAGES,
	NR_FILE_DIRTY,
	NR_WRITEBACK,
	NR_WRITEBACK_TEMP,	/* Writeback using temporary buffers */
	NR_SHMEM,		/* shmem pages (includes tmpfs/GEM pages) */
	NR_SHMEM_THPS,
	NR_SHMEM_PMDMAPPED,
	NR_FILE_THPS,
	NR_FILE_PMDMAPPED,
	NR_ANON_THPS,
	NR_VMSCAN_WRITE,
	NR_VMSCAN_IMMEDIATE,	/* Prioritise for reclaim when writeback ends */
	NR_DIRTIED,		/* page dirtyings since bootup */
	NR_WRITTEN,		/* page writings since bootup */
	NR_THROTTLED_WRITTEN,	/* NR_WRITTEN while reclaim throttled */
	NR_KERNEL_MISC_RECLAIMABLE,	/* reclaimable non-slab kernel pages */
	NR_FOLL_PIN_ACQUIRED,	/* via: pin_user_page(), gup flag: FOLL_PIN */
	NR_FOLL_PIN_RELEASED,	/* pages returned via unpin_user_page() */
	NR_KERNEL_STACK_KB,	/* measured in KiB */
#if IS_ENABLED(CONFIG_SHADOW_CALL_STACK)
	NR_KERNEL_SCS_KB,	/* measured in KiB */
#endif
	NR_PAGETABLE,		/* used for pagetables */
#ifdef CONFIG_SWAP
	NR_SWAPCACHE,
#endif
	NR_VM_NODE_STAT_ITEMS
};

/*
 * Returns true if the item should be printed in THPs (/proc/vmstat
 * currently prints number of anon, file and shmem THPs. But the item
 * is charged in pages).
 */
static __always_inline bool vmstat_item_print_in_thp(enum node_stat_item item)
{
	if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
		return false;

	return item == NR_ANON_THPS ||
	       item == NR_FILE_THPS ||
	       item == NR_SHMEM_THPS ||
	       item == NR_SHMEM_PMDMAPPED ||
	       item == NR_FILE_PMDMAPPED;
}

/*
 * Returns true if the value is measured in bytes (most vmstat values are
 * measured in pages). This defines the API part, the internal representation
 * might be different.
 */
static __always_inline bool vmstat_item_in_bytes(int idx)
{
	/*
	 * Global and per-node slab counters track slab pages.
	 * It's expected that changes are multiples of PAGE_SIZE.
	 * Internally values are stored in pages.
	 *
	 * Per-memcg and per-lruvec counters track memory, consumed
	 * by individual slab objects. These counters are actually
	 * byte-precise.
	 */
	return (idx == NR_SLAB_RECLAIMABLE_B ||
		idx == NR_SLAB_UNRECLAIMABLE_B);
}

/*
 * We do arithmetic on the LRU lists in various places in the code,
 * so it is important to keep the active lists LRU_ACTIVE higher in
 * the array than the corresponding inactive lists, and to keep
 * the *_FILE lists LRU_FILE higher than the corresponding _ANON lists.
 *
 * This has to be kept in sync with the statistics in zone_stat_item
 * above and the descriptions in vmstat_text in mm/vmstat.c
 */
#define LRU_BASE 0
#define LRU_ACTIVE 1
#define LRU_FILE 2

enum lru_list {
	LRU_INACTIVE_ANON = LRU_BASE,
	LRU_ACTIVE_ANON = LRU_BASE + LRU_ACTIVE,
	LRU_INACTIVE_FILE = LRU_BASE + LRU_FILE,
	LRU_ACTIVE_FILE = LRU_BASE + LRU_FILE + LRU_ACTIVE,
	LRU_UNEVICTABLE,
	NR_LRU_LISTS
};
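/*
 * Worked example of the index arithmetic above (illustrative only):
 *
 *	LRU_INACTIVE_ANON = 0
 *	LRU_ACTIVE_ANON   = 0 + 1 = 1
 *	LRU_INACTIVE_FILE = 0 + 2 = 2
 *	LRU_ACTIVE_FILE   = 0 + 2 + 1 = 3
 *	LRU_UNEVICTABLE   = 4, NR_LRU_LISTS = 5
 *
 * so adding LRU_ACTIVE to an inactive list index yields its active
 * counterpart, and adding LRU_FILE to an anon index yields the matching
 * file index, which is exactly what the comment before LRU_BASE relies on.
 */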
enum vmscan_throttle_state {
	VMSCAN_THROTTLE_WRITEBACK,
	VMSCAN_THROTTLE_ISOLATED,
	NR_VMSCAN_THROTTLE,
};

#define for_each_lru(lru) for (lru = 0; lru < NR_LRU_LISTS; lru++)

#define for_each_evictable_lru(lru) for (lru = 0; lru <= LRU_ACTIVE_FILE; lru++)

static inline bool is_file_lru(enum lru_list lru)
{
	return (lru == LRU_INACTIVE_FILE || lru == LRU_ACTIVE_FILE);
}

static inline bool is_active_lru(enum lru_list lru)
{
	return (lru == LRU_ACTIVE_ANON || lru == LRU_ACTIVE_FILE);
}
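/*
 * Usage sketch (illustrative, not from the original header): classifying
 * each evictable LRU list with the helpers above.
 *
 *	enum lru_list lru;
 *
 *	for_each_evictable_lru(lru)
 *		pr_debug("lru %d: file=%d active=%d\n", lru,
 *			 is_file_lru(lru), is_active_lru(lru));
 */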
#define ANON_AND_FILE 2

enum lruvec_flags {
	LRUVEC_CONGESTED,		/* lruvec has many dirty pages
					 * backed by a congested BDI
					 */
};

struct lruvec {
	struct list_head		lists[NR_LRU_LISTS];
	/* per lruvec lru_lock for memcg */
	spinlock_t			lru_lock;
	/*
	 * These track the cost of reclaiming one LRU - file or anon -
	 * over the other. As the observed cost of reclaiming one LRU
	 * increases, the reclaim scan balance tips toward the other.
	 */
	unsigned long			anon_cost;
	unsigned long			file_cost;
	/* Non-resident age, driven by LRU movement */
	atomic_long_t			nonresident_age;
	/* Refaults at the time of last reclaim cycle */
	unsigned long			refaults[ANON_AND_FILE];
	/* Various lruvec state flags (enum lruvec_flags) */
	unsigned long			flags;
#ifdef CONFIG_MEMCG
	struct pglist_data *pgdat;
#endif
};

/* Isolate unmapped pages */
#define ISOLATE_UNMAPPED	((__force isolate_mode_t)0x2)
/* Isolate for asynchronous migration */
#define ISOLATE_ASYNC_MIGRATE	((__force isolate_mode_t)0x4)
/* Isolate unevictable pages */
#define ISOLATE_UNEVICTABLE	((__force isolate_mode_t)0x8)

/* LRU Isolation modes. */
typedef unsigned __bitwise isolate_mode_t;

enum zone_watermarks {
	WMARK_MIN,
	WMARK_LOW,
	WMARK_HIGH,
	NR_WMARK
};

/*
 * One per migratetype for each PAGE_ALLOC_COSTLY_ORDER plus one additional
 * for pageblock size for THP if configured.
 */
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define NR_PCP_THP 1
#else
#define NR_PCP_THP 0
#endif
#define NR_PCP_LISTS (MIGRATE_PCPTYPES * (PAGE_ALLOC_COSTLY_ORDER + 1 + NR_PCP_THP))

/*
 * Shift to encode migratetype and order in the same integer, with order
 * in the least significant bits.
 */
#define NR_PCP_ORDER_WIDTH 8
#define NR_PCP_ORDER_MASK ((1<<NR_PCP_ORDER_WIDTH) - 1)
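/*
 * Worked example (illustrative, not part of the original header): with
 * MIGRATE_PCPTYPES = 3, PAGE_ALLOC_COSTLY_ORDER = 3 and NR_PCP_THP = 1
 * when THP is configured, NR_PCP_LISTS = 3 * (3 + 1 + 1) = 15 per-CPU
 * lists. The width/mask pair above supports a packing of the form
 *
 *	combined = (migratetype << NR_PCP_ORDER_WIDTH) | order;
 *	order = combined & NR_PCP_ORDER_MASK;
 *
 * with the order kept in the low bits, as the comment describes; the
 * exact packing used by mm/page_alloc.c is not spelled out in this header.
 */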
#define min_wmark_pages(z) (z->_watermark[WMARK_MIN] + z->watermark_boost)
#define low_wmark_pages(z) (z->_watermark[WMARK_LOW] + z->watermark_boost)
#define high_wmark_pages(z) (z->_watermark[WMARK_HIGH] + z->watermark_boost)
#define wmark_pages(z, i) (z->_watermark[i] + z->watermark_boost)
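/*
 * Illustrative sketch (an assumption about the caller, not from this
 * header): the boosted watermarks are what allocator-side checks compare
 * free pages against, e.g.
 *
 *	if (zone_page_state(zone, NR_FREE_PAGES) < low_wmark_pages(zone))
 *		wakeup_kswapd(zone, gfp_mask, order, highest_zoneidx);
 *
 * zone_page_state() comes from <linux/vmstat.h>; wakeup_kswapd() is
 * declared later in this file.
 */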
/* Fields and list protected by pagesets local_lock in page_alloc.c */
struct per_cpu_pages {
	int count;		/* number of pages in the list */
	int high;		/* high watermark, emptying needed */
	int batch;		/* chunk size for buddy add/remove */
	short free_factor;	/* batch scaling factor during free */
#ifdef CONFIG_NUMA
	short expire;		/* When 0, remote pagesets are drained */
#endif

	/* Lists of pages, one per migrate type stored on the pcp-lists */
	struct list_head lists[NR_PCP_LISTS];
};

struct per_cpu_zonestat {
#ifdef CONFIG_SMP
	s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS];
	s8 stat_threshold;
#endif
#ifdef CONFIG_NUMA
	/*
	 * Low priority inaccurate counters that are only folded
	 * on demand. Use a large type to avoid the overhead of
	 * folding during refresh_cpu_vm_stats.
	 */
	unsigned long vm_numa_event[NR_VM_NUMA_EVENT_ITEMS];
#endif
};

struct per_cpu_nodestat {
	s8 stat_threshold;
	s8 vm_node_stat_diff[NR_VM_NODE_STAT_ITEMS];
};

#endif /* !__GENERATING_BOUNDS_H */
enum zone_type {
	/*
	 * ZONE_DMA and ZONE_DMA32 are used when there are peripherals not able
	 * to DMA to all of the addressable memory (ZONE_NORMAL).
	 * On architectures where this area covers the whole 32 bit address
	 * space ZONE_DMA32 is used. ZONE_DMA is left for the ones with smaller
	 * DMA addressing constraints. This distinction is important as a 32bit
	 * DMA mask is assumed when ZONE_DMA32 is defined. Some 64-bit
	 * platforms may need both zones as they support peripherals with
	 * different DMA addressing limitations.
	 */
#ifdef CONFIG_ZONE_DMA
	ZONE_DMA,
#endif
#ifdef CONFIG_ZONE_DMA32
	ZONE_DMA32,
#endif
	/*
	 * Normal addressable memory is in ZONE_NORMAL. DMA operations can be
	 * performed on pages in ZONE_NORMAL if the DMA devices support
	 * transfers to all addressable memory.
	 */
	ZONE_NORMAL,
#ifdef CONFIG_HIGHMEM
	/*
	 * A memory area that is only addressable by the kernel through
	 * mapping portions into its own address space. This is for example
	 * used by i386 to allow the kernel to address the memory beyond
	 * 900MB. The kernel will set up special mappings (page
	 * table entries on i386) for each page that the kernel needs to
	 * access.
	 */
	ZONE_HIGHMEM,
#endif
	/*
	 * ZONE_MOVABLE is similar to ZONE_NORMAL, except that it contains
	 * movable pages with a few exceptional cases described below. Main
	 * use cases for ZONE_MOVABLE are to make memory offlining/unplug more
	 * likely to succeed, and to locally limit unmovable allocations - e.g.,
	 * to increase the number of THP/huge pages. Notable special cases are:
	 *
	 * 1. Pinned pages: (long-term) pinning of movable pages might
	 *    essentially turn such pages unmovable. Therefore, we do not allow
	 *    pinning long-term pages in ZONE_MOVABLE. When pages are pinned and
	 *    faulted, they come from the right zone right away. However, it is
	 *    still possible that the address space already has pages in
	 *    ZONE_MOVABLE at the time when pages are pinned (i.e. the user has
	 *    touched that memory before pinning). In such a case we migrate
	 *    them to a different zone. When migration fails - pinning fails.
	 * 2. memblock allocations: kernelcore/movablecore setups might create
	 *    situations where ZONE_MOVABLE contains unmovable allocations
	 *    after boot. Memory offlining and allocations fail early.
	 * 3. Memory holes: kernelcore/movablecore setups might create very rare
	 *    situations where ZONE_MOVABLE contains memory holes after boot,
	 *    for example, if we have sections that are only partially
	 *    populated. Memory offlining and allocations fail early.
	 * 4. PG_hwpoison pages: while poisoned pages can be skipped during
	 *    memory offlining, such pages cannot be allocated.
	 * 5. Unmovable PG_offline pages: in paravirtualized environments,
	 *    hotplugged memory blocks might only partially be managed by the
	 *    buddy (e.g., via XEN-balloon, Hyper-V balloon, virtio-mem). The
	 *    parts not managed by the buddy are unmovable PG_offline pages. In
	 *    some cases (virtio-mem), such pages can be skipped during
	 *    memory offlining, however, cannot be moved/allocated. These
	 *    techniques might use alloc_contig_range() to hide previously
	 *    exposed pages from the buddy again (e.g., to implement some sort
	 *    of memory unplug in virtio-mem).
	 * 6. ZERO_PAGE(0): kernelcore/movablecore setups might create
	 *    situations where ZERO_PAGE(0), which is allocated differently
	 *    on different platforms, may end up in a movable zone. ZERO_PAGE(0)
	 *    cannot be migrated.
	 * 7. Memory-hotplug: when using memmap_on_memory and onlining the
	 *    memory to the MOVABLE zone, the vmemmap pages are also placed in
	 *    such zone. Such pages cannot be really moved around as they are
	 *    self-stored in the range, but they are treated as movable when
	 *    the range they describe is about to be offlined.
	 *
	 * In general, no unmovable allocations that degrade memory offlining
	 * should end up in ZONE_MOVABLE. Allocators (like alloc_contig_range())
	 * have to expect that migrating pages in ZONE_MOVABLE can fail (even
	 * if has_unmovable_pages() states that there are no unmovable pages,
	 * there can be false negatives).
	 */
	ZONE_MOVABLE,
#ifdef CONFIG_ZONE_DEVICE
	ZONE_DEVICE,
#endif
	__MAX_NR_ZONES

};
#ifndef __GENERATING_BOUNDS_H

#define ASYNC_AND_SYNC 2

struct zone {
	/* Read-mostly fields */

	/* zone watermarks, access with *_wmark_pages(zone) macros */
	unsigned long _watermark[NR_WMARK];
	unsigned long watermark_boost;

	unsigned long nr_reserved_highatomic;

	/*
	 * We don't know if the memory that we're going to allocate will be
	 * freeable and/or released eventually, so to avoid totally wasting
	 * several GB of RAM we must reserve some of the lower zone memory
	 * (otherwise we risk running OOM on the lower zones despite there
	 * being tons of freeable RAM in the higher zones). This array is
	 * recalculated at runtime if the sysctl_lowmem_reserve_ratio sysctl
	 * changes.
	 */
	long lowmem_reserve[MAX_NR_ZONES];

#ifdef CONFIG_NUMA
	int node;
#endif
	struct pglist_data	*zone_pgdat;
	struct per_cpu_pages	__percpu *per_cpu_pageset;
	struct per_cpu_zonestat	__percpu *per_cpu_zonestats;
	/*
	 * the high and batch values are copied to individual pagesets for
	 * faster access
	 */
	int pageset_high;
	int pageset_batch;

#ifndef CONFIG_SPARSEMEM
	/*
	 * Flags for a pageblock_nr_pages block. See pageblock-flags.h.
	 * In SPARSEMEM, this map is stored in struct mem_section
	 */
	unsigned long		*pageblock_flags;
#endif /* CONFIG_SPARSEMEM */

	/* zone_start_pfn == zone_start_paddr >> PAGE_SHIFT */
	unsigned long		zone_start_pfn;

	/*
	 * spanned_pages is the total pages spanned by the zone, including
	 * holes, which is calculated as:
	 *	spanned_pages = zone_end_pfn - zone_start_pfn;
	 *
	 * present_pages is physical pages existing within the zone, which
	 * is calculated as:
	 *	present_pages = spanned_pages - absent_pages(pages in holes);
	 *
	 * present_early_pages is present pages existing within the zone
	 * located on memory available since early boot, excluding hotplugged
	 * memory.
	 *
	 * managed_pages is present pages managed by the buddy system, which
	 * is calculated as (reserved_pages includes pages allocated by the
	 * bootmem allocator):
	 *	managed_pages = present_pages - reserved_pages;
	 *
	 * cma pages is present pages that are assigned for CMA use
	 * (MIGRATE_CMA).
	 *
	 * So present_pages may be used by memory hotplug or memory power
	 * management logic to figure out unmanaged pages by checking
	 * (present_pages - managed_pages). And managed_pages should be used
	 * by page allocator and vm scanner to calculate all kinds of watermarks
	 * and thresholds.
	 *
	 * Locking rules:
	 *
	 * zone_start_pfn and spanned_pages are protected by span_seqlock.
	 * It is a seqlock because it has to be read outside of zone->lock,
	 * and it is done in the main allocator path. But, it is written
	 * quite infrequently.
	 *
	 * The span_seq lock is declared along with zone->lock because it is
	 * frequently read in proximity to zone->lock. It's good to
	 * give them a chance of being in the same cacheline.
	 *
	 * Write access to present_pages at runtime should be protected by
	 * mem_hotplug_begin/end(). Any reader who can't tolerate drift of
	 * present_pages should use get_online_mems() to get a stable value.
	 */
	atomic_long_t		managed_pages;
	unsigned long		spanned_pages;
	unsigned long		present_pages;
#if defined(CONFIG_MEMORY_HOTPLUG)
	unsigned long		present_early_pages;
#endif
#ifdef CONFIG_CMA
	unsigned long		cma_pages;
#endif

	const char		*name;

#ifdef CONFIG_MEMORY_ISOLATION
	/*
	 * Number of isolated pageblocks. It is used to solve an incorrect
	 * freepage counting problem due to racy retrieving of the
	 * migratetype of a pageblock. Protected by zone->lock.
	 */
	unsigned long		nr_isolate_pageblock;
#endif

#ifdef CONFIG_MEMORY_HOTPLUG
	/* see spanned/present_pages for more description */
	seqlock_t		span_seqlock;
#endif

	int initialized;

	/* Write-intensive fields used from the page allocator */
	ZONE_PADDING(_pad1_)

	/* free areas of different sizes */
	struct free_area	free_area[MAX_ORDER];

	/* zone flags, see below */
	unsigned long		flags;

	/* Primarily protects free_area */
	spinlock_t		lock;

	/* Write-intensive fields used by compaction and vmstats. */
	ZONE_PADDING(_pad2_)

	/*
	 * When free pages are below this point, additional steps are taken
	 * when reading the number of free pages to avoid per-cpu counter
	 * drift allowing watermarks to be breached
	 */
	unsigned long percpu_drift_mark;

#if defined CONFIG_COMPACTION || defined CONFIG_CMA
	/* pfn where compaction free scanner should start */
	unsigned long		compact_cached_free_pfn;
	/* pfn where compaction migration scanner should start */
	unsigned long		compact_cached_migrate_pfn[ASYNC_AND_SYNC];
	unsigned long		compact_init_migrate_pfn;
	unsigned long		compact_init_free_pfn;
#endif

#ifdef CONFIG_COMPACTION
	/*
	 * On compaction failure, 1<<compact_defer_shift compactions
	 * are skipped before trying again. The number attempted since
	 * last failure is tracked with compact_considered.
	 * compact_order_failed is the minimum compaction failed order.
	 */
	unsigned int		compact_considered;
	unsigned int		compact_defer_shift;
	int			compact_order_failed;
#endif

#if defined CONFIG_COMPACTION || defined CONFIG_CMA
	/* Set to true when the PG_migrate_skip bits should be cleared */
	bool			compact_blockskip_flush;
#endif

	bool			contiguous;

	ZONE_PADDING(_pad3_)
	/* Zone statistics */
	atomic_long_t		vm_stat[NR_VM_ZONE_STAT_ITEMS];
	atomic_long_t		vm_numa_event[NR_VM_NUMA_EVENT_ITEMS];
} ____cacheline_internodealigned_in_smp;
enum pgdat_flags {
	PGDAT_DIRTY,			/* reclaim scanning has recently found
					 * many dirty file pages at the tail
					 * of the LRU.
					 */
	PGDAT_WRITEBACK,		/* reclaim scanning has recently found
					 * many pages under writeback
					 */
	PGDAT_RECLAIM_LOCKED,		/* prevents concurrent reclaim */
};

enum zone_flags {
	ZONE_BOOSTED_WATERMARK,		/* zone recently boosted watermarks.
					 * Cleared when kswapd is woken.
					 */
	ZONE_RECLAIM_ACTIVE,		/* kswapd may be scanning the zone. */
};

static inline unsigned long zone_managed_pages(struct zone *zone)
{
	return (unsigned long)atomic_long_read(&zone->managed_pages);
}

static inline unsigned long zone_cma_pages(struct zone *zone)
{
#ifdef CONFIG_CMA
	return zone->cma_pages;
#else
	return 0;
#endif
}

static inline unsigned long zone_end_pfn(const struct zone *zone)
{
	return zone->zone_start_pfn + zone->spanned_pages;
}

static inline bool zone_spans_pfn(const struct zone *zone, unsigned long pfn)
{
	return zone->zone_start_pfn <= pfn && pfn < zone_end_pfn(zone);
}

static inline bool zone_is_initialized(struct zone *zone)
{
	return zone->initialized;
}

static inline bool zone_is_empty(struct zone *zone)
{
	return zone->spanned_pages == 0;
}

/*
 * Return true if [start_pfn, start_pfn + nr_pages) range has a non-empty
 * intersection with the given zone
 */
static inline bool zone_intersects(struct zone *zone,
		unsigned long start_pfn, unsigned long nr_pages)
{
	if (zone_is_empty(zone))
		return false;
	if (start_pfn >= zone_end_pfn(zone) ||
	    start_pfn + nr_pages <= zone->zone_start_pfn)
		return false;

	return true;
}
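/*
 * Worked example (illustrative, not from the original header): a zone
 * spanning PFNs [0x10000, 0x20000) intersects a range starting at PFN
 * 0x1f000 with nr_pages = 0x2000, because 0x1f000 < zone_end_pfn(zone)
 * and 0x1f000 + 0x2000 > zone->zone_start_pfn, so zone_intersects()
 * returns true even though the range extends past the end of the zone.
 */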
/*
 * The "priority" of VM scanning is how much of the queues we will scan in one
 * go. A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of the
 * queues ("queue_length >> 12") during an aging round.
 */
#define DEF_PRIORITY 12

/* Maximum number of zones on a zonelist */
#define MAX_ZONES_PER_ZONELIST (MAX_NUMNODES * MAX_NR_ZONES)

enum {
	ZONELIST_FALLBACK,	/* zonelist with fallback */
#ifdef CONFIG_NUMA
	/*
	 * The NUMA zonelists are doubled because we need zonelists that
	 * restrict the allocations to a single node for __GFP_THISNODE.
	 */
	ZONELIST_NOFALLBACK,	/* zonelist without fallback (__GFP_THISNODE) */
#endif
	MAX_ZONELISTS
};

/*
 * This struct contains information about a zone in a zonelist. It is stored
 * here to avoid dereferences into large structures and lookups of tables
 */
struct zoneref {
	struct zone *zone;	/* Pointer to actual zone */
	int zone_idx;		/* zone_idx(zoneref->zone) */
};

/*
 * One allocation request operates on a zonelist. A zonelist
 * is a list of zones, the first one is the 'goal' of the
 * allocation, the other zones are fallback zones, in decreasing
 * priority.
 *
 * To speed the reading of the zonelist, the zonerefs contain the zone index
 * of the entry being read. Helper functions to access information given
 * a struct zoneref are
 *
 * zonelist_zone()	- Return the struct zone * for an entry in _zonerefs
 * zonelist_zone_idx()	- Return the index of the zone for an entry
 * zonelist_node_idx()	- Return the index of the node for an entry
 */
struct zonelist {
	struct zoneref _zonerefs[MAX_ZONES_PER_ZONELIST + 1];
};

/*
 * The array of struct pages for flatmem.
 * It must be declared for SPARSEMEM as well because there are configurations
 * that rely on that.
 */
extern struct page *mem_map;
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
struct deferred_split {
	spinlock_t split_queue_lock;
	struct list_head split_queue;
	unsigned long split_queue_len;
};
#endif

/*
 * On NUMA machines, each NUMA node would have a pg_data_t to describe
 * its memory layout. On UMA machines there is a single pglist_data which
 * describes the whole memory.
 *
 * Memory statistics and page replacement data structures are maintained on a
 * per-zone basis.
 */
typedef struct pglist_data {
	/*
	 * node_zones contains just the zones for THIS node. Not all of the
	 * zones may be populated, but it is the full list. It is referenced by
	 * this node's node_zonelists as well as other nodes' node_zonelists.
	 */
	struct zone node_zones[MAX_NR_ZONES];

	/*
	 * node_zonelists contains references to all zones in all nodes.
	 * Generally the first zones will be references to this node's
	 * node_zones.
	 */
	struct zonelist node_zonelists[MAX_ZONELISTS];

	int nr_zones; /* number of populated zones in this node */
#ifdef CONFIG_FLATMEM	/* means !SPARSEMEM */
	struct page *node_mem_map;
#ifdef CONFIG_PAGE_EXTENSION
	struct page_ext *node_page_ext;
#endif
#endif
#if defined(CONFIG_MEMORY_HOTPLUG) || defined(CONFIG_DEFERRED_STRUCT_PAGE_INIT)
	/*
	 * Must be held any time you expect node_start_pfn,
	 * node_present_pages, node_spanned_pages or nr_zones to stay constant.
	 * Also synchronizes pgdat->first_deferred_pfn during deferred page
	 * init.
	 *
	 * pgdat_resize_lock() and pgdat_resize_unlock() are provided to
	 * manipulate node_size_lock without checking for CONFIG_MEMORY_HOTPLUG
	 * or CONFIG_DEFERRED_STRUCT_PAGE_INIT.
	 *
	 * Nests above zone->lock and zone->span_seqlock
	 */
	spinlock_t node_size_lock;
#endif
	unsigned long node_start_pfn;
	unsigned long node_present_pages; /* total number of physical pages */
	unsigned long node_spanned_pages; /* total size of physical page
					     range, including holes */
	int node_id;
	wait_queue_head_t kswapd_wait;
	wait_queue_head_t pfmemalloc_wait;

	/* wait queues for throttling reclaim for different reasons. */
	wait_queue_head_t reclaim_wait[NR_VMSCAN_THROTTLE];

	atomic_t nr_writeback_throttled;/* nr of writeback-throttled tasks */
	unsigned long nr_reclaim_start;	/* nr pages written while throttled
					 * when throttling started. */
	struct task_struct *kswapd;	/* Protected by
					   mem_hotplug_begin/end() */
	int kswapd_order;
	enum zone_type kswapd_highest_zoneidx;

	int kswapd_failures;		/* Number of 'reclaimed == 0' runs */

#ifdef CONFIG_COMPACTION
	int kcompactd_max_order;
	enum zone_type kcompactd_highest_zoneidx;
	wait_queue_head_t kcompactd_wait;
	struct task_struct *kcompactd;
	bool proactive_compact_trigger;
#endif
	/*
	 * This is a per-node reserve of pages that are not available
	 * to userspace allocations.
	 */
	unsigned long		totalreserve_pages;

#ifdef CONFIG_NUMA
	/*
	 * node reclaim becomes active if more unmapped pages exist.
	 */
	unsigned long		min_unmapped_pages;
	unsigned long		min_slab_pages;
#endif /* CONFIG_NUMA */

	/* Write-intensive fields used by page reclaim */
	ZONE_PADDING(_pad1_)

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
	/*
	 * If memory initialisation on large machines is deferred then this
	 * is the first PFN that needs to be initialised.
	 */
	unsigned long first_deferred_pfn;
#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	struct deferred_split deferred_split_queue;
#endif

	/* Fields commonly accessed by the page reclaim scanner */

	/*
	 * NOTE: THIS IS UNUSED IF MEMCG IS ENABLED.
	 *
	 * Use mem_cgroup_lruvec() to look up lruvecs.
	 */
	struct lruvec		__lruvec;

	unsigned long		flags;

	ZONE_PADDING(_pad2_)

	/* Per-node vmstats */
	struct per_cpu_nodestat __percpu *per_cpu_nodestats;
	atomic_long_t		vm_stat[NR_VM_NODE_STAT_ITEMS];
} pg_data_t;
#define node_present_pages(nid)	(NODE_DATA(nid)->node_present_pages)
#define node_spanned_pages(nid)	(NODE_DATA(nid)->node_spanned_pages)
#ifdef CONFIG_FLATMEM
#define pgdat_page_nr(pgdat, pagenr)	((pgdat)->node_mem_map + (pagenr))
#else
#define pgdat_page_nr(pgdat, pagenr)	pfn_to_page((pgdat)->node_start_pfn + (pagenr))
#endif
#define nid_page_nr(nid, pagenr)	pgdat_page_nr(NODE_DATA(nid),(pagenr))

#define node_start_pfn(nid)	(NODE_DATA(nid)->node_start_pfn)
#define node_end_pfn(nid)	pgdat_end_pfn(NODE_DATA(nid))

static inline unsigned long pgdat_end_pfn(pg_data_t *pgdat)
{
	return pgdat->node_start_pfn + pgdat->node_spanned_pages;
}

static inline bool pgdat_is_empty(pg_data_t *pgdat)
{
	return !pgdat->node_start_pfn && !pgdat->node_spanned_pages;
}
#include <linux/memory_hotplug.h>

void build_all_zonelists(pg_data_t *pgdat);
void wakeup_kswapd(struct zone *zone, gfp_t gfp_mask, int order,
		   enum zone_type highest_zoneidx);
bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
			 int highest_zoneidx, unsigned int alloc_flags,
			 long free_pages);
bool zone_watermark_ok(struct zone *z, unsigned int order,
		unsigned long mark, int highest_zoneidx,
		unsigned int alloc_flags);
bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
		unsigned long mark, int highest_zoneidx);
/*
 * Memory initialization context, used to differentiate memory added by
 * the platform statically or via the memory hotplug interface.
 */
enum meminit_context {
	MEMINIT_EARLY,
	MEMINIT_HOTPLUG,
};

extern void init_currently_empty_zone(struct zone *zone, unsigned long start_pfn,
				     unsigned long size);

extern void lruvec_init(struct lruvec *lruvec);

static inline struct pglist_data *lruvec_pgdat(struct lruvec *lruvec)
{
#ifdef CONFIG_MEMCG
	return lruvec->pgdat;
#else
	return container_of(lruvec, struct pglist_data, __lruvec);
#endif
}

#ifdef CONFIG_HAVE_MEMORYLESS_NODES
int local_memory_node(int node_id);
#else
static inline int local_memory_node(int node_id) { return node_id; };
#endif
/*
 * zone_idx() returns 0 for the ZONE_DMA zone, 1 for the ZONE_NORMAL zone, etc.
 */
#define zone_idx(zone)		((zone) - (zone)->zone_pgdat->node_zones)

#ifdef CONFIG_ZONE_DEVICE
static inline bool zone_is_zone_device(struct zone *zone)
{
	return zone_idx(zone) == ZONE_DEVICE;
}
#else
static inline bool zone_is_zone_device(struct zone *zone)
{
	return false;
}
#endif

/*
 * Returns true if a zone has pages managed by the buddy allocator.
 * All the reclaim decisions have to use this function rather than
 * populated_zone(). If the whole zone is reserved then we can easily
 * end up with populated_zone() && !managed_zone().
 */
static inline bool managed_zone(struct zone *zone)
{
	return zone_managed_pages(zone);
}

/* Returns true if a zone has memory */
static inline bool populated_zone(struct zone *zone)
{
	return zone->present_pages;
}

#ifdef CONFIG_NUMA
static inline int zone_to_nid(struct zone *zone)
{
	return zone->node;
}

static inline void zone_set_nid(struct zone *zone, int nid)
{
	zone->node = nid;
}
#else
static inline int zone_to_nid(struct zone *zone)
{
	return 0;
}

static inline void zone_set_nid(struct zone *zone, int nid) {}
#endif

extern int movable_zone;

static inline int is_highmem_idx(enum zone_type idx)
{
#ifdef CONFIG_HIGHMEM
	return (idx == ZONE_HIGHMEM ||
		(idx == ZONE_MOVABLE && movable_zone == ZONE_HIGHMEM));
#else
	return 0;
#endif
}
/**
 * is_highmem - helper function to quickly check if a struct zone is a
 *              highmem zone or not.  This is an attempt to keep references
 *              to ZONE_{DMA/NORMAL/HIGHMEM/etc} in general code to a minimum.
 * @zone: pointer to struct zone variable
 * Return: 1 for a highmem zone, 0 otherwise
 */
static inline int is_highmem(struct zone *zone)
{
#ifdef CONFIG_HIGHMEM
	return is_highmem_idx(zone_idx(zone));
#else
	return 0;
#endif
}

/* These two functions are used to setup the per zone pages min values */
struct ctl_table;

int min_free_kbytes_sysctl_handler(struct ctl_table *, int, void *, size_t *,
		loff_t *);
int watermark_scale_factor_sysctl_handler(struct ctl_table *, int, void *,
		size_t *, loff_t *);
extern int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES];
int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int, void *,
		size_t *, loff_t *);
int percpu_pagelist_high_fraction_sysctl_handler(struct ctl_table *, int,
		void *, size_t *, loff_t *);
int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *, int,
		void *, size_t *, loff_t *);
int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *, int,
		void *, size_t *, loff_t *);
int numa_zonelist_order_handler(struct ctl_table *, int,
		void *, size_t *, loff_t *);
extern int percpu_pagelist_high_fraction;
extern char numa_zonelist_order[];
#define NUMA_ZONELIST_ORDER_LEN	16
#ifndef CONFIG_NUMA

extern struct pglist_data contig_page_data;
static inline struct pglist_data *NODE_DATA(int nid)
{
	return &contig_page_data;
}
#define NODE_MEM_MAP(nid)	mem_map

#else /* CONFIG_NUMA */

#include <asm/mmzone.h>

#endif /* !CONFIG_NUMA */

extern struct pglist_data *first_online_pgdat(void);
extern struct pglist_data *next_online_pgdat(struct pglist_data *pgdat);
extern struct zone *next_zone(struct zone *zone);

/**
 * for_each_online_pgdat - helper macro to iterate over all online nodes
 * @pgdat: pointer to a pg_data_t variable
 */
#define for_each_online_pgdat(pgdat)			\
	for (pgdat = first_online_pgdat();		\
	     pgdat;					\
	     pgdat = next_online_pgdat(pgdat))
/**
 * for_each_zone - helper macro to iterate over all memory zones
 * @zone: pointer to struct zone variable
 *
 * The user only needs to declare the zone variable, for_each_zone
 * fills it in.
 */
#define for_each_zone(zone)				\
	for (zone = (first_online_pgdat())->node_zones; \
	     zone;					\
	     zone = next_zone(zone))

#define for_each_populated_zone(zone)			\
	for (zone = (first_online_pgdat())->node_zones; \
	     zone;					\
	     zone = next_zone(zone))			\
		if (!populated_zone(zone))		\
			; /* do nothing */		\
		else
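/*
 * Usage sketch (illustrative, not from the original header): summing the
 * buddy-managed pages of every populated zone in the system.
 *
 *	struct zone *zone;
 *	unsigned long managed = 0;
 *
 *	for_each_populated_zone(zone)
 *		managed += zone_managed_pages(zone);
 */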
static inline struct zone *zonelist_zone(struct zoneref *zoneref)
{
	return zoneref->zone;
}

static inline int zonelist_zone_idx(struct zoneref *zoneref)
{
	return zoneref->zone_idx;
}

static inline int zonelist_node_idx(struct zoneref *zoneref)
{
	return zone_to_nid(zoneref->zone);
}

struct zoneref *__next_zones_zonelist(struct zoneref *z,
					enum zone_type highest_zoneidx,
					nodemask_t *nodes);

/**
 * next_zones_zonelist - Returns the next zone at or below highest_zoneidx within the allowed nodemask using a cursor within a zonelist as a starting point
 * @z: The cursor used as a starting point for the search
 * @highest_zoneidx: The zone index of the highest zone to return
 * @nodes: An optional nodemask to filter the zonelist with
 *
 * This function returns the next zone at or below a given zone index that is
 * within the allowed nodemask using a cursor as the starting point for the
 * search. The zoneref returned is a cursor that represents the current zone
 * being examined. It should be advanced by one before calling
 * next_zones_zonelist again.
 *
 * Return: the next zone at or below highest_zoneidx within the allowed
 * nodemask using a cursor within a zonelist as a starting point
 */
static __always_inline struct zoneref *next_zones_zonelist(struct zoneref *z,
					enum zone_type highest_zoneidx,
					nodemask_t *nodes)
{
	if (likely(!nodes && zonelist_zone_idx(z) <= highest_zoneidx))
		return z;
	return __next_zones_zonelist(z, highest_zoneidx, nodes);
}

/**
 * first_zones_zonelist - Returns the first zone at or below highest_zoneidx within the allowed nodemask in a zonelist
 * @zonelist: The zonelist to search for a suitable zone
 * @highest_zoneidx: The zone index of the highest zone to return
 * @nodes: An optional nodemask to filter the zonelist with
 *
 * This function returns the first zone at or below a given zone index that is
 * within the allowed nodemask. The zoneref returned is a cursor that can be
 * used to iterate the zonelist with next_zones_zonelist by advancing it by
 * one before calling.
 *
 * When no eligible zone is found, zoneref->zone is NULL (zoneref itself is
 * never NULL). This may happen either genuinely, or due to concurrent nodemask
 * update due to cpuset modification.
 *
 * Return: Zoneref pointer for the first suitable zone found
 */
static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
					enum zone_type highest_zoneidx,
					nodemask_t *nodes)
{
	return next_zones_zonelist(zonelist->_zonerefs,
							highest_zoneidx, nodes);
}

/**
 * for_each_zone_zonelist_nodemask - helper macro to iterate over valid zones in a zonelist at or below a given zone index and within a nodemask
 * @zone: The current zone in the iterator
 * @z: The current pointer within zonelist->_zonerefs being iterated
 * @zlist: The zonelist being iterated
 * @highidx: The zone index of the highest zone to return
 * @nodemask: Nodemask allowed by the allocator
 *
 * This iterator iterates through all zones at or below a given zone index and
 * within a given nodemask
 */
#define for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \
	for (z = first_zones_zonelist(zlist, highidx, nodemask), zone = zonelist_zone(z);	\
		zone;							\
		z = next_zones_zonelist(++z, highidx, nodemask),	\
			zone = zonelist_zone(z))

#define for_next_zone_zonelist_nodemask(zone, z, highidx, nodemask) \
	for (zone = z->zone;	\
		zone;							\
		z = next_zones_zonelist(++z, highidx, nodemask),	\
			zone = zonelist_zone(z))


/**
 * for_each_zone_zonelist - helper macro to iterate over valid zones in a zonelist at or below a given zone index
 * @zone: The current zone in the iterator
 * @z: The current pointer within zonelist->zones being iterated
 * @zlist: The zonelist being iterated
 * @highidx: The zone index of the highest zone to return
 *
 * This iterator iterates through all zones at or below a given zone index.
 */
#define for_each_zone_zonelist(zone, z, zlist, highidx) \
	for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, NULL)
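/*
 * Usage sketch (illustrative only; the caller context is an assumption,
 * not taken from this header): walking a node's fallback zonelist from
 * the highest allowed zone downward, roughly as the page allocator does,
 * and stopping at the first zone whose low watermark is satisfied.
 *
 *	struct zoneref *z;
 *	struct zone *zone;
 *	struct zonelist *zonelist =
 *		&NODE_DATA(nid)->node_zonelists[ZONELIST_FALLBACK];
 *
 *	for_each_zone_zonelist_nodemask(zone, z, zonelist, highest_zoneidx,
 *					nodemask) {
 *		if (zone_watermark_ok(zone, order, low_wmark_pages(zone),
 *				      highest_zoneidx, 0))
 *			break;
 *	}
 */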
8ca1b5a4 FT |
1237 | /* Whether the 'nodes' are all movable nodes */ |
1238 | static inline bool movable_only_nodes(nodemask_t *nodes) | |
1239 | { | |
1240 | struct zonelist *zonelist; | |
1241 | struct zoneref *z; | |
1242 | int nid; | |
1243 | ||
1244 | if (nodes_empty(*nodes)) | |
1245 | return false; | |
1246 | ||
1247 | /* | |
1248 | * We can chose arbitrary node from the nodemask to get a | |
1249 | * zonelist as they are interlinked. We just need to find | |
1250 | * at least one zone that can satisfy kernel allocations. | |
1251 | */ | |
1252 | nid = first_node(*nodes); | |
1253 | zonelist = &NODE_DATA(nid)->node_zonelists[ZONELIST_FALLBACK]; | |
1254 | z = first_zones_zonelist(zonelist, ZONE_NORMAL, nodes); | |
1255 | return (!z->zone) ? true : false; | |
1256 | } | |
1257 | ||
1258 | ||
d41dee36 AW |
1259 | #ifdef CONFIG_SPARSEMEM |
1260 | #include <asm/sparsemem.h> | |
1261 | #endif | |
1262 | ||
2bdaf115 AW |
1263 | #ifdef CONFIG_FLATMEM |
1264 | #define pfn_to_nid(pfn) (0) | |
1265 | #endif | |
1266 | ||
d41dee36 AW |
1267 | #ifdef CONFIG_SPARSEMEM |
1268 | ||
1269 | /* | |
d41dee36 AW |
1270 | * PA_SECTION_SHIFT physical address to/from section number |
1271 | * PFN_SECTION_SHIFT pfn to/from section number | |
1272 | */ | |
d41dee36 AW |
1273 | #define PA_SECTION_SHIFT (SECTION_SIZE_BITS) |
1274 | #define PFN_SECTION_SHIFT (SECTION_SIZE_BITS - PAGE_SHIFT) | |
1275 | ||
1276 | #define NR_MEM_SECTIONS (1UL << SECTIONS_SHIFT) | |
1277 | ||
1278 | #define PAGES_PER_SECTION (1UL << PFN_SECTION_SHIFT) | |
1279 | #define PAGE_SECTION_MASK (~(PAGES_PER_SECTION-1)) | |
1280 | ||
835c134e | 1281 | #define SECTION_BLOCKFLAGS_BITS \ |
d9c23400 | 1282 | ((1UL << (PFN_SECTION_SHIFT - pageblock_order)) * NR_PAGEBLOCK_BITS) |
835c134e | 1283 | |
d41dee36 AW |
1284 | #if (MAX_ORDER - 1 + PAGE_SHIFT) > SECTION_SIZE_BITS |
1285 | #error Allocator MAX_ORDER exceeds SECTION_SIZE | |
1286 | #endif | |
1287 | ||
1dd2bfc8 YI |
1288 | static inline unsigned long pfn_to_section_nr(unsigned long pfn) |
1289 | { | |
1290 | return pfn >> PFN_SECTION_SHIFT; | |
1291 | } | |
1292 | static inline unsigned long section_nr_to_pfn(unsigned long sec) | |
1293 | { | |
1294 | return sec << PFN_SECTION_SHIFT; | |
1295 | } | |
e3c40f37 | 1296 | |
a539f353 DK |
1297 | #define SECTION_ALIGN_UP(pfn) (((pfn) + PAGES_PER_SECTION - 1) & PAGE_SECTION_MASK) |
1298 | #define SECTION_ALIGN_DOWN(pfn) ((pfn) & PAGE_SECTION_MASK) | |
1299 | ||
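/*
 * Editor's note (illustrative): the two conversions above round-trip through
 * SECTION_ALIGN_DOWN(), i.e. for any pfn
 *
 *	section_nr_to_pfn(pfn_to_section_nr(pfn)) == SECTION_ALIGN_DOWN(pfn)
 *
 * The hypothetical helper below simply checks that identity.
 */
static inline bool example_section_round_trip(unsigned long pfn)
{
	return section_nr_to_pfn(pfn_to_section_nr(pfn)) ==
	       SECTION_ALIGN_DOWN(pfn);
}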
f1eca35a | 1300 | #define SUBSECTION_SHIFT 21 |
9ffc1d19 | 1301 | #define SUBSECTION_SIZE (1UL << SUBSECTION_SHIFT) |
f1eca35a DW |
1302 | |
1303 | #define PFN_SUBSECTION_SHIFT (SUBSECTION_SHIFT - PAGE_SHIFT) | |
1304 | #define PAGES_PER_SUBSECTION (1UL << PFN_SUBSECTION_SHIFT) | |
1305 | #define PAGE_SUBSECTION_MASK (~(PAGES_PER_SUBSECTION-1)) | |
1306 | ||
1307 | #if SUBSECTION_SHIFT > SECTION_SIZE_BITS | |
1308 | #error Subsection size exceeds section size | |
1309 | #else | |
1310 | #define SUBSECTIONS_PER_SECTION (1UL << (SECTION_SIZE_BITS - SUBSECTION_SHIFT)) | |
1311 | #endif | |
1312 | ||
a3619190 DW |
1313 | #define SUBSECTION_ALIGN_UP(pfn) ALIGN((pfn), PAGES_PER_SUBSECTION) |
1314 | #define SUBSECTION_ALIGN_DOWN(pfn) ((pfn) & PAGE_SUBSECTION_MASK) | |
1315 | ||
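/*
 * Worked example (editor's addition): SUBSECTION_SHIFT of 21 gives a 2 MiB
 * subsection. With PAGE_SHIFT == 12 that is PAGES_PER_SUBSECTION == 512, and
 * with SECTION_SIZE_BITS == 27 a section holds SUBSECTIONS_PER_SECTION == 64
 * subsections. The hypothetical helper below mirrors subsection_map_index()
 * defined further down.
 */
static inline unsigned long example_subsection_of_pfn(unsigned long pfn)
{
	/* which subsection within its section does this pfn fall in? */
	return (pfn & ~PAGE_SECTION_MASK) / PAGES_PER_SUBSECTION;
}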
f1eca35a | 1316 | struct mem_section_usage { |
0a9f9f62 | 1317 | #ifdef CONFIG_SPARSEMEM_VMEMMAP |
f1eca35a | 1318 | DECLARE_BITMAP(subsection_map, SUBSECTIONS_PER_SECTION); |
0a9f9f62 | 1319 | #endif |
f1eca35a DW |
1320 | /* See declaration of similar field in struct zone */ |
1321 | unsigned long pageblock_flags[0]; | |
1322 | }; | |
1323 | ||
f46edbd1 DW |
1324 | void subsection_map_init(unsigned long pfn, unsigned long nr_pages); |
1325 | ||
d41dee36 | 1326 | struct page; |
eefa864b | 1327 | struct page_ext; |
d41dee36 | 1328 | struct mem_section { |
29751f69 AW |
1329 | /* |
1330 | * This is, logically, a pointer to an array of struct | |
1331 | * pages. However, it is stored with some other magic. | |
1332 | * (see sparse.c::sparse_init_one_section()) | |
1333 | * | |
30c253e6 AW |
1334 | * Additionally, during early boot we encode the node id of |
1335 | * the section's location here to guide allocation. |
1336 | * (see sparse.c::memory_present()) | |
1337 | * | |
29751f69 AW |
1338 | * Making it a UL at least makes someone do a cast |
1339 | * before using it wrongly. |
1340 | */ | |
1341 | unsigned long section_mem_map; | |
5c0e3066 | 1342 | |
f1eca35a | 1343 | struct mem_section_usage *usage; |
eefa864b JK |
1344 | #ifdef CONFIG_PAGE_EXTENSION |
1345 | /* | |
0c9ad804 | 1346 | * If SPARSEMEM, pgdat doesn't have a page_ext pointer. We store |
eefa864b JK |
1347 | * it in the mem_section instead. (see page_ext.h about this.) |
1348 | */ | |
1349 | struct page_ext *page_ext; | |
1350 | unsigned long pad; | |
1351 | #endif | |
55878e88 CS |
1352 | /* |
1353 | * WARNING: mem_section must be a power-of-2 in size for the | |
1354 | * calculation and use of SECTION_ROOT_MASK to make sense. | |
1355 | */ | |
d41dee36 AW |
1356 | }; |
1357 | ||
3e347261 BP |
1358 | #ifdef CONFIG_SPARSEMEM_EXTREME |
1359 | #define SECTIONS_PER_ROOT (PAGE_SIZE / sizeof (struct mem_section)) | |
1360 | #else | |
1361 | #define SECTIONS_PER_ROOT 1 | |
1362 | #endif | |
802f192e | 1363 | |
3e347261 | 1364 | #define SECTION_NR_TO_ROOT(sec) ((sec) / SECTIONS_PER_ROOT) |
0faa5638 | 1365 | #define NR_SECTION_ROOTS DIV_ROUND_UP(NR_MEM_SECTIONS, SECTIONS_PER_ROOT) |
3e347261 | 1366 | #define SECTION_ROOT_MASK (SECTIONS_PER_ROOT - 1) |
802f192e | 1367 | |
3e347261 | 1368 | #ifdef CONFIG_SPARSEMEM_EXTREME |
83e3c487 | 1369 | extern struct mem_section **mem_section; |
802f192e | 1370 | #else |
3e347261 BP |
1371 | extern struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT]; |
1372 | #endif | |
d41dee36 | 1373 | |
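/*
 * Worked example (editor's addition): with SPARSEMEM_EXTREME the roots are
 * page-sized arrays of struct mem_section. On a 64-bit build without
 * CONFIG_PAGE_EXTENSION the struct is 16 bytes, so a 4 KiB page holds
 * SECTIONS_PER_ROOT == 256 sections; section number N is then found at
 * mem_section[N / 256][N % 256], which is exactly what __nr_to_section()
 * below computes via SECTION_NR_TO_ROOT() and SECTION_ROOT_MASK.
 */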
f1eca35a DW |
1374 | static inline unsigned long *section_to_usemap(struct mem_section *ms) |
1375 | { | |
1376 | return ms->usage->pageblock_flags; | |
1377 | } | |
1378 | ||
29751f69 AW |
1379 | static inline struct mem_section *__nr_to_section(unsigned long nr) |
1380 | { | |
83e3c487 KS |
1381 | #ifdef CONFIG_SPARSEMEM_EXTREME |
1382 | if (!mem_section) | |
1383 | return NULL; | |
1384 | #endif | |
3e347261 BP |
1385 | if (!mem_section[SECTION_NR_TO_ROOT(nr)]) |
1386 | return NULL; | |
1387 | return &mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK]; | |
29751f69 | 1388 | } |
f1eca35a | 1389 | extern size_t mem_section_usage_size(void); |
29751f69 AW |
1390 | |
1391 | /* | |
1392 | * We use the lower bits of the mem_map pointer to store | |
def9b71e PT |
1393 | * a little bit of information. The pointer is calculated |
1394 | * as mem_map - section_nr_to_pfn(pnum). The result is | |
1395 | * aligned to the minimum alignment of the two values: | |
1396 | * 1. All mem_map arrays are page-aligned. | |
1397 | * 2. section_nr_to_pfn() always clears PFN_SECTION_SHIFT | |
1398 | * lowest bits. PFN_SECTION_SHIFT is arch-specific | |
1399 | * (equal to SECTION_SIZE_BITS - PAGE_SHIFT), and the |
1400 | * worst combination is powerpc with 256k pages, |
1401 | * which results in PFN_SECTION_SHIFT equal to 6. |
1402 | * To sum it up, at least 6 bits are available. | |
29751f69 | 1403 | */ |
1f90a347 DW |
1404 | #define SECTION_MARKED_PRESENT (1UL<<0) |
1405 | #define SECTION_HAS_MEM_MAP (1UL<<1) | |
1406 | #define SECTION_IS_ONLINE (1UL<<2) | |
1407 | #define SECTION_IS_EARLY (1UL<<3) | |
1408 | #define SECTION_TAINT_ZONE_DEVICE (1UL<<4) | |
1409 | #define SECTION_MAP_LAST_BIT (1UL<<5) | |
1410 | #define SECTION_MAP_MASK (~(SECTION_MAP_LAST_BIT-1)) | |
01c8d337 | 1411 | #define SECTION_NID_SHIFT 6 |
29751f69 AW |
1412 | |
1413 | static inline struct page *__section_mem_map_addr(struct mem_section *section) | |
1414 | { | |
1415 | unsigned long map = section->section_mem_map; | |
1416 | map &= SECTION_MAP_MASK; | |
1417 | return (struct page *)map; | |
1418 | } | |
1419 | ||
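/*
 * Illustrative sketch (editor's addition): the encode direction lives in
 * mm/sparse.c. Roughly, the value stored in section_mem_map is the section's
 * mem_map pointer biased down by the section's first pfn, so that
 * pfn_to_page() later reduces to "decoded base + pfn"; the flag bits above
 * are OR-ed into the low bits freed up by the alignment just described.
 * The helper below is a hypothetical, simplified restatement of that idea.
 */
static inline unsigned long example_encode_mem_map(struct page *mem_map,
						   unsigned long pnum)
{
	return (unsigned long)(mem_map - section_nr_to_pfn(pnum));
}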
540557b9 | 1420 | static inline int present_section(struct mem_section *section) |
29751f69 | 1421 | { |
802f192e | 1422 | return (section && (section->section_mem_map & SECTION_MARKED_PRESENT)); |
29751f69 AW |
1423 | } |
1424 | ||
540557b9 AW |
1425 | static inline int present_section_nr(unsigned long nr) |
1426 | { | |
1427 | return present_section(__nr_to_section(nr)); | |
1428 | } | |
1429 | ||
1430 | static inline int valid_section(struct mem_section *section) | |
29751f69 | 1431 | { |
802f192e | 1432 | return (section && (section->section_mem_map & SECTION_HAS_MEM_MAP)); |
29751f69 AW |
1433 | } |
1434 | ||
326e1b8f DW |
1435 | static inline int early_section(struct mem_section *section) |
1436 | { | |
1437 | return (section && (section->section_mem_map & SECTION_IS_EARLY)); | |
1438 | } | |
1439 | ||
29751f69 AW |
1440 | static inline int valid_section_nr(unsigned long nr) |
1441 | { | |
1442 | return valid_section(__nr_to_section(nr)); | |
1443 | } | |
1444 | ||
2d070eab MH |
1445 | static inline int online_section(struct mem_section *section) |
1446 | { | |
1447 | return (section && (section->section_mem_map & SECTION_IS_ONLINE)); | |
1448 | } | |
1449 | ||
1f90a347 DW |
1450 | static inline int online_device_section(struct mem_section *section) |
1451 | { | |
1452 | unsigned long flags = SECTION_IS_ONLINE | SECTION_TAINT_ZONE_DEVICE; | |
1453 | ||
1454 | return section && ((section->section_mem_map & flags) == flags); | |
1455 | } | |
1456 | ||
2d070eab MH |
1457 | static inline int online_section_nr(unsigned long nr) |
1458 | { | |
1459 | return online_section(__nr_to_section(nr)); | |
1460 | } | |
1461 | ||
1462 | #ifdef CONFIG_MEMORY_HOTPLUG | |
1463 | void online_mem_sections(unsigned long start_pfn, unsigned long end_pfn); | |
2d070eab MH |
1464 | void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn); |
1465 | #endif | |
2d070eab | 1466 | |
d41dee36 AW |
1467 | static inline struct mem_section *__pfn_to_section(unsigned long pfn) |
1468 | { | |
29751f69 | 1469 | return __nr_to_section(pfn_to_section_nr(pfn)); |
d41dee36 AW |
1470 | } |
1471 | ||
2491f0a2 | 1472 | extern unsigned long __highest_present_section_nr; |
c4e1be9e | 1473 | |
f46edbd1 DW |
1474 | static inline int subsection_map_index(unsigned long pfn) |
1475 | { | |
1476 | return (pfn & ~(PAGE_SECTION_MASK)) / PAGES_PER_SUBSECTION; | |
1477 | } | |
1478 | ||
1479 | #ifdef CONFIG_SPARSEMEM_VMEMMAP | |
1480 | static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn) | |
1481 | { | |
1482 | int idx = subsection_map_index(pfn); | |
1483 | ||
1484 | return test_bit(idx, ms->usage->subsection_map); | |
1485 | } | |
1486 | #else | |
1487 | static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn) | |
1488 | { | |
1489 | return 1; | |
1490 | } | |
1491 | #endif | |
1492 | ||
7b7bf499 | 1493 | #ifndef CONFIG_HAVE_ARCH_PFN_VALID |
51c656ae MR |
1494 | /** |
1495 | * pfn_valid - check if there is a valid memory map entry for a PFN | |
1496 | * @pfn: the page frame number to check | |
1497 | * | |
1498 | * Check if there is a valid memory map entry aka struct page for the @pfn. | |
1499 | * Note that availability of the memory map entry does not imply that |
1500 | * there is actual usable memory at that @pfn. The struct page may | |
1501 | * represent a hole or an unusable page frame. | |
1502 | * | |
1503 | * Return: 1 for PFNs that have memory map entries and 0 otherwise | |
1504 | */ | |
d41dee36 AW |
1505 | static inline int pfn_valid(unsigned long pfn) |
1506 | { | |
f46edbd1 DW |
1507 | struct mem_section *ms; |
1508 | ||
16c9afc7 AK |
1509 | /* |
1510 | * Ensure the upper PAGE_SHIFT bits are clear in the | |
1511 | * pfn. Else it might lead to false positives when | |
1512 | * some of the upper bits are set, but the lower bits | |
1513 | * match a valid pfn. | |
1514 | */ | |
1515 | if (PHYS_PFN(PFN_PHYS(pfn)) != pfn) | |
1516 | return 0; | |
1517 | ||
d41dee36 AW |
1518 | if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS) |
1519 | return 0; | |
f1dc0db2 | 1520 | ms = __pfn_to_section(pfn); |
f46edbd1 DW |
1521 | if (!valid_section(ms)) |
1522 | return 0; | |
1523 | /* | |
1524 | * Traditionally early sections always returned pfn_valid() for | |
1525 | * the entire section-sized span. | |
1526 | */ | |
1527 | return early_section(ms) || pfn_section_valid(ms, pfn); | |
d41dee36 | 1528 | } |
7b7bf499 | 1529 | #endif |
d41dee36 | 1530 | |
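/*
 * Illustrative usage sketch (editor's addition): walkers of arbitrary pfn
 * ranges must check pfn_valid() before touching the memory map, since a
 * range may contain holes with no struct page at all. The helper name is
 * hypothetical.
 */
static inline unsigned long example_count_mapped_pfns(unsigned long start_pfn,
						      unsigned long end_pfn)
{
	unsigned long pfn, mapped = 0;

	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
		if (!pfn_valid(pfn))
			continue;
		/* a struct page exists here, though it may describe a hole */
		mapped++;
	}
	return mapped;
}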
e03d1f78 | 1531 | static inline int pfn_in_present_section(unsigned long pfn) |
540557b9 AW |
1532 | { |
1533 | if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS) | |
1534 | return 0; | |
f1dc0db2 | 1535 | return present_section(__pfn_to_section(pfn)); |
540557b9 AW |
1536 | } |
1537 | ||
4c605881 DH |
1538 | static inline unsigned long next_present_section_nr(unsigned long section_nr) |
1539 | { | |
1540 | while (++section_nr <= __highest_present_section_nr) { | |
1541 | if (present_section_nr(section_nr)) | |
1542 | return section_nr; | |
1543 | } | |
1544 | ||
1545 | return -1; | |
1546 | } | |
1547 | ||
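/*
 * Illustrative sketch (editor's addition): present sections are typically
 * walked by chaining next_present_section_nr(); mm/sparse.c wraps a loop of
 * this shape in a helper macro. The function below is hypothetical.
 */
static inline unsigned long example_count_present_sections(void)
{
	unsigned long nr, count = 0;

	/* start below section 0; -1UL wraps to 0 on the first increment */
	for (nr = next_present_section_nr(-1UL); nr != -1UL;
	     nr = next_present_section_nr(nr))
		count++;

	return count;
}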
d41dee36 AW |
1548 | /* |
1549 | * These are _only_ used during initialisation, therefore they | |
1550 | * can use __initdata ... They could have names to indicate | |
1551 | * this restriction. | |
1552 | */ | |
1553 | #ifdef CONFIG_NUMA | |
161599ff AW |
1554 | #define pfn_to_nid(pfn) \ |
1555 | ({ \ | |
1556 | unsigned long __pfn_to_nid_pfn = (pfn); \ | |
1557 | page_to_nid(pfn_to_page(__pfn_to_nid_pfn)); \ | |
1558 | }) | |
2bdaf115 AW |
1559 | #else |
1560 | #define pfn_to_nid(pfn) (0) | |
d41dee36 AW |
1561 | #endif |
1562 | ||
d41dee36 AW |
1563 | void sparse_init(void); |
1564 | #else | |
1565 | #define sparse_init() do {} while (0) | |
28ae55c9 | 1566 | #define sparse_index_init(_sec, _nid) do {} while (0) |
e03d1f78 | 1567 | #define pfn_in_present_section pfn_valid |
f46edbd1 | 1568 | #define subsection_map_init(_pfn, _nr_pages) do {} while (0) |
d41dee36 AW |
1569 | #endif /* CONFIG_SPARSEMEM */ |
1570 | ||
97965478 | 1571 | #endif /* !__GENERATING_BOUNDS_H */ |
1da177e4 | 1572 | #endif /* !__ASSEMBLY__ */ |
1da177e4 | 1573 | #endif /* _LINUX_MMZONE_H */ |