Commit | Line | Data |
---|---|---|
b2441318 | 1 | /* SPDX-License-Identifier: GPL-2.0 */ |
1da177e4 LT |
2 | #ifndef _LINUX_MMZONE_H |
3 | #define _LINUX_MMZONE_H | |
4 | ||
1da177e4 | 5 | #ifndef __ASSEMBLY__ |
97965478 | 6 | #ifndef __GENERATING_BOUNDS_H |
1da177e4 | 7 | |
1da177e4 LT |
8 | #include <linux/spinlock.h> |
9 | #include <linux/list.h> | |
e4dde56c | 10 | #include <linux/list_nulls.h> |
1da177e4 | 11 | #include <linux/wait.h> |
e815af95 | 12 | #include <linux/bitops.h> |
1da177e4 LT |
13 | #include <linux/cache.h> |
14 | #include <linux/threads.h> | |
15 | #include <linux/numa.h> | |
16 | #include <linux/init.h> | |
bdc8cb98 | 17 | #include <linux/seqlock.h> |
8357f869 | 18 | #include <linux/nodemask.h> |
835c134e | 19 | #include <linux/pageblock-flags.h> |
bbeae5b0 | 20 | #include <linux/page-flags-layout.h> |
60063497 | 21 | #include <linux/atomic.h> |
b03641af DW |
22 | #include <linux/mm_types.h> |
23 | #include <linux/page-flags.h> | |
dbbee9d5 | 24 | #include <linux/local_lock.h> |
b5ba474f | 25 | #include <linux/zswap.h> |
93ff66bf | 26 | #include <asm/page.h> |
1da177e4 LT |
27 | |
28 | /* Free memory management - zoned buddy allocator. */ | |
0192445c | 29 | #ifndef CONFIG_ARCH_FORCE_MAX_ORDER |
5e0a760b | 30 | #define MAX_PAGE_ORDER 10 |
1da177e4 | 31 | #else |
5e0a760b | 32 | #define MAX_PAGE_ORDER CONFIG_ARCH_FORCE_MAX_ORDER |
1da177e4 | 33 | #endif |
5e0a760b | 34 | #define MAX_ORDER_NR_PAGES (1 << MAX_PAGE_ORDER) |
1da177e4 | 35 | |
3f6dac0f KS |
36 | #define IS_MAX_ORDER_ALIGNED(pfn) IS_ALIGNED(pfn, MAX_ORDER_NR_PAGES) |
37 | ||
5e0a760b | 38 | #define NR_PAGE_ORDERS (MAX_PAGE_ORDER + 1) |
fd377218 | 39 | |
e13e7922 JY |
40 | /* Defines the order for the number of pages that have a migrate type. */ |
41 | #ifndef CONFIG_PAGE_BLOCK_ORDER | |
42 | #define PAGE_BLOCK_ORDER MAX_PAGE_ORDER | |
43 | #else | |
44 | #define PAGE_BLOCK_ORDER CONFIG_PAGE_BLOCK_ORDER | |
45 | #endif /* CONFIG_PAGE_BLOCK_ORDER */ | |
46 | ||
47 | /* | |
48 | * MAX_PAGE_ORDER, which defines the maximum order of pages allocated by the |
49 | * buddy allocator, must be larger than or equal to PAGE_BLOCK_ORDER, which |
50 | * defines the order for the number of pages that can have a migrate type. |
51 | */ | |
52 | #if (PAGE_BLOCK_ORDER > MAX_PAGE_ORDER) | |
53 | #error MAX_PAGE_ORDER must be >= PAGE_BLOCK_ORDER | |
54 | #endif | |
55 | ||
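To make the order arithmetic above concrete, here is a minimal standalone C sketch (not kernel code) that evaluates these macros for the default configuration; the 4 KiB page size and the default values of MAX_PAGE_ORDER and PAGE_BLOCK_ORDER are assumptions for the example.

```c
/* Standalone sketch (not kernel code): how the order macros above relate,
 * assuming the default MAX_PAGE_ORDER of 10 and 4 KiB pages. */
#include <stdio.h>

#define PAGE_SIZE_BYTES    4096UL          /* assumption: 4 KiB base pages */
#define MAX_PAGE_ORDER     10              /* default when CONFIG_ARCH_FORCE_MAX_ORDER is unset */
#define MAX_ORDER_NR_PAGES (1UL << MAX_PAGE_ORDER)
#define NR_PAGE_ORDERS     (MAX_PAGE_ORDER + 1)
#define PAGE_BLOCK_ORDER   MAX_PAGE_ORDER  /* default when CONFIG_PAGE_BLOCK_ORDER is unset */

int main(void)
{
	/* The largest buddy block: 2^MAX_PAGE_ORDER pages. */
	printf("largest allocation: %lu pages (%lu KiB)\n",
	       MAX_ORDER_NR_PAGES, MAX_ORDER_NR_PAGES * PAGE_SIZE_BYTES / 1024);
	/* free_area[] and per-order loops span orders 0..MAX_PAGE_ORDER. */
	printf("free lists per migratetype: %d (orders 0..%d)\n",
	       NR_PAGE_ORDERS, MAX_PAGE_ORDER);
	/* The build would fail if PAGE_BLOCK_ORDER exceeded MAX_PAGE_ORDER. */
	printf("pageblock size: %lu pages\n", 1UL << PAGE_BLOCK_ORDER);
	return 0;
}
```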
5ad333eb AW |
56 | /* |
57 | * PAGE_ALLOC_COSTLY_ORDER is the order at which allocations are deemed | |
58 | * costly to service. That is between allocation orders which should | |
35fca53e | 59 | * coalesce naturally under reasonable reclaim pressure and those which |
5ad333eb AW |
60 | * will not. |
61 | */ | |
62 | #define PAGE_ALLOC_COSTLY_ORDER 3 | |
63 | ||
a6ffdc07 | 64 | enum migratetype { |
47118af0 | 65 | MIGRATE_UNMOVABLE, |
47118af0 | 66 | MIGRATE_MOVABLE, |
016c13da | 67 | MIGRATE_RECLAIMABLE, |
0aaa29a5 MG |
68 | MIGRATE_PCPTYPES, /* the number of types on the pcp lists */ |
69 | MIGRATE_HIGHATOMIC = MIGRATE_PCPTYPES, | |
47118af0 MN |
70 | #ifdef CONFIG_CMA |
71 | /* | |
72 | * MIGRATE_CMA migration type is designed to mimic the way | |
73 | * ZONE_MOVABLE works. Only movable pages can be allocated | |
74 | * from MIGRATE_CMA pageblocks and the page allocator never |
75 | * implicitly changes the migration type of a MIGRATE_CMA pageblock. |
76 | * |
77 | * The way to use it is to change the migratetype of a range of |
78 | * pageblocks to MIGRATE_CMA, which can be done by the |
11ac3e87 | 79 | * __free_pageblock_cma() function. |
47118af0 MN |
80 | */ |
81 | MIGRATE_CMA, | |
82 | #endif | |
194159fb | 83 | #ifdef CONFIG_MEMORY_ISOLATION |
47118af0 | 84 | MIGRATE_ISOLATE, /* can't allocate from here */ |
194159fb | 85 | #endif |
47118af0 MN |
86 | MIGRATE_TYPES |
87 | }; | |
88 | ||
60f30350 | 89 | /* In mm/page_alloc.c; keep in sync also with show_migration_types() there */ |
c999fbd3 | 90 | extern const char * const migratetype_names[MIGRATE_TYPES]; |
60f30350 | 91 | |
47118af0 MN |
92 | #ifdef CONFIG_CMA |
93 | # define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA) | |
7c15d9bb | 94 | # define is_migrate_cma_page(_page) (get_pageblock_migratetype(_page) == MIGRATE_CMA) |
fae7d834 MWO |
95 | # define is_migrate_cma_folio(folio, pfn) (MIGRATE_CMA == \ |
96 | get_pfnblock_flags_mask(&folio->page, pfn, MIGRATETYPE_MASK)) | |
47118af0 MN |
97 | #else |
98 | # define is_migrate_cma(migratetype) false | |
7c15d9bb | 99 | # define is_migrate_cma_page(_page) false |
fae7d834 | 100 | # define is_migrate_cma_folio(folio, pfn) false |
47118af0 | 101 | #endif |
b2a0ac88 | 102 | |
b682debd VB |
103 | static inline bool is_migrate_movable(int mt) |
104 | { | |
105 | return is_migrate_cma(mt) || mt == MIGRATE_MOVABLE; | |
106 | } | |
107 | ||
1dd214b8 ZY |
108 | /* |
109 | * Check whether a migratetype can be merged with another migratetype. | |
110 | * | |
111 | * It is only mergeable when it can fall back to other migratetypes for | |
112 | * allocation. See fallbacks[MIGRATE_TYPES][3] in page_alloc.c. | |
113 | */ | |
114 | static inline bool migratetype_is_mergeable(int mt) | |
115 | { | |
116 | return mt < MIGRATE_PCPTYPES; | |
117 | } | |
118 | ||
b2a0ac88 | 119 | #define for_each_migratetype_order(order, type) \ |
fd377218 | 120 | for (order = 0; order < NR_PAGE_ORDERS; order++) \ |
b2a0ac88 MG |
121 | for (type = 0; type < MIGRATE_TYPES; type++) |
122 | ||
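The for_each_migratetype_order() macro simply nests a per-order loop around a per-migratetype loop. Below is a minimal standalone sketch of that traversal; the per-type counters are a simplification of struct free_area defined below (which keeps one nr_free plus per-type list heads), and all counts are invented.

```c
/* Standalone sketch: how for_each_migratetype_order() walks every
 * (order, migratetype) pair of the buddy free lists. Counts are made up. */
#include <stdio.h>

#define MAX_PAGE_ORDER 10
#define NR_PAGE_ORDERS (MAX_PAGE_ORDER + 1)
enum { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE, MIGRATE_RECLAIMABLE, MIGRATE_TYPES };

struct free_area {
	unsigned long nr_free[MIGRATE_TYPES];	/* stand-in for per-type list lengths */
};

int main(void)
{
	struct free_area free_area[NR_PAGE_ORDERS] = { 0 };
	int order, type;
	unsigned long total = 0;

	free_area[0].nr_free[MIGRATE_MOVABLE] = 7;	/* pretend data */
	free_area[3].nr_free[MIGRATE_UNMOVABLE] = 2;

	/* Mirrors: for_each_migratetype_order(order, type) */
	for (order = 0; order < NR_PAGE_ORDERS; order++)
		for (type = 0; type < MIGRATE_TYPES; type++)
			total += free_area[order].nr_free[type] << order;

	printf("free pages across all orders/types: %lu\n", total);
	return 0;
}
```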
467c996c MG |
123 | extern int page_group_by_mobility_disabled; |
124 | ||
d38ac97f | 125 | #define MIGRATETYPE_MASK ((1UL << PB_migratetype_bits) - 1) |
e58469ba | 126 | |
dc4b0caf | 127 | #define get_pageblock_migratetype(page) \ |
535b81e2 | 128 | get_pfnblock_flags_mask(page, page_to_pfn(page), MIGRATETYPE_MASK) |
dc4b0caf | 129 | |
28fb54f6 VMO |
130 | #define folio_migratetype(folio) \ |
131 | get_pfnblock_flags_mask(&folio->page, folio_pfn(folio), \ | |
132 | MIGRATETYPE_MASK) | |
1da177e4 | 133 | struct free_area { |
b2a0ac88 | 134 | struct list_head free_list[MIGRATE_TYPES]; |
1da177e4 LT |
135 | unsigned long nr_free; |
136 | }; | |
137 | ||
138 | struct pglist_data; | |
139 | ||
3a321d2a KW |
140 | #ifdef CONFIG_NUMA |
141 | enum numa_stat_item { | |
142 | NUMA_HIT, /* allocated in intended node */ | |
143 | NUMA_MISS, /* allocated in non intended node */ | |
144 | NUMA_FOREIGN, /* was intended here, hit elsewhere */ | |
145 | NUMA_INTERLEAVE_HIT, /* interleaver preferred this zone */ | |
146 | NUMA_LOCAL, /* allocation from local node */ | |
147 | NUMA_OTHER, /* allocation from other node */ | |
f19298b9 | 148 | NR_VM_NUMA_EVENT_ITEMS |
3a321d2a KW |
149 | }; |
150 | #else | |
f19298b9 | 151 | #define NR_VM_NUMA_EVENT_ITEMS 0 |
3a321d2a KW |
152 | #endif |
153 | ||
2244b95a | 154 | enum zone_stat_item { |
51ed4491 | 155 | /* First 128 byte cacheline (assuming 64 bit words) */ |
d23ad423 | 156 | NR_FREE_PAGES, |
a211c655 | 157 | NR_FREE_PAGES_BLOCKS, |
71c799f4 MK |
158 | NR_ZONE_LRU_BASE, /* Used only for compaction and reclaim retry */ |
159 | NR_ZONE_INACTIVE_ANON = NR_ZONE_LRU_BASE, | |
160 | NR_ZONE_ACTIVE_ANON, | |
161 | NR_ZONE_INACTIVE_FILE, | |
162 | NR_ZONE_ACTIVE_FILE, | |
163 | NR_ZONE_UNEVICTABLE, | |
5a1c84b4 | 164 | NR_ZONE_WRITE_PENDING, /* Count of dirty, writeback and unstable pages */ |
5344b7e6 | 165 | NR_MLOCK, /* mlock()ed pages found and moved off LRU */ |
c6a7f572 | 166 | /* Second 128 byte cacheline */ |
91537fee MK |
167 | #if IS_ENABLED(CONFIG_ZSMALLOC) |
168 | NR_ZSPAGES, /* allocated in zsmalloc */ | |
ca889e6c | 169 | #endif |
d1ce749a | 170 | NR_FREE_CMA_PAGES, |
dcdfdd40 KS |
171 | #ifdef CONFIG_UNACCEPTED_MEMORY |
172 | NR_UNACCEPTED, | |
173 | #endif | |
2244b95a CL |
174 | NR_VM_ZONE_STAT_ITEMS }; |
175 | ||
75ef7184 | 176 | enum node_stat_item { |
599d0c95 MG |
177 | NR_LRU_BASE, |
178 | NR_INACTIVE_ANON = NR_LRU_BASE, /* must match order of LRU_[IN]ACTIVE */ | |
179 | NR_ACTIVE_ANON, /* " " " " " */ | |
180 | NR_INACTIVE_FILE, /* " " " " " */ | |
181 | NR_ACTIVE_FILE, /* " " " " " */ | |
182 | NR_UNEVICTABLE, /* " " " " " */ | |
d42f3245 RG |
183 | NR_SLAB_RECLAIMABLE_B, |
184 | NR_SLAB_UNRECLAIMABLE_B, | |
599d0c95 MG |
185 | NR_ISOLATED_ANON, /* Temporary isolated pages from anon lru */ |
186 | NR_ISOLATED_FILE, /* Temporary isolated pages from file lru */ | |
68d48e6a | 187 | WORKINGSET_NODES, |
170b04b7 JK |
188 | WORKINGSET_REFAULT_BASE, |
189 | WORKINGSET_REFAULT_ANON = WORKINGSET_REFAULT_BASE, | |
190 | WORKINGSET_REFAULT_FILE, | |
191 | WORKINGSET_ACTIVATE_BASE, | |
192 | WORKINGSET_ACTIVATE_ANON = WORKINGSET_ACTIVATE_BASE, | |
193 | WORKINGSET_ACTIVATE_FILE, | |
194 | WORKINGSET_RESTORE_BASE, | |
195 | WORKINGSET_RESTORE_ANON = WORKINGSET_RESTORE_BASE, | |
196 | WORKINGSET_RESTORE_FILE, | |
1e6b1085 | 197 | WORKINGSET_NODERECLAIM, |
4b9d0fab | 198 | NR_ANON_MAPPED, /* Mapped anonymous pages */ |
50658e2e MG |
199 | NR_FILE_MAPPED, /* pagecache pages mapped into pagetables. |
200 | only modified from process context */ | |
11fb9989 MG |
201 | NR_FILE_PAGES, |
202 | NR_FILE_DIRTY, | |
203 | NR_WRITEBACK, | |
204 | NR_WRITEBACK_TEMP, /* Writeback using temporary buffers */ | |
205 | NR_SHMEM, /* shmem pages (includes tmpfs/GEM pages) */ |
206 | NR_SHMEM_THPS, | |
207 | NR_SHMEM_PMDMAPPED, | |
60fbf0ab SL |
208 | NR_FILE_THPS, |
209 | NR_FILE_PMDMAPPED, | |
11fb9989 | 210 | NR_ANON_THPS, |
c4a25635 MG |
211 | NR_VMSCAN_WRITE, |
212 | NR_VMSCAN_IMMEDIATE, /* Prioritise for reclaim when writeback ends */ | |
213 | NR_DIRTIED, /* page dirtyings since bootup */ | |
214 | NR_WRITTEN, /* page writings since bootup */ | |
8cd7c588 | 215 | NR_THROTTLED_WRITTEN, /* NR_WRITTEN while reclaim throttled */ |
b29940c1 | 216 | NR_KERNEL_MISC_RECLAIMABLE, /* reclaimable non-slab kernel pages */ |
1970dc6f JH |
217 | NR_FOLL_PIN_ACQUIRED, /* via: pin_user_page(), gup flag: FOLL_PIN */ |
218 | NR_FOLL_PIN_RELEASED, /* pages returned via unpin_user_page() */ | |
991e7673 SB |
219 | NR_KERNEL_STACK_KB, /* measured in KiB */ |
220 | #if IS_ENABLED(CONFIG_SHADOW_CALL_STACK) | |
221 | NR_KERNEL_SCS_KB, /* measured in KiB */ | |
222 | #endif | |
f0c0c115 | 223 | NR_PAGETABLE, /* used for pagetables */ |
212c5c07 | 224 | NR_SECONDARY_PAGETABLE, /* secondary pagetables, KVM & IOMMU */ |
bd3520a9 PT |
225 | #ifdef CONFIG_IOMMU_SUPPORT |
226 | NR_IOMMU_PAGES, /* # of pages allocated by IOMMU */ | |
227 | #endif | |
b6038942 SB |
228 | #ifdef CONFIG_SWAP |
229 | NR_SWAPCACHE, | |
e39bb6be HY |
230 | #endif |
231 | #ifdef CONFIG_NUMA_BALANCING | |
232 | PGPROMOTE_SUCCESS, /* promote successfully */ | |
c6833e10 | 233 | PGPROMOTE_CANDIDATE, /* candidate pages to promote */ |
b805ab3c | 234 | #endif |
23e9f013 LZ |
235 | /* PGDEMOTE_*: pages demoted */ |
236 | PGDEMOTE_KSWAPD, | |
237 | PGDEMOTE_DIRECT, | |
238 | PGDEMOTE_KHUGEPAGED, | |
e452872b | 239 | PGDEMOTE_PROACTIVE, |
05d4532b JH |
240 | #ifdef CONFIG_HUGETLB_PAGE |
241 | NR_HUGETLB, | |
242 | #endif | |
835de376 | 243 | NR_BALLOON_PAGES, |
75ef7184 MG |
244 | NR_VM_NODE_STAT_ITEMS |
245 | }; | |
246 | ||
69473e5d MS |
247 | /* |
248 | * Returns true if the item should be printed in THPs (/proc/vmstat | |
249 | * currently prints the number of anon, file and shmem THPs, but the |
250 | * item is charged in pages). |
251 | */ | |
252 | static __always_inline bool vmstat_item_print_in_thp(enum node_stat_item item) | |
253 | { | |
254 | if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) | |
255 | return false; | |
256 | ||
bf9ecead | 257 | return item == NR_ANON_THPS || |
57b2847d | 258 | item == NR_FILE_THPS || |
a1528e21 | 259 | item == NR_SHMEM_THPS || |
380780e7 MS |
260 | item == NR_SHMEM_PMDMAPPED || |
261 | item == NR_FILE_PMDMAPPED; | |
69473e5d MS |
262 | } |
263 | ||
ea426c2a RG |
264 | /* |
265 | * Returns true if the value is measured in bytes (most vmstat values are | |
266 | * measured in pages). This defines the API part, the internal representation | |
267 | * might be different. | |
268 | */ | |
269 | static __always_inline bool vmstat_item_in_bytes(int idx) | |
270 | { | |
d42f3245 RG |
271 | /* |
272 | * Global and per-node slab counters track slab pages. | |
273 | * It's expected that changes are multiples of PAGE_SIZE. | |
274 | * Internally values are stored in pages. | |
275 | * | |
276 | * Per-memcg and per-lruvec counters track memory, consumed | |
277 | * by individual slab objects. These counters are actually | |
278 | * byte-precise. | |
279 | */ | |
280 | return (idx == NR_SLAB_RECLAIMABLE_B || | |
281 | idx == NR_SLAB_UNRECLAIMABLE_B); | |
ea426c2a RG |
282 | } |
283 | ||
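As a hedged illustration of the byte/page split described in the comment above, the sketch below converts a byte-precise slab counter to pages for reporting; the PAGE_SHIFT value and the raw numbers are assumptions, and the kernel's internal storage is not reproduced.

```c
/* Standalone sketch: byte-based vmstat items (the *_B slab counters) vs
 * page-based items, as described above. Values are invented. */
#include <stdio.h>
#include <stdbool.h>

#define PAGE_SHIFT 12	/* assumption: 4 KiB pages */

enum node_stat_item { NR_FILE_PAGES, NR_SLAB_RECLAIMABLE_B, NR_SLAB_UNRECLAIMABLE_B };

static bool vmstat_item_in_bytes(int idx)
{
	return idx == NR_SLAB_RECLAIMABLE_B || idx == NR_SLAB_UNRECLAIMABLE_B;
}

static unsigned long to_pages(int idx, unsigned long raw)
{
	/* Byte-precise items are scaled down before being reported in pages. */
	return vmstat_item_in_bytes(idx) ? raw >> PAGE_SHIFT : raw;
}

int main(void)
{
	printf("slab reclaimable: %lu pages\n",
	       to_pages(NR_SLAB_RECLAIMABLE_B, 8UL << 20));	/* 8 MiB -> 2048 pages */
	printf("file pages: %lu pages\n", to_pages(NR_FILE_PAGES, 1234));
	return 0;
}
```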
4f98a2fe RR |
284 | /* |
285 | * We do arithmetic on the LRU lists in various places in the code, | |
286 | * so it is important to keep the active lists LRU_ACTIVE higher in | |
287 | * the array than the corresponding inactive lists, and to keep | |
288 | * the *_FILE lists LRU_FILE higher than the corresponding _ANON lists. | |
289 | * | |
290 | * This has to be kept in sync with the statistics in zone_stat_item | |
291 | * above and the descriptions in vmstat_text in mm/vmstat.c | |
292 | */ | |
293 | #define LRU_BASE 0 | |
294 | #define LRU_ACTIVE 1 | |
295 | #define LRU_FILE 2 | |
296 | ||
b69408e8 | 297 | enum lru_list { |
4f98a2fe RR |
298 | LRU_INACTIVE_ANON = LRU_BASE, |
299 | LRU_ACTIVE_ANON = LRU_BASE + LRU_ACTIVE, | |
300 | LRU_INACTIVE_FILE = LRU_BASE + LRU_FILE, | |
301 | LRU_ACTIVE_FILE = LRU_BASE + LRU_FILE + LRU_ACTIVE, | |
894bc310 | 302 | LRU_UNEVICTABLE, |
894bc310 LS |
303 | NR_LRU_LISTS |
304 | }; | |
b69408e8 | 305 | |
8cd7c588 MG |
306 | enum vmscan_throttle_state { |
307 | VMSCAN_THROTTLE_WRITEBACK, | |
d818fca1 | 308 | VMSCAN_THROTTLE_ISOLATED, |
69392a40 | 309 | VMSCAN_THROTTLE_NOPROGRESS, |
1b4e3f26 | 310 | VMSCAN_THROTTLE_CONGESTED, |
8cd7c588 MG |
311 | NR_VMSCAN_THROTTLE, |
312 | }; | |
313 | ||
4111304d | 314 | #define for_each_lru(lru) for (lru = 0; lru < NR_LRU_LISTS; lru++) |
b69408e8 | 315 | |
4111304d | 316 | #define for_each_evictable_lru(lru) for (lru = 0; lru <= LRU_ACTIVE_FILE; lru++) |
894bc310 | 317 | |
b91ac374 | 318 | static inline bool is_file_lru(enum lru_list lru) |
4f98a2fe | 319 | { |
4111304d | 320 | return (lru == LRU_INACTIVE_FILE || lru == LRU_ACTIVE_FILE); |
4f98a2fe RR |
321 | } |
322 | ||
b91ac374 | 323 | static inline bool is_active_lru(enum lru_list lru) |
b69408e8 | 324 | { |
4111304d | 325 | return (lru == LRU_ACTIVE_ANON || lru == LRU_ACTIVE_FILE); |
b69408e8 CL |
326 | } |
327 | ||
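A small standalone sketch of the LRU index arithmetic and the evictable-list walk defined above, mirroring for_each_evictable_lru(), is_file_lru() and is_active_lru():

```c
/* Standalone sketch: the LRU index arithmetic used by enum lru_list and the
 * is_file_lru()/is_active_lru() helpers above. */
#include <stdio.h>
#include <stdbool.h>

#define LRU_BASE   0
#define LRU_ACTIVE 1
#define LRU_FILE   2

enum lru_list {
	LRU_INACTIVE_ANON = LRU_BASE,
	LRU_ACTIVE_ANON   = LRU_BASE + LRU_ACTIVE,
	LRU_INACTIVE_FILE = LRU_BASE + LRU_FILE,
	LRU_ACTIVE_FILE   = LRU_BASE + LRU_FILE + LRU_ACTIVE,
	LRU_UNEVICTABLE,
	NR_LRU_LISTS
};

static bool is_file_lru(enum lru_list lru)   { return lru == LRU_INACTIVE_FILE || lru == LRU_ACTIVE_FILE; }
static bool is_active_lru(enum lru_list lru) { return lru == LRU_ACTIVE_ANON || lru == LRU_ACTIVE_FILE; }

int main(void)
{
	enum lru_list lru;

	/* Mirrors for_each_evictable_lru(): everything up to LRU_ACTIVE_FILE. */
	for (lru = 0; lru <= LRU_ACTIVE_FILE; lru++)
		printf("lru %d: %s, %s\n", lru,
		       is_file_lru(lru) ? "file" : "anon",
		       is_active_lru(lru) ? "active" : "inactive");
	return 0;
}
```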
e9c2dbc8 YY |
328 | #define WORKINGSET_ANON 0 |
329 | #define WORKINGSET_FILE 1 | |
ed017373 YZ |
330 | #define ANON_AND_FILE 2 |
331 | ||
1b05117d | 332 | enum lruvec_flags { |
1bc545bf YA |
333 | /* |
334 | * An lruvec has many dirty pages backed by a congested BDI: | |
335 | * 1. LRUVEC_CGROUP_CONGESTED is set by cgroup-level reclaim. | |
336 | * It can be cleared by cgroup reclaim or kswapd. | |
337 | * 2. LRUVEC_NODE_CONGESTED is set by kswapd node-level reclaim. | |
338 | * It can only be cleared by kswapd. | |
339 | * | |
340 | * Essentially, kswapd can unthrottle an lruvec throttled by cgroup | |
341 | * reclaim, but not vice versa. This only applies to the root cgroup. | |
342 | * The goal is to prevent cgroup reclaim on the root cgroup (e.g. | |
343 | * memory.reclaim) from unthrottling an unbalanced node (that was throttled |
344 | * by kswapd). | |
345 | */ | |
346 | LRUVEC_CGROUP_CONGESTED, | |
347 | LRUVEC_NODE_CONGESTED, | |
1b05117d JW |
348 | }; |
349 | ||
ec1c86b2 YZ |
350 | #endif /* !__GENERATING_BOUNDS_H */ |
351 | ||
352 | /* | |
4d5d14a0 | 353 | * Evictable folios are divided into multiple generations. The youngest and the |
ec1c86b2 YZ |
354 | * oldest generation numbers, max_seq and min_seq, are monotonically increasing. |
355 | * They form a sliding window of a variable size [MIN_NR_GENS, MAX_NR_GENS]. An | |
356 | * offset within MAX_NR_GENS, i.e., gen, indexes the LRU list of the | |
357 | * corresponding generation. The gen counter in folio->flags stores gen+1 while | |
4d5d14a0 | 358 | * a folio is on one of lrugen->folios[]. Otherwise it stores 0. |
ec1c86b2 | 359 | * |
4d5d14a0 YZ |
360 | * After a folio is faulted in, the aging needs to check the accessed bit at |
361 | * least twice before handing this folio over to the eviction. The first check | |
362 | * clears the accessed bit from the initial fault; the second check makes sure | |
363 | * this folio hasn't been used since then. This process, AKA second chance, | |
364 | * requires a minimum of two generations, hence MIN_NR_GENS. And to maintain ABI | |
365 | * compatibility with the active/inactive LRU, e.g., /proc/vmstat, these two | |
366 | * generations are considered active; the rest of generations, if they exist, | |
367 | * are considered inactive. See lru_gen_is_active(). | |
ec1c86b2 | 368 | * |
4d5d14a0 YZ |
369 | * PG_active is always cleared while a folio is on one of lrugen->folios[] so |
370 | * that the sliding window need not worry about it. And it's set again when |
371 | * a folio considered active is isolated for non-reclaiming purposes, e.g., | |
372 | * migration. See lru_gen_add_folio() and lru_gen_del_folio(). | |
ec1c86b2 YZ |
373 | * |
374 | * MAX_NR_GENS is set to 4 so that the multi-gen LRU can support twice the | |
375 | * number of categories of the active/inactive LRU when keeping track of | |
376 | * accesses through page tables. This requires order_base_2(MAX_NR_GENS+1) bits | |
4d5d14a0 | 377 | * in folio->flags, masked by LRU_GEN_MASK. |
ec1c86b2 YZ |
378 | */ |
379 | #define MIN_NR_GENS 2U | |
380 | #define MAX_NR_GENS 4U | |
381 | ||
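As a rough illustration of the sliding window described above, the sketch below maps monotonically increasing sequence numbers onto MAX_NR_GENS lists; it mirrors the seq % MAX_NR_GENS indexing used by the multi-gen LRU, and the sequence values are invented.

```c
/* Standalone sketch: how a monotonically increasing generation number maps
 * onto one of MAX_NR_GENS lists, and how wide the [min_seq, max_seq] window
 * may get. */
#include <stdio.h>

#define MIN_NR_GENS 2U
#define MAX_NR_GENS 4U

int main(void)
{
	unsigned long max_seq = 105, min_seq = 103;	/* invented sequence numbers */
	unsigned long seq;

	for (seq = min_seq; seq <= max_seq; seq++)
		printf("seq %lu -> lrugen->folios[%lu]\n", seq, seq % MAX_NR_GENS);

	/* The window size stays between MIN_NR_GENS and MAX_NR_GENS. */
	printf("window: %lu generations (min %u, max %u)\n",
	       max_seq - min_seq + 1, MIN_NR_GENS, MAX_NR_GENS);
	return 0;
}
```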
ac35a490 | 382 | /* |
4d5d14a0 YZ |
383 | * Each generation is divided into multiple tiers. A folio accessed N times |
384 | * through file descriptors is in tier order_base_2(N). A folio in the first | |
385 | * tier (N=0,1) is marked by PG_referenced unless it was faulted in through page | |
386 | * tables or read ahead. A folio in the last tier (MAX_NR_TIERS-1) is marked by | |
387 | * PG_workingset. A folio in any other tier (1<N<5) between the first and last | |
388 | * is marked by additional bits of LRU_REFS_WIDTH in folio->flags. | |
ac35a490 YZ |
389 | * |
390 | * In contrast to moving across generations which requires the LRU lock, moving | |
391 | * across tiers only involves atomic operations on folio->flags and therefore | |
392 | * has a negligible cost in the buffered access path. In the eviction path, | |
4d5d14a0 YZ |
393 | * comparisons of refaulted/(evicted+protected) from the first tier and the rest |
394 | * infer whether folios accessed multiple times through file descriptors are | |
395 | * statistically hot and thus worth protecting. | |
ac35a490 YZ |
396 | * |
397 | * MAX_NR_TIERS is set to 4 so that the multi-gen LRU can support twice the | |
398 | * number of categories of the active/inactive LRU when keeping track of | |
399 | * accesses through file descriptors. This uses MAX_NR_TIERS-2 spare bits in | |
4d5d14a0 | 400 | * folio->flags, masked by LRU_REFS_MASK. |
ac35a490 YZ |
401 | */ |
402 | #define MAX_NR_TIERS 4U | |
403 | ||
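To illustrate the tier mapping, here is a standalone sketch of tier = order_base_2(N) capped at MAX_NR_TIERS - 1; the helper below is a plain C stand-in for the kernel's order_base_2(), and the access counts are made up.

```c
/* Standalone sketch: mapping the number of file-descriptor accesses N to a
 * tier via order_base_2(N) (i.e. ceil(log2(N))), capped at MAX_NR_TIERS - 1. */
#include <stdio.h>

#define MAX_NR_TIERS 4U

static unsigned int order_base_2(unsigned int n)	/* ceil(log2(n)), 0 for n <= 1 */
{
	unsigned int order = 0;

	while ((1U << order) < n)
		order++;
	return order;
}

int main(void)
{
	unsigned int n;

	for (n = 0; n <= 8; n++) {
		unsigned int tier = order_base_2(n);

		if (tier > MAX_NR_TIERS - 1)
			tier = MAX_NR_TIERS - 1;
		printf("accessed %u times -> tier %u\n", n, tier);
	}
	return 0;
}
```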
ec1c86b2 YZ |
404 | #ifndef __GENERATING_BOUNDS_H |
405 | ||
ec1c86b2 YZ |
406 | #define LRU_GEN_MASK ((BIT(LRU_GEN_WIDTH) - 1) << LRU_GEN_PGOFF) |
407 | #define LRU_REFS_MASK ((BIT(LRU_REFS_WIDTH) - 1) << LRU_REFS_PGOFF) | |
408 | ||
4d5d14a0 YZ |
409 | /* |
410 | * For folios accessed multiple times through file descriptors, | |
411 | * lru_gen_inc_refs() sets additional bits of LRU_REFS_WIDTH in folio->flags | |
412 | * after PG_referenced, then PG_workingset after LRU_REFS_WIDTH. After all its | |
413 | * bits are set, i.e., LRU_REFS_FLAGS|BIT(PG_workingset), a folio is lazily | |
414 | * promoted into the second oldest generation in the eviction path. And when | |
415 | * folio_inc_gen() does that, it clears LRU_REFS_FLAGS so that | |
416 | * lru_gen_inc_refs() can start over. Note that for this case, LRU_REFS_MASK is | |
417 | * only valid when PG_referenced is set. | |
418 | * | |
419 | * For folios accessed multiple times through page tables, folio_update_gen() | |
420 | * from a page table walk or lru_gen_set_refs() from a rmap walk sets | |
421 | * PG_referenced after the accessed bit is cleared for the first time. | |
422 | * Thereafter, those two paths set PG_workingset and promote folios to the | |
423 | * youngest generation. Like folio_inc_gen(), folio_update_gen() also clears | |
424 | * PG_referenced. Note that for this case, LRU_REFS_MASK is not used. | |
425 | * | |
426 | * For both cases above, after PG_workingset is set on a folio, it remains until | |
427 | * this folio is either reclaimed, or "deactivated" by lru_gen_clear_refs(). It | |
428 | * can be set again if lru_gen_test_recent() returns true upon a refault. | |
429 | */ | |
430 | #define LRU_REFS_FLAGS (LRU_REFS_MASK | BIT(PG_referenced)) | |
431 | ||
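The masks above carve fixed-width fields out of folio->flags. The standalone sketch below demonstrates the same shift-and-mask pattern with invented widths and offsets; the real LRU_GEN_WIDTH, LRU_REFS_WIDTH and *_PGOFF values come from linux/page-flags-layout.h and the layout defines later in this file.

```c
/* Standalone sketch: how LRU_GEN_MASK / LRU_REFS_MASK style masks carve
 * fields out of a flags word. The widths and offsets here are invented for
 * illustration only. */
#include <stdio.h>

#define BIT(n)		(1UL << (n))
#define LRU_GEN_WIDTH	3	/* assumption for the demo */
#define LRU_REFS_WIDTH	2	/* assumption for the demo */
#define LRU_REFS_PGOFF	20	/* assumption for the demo */
#define LRU_GEN_PGOFF	(LRU_REFS_PGOFF + LRU_REFS_WIDTH)

#define LRU_GEN_MASK	((BIT(LRU_GEN_WIDTH) - 1) << LRU_GEN_PGOFF)
#define LRU_REFS_MASK	((BIT(LRU_REFS_WIDTH) - 1) << LRU_REFS_PGOFF)

int main(void)
{
	unsigned long flags = 0;

	/* Store gen+1 = 3 and refs = 2 into their fields, then read them back. */
	flags |= (3UL << LRU_GEN_PGOFF) & LRU_GEN_MASK;
	flags |= (2UL << LRU_REFS_PGOFF) & LRU_REFS_MASK;
	printf("gen+1 = %lu, refs = %lu\n",
	       (flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF,
	       (flags & LRU_REFS_MASK) >> LRU_REFS_PGOFF);
	return 0;
}
```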
432 | struct lruvec; | |
433 | struct page_vma_mapped_walk; | |
434 | ||
ec1c86b2 YZ |
435 | #ifdef CONFIG_LRU_GEN |
436 | ||
437 | enum { | |
438 | LRU_GEN_ANON, | |
439 | LRU_GEN_FILE, | |
440 | }; | |
441 | ||
354ed597 YZ |
442 | enum { |
443 | LRU_GEN_CORE, | |
444 | LRU_GEN_MM_WALK, | |
445 | LRU_GEN_NONLEAF_YOUNG, | |
446 | NR_LRU_GEN_CAPS | |
447 | }; | |
448 | ||
ac35a490 YZ |
449 | #define MIN_LRU_BATCH BITS_PER_LONG |
450 | #define MAX_LRU_BATCH (MIN_LRU_BATCH * 64) | |
451 | ||
452 | /* whether to keep historical stats from evicted generations */ | |
453 | #ifdef CONFIG_LRU_GEN_STATS | |
454 | #define NR_HIST_GENS MAX_NR_GENS | |
455 | #else | |
456 | #define NR_HIST_GENS 1U | |
457 | #endif | |
458 | ||
ec1c86b2 YZ |
459 | /* |
460 | * The youngest generation number is stored in max_seq for both anon and file | |
461 | * types as they are aged on an equal footing. The oldest generation numbers are | |
798c0330 YZ |
462 | * stored in min_seq[] separately for anon and file types so that they can be |
463 | * incremented independently. Ideally min_seq[] are kept in sync when both anon | |
464 | * and file types are evictable. However, to adapt to situations like extreme | |
465 | * swappiness, they are allowed to be out of sync by at most | |
466 | * MAX_NR_GENS-MIN_NR_GENS-1. | |
ec1c86b2 YZ |
467 | * |
468 | * The number of pages in each generation is eventually consistent and therefore | |
bd74fdae | 469 | * can be transiently negative when reset_batch_size() is pending. |
ec1c86b2 | 470 | */ |
391655fe | 471 | struct lru_gen_folio { |
ec1c86b2 YZ |
472 | /* the aging increments the youngest generation number */ |
473 | unsigned long max_seq; | |
474 | /* the eviction increments the oldest generation numbers */ | |
475 | unsigned long min_seq[ANON_AND_FILE]; | |
1332a809 YZ |
476 | /* the birth time of each generation in jiffies */ |
477 | unsigned long timestamps[MAX_NR_GENS]; | |
ec1c86b2 | 478 | /* the multi-gen LRU lists, lazily sorted on eviction */ |
6df1b221 | 479 | struct list_head folios[MAX_NR_GENS][ANON_AND_FILE][MAX_NR_ZONES]; |
ec1c86b2 YZ |
480 | /* the multi-gen LRU sizes, eventually consistent */ |
481 | long nr_pages[MAX_NR_GENS][ANON_AND_FILE][MAX_NR_ZONES]; | |
ac35a490 YZ |
482 | /* the exponential moving average of refaulted */ |
483 | unsigned long avg_refaulted[ANON_AND_FILE][MAX_NR_TIERS]; | |
484 | /* the exponential moving average of evicted+protected */ | |
485 | unsigned long avg_total[ANON_AND_FILE][MAX_NR_TIERS]; | |
798c0330 YZ |
486 | /* can only be modified under the LRU lock */ |
487 | unsigned long protected[NR_HIST_GENS][ANON_AND_FILE][MAX_NR_TIERS]; | |
ac35a490 YZ |
488 | /* can be modified without holding the LRU lock */ |
489 | atomic_long_t evicted[NR_HIST_GENS][ANON_AND_FILE][MAX_NR_TIERS]; | |
490 | atomic_long_t refaulted[NR_HIST_GENS][ANON_AND_FILE][MAX_NR_TIERS]; | |
354ed597 YZ |
491 | /* whether the multi-gen LRU is enabled */ |
492 | bool enabled; | |
e4dde56c YZ |
493 | /* the memcg generation this lru_gen_folio belongs to */ |
494 | u8 gen; | |
495 | /* the list segment this lru_gen_folio belongs to */ | |
496 | u8 seg; | |
497 | /* per-node lru_gen_folio list for global reclaim */ | |
498 | struct hlist_nulls_node list; | |
ec1c86b2 YZ |
499 | }; |
500 | ||
bd74fdae YZ |
501 | enum { |
502 | MM_LEAF_TOTAL, /* total leaf entries */ | |
bd74fdae | 503 | MM_LEAF_YOUNG, /* young leaf entries */ |
bd74fdae YZ |
504 | MM_NONLEAF_FOUND, /* non-leaf entries found in Bloom filters */ |
505 | MM_NONLEAF_ADDED, /* non-leaf entries added to Bloom filters */ | |
506 | NR_MM_STATS | |
507 | }; | |
508 | ||
509 | /* double-buffering Bloom filters */ | |
510 | #define NR_BLOOM_FILTERS 2 | |
511 | ||
512 | struct lru_gen_mm_state { | |
cc25bbe1 | 513 | /* synced with max_seq after each iteration */ |
bd74fdae | 514 | unsigned long seq; |
7f63cf2d | 515 | /* where the current iteration continues after */ |
bd74fdae | 516 | struct list_head *head; |
7f63cf2d | 517 | /* where the last iteration ended before */ |
bd74fdae | 518 | struct list_head *tail; |
bd74fdae YZ |
519 | /* Bloom filters flip after each iteration */ |
520 | unsigned long *filters[NR_BLOOM_FILTERS]; | |
521 | /* the mm stats for debugging */ | |
522 | unsigned long stats[NR_HIST_GENS][NR_MM_STATS]; | |
bd74fdae YZ |
523 | }; |
524 | ||
525 | struct lru_gen_mm_walk { | |
526 | /* the lruvec under reclaim */ | |
527 | struct lruvec *lruvec; | |
cc25bbe1 KH |
528 | /* max_seq from lru_gen_folio: can be out of date */ |
529 | unsigned long seq; | |
bd74fdae YZ |
530 | /* the next address within an mm to scan */ |
531 | unsigned long next_addr; | |
532 | /* to batch promoted pages */ | |
533 | int nr_pages[MAX_NR_GENS][ANON_AND_FILE][MAX_NR_ZONES]; | |
534 | /* to batch the mm stats */ | |
535 | int mm_stats[NR_MM_STATS]; | |
536 | /* total batched items */ | |
537 | int batched; | |
798c0330 | 538 | int swappiness; |
bd74fdae YZ |
539 | bool force_scan; |
540 | }; | |
541 | ||
e4dde56c YZ |
542 | /* |
543 | * For each node, memcgs are divided into two generations: the old and the | |
544 | * young. For each generation, memcgs are randomly sharded into multiple bins | |
545 | * to improve scalability. For each bin, the hlist_nulls is virtually divided | |
546 | * into three segments: the head, the tail and the default. | |
547 | * | |
548 | * An onlining memcg is added to the tail of a random bin in the old generation. | |
549 | * The eviction starts at the head of a random bin in the old generation. The | |
550 | * per-node memcg generation counter, whose remainder (mod MEMCG_NR_GENS) indexes |
551 | * the old generation, is incremented when all its bins become empty. | |
552 | * | |
553 | * There are four operations: | |
8aa42061 | 554 | * 1. MEMCG_LRU_HEAD, which moves a memcg to the head of a random bin in its |
e4dde56c | 555 | * current generation (old or young) and updates its "seg" to "head"; |
8aa42061 | 556 | * 2. MEMCG_LRU_TAIL, which moves a memcg to the tail of a random bin in its |
e4dde56c | 557 | * current generation (old or young) and updates its "seg" to "tail"; |
8aa42061 | 558 | * 3. MEMCG_LRU_OLD, which moves a memcg to the head of a random bin in the old |
e4dde56c | 559 | * generation, updates its "gen" to "old" and resets its "seg" to "default"; |
8aa42061 | 560 | * 4. MEMCG_LRU_YOUNG, which moves a memcg to the tail of a random bin in the |
e4dde56c YZ |
561 | * young generation, updates its "gen" to "young" and resets its "seg" to |
562 | * "default". | |
563 | * | |
564 | * The events that trigger the above operations are: | |
565 | * 1. Exceeding the soft limit, which triggers MEMCG_LRU_HEAD; | |
8aa42061 | 566 | * 2. The first attempt to reclaim a memcg below low, which triggers |
e4dde56c | 567 | * MEMCG_LRU_TAIL; |
4376807b YZ |
568 | * 3. The first attempt to reclaim a memcg offlined or below reclaimable size |
569 | * threshold, which triggers MEMCG_LRU_TAIL; | |
570 | * 4. The second attempt to reclaim a memcg offlined or below reclaimable size | |
571 | * threshold, which triggers MEMCG_LRU_YOUNG; | |
8aa42061 | 572 | * 5. Attempting to reclaim a memcg below min, which triggers MEMCG_LRU_YOUNG; |
e4dde56c | 573 | * 6. Finishing the aging on the eviction path, which triggers MEMCG_LRU_YOUNG; |
8aa42061 | 574 | * 7. Offlining a memcg, which triggers MEMCG_LRU_OLD. |
e4dde56c | 575 | * |
8aa42061 YZ |
576 | * Notes: |
577 | * 1. Memcg LRU only applies to global reclaim, and the round-robin incrementing | |
578 | * of their max_seq counters ensures the eventual fairness to all eligible | |
579 | * memcgs. For memcg reclaim, it still relies on mem_cgroup_iter(). | |
580 | * 2. There are only two valid generations: old (seq) and young (seq+1). | |
581 | * MEMCG_NR_GENS is set to three so that when reading the generation counter | |
582 | * locklessly, a stale value (seq-1) does not wrap around to young. |
e4dde56c | 583 | */ |
8aa42061 | 584 | #define MEMCG_NR_GENS 3 |
e4dde56c YZ |
585 | #define MEMCG_NR_BINS 8 |
586 | ||
587 | struct lru_gen_memcg { | |
588 | /* the per-node memcg generation counter */ | |
589 | unsigned long seq; | |
590 | /* each memcg has one lru_gen_folio per node */ | |
591 | unsigned long nr_memcgs[MEMCG_NR_GENS]; | |
592 | /* per-node lru_gen_folio list for global reclaim */ | |
593 | struct hlist_nulls_head fifo[MEMCG_NR_GENS][MEMCG_NR_BINS]; | |
594 | /* protects the above */ | |
595 | spinlock_t lock; | |
596 | }; | |
597 | ||
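A hedged sketch of the bookkeeping described in the comment above: with MEMCG_NR_GENS = 3, the old generation is indexed by seq mod 3 and the young one by seq + 1, so a stale lockless read of seq - 1 lands on the third, unused index; the sequence number and the bin choice are invented.

```c
/* Standalone sketch: the two valid memcg generations (old = seq, young =
 * seq + 1) and the random bin sharding described above. */
#include <stdio.h>
#include <stdlib.h>

#define MEMCG_NR_GENS 3
#define MEMCG_NR_BINS 8

int main(void)
{
	unsigned long seq = 41;	/* invented per-node memcg generation counter */
	int old = seq % MEMCG_NR_GENS;
	int young = (seq + 1) % MEMCG_NR_GENS;
	int stale = (seq - 1) % MEMCG_NR_GENS;
	int bin = rand() % MEMCG_NR_BINS;	/* an onlining memcg picks a random bin */

	printf("old gen index %d, young gen index %d, stale read maps to %d\n",
	       old, young, stale);
	printf("memcg added to fifo[%d][%d]\n", old, bin);
	return 0;
}
```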
598 | void lru_gen_init_pgdat(struct pglist_data *pgdat); | |
745b13e6 | 599 | void lru_gen_init_lruvec(struct lruvec *lruvec); |
1d4832be | 600 | bool lru_gen_look_around(struct page_vma_mapped_walk *pvmw); |
e4dde56c | 601 | |
ec1c86b2 YZ |
602 | void lru_gen_init_memcg(struct mem_cgroup *memcg); |
603 | void lru_gen_exit_memcg(struct mem_cgroup *memcg); | |
e4dde56c YZ |
604 | void lru_gen_online_memcg(struct mem_cgroup *memcg); |
605 | void lru_gen_offline_memcg(struct mem_cgroup *memcg); | |
606 | void lru_gen_release_memcg(struct mem_cgroup *memcg); | |
5c7e7a0d | 607 | void lru_gen_soft_reclaim(struct mem_cgroup *memcg, int nid); |
e4dde56c | 608 | |
ec1c86b2 YZ |
609 | #else /* !CONFIG_LRU_GEN */ |
610 | ||
e4dde56c YZ |
611 | static inline void lru_gen_init_pgdat(struct pglist_data *pgdat) |
612 | { | |
613 | } | |
614 | ||
ec1c86b2 YZ |
615 | static inline void lru_gen_init_lruvec(struct lruvec *lruvec) |
616 | { | |
617 | } | |
618 | ||
1d4832be | 619 | static inline bool lru_gen_look_around(struct page_vma_mapped_walk *pvmw) |
018ee47f | 620 | { |
1d4832be | 621 | return false; |
018ee47f YZ |
622 | } |
623 | ||
ec1c86b2 YZ |
624 | static inline void lru_gen_init_memcg(struct mem_cgroup *memcg) |
625 | { | |
626 | } | |
627 | ||
628 | static inline void lru_gen_exit_memcg(struct mem_cgroup *memcg) | |
629 | { | |
630 | } | |
e4dde56c YZ |
631 | |
632 | static inline void lru_gen_online_memcg(struct mem_cgroup *memcg) | |
633 | { | |
634 | } | |
635 | ||
636 | static inline void lru_gen_offline_memcg(struct mem_cgroup *memcg) | |
637 | { | |
638 | } | |
639 | ||
640 | static inline void lru_gen_release_memcg(struct mem_cgroup *memcg) | |
641 | { | |
642 | } | |
643 | ||
5c7e7a0d | 644 | static inline void lru_gen_soft_reclaim(struct mem_cgroup *memcg, int nid) |
e4dde56c YZ |
645 | { |
646 | } | |
647 | ||
ec1c86b2 YZ |
648 | #endif /* CONFIG_LRU_GEN */ |
649 | ||
6290df54 | 650 | struct lruvec { |
23047a96 | 651 | struct list_head lists[NR_LRU_LISTS]; |
6168d0da AS |
652 | /* per lruvec lru_lock for memcg */ |
653 | spinlock_t lru_lock; | |
1431d4d1 JW |
654 | /* |
655 | * These track the cost of reclaiming one LRU - file or anon - | |
656 | * over the other. As the observed cost of reclaiming one LRU | |
657 | * increases, the reclaim scan balance tips toward the other. | |
658 | */ | |
659 | unsigned long anon_cost; | |
660 | unsigned long file_cost; | |
31d8fcac JW |
661 | /* Non-resident age, driven by LRU movement */ |
662 | atomic_long_t nonresident_age; | |
ed017373 YZ |
663 | /* Refaults at the time of last reclaim cycle */ |
664 | unsigned long refaults[ANON_AND_FILE]; | |
1b05117d JW |
665 | /* Various lruvec state flags (enum lruvec_flags) */ |
666 | unsigned long flags; | |
ec1c86b2 YZ |
667 | #ifdef CONFIG_LRU_GEN |
668 | /* evictable pages divided into generations */ | |
391655fe | 669 | struct lru_gen_folio lrugen; |
61dd3f24 | 670 | #ifdef CONFIG_LRU_GEN_WALKS_MMU |
bd74fdae YZ |
671 | /* to concurrently iterate lru_gen_mm_list */ |
672 | struct lru_gen_mm_state mm_state; | |
ec1c86b2 | 673 | #endif |
61dd3f24 | 674 | #endif /* CONFIG_LRU_GEN */ |
c255a458 | 675 | #ifdef CONFIG_MEMCG |
599d0c95 | 676 | struct pglist_data *pgdat; |
7f5e86c2 | 677 | #endif |
b5ba474f | 678 | struct zswap_lruvec_state zswap_lruvec_state; |
6290df54 JW |
679 | }; |
680 | ||
c8244935 | 681 | /* Isolate for asynchronous migration */ |
f3fd4a61 | 682 | #define ISOLATE_ASYNC_MIGRATE ((__force isolate_mode_t)0x4) |
e46a2879 MK |
683 | /* Isolate unevictable pages */ |
684 | #define ISOLATE_UNEVICTABLE ((__force isolate_mode_t)0x8) | |
4356f21d MK |
685 | |
686 | /* LRU Isolation modes. */ | |
9efeccac | 687 | typedef unsigned __bitwise isolate_mode_t; |
4356f21d | 688 | |
41858966 MG |
689 | enum zone_watermarks { |
690 | WMARK_MIN, | |
691 | WMARK_LOW, | |
692 | WMARK_HIGH, | |
c574bbe9 | 693 | WMARK_PROMO, |
41858966 MG |
694 | NR_WMARK |
695 | }; | |
696 | ||
44042b44 | 697 | /* |
bf14ed81 | 698 | * One per migratetype for each PAGE_ALLOC_COSTLY_ORDER. Two additional lists |
699 | * are added for THP. One THP PCP list is used by movable (__GFP_MOVABLE) |
700 | * allocations, and the other by unmovable and reclaimable allocations. |
44042b44 MG |
701 | */ |
702 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE | |
bf14ed81 | 703 | #define NR_PCP_THP 2 |
44042b44 MG |
704 | #else |
705 | #define NR_PCP_THP 0 | |
706 | #endif | |
5d0a661d MG |
707 | #define NR_LOWORDER_PCP_LISTS (MIGRATE_PCPTYPES * (PAGE_ALLOC_COSTLY_ORDER + 1)) |
708 | #define NR_PCP_LISTS (NR_LOWORDER_PCP_LISTS + NR_PCP_THP) | |
44042b44 | 709 | |
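To show what NR_PCP_LISTS works out to, here is a standalone sketch using the constants above; MIGRATE_PCPTYPES = 3 matches the migratetype enum earlier in this file, and NR_PCP_THP = 2 assumes CONFIG_TRANSPARENT_HUGEPAGE.

```c
/* Standalone sketch: how many per-CPU page lists exist, using the constants
 * defined above. */
#include <stdio.h>

#define MIGRATE_PCPTYPES	3
#define PAGE_ALLOC_COSTLY_ORDER	3
#define NR_PCP_THP		2
#define NR_LOWORDER_PCP_LISTS	(MIGRATE_PCPTYPES * (PAGE_ALLOC_COSTLY_ORDER + 1))
#define NR_PCP_LISTS		(NR_LOWORDER_PCP_LISTS + NR_PCP_THP)

int main(void)
{
	/* 3 migratetypes x orders 0..3 = 12 low-order lists, plus 2 THP lists. */
	printf("low-order pcp lists: %d\n", NR_LOWORDER_PCP_LISTS);
	printf("total pcp lists:     %d\n", NR_PCP_LISTS);
	return 0;
}
```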
ca71fe1a HY |
710 | /* |
711 | * Flags used in pcp->flags field. | |
712 | * | |
713 | * PCPF_PREV_FREE_HIGH_ORDER: a high-order page was freed by the |
714 | * previous page freeing. Used to avoid draining the PCP for an |
715 | * occasional high-order page freeing. |
362d37a1 HY |
716 | * |
717 | * PCPF_FREE_HIGH_BATCH: preserve "pcp->batch" pages in the PCP before |
718 | * draining it for consecutive high-order page freeing without |
719 | * allocation, if the CPU's data cache slice is large enough. This |
720 | * reduces zone lock contention and keeps cache-hot pages reusable. |
ca71fe1a HY |
721 | */ |
722 | #define PCPF_PREV_FREE_HIGH_ORDER BIT(0) | |
362d37a1 | 723 | #define PCPF_FREE_HIGH_BATCH BIT(1) |
ca71fe1a | 724 | |
1da177e4 | 725 | struct per_cpu_pages { |
4b23a68f | 726 | spinlock_t lock; /* Protects lists field */ |
1da177e4 | 727 | int count; /* number of pages in the list */ |
1da177e4 | 728 | int high; /* high watermark, emptying needed */ |
90b41691 HY |
729 | int high_min; /* min high watermark */ |
730 | int high_max; /* max high watermark */ | |
1da177e4 | 731 | int batch; /* chunk size for buddy add/remove */ |
ca71fe1a | 732 | u8 flags; /* protected by pcp->lock */ |
c0a24239 | 733 | u8 alloc_factor; /* batch scaling factor during allocate */ |
28f836b6 | 734 | #ifdef CONFIG_NUMA |
c0a24239 | 735 | u8 expire; /* When 0, remote pagesets are drained */ |
28f836b6 | 736 | #endif |
6ccdcb6d | 737 | short free_count; /* consecutive free count */ |
5f8dcc21 MG |
738 | |
739 | /* Lists of pages, one per migrate type stored on the pcp-lists */ | |
44042b44 | 740 | struct list_head lists[NR_PCP_LISTS]; |
5d0a661d | 741 | } ____cacheline_aligned_in_smp; |
1da177e4 | 742 | |
28f836b6 | 743 | struct per_cpu_zonestat { |
2244b95a CL |
744 | #ifdef CONFIG_SMP |
745 | s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS]; | |
28f836b6 MG |
746 | s8 stat_threshold; |
747 | #endif | |
748 | #ifdef CONFIG_NUMA | |
f19298b9 MG |
749 | /* |
750 | * Low priority inaccurate counters that are only folded | |
751 | * on demand. Use a large type to avoid the overhead of | |
752 | * folding during refresh_cpu_vm_stats. | |
753 | */ | |
754 | unsigned long vm_numa_event[NR_VM_NUMA_EVENT_ITEMS]; | |
2244b95a | 755 | #endif |
99dcc3e5 | 756 | }; |
e7c8d5c9 | 757 | |
75ef7184 MG |
758 | struct per_cpu_nodestat { |
759 | s8 stat_threshold; | |
760 | s8 vm_node_stat_diff[NR_VM_NODE_STAT_ITEMS]; | |
761 | }; | |
762 | ||
97965478 CL |
763 | #endif /* !__GENERATING_BOUNDS.H */ |
764 | ||
2f1b6248 CL |
765 | enum zone_type { |
766 | /* | |
734f9246 NSJ |
767 | * ZONE_DMA and ZONE_DMA32 are used when there are peripherals not able |
768 | * to DMA to all of the addressable memory (ZONE_NORMAL). | |
769 | * On architectures where this area covers the whole 32 bit address | |
770 | * space ZONE_DMA32 is used. ZONE_DMA is left for the ones with smaller | |
771 | * DMA addressing constraints. This distinction is important as a 32bit | |
772 | * DMA mask is assumed when ZONE_DMA32 is defined. Some 64-bit | |
773 | * platforms may need both zones as they support peripherals with | |
774 | * different DMA addressing limitations. | |
2f1b6248 | 775 | */ |
734f9246 | 776 | #ifdef CONFIG_ZONE_DMA |
2f1b6248 | 777 | ZONE_DMA, |
4b51d669 | 778 | #endif |
fb0e7942 | 779 | #ifdef CONFIG_ZONE_DMA32 |
2f1b6248 | 780 | ZONE_DMA32, |
fb0e7942 | 781 | #endif |
2f1b6248 CL |
782 | /* |
783 | * Normal addressable memory is in ZONE_NORMAL. DMA operations can be | |
784 | * performed on pages in ZONE_NORMAL if the DMA devices support | |
785 | * transfers to all addressable memory. | |
786 | */ | |
787 | ZONE_NORMAL, | |
e53ef38d | 788 | #ifdef CONFIG_HIGHMEM |
2f1b6248 CL |
789 | /* |
790 | * A memory area that is only addressable by the kernel through | |
791 | * mapping portions into its own address space. This is for example | |
792 | * used by i386 to allow the kernel to address the memory beyond | |
793 | * 900MB. The kernel will set up special mappings (page | |
794 | * table entries on i386) for each page that the kernel needs to | |
795 | * access. | |
796 | */ | |
797 | ZONE_HIGHMEM, | |
e53ef38d | 798 | #endif |
9181a980 DH |
799 | /* |
800 | * ZONE_MOVABLE is similar to ZONE_NORMAL, except that it contains | |
801 | * movable pages with few exceptional cases described below. Main use | |
802 | * cases for ZONE_MOVABLE are to make memory offlining/unplug more | |
803 | * likely to succeed, and to locally limit unmovable allocations - e.g., | |
804 | * to increase the number of THP/huge pages. Notable special cases are: | |
805 | * | |
806 | * 1. Pinned pages: (long-term) pinning of movable pages might | |
d1e153fe PT |
807 | * essentially turn such pages unmovable. Therefore, we do not allow |
808 | * pinning long-term pages in ZONE_MOVABLE. When pages are pinned and | |
809 | * faulted, they come from the right zone right away. However, it is | |
810 | * still possible that address space already has pages in | |
811 | * ZONE_MOVABLE at the time when pages are pinned (i.e. the user has |
812 | * touched that memory before pinning). In such a case we migrate them |
813 | * to a different zone. When migration fails - pinning fails. | |
9181a980 DH |
814 | * 2. memblock allocations: kernelcore/movablecore setups might create |
815 | * situations where ZONE_MOVABLE contains unmovable allocations | |
816 | * after boot. Memory offlining and allocations fail early. | |
817 | * 3. Memory holes: kernelcore/movablecore setups might create very rare | |
818 | * situations where ZONE_MOVABLE contains memory holes after boot, | |
819 | * for example, if we have sections that are only partially | |
820 | * populated. Memory offlining and allocations fail early. | |
821 | * 4. PG_hwpoison pages: while poisoned pages can be skipped during | |
822 | * memory offlining, such pages cannot be allocated. | |
823 | * 5. Unmovable PG_offline pages: in paravirtualized environments, | |
824 | * hotplugged memory blocks might only partially be managed by the | |
825 | * buddy (e.g., via XEN-balloon, Hyper-V balloon, virtio-mem). The | |
826 | * parts not managed by the buddy are unmovable PG_offline pages. In |
827 | * some cases (virtio-mem), such pages can be skipped during | |
828 | * memory offlining, however, cannot be moved/allocated. These | |
829 | * techniques might use alloc_contig_range() to hide previously | |
830 | * exposed pages from the buddy again (e.g., to implement some sort | |
831 | * of memory unplug in virtio-mem). | |
9afaf30f PT |
832 | * 6. ZERO_PAGE(0), kernelcore/movablecore setups might create |
833 | * situations where ZERO_PAGE(0) which is allocated differently | |
834 | * on different platforms may end up in a movable zone. ZERO_PAGE(0) | |
835 | * cannot be migrated. | |
a08a2ae3 OS |
836 | * 7. Memory-hotplug: when using memmap_on_memory and onlining the |
837 | * memory to the MOVABLE zone, the vmemmap pages are also placed in | |
838 | * such zone. Such pages cannot be really moved around as they are | |
839 | * self-stored in the range, but they are treated as movable when | |
840 | * the range they describe is about to be offlined. | |
9181a980 DH |
841 | * |
842 | * In general, no unmovable allocations that degrade memory offlining | |
843 | * should end up in ZONE_MOVABLE. Allocators (like alloc_contig_range()) | |
844 | * have to expect that migrating pages in ZONE_MOVABLE can fail (even | |
845 | * if has_unmovable_pages() states that there are no unmovable pages, | |
846 | * there can be false negatives). | |
847 | */ | |
2a1e274a | 848 | ZONE_MOVABLE, |
033fbae9 DW |
849 | #ifdef CONFIG_ZONE_DEVICE |
850 | ZONE_DEVICE, | |
851 | #endif | |
97965478 | 852 | __MAX_NR_ZONES |
033fbae9 | 853 | |
2f1b6248 | 854 | }; |
1da177e4 | 855 | |
97965478 CL |
856 | #ifndef __GENERATING_BOUNDS_H |
857 | ||
ed017373 YZ |
858 | #define ASYNC_AND_SYNC 2 |
859 | ||
1da177e4 | 860 | struct zone { |
3484b2de | 861 | /* Read-mostly fields */ |
41858966 MG |
862 | |
863 | /* zone watermarks, access with *_wmark_pages(zone) macros */ | |
a9214443 | 864 | unsigned long _watermark[NR_WMARK]; |
1c30844d | 865 | unsigned long watermark_boost; |
41858966 | 866 | |
0aaa29a5 | 867 | unsigned long nr_reserved_highatomic; |
c928807f | 868 | unsigned long nr_free_highatomic; |
0aaa29a5 | 869 | |
1da177e4 | 870 | /* |
89903327 AM |
871 | * We don't know if the memory that we're going to allocate will be |
872 | * freeable or/and it will be released eventually, so to avoid totally | |
873 | * wasting several GB of ram we must reserve some of the lower zone | |
874 | * memory (otherwise we risk running OOM on the lower zones despite |
875 | * there being tons of freeable ram on the higher zones). This array is | |
876 | * recalculated at runtime if the sysctl_lowmem_reserve_ratio sysctl | |
877 | * changes. | |
1da177e4 | 878 | */ |
3484b2de | 879 | long lowmem_reserve[MAX_NR_ZONES]; |
ab8fabd4 | 880 | |
e7c8d5c9 | 881 | #ifdef CONFIG_NUMA |
d5f541ed | 882 | int node; |
3484b2de | 883 | #endif |
3484b2de | 884 | struct pglist_data *zone_pgdat; |
28f836b6 MG |
885 | struct per_cpu_pages __percpu *per_cpu_pageset; |
886 | struct per_cpu_zonestat __percpu *per_cpu_zonestats; | |
952eaf81 VB |
887 | /* |
888 | * the high and batch values are copied to individual pagesets for | |
889 | * faster access | |
890 | */ | |
90b41691 HY |
891 | int pageset_high_min; |
892 | int pageset_high_max; | |
952eaf81 | 893 | int pageset_batch; |
3484b2de | 894 | |
835c134e MG |
895 | #ifndef CONFIG_SPARSEMEM |
896 | /* | |
d9c23400 | 897 | * Flags for a pageblock_nr_pages block. See pageblock-flags.h. |
835c134e MG |
898 | * In SPARSEMEM, this map is stored in struct mem_section |
899 | */ | |
900 | unsigned long *pageblock_flags; | |
901 | #endif /* CONFIG_SPARSEMEM */ | |
902 | ||
1da177e4 LT |
903 | /* zone_start_pfn == zone_start_paddr >> PAGE_SHIFT */ |
904 | unsigned long zone_start_pfn; | |
905 | ||
bdc8cb98 | 906 | /* |
9feedc9d JL |
907 | * spanned_pages is the total pages spanned by the zone, including |
908 | * holes, which is calculated as: | |
909 | * spanned_pages = zone_end_pfn - zone_start_pfn; | |
bdc8cb98 | 910 | * |
9feedc9d JL |
911 | * present_pages is physical pages existing within the zone, which |
912 | * is calculated as: | |
8761e31c | 913 | * present_pages = spanned_pages - absent_pages(pages in holes); |
9feedc9d | 914 | * |
4b097002 DH |
915 | * present_early_pages is present pages existing within the zone |
916 | * located on memory available since early boot, excluding hotplugged | |
917 | * memory. | |
918 | * | |
9feedc9d JL |
919 | * managed_pages is present pages managed by the buddy system, which |
920 | * is calculated as (reserved_pages includes pages allocated by the | |
921 | * bootmem allocator): | |
922 | * managed_pages = present_pages - reserved_pages; | |
923 | * | |
3c381db1 DH |
924 | * cma_pages is the number of present pages assigned for CMA use |
925 | * (MIGRATE_CMA). | |
926 | * | |
9feedc9d JL |
927 | * So present_pages may be used by memory hotplug or memory power |
928 | * management logic to figure out unmanaged pages by checking | |
929 | * (present_pages - managed_pages). And managed_pages should be used | |
930 | * by page allocator and vm scanner to calculate all kinds of watermarks | |
931 | * and thresholds. | |
932 | * | |
933 | * Locking rules: | |
934 | * | |
935 | * zone_start_pfn and spanned_pages are protected by span_seqlock. | |
936 | * It is a seqlock because it has to be read outside of zone->lock, | |
937 | * and it is done in the main allocator path. But, it is written | |
938 | * quite infrequently. | |
939 | * | |
940 | * The span_seq lock is declared along with zone->lock because it is | |
bdc8cb98 DH |
941 | * frequently read in proximity to zone->lock. It's good to |
942 | * give them a chance of being in the same cacheline. | |
9feedc9d | 943 | * |
c3d5f5f0 | 944 | * Write access to present_pages at runtime should be protected by |
e8da368a YZL |
945 | * mem_hotplug_begin/done(). Any reader who can't tolerant drift of |
946 | * present_pages should use get_online_mems() to get a stable value. | |
bdc8cb98 | 947 | */ |
9705bea5 | 948 | atomic_long_t managed_pages; |
9feedc9d JL |
949 | unsigned long spanned_pages; |
950 | unsigned long present_pages; | |
4b097002 DH |
951 | #if defined(CONFIG_MEMORY_HOTPLUG) |
952 | unsigned long present_early_pages; | |
953 | #endif | |
3c381db1 DH |
954 | #ifdef CONFIG_CMA |
955 | unsigned long cma_pages; | |
956 | #endif | |
3484b2de MG |
957 | |
958 | const char *name; | |
1da177e4 | 959 | |
ad53f92e JK |
960 | #ifdef CONFIG_MEMORY_ISOLATION |
961 | /* | |
962 | * Number of isolated pageblock. It is used to solve incorrect | |
963 | * freepage counting problem due to racy retrieving migratetype | |
964 | * of pageblock. Protected by zone->lock. | |
965 | */ | |
966 | unsigned long nr_isolate_pageblock; | |
967 | #endif | |
968 | ||
3484b2de MG |
969 | #ifdef CONFIG_MEMORY_HOTPLUG |
970 | /* see spanned/present_pages for more description */ | |
971 | seqlock_t span_seqlock; | |
972 | #endif | |
973 | ||
9dcb8b68 | 974 | int initialized; |
3484b2de | 975 | |
0f661148 | 976 | /* Write-intensive fields used from the page allocator */ |
e6ad640b | 977 | CACHELINE_PADDING(_pad1_); |
0f661148 | 978 | |
3484b2de | 979 | /* free areas of different sizes */ |
fd377218 | 980 | struct free_area free_area[NR_PAGE_ORDERS]; |
3484b2de | 981 | |
dcdfdd40 | 982 | #ifdef CONFIG_UNACCEPTED_MEMORY |
5e0a760b | 983 | /* Pages to be accepted. All pages on the list are MAX_PAGE_ORDER */ |
dcdfdd40 | 984 | struct list_head unaccepted_pages; |
4067196a KS |
985 | |
986 | /* To be called once the last page in the zone is accepted */ | |
987 | struct work_struct unaccepted_cleanup; | |
dcdfdd40 KS |
988 | #endif |
989 | ||
3484b2de MG |
990 | /* zone flags, see below */ |
991 | unsigned long flags; | |
992 | ||
0f661148 | 993 | /* Primarily protects free_area */ |
a368ab67 MG |
994 | spinlock_t lock; |
995 | ||
8c57b687 AS |
996 | /* Pages to be freed when next trylock succeeds */ |
997 | struct llist_head trylock_free_pages; | |
998 | ||
0f661148 | 999 | /* Write-intensive fields used by compaction and vmstats. */ |
e6ad640b | 1000 | CACHELINE_PADDING(_pad2_); |
3484b2de | 1001 | |
3484b2de MG |
1002 | /* |
1003 | * When free pages are below this point, additional steps are taken | |
1004 | * when reading the number of free pages to avoid per-cpu counter | |
1005 | * drift allowing watermarks to be breached | |
1006 | */ | |
1007 | unsigned long percpu_drift_mark; | |
1008 | ||
1009 | #if defined CONFIG_COMPACTION || defined CONFIG_CMA | |
1010 | /* pfn where compaction free scanner should start */ | |
1011 | unsigned long compact_cached_free_pfn; | |
ed017373 YZ |
1012 | /* pfn where compaction migration scanner should start */ |
1013 | unsigned long compact_cached_migrate_pfn[ASYNC_AND_SYNC]; | |
e332f741 MG |
1014 | unsigned long compact_init_migrate_pfn; |
1015 | unsigned long compact_init_free_pfn; | |
3484b2de MG |
1016 | #endif |
1017 | ||
1018 | #ifdef CONFIG_COMPACTION | |
1019 | /* | |
1020 | * On compaction failure, 1<<compact_defer_shift compactions | |
1021 | * are skipped before trying again. The number attempted since | |
1022 | * last failure is tracked with compact_considered. | |
860b3272 | 1023 | * compact_order_failed is the minimum compaction failed order. |
3484b2de MG |
1024 | */ |
1025 | unsigned int compact_considered; | |
1026 | unsigned int compact_defer_shift; | |
1027 | int compact_order_failed; | |
1028 | #endif | |
1029 | ||
1030 | #if defined CONFIG_COMPACTION || defined CONFIG_CMA | |
1031 | /* Set to true when the PG_migrate_skip bits should be cleared */ | |
1032 | bool compact_blockskip_flush; | |
1033 | #endif | |
1034 | ||
7cf91a98 JK |
1035 | bool contiguous; |
1036 | ||
e6ad640b | 1037 | CACHELINE_PADDING(_pad3_); |
3484b2de MG |
1038 | /* Zone statistics */ |
1039 | atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS]; | |
f19298b9 | 1040 | atomic_long_t vm_numa_event[NR_VM_NUMA_EVENT_ITEMS]; |
22fc6ecc | 1041 | } ____cacheline_internodealigned_in_smp; |
1da177e4 | 1042 | |
599d0c95 | 1043 | enum pgdat_flags { |
599d0c95 | 1044 | PGDAT_DIRTY, /* reclaim scanning has recently found |
d43006d5 MG |
1045 | * many dirty file pages at the tail |
1046 | * of the LRU. | |
1047 | */ | |
599d0c95 | 1048 | PGDAT_WRITEBACK, /* reclaim scanning has recently found |
283aba9f MG |
1049 | * many pages under writeback |
1050 | */ | |
a5f5f91d | 1051 | PGDAT_RECLAIM_LOCKED, /* prevents concurrent reclaim */ |
57054651 | 1052 | }; |
e815af95 | 1053 | |
73444bc4 MG |
1054 | enum zone_flags { |
1055 | ZONE_BOOSTED_WATERMARK, /* zone recently boosted watermarks. | |
1056 | * Cleared when kswapd is woken. | |
1057 | */ | |
c49c2c47 | 1058 | ZONE_RECLAIM_ACTIVE, /* kswapd may be scanning the zone. */ |
57c0419c | 1059 | ZONE_BELOW_HIGH, /* zone is below high watermark. */ |
73444bc4 MG |
1060 | }; |
1061 | ||
620943d7 AM |
1062 | static inline unsigned long wmark_pages(const struct zone *z, |
1063 | enum zone_watermarks w) | |
1064 | { | |
1065 | return z->_watermark[w] + z->watermark_boost; | |
1066 | } | |
1067 | ||
1068 | static inline unsigned long min_wmark_pages(const struct zone *z) | |
1069 | { | |
1070 | return wmark_pages(z, WMARK_MIN); | |
1071 | } | |
1072 | ||
1073 | static inline unsigned long low_wmark_pages(const struct zone *z) | |
1074 | { | |
1075 | return wmark_pages(z, WMARK_LOW); | |
1076 | } | |
1077 | ||
1078 | static inline unsigned long high_wmark_pages(const struct zone *z) | |
1079 | { | |
1080 | return wmark_pages(z, WMARK_HIGH); | |
1081 | } | |
1082 | ||
1083 | static inline unsigned long promo_wmark_pages(const struct zone *z) | |
1084 | { | |
1085 | return wmark_pages(z, WMARK_PROMO); | |
1086 | } | |
1087 | ||
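A standalone sketch of the watermark helpers defined above: each wmark_pages() variant is just the stored watermark plus the transient boost. All numbers are invented.

```c
/* Standalone sketch: the wmark_pages() helpers above add the zone's
 * transient boost to the stored watermark. Values are invented. */
#include <stdio.h>

enum zone_watermarks { WMARK_MIN, WMARK_LOW, WMARK_HIGH, WMARK_PROMO, NR_WMARK };

struct zone {
	unsigned long _watermark[NR_WMARK];
	unsigned long watermark_boost;
};

static unsigned long wmark_pages(const struct zone *z, enum zone_watermarks w)
{
	return z->_watermark[w] + z->watermark_boost;
}

int main(void)
{
	struct zone z = {
		._watermark = { 1000, 1250, 1500, 1750 },	/* pretend pages */
		.watermark_boost = 200,
	};

	printf("min %lu, low %lu, high %lu, promo %lu\n",
	       wmark_pages(&z, WMARK_MIN), wmark_pages(&z, WMARK_LOW),
	       wmark_pages(&z, WMARK_HIGH), wmark_pages(&z, WMARK_PROMO));
	return 0;
}
```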
9705bea5 AK |
1088 | static inline unsigned long zone_managed_pages(struct zone *zone) |
1089 | { | |
1090 | return (unsigned long)atomic_long_read(&zone->managed_pages); | |
1091 | } | |
1092 | ||
3c381db1 DH |
1093 | static inline unsigned long zone_cma_pages(struct zone *zone) |
1094 | { | |
1095 | #ifdef CONFIG_CMA | |
1096 | return zone->cma_pages; | |
1097 | #else | |
1098 | return 0; | |
1099 | #endif | |
1100 | } | |
1101 | ||
f9228b20 | 1102 | static inline unsigned long zone_end_pfn(const struct zone *zone) |
108bcc96 CS |
1103 | { |
1104 | return zone->zone_start_pfn + zone->spanned_pages; | |
1105 | } | |
1106 | ||
1107 | static inline bool zone_spans_pfn(const struct zone *zone, unsigned long pfn) | |
1108 | { | |
1109 | return zone->zone_start_pfn <= pfn && pfn < zone_end_pfn(zone); | |
1110 | } | |
1111 | ||
2a6e3ebe CS |
1112 | static inline bool zone_is_initialized(struct zone *zone) |
1113 | { | |
9dcb8b68 | 1114 | return zone->initialized; |
2a6e3ebe CS |
1115 | } |
1116 | ||
1117 | static inline bool zone_is_empty(struct zone *zone) | |
1118 | { | |
1119 | return zone->spanned_pages == 0; | |
1120 | } | |
1121 | ||
5bb88dc5 AS |
1122 | #ifndef BUILD_VDSO32_64 |
1123 | /* | |
1124 | * The zone field is never updated after free_area_init_core() | |
1125 | * sets it, so none of the operations on it need to be atomic. | |
1126 | */ | |
1127 | ||
1128 | /* Page flags: | [SECTION] | [NODE] | ZONE | [LAST_CPUPID] | ... | FLAGS | */ | |
1129 | #define SECTIONS_PGOFF ((sizeof(unsigned long)*8) - SECTIONS_WIDTH) | |
1130 | #define NODES_PGOFF (SECTIONS_PGOFF - NODES_WIDTH) | |
1131 | #define ZONES_PGOFF (NODES_PGOFF - ZONES_WIDTH) | |
1132 | #define LAST_CPUPID_PGOFF (ZONES_PGOFF - LAST_CPUPID_WIDTH) | |
1133 | #define KASAN_TAG_PGOFF (LAST_CPUPID_PGOFF - KASAN_TAG_WIDTH) | |
ec1c86b2 YZ |
1134 | #define LRU_GEN_PGOFF (KASAN_TAG_PGOFF - LRU_GEN_WIDTH) |
1135 | #define LRU_REFS_PGOFF (LRU_GEN_PGOFF - LRU_REFS_WIDTH) | |
5bb88dc5 AS |
1136 | |
1137 | /* | |
1138 | * Define the bit shifts to access each section. For non-existent | |
1139 | * sections we define the shift as 0; that plus a 0 mask ensures | |
1140 | * the compiler will optimise away references to them. |
1141 | */ | |
1142 | #define SECTIONS_PGSHIFT (SECTIONS_PGOFF * (SECTIONS_WIDTH != 0)) | |
1143 | #define NODES_PGSHIFT (NODES_PGOFF * (NODES_WIDTH != 0)) | |
1144 | #define ZONES_PGSHIFT (ZONES_PGOFF * (ZONES_WIDTH != 0)) | |
1145 | #define LAST_CPUPID_PGSHIFT (LAST_CPUPID_PGOFF * (LAST_CPUPID_WIDTH != 0)) | |
1146 | #define KASAN_TAG_PGSHIFT (KASAN_TAG_PGOFF * (KASAN_TAG_WIDTH != 0)) | |
1147 | ||
1148 | /* NODE:ZONE or SECTION:ZONE is used to ID a zone for the buddy allocator */ | |
1149 | #ifdef NODE_NOT_IN_PAGE_FLAGS | |
1150 | #define ZONEID_SHIFT (SECTIONS_SHIFT + ZONES_SHIFT) | |
1151 | #define ZONEID_PGOFF ((SECTIONS_PGOFF < ZONES_PGOFF) ? \ | |
1152 | SECTIONS_PGOFF : ZONES_PGOFF) | |
1153 | #else | |
1154 | #define ZONEID_SHIFT (NODES_SHIFT + ZONES_SHIFT) | |
1155 | #define ZONEID_PGOFF ((NODES_PGOFF < ZONES_PGOFF) ? \ | |
1156 | NODES_PGOFF : ZONES_PGOFF) | |
1157 | #endif | |
1158 | ||
1159 | #define ZONEID_PGSHIFT (ZONEID_PGOFF * (ZONEID_SHIFT != 0)) | |
1160 | ||
1161 | #define ZONES_MASK ((1UL << ZONES_WIDTH) - 1) | |
1162 | #define NODES_MASK ((1UL << NODES_WIDTH) - 1) | |
1163 | #define SECTIONS_MASK ((1UL << SECTIONS_WIDTH) - 1) | |
1164 | #define LAST_CPUPID_MASK ((1UL << LAST_CPUPID_SHIFT) - 1) | |
1165 | #define KASAN_TAG_MASK ((1UL << KASAN_TAG_WIDTH) - 1) | |
1166 | #define ZONEID_MASK ((1UL << ZONEID_SHIFT) - 1) | |
1167 | ||
1168 | static inline enum zone_type page_zonenum(const struct page *page) | |
1169 | { | |
1170 | ASSERT_EXCLUSIVE_BITS(page->flags, ZONES_MASK << ZONES_PGSHIFT); | |
1171 | return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK; | |
1172 | } | |
1173 | ||
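A standalone sketch of the shift-and-mask unpacking that page_zonenum() performs; the ZONES_WIDTH and ZONES_PGSHIFT values here are assumptions for the demo, since the real ones depend on the configured page flags layout.

```c
/* Standalone sketch: unpacking a zone number from a page->flags style word
 * with the SHIFT/MASK pattern above. Field width and offset are invented. */
#include <stdio.h>

#define ZONES_WIDTH	3	/* assumption for the demo */
#define ZONES_PGSHIFT	28	/* assumption for the demo */
#define ZONES_MASK	((1UL << ZONES_WIDTH) - 1)

enum zone_type { ZONE_DMA, ZONE_DMA32, ZONE_NORMAL, ZONE_MOVABLE, __MAX_NR_ZONES };

static enum zone_type page_zonenum(unsigned long flags)
{
	return (flags >> ZONES_PGSHIFT) & ZONES_MASK;
}

int main(void)
{
	/* Pack ZONE_NORMAL into the flags word, leaving low bits for PG_* flags. */
	unsigned long flags = ((unsigned long)ZONE_NORMAL & ZONES_MASK) << ZONES_PGSHIFT;

	flags |= 0x5;	/* pretend PG_* bits */
	printf("zone number: %d\n", page_zonenum(flags));
	return 0;
}
```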
1174 | static inline enum zone_type folio_zonenum(const struct folio *folio) | |
1175 | { | |
1176 | return page_zonenum(&folio->page); | |
1177 | } | |
1178 | ||
1179 | #ifdef CONFIG_ZONE_DEVICE | |
1180 | static inline bool is_zone_device_page(const struct page *page) | |
1181 | { | |
1182 | return page_zonenum(page) == ZONE_DEVICE; | |
1183 | } | |
49580e69 | 1184 | |
82ba975e AP |
1185 | static inline struct dev_pagemap *page_pgmap(const struct page *page) |
1186 | { | |
1187 | VM_WARN_ON_ONCE_PAGE(!is_zone_device_page(page), page); | |
1188 | return page_folio(page)->pgmap; | |
1189 | } | |
1190 | ||
49580e69 LG |
1191 | /* |
1192 | * Consecutive zone device pages should not be merged into the same sgl | |
1193 | * or bvec segment with other types of pages or if they belong to different | |
1194 | * pgmaps. Otherwise getting the pgmap of a given segment is not possible | |
1195 | * without scanning the entire segment. This helper returns true either if | |
1196 | * both pages are not zone device pages or both pages are zone device pages | |
1197 | * with the same pgmap. | |
1198 | */ | |
1199 | static inline bool zone_device_pages_have_same_pgmap(const struct page *a, | |
1200 | const struct page *b) | |
1201 | { | |
1202 | if (is_zone_device_page(a) != is_zone_device_page(b)) | |
1203 | return false; | |
1204 | if (!is_zone_device_page(a)) | |
1205 | return true; | |
82ba975e | 1206 | return page_pgmap(a) == page_pgmap(b); |
49580e69 LG |
1207 | } |
1208 | ||
5bb88dc5 AS |
1209 | extern void memmap_init_zone_device(struct zone *, unsigned long, |
1210 | unsigned long, struct dev_pagemap *); | |
1211 | #else | |
1212 | static inline bool is_zone_device_page(const struct page *page) | |
1213 | { | |
1214 | return false; | |
1215 | } | |
49580e69 LG |
1216 | static inline bool zone_device_pages_have_same_pgmap(const struct page *a, |
1217 | const struct page *b) | |
1218 | { | |
1219 | return true; | |
1220 | } | |
82ba975e AP |
1221 | static inline struct dev_pagemap *page_pgmap(const struct page *page) |
1222 | { | |
1223 | return NULL; | |
1224 | } | |
5bb88dc5 AS |
1225 | #endif |
1226 | ||
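/*
 * Illustrative sketch (hypothetical helper, not part of this header): the
 * kind of check a scatterlist/bvec builder could apply, per the rule
 * documented above, before letting @page share a segment with @prev.
 */
static inline bool example_pages_mergeable(const struct page *prev,
					   const struct page *page)
{
	/* Reject mixing zone device pages with other pages or other pgmaps. */
	return zone_device_pages_have_same_pgmap(prev, page);
}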
1227 | static inline bool folio_is_zone_device(const struct folio *folio) | |
1228 | { | |
1229 | return is_zone_device_page(&folio->page); | |
1230 | } | |
1231 | ||
1232 | static inline bool is_zone_movable_page(const struct page *page) | |
1233 | { | |
1234 | return page_zonenum(page) == ZONE_MOVABLE; | |
1235 | } | |
708ff491 VMO |
1236 | |
1237 | static inline bool folio_is_zone_movable(const struct folio *folio) | |
1238 | { | |
1239 | return folio_zonenum(folio) == ZONE_MOVABLE; | |
1240 | } | |
5bb88dc5 AS |
1241 | #endif |
1242 | ||
f1dd2cd1 MH |
1243 | /* |
1244 | * Return true if [start_pfn, start_pfn + nr_pages) range has a non-empty | |
1245 | * intersection with the given zone | |
1246 | */ | |
1247 | static inline bool zone_intersects(struct zone *zone, | |
1248 | unsigned long start_pfn, unsigned long nr_pages) | |
1249 | { | |
1250 | if (zone_is_empty(zone)) | |
1251 | return false; | |
1252 | if (start_pfn >= zone_end_pfn(zone) || | |
1253 | start_pfn + nr_pages <= zone->zone_start_pfn) | |
1254 | return false; | |
1255 | ||
1256 | return true; | |
1257 | } | |
1258 | ||
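/*
 * Illustrative sketch (hypothetical helper): the single-pfn case of the
 * half-open intersection test above.
 */
static inline bool example_pfn_in_zone_span(struct zone *zone, unsigned long pfn)
{
	return zone_intersects(zone, pfn, 1);
}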
1da177e4 LT |
1259 | /* |
1260 | * The "priority" of VM scanning is how much of the queues we will scan in one | |
1261 | * go. A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of the | |
1262 | * queues ("queue_length >> 12") during an aging round. | |
1263 | */ | |
1264 | #define DEF_PRIORITY 12 | |
1265 | ||
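/*
 * Worked example (illustrative): with about 4,194,304 pages on the LRU lists,
 * the first pass at DEF_PRIORITY targets roughly 4194304 >> 12 = 1024 pages;
 * each lower priority doubles that target until priority 0 covers the lists
 * in full.
 */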
9276b1bc PJ |
1266 | /* Maximum number of zones on a zonelist */ |
1267 | #define MAX_ZONES_PER_ZONELIST (MAX_NUMNODES * MAX_NR_ZONES) | |
1268 | ||
c00eb15a YB |
1269 | enum { |
1270 | ZONELIST_FALLBACK, /* zonelist with fallback */ | |
9276b1bc | 1271 | #ifdef CONFIG_NUMA |
c00eb15a YB |
1272 | /* |
1273 | * The NUMA zonelists are doubled because we need zonelists that | |
1274 | * restrict the allocations to a single node for __GFP_THISNODE. | |
1275 | */ | |
1276 | ZONELIST_NOFALLBACK, /* zonelist without fallback (__GFP_THISNODE) */ | |
9276b1bc | 1277 | #endif |
c00eb15a YB |
1278 | MAX_ZONELISTS |
1279 | }; | |
9276b1bc | 1280 | |
dd1a239f MG |
1281 | /* |
1282 | * This struct contains information about a zone in a zonelist. It is stored | |
1283 | * here to avoid dereferences into large structures and lookups of tables | |
1284 | */ | |
1285 | struct zoneref { | |
1286 | struct zone *zone; /* Pointer to actual zone */ | |
1287 | int zone_idx; /* zone_idx(zoneref->zone) */ | |
1288 | }; | |
1289 | ||
1da177e4 LT |
1290 | /* |
1291 | * One allocation request operates on a zonelist. A zonelist | |
1292 | * is a list of zones, the first one is the 'goal' of the | |
1293 | * allocation, the other zones are fallback zones, in decreasing | |
1294 | * priority. | |
1295 | * | |
dd1a239f MG |
1296 | * To speed the reading of the zonelist, the zonerefs contain the zone index |
1297 | * of the entry being read. Helper functions to access information given | |
1298 | * a struct zoneref are | |
1299 | * | |
1300 | * zonelist_zone() - Return the struct zone * for an entry in _zonerefs | |
1301 | * zonelist_zone_idx() - Return the index of the zone for an entry | |
1302 | * zonelist_node_idx() - Return the index of the node for an entry | |
1da177e4 LT |
1303 | */ |
1304 | struct zonelist { | |
dd1a239f | 1305 | struct zoneref _zonerefs[MAX_ZONES_PER_ZONELIST + 1]; |
1da177e4 LT |
1306 | }; |
1307 | ||
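/*
 * Illustrative sketch (hypothetical helper): a raw walk over _zonerefs,
 * assuming the usual NULL-zone terminator set up when the zonelists are
 * built. Real callers should prefer the zoneref helpers and iterators
 * defined further below.
 */
static inline unsigned int example_zonelist_length(struct zonelist *zonelist)
{
	struct zoneref *z = zonelist->_zonerefs;
	unsigned int nr = 0;

	while (z->zone) {
		nr++;
		z++;
	}
	return nr;
}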
bb1c50d3 MR |
1308 | /* |
1309 | * The array of struct pages for flatmem. | |
1310 | * It must be declared for SPARSEMEM as well because there are configurations | |
1311 | * that rely on that. | |
1312 | */ | |
5b99cd0e | 1313 | extern struct page *mem_map; |
5b99cd0e | 1314 | |
364c1eeb YS |
1315 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
1316 | struct deferred_split { | |
1317 | spinlock_t split_queue_lock; | |
1318 | struct list_head split_queue; | |
1319 | unsigned long split_queue_len; | |
1320 | }; | |
1321 | #endif | |
1322 | ||
44b8f8bf JY |
1323 | #ifdef CONFIG_MEMORY_FAILURE |
1324 | /* | |
1325 | * Per NUMA node memory failure handling statistics. | |
1326 | */ | |
1327 | struct memory_failure_stats { | |
1328 | /* | |
1329 | * Number of raw pages poisoned. | |
1330 | * Cases not accounted: memory outside kernel control, offline page, | |
1331 | * arch-specific memory_failure (SGX), hwpoison_filter() filtered | |
1332 | * error events, and unpoison actions from hwpoison_unpoison. | |
1333 | */ | |
1334 | unsigned long total; | |
1335 | /* | |
1336 | * Recovery results of poisoned raw pages handled by memory_failure, | |
1337 | * in sync with mf_result. | |
1338 | * total = ignored + failed + delayed + recovered. | |
1339 | * total * PAGE_SIZE * #nodes = /proc/meminfo/HardwareCorrupted. | |
1340 | */ | |
1341 | unsigned long ignored; | |
1342 | unsigned long failed; | |
1343 | unsigned long delayed; | |
1344 | unsigned long recovered; | |
1345 | }; | |
1346 | #endif | |
1347 | ||
1da177e4 | 1348 | /* |
1da177e4 | 1349 | * On NUMA machines, each NUMA node would have a pg_data_t to describe |
618b8c20 NB |
1350 | * its memory layout. On UMA machines there is a single pglist_data which |
1351 | * describes the whole memory. | |
1da177e4 LT |
1352 | * |
1353 | * Memory statistics and page replacement data structures are maintained on a | |
1354 | * per-zone basis. | |
1355 | */ | |
1da177e4 | 1356 | typedef struct pglist_data { |
496df3d3 BW |
1357 | /* |
1358 | * node_zones contains just the zones for THIS node. Not all of the | |
1359 | * zones may be populated, but it is the full list. It is referenced by | |
1360 | * this node's node_zonelists as well as other node's node_zonelists. | |
1361 | */ | |
1da177e4 | 1362 | struct zone node_zones[MAX_NR_ZONES]; |
496df3d3 BW |
1363 | |
1364 | /* | |
1365 | * node_zonelists contains references to all zones in all nodes. | |
1366 | * Generally the first zones will be references to this node's | |
1367 | * node_zones. | |
1368 | */ | |
523b9458 | 1369 | struct zonelist node_zonelists[MAX_ZONELISTS]; |
496df3d3 BW |
1370 | |
1371 | int nr_zones; /* number of populated zones in this node */ | |
43b02ba9 | 1372 | #ifdef CONFIG_FLATMEM /* means !SPARSEMEM */ |
1da177e4 | 1373 | struct page *node_mem_map; |
eefa864b JK |
1374 | #ifdef CONFIG_PAGE_EXTENSION |
1375 | struct page_ext *node_page_ext; | |
1376 | #endif | |
d41dee36 | 1377 | #endif |
3a2d7fa8 | 1378 | #if defined(CONFIG_MEMORY_HOTPLUG) || defined(CONFIG_DEFERRED_STRUCT_PAGE_INIT) |
208d54e5 | 1379 | /* |
fa004ab7 WY |
1380 | * Must be held any time you expect node_start_pfn, |
1381 | * node_present_pages, node_spanned_pages or nr_zones to stay constant. | |
3d060856 PT |
1382 | * Also synchronizes pgdat->first_deferred_pfn during deferred page |
1383 | * init. | |
208d54e5 | 1384 | * |
114d4b79 | 1385 | * pgdat_resize_lock() and pgdat_resize_unlock() are provided to |
3a2d7fa8 PT |
1386 | * manipulate node_size_lock without checking for CONFIG_MEMORY_HOTPLUG |
1387 | * or CONFIG_DEFERRED_STRUCT_PAGE_INIT. | |
114d4b79 | 1388 | * |
72c3b51b | 1389 | * Nests above zone->lock and zone->span_seqlock |
208d54e5 DH |
1390 | */ |
1391 | spinlock_t node_size_lock; | |
1392 | #endif | |
1da177e4 LT |
1393 | unsigned long node_start_pfn; |
1394 | unsigned long node_present_pages; /* total number of physical pages */ | |
1395 | unsigned long node_spanned_pages; /* total size of physical page | |
1396 | range, including holes */ | |
1397 | int node_id; | |
1da177e4 | 1398 | wait_queue_head_t kswapd_wait; |
5515061d | 1399 | wait_queue_head_t pfmemalloc_wait; |
8cd7c588 MG |
1400 | |
1401 | /* workqueues for throttling reclaim for different reasons. */ | |
1402 | wait_queue_head_t reclaim_wait[NR_VMSCAN_THROTTLE]; | |
1403 | ||
1404 | atomic_t nr_writeback_throttled;/* nr of writeback-throttled tasks */ | |
1405 | unsigned long nr_reclaim_start; /* nr of pages written at the time |
1406 | * throttling started. */ |
b4a0215e KW |
1407 | #ifdef CONFIG_MEMORY_HOTPLUG |
1408 | struct mutex kswapd_lock; | |
1409 | #endif | |
1410 | struct task_struct *kswapd; /* Protected by kswapd_lock */ | |
38087d9b | 1411 | int kswapd_order; |
97a225e6 | 1412 | enum zone_type kswapd_highest_zoneidx; |
38087d9b | 1413 | |
c73322d0 JW |
1414 | int kswapd_failures; /* Number of 'reclaimed == 0' runs */ |
1415 | ||
698b1b30 VB |
1416 | #ifdef CONFIG_COMPACTION |
1417 | int kcompactd_max_order; | |
97a225e6 | 1418 | enum zone_type kcompactd_highest_zoneidx; |
698b1b30 VB |
1419 | wait_queue_head_t kcompactd_wait; |
1420 | struct task_struct *kcompactd; | |
65d759c8 | 1421 | bool proactive_compact_trigger; |
8177a420 | 1422 | #endif |
281e3726 MG |
1423 | /* |
1424 | * This is a per-node reserve of pages that are not available | |
1425 | * to userspace allocations. | |
1426 | */ | |
1427 | unsigned long totalreserve_pages; | |
1428 | ||
a5f5f91d MG |
1429 | #ifdef CONFIG_NUMA |
1430 | /* | |
0a3c5772 | 1431 | * node reclaim becomes active if more unmapped pages than min_unmapped_pages exist. |
a5f5f91d MG |
1432 | */ |
1433 | unsigned long min_unmapped_pages; | |
1434 | unsigned long min_slab_pages; | |
1435 | #endif /* CONFIG_NUMA */ | |
1436 | ||
a52633d8 | 1437 | /* Write-intensive fields used by page reclaim */ |
e6ad640b | 1438 | CACHELINE_PADDING(_pad1_); |
3a80a7fa MG |
1439 | |
1440 | #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT | |
1441 | /* | |
1442 | * If memory initialisation on large machines is deferred then this | |
1443 | * is the first PFN that needs to be initialised. | |
1444 | */ | |
1445 | unsigned long first_deferred_pfn; | |
1446 | #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */ | |
a3d0a918 KS |
1447 | |
1448 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE | |
364c1eeb | 1449 | struct deferred_split deferred_split_queue; |
a3d0a918 | 1450 | #endif |
75ef7184 | 1451 | |
c6833e10 HY |
1452 | #ifdef CONFIG_NUMA_BALANCING |
1453 | /* start time in ms of current promote rate limit period */ | |
1454 | unsigned int nbp_rl_start; | |
1455 | /* number of promote candidate pages at start time of current rate limit period */ | |
1456 | unsigned long nbp_rl_nr_cand; | |
c959924b HY |
1457 | /* promote threshold in ms */ |
1458 | unsigned int nbp_threshold; | |
1459 | /* start time in ms of current promote threshold adjustment period */ | |
1460 | unsigned int nbp_th_start; | |
1461 | /* | |
c7cdf94e | 1462 | * number of promote candidate pages at start time of current promote |
c959924b HY |
1463 | * threshold adjustment period |
1464 | */ | |
1465 | unsigned long nbp_th_nr_cand; | |
c6833e10 | 1466 | #endif |
599d0c95 | 1467 | /* Fields commonly accessed by the page reclaim scanner */ |
867e5e1d JW |
1468 | |
1469 | /* | |
1470 | * NOTE: THIS IS UNUSED IF MEMCG IS ENABLED. | |
1471 | * | |
1472 | * Use mem_cgroup_lruvec() to look up lruvecs. | |
1473 | */ | |
1474 | struct lruvec __lruvec; | |
599d0c95 | 1475 | |
599d0c95 MG |
1476 | unsigned long flags; |
1477 | ||
bd74fdae YZ |
1478 | #ifdef CONFIG_LRU_GEN |
1479 | /* kswapd mm walk data */ |
9a52b2f3 | 1480 | struct lru_gen_mm_walk mm_walk; |
e4dde56c YZ |
1481 | /* lru_gen_folio list */ |
1482 | struct lru_gen_memcg memcg_lru; | |
bd74fdae YZ |
1483 | #endif |
1484 | ||
e6ad640b | 1485 | CACHELINE_PADDING(_pad2_); |
599d0c95 | 1486 | |
75ef7184 MG |
1487 | /* Per-node vmstats */ |
1488 | struct per_cpu_nodestat __percpu *per_cpu_nodestats; | |
1489 | atomic_long_t vm_stat[NR_VM_NODE_STAT_ITEMS]; | |
7766cf7a AK |
1490 | #ifdef CONFIG_NUMA |
1491 | struct memory_tier __rcu *memtier; | |
1492 | #endif | |
44b8f8bf JY |
1493 | #ifdef CONFIG_MEMORY_FAILURE |
1494 | struct memory_failure_stats mf_stats; | |
1495 | #endif | |
1da177e4 LT |
1496 | } pg_data_t; |
1497 | ||
1498 | #define node_present_pages(nid) (NODE_DATA(nid)->node_present_pages) | |
1499 | #define node_spanned_pages(nid) (NODE_DATA(nid)->node_spanned_pages) | |
1500 | ||
c6830c22 | 1501 | #define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn) |
da3649e1 | 1502 | #define node_end_pfn(nid) pgdat_end_pfn(NODE_DATA(nid)) |
c6830c22 | 1503 | |
da3649e1 CS |
1504 | static inline unsigned long pgdat_end_pfn(pg_data_t *pgdat) |
1505 | { | |
1506 | return pgdat->node_start_pfn + pgdat->node_spanned_pages; | |
1507 | } | |
1508 | ||
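/*
 * Illustrative sketch (hypothetical helper): testing whether a pfn lies
 * inside a node's spanned range using node_start_pfn and pgdat_end_pfn().
 * A pfn inside the span may still fall into a hole.
 */
static inline bool example_pfn_in_node_span(pg_data_t *pgdat, unsigned long pfn)
{
	return pfn >= pgdat->node_start_pfn && pfn < pgdat_end_pfn(pgdat);
}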
208d54e5 DH |
1509 | #include <linux/memory_hotplug.h> |
1510 | ||
72675e13 | 1511 | void build_all_zonelists(pg_data_t *pgdat); |
5ecd9d40 | 1512 | void wakeup_kswapd(struct zone *zone, gfp_t gfp_mask, int order, |
97a225e6 | 1513 | enum zone_type highest_zoneidx); |
86a294a8 | 1514 | bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, |
97a225e6 | 1515 | int highest_zoneidx, unsigned int alloc_flags, |
86a294a8 | 1516 | long free_pages); |
7aeb09f9 | 1517 | bool zone_watermark_ok(struct zone *z, unsigned int order, |
97a225e6 | 1518 | unsigned long mark, int highest_zoneidx, |
c603844b | 1519 | unsigned int alloc_flags); |
c1d0da83 LD |
1520 | /* |
1521 | * Memory initialization context, use to differentiate memory added by | |
1522 | * the platform statically or via memory hotplug interface. | |
1523 | */ | |
1524 | enum meminit_context { | |
1525 | MEMINIT_EARLY, | |
1526 | MEMINIT_HOTPLUG, | |
a2f3aa02 | 1527 | }; |
c1d0da83 | 1528 | |
dc0bbf3b | 1529 | extern void init_currently_empty_zone(struct zone *zone, unsigned long start_pfn, |
b171e409 | 1530 | unsigned long size); |
718127cc | 1531 | |
bea8c150 | 1532 | extern void lruvec_init(struct lruvec *lruvec); |
7f5e86c2 | 1533 | |
599d0c95 | 1534 | static inline struct pglist_data *lruvec_pgdat(struct lruvec *lruvec) |
7f5e86c2 | 1535 | { |
c255a458 | 1536 | #ifdef CONFIG_MEMCG |
599d0c95 | 1537 | return lruvec->pgdat; |
7f5e86c2 | 1538 | #else |
867e5e1d | 1539 | return container_of(lruvec, struct pglist_data, __lruvec); |
7f5e86c2 KK |
1540 | #endif |
1541 | } | |
1542 | ||
7aac7898 LS |
1543 | #ifdef CONFIG_HAVE_MEMORYLESS_NODES |
1544 | int local_memory_node(int node_id); | |
1545 | #else | |
1546 | static inline int local_memory_node(int node_id) { return node_id; }; | |
1547 | #endif | |
1548 | ||
1da177e4 LT |
1549 | /* |
1550 | * zone_idx() returns 0 for the ZONE_DMA zone, 1 for the ZONE_NORMAL zone, etc. | |
1551 | */ | |
1552 | #define zone_idx(zone) ((zone) - (zone)->zone_pgdat->node_zones) | |
1553 | ||
1f90a347 DW |
1554 | #ifdef CONFIG_ZONE_DEVICE |
1555 | static inline bool zone_is_zone_device(struct zone *zone) | |
1556 | { | |
1557 | return zone_idx(zone) == ZONE_DEVICE; | |
1558 | } | |
1559 | #else | |
1560 | static inline bool zone_is_zone_device(struct zone *zone) | |
1561 | { | |
1562 | return false; | |
1563 | } | |
1564 | #endif | |
1565 | ||
6aa303de MG |
1566 | /* |
1567 | * Returns true if a zone has pages managed by the buddy allocator. | |
1568 | * All the reclaim decisions have to use this function rather than | |
1569 | * populated_zone(). If the whole zone is reserved then we can easily | |
1570 | * end up with populated_zone() && !managed_zone(). | |
1571 | */ | |
1572 | static inline bool managed_zone(struct zone *zone) | |
1573 | { | |
9705bea5 | 1574 | return zone_managed_pages(zone); |
6aa303de MG |
1575 | } |
1576 | ||
1577 | /* Returns true if a zone has memory */ | |
1578 | static inline bool populated_zone(struct zone *zone) | |
f3fe6512 | 1579 | { |
6aa303de | 1580 | return zone->present_pages; |
f3fe6512 CK |
1581 | } |
1582 | ||
c1093b74 PT |
1583 | #ifdef CONFIG_NUMA |
1584 | static inline int zone_to_nid(struct zone *zone) | |
1585 | { | |
1586 | return zone->node; | |
1587 | } | |
1588 | ||
1589 | static inline void zone_set_nid(struct zone *zone, int nid) | |
1590 | { | |
1591 | zone->node = nid; | |
1592 | } | |
1593 | #else | |
1594 | static inline int zone_to_nid(struct zone *zone) | |
1595 | { | |
1596 | return 0; | |
1597 | } | |
1598 | ||
1599 | static inline void zone_set_nid(struct zone *zone, int nid) {} | |
1600 | #endif | |
1601 | ||
2a1e274a MG |
1602 | extern int movable_zone; |
1603 | ||
2f1b6248 | 1604 | static inline int is_highmem_idx(enum zone_type idx) |
1da177e4 | 1605 | { |
e53ef38d | 1606 | #ifdef CONFIG_HIGHMEM |
2a1e274a | 1607 | return (idx == ZONE_HIGHMEM || |
b19bd1c9 | 1608 | (idx == ZONE_MOVABLE && movable_zone == ZONE_HIGHMEM)); |
e53ef38d CL |
1609 | #else |
1610 | return 0; | |
1611 | #endif | |
1da177e4 LT |
1612 | } |
1613 | ||
1da177e4 | 1614 | /** |
b4a991ec | 1615 | * is_highmem - helper function to quickly check if a struct zone is a |
1da177e4 LT |
1616 | * highmem zone or not. This is an attempt to keep references |
1617 | * to ZONE_{DMA/NORMAL/HIGHMEM/etc} in general code to a minimum. | |
198fba41 MR |
1618 | * @zone: pointer to struct zone variable |
1619 | * Return: 1 for a highmem zone, 0 otherwise | |
1da177e4 LT |
1620 | */ |
1621 | static inline int is_highmem(struct zone *zone) | |
1622 | { | |
29f9cb53 | 1623 | return is_highmem_idx(zone_idx(zone)); |
bb077c3f KW |
1624 | } |
1625 | ||
1626 | #ifdef CONFIG_ZONE_DMA | |
1627 | bool has_managed_dma(void); | |
e53ef38d | 1628 | #else |
bb077c3f KW |
1629 | static inline bool has_managed_dma(void) |
1630 | { | |
1631 | return false; | |
1da177e4 | 1632 | } |
bb077c3f | 1633 | #endif |
1da177e4 | 1634 | |
f0c0b2b8 | 1635 | |
a9ee6cf5 | 1636 | #ifndef CONFIG_NUMA |
1da177e4 LT |
1637 | |
1638 | extern struct pglist_data contig_page_data; | |
351de44f MG |
1639 | static inline struct pglist_data *NODE_DATA(int nid) |
1640 | { | |
1641 | return &contig_page_data; | |
1642 | } | |
1da177e4 | 1643 | |
a9ee6cf5 | 1644 | #else /* CONFIG_NUMA */ |
1da177e4 LT |
1645 | |
1646 | #include <asm/mmzone.h> | |
1647 | ||
a9ee6cf5 | 1648 | #endif /* !CONFIG_NUMA */ |
348f8b6c | 1649 | |
95144c78 KH |
1650 | extern struct pglist_data *first_online_pgdat(void); |
1651 | extern struct pglist_data *next_online_pgdat(struct pglist_data *pgdat); | |
1652 | extern struct zone *next_zone(struct zone *zone); | |
8357f869 KH |
1653 | |
1654 | /** | |
12d15f0d | 1655 | * for_each_online_pgdat - helper macro to iterate over all online nodes |
198fba41 | 1656 | * @pgdat: pointer to a pg_data_t variable |
8357f869 KH |
1657 | */ |
1658 | #define for_each_online_pgdat(pgdat) \ | |
1659 | for (pgdat = first_online_pgdat(); \ | |
1660 | pgdat; \ | |
1661 | pgdat = next_online_pgdat(pgdat)) | |
8357f869 KH |
1662 | /** |
1663 | * for_each_zone - helper macro to iterate over all memory zones | |
198fba41 | 1664 | * @zone: pointer to struct zone variable |
8357f869 KH |
1665 | * |
1666 | * The user only needs to declare the zone variable; for_each_zone |
1667 | * fills it in. | |
1668 | */ | |
1669 | #define for_each_zone(zone) \ | |
1670 | for (zone = (first_online_pgdat())->node_zones; \ | |
1671 | zone; \ | |
1672 | zone = next_zone(zone)) | |
1673 | ||
ee99c71c KM |
1674 | #define for_each_populated_zone(zone) \ |
1675 | for (zone = (first_online_pgdat())->node_zones; \ | |
1676 | zone; \ | |
1677 | zone = next_zone(zone)) \ | |
1678 | if (!populated_zone(zone)) \ | |
1679 | ; /* do nothing */ \ | |
1680 | else | |
1681 | ||
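/*
 * Illustrative sketch (hypothetical helper): walking every populated zone
 * with the iterator above, here to total the pages managed by the buddy
 * allocator via zone_managed_pages(), which is declared with struct zone
 * earlier in this header.
 */
static inline unsigned long example_total_managed_pages(void)
{
	struct zone *zone;
	unsigned long pages = 0;

	for_each_populated_zone(zone)
		pages += zone_managed_pages(zone);

	return pages;
}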
dd1a239f MG |
1682 | static inline struct zone *zonelist_zone(struct zoneref *zoneref) |
1683 | { | |
1684 | return zoneref->zone; | |
1685 | } | |
1686 | ||
1687 | static inline int zonelist_zone_idx(struct zoneref *zoneref) | |
1688 | { | |
1689 | return zoneref->zone_idx; | |
1690 | } | |
1691 | ||
1692 | static inline int zonelist_node_idx(struct zoneref *zoneref) | |
1693 | { | |
c1093b74 | 1694 | return zone_to_nid(zoneref->zone); |
dd1a239f MG |
1695 | } |
1696 | ||
682a3385 MG |
1697 | struct zoneref *__next_zones_zonelist(struct zoneref *z, |
1698 | enum zone_type highest_zoneidx, | |
1699 | nodemask_t *nodes); | |
1700 | ||
19770b32 MG |
1701 | /** |
1702 | * next_zones_zonelist - Returns the next zone at or below highest_zoneidx within the allowed nodemask using a cursor within a zonelist as a starting point | |
198fba41 MR |
1703 | * @z: The cursor used as a starting point for the search |
1704 | * @highest_zoneidx: The zone index of the highest zone to return | |
1705 | * @nodes: An optional nodemask to filter the zonelist with | |
19770b32 MG |
1706 | * |
1707 | * This function returns the next zone at or below a given zone index that is | |
1708 | * within the allowed nodemask using a cursor as the starting point for the | |
5bead2a0 MG |
1709 | * search. The zoneref returned is a cursor that represents the current zone |
1710 | * being examined. It should be advanced by one before calling | |
1711 | * next_zones_zonelist again. | |
198fba41 MR |
1712 | * |
1713 | * Return: the next zone at or below highest_zoneidx within the allowed | |
1714 | * nodemask using a cursor within a zonelist as a starting point | |
19770b32 | 1715 | */ |
682a3385 | 1716 | static __always_inline struct zoneref *next_zones_zonelist(struct zoneref *z, |
19770b32 | 1717 | enum zone_type highest_zoneidx, |
682a3385 MG |
1718 | nodemask_t *nodes) |
1719 | { | |
1720 | if (likely(!nodes && zonelist_zone_idx(z) <= highest_zoneidx)) | |
1721 | return z; | |
1722 | return __next_zones_zonelist(z, highest_zoneidx, nodes); | |
1723 | } | |
dd1a239f | 1724 | |
19770b32 MG |
1725 | /** |
1726 | * first_zones_zonelist - Returns the first zone at or below highest_zoneidx within the allowed nodemask in a zonelist | |
198fba41 MR |
1727 | * @zonelist: The zonelist to search for a suitable zone |
1728 | * @highest_zoneidx: The zone index of the highest zone to return | |
1729 | * @nodes: An optional nodemask to filter the zonelist with | |
19770b32 MG |
1730 | * |
1731 | * This function returns the first zone at or below a given zone index that is | |
1732 | * within the allowed nodemask. The zoneref returned is a cursor that can be | |
5bead2a0 MG |
1733 | * used to iterate the zonelist with next_zones_zonelist by advancing it by |
1734 | * one before calling. | |
ea57485a VB |
1735 | * |
1736 | * When no eligible zone is found, zoneref->zone is NULL (zoneref itself is | |
1737 | * never NULL). This may happen either genuinely, or due to a concurrent |
1738 | * nodemask update caused by a cpuset modification. |
198fba41 MR |
1739 | * |
1740 | * Return: Zoneref pointer for the first suitable zone found | |
19770b32 | 1741 | */ |
dd1a239f | 1742 | static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist, |
19770b32 | 1743 | enum zone_type highest_zoneidx, |
c33d6c06 | 1744 | nodemask_t *nodes) |
54a6eb5c | 1745 | { |
c33d6c06 | 1746 | return next_zones_zonelist(zonelist->_zonerefs, |
05891fb0 | 1747 | highest_zoneidx, nodes); |
54a6eb5c MG |
1748 | } |
1749 | ||
19770b32 MG |
1750 | /** |
1751 | * for_each_zone_zonelist_nodemask - helper macro to iterate over valid zones in a zonelist at or below a given zone index and within a nodemask | |
198fba41 MR |
1752 | * @zone: The current zone in the iterator |
1753 | * @z: The current pointer within zonelist->_zonerefs being iterated | |
1754 | * @zlist: The zonelist being iterated | |
1755 | * @highidx: The zone index of the highest zone to return | |
1756 | * @nodemask: Nodemask allowed by the allocator | |
19770b32 MG |
1757 | * |
1758 | * This iterator iterates through all zones at or below a given zone index and |
1759 | * within a given nodemask | |
1760 | */ | |
1761 | #define for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \ | |
c33d6c06 | 1762 | for (z = first_zones_zonelist(zlist, highidx, nodemask), zone = zonelist_zone(z); \ |
19770b32 | 1763 | zone; \ |
05891fb0 | 1764 | z = next_zones_zonelist(++z, highidx, nodemask), \ |
c33d6c06 MG |
1765 | zone = zonelist_zone(z)) |
1766 | ||
30d8ec73 | 1767 | #define for_next_zone_zonelist_nodemask(zone, z, highidx, nodemask) \ |
29943248 | 1768 | for (zone = zonelist_zone(z); \ |
c33d6c06 MG |
1769 | zone; \ |
1770 | z = next_zones_zonelist(++z, highidx, nodemask), \ | |
1771 | zone = zonelist_zone(z)) | |
1772 | ||
54a6eb5c MG |
1773 | |
1774 | /** | |
1775 | * for_each_zone_zonelist - helper macro to iterate over valid zones in a zonelist at or below a given zone index | |
198fba41 MR |
1776 | * @zone: The current zone in the iterator |
1777 | * @z: The current pointer within zonelist->zones being iterated | |
1778 | * @zlist: The zonelist being iterated | |
1779 | * @highidx: The zone index of the highest zone to return | |
54a6eb5c MG |
1780 | * |
1781 | * This iterator iterates through all zones at or below a given zone index. |
1782 | */ | |
1783 | #define for_each_zone_zonelist(zone, z, zlist, highidx) \ | |
19770b32 | 1784 | for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, NULL) |
54a6eb5c | 1785 | |
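/*
 * Illustrative sketch (hypothetical helper): scanning a node's fallback
 * zonelist the way the page allocator does, restricted to zones at or below
 * @highidx, and returning the first zone with buddy-managed pages.
 */
static inline struct zone *example_first_usable_zone(int nid,
						     enum zone_type highidx)
{
	struct zonelist *zonelist =
		&NODE_DATA(nid)->node_zonelists[ZONELIST_FALLBACK];
	struct zoneref *z;
	struct zone *zone;

	for_each_zone_zonelist(zone, z, zonelist, highidx)
		if (managed_zone(zone))
			return zone;

	return NULL;
}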
8ca1b5a4 FT |
1786 | /* Whether the 'nodes' are all movable nodes */ |
1787 | static inline bool movable_only_nodes(nodemask_t *nodes) | |
1788 | { | |
1789 | struct zonelist *zonelist; | |
1790 | struct zoneref *z; | |
1791 | int nid; | |
1792 | ||
1793 | if (nodes_empty(*nodes)) | |
1794 | return false; | |
1795 | ||
1796 | /* | |
1797 | * We can choose an arbitrary node from the nodemask to get a |
1798 | * zonelist, as they are interlinked. We just need to find |
1799 | * at least one zone that can satisfy kernel allocations. | |
1800 | */ | |
1801 | nid = first_node(*nodes); | |
1802 | zonelist = &NODE_DATA(nid)->node_zonelists[ZONELIST_FALLBACK]; | |
1803 | z = first_zones_zonelist(zonelist, ZONE_NORMAL, nodes); | |
29943248 | 1804 | return !zonelist_zone(z); |
8ca1b5a4 FT |
1805 | } |
1806 | ||
1807 | ||
d41dee36 AW |
1808 | #ifdef CONFIG_SPARSEMEM |
1809 | #include <asm/sparsemem.h> | |
1810 | #endif | |
1811 | ||
2bdaf115 AW |
1812 | #ifdef CONFIG_FLATMEM |
1813 | #define pfn_to_nid(pfn) (0) | |
1814 | #endif | |
1815 | ||
d41dee36 AW |
1816 | #ifdef CONFIG_SPARSEMEM |
1817 | ||
1818 | /* | |
d41dee36 AW |
1819 | * PA_SECTION_SHIFT physical address to/from section number |
1820 | * PFN_SECTION_SHIFT pfn to/from section number | |
1821 | */ | |
d41dee36 AW |
1822 | #define PA_SECTION_SHIFT (SECTION_SIZE_BITS) |
1823 | #define PFN_SECTION_SHIFT (SECTION_SIZE_BITS - PAGE_SHIFT) | |
1824 | ||
1825 | #define NR_MEM_SECTIONS (1UL << SECTIONS_SHIFT) | |
1826 | ||
1827 | #define PAGES_PER_SECTION (1UL << PFN_SECTION_SHIFT) | |
1828 | #define PAGE_SECTION_MASK (~(PAGES_PER_SECTION-1)) | |
1829 | ||
835c134e | 1830 | #define SECTION_BLOCKFLAGS_BITS \ |
d9c23400 | 1831 | ((1UL << (PFN_SECTION_SHIFT - pageblock_order)) * NR_PAGEBLOCK_BITS) |
835c134e | 1832 | |
5e0a760b KS |
1833 | #if (MAX_PAGE_ORDER + PAGE_SHIFT) > SECTION_SIZE_BITS |
1834 | #error Allocator MAX_PAGE_ORDER exceeds SECTION_SIZE | |
d41dee36 AW |
1835 | #endif |
1836 | ||
1dd2bfc8 YI |
1837 | static inline unsigned long pfn_to_section_nr(unsigned long pfn) |
1838 | { | |
1839 | return pfn >> PFN_SECTION_SHIFT; | |
1840 | } | |
1841 | static inline unsigned long section_nr_to_pfn(unsigned long sec) | |
1842 | { | |
1843 | return sec << PFN_SECTION_SHIFT; | |
1844 | } | |
e3c40f37 | 1845 | |
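/*
 * Worked example (illustrative, arch-specific numbers): with
 * SECTION_SIZE_BITS == 27 and 4K pages, PFN_SECTION_SHIFT is 15, so
 * PAGES_PER_SECTION is 32768; pfn_to_section_nr(0x12345) == 2 and
 * section_nr_to_pfn(2) == 0x10000.
 */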
a539f353 DK |
1846 | #define SECTION_ALIGN_UP(pfn) (((pfn) + PAGES_PER_SECTION - 1) & PAGE_SECTION_MASK) |
1847 | #define SECTION_ALIGN_DOWN(pfn) ((pfn) & PAGE_SECTION_MASK) | |
1848 | ||
f1eca35a | 1849 | #define SUBSECTION_SHIFT 21 |
9ffc1d19 | 1850 | #define SUBSECTION_SIZE (1UL << SUBSECTION_SHIFT) |
f1eca35a DW |
1851 | |
1852 | #define PFN_SUBSECTION_SHIFT (SUBSECTION_SHIFT - PAGE_SHIFT) | |
1853 | #define PAGES_PER_SUBSECTION (1UL << PFN_SUBSECTION_SHIFT) | |
1854 | #define PAGE_SUBSECTION_MASK (~(PAGES_PER_SUBSECTION-1)) | |
1855 | ||
1856 | #if SUBSECTION_SHIFT > SECTION_SIZE_BITS | |
1857 | #error Subsection size exceeds section size | |
1858 | #else | |
1859 | #define SUBSECTIONS_PER_SECTION (1UL << (SECTION_SIZE_BITS - SUBSECTION_SHIFT)) | |
1860 | #endif | |
1861 | ||
a3619190 DW |
1862 | #define SUBSECTION_ALIGN_UP(pfn) ALIGN((pfn), PAGES_PER_SUBSECTION) |
1863 | #define SUBSECTION_ALIGN_DOWN(pfn) ((pfn) & PAGE_SUBSECTION_MASK) | |
1864 | ||
f1eca35a | 1865 | struct mem_section_usage { |
5ec8e8ea | 1866 | struct rcu_head rcu; |
0a9f9f62 | 1867 | #ifdef CONFIG_SPARSEMEM_VMEMMAP |
f1eca35a | 1868 | DECLARE_BITMAP(subsection_map, SUBSECTIONS_PER_SECTION); |
0a9f9f62 | 1869 | #endif |
f1eca35a DW |
1870 | /* See declaration of similar field in struct zone */ |
1871 | unsigned long pageblock_flags[0]; | |
1872 | }; | |
1873 | ||
f46edbd1 DW |
1874 | void subsection_map_init(unsigned long pfn, unsigned long nr_pages); |
1875 | ||
d41dee36 | 1876 | struct page; |
eefa864b | 1877 | struct page_ext; |
d41dee36 | 1878 | struct mem_section { |
29751f69 AW |
1879 | /* |
1880 | * This is, logically, a pointer to an array of struct | |
1881 | * pages. However, it is stored with some other magic. | |
1882 | * (see sparse.c::sparse_init_one_section()) | |
1883 | * | |
30c253e6 AW |
1884 | * Additionally during early boot we encode node id of |
1885 | * the location of the section here to guide allocation. | |
1886 | * (see sparse.c::memory_present()) | |
1887 | * | |
29751f69 AW |
1888 | * Making it a UL at least makes someone do a cast |
1889 | * before using it wrong. | |
1890 | */ | |
1891 | unsigned long section_mem_map; | |
5c0e3066 | 1892 | |
f1eca35a | 1893 | struct mem_section_usage *usage; |
eefa864b JK |
1894 | #ifdef CONFIG_PAGE_EXTENSION |
1895 | /* | |
0c9ad804 | 1896 | * If SPARSEMEM, pgdat doesn't have a page_ext pointer. We keep it in |
eefa864b JK |
1897 | * the section instead. (see page_ext.h about this.) |
1898 | */ | |
1899 | struct page_ext *page_ext; | |
1900 | unsigned long pad; | |
1901 | #endif | |
55878e88 CS |
1902 | /* |
1903 | * WARNING: mem_section must be a power-of-2 in size for the | |
1904 | * calculation and use of SECTION_ROOT_MASK to make sense. | |
1905 | */ | |
d41dee36 AW |
1906 | }; |
1907 | ||
3e347261 BP |
1908 | #ifdef CONFIG_SPARSEMEM_EXTREME |
1909 | #define SECTIONS_PER_ROOT (PAGE_SIZE / sizeof (struct mem_section)) | |
1910 | #else | |
1911 | #define SECTIONS_PER_ROOT 1 | |
1912 | #endif | |
802f192e | 1913 | |
3e347261 | 1914 | #define SECTION_NR_TO_ROOT(sec) ((sec) / SECTIONS_PER_ROOT) |
0faa5638 | 1915 | #define NR_SECTION_ROOTS DIV_ROUND_UP(NR_MEM_SECTIONS, SECTIONS_PER_ROOT) |
3e347261 | 1916 | #define SECTION_ROOT_MASK (SECTIONS_PER_ROOT - 1) |
802f192e | 1917 | |
3e347261 | 1918 | #ifdef CONFIG_SPARSEMEM_EXTREME |
83e3c487 | 1919 | extern struct mem_section **mem_section; |
802f192e | 1920 | #else |
3e347261 BP |
1921 | extern struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT]; |
1922 | #endif | |
d41dee36 | 1923 | |
f1eca35a DW |
1924 | static inline unsigned long *section_to_usemap(struct mem_section *ms) |
1925 | { | |
1926 | return ms->usage->pageblock_flags; | |
1927 | } | |
1928 | ||
29751f69 AW |
1929 | static inline struct mem_section *__nr_to_section(unsigned long nr) |
1930 | { | |
a431dbbc WL |
1931 | unsigned long root = SECTION_NR_TO_ROOT(nr); |
1932 | ||
1933 | if (unlikely(root >= NR_SECTION_ROOTS)) | |
1934 | return NULL; | |
1935 | ||
83e3c487 | 1936 | #ifdef CONFIG_SPARSEMEM_EXTREME |
a431dbbc | 1937 | if (!mem_section || !mem_section[root]) |
83e3c487 KS |
1938 | return NULL; |
1939 | #endif | |
a431dbbc | 1940 | return &mem_section[root][nr & SECTION_ROOT_MASK]; |
29751f69 | 1941 | } |
f1eca35a | 1942 | extern size_t mem_section_usage_size(void); |
29751f69 AW |
1943 | |
1944 | /* | |
1945 | * We use the lower bits of the mem_map pointer to store | |
def9b71e PT |
1946 | * a little bit of information. The pointer is calculated |
1947 | * as mem_map - section_nr_to_pfn(pnum). The result is | |
1948 | * aligned to the minimum alignment of the two values: | |
1949 | * 1. All mem_map arrays are page-aligned. | |
1950 | * 2. section_nr_to_pfn() always clears PFN_SECTION_SHIFT | |
1951 | * lowest bits. PFN_SECTION_SHIFT is arch-specific | |
1952 | * (equal to SECTION_SIZE_BITS - PAGE_SHIFT), and the |
1953 | * worst combination is powerpc with 256k pages, |
1954 | * which results in PFN_SECTION_SHIFT equal to 6. |
ed7802dd MS |
1955 | * To sum it up, at least 6 bits are available on all architectures. |
1956 | * However, architectures other than powerpc can use more than 6 bits |
1957 | * (e.g. 15 bits are available on x86_64, and 13 bits with the worst |
1958 | * case of 64K pages on arm64), provided any bit beyond the 6th is |
1959 | * never relied upon on powerpc. |
29751f69 | 1960 | */ |
ed7802dd MS |
1961 | enum { |
1962 | SECTION_MARKED_PRESENT_BIT, | |
1963 | SECTION_HAS_MEM_MAP_BIT, | |
1964 | SECTION_IS_ONLINE_BIT, | |
1965 | SECTION_IS_EARLY_BIT, | |
1966 | #ifdef CONFIG_ZONE_DEVICE | |
1967 | SECTION_TAINT_ZONE_DEVICE_BIT, | |
d65917c4 FL |
1968 | #endif |
1969 | #ifdef CONFIG_SPARSEMEM_VMEMMAP_PREINIT | |
1970 | SECTION_IS_VMEMMAP_PREINIT_BIT, | |
ed7802dd MS |
1971 | #endif |
1972 | SECTION_MAP_LAST_BIT, | |
1973 | }; | |
1974 | ||
1975 | #define SECTION_MARKED_PRESENT BIT(SECTION_MARKED_PRESENT_BIT) | |
1976 | #define SECTION_HAS_MEM_MAP BIT(SECTION_HAS_MEM_MAP_BIT) | |
1977 | #define SECTION_IS_ONLINE BIT(SECTION_IS_ONLINE_BIT) | |
1978 | #define SECTION_IS_EARLY BIT(SECTION_IS_EARLY_BIT) | |
1979 | #ifdef CONFIG_ZONE_DEVICE | |
1980 | #define SECTION_TAINT_ZONE_DEVICE BIT(SECTION_TAINT_ZONE_DEVICE_BIT) | |
1981 | #endif | |
d65917c4 FL |
1982 | #ifdef CONFIG_SPARSEMEM_VMEMMAP_PREINIT |
1983 | #define SECTION_IS_VMEMMAP_PREINIT BIT(SECTION_IS_VMEMMAP_PREINIT_BIT) | |
1984 | #endif | |
ed7802dd MS |
1985 | #define SECTION_MAP_MASK (~(BIT(SECTION_MAP_LAST_BIT) - 1)) |
1986 | #define SECTION_NID_SHIFT SECTION_MAP_LAST_BIT | |
29751f69 AW |
1987 | |
1988 | static inline struct page *__section_mem_map_addr(struct mem_section *section) | |
1989 | { | |
1990 | unsigned long map = section->section_mem_map; | |
1991 | map &= SECTION_MAP_MASK; | |
1992 | return (struct page *)map; | |
1993 | } | |
1994 | ||
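/*
 * Illustrative sketch (hypothetical helper): roughly what
 * asm-generic/memory_model.h does for classic (non-vmemmap) SPARSEMEM.
 * Because section_mem_map encodes "mem_map - section_nr_to_pfn(pnum)",
 * adding the pfn to the decoded pointer yields its struct page. This
 * assumes the section for @pfn exists and has a mem_map.
 */
static inline struct page *example_sparse_pfn_to_page(unsigned long pfn)
{
	struct mem_section *ms = __nr_to_section(pfn_to_section_nr(pfn));

	return __section_mem_map_addr(ms) + pfn;
}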
540557b9 | 1995 | static inline int present_section(struct mem_section *section) |
29751f69 | 1996 | { |
802f192e | 1997 | return (section && (section->section_mem_map & SECTION_MARKED_PRESENT)); |
29751f69 AW |
1998 | } |
1999 | ||
540557b9 AW |
2000 | static inline int present_section_nr(unsigned long nr) |
2001 | { | |
2002 | return present_section(__nr_to_section(nr)); | |
2003 | } | |
2004 | ||
2005 | static inline int valid_section(struct mem_section *section) | |
29751f69 | 2006 | { |
802f192e | 2007 | return (section && (section->section_mem_map & SECTION_HAS_MEM_MAP)); |
29751f69 AW |
2008 | } |
2009 | ||
326e1b8f DW |
2010 | static inline int early_section(struct mem_section *section) |
2011 | { | |
2012 | return (section && (section->section_mem_map & SECTION_IS_EARLY)); | |
2013 | } | |
2014 | ||
29751f69 AW |
2015 | static inline int valid_section_nr(unsigned long nr) |
2016 | { | |
2017 | return valid_section(__nr_to_section(nr)); | |
2018 | } | |
2019 | ||
2d070eab MH |
2020 | static inline int online_section(struct mem_section *section) |
2021 | { | |
2022 | return (section && (section->section_mem_map & SECTION_IS_ONLINE)); | |
2023 | } | |
2024 | ||
ed7802dd | 2025 | #ifdef CONFIG_ZONE_DEVICE |
1f90a347 DW |
2026 | static inline int online_device_section(struct mem_section *section) |
2027 | { | |
2028 | unsigned long flags = SECTION_IS_ONLINE | SECTION_TAINT_ZONE_DEVICE; | |
2029 | ||
2030 | return section && ((section->section_mem_map & flags) == flags); | |
2031 | } | |
ed7802dd MS |
2032 | #else |
2033 | static inline int online_device_section(struct mem_section *section) | |
2034 | { | |
2035 | return 0; | |
2036 | } | |
2037 | #endif | |
1f90a347 | 2038 | |
d65917c4 FL |
2039 | #ifdef CONFIG_SPARSEMEM_VMEMMAP_PREINIT |
2040 | static inline int preinited_vmemmap_section(struct mem_section *section) | |
2041 | { | |
2042 | return (section && | |
2043 | (section->section_mem_map & SECTION_IS_VMEMMAP_PREINIT)); | |
2044 | } | |
2045 | ||
2046 | void sparse_vmemmap_init_nid_early(int nid); | |
2047 | void sparse_vmemmap_init_nid_late(int nid); | |
2048 | ||
2049 | #else | |
2050 | static inline int preinited_vmemmap_section(struct mem_section *section) | |
2051 | { | |
2052 | return 0; | |
2053 | } | |
2054 | static inline void sparse_vmemmap_init_nid_early(int nid) | |
2055 | { | |
2056 | } | |
2057 | ||
2058 | static inline void sparse_vmemmap_init_nid_late(int nid) | |
2059 | { | |
2060 | } | |
2061 | #endif | |
2062 | ||
2d070eab MH |
2063 | static inline int online_section_nr(unsigned long nr) |
2064 | { | |
2065 | return online_section(__nr_to_section(nr)); | |
2066 | } | |
2067 | ||
2068 | #ifdef CONFIG_MEMORY_HOTPLUG | |
2069 | void online_mem_sections(unsigned long start_pfn, unsigned long end_pfn); | |
2d070eab MH |
2070 | void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn); |
2071 | #endif | |
2d070eab | 2072 | |
d41dee36 AW |
2073 | static inline struct mem_section *__pfn_to_section(unsigned long pfn) |
2074 | { | |
29751f69 | 2075 | return __nr_to_section(pfn_to_section_nr(pfn)); |
d41dee36 AW |
2076 | } |
2077 | ||
2491f0a2 | 2078 | extern unsigned long __highest_present_section_nr; |
c4e1be9e | 2079 | |
f46edbd1 DW |
2080 | static inline int subsection_map_index(unsigned long pfn) |
2081 | { | |
2082 | return (pfn & ~(PAGE_SECTION_MASK)) / PAGES_PER_SUBSECTION; | |
2083 | } | |
2084 | ||
2085 | #ifdef CONFIG_SPARSEMEM_VMEMMAP | |
2086 | static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn) | |
2087 | { | |
2088 | int idx = subsection_map_index(pfn); | |
82f0b6f0 | 2089 | struct mem_section_usage *usage = READ_ONCE(ms->usage); |
f46edbd1 | 2090 | |
82f0b6f0 | 2091 | return usage ? test_bit(idx, usage->subsection_map) : 0; |
f46edbd1 | 2092 | } |
03792631 DW |
2093 | |
2094 | static inline bool pfn_section_first_valid(struct mem_section *ms, unsigned long *pfn) | |
2095 | { | |
2096 | struct mem_section_usage *usage = READ_ONCE(ms->usage); | |
2097 | int idx = subsection_map_index(*pfn); | |
2098 | unsigned long bit; | |
2099 | ||
2100 | if (!usage) | |
2101 | return false; | |
2102 | ||
2103 | if (test_bit(idx, usage->subsection_map)) | |
2104 | return true; | |
2105 | ||
2106 | /* Find the next subsection that exists */ | |
2107 | bit = find_next_bit(usage->subsection_map, SUBSECTIONS_PER_SECTION, idx); | |
2108 | if (bit == SUBSECTIONS_PER_SECTION) | |
2109 | return false; | |
2110 | ||
2111 | *pfn = (*pfn & PAGE_SECTION_MASK) + (bit * PAGES_PER_SUBSECTION); | |
2112 | return true; | |
2113 | } | |
f46edbd1 DW |
2114 | #else |
2115 | static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn) | |
2116 | { | |
2117 | return 1; | |
2118 | } | |
03792631 DW |
2119 | |
2120 | static inline bool pfn_section_first_valid(struct mem_section *ms, unsigned long *pfn) | |
2121 | { | |
2122 | return true; | |
2123 | } | |
f46edbd1 DW |
2124 | #endif |
2125 | ||
d65917c4 FL |
2126 | void sparse_init_early_section(int nid, struct page *map, unsigned long pnum, |
2127 | unsigned long flags); | |
2128 | ||
7b7bf499 | 2129 | #ifndef CONFIG_HAVE_ARCH_PFN_VALID |
51c656ae MR |
2130 | /** |
2131 | * pfn_valid - check if there is a valid memory map entry for a PFN | |
2132 | * @pfn: the page frame number to check | |
2133 | * | |
2134 | * Check if there is a valid memory map entry aka struct page for the @pfn. | |
2135 | * Note, that availability of the memory map entry does not imply that | |
2136 | * there is actual usable memory at that @pfn. The struct page may | |
2137 | * represent a hole or an unusable page frame. | |
2138 | * | |
2139 | * Return: 1 for PFNs that have memory map entries and 0 otherwise | |
2140 | */ | |
d41dee36 AW |
2141 | static inline int pfn_valid(unsigned long pfn) |
2142 | { | |
f46edbd1 | 2143 | struct mem_section *ms; |
5ec8e8ea | 2144 | int ret; |
f46edbd1 | 2145 | |
16c9afc7 AK |
2146 | /* |
2147 | * Ensure the upper PAGE_SHIFT bits are clear in the | |
2148 | * pfn. Else it might lead to false positives when | |
2149 | * some of the upper bits are set, but the lower bits | |
2150 | * match a valid pfn. | |
2151 | */ | |
2152 | if (PHYS_PFN(PFN_PHYS(pfn)) != pfn) | |
2153 | return 0; | |
2154 | ||
d41dee36 AW |
2155 | if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS) |
2156 | return 0; | |
f1dc0db2 | 2157 | ms = __pfn_to_section(pfn); |
f6564fce | 2158 | rcu_read_lock_sched(); |
5ec8e8ea | 2159 | if (!valid_section(ms)) { |
f6564fce | 2160 | rcu_read_unlock_sched(); |
f46edbd1 | 2161 | return 0; |
5ec8e8ea | 2162 | } |
f46edbd1 DW |
2163 | /* |
2164 | * Traditionally, early sections always returned true from pfn_valid() for |
2165 | * the entire section-sized span. | |
2166 | */ | |
5ec8e8ea | 2167 | ret = early_section(ms) || pfn_section_valid(ms, pfn); |
f6564fce | 2168 | rcu_read_unlock_sched(); |
5ec8e8ea CTK |
2169 | |
2170 | return ret; | |
d41dee36 | 2171 | } |
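/*
 * Illustrative sketch (hypothetical helper): the usual way pfn_valid() is
 * consumed by code that also has pfn_to_page() in scope (e.g. via
 * linux/mm.h). A valid memory map entry still does not guarantee usable
 * memory at that pfn, as noted above.
 */
static inline struct page *example_pfn_to_page_checked(unsigned long pfn)
{
	if (!pfn_valid(pfn))
		return NULL;

	return pfn_to_page(pfn);
}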
03792631 DW |
2172 | |
2173 | /* Returns end_pfn or higher if no valid PFN remaining in range */ | |
2174 | static inline unsigned long first_valid_pfn(unsigned long pfn, unsigned long end_pfn) | |
2175 | { | |
2176 | unsigned long nr = pfn_to_section_nr(pfn); | |
2177 | ||
2178 | rcu_read_lock_sched(); | |
2179 | ||
2180 | while (nr <= __highest_present_section_nr && pfn < end_pfn) { | |
2181 | struct mem_section *ms = __pfn_to_section(pfn); | |
2182 | ||
2183 | if (valid_section(ms) && | |
2184 | (early_section(ms) || pfn_section_first_valid(ms, &pfn))) { | |
2185 | rcu_read_unlock_sched(); | |
2186 | return pfn; | |
2187 | } | |
2188 | ||
2189 | /* Nothing left in this section? Skip to next section */ | |
2190 | nr++; | |
2191 | pfn = section_nr_to_pfn(nr); | |
2192 | } | |
2193 | ||
2194 | rcu_read_unlock_sched(); | |
2195 | return end_pfn; | |
2196 | } | |
2197 | ||
2198 | static inline unsigned long next_valid_pfn(unsigned long pfn, unsigned long end_pfn) | |
2199 | { | |
2200 | pfn++; | |
2201 | ||
2202 | if (pfn >= end_pfn) | |
2203 | return end_pfn; | |
2204 | ||
2205 | /* | |
2206 | * Either every PFN within the section (or subsection for VMEMMAP) is | |
2207 | * valid, or none of them are. So there's no point repeating the check | |
2208 | * for every PFN; only call first_valid_pfn() again when crossing a | |
2209 | * (sub)section boundary (i.e. !(pfn & ~PAGE_{SUB,}SECTION_MASK)). | |
2210 | */ | |
2211 | if (pfn & ~(IS_ENABLED(CONFIG_SPARSEMEM_VMEMMAP) ? | |
2212 | PAGE_SUBSECTION_MASK : PAGE_SECTION_MASK)) | |
2213 | return pfn; | |
2214 | ||
2215 | return first_valid_pfn(pfn, end_pfn); | |
2216 | } | |
2217 | ||
2218 | ||
2219 | #define for_each_valid_pfn(_pfn, _start_pfn, _end_pfn) \ | |
2220 | for ((_pfn) = first_valid_pfn((_start_pfn), (_end_pfn)); \ | |
2221 | (_pfn) < (_end_pfn); \ | |
2222 | (_pfn) = next_valid_pfn((_pfn), (_end_pfn))) | |
2223 | ||
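/*
 * Illustrative sketch (hypothetical helper): for_each_valid_pfn() visits
 * only pfns that have a memory map entry and skips whole (sub)sections of
 * holes at once; here it simply counts the valid pfns in a range.
 */
static inline unsigned long example_count_valid_pfns(unsigned long start_pfn,
						     unsigned long end_pfn)
{
	unsigned long pfn, nr = 0;

	for_each_valid_pfn(pfn, start_pfn, end_pfn)
		nr++;

	return nr;
}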
7b7bf499 | 2224 | #endif |
d41dee36 | 2225 | |
e03d1f78 | 2226 | static inline int pfn_in_present_section(unsigned long pfn) |
540557b9 AW |
2227 | { |
2228 | if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS) | |
2229 | return 0; | |
f1dc0db2 | 2230 | return present_section(__pfn_to_section(pfn)); |
540557b9 AW |
2231 | } |
2232 | ||
4c605881 DH |
2233 | static inline unsigned long next_present_section_nr(unsigned long section_nr) |
2234 | { | |
2235 | while (++section_nr <= __highest_present_section_nr) { | |
2236 | if (present_section_nr(section_nr)) | |
2237 | return section_nr; | |
2238 | } | |
2239 | ||
2240 | return -1; | |
2241 | } | |
2242 | ||
61659efd GS |
2243 | #define for_each_present_section_nr(start, section_nr) \ |
2244 | for (section_nr = next_present_section_nr(start - 1); \ | |
2245 | section_nr != -1; \ | |
2246 | section_nr = next_present_section_nr(section_nr)) | |
2247 | ||
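/*
 * Illustrative sketch (hypothetical helper): iterating every present section
 * starting from section 0, here just to count them.
 */
static inline unsigned long example_count_present_sections(void)
{
	unsigned long section_nr, nr = 0;

	for_each_present_section_nr(0, section_nr)
		nr++;

	return nr;
}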
d41dee36 AW |
2248 | /* |
2249 | * These are _only_ used during initialisation, therefore they | |
2250 | * can use __initdata ... They could have names to indicate | |
2251 | * this restriction. | |
2252 | */ | |
2253 | #ifdef CONFIG_NUMA | |
161599ff AW |
2254 | #define pfn_to_nid(pfn) \ |
2255 | ({ \ | |
2256 | unsigned long __pfn_to_nid_pfn = (pfn); \ | |
2257 | page_to_nid(pfn_to_page(__pfn_to_nid_pfn)); \ | |
2258 | }) | |
2bdaf115 AW |
2259 | #else |
2260 | #define pfn_to_nid(pfn) (0) | |
d41dee36 AW |
2261 | #endif |
2262 | ||
d41dee36 AW |
2263 | void sparse_init(void); |
2264 | #else | |
2265 | #define sparse_init() do {} while (0) | |
28ae55c9 | 2266 | #define sparse_index_init(_sec, _nid) do {} while (0) |
d65917c4 FL |
2267 | #define sparse_vmemmap_init_nid_early(_nid, _use) do {} while (0) |
2268 | #define sparse_vmemmap_init_nid_late(_nid) do {} while (0) | |
e03d1f78 | 2269 | #define pfn_in_present_section pfn_valid |
f46edbd1 | 2270 | #define subsection_map_init(_pfn, _nr_pages) do {} while (0) |
d41dee36 AW |
2271 | #endif /* CONFIG_SPARSEMEM */ |
2272 | ||
f88ce2c8 DW |
2273 | /* |
2274 | * Fallback case for when the architecture provides its own pfn_valid() but | |
2275 | * not a corresponding for_each_valid_pfn(). | |
2276 | */ | |
2277 | #ifndef for_each_valid_pfn | |
2278 | #define for_each_valid_pfn(_pfn, _start_pfn, _end_pfn) \ | |
2279 | for ((_pfn) = (_start_pfn); (_pfn) < (_end_pfn); (_pfn)++) \ | |
2280 | if (pfn_valid(_pfn)) | |
2281 | #endif | |
2282 | ||
97965478 | 2283 | #endif /* !__GENERATING_BOUNDS.H */ |
1da177e4 | 2284 | #endif /* !__ASSEMBLY__ */ |
1da177e4 | 2285 | #endif /* _LINUX_MMZONE_H */ |