/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_GFP_H
#define __LINUX_GFP_H

#include <linux/mmdebug.h>
#include <linux/mmzone.h>
#include <linux/stddef.h>
#include <linux/linkage.h>
#include <linux/topology.h>

/* The typedef is in types.h but we want the documentation here */
#if 0
/**
 * typedef gfp_t - Memory allocation flags.
 *
 * GFP flags are commonly used throughout Linux to indicate how memory
 * should be allocated. The GFP acronym stands for get_free_pages(),
 * the underlying memory allocation function. Not every GFP flag is
 * supported by every function which may allocate memory. Most users
 * will want to use a plain ``GFP_KERNEL``.
 */
typedef unsigned int __bitwise gfp_t;
#endif

struct vm_area_struct;

/*
 * In case of changes, please don't forget to update
 * include/trace/events/mmflags.h and tools/perf/builtin-kmem.c
 */

/* Plain integer GFP bitmasks. Do not use this directly. */
#define ___GFP_DMA		0x01u
#define ___GFP_HIGHMEM		0x02u
#define ___GFP_DMA32		0x04u
#define ___GFP_MOVABLE		0x08u
#define ___GFP_RECLAIMABLE	0x10u
#define ___GFP_HIGH		0x20u
#define ___GFP_IO		0x40u
#define ___GFP_FS		0x80u
#define ___GFP_ZERO		0x100u
#define ___GFP_ATOMIC		0x200u
#define ___GFP_DIRECT_RECLAIM	0x400u
#define ___GFP_KSWAPD_RECLAIM	0x800u
#define ___GFP_WRITE		0x1000u
#define ___GFP_NOWARN		0x2000u
#define ___GFP_RETRY_MAYFAIL	0x4000u
#define ___GFP_NOFAIL		0x8000u
#define ___GFP_NORETRY		0x10000u
#define ___GFP_MEMALLOC		0x20000u
#define ___GFP_COMP		0x40000u
#define ___GFP_NOMEMALLOC	0x80000u
#define ___GFP_HARDWALL		0x100000u
#define ___GFP_THISNODE		0x200000u
#define ___GFP_ACCOUNT		0x400000u
#define ___GFP_ZEROTAGS		0x800000u
#ifdef CONFIG_KASAN_HW_TAGS
#define ___GFP_SKIP_ZERO		0x1000000u
#define ___GFP_SKIP_KASAN_UNPOISON	0x2000000u
#define ___GFP_SKIP_KASAN_POISON	0x4000000u
#else
#define ___GFP_SKIP_ZERO		0
#define ___GFP_SKIP_KASAN_UNPOISON	0
#define ___GFP_SKIP_KASAN_POISON	0
#endif
#ifdef CONFIG_LOCKDEP
#define ___GFP_NOLOCKDEP	0x8000000u
#else
#define ___GFP_NOLOCKDEP	0
#endif
/* If the above are modified, __GFP_BITS_SHIFT may need updating */

/*
 * Physical address zone modifiers (see linux/mmzone.h - low four bits)
 *
 * Do not put any conditional on these. If necessary modify the definitions
 * without the underscores and use them consistently. The definitions here may
 * be used in bit comparisons.
 */
#define __GFP_DMA	((__force gfp_t)___GFP_DMA)
#define __GFP_HIGHMEM	((__force gfp_t)___GFP_HIGHMEM)
#define __GFP_DMA32	((__force gfp_t)___GFP_DMA32)
#define __GFP_MOVABLE	((__force gfp_t)___GFP_MOVABLE)  /* ZONE_MOVABLE allowed */
#define GFP_ZONEMASK	(__GFP_DMA|__GFP_HIGHMEM|__GFP_DMA32|__GFP_MOVABLE)

/**
 * DOC: Page mobility and placement hints
 *
 * Page mobility and placement hints
 * ---------------------------------
 *
 * These flags provide hints about how mobile the page is. Pages with similar
 * mobility are placed within the same pageblocks to minimise problems due
 * to external fragmentation.
 *
 * %__GFP_MOVABLE (also a zone modifier) indicates that the page can be
 * moved by page migration during memory compaction or can be reclaimed.
 *
 * %__GFP_RECLAIMABLE is used for slab allocations that specify
 * SLAB_RECLAIM_ACCOUNT and whose pages can be freed via shrinkers.
 *
 * %__GFP_WRITE indicates the caller intends to dirty the page. Where possible,
 * these pages will be spread between local zones to avoid all the dirty
 * pages being in one zone (fair zone allocation policy).
 *
 * %__GFP_HARDWALL enforces the cpuset memory allocation policy.
 *
 * %__GFP_THISNODE forces the allocation to be satisfied from the requested
 * node with no fallbacks or placement policy enforcements.
 *
 * %__GFP_ACCOUNT causes the allocation to be accounted to kmemcg.
 */
#define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE)
#define __GFP_WRITE	((__force gfp_t)___GFP_WRITE)
#define __GFP_HARDWALL	((__force gfp_t)___GFP_HARDWALL)
#define __GFP_THISNODE	((__force gfp_t)___GFP_THISNODE)
#define __GFP_ACCOUNT	((__force gfp_t)___GFP_ACCOUNT)

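/*
 * Illustrative sketch (not part of this header): a userspace-triggered,
 * potentially unbounded kernel allocation charged to the caller's memory
 * cgroup via __GFP_ACCOUNT. "entry" and its type are hypothetical;
 * kzalloc() comes from <linux/slab.h>.
 *
 *	entry = kzalloc(sizeof(*entry), GFP_KERNEL | __GFP_ACCOUNT);
 *	if (!entry)
 *		return -ENOMEM;
 */
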
/**
 * DOC: Watermark modifiers
 *
 * Watermark modifiers -- controls access to emergency reserves
 * ------------------------------------------------------------
 *
 * %__GFP_HIGH indicates that the caller is high-priority and that granting
 * the request is necessary before the system can make forward progress.
 * For example, creating an IO context to clean pages.
 *
 * %__GFP_ATOMIC indicates that the caller cannot reclaim or sleep and is
 * high priority. Users are typically interrupt handlers. This may be
 * used in conjunction with %__GFP_HIGH.
 *
 * %__GFP_MEMALLOC allows access to all memory. This should only be used when
 * the caller guarantees the allocation will allow more memory to be freed
 * very shortly, e.g. process exiting or swapping. Users should either
 * be the MM or co-ordinate closely with the VM (e.g. swap over NFS).
 * Users of this flag have to be extremely careful not to deplete the reserve
 * completely and must implement a throttling mechanism which controls the
 * consumption of the reserve based on the amount of freed memory.
 * Usage of a pre-allocated pool (e.g. mempool) should always be considered
 * before using this flag.
 *
 * %__GFP_NOMEMALLOC is used to explicitly forbid access to emergency reserves.
 * This takes precedence over the %__GFP_MEMALLOC flag if both are set.
 */
#define __GFP_ATOMIC	((__force gfp_t)___GFP_ATOMIC)
#define __GFP_HIGH	((__force gfp_t)___GFP_HIGH)
#define __GFP_MEMALLOC	((__force gfp_t)___GFP_MEMALLOC)
#define __GFP_NOMEMALLOC ((__force gfp_t)___GFP_NOMEMALLOC)

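/*
 * Illustrative sketch (not part of this header): instead of reaching for
 * __GFP_MEMALLOC, paths that must make forward progress during reclaim
 * usually draw from a pre-allocated mempool (<linux/mempool.h>); with a
 * sleepable gfp mask, mempool_alloc() waits for a free element rather
 * than failing. "my_pool", "my_cache" and MY_MIN_RESERVE are hypothetical.
 *
 *	my_pool = mempool_create_slab_pool(MY_MIN_RESERVE, my_cache);
 *	...
 *	obj = mempool_alloc(my_pool, GFP_NOIO);
 *	...
 *	mempool_free(obj, my_pool);
 */
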
/**
 * DOC: Reclaim modifiers
 *
 * Reclaim modifiers
 * -----------------
 * Please note that all the following flags are only applicable to sleepable
 * allocations (e.g. %GFP_NOWAIT and %GFP_ATOMIC will ignore them).
 *
 * %__GFP_IO can start physical IO.
 *
 * %__GFP_FS can call down to the low-level FS. Clearing the flag avoids the
 * allocator recursing into the filesystem which might already be holding
 * locks.
 *
 * %__GFP_DIRECT_RECLAIM indicates that the caller may enter direct reclaim.
 * This flag can be cleared to avoid unnecessary delays when a fallback
 * option is available.
 *
 * %__GFP_KSWAPD_RECLAIM indicates that the caller wants to wake kswapd when
 * the low watermark is reached and have it reclaim pages until the high
 * watermark is reached. A caller may wish to clear this flag when fallback
 * options are available and the reclaim is likely to disrupt the system. The
 * canonical example is THP allocation where a fallback is cheap but
 * reclaim/compaction may cause indirect stalls.
 *
 * %__GFP_RECLAIM is shorthand to allow/forbid both direct and kswapd reclaim.
 *
 * The default allocator behavior depends on the request size. We have a concept
 * of so-called costly allocations (with order > %PAGE_ALLOC_COSTLY_ORDER).
 * !costly allocations are too essential to fail so they are implicitly
 * non-failing by default (with some exceptions: e.g. OOM victims might fail,
 * so the caller still has to check for failures), while costly requests try
 * not to be disruptive and back off even without invoking the OOM killer.
 * The following three modifiers might be used to override some of these
 * implicit rules.
 *
 * %__GFP_NORETRY: The VM implementation will try only very lightweight
 * memory direct reclaim to get some memory under memory pressure (thus
 * it can sleep). It will avoid disruptive actions like the OOM killer. The
 * caller must handle the failure which is quite likely to happen under
 * heavy memory pressure. The flag is suitable when failure can easily be
 * handled at small cost, such as reduced throughput.
 *
 * %__GFP_RETRY_MAYFAIL: The VM implementation will retry memory reclaim
 * procedures that have previously failed if there is some indication
 * that progress has been made elsewhere. It can wait for other
 * tasks to attempt high level approaches to freeing memory such as
 * compaction (which removes fragmentation) and page-out.
 * There is still a definite limit to the number of retries, but it is
 * a larger limit than with %__GFP_NORETRY.
 * Allocations with this flag may fail, but only when there is
 * genuinely little unused memory. While these allocations do not
 * directly trigger the OOM killer, their failure indicates that
 * the system is likely to need to use the OOM killer soon. The
 * caller must handle failure, but can reasonably do so by failing
 * a higher-level request, or completing it only in a much less
 * efficient manner.
 * If the allocation does fail, and the caller is in a position to
 * free some non-essential memory, doing so could benefit the system
 * as a whole.
 *
 * %__GFP_NOFAIL: The VM implementation _must_ retry infinitely: the caller
 * cannot handle allocation failures. The allocation could block
 * indefinitely but will never return with failure. Testing for
 * failure is pointless.
 * New users should be evaluated carefully (and the flag should be
 * used only when there is no reasonable failure policy), but it is
 * definitely preferable to use the flag rather than open-code an endless
 * loop around the allocator.
 * Using this flag for costly allocations is _highly_ discouraged.
 */
#define __GFP_IO	((__force gfp_t)___GFP_IO)
#define __GFP_FS	((__force gfp_t)___GFP_FS)
#define __GFP_DIRECT_RECLAIM	((__force gfp_t)___GFP_DIRECT_RECLAIM) /* Caller can reclaim */
#define __GFP_KSWAPD_RECLAIM	((__force gfp_t)___GFP_KSWAPD_RECLAIM) /* kswapd can wake */
#define __GFP_RECLAIM ((__force gfp_t)(___GFP_DIRECT_RECLAIM|___GFP_KSWAPD_RECLAIM))
#define __GFP_RETRY_MAYFAIL	((__force gfp_t)___GFP_RETRY_MAYFAIL)
#define __GFP_NOFAIL	((__force gfp_t)___GFP_NOFAIL)
#define __GFP_NORETRY	((__force gfp_t)___GFP_NORETRY)

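/*
 * Illustrative sketch (not part of this header): an opportunistic
 * high-order allocation with a cheap order-0 fallback, so disruptive
 * reclaim and the OOM killer are avoided. "MY_ORDER" is hypothetical.
 *
 *	page = alloc_pages(GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN,
 *			   MY_ORDER);
 *	if (!page)
 *		page = alloc_page(GFP_KERNEL);
 */
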
/**
 * DOC: Action modifiers
 *
 * Action modifiers
 * ----------------
 *
 * %__GFP_NOWARN suppresses allocation failure reports.
 *
 * %__GFP_COMP addresses compound page metadata.
 *
 * %__GFP_ZERO returns a zeroed page on success.
 *
 * %__GFP_ZEROTAGS zeroes memory tags at allocation time if the memory itself
 * is being zeroed (either via __GFP_ZERO or via init_on_alloc, provided that
 * __GFP_SKIP_ZERO is not set). This flag is intended for optimization: setting
 * memory tags at the same time as zeroing memory has minimal additional
 * performance impact.
 *
 * %__GFP_SKIP_KASAN_UNPOISON makes KASAN skip unpoisoning on page allocation.
 * Only effective in HW_TAGS mode.
 *
 * %__GFP_SKIP_KASAN_POISON makes KASAN skip poisoning on page deallocation.
 * Typically, used for userspace pages. Only effective in HW_TAGS mode.
 */
#define __GFP_NOWARN	((__force gfp_t)___GFP_NOWARN)
#define __GFP_COMP	((__force gfp_t)___GFP_COMP)
#define __GFP_ZERO	((__force gfp_t)___GFP_ZERO)
#define __GFP_ZEROTAGS	((__force gfp_t)___GFP_ZEROTAGS)
#define __GFP_SKIP_ZERO ((__force gfp_t)___GFP_SKIP_ZERO)
#define __GFP_SKIP_KASAN_UNPOISON ((__force gfp_t)___GFP_SKIP_KASAN_UNPOISON)
#define __GFP_SKIP_KASAN_POISON   ((__force gfp_t)___GFP_SKIP_KASAN_POISON)

/* Disable lockdep for GFP context tracking */
#define __GFP_NOLOCKDEP ((__force gfp_t)___GFP_NOLOCKDEP)

/* Room for N __GFP_FOO bits */
#define __GFP_BITS_SHIFT (24 +					\
			  3 * IS_ENABLED(CONFIG_KASAN_HW_TAGS) +	\
			  IS_ENABLED(CONFIG_LOCKDEP))
#define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))

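/*
 * Worked check (illustrative): with CONFIG_KASAN_HW_TAGS=y and
 * CONFIG_LOCKDEP=y, __GFP_BITS_SHIFT = 24 + 3 + 1 = 28, so the highest
 * flag, ___GFP_NOLOCKDEP = 0x8000000u, sits in bit 27 and is covered by
 * __GFP_BITS_MASK. With both options disabled, the conditional flags
 * above evaluate to 0 and the shift collapses to 24.
 */
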
/**
 * DOC: Useful GFP flag combinations
 *
 * Useful GFP flag combinations
 * ----------------------------
 *
 * Useful GFP flag combinations that are commonly used. It is recommended
 * that subsystems start with one of these combinations and then set/clear
 * %__GFP_FOO flags as necessary.
 *
 * %GFP_ATOMIC users cannot sleep and need the allocation to succeed. A lower
 * watermark is applied to allow access to "atomic reserves".
 * The current implementation doesn't support NMI and a few other strict
 * non-preemptive contexts (e.g. raw_spin_lock). The same applies to %GFP_NOWAIT.
 *
 * %GFP_KERNEL is typical for kernel-internal allocations. The caller requires
 * %ZONE_NORMAL or a lower zone for direct access but can direct reclaim.
 *
 * %GFP_KERNEL_ACCOUNT is the same as GFP_KERNEL, except the allocation is
 * accounted to kmemcg.
 *
 * %GFP_NOWAIT is for kernel allocations that should not stall for direct
 * reclaim, start physical IO or use any filesystem callback.
 *
 * %GFP_NOIO will use direct reclaim to discard clean pages or slab pages
 * that do not require the starting of any physical IO.
 * Please try to avoid using this flag directly and instead use
 * memalloc_noio_{save,restore} to mark the whole scope which cannot
 * perform any IO with a short explanation why. All allocation requests
 * will inherit GFP_NOIO implicitly.
 *
 * %GFP_NOFS will use direct reclaim but will not use any filesystem interfaces.
 * Please try to avoid using this flag directly and instead use
 * memalloc_nofs_{save,restore} to mark the whole scope which cannot/shouldn't
 * recurse into the FS layer with a short explanation why. All allocation
 * requests will inherit GFP_NOFS implicitly.
 *
 * %GFP_USER is for userspace allocations that also need to be directly
 * accessible by the kernel or hardware. It is typically used by hardware
 * for buffers that are mapped to userspace (e.g. graphics) that hardware
 * still must DMA to. cpuset limits are enforced for these allocations.
 *
 * %GFP_DMA exists for historical reasons and should be avoided where possible.
 * The flag indicates that the caller requires that the lowest zone be
 * used (%ZONE_DMA or 16M on x86-64). Ideally, this would be removed but
 * it would require careful auditing as some users really require it and
 * others use the flag to avoid lowmem reserves in %ZONE_DMA and treat the
 * lowest zone as a type of emergency reserve.
 *
 * %GFP_DMA32 is similar to %GFP_DMA except that the caller requires a 32-bit
 * address. Note that kmalloc(..., GFP_DMA32) does not return DMA32 memory
 * because the DMA32 kmalloc cache array is not implemented.
 * (Reason: there is no such user in the kernel.)
 *
 * %GFP_HIGHUSER is for userspace allocations that may be mapped to userspace,
 * do not need to be directly accessible by the kernel but that cannot
 * move once in use. An example may be a hardware allocation that maps
 * data directly into userspace but has no addressing limitations.
 *
 * %GFP_HIGHUSER_MOVABLE is for userspace allocations that the kernel does not
 * need direct access to but can use kmap() when access is required. They
 * are expected to be movable via page reclaim or page migration. Typically,
 * pages on the LRU would also be allocated with %GFP_HIGHUSER_MOVABLE.
 *
 * %GFP_TRANSHUGE and %GFP_TRANSHUGE_LIGHT are used for THP allocations. They
 * are compound allocations that will generally fail quickly if memory is not
 * available and will not wake kswapd/kcompactd on failure. The _LIGHT
 * version does not attempt reclaim/compaction at all and is by default used
 * in the page fault path, while the non-light version is used by khugepaged.
 */
#define GFP_ATOMIC	(__GFP_HIGH|__GFP_ATOMIC|__GFP_KSWAPD_RECLAIM)
#define GFP_KERNEL	(__GFP_RECLAIM | __GFP_IO | __GFP_FS)
#define GFP_KERNEL_ACCOUNT (GFP_KERNEL | __GFP_ACCOUNT)
#define GFP_NOWAIT	(__GFP_KSWAPD_RECLAIM)
#define GFP_NOIO	(__GFP_RECLAIM)
#define GFP_NOFS	(__GFP_RECLAIM | __GFP_IO)
#define GFP_USER	(__GFP_RECLAIM | __GFP_IO | __GFP_FS | __GFP_HARDWALL)
#define GFP_DMA		__GFP_DMA
#define GFP_DMA32	__GFP_DMA32
#define GFP_HIGHUSER	(GFP_USER | __GFP_HIGHMEM)
#define GFP_HIGHUSER_MOVABLE	(GFP_HIGHUSER | __GFP_MOVABLE | \
				 __GFP_SKIP_KASAN_POISON)
#define GFP_TRANSHUGE_LIGHT	((GFP_HIGHUSER_MOVABLE | __GFP_COMP | \
				 __GFP_NOMEMALLOC | __GFP_NOWARN) & ~__GFP_RECLAIM)
#define GFP_TRANSHUGE	(GFP_TRANSHUGE_LIGHT | __GFP_DIRECT_RECLAIM)

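/*
 * Illustrative sketch (not part of this header): marking a whole scope
 * that must not recurse into the FS, instead of passing GFP_NOFS by hand
 * (memalloc_nofs_save() lives in <linux/sched/mm.h>).
 * my_reclaim_sensitive_op() is hypothetical; within the scope, GFP_KERNEL
 * allocations behave as GFP_NOFS.
 *
 *	unsigned int nofs_flags = memalloc_nofs_save();
 *
 *	err = my_reclaim_sensitive_op();
 *	memalloc_nofs_restore(nofs_flags);
 */
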
/* Convert GFP flags to their corresponding migrate type */
#define GFP_MOVABLE_MASK (__GFP_RECLAIMABLE|__GFP_MOVABLE)
#define GFP_MOVABLE_SHIFT 3

static inline int gfp_migratetype(const gfp_t gfp_flags)
{
	VM_WARN_ON((gfp_flags & GFP_MOVABLE_MASK) == GFP_MOVABLE_MASK);
	BUILD_BUG_ON((1UL << GFP_MOVABLE_SHIFT) != ___GFP_MOVABLE);
	BUILD_BUG_ON((___GFP_MOVABLE >> GFP_MOVABLE_SHIFT) != MIGRATE_MOVABLE);

	if (unlikely(page_group_by_mobility_disabled))
		return MIGRATE_UNMOVABLE;

	/* Group based on mobility */
	return (gfp_flags & GFP_MOVABLE_MASK) >> GFP_MOVABLE_SHIFT;
}
#undef GFP_MOVABLE_MASK
#undef GFP_MOVABLE_SHIFT

static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags)
{
	return !!(gfp_flags & __GFP_DIRECT_RECLAIM);
}

/**
 * gfpflags_normal_context - is gfp_flags a normal sleepable context?
 * @gfp_flags: gfp_flags to test
 *
 * Test whether @gfp_flags indicates that the allocation is from the
 * %current context and allowed to sleep.
 *
 * An allocation being allowed to block doesn't mean it owns the %current
 * context. When the direct reclaim path tries to allocate memory, the
 * allocation context is nested inside whatever %current was doing at the
 * time of the original allocation. The nested allocation may be allowed
 * to block but modifying anything %current owns can corrupt the outer
 * context's expectations.
 *
 * A %true result from this function indicates that the allocation context
 * can sleep and use anything that's associated with %current.
 */
static inline bool gfpflags_normal_context(const gfp_t gfp_flags)
{
	return (gfp_flags & (__GFP_DIRECT_RECLAIM | __GFP_MEMALLOC)) ==
		__GFP_DIRECT_RECLAIM;
}

#ifdef CONFIG_HIGHMEM
#define OPT_ZONE_HIGHMEM ZONE_HIGHMEM
#else
#define OPT_ZONE_HIGHMEM ZONE_NORMAL
#endif

#ifdef CONFIG_ZONE_DMA
#define OPT_ZONE_DMA ZONE_DMA
#else
#define OPT_ZONE_DMA ZONE_NORMAL
#endif

#ifdef CONFIG_ZONE_DMA32
#define OPT_ZONE_DMA32 ZONE_DMA32
#else
#define OPT_ZONE_DMA32 ZONE_NORMAL
#endif

/*
 * GFP_ZONE_TABLE is a word size bitstring that is used for looking up the
 * zone to use given the lowest 4 bits of gfp_t. Entries are GFP_ZONES_SHIFT
 * bits long and there are 16 of them to cover all possible combinations of
 * __GFP_DMA, __GFP_DMA32, __GFP_MOVABLE and __GFP_HIGHMEM.
 *
 * The zone fallback order is MOVABLE=>HIGHMEM=>NORMAL=>DMA32=>DMA.
 * But GFP_MOVABLE is not only a zone specifier but also an allocation
 * policy. Therefore __GFP_MOVABLE plus another zone selector is valid.
 * Only 1 bit of the lowest 3 bits (DMA,DMA32,HIGHMEM) can be set to "1".
 *
 *       bit       result
 *       =================
 *       0x0    => NORMAL
 *       0x1    => DMA or NORMAL
 *       0x2    => HIGHMEM or NORMAL
 *       0x3    => BAD (DMA+HIGHMEM)
 *       0x4    => DMA32 or NORMAL
 *       0x5    => BAD (DMA+DMA32)
 *       0x6    => BAD (HIGHMEM+DMA32)
 *       0x7    => BAD (HIGHMEM+DMA32+DMA)
 *       0x8    => NORMAL (MOVABLE+0)
 *       0x9    => DMA or NORMAL (MOVABLE+DMA)
 *       0xa    => MOVABLE (Movable is valid only if HIGHMEM is set too)
 *       0xb    => BAD (MOVABLE+HIGHMEM+DMA)
 *       0xc    => DMA32 or NORMAL (MOVABLE+DMA32)
 *       0xd    => BAD (MOVABLE+DMA32+DMA)
 *       0xe    => BAD (MOVABLE+DMA32+HIGHMEM)
 *       0xf    => BAD (MOVABLE+DMA32+HIGHMEM+DMA)
 *
 * GFP_ZONES_SHIFT must be <= 2 on 32 bit platforms.
 */

#if defined(CONFIG_ZONE_DEVICE) && (MAX_NR_ZONES-1) <= 4
/* ZONE_DEVICE is not a valid GFP zone specifier */
#define GFP_ZONES_SHIFT 2
#else
#define GFP_ZONES_SHIFT ZONES_SHIFT
#endif

#if 16 * GFP_ZONES_SHIFT > BITS_PER_LONG
#error GFP_ZONES_SHIFT too large to create GFP_ZONE_TABLE integer
#endif

#define GFP_ZONE_TABLE ( \
	(ZONE_NORMAL << 0 * GFP_ZONES_SHIFT)				       \
	| (OPT_ZONE_DMA << ___GFP_DMA * GFP_ZONES_SHIFT)		       \
	| (OPT_ZONE_HIGHMEM << ___GFP_HIGHMEM * GFP_ZONES_SHIFT)	       \
	| (OPT_ZONE_DMA32 << ___GFP_DMA32 * GFP_ZONES_SHIFT)		       \
	| (ZONE_NORMAL << ___GFP_MOVABLE * GFP_ZONES_SHIFT)		       \
	| (OPT_ZONE_DMA << (___GFP_MOVABLE | ___GFP_DMA) * GFP_ZONES_SHIFT)    \
	| (ZONE_MOVABLE << (___GFP_MOVABLE | ___GFP_HIGHMEM) * GFP_ZONES_SHIFT)\
	| (OPT_ZONE_DMA32 << (___GFP_MOVABLE | ___GFP_DMA32) * GFP_ZONES_SHIFT)\
)

/*
 * GFP_ZONE_BAD is a bitmap for all combinations of __GFP_DMA, __GFP_DMA32
 * __GFP_HIGHMEM and __GFP_MOVABLE that are not permitted. One flag per
 * entry starting with bit 0. Bit is set if the combination is not
 * allowed.
 */
#define GFP_ZONE_BAD ( \
	1 << (___GFP_DMA | ___GFP_HIGHMEM)				      \
	| 1 << (___GFP_DMA | ___GFP_DMA32)				      \
	| 1 << (___GFP_DMA32 | ___GFP_HIGHMEM)				      \
	| 1 << (___GFP_DMA | ___GFP_DMA32 | ___GFP_HIGHMEM)		      \
	| 1 << (___GFP_MOVABLE | ___GFP_HIGHMEM | ___GFP_DMA)		      \
	| 1 << (___GFP_MOVABLE | ___GFP_DMA32 | ___GFP_DMA)		      \
	| 1 << (___GFP_MOVABLE | ___GFP_DMA32 | ___GFP_HIGHMEM)	      \
	| 1 << (___GFP_MOVABLE | ___GFP_DMA32 | ___GFP_DMA | ___GFP_HIGHMEM)  \
)

static inline enum zone_type gfp_zone(gfp_t flags)
{
	enum zone_type z;
	int bit = (__force int) (flags & GFP_ZONEMASK);

	z = (GFP_ZONE_TABLE >> (bit * GFP_ZONES_SHIFT)) &
					 ((1 << GFP_ZONES_SHIFT) - 1);
	VM_BUG_ON((GFP_ZONE_BAD >> bit) & 1);
	return z;
}

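/*
 * Worked example (illustrative): for flags = __GFP_HIGHMEM | __GFP_MOVABLE
 * the low bits are 0xa, so gfp_zone() computes
 * (GFP_ZONE_TABLE >> (0xa * GFP_ZONES_SHIFT)) & ((1 << GFP_ZONES_SHIFT) - 1),
 * which is ZONE_MOVABLE, matching the 0xa row of the table above.
 */
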
/*
 * There is only one page-allocator function, and two main namespaces to
 * it. The alloc_page*() variants return 'struct page *' and as such
 * can allocate highmem pages, the *get*page*() variants return
 * virtual kernel addresses to the allocated page(s).
 */

static inline int gfp_zonelist(gfp_t flags)
{
#ifdef CONFIG_NUMA
	if (unlikely(flags & __GFP_THISNODE))
		return ZONELIST_NOFALLBACK;
#endif
	return ZONELIST_FALLBACK;
}

/*
 * We get the zone list from the current node and the gfp_mask.
 * This zone list contains a maximum of MAX_NUMNODES*MAX_NR_ZONES zones.
 * There are two zonelists per node, one for all zones with memory and
 * one containing just zones from the node the zonelist belongs to.
 *
 * For the case of non-NUMA systems the NODE_DATA() gets optimized to
 * &contig_page_data at compile-time.
 */
static inline struct zonelist *node_zonelist(int nid, gfp_t flags)
{
	return NODE_DATA(nid)->node_zonelists + gfp_zonelist(flags);
}

#ifndef HAVE_ARCH_FREE_PAGE
static inline void arch_free_page(struct page *page, int order) { }
#endif
#ifndef HAVE_ARCH_ALLOC_PAGE
static inline void arch_alloc_page(struct page *page, int order) { }
#endif

struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid,
		nodemask_t *nodemask);
struct folio *__folio_alloc(gfp_t gfp, unsigned int order, int preferred_nid,
		nodemask_t *nodemask);

unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
				nodemask_t *nodemask, int nr_pages,
				struct list_head *page_list,
				struct page **page_array);

unsigned long alloc_pages_bulk_array_mempolicy(gfp_t gfp,
				unsigned long nr_pages,
				struct page **page_array);

/* Bulk allocate order-0 pages */
static inline unsigned long
alloc_pages_bulk_list(gfp_t gfp, unsigned long nr_pages, struct list_head *list)
{
	return __alloc_pages_bulk(gfp, numa_mem_id(), NULL, nr_pages, list, NULL);
}

static inline unsigned long
alloc_pages_bulk_array(gfp_t gfp, unsigned long nr_pages, struct page **page_array)
{
	return __alloc_pages_bulk(gfp, numa_mem_id(), NULL, nr_pages, NULL, page_array);
}

static inline unsigned long
alloc_pages_bulk_array_node(gfp_t gfp, int nid, unsigned long nr_pages, struct page **page_array)
{
	if (nid == NUMA_NO_NODE)
		nid = numa_mem_id();

	return __alloc_pages_bulk(gfp, nid, NULL, nr_pages, NULL, page_array);
}

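/*
 * Illustrative sketch (not part of this header): filling an array with
 * order-0 pages in one call. The return value is the number of pages in
 * the array, which may be fewer than requested; only NULL slots are
 * populated. "MY_NR" and "my_pages" are hypothetical.
 *
 *	struct page *my_pages[MY_NR] = { NULL };
 *	unsigned long nr;
 *
 *	nr = alloc_pages_bulk_array(GFP_KERNEL, MY_NR, my_pages);
 *	if (nr < MY_NR)
 *		(allocate the remainder individually, or retry)
 */
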
/*
 * Allocate pages, preferring the node given as nid. The node must be valid and
 * online. For a more general interface, see alloc_pages_node().
 */
static inline struct page *
__alloc_pages_node(int nid, gfp_t gfp_mask, unsigned int order)
{
	VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES);
	VM_WARN_ON((gfp_mask & __GFP_THISNODE) && !node_online(nid));

	return __alloc_pages(gfp_mask, order, nid, NULL);
}

static inline
struct folio *__folio_alloc_node(gfp_t gfp, unsigned int order, int nid)
{
	VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES);
	VM_WARN_ON((gfp & __GFP_THISNODE) && !node_online(nid));

	return __folio_alloc(gfp, order, nid, NULL);
}

/*
 * Allocate pages, preferring the node given as nid. When nid == NUMA_NO_NODE,
 * prefer the current CPU's closest node. Otherwise node must be valid and
 * online.
 */
static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
						unsigned int order)
{
	if (nid == NUMA_NO_NODE)
		nid = numa_mem_id();

	return __alloc_pages_node(nid, gfp_mask, order);
}

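/*
 * Illustrative sketch (not part of this header): allocating close to a
 * device's NUMA node; dev_to_node() (<linux/device.h>) may return
 * NUMA_NO_NODE, which alloc_pages_node() resolves to the nearest node
 * with memory. "my_dev" is hypothetical.
 *
 *	page = alloc_pages_node(dev_to_node(my_dev), GFP_KERNEL, 0);
 *	if (!page)
 *		return -ENOMEM;
 */
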
#ifdef CONFIG_NUMA
struct page *alloc_pages(gfp_t gfp, unsigned int order);
struct folio *folio_alloc(gfp_t gfp, unsigned int order);
extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order,
			struct vm_area_struct *vma, unsigned long addr,
			bool hugepage);
#define alloc_hugepage_vma(gfp_mask, vma, addr, order) \
	alloc_pages_vma(gfp_mask, order, vma, addr, true)
#else
static inline struct page *alloc_pages(gfp_t gfp_mask, unsigned int order)
{
	return alloc_pages_node(numa_node_id(), gfp_mask, order);
}
static inline struct folio *folio_alloc(gfp_t gfp, unsigned int order)
{
	return __folio_alloc_node(gfp, order, numa_node_id());
}
#define alloc_pages_vma(gfp_mask, order, vma, addr, hugepage) \
	alloc_pages(gfp_mask, order)
#define alloc_hugepage_vma(gfp_mask, vma, addr, order) \
	alloc_pages(gfp_mask, order)
#endif
#define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)
#define alloc_page_vma(gfp_mask, vma, addr)			\
	alloc_pages_vma(gfp_mask, 0, vma, addr, false)

extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order);
extern unsigned long get_zeroed_page(gfp_t gfp_mask);

void *alloc_pages_exact(size_t size, gfp_t gfp_mask) __alloc_size(1);
void free_pages_exact(void *virt, size_t size);
__meminit void *alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask) __alloc_size(2);

#define __get_free_page(gfp_mask) \
	__get_free_pages((gfp_mask), 0)

#define __get_dma_pages(gfp_mask, order) \
	__get_free_pages((gfp_mask) | GFP_DMA, (order))

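/*
 * Illustrative sketch (not part of this header): alloc_pages_exact() for
 * a physically contiguous buffer that is not a power-of-two number of
 * pages; the unused tail of the underlying high-order block is returned
 * to the allocator. "my_buf_size" is hypothetical.
 *
 *	buf = alloc_pages_exact(my_buf_size, GFP_KERNEL | __GFP_ZERO);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	free_pages_exact(buf, my_buf_size);
 */
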
extern void __free_pages(struct page *page, unsigned int order);
extern void free_pages(unsigned long addr, unsigned int order);

struct page_frag_cache;
extern void __page_frag_cache_drain(struct page *page, unsigned int count);
extern void *page_frag_alloc_align(struct page_frag_cache *nc,
				   unsigned int fragsz, gfp_t gfp_mask,
				   unsigned int align_mask);

static inline void *page_frag_alloc(struct page_frag_cache *nc,
			     unsigned int fragsz, gfp_t gfp_mask)
{
	return page_frag_alloc_align(nc, fragsz, gfp_mask, ~0u);
}

extern void page_frag_free(void *addr);

#define __free_page(page) __free_pages((page), 0)
#define free_page(addr) free_pages((addr), 0)

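/*
 * Illustrative sketch (not part of this header): carving small fragments
 * out of a page_frag_cache, as networking drivers do for receive
 * buffers. A static cache starts zero-initialised, which is the required
 * initial state. "my_cache" and "fragsz" are hypothetical.
 *
 *	static struct page_frag_cache my_cache;
 *
 *	data = page_frag_alloc(&my_cache, fragsz, GFP_ATOMIC);
 *	if (!data)
 *		return NULL;
 *	...
 *	page_frag_free(data);
 */
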
void page_alloc_init(void);
void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp);
void drain_all_pages(struct zone *zone);
void drain_local_pages(struct zone *zone);

void page_alloc_init_late(void);

/*
 * gfp_allowed_mask is set to GFP_BOOT_MASK during early boot to restrict what
 * GFP flags are used before interrupts are enabled. Once interrupts are
 * enabled, it is set to __GFP_BITS_MASK while the system is running. During
 * hibernation, it is used by PM to avoid I/O during memory allocation while
 * devices are suspended.
 */
extern gfp_t gfp_allowed_mask;

/* Returns true if the gfp_mask allows use of ALLOC_NO_WATERMARK */
bool gfp_pfmemalloc_allowed(gfp_t gfp_mask);

extern void pm_restrict_gfp_mask(void);
extern void pm_restore_gfp_mask(void);

extern gfp_t vma_thp_gfp_mask(struct vm_area_struct *vma);

#ifdef CONFIG_PM_SLEEP
extern bool pm_suspended_storage(void);
#else
static inline bool pm_suspended_storage(void)
{
	return false;
}
#endif /* CONFIG_PM_SLEEP */

#ifdef CONFIG_CONTIG_ALLOC
/* The below functions must be run on a range from a single zone. */
extern int alloc_contig_range(unsigned long start, unsigned long end,
			      unsigned migratetype, gfp_t gfp_mask);
extern struct page *alloc_contig_pages(unsigned long nr_pages, gfp_t gfp_mask,
				       int nid, nodemask_t *nodemask);
#endif
void free_contig_range(unsigned long pfn, unsigned long nr_pages);

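/*
 * Illustrative sketch (not part of this header, CONFIG_CONTIG_ALLOC
 * only): grabbing a physically contiguous range of pages, e.g. for a
 * device without scatter-gather support. "my_nr_pages" is hypothetical.
 *
 *	page = alloc_contig_pages(my_nr_pages, GFP_KERNEL,
 *				  numa_node_id(), NULL);
 *	if (!page)
 *		return -ENOMEM;
 *	...
 *	free_contig_range(page_to_pfn(page), my_nr_pages);
 */
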
#ifdef CONFIG_CMA
/* CMA stuff */
extern void init_cma_reserved_pageblock(struct page *page);
#endif

#endif /* __LINUX_GFP_H */