#ifndef __LINUX_GFP_H
#define __LINUX_GFP_H

#include <linux/mmdebug.h>
#include <linux/mmzone.h>
#include <linux/stddef.h>
#include <linux/linkage.h>
#include <linux/topology.h>

struct vm_area_struct;

/* Plain integer GFP bitmasks. Do not use this directly. */
#define ___GFP_DMA		0x01u
#define ___GFP_HIGHMEM		0x02u
#define ___GFP_DMA32		0x04u
#define ___GFP_MOVABLE		0x08u
#define ___GFP_RECLAIMABLE	0x10u
#define ___GFP_HIGH		0x20u
#define ___GFP_IO		0x40u
#define ___GFP_FS		0x80u
#define ___GFP_COLD		0x100u
#define ___GFP_NOWARN		0x200u
#define ___GFP_REPEAT		0x400u
#define ___GFP_NOFAIL		0x800u
#define ___GFP_NORETRY		0x1000u
#define ___GFP_MEMALLOC		0x2000u
#define ___GFP_COMP		0x4000u
#define ___GFP_ZERO		0x8000u
#define ___GFP_NOMEMALLOC	0x10000u
#define ___GFP_HARDWALL		0x20000u
#define ___GFP_THISNODE		0x40000u
#define ___GFP_ATOMIC		0x80000u
#define ___GFP_NOACCOUNT	0x100000u
#define ___GFP_NOTRACK		0x200000u
#define ___GFP_DIRECT_RECLAIM	0x400000u
#define ___GFP_OTHER_NODE	0x800000u
#define ___GFP_WRITE		0x1000000u
#define ___GFP_KSWAPD_RECLAIM	0x2000000u
/* If the above are modified, __GFP_BITS_SHIFT may need updating */

/*
 * Physical address zone modifiers (see linux/mmzone.h - low four bits)
 *
 * Do not put any conditional on these. If necessary modify the definitions
 * without the underscores and use them consistently. The definitions here may
 * be used in bit comparisons.
 */
#define __GFP_DMA	((__force gfp_t)___GFP_DMA)
#define __GFP_HIGHMEM	((__force gfp_t)___GFP_HIGHMEM)
#define __GFP_DMA32	((__force gfp_t)___GFP_DMA32)
#define __GFP_MOVABLE	((__force gfp_t)___GFP_MOVABLE)  /* ZONE_MOVABLE allowed */
#define GFP_ZONEMASK	(__GFP_DMA|__GFP_HIGHMEM|__GFP_DMA32|__GFP_MOVABLE)

/*
 * Page mobility and placement hints
 *
 * These flags provide hints about how mobile the page is. Pages with similar
 * mobility are placed within the same pageblocks to minimise problems due
 * to external fragmentation.
 *
 * __GFP_MOVABLE (also a zone modifier) indicates that the page can be
 * moved by page migration during memory compaction or can be reclaimed.
 *
 * __GFP_RECLAIMABLE is used for slab allocations that specify
 * SLAB_RECLAIM_ACCOUNT and whose pages can be freed via shrinkers.
 *
 * __GFP_WRITE indicates the caller intends to dirty the page. Where possible,
 * these pages will be spread between local zones to avoid all the dirty
 * pages being in one zone (fair zone allocation policy).
 *
 * __GFP_HARDWALL enforces the cpuset memory allocation policy.
 *
 * __GFP_THISNODE forces the allocation to be satisfied from the requested
 * node with no fallbacks or placement policy enforcements.
 */
#define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE)
#define __GFP_WRITE	((__force gfp_t)___GFP_WRITE)
#define __GFP_HARDWALL	((__force gfp_t)___GFP_HARDWALL)
#define __GFP_THISNODE	((__force gfp_t)___GFP_THISNODE)
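/*
 * Illustrative sketch (not part of the upstream header): a caller that
 * must take memory from one specific node, with no fallback to any other
 * node, can combine __GFP_THISNODE with a base mask such as GFP_KERNEL
 * (defined further below). "nid" is an assumed, caller-supplied node id:
 *
 *	struct page *page;
 *
 *	page = alloc_pages_node(nid, GFP_KERNEL | __GFP_THISNODE, 0);
 *	if (!page)
 *		return -ENOMEM;
 */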

/*
 * Watermark modifiers -- controls access to emergency reserves
 *
 * __GFP_HIGH indicates that the caller is high-priority and that granting
 * the request is necessary before the system can make forward progress.
 * For example, creating an IO context to clean pages.
 *
 * __GFP_ATOMIC indicates that the caller cannot reclaim or sleep and is
 * high priority. Users are typically interrupt handlers. This may be
 * used in conjunction with __GFP_HIGH.
 *
 * __GFP_MEMALLOC allows access to all memory. This should only be used when
 * the caller guarantees the allocation will allow more memory to be freed
 * very shortly e.g. process exiting or swapping. Users should either
 * be the MM or co-ordinating closely with the VM (e.g. swap over NFS).
 *
 * __GFP_NOMEMALLOC is used to explicitly forbid access to emergency reserves.
 * This takes precedence over the __GFP_MEMALLOC flag if both are set.
 *
 * __GFP_NOACCOUNT ignores the accounting for kmemcg limit enforcement.
 */
#define __GFP_ATOMIC	((__force gfp_t)___GFP_ATOMIC)
#define __GFP_HIGH	((__force gfp_t)___GFP_HIGH)
#define __GFP_MEMALLOC	((__force gfp_t)___GFP_MEMALLOC)
#define __GFP_NOMEMALLOC ((__force gfp_t)___GFP_NOMEMALLOC)
#define __GFP_NOACCOUNT	((__force gfp_t)___GFP_NOACCOUNT)
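/*
 * Illustrative sketch (not part of the upstream header): an interrupt
 * handler cannot sleep, so it uses GFP_ATOMIC (defined further below),
 * which sets __GFP_ATOMIC and __GFP_HIGH, and must tolerate failure:
 *
 *	page = alloc_page(GFP_ATOMIC);
 *	if (!page)
 *		return NULL;
 *
 * Failure handling is mandatory here; the atomic reserves may be exhausted.
 */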

/*
 * Reclaim modifiers
 *
 * __GFP_IO can start physical IO.
 *
 * __GFP_FS can call down to the low-level FS. Clearing the flag avoids the
 * allocator recursing into the filesystem which might already be holding
 * locks.
 *
 * __GFP_DIRECT_RECLAIM indicates that the caller may enter direct reclaim.
 * This flag can be cleared to avoid unnecessary delays when a fallback
 * option is available.
 *
 * __GFP_KSWAPD_RECLAIM indicates that the caller wants to wake kswapd when
 * the low watermark is reached and have it reclaim pages until the high
 * watermark is reached. A caller may wish to clear this flag when fallback
 * options are available and the reclaim is likely to disrupt the system. The
 * canonical example is THP allocation where a fallback is cheap but
 * reclaim/compaction may cause indirect stalls.
 *
 * __GFP_RECLAIM is shorthand to allow/forbid both direct and kswapd reclaim.
 *
 * __GFP_REPEAT: Try hard to allocate the memory, but the allocation attempt
 * _might_ fail. This depends upon the particular VM implementation.
 *
 * __GFP_NOFAIL: The VM implementation _must_ retry infinitely: the caller
 * cannot handle allocation failures. New users should be evaluated carefully
 * (and the flag should be used only when there is no reasonable failure
 * policy) but it is definitely preferable to use the flag rather than to
 * open-code an endless loop around the allocator.
 *
 * __GFP_NORETRY: The VM implementation must not retry indefinitely and will
 * return NULL when direct reclaim and memory compaction have failed to allow
 * the allocation to succeed. The OOM killer is not called with the current
 * implementation.
 */
#define __GFP_IO	((__force gfp_t)___GFP_IO)
#define __GFP_FS	((__force gfp_t)___GFP_FS)
#define __GFP_DIRECT_RECLAIM	((__force gfp_t)___GFP_DIRECT_RECLAIM) /* Caller can reclaim */
#define __GFP_KSWAPD_RECLAIM	((__force gfp_t)___GFP_KSWAPD_RECLAIM) /* kswapd can wake */
#define __GFP_RECLAIM ((__force gfp_t)(___GFP_DIRECT_RECLAIM|___GFP_KSWAPD_RECLAIM))
#define __GFP_REPEAT	((__force gfp_t)___GFP_REPEAT)
#define __GFP_NOFAIL	((__force gfp_t)___GFP_NOFAIL)
#define __GFP_NORETRY	((__force gfp_t)___GFP_NORETRY)
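/*
 * Illustrative sketch (not part of the upstream header): an optimistic
 * high-order allocation with a cheap fallback forbids retries and does
 * not wake kswapd, so failure is quick and quiet (__GFP_NOWARN is
 * defined further below):
 *
 *	page = alloc_pages((GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN) &
 *			   ~__GFP_KSWAPD_RECLAIM, order);
 *	if (!page)
 *		page = alloc_page(GFP_KERNEL);
 *
 * The order-0 fallback here stands in for whatever cheaper path exists.
 */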

/*
 * Action modifiers
 *
 * __GFP_COLD indicates that the caller does not expect the page to be used
 * in the near future. Where possible, a cache-cold page will be returned.
 *
 * __GFP_NOWARN suppresses allocation failure reports.
 *
 * __GFP_COMP addresses compound page metadata.
 *
 * __GFP_ZERO returns a zeroed page on success.
 *
 * __GFP_NOTRACK avoids tracking with kmemcheck.
 *
 * __GFP_NOTRACK_FALSE_POSITIVE is an alias of __GFP_NOTRACK. It's a means of
 * distinguishing in the source between false positives and allocations that
 * cannot be supported (e.g. page tables).
 *
 * __GFP_OTHER_NODE is for allocations that are on a remote node but that
 * should not be accounted for as a remote allocation in vmstat. A
 * typical user would be khugepaged collapsing a huge page on a remote
 * node.
 */
#define __GFP_COLD	((__force gfp_t)___GFP_COLD)
#define __GFP_NOWARN	((__force gfp_t)___GFP_NOWARN)
#define __GFP_COMP	((__force gfp_t)___GFP_COMP)
#define __GFP_ZERO	((__force gfp_t)___GFP_ZERO)
#define __GFP_NOTRACK	((__force gfp_t)___GFP_NOTRACK)
#define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)
#define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE)
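/*
 * Illustrative sketch (not part of the upstream header): requesting a
 * pre-zeroed page saves an explicit clear_page()/memset() in the caller:
 *
 *	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
 *	if (page)
 *		addr = page_address(page);
 *
 * addr then points at PAGE_SIZE bytes of zeroes.
 */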

/* Room for 26 __GFP_FOO bits */
#define __GFP_BITS_SHIFT 26
#define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))

/*
 * Useful GFP flag combinations. It is recommended that subsystems start
 * with one of these combinations and then set/clear __GFP_FOO flags as
 * necessary.
 *
 * GFP_ATOMIC users cannot sleep and need the allocation to succeed. A lower
 * watermark is applied to allow access to "atomic reserves".
 *
 * GFP_KERNEL is typical for kernel-internal allocations. The caller requires
 * ZONE_NORMAL or a lower zone for direct access but can enter direct reclaim.
 *
 * GFP_NOWAIT is for kernel allocations that should not stall for direct
 * reclaim, start physical IO or use any filesystem callback.
 *
 * GFP_NOIO will use direct reclaim to discard clean pages or slab pages
 * that do not require the starting of any physical IO.
 *
 * GFP_NOFS will use direct reclaim but will not use any filesystem interfaces.
 *
 * GFP_USER is for userspace allocations that also need to be directly
 * accessible by the kernel or hardware. It is typically used by hardware
 * for buffers that are mapped to userspace (e.g. graphics) that hardware
 * still must DMA to. cpuset limits are enforced for these allocations.
 *
 * GFP_DMA exists for historical reasons and should be avoided where possible.
 * This flag indicates that the caller requires that the lowest zone be
 * used (ZONE_DMA or 16M on x86-64). Ideally, this would be removed but
 * it would require careful auditing as some users really require it and
 * others use the flag to avoid lowmem reserves in ZONE_DMA and treat the
 * lowest zone as a type of emergency reserve.
 *
 * GFP_DMA32 is similar to GFP_DMA except that the caller requires a 32-bit
 * address.
 *
 * GFP_HIGHUSER is for userspace allocations that may be mapped to userspace,
 * do not need to be directly accessible by the kernel but that cannot
 * move once in use. An example may be a hardware allocation that maps
 * data directly into userspace but has no addressing limitations.
 *
 * GFP_HIGHUSER_MOVABLE is for userspace allocations that the kernel does not
 * need direct access to but can use kmap() when access is required. They
 * are expected to be movable via page reclaim or page migration. Typically,
 * pages on the LRU would also be allocated with GFP_HIGHUSER_MOVABLE.
 *
 * GFP_TRANSHUGE is used for THP allocations. They are compound allocations
 * that will fail quickly if memory is not available and will not wake
 * kswapd on failure.
 */
#define GFP_ATOMIC	(__GFP_HIGH|__GFP_ATOMIC|__GFP_KSWAPD_RECLAIM)
#define GFP_KERNEL	(__GFP_RECLAIM | __GFP_IO | __GFP_FS)
#define GFP_NOWAIT	(__GFP_KSWAPD_RECLAIM)
#define GFP_NOIO	(__GFP_RECLAIM)
#define GFP_NOFS	(__GFP_RECLAIM | __GFP_IO)
#define GFP_TEMPORARY	(__GFP_RECLAIM | __GFP_IO | __GFP_FS | \
			 __GFP_RECLAIMABLE)
#define GFP_USER	(__GFP_RECLAIM | __GFP_IO | __GFP_FS | __GFP_HARDWALL)
#define GFP_DMA		__GFP_DMA
#define GFP_DMA32	__GFP_DMA32
#define GFP_HIGHUSER	(GFP_USER | __GFP_HIGHMEM)
#define GFP_HIGHUSER_MOVABLE	(GFP_HIGHUSER | __GFP_MOVABLE)
#define GFP_TRANSHUGE	((GFP_HIGHUSER_MOVABLE | __GFP_COMP | \
			 __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN) & \
			 ~__GFP_KSWAPD_RECLAIM)
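/*
 * Illustrative sketch (not part of the upstream header): start from a
 * standard combination and adjust per callsite, as recommended above.
 * A path that must not recurse into the filesystem drops __GFP_FS from
 * GFP_KERNEL rather than inventing a new mask:
 *
 *	page = alloc_page(GFP_KERNEL & ~__GFP_FS);
 *
 * The resulting mask is equivalent to GFP_NOFS.
 */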

/* Convert GFP flags to their corresponding migrate type */
#define GFP_MOVABLE_MASK (__GFP_RECLAIMABLE|__GFP_MOVABLE)
#define GFP_MOVABLE_SHIFT 3

static inline int gfpflags_to_migratetype(const gfp_t gfp_flags)
{
	VM_WARN_ON((gfp_flags & GFP_MOVABLE_MASK) == GFP_MOVABLE_MASK);
	BUILD_BUG_ON((1UL << GFP_MOVABLE_SHIFT) != ___GFP_MOVABLE);
	BUILD_BUG_ON((___GFP_MOVABLE >> GFP_MOVABLE_SHIFT) != MIGRATE_MOVABLE);

	if (unlikely(page_group_by_mobility_disabled))
		return MIGRATE_UNMOVABLE;

	/* Group based on mobility */
	return (gfp_flags & GFP_MOVABLE_MASK) >> GFP_MOVABLE_SHIFT;
}
#undef GFP_MOVABLE_MASK
#undef GFP_MOVABLE_SHIFT
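/*
 * Worked example (illustrative, not part of the upstream header):
 * GFP_HIGHUSER_MOVABLE sets __GFP_MOVABLE (bit 3, 0x08), so
 * (flags & GFP_MOVABLE_MASK) >> 3 == 0x08 >> 3 == 1 == MIGRATE_MOVABLE,
 * while plain GFP_KERNEL sets neither mobility bit and maps to
 * 0 == MIGRATE_UNMOVABLE.
 */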

static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags)
{
	return (bool __force)(gfp_flags & __GFP_DIRECT_RECLAIM);
}
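/*
 * Illustrative sketch (not part of the upstream header): code that
 * receives a gfp mask from its caller can use this helper to decide
 * whether sleeping locks are safe:
 *
 *	if (gfpflags_allow_blocking(gfp_mask))
 *		mutex_lock(&lock);
 *	else if (!mutex_trylock(&lock))
 *		return NULL;
 */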

#ifdef CONFIG_HIGHMEM
#define OPT_ZONE_HIGHMEM ZONE_HIGHMEM
#else
#define OPT_ZONE_HIGHMEM ZONE_NORMAL
#endif

#ifdef CONFIG_ZONE_DMA
#define OPT_ZONE_DMA ZONE_DMA
#else
#define OPT_ZONE_DMA ZONE_NORMAL
#endif

#ifdef CONFIG_ZONE_DMA32
#define OPT_ZONE_DMA32 ZONE_DMA32
#else
#define OPT_ZONE_DMA32 ZONE_NORMAL
#endif

/*
 * GFP_ZONE_TABLE is a word size bitstring that is used for looking up the
 * zone to use given the lowest 4 bits of gfp_t. Entries are ZONES_SHIFT
 * bits long and there are 16 of them to cover all possible combinations of
 * __GFP_DMA, __GFP_DMA32, __GFP_MOVABLE and __GFP_HIGHMEM.
 *
 * The zone fallback order is MOVABLE=>HIGHMEM=>NORMAL=>DMA32=>DMA.
 * But __GFP_MOVABLE is not only a zone specifier but also an allocation
 * policy. Therefore __GFP_MOVABLE plus another zone selector is valid.
 * Only 1 bit of the lowest 3 bits (DMA,DMA32,HIGHMEM) can be set to "1".
 *
 *       bit       result
 *       =================
 *       0x0    => NORMAL
 *       0x1    => DMA or NORMAL
 *       0x2    => HIGHMEM or NORMAL
 *       0x3    => BAD (DMA+HIGHMEM)
 *       0x4    => DMA32 or DMA or NORMAL
 *       0x5    => BAD (DMA+DMA32)
 *       0x6    => BAD (HIGHMEM+DMA32)
 *       0x7    => BAD (HIGHMEM+DMA32+DMA)
 *       0x8    => NORMAL (MOVABLE+0)
 *       0x9    => DMA or NORMAL (MOVABLE+DMA)
 *       0xa    => MOVABLE (Movable is valid only if HIGHMEM is set too)
 *       0xb    => BAD (MOVABLE+HIGHMEM+DMA)
 *       0xc    => DMA32 (MOVABLE+DMA32)
 *       0xd    => BAD (MOVABLE+DMA32+DMA)
 *       0xe    => BAD (MOVABLE+DMA32+HIGHMEM)
 *       0xf    => BAD (MOVABLE+DMA32+HIGHMEM+DMA)
 *
 * ZONES_SHIFT must be <= 2 on 32 bit platforms.
 */

#if 16 * ZONES_SHIFT > BITS_PER_LONG
#error ZONES_SHIFT too large to create GFP_ZONE_TABLE integer
#endif

#define GFP_ZONE_TABLE ( \
	(ZONE_NORMAL << 0 * ZONES_SHIFT) \
	| (OPT_ZONE_DMA << ___GFP_DMA * ZONES_SHIFT) \
	| (OPT_ZONE_HIGHMEM << ___GFP_HIGHMEM * ZONES_SHIFT) \
	| (OPT_ZONE_DMA32 << ___GFP_DMA32 * ZONES_SHIFT) \
	| (ZONE_NORMAL << ___GFP_MOVABLE * ZONES_SHIFT) \
	| (OPT_ZONE_DMA << (___GFP_MOVABLE | ___GFP_DMA) * ZONES_SHIFT) \
	| (ZONE_MOVABLE << (___GFP_MOVABLE | ___GFP_HIGHMEM) * ZONES_SHIFT) \
	| (OPT_ZONE_DMA32 << (___GFP_MOVABLE | ___GFP_DMA32) * ZONES_SHIFT) \
)

/*
 * GFP_ZONE_BAD is a bitmap for all combinations of __GFP_DMA, __GFP_DMA32
 * __GFP_HIGHMEM and __GFP_MOVABLE that are not permitted. One flag per
 * entry starting with bit 0. Bit is set if the combination is not
 * allowed.
 */
#define GFP_ZONE_BAD ( \
	1 << (___GFP_DMA | ___GFP_HIGHMEM) \
	| 1 << (___GFP_DMA | ___GFP_DMA32) \
	| 1 << (___GFP_DMA32 | ___GFP_HIGHMEM) \
	| 1 << (___GFP_DMA | ___GFP_DMA32 | ___GFP_HIGHMEM) \
	| 1 << (___GFP_MOVABLE | ___GFP_HIGHMEM | ___GFP_DMA) \
	| 1 << (___GFP_MOVABLE | ___GFP_DMA32 | ___GFP_DMA) \
	| 1 << (___GFP_MOVABLE | ___GFP_DMA32 | ___GFP_HIGHMEM) \
	| 1 << (___GFP_MOVABLE | ___GFP_DMA32 | ___GFP_DMA | ___GFP_HIGHMEM) \
)

static inline enum zone_type gfp_zone(gfp_t flags)
{
	enum zone_type z;
	int bit = (__force int) (flags & GFP_ZONEMASK);

	z = (GFP_ZONE_TABLE >> (bit * ZONES_SHIFT)) &
					 ((1 << ZONES_SHIFT) - 1);
	VM_BUG_ON((GFP_ZONE_BAD >> bit) & 1);
	return z;
}
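/*
 * Worked example (illustrative, not part of the upstream header): for
 * GFP_HIGHUSER_MOVABLE the low bits are __GFP_HIGHMEM|__GFP_MOVABLE
 * (0xa), so gfp_zone() indexes entry 0xa of GFP_ZONE_TABLE and returns
 * ZONE_MOVABLE; plain GFP_KERNEL has no zone bits set (0x0) and returns
 * ZONE_NORMAL.
 */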

/*
 * There is only one page-allocator function, and two main namespaces to
 * it. The alloc_page*() variants return 'struct page *' and as such can
 * allocate highmem pages; the *get*page*() variants return virtual kernel
 * addresses to the allocated page(s).
 */
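/*
 * Illustrative sketch (not part of the upstream header): the same
 * order-0 allocation through both namespaces. Only the page-based form
 * may be used with __GFP_HIGHMEM, since a highmem page may have no
 * permanent kernel virtual address:
 *
 *	struct page *page = alloc_page(GFP_KERNEL);
 *	unsigned long addr = __get_free_page(GFP_KERNEL);
 *
 *	__free_page(page);
 *	free_page(addr);
 */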

static inline int gfp_zonelist(gfp_t flags)
{
	if (IS_ENABLED(CONFIG_NUMA) && unlikely(flags & __GFP_THISNODE))
		return 1;

	return 0;
}

/*
 * We get the zone list from the current node and the gfp_mask.
 * This zone list contains a maximum of MAX_NUMNODES*MAX_NR_ZONES zones.
 * There are two zonelists per node, one for all zones with memory and
 * one containing just zones from the node the zonelist belongs to.
 *
 * For the normal case of non-DISCONTIGMEM systems the NODE_DATA() gets
 * optimized to &contig_page_data at compile-time.
 */
static inline struct zonelist *node_zonelist(int nid, gfp_t flags)
{
	return NODE_DATA(nid)->node_zonelists + gfp_zonelist(flags);
}
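/*
 * Illustrative sketch (not part of the upstream header): __GFP_THISNODE
 * selects the node-local zonelist (index 1, via gfp_zonelist() above),
 * so there is no fallback to other nodes:
 *
 *	zonelist = node_zonelist(nid, GFP_KERNEL | __GFP_THISNODE);
 */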

#ifndef HAVE_ARCH_FREE_PAGE
static inline void arch_free_page(struct page *page, int order) { }
#endif
#ifndef HAVE_ARCH_ALLOC_PAGE
static inline void arch_alloc_page(struct page *page, int order) { }
#endif

struct page *
__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
		       struct zonelist *zonelist, nodemask_t *nodemask);

static inline struct page *
__alloc_pages(gfp_t gfp_mask, unsigned int order,
		struct zonelist *zonelist)
{
	return __alloc_pages_nodemask(gfp_mask, order, zonelist, NULL);
}

/*
 * Allocate pages, preferring the node given as nid. The node must be valid and
 * online. For a more general interface, see alloc_pages_node().
 */
static inline struct page *
__alloc_pages_node(int nid, gfp_t gfp_mask, unsigned int order)
{
	VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES);
	VM_WARN_ON(!node_online(nid));

	return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask));
}

/*
 * Allocate pages, preferring the node given as nid. When nid == NUMA_NO_NODE,
 * prefer the current CPU's closest node. Otherwise node must be valid and
 * online.
 */
static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
						unsigned int order)
{
	if (nid == NUMA_NO_NODE)
		nid = numa_mem_id();

	return __alloc_pages_node(nid, gfp_mask, order);
}

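/*
 * Illustrative sketch (not part of the upstream header): data used
 * mostly by one CPU is best allocated on that CPU's node; passing
 * NUMA_NO_NODE instead lets the allocator pick the current CPU's
 * closest node ("cpu" is an assumed variable):
 *
 *	page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, 0);
 *	page = alloc_pages_node(NUMA_NO_NODE, GFP_KERNEL, 0);
 */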
#ifdef CONFIG_NUMA
extern struct page *alloc_pages_current(gfp_t gfp_mask, unsigned order);

static inline struct page *
alloc_pages(gfp_t gfp_mask, unsigned int order)
{
	return alloc_pages_current(gfp_mask, order);
}
extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order,
			struct vm_area_struct *vma, unsigned long addr,
			int node, bool hugepage);
#define alloc_hugepage_vma(gfp_mask, vma, addr, order)	\
	alloc_pages_vma(gfp_mask, order, vma, addr, numa_node_id(), true)
#else
#define alloc_pages(gfp_mask, order) \
		alloc_pages_node(numa_node_id(), gfp_mask, order)
#define alloc_pages_vma(gfp_mask, order, vma, addr, node, hugepage) \
	alloc_pages(gfp_mask, order)
#define alloc_hugepage_vma(gfp_mask, vma, addr, order)	\
	alloc_pages(gfp_mask, order)
#endif
#define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)
#define alloc_page_vma(gfp_mask, vma, addr)			\
	alloc_pages_vma(gfp_mask, 0, vma, addr, numa_node_id(), false)
#define alloc_page_vma_node(gfp_mask, vma, addr, node)		\
	alloc_pages_vma(gfp_mask, 0, vma, addr, node, false)

extern struct page *alloc_kmem_pages(gfp_t gfp_mask, unsigned int order);
extern struct page *alloc_kmem_pages_node(int nid, gfp_t gfp_mask,
					  unsigned int order);

extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order);
extern unsigned long get_zeroed_page(gfp_t gfp_mask);

void *alloc_pages_exact(size_t size, gfp_t gfp_mask);
void free_pages_exact(void *virt, size_t size);
void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask);

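/*
 * Illustrative sketch (not part of the upstream header): when a buffer
 * size is not a power-of-two number of pages, alloc_pages_exact() avoids
 * the rounding-up waste of a plain high-order allocation:
 *
 *	buf = alloc_pages_exact(17 * PAGE_SIZE, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	free_pages_exact(buf, 17 * PAGE_SIZE);
 */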
#define __get_free_page(gfp_mask) \
		__get_free_pages((gfp_mask), 0)

#define __get_dma_pages(gfp_mask, order) \
		__get_free_pages((gfp_mask) | GFP_DMA, (order))

extern void __free_pages(struct page *page, unsigned int order);
extern void free_pages(unsigned long addr, unsigned int order);
extern void free_hot_cold_page(struct page *page, bool cold);
extern void free_hot_cold_page_list(struct list_head *list, bool cold);

struct page_frag_cache;
extern void *__alloc_page_frag(struct page_frag_cache *nc,
			       unsigned int fragsz, gfp_t gfp_mask);
extern void __free_page_frag(void *addr);

extern void __free_kmem_pages(struct page *page, unsigned int order);
extern void free_kmem_pages(unsigned long addr, unsigned int order);

#define __free_page(page) __free_pages((page), 0)
#define free_page(addr) free_pages((addr), 0)

void page_alloc_init(void);
void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp);
void drain_all_pages(struct zone *zone);
void drain_local_pages(struct zone *zone);

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
void page_alloc_init_late(void);
#else
static inline void page_alloc_init_late(void)
{
}
#endif

/*
 * gfp_allowed_mask is set to GFP_BOOT_MASK during early boot to restrict what
 * GFP flags are used before interrupts are enabled. Once interrupts are
 * enabled, it is set to __GFP_BITS_MASK while the system is running. During
 * hibernation, it is used by PM to avoid I/O during memory allocation while
 * devices are suspended.
 */
extern gfp_t gfp_allowed_mask;

/* Returns true if the gfp_mask allows use of ALLOC_NO_WATERMARK */
bool gfp_pfmemalloc_allowed(gfp_t gfp_mask);

extern void pm_restrict_gfp_mask(void);
extern void pm_restore_gfp_mask(void);

#ifdef CONFIG_PM_SLEEP
extern bool pm_suspended_storage(void);
#else
static inline bool pm_suspended_storage(void)
{
	return false;
}
#endif /* CONFIG_PM_SLEEP */

#ifdef CONFIG_CMA

/* The below functions must be run on a range from a single zone. */
extern int alloc_contig_range(unsigned long start, unsigned long end,
			      unsigned migratetype);
extern void free_contig_range(unsigned long pfn, unsigned nr_pages);

/* CMA stuff */
extern void init_cma_reserved_pageblock(struct page *page);

#endif
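/*
 * Illustrative sketch (not part of the upstream header): a CMA-style
 * user claims a physically contiguous PFN range and later releases it.
 * start_pfn and nr are assumed, caller-chosen values within one zone:
 *
 *	ret = alloc_contig_range(start_pfn, start_pfn + nr, MIGRATE_CMA);
 *	if (!ret)
 *		free_contig_range(start_pfn, nr);
 */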

#endif /* __LINUX_GFP_H */