Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | #ifndef __LINUX_GFP_H |
2 | #define __LINUX_GFP_H | |
3 | ||
4 | #include <linux/mmzone.h> | |
5 | #include <linux/stddef.h> | |
6 | #include <linux/linkage.h> | |
082edb7b | 7 | #include <linux/topology.h> |
1da177e4 LT |
8 | |
9 | struct vm_area_struct; | |
10 | ||
11 | /* | |
12 | * GFP bitmasks. | |
e53ef38d CL |
13 | * |
14 | * Zone modifiers (see linux/mmzone.h - low three bits) | |
15 | * | |
e53ef38d CL |
16 | * Do not put any conditionals on these. If necessary, modify the definitions
17 | * without the underscores and use them consistently. The definitions here may | |
18 | * be used in bit comparisons. | |
1da177e4 | 19 | */ |
af4ca457 AV |
20 | #define __GFP_DMA ((__force gfp_t)0x01u) |
21 | #define __GFP_HIGHMEM ((__force gfp_t)0x02u) | |
e53ef38d | 22 | #define __GFP_DMA32 ((__force gfp_t)0x04u) |
1da177e4 LT |
23 | |
24 | /* | |
25 | * Action modifiers - these do not change the zoning | |
26 | * | |
27 | * __GFP_REPEAT: Try hard to allocate the memory, but the allocation attempt | |
28 | * _might_ fail. This depends upon the particular VM implementation. | |
29 | * | |
30 | * __GFP_NOFAIL: The VM implementation _must_ retry infinitely: the caller | |
31 | * cannot handle allocation failures. | |
32 | * | |
33 | * __GFP_NORETRY: The VM implementation must not retry indefinitely. | |
769848c0 MG |
34 | * |
35 | * __GFP_MOVABLE: Flag that this page will be movable by the page migration | |
36 | * mechanism or can be reclaimed. | |
1da177e4 | 37 | */ |
af4ca457 AV |
38 | #define __GFP_WAIT ((__force gfp_t)0x10u) /* Can wait and reschedule? */ |
39 | #define __GFP_HIGH ((__force gfp_t)0x20u) /* Should access emergency pools? */ | |
40 | #define __GFP_IO ((__force gfp_t)0x40u) /* Can start physical IO? */ | |
41 | #define __GFP_FS ((__force gfp_t)0x80u) /* Can call down to low-level FS? */ | |
42 | #define __GFP_COLD ((__force gfp_t)0x100u) /* Cache-cold page required */ | |
43 | #define __GFP_NOWARN ((__force gfp_t)0x200u) /* Suppress page allocation failure warning */ | |
ab857d09 NA |
44 | #define __GFP_REPEAT ((__force gfp_t)0x400u) /* See above */ |
45 | #define __GFP_NOFAIL ((__force gfp_t)0x800u) /* See above */ | |
46 | #define __GFP_NORETRY ((__force gfp_t)0x1000u)/* See above */ | |
af4ca457 AV |
47 | #define __GFP_COMP ((__force gfp_t)0x4000u)/* Add compound page metadata */ |
48 | #define __GFP_ZERO ((__force gfp_t)0x8000u)/* Return zeroed page on success */ | |
49 | #define __GFP_NOMEMALLOC ((__force gfp_t)0x10000u) /* Don't use emergency reserves */ | |
2d6c666e | 50 | #define __GFP_HARDWALL ((__force gfp_t)0x20000u) /* Enforce hardwall cpuset memory allocs */ |
9b819d20 | 51 | #define __GFP_THISNODE ((__force gfp_t)0x40000u)/* No fallback, no policies */ |
e12ba74d MG |
52 | #define __GFP_RECLAIMABLE ((__force gfp_t)0x80000u) /* Page is reclaimable */ |
53 | #define __GFP_MOVABLE ((__force gfp_t)0x100000u) /* Page is movable */ | |
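As a quick, hedged illustration of how these bits compose (a sketch, not part of this header; the function name is invented): a zone modifier selects the zone, while the action modifiers adjust allocator behaviour without changing the zoning.

```c
#include <linux/gfp.h>
#include <linux/mm.h>

/* Sketch: a zeroed, cache-cold page from ZONE_DMA, with the
 * allocation-failure warning suppressed.  __GFP_DMA picks the zone;
 * the remaining bits are action modifiers. */
static struct page *grab_quiet_dma_page(void)
{
	return alloc_page(GFP_KERNEL | __GFP_DMA | __GFP_ZERO |
			  __GFP_COLD | __GFP_NOWARN);
}
```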
b1eeab67 VN |
54 | |
55 | #ifdef CONFIG_KMEMCHECK | |
2dff4405 | 56 | #define __GFP_NOTRACK ((__force gfp_t)0x200000u) /* Don't track with kmemcheck */ |
b1eeab67 VN |
57 | #else |
58 | #define __GFP_NOTRACK ((__force gfp_t)0) | |
59 | #endif | |
1da177e4 | 60 | |
2dff4405 VN |
61 | /* |
62 | * This may seem redundant, but it's a way of annotating false positives vs. | |
63 | * allocations that simply cannot be supported (e.g. page tables). | |
64 | */ | |
65 | #define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK) | |
66 | ||
67 | #define __GFP_BITS_SHIFT 22 /* Room for 22 __GFP_FOO bits */ | |
af4ca457 | 68 | #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1)) |
1da177e4 | 69 | |
7b04d717 JD |
70 | /* This equals 0, but use constants in case they ever change */ |
71 | #define GFP_NOWAIT (GFP_ATOMIC & ~__GFP_HIGH) | |
4eac915d | 72 | /* GFP_ATOMIC means both !wait (__GFP_WAIT not set) and use emergency pool */ |
1da177e4 LT |
73 | #define GFP_ATOMIC (__GFP_HIGH) |
74 | #define GFP_NOIO (__GFP_WAIT) | |
75 | #define GFP_NOFS (__GFP_WAIT | __GFP_IO) | |
76 | #define GFP_KERNEL (__GFP_WAIT | __GFP_IO | __GFP_FS) | |
e12ba74d MG |
77 | #define GFP_TEMPORARY (__GFP_WAIT | __GFP_IO | __GFP_FS | \ |
78 | __GFP_RECLAIMABLE) | |
f90b1d2f PJ |
79 | #define GFP_USER (__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL) |
80 | #define GFP_HIGHUSER (__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL | \ | |
81 | __GFP_HIGHMEM) | |
769848c0 MG |
82 | #define GFP_HIGHUSER_MOVABLE (__GFP_WAIT | __GFP_IO | __GFP_FS | \ |
83 | __GFP_HARDWALL | __GFP_HIGHMEM | \ | |
84 | __GFP_MOVABLE) | |
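To make the composites concrete, the bit arithmetic can be written out from the values defined above (a worked note, not part of the original source):

```c
/* GFP_KERNEL = __GFP_WAIT | __GFP_IO | __GFP_FS
 *            = 0x10u | 0x40u | 0x80u = 0xd0u
 *
 * GFP_ATOMIC = __GFP_HIGH = 0x20u
 *   (no __GFP_WAIT: may not sleep, may use emergency pools)
 *
 * GFP_NOWAIT = GFP_ATOMIC & ~__GFP_HIGH = 0x20u & ~0x20u = 0
 *   (the "equals 0" noted in the comment above) */
```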
1da177e4 | 85 | |
77f700da | 86 | #ifdef CONFIG_NUMA |
980128f2 | 87 | #define GFP_THISNODE (__GFP_THISNODE | __GFP_NOWARN | __GFP_NORETRY) |
77f700da | 88 | #else |
f2e97df6 | 89 | #define GFP_THISNODE ((__force gfp_t)0) |
77f700da CL |
90 | #endif |
91 | ||
6cb06229 | 92 | /* This mask makes up all the page-mobility-related flags */ |
e12ba74d | 93 | #define GFP_MOVABLE_MASK (__GFP_RECLAIMABLE|__GFP_MOVABLE) |
6cb06229 CL |
94 | |
95 | /* Control page allocator reclaim behavior */ | |
96 | #define GFP_RECLAIM_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_FS|\ | |
97 | __GFP_NOWARN|__GFP_REPEAT|__GFP_NOFAIL|\ | |
98 | __GFP_NORETRY|__GFP_NOMEMALLOC) | |
99 | ||
100 | /* Control allocation constraints */ | |
101 | #define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE) | |
102 | ||
103 | /* Do not use these with a slab allocator */ | |
104 | #define GFP_SLAB_BUG_MASK (__GFP_DMA32|__GFP_HIGHMEM|~__GFP_BITS_MASK) | |
980128f2 | 105 | |
1da177e4 LT |
106 | /* Flag - indicates that the buffer will be suitable for DMA. Ignored on some |
107 | platforms, used as appropriate on others */ | |
108 | ||
109 | #define GFP_DMA __GFP_DMA | |
110 | ||
a2f1b424 AK |
111 | /* 4GB DMA on some platforms */ |
112 | #define GFP_DMA32 __GFP_DMA32 | |
113 | ||
467c996c MG |
114 | /* Convert GFP flags to their corresponding migrate type */ |
115 | static inline int allocflags_to_migratetype(gfp_t gfp_flags) | |
116 | { | |
117 | WARN_ON((gfp_flags & GFP_MOVABLE_MASK) == GFP_MOVABLE_MASK); | |
118 | ||
119 | if (unlikely(page_group_by_mobility_disabled)) | |
120 | return MIGRATE_UNMOVABLE; | |
121 | ||
122 | /* Group based on mobility */ | |
123 | return (((gfp_flags & __GFP_MOVABLE) != 0) << 1) | | |
124 | ((gfp_flags & __GFP_RECLAIMABLE) != 0); | |
125 | } | |
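The bit-packing above yields the following mapping; the numeric values assume the conventional MIGRATE_* ordering from linux/mmzone.h (MIGRATE_UNMOVABLE = 0, MIGRATE_RECLAIMABLE = 1, MIGRATE_MOVABLE = 2):

```c
/* gfp_flags contain           -> allocflags_to_migratetype()
 * neither mobility bit        -> 0  (MIGRATE_UNMOVABLE)
 * __GFP_RECLAIMABLE only      -> 1  (MIGRATE_RECLAIMABLE)
 * __GFP_MOVABLE only          -> 2  (MIGRATE_MOVABLE)
 * both bits set               -> 3  (invalid; trips the WARN_ON above) */
```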
a2f1b424 | 126 | |
19655d34 | 127 | static inline enum zone_type gfp_zone(gfp_t flags) |
4e4785bc | 128 | { |
4b51d669 | 129 | #ifdef CONFIG_ZONE_DMA |
4e4785bc | 130 | if (flags & __GFP_DMA) |
8cece85e | 131 | return ZONE_DMA; |
4b51d669 | 132 | #endif |
4e4785bc CL |
133 | #ifdef CONFIG_ZONE_DMA32 |
134 | if (flags & __GFP_DMA32) | |
8cece85e | 135 | return ZONE_DMA32; |
4e4785bc | 136 | #endif |
2a1e274a MG |
137 | if ((flags & (__GFP_HIGHMEM | __GFP_MOVABLE)) == |
138 | (__GFP_HIGHMEM | __GFP_MOVABLE)) | |
8cece85e | 139 | return ZONE_MOVABLE; |
4e4785bc CL |
140 | #ifdef CONFIG_HIGHMEM |
141 | if (flags & __GFP_HIGHMEM) | |
8cece85e | 142 | return ZONE_HIGHMEM; |
4e4785bc | 143 | #endif |
8cece85e | 144 | return ZONE_NORMAL; |
4e4785bc CL |
145 | } |
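A few worked examples of the fall-through order above, assuming a kernel built with all of the relevant zone config options enabled:

```c
/* gfp_zone(GFP_KERNEL)               == ZONE_NORMAL   (no zone bits set)
 * gfp_zone(GFP_KERNEL | __GFP_DMA)   == ZONE_DMA
 * gfp_zone(GFP_KERNEL | __GFP_DMA32) == ZONE_DMA32
 * gfp_zone(GFP_HIGHUSER)             == ZONE_HIGHMEM  (__GFP_HIGHMEM only)
 * gfp_zone(GFP_HIGHUSER_MOVABLE)     == ZONE_MOVABLE  (HIGHMEM + MOVABLE) */
```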
146 | ||
1da177e4 LT |
147 | /* |
148 | * There is only one page-allocator function, and two main namespaces to | |
149 | * it. The alloc_page*() variants return 'struct page *' and as such | |
150 | * can allocate highmem pages; the *get*page*() variants return | |
151 | * virtual kernel addresses to the allocated page(s). | |
152 | */ | |
153 | ||
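A minimal sketch of the two namespaces side by side (the function name is invented; note that the *get*page*() variants must not be passed __GFP_HIGHMEM, since a highmem page has no permanent kernel mapping to return):

```c
static void two_namespaces_demo(void)
{
	struct page *page = alloc_page(GFP_HIGHUSER);	  /* struct page * */
	unsigned long addr = __get_free_page(GFP_KERNEL); /* kernel vaddr  */

	if (page)
		__free_page(page);
	if (addr)
		free_page(addr);
}
```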
54a6eb5c MG |
154 | static inline int gfp_zonelist(gfp_t flags) |
155 | { | |
156 | if (NUMA_BUILD && unlikely(flags & __GFP_THISNODE)) | |
157 | return 1; | |
158 | ||
159 | return 0; | |
160 | } | |
161 | ||
1da177e4 LT |
162 | /* |
163 | * We get the zone list from the given node and the gfp_mask. | |
164 | * This zone list contains a maximum of MAXNODES*MAX_NR_ZONES zones. | |
54a6eb5c MG |
165 | * There are two zonelists per node, one for all zones with memory and |
166 | * one containing just zones from the node the zonelist belongs to. | |
1da177e4 LT |
167 | * |
168 | * For the normal case of non-DISCONTIGMEM systems the NODE_DATA() gets | |
169 | * optimized to &contig_page_data at compile-time. | |
170 | */ | |
0e88460d MG |
171 | static inline struct zonelist *node_zonelist(int nid, gfp_t flags) |
172 | { | |
54a6eb5c | 173 | return NODE_DATA(nid)->node_zonelists + gfp_zonelist(flags); |
0e88460d | 174 | } |
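A usage sketch (hypothetical function name): index 1, the node-local zonelist, is chosen by gfp_zonelist() only when __GFP_THISNODE is set on a NUMA build; otherwise index 0, the list of all zones with memory, is used.

```c
static struct page *alloc_strictly_on_node(int nid, unsigned int order)
{
	gfp_t gfp = GFP_KERNEL | __GFP_THISNODE;

	/* No fallback to other nodes: the THISNODE zonelist only
	 * contains zones from nid. */
	return __alloc_pages(gfp, order, node_zonelist(nid, gfp));
}
```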
1da177e4 LT |
175 | |
176 | #ifndef HAVE_ARCH_FREE_PAGE | |
177 | static inline void arch_free_page(struct page *page, int order) { } | |
178 | #endif | |
cc102509 NP |
179 | #ifndef HAVE_ARCH_ALLOC_PAGE |
180 | static inline void arch_alloc_page(struct page *page, int order) { } | |
181 | #endif | |
1da177e4 | 182 | |
e4048e5d KM |
183 | struct page * |
184 | __alloc_pages_internal(gfp_t gfp_mask, unsigned int order, | |
185 | struct zonelist *zonelist, nodemask_t *nodemask); | |
186 | ||
187 | static inline struct page * | |
188 | __alloc_pages(gfp_t gfp_mask, unsigned int order, | |
189 | struct zonelist *zonelist) | |
190 | { | |
191 | return __alloc_pages_internal(gfp_mask, order, zonelist, NULL); | |
192 | } | |
193 | ||
194 | static inline struct page * | |
195 | __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, | |
196 | struct zonelist *zonelist, nodemask_t *nodemask) | |
197 | { | |
198 | return __alloc_pages_internal(gfp_mask, order, zonelist, nodemask); | |
199 | } | |
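A hedged sketch of the nodemask variant, restricting an allocation to an explicit set of nodes (the nodemask helpers come from linux/nodemask.h; the function name is invented):

```c
#include <linux/nodemask.h>

static struct page *alloc_from_nodes_0_and_1(gfp_t gfp_mask)
{
	nodemask_t allowed = NODE_MASK_NONE;

	node_set(0, allowed);
	node_set(1, allowed);
	/* Only zones on nodes 0 and 1 are considered. */
	return __alloc_pages_nodemask(gfp_mask, 0,
				      node_zonelist(0, gfp_mask), &allowed);
}
```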
1da177e4 | 200 | |
19770b32 | 201 | |
dd0fc66f | 202 | static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask, |
1da177e4 LT |
203 | unsigned int order) |
204 | { | |
205 | if (unlikely(order >= MAX_ORDER)) | |
206 | return NULL; | |
207 | ||
819a6928 AK |
208 | /* Unknown node is current node */ |
209 | if (nid < 0) | |
210 | nid = numa_node_id(); | |
211 | ||
0e88460d | 212 | return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask)); |
1da177e4 LT |
213 | } |
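As the check above shows, a negative nid means "current node"; a small sketch (function name invented):

```c
static struct page *alloc_local_page(void)
{
	/* nid < 0 falls back to numa_node_id(), so the page is
	 * allocated on whichever node the caller is running on. */
	return alloc_pages_node(-1, GFP_KERNEL, 0);
}
```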
214 | ||
215 | #ifdef CONFIG_NUMA | |
dd0fc66f | 216 | extern struct page *alloc_pages_current(gfp_t gfp_mask, unsigned order); |
1da177e4 LT |
217 | |
218 | static inline struct page * | |
dd0fc66f | 219 | alloc_pages(gfp_t gfp_mask, unsigned int order) |
1da177e4 LT |
220 | { |
221 | if (unlikely(order >= MAX_ORDER)) | |
222 | return NULL; | |
223 | ||
224 | return alloc_pages_current(gfp_mask, order); | |
225 | } | |
dd0fc66f | 226 | extern struct page *alloc_page_vma(gfp_t gfp_mask, |
1da177e4 LT |
227 | struct vm_area_struct *vma, unsigned long addr); |
228 | #else | |
229 | #define alloc_pages(gfp_mask, order) \ | |
230 | alloc_pages_node(numa_node_id(), gfp_mask, order) | |
231 | #define alloc_page_vma(gfp_mask, vma, addr) alloc_pages(gfp_mask, 0) | |
232 | #endif | |
233 | #define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0) | |
234 | ||
b3c97528 HH |
235 | extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order); |
236 | extern unsigned long get_zeroed_page(gfp_t gfp_mask); | |
1da177e4 | 237 | |
2be0ffe2 TT |
238 | void *alloc_pages_exact(size_t size, gfp_t gfp_mask); |
239 | void free_pages_exact(void *virt, size_t size); | |
240 | ||
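alloc_pages_exact() rounds the request up to a power-of-two allocation internally but frees the unused tail pages, so unlike __get_free_pages() it does not waste the slack; a hedged usage sketch (function name invented):

```c
#include <linux/errno.h>

static int alloc_exact_demo(void)
{
	/* 5 pages: __get_free_pages() would round up to 8 (order 3);
	 * alloc_pages_exact() returns exactly 5 pages' worth. */
	void *buf = alloc_pages_exact(5 * PAGE_SIZE, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;
	/* ... use buf ... */
	free_pages_exact(buf, 5 * PAGE_SIZE);
	return 0;
}
```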
1da177e4 LT |
241 | #define __get_free_page(gfp_mask) \ |
242 | __get_free_pages((gfp_mask),0) | |
243 | ||
244 | #define __get_dma_pages(gfp_mask, order) \ | |
245 | __get_free_pages((gfp_mask) | GFP_DMA,(order)) | |
246 | ||
b3c97528 HH |
247 | extern void __free_pages(struct page *page, unsigned int order); |
248 | extern void free_pages(unsigned long addr, unsigned int order); | |
249 | extern void free_hot_page(struct page *page); | |
250 | extern void free_cold_page(struct page *page); | |
1da177e4 LT |
251 | |
252 | #define __free_page(page) __free_pages((page), 0) | |
253 | #define free_page(addr) free_pages((addr),0) | |
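One pitfall worth a sketch (function name invented): the order passed to free_pages() must match the order used at allocation time.

```c
static void order_matching_demo(void)
{
	/* order 2 => 4 contiguous pages; the same order must be
	 * passed back when freeing. */
	unsigned long addr = __get_free_pages(GFP_KERNEL, 2);

	if (addr)
		free_pages(addr, 2);
}
```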
254 | ||
255 | void page_alloc_init(void); | |
4037d452 | 256 | void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp); |
9f8f2172 CL |
257 | void drain_all_pages(void); |
258 | void drain_local_pages(void *dummy); | |
1da177e4 LT |
259 | |
260 | #endif /* __LINUX_GFP_H */ |