#ifndef __LINUX_GFP_H
#define __LINUX_GFP_H

#include <linux/mmzone.h>
#include <linux/stddef.h>
#include <linux/linkage.h>
#include <linux/topology.h>

struct vm_area_struct;

/*
 * GFP bitmasks..
 *
 * Zone modifiers (see linux/mmzone.h - low three bits)
 *
 * Do not put any conditional on these. If necessary modify the definitions
 * without the underscores and use them consistently. The definitions here may
 * be used in bit comparisons.
 */
#define __GFP_DMA	((__force gfp_t)0x01u)
#define __GFP_HIGHMEM	((__force gfp_t)0x02u)
#define __GFP_DMA32	((__force gfp_t)0x04u)

/*
 * Action modifiers - don't change the zoning
 *
 * __GFP_REPEAT: Try hard to allocate the memory, but the allocation attempt
 * _might_ fail. This depends upon the particular VM implementation.
 *
 * __GFP_NOFAIL: The VM implementation _must_ retry infinitely: the caller
 * cannot handle allocation failures.
 *
 * __GFP_NORETRY: The VM implementation must not retry indefinitely.
 *
 * __GFP_MOVABLE: Flag that this page will be movable by the page migration
 * mechanism or reclaimed.
 */
#define __GFP_WAIT	((__force gfp_t)0x10u)	/* Can wait and reschedule? */
#define __GFP_HIGH	((__force gfp_t)0x20u)	/* Should access emergency pools? */
#define __GFP_IO	((__force gfp_t)0x40u)	/* Can start physical IO? */
#define __GFP_FS	((__force gfp_t)0x80u)	/* Can call down to low-level FS? */
#define __GFP_COLD	((__force gfp_t)0x100u)	/* Cache-cold page required */
#define __GFP_NOWARN	((__force gfp_t)0x200u)	/* Suppress page allocation failure warning */
#define __GFP_REPEAT	((__force gfp_t)0x400u)	/* See above */
#define __GFP_NOFAIL	((__force gfp_t)0x800u)	/* See above */
#define __GFP_NORETRY	((__force gfp_t)0x1000u)	/* See above */
#define __GFP_COMP	((__force gfp_t)0x4000u)	/* Add compound page metadata */
#define __GFP_ZERO	((__force gfp_t)0x8000u)	/* Return zeroed page on success */
#define __GFP_NOMEMALLOC ((__force gfp_t)0x10000u)	/* Don't use emergency reserves */
#define __GFP_HARDWALL	((__force gfp_t)0x20000u)	/* Enforce hardwall cpuset memory allocs */
#define __GFP_THISNODE	((__force gfp_t)0x40000u)	/* No fallback, no policies */
#define __GFP_RECLAIMABLE ((__force gfp_t)0x80000u)	/* Page is reclaimable */
#define __GFP_MOVABLE	((__force gfp_t)0x100000u)	/* Page is movable */
#define __GFP_NOTRACK	((__force gfp_t)0x200000u)	/* Don't track with kmemcheck */
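
/*
 * Usage sketch (illustrative, not part of the original header): action
 * modifiers are OR-ed into one of the composite GFP_* flag sets defined
 * below rather than used on their own, e.g.
 *
 *	page = alloc_page(GFP_KERNEL | __GFP_ZERO);	(zeroed page)
 *	page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);	(quiet on failure)
 */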

/*
 * This may seem redundant, but it's a way of annotating false positives vs.
 * allocations that simply cannot be supported (e.g. page tables).
 */
#define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)

#define __GFP_BITS_SHIFT 22	/* Room for 22 __GFP_FOO bits */
#define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))

/* This equals 0, but use constants in case they ever change */
#define GFP_NOWAIT	(GFP_ATOMIC & ~__GFP_HIGH)
/* GFP_ATOMIC means both !wait (__GFP_WAIT not set) and use emergency pool */
#define GFP_ATOMIC	(__GFP_HIGH)
#define GFP_NOIO	(__GFP_WAIT)
#define GFP_NOFS	(__GFP_WAIT | __GFP_IO)
#define GFP_KERNEL	(__GFP_WAIT | __GFP_IO | __GFP_FS)
#define GFP_TEMPORARY	(__GFP_WAIT | __GFP_IO | __GFP_FS | \
			 __GFP_RECLAIMABLE)
#define GFP_USER	(__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL)
#define GFP_HIGHUSER	(__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL | \
			 __GFP_HIGHMEM)
#define GFP_HIGHUSER_MOVABLE	(__GFP_WAIT | __GFP_IO | __GFP_FS | \
				 __GFP_HARDWALL | __GFP_HIGHMEM | \
				 __GFP_MOVABLE)
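
/*
 * Illustrative sketch (hypothetical helper, not part of this header): the
 * composite flag a caller picks is dictated by its execution context.
 * GFP_KERNEL may sleep and start IO/FS activity; GFP_ATOMIC may not sleep
 * and taps the emergency pools instead:
 *
 *	static void *grab_buffer(void)
 *	{
 *		gfp_t gfp = in_interrupt() ? GFP_ATOMIC : GFP_KERNEL;
 *
 *		return (void *)__get_free_page(gfp);
 *	}
 */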

#ifdef CONFIG_NUMA
#define GFP_THISNODE	(__GFP_THISNODE | __GFP_NOWARN | __GFP_NORETRY)
#else
#define GFP_THISNODE	((__force gfp_t)0)
#endif

/* This mask makes up all the page movable related flags */
#define GFP_MOVABLE_MASK (__GFP_RECLAIMABLE|__GFP_MOVABLE)

/* Control page allocator reclaim behavior */
#define GFP_RECLAIM_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_FS|\
			__GFP_NOWARN|__GFP_REPEAT|__GFP_NOFAIL|\
			__GFP_NORETRY|__GFP_NOMEMALLOC)

/* Control allocation constraints */
#define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE)

/* Do not use these with a slab allocator */
#define GFP_SLAB_BUG_MASK (__GFP_DMA32|__GFP_HIGHMEM|~__GFP_BITS_MASK)
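
/*
 * Usage sketch (assumed caller, for illustration): these masks let a layer
 * keep only the bits it is responsible for, or reject bits it cannot
 * honour:
 *
 *	gfp_t reclaim_gfp = gfp_mask & GFP_RECLAIM_MASK;
 *	BUG_ON(gfp_mask & GFP_SLAB_BUG_MASK);	(slab cannot serve these)
 */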

/* Flag - indicates that the buffer will be suitable for DMA.  Ignored on
   some platforms, used as appropriate on others */
#define GFP_DMA		__GFP_DMA

/* 4GB DMA on some platforms */
#define GFP_DMA32	__GFP_DMA32

/* Convert GFP flags to their corresponding migrate type */
static inline int allocflags_to_migratetype(gfp_t gfp_flags)
{
	WARN_ON((gfp_flags & GFP_MOVABLE_MASK) == GFP_MOVABLE_MASK);

	if (unlikely(page_group_by_mobility_disabled))
		return MIGRATE_UNMOVABLE;

	/* Group based on mobility */
	return (((gfp_flags & __GFP_MOVABLE) != 0) << 1) |
		((gfp_flags & __GFP_RECLAIMABLE) != 0);
}
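
/*
 * Illustrative mapping, derived from the bit arithmetic above (values per
 * the MIGRATE_* enum in linux/mmzone.h):
 *
 *	neither flag set	-> 0 (MIGRATE_UNMOVABLE)
 *	__GFP_RECLAIMABLE	-> 1 (MIGRATE_RECLAIMABLE)
 *	__GFP_MOVABLE		-> 2 (MIGRATE_MOVABLE)
 *	both flags set		-> 3, flagged invalid by the WARN_ON() above
 */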

static inline enum zone_type gfp_zone(gfp_t flags)
{
#ifdef CONFIG_ZONE_DMA
	if (flags & __GFP_DMA)
		return ZONE_DMA;
#endif
#ifdef CONFIG_ZONE_DMA32
	if (flags & __GFP_DMA32)
		return ZONE_DMA32;
#endif
	if ((flags & (__GFP_HIGHMEM | __GFP_MOVABLE)) ==
			(__GFP_HIGHMEM | __GFP_MOVABLE))
		return ZONE_MOVABLE;
#ifdef CONFIG_HIGHMEM
	if (flags & __GFP_HIGHMEM)
		return ZONE_HIGHMEM;
#endif
	return ZONE_NORMAL;
}
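
/*
 * Illustrative examples, assuming CONFIG_ZONE_DMA, CONFIG_ZONE_DMA32 and
 * CONFIG_HIGHMEM are all enabled:
 *
 *	gfp_zone(GFP_KERNEL)		-> ZONE_NORMAL
 *	gfp_zone(GFP_DMA)		-> ZONE_DMA
 *	gfp_zone(GFP_HIGHUSER)		-> ZONE_HIGHMEM
 *	gfp_zone(GFP_HIGHUSER_MOVABLE)	-> ZONE_MOVABLE
 */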

/*
 * There is only one page-allocator function, and two main namespaces to
 * it. The alloc_page*() variants return 'struct page *' and as such can
 * allocate highmem pages; the *get*page*() variants return virtual kernel
 * addresses to the allocated page(s).
 */
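
/*
 * Usage sketch (illustrative) of the two namespaces, using functions
 * declared further down in this header:
 *
 *	struct page *page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
 *	unsigned long addr = __get_free_page(GFP_KERNEL);
 *
 * Only the alloc_page*() form may pass __GFP_HIGHMEM, since a highmem
 * page has no permanent kernel virtual address to return.
 */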

static inline int gfp_zonelist(gfp_t flags)
{
	if (NUMA_BUILD && unlikely(flags & __GFP_THISNODE))
		return 1;

	return 0;
}

/*
 * We get the zone list from the current node and the gfp_mask.
 * This zone list contains a maximum of MAX_NUMNODES*MAX_NR_ZONES zones.
 * There are two zonelists per node, one for all zones with memory and
 * one containing just zones from the node the zonelist belongs to.
 *
 * For the normal case of non-DISCONTIGMEM systems the NODE_DATA() gets
 * optimized to &contig_page_data at compile-time.
 */
static inline struct zonelist *node_zonelist(int nid, gfp_t flags)
{
	return NODE_DATA(nid)->node_zonelists + gfp_zonelist(flags);
}
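
/*
 * Usage sketch (illustrative): the allocator resolves a node id and gfp
 * mask to one of the two per-node zonelists before walking it:
 *
 *	struct zonelist *zl = node_zonelist(numa_node_id(), GFP_KERNEL);
 *
 * With __GFP_THISNODE set, gfp_zonelist() returns index 1 and the walk is
 * confined to zones of that node, with no fallback to other nodes.
 */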

#ifndef HAVE_ARCH_FREE_PAGE
static inline void arch_free_page(struct page *page, int order) { }
#endif
#ifndef HAVE_ARCH_ALLOC_PAGE
static inline void arch_alloc_page(struct page *page, int order) { }
#endif

struct page *
__alloc_pages_internal(gfp_t gfp_mask, unsigned int order,
		       struct zonelist *zonelist, nodemask_t *nodemask);

static inline struct page *
__alloc_pages(gfp_t gfp_mask, unsigned int order,
		struct zonelist *zonelist)
{
	return __alloc_pages_internal(gfp_mask, order, zonelist, NULL);
}

static inline struct page *
__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
		struct zonelist *zonelist, nodemask_t *nodemask)
{
	return __alloc_pages_internal(gfp_mask, order, zonelist, nodemask);
}

static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
						unsigned int order)
{
	if (unlikely(order >= MAX_ORDER))
		return NULL;

	/* Unknown node is current node */
	if (nid < 0)
		nid = numa_node_id();

	return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask));
}
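
/*
 * Usage sketch (illustrative): allocate four contiguous pages (order 2)
 * on a given node; nid == -1 means "use the current node":
 *
 *	struct page *pages = alloc_pages_node(nid, GFP_KERNEL, 2);
 *	if (!pages)
 *		return -ENOMEM;
 */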

#ifdef CONFIG_NUMA
extern struct page *alloc_pages_current(gfp_t gfp_mask, unsigned order);

static inline struct page *
alloc_pages(gfp_t gfp_mask, unsigned int order)
{
	if (unlikely(order >= MAX_ORDER))
		return NULL;

	return alloc_pages_current(gfp_mask, order);
}
extern struct page *alloc_page_vma(gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr);
#else
#define alloc_pages(gfp_mask, order) \
		alloc_pages_node(numa_node_id(), gfp_mask, order)
#define alloc_page_vma(gfp_mask, vma, addr) alloc_pages(gfp_mask, 0)
#endif
#define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)

extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order);
extern unsigned long get_zeroed_page(gfp_t gfp_mask);

void *alloc_pages_exact(size_t size, gfp_t gfp_mask);
void free_pages_exact(void *virt, size_t size);
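
/*
 * Usage sketch (illustrative): alloc_pages_exact() suits sizes that are
 * not a power-of-two number of pages; it rounds the allocation up to the
 * next order internally but gives the unused tail pages back:
 *
 *	void *buf = alloc_pages_exact(48 * 1024, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	free_pages_exact(buf, 48 * 1024);
 */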

#define __get_free_page(gfp_mask) \
		__get_free_pages((gfp_mask), 0)

#define __get_dma_pages(gfp_mask, order) \
		__get_free_pages((gfp_mask) | GFP_DMA, (order))

extern void __free_pages(struct page *page, unsigned int order);
extern void free_pages(unsigned long addr, unsigned int order);
extern void free_hot_page(struct page *page);
extern void free_cold_page(struct page *page);

#define __free_page(page) __free_pages((page), 0)
#define free_page(addr) free_pages((addr), 0)

void page_alloc_init(void);
void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp);
void drain_all_pages(void);
void drain_local_pages(void *dummy);

#endif /* __LINUX_GFP_H */