/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_VMALLOC_H
#define _LINUX_VMALLOC_H

#include <linux/alloc_tag.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/llist.h>
#include <asm/page.h>           /* pgprot_t */
#include <linux/rbtree.h>
#include <linux/overflow.h>

#include <asm/vmalloc.h>

struct vm_area_struct;          /* vma defining user mapping in mm_types.h */
struct notifier_block;          /* in notifier.h */
struct iov_iter;                /* in uio.h */

/* bits in flags of vmalloc's vm_struct below */
#define VM_IOREMAP              0x00000001      /* ioremap() and friends */
#define VM_ALLOC                0x00000002      /* vmalloc() */
#define VM_MAP                  0x00000004      /* vmap()ed pages */
#define VM_USERMAP              0x00000008      /* suitable for remap_vmalloc_range */
#define VM_DMA_COHERENT         0x00000010      /* dma_alloc_coherent */
#define VM_UNINITIALIZED        0x00000020      /* vm_struct is not fully initialized */
#define VM_NO_GUARD             0x00000040      /* ***DANGEROUS*** don't add guard page */
#define VM_KASAN                0x00000080      /* has allocated kasan shadow memory */
#define VM_FLUSH_RESET_PERMS    0x00000100      /* reset direct map and flush TLB on unmap, can't be freed in atomic context */
#define VM_MAP_PUT_PAGES        0x00000200      /* put pages and free array in vfree */
#define VM_ALLOW_HUGE_VMAP      0x00000400      /* Allow for huge pages on archs with HAVE_ARCH_HUGE_VMALLOC */

#if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \
        !defined(CONFIG_KASAN_VMALLOC)
#define VM_DEFER_KMEMLEAK       0x00000800      /* defer kmemleak object creation */
#else
#define VM_DEFER_KMEMLEAK       0
#endif
#define VM_SPARSE               0x00001000      /* sparse vm_area. not all pages are present. */

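/*
 * Illustrative sketch (not part of this header): the VM_* bits above end
 * up in vm_struct->flags, so code that already holds a vmalloc/vmap
 * address can ask what kind of area backs it, for example:
 *
 *      struct vm_struct *area = find_vm_area(addr);
 *      bool from_vmalloc = area && (area->flags & VM_ALLOC);
 *
 * find_vm_area() is declared further down in this header.
 */
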
/* bits [20..32] reserved for arch specific ioremap internals */

/*
 * Maximum alignment for ioremap() regions.
 * Can be overridden by arch-specific value.
 */
#ifndef IOREMAP_MAX_ORDER
#define IOREMAP_MAX_ORDER       (7 + PAGE_SHIFT)        /* 128 pages */
#endif

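/*
 * Worked example (assuming the common 4 KiB page size, PAGE_SHIFT = 12):
 * IOREMAP_MAX_ORDER = 7 + 12 = 19, so ioremap() alignment is capped at
 * 2^19 bytes = 512 KiB, i.e. 2^7 = 128 pages, matching the comment above.
 */
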
struct vm_struct {
        struct vm_struct        *next;
        void                    *addr;
        unsigned long           size;
        unsigned long           flags;
        struct page             **pages;
#ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
        unsigned int            page_order;
#endif
        unsigned int            nr_pages;
        phys_addr_t             phys_addr;
        const void              *caller;
};

struct vmap_area {
        unsigned long va_start;
        unsigned long va_end;

        struct rb_node rb_node;         /* address sorted rbtree */
        struct list_head list;          /* address sorted list */

        /*
         * The following two variables can be packed, because
         * a vmap_area object can be either:
         *    1) in "free" tree (root is free_vmap_area_root)
         *    2) or "busy" tree (root is vmap_area_root)
         */
        union {
                unsigned long subtree_max_size; /* in "free" tree */
                struct vm_struct *vm;           /* in "busy" tree */
        };
        unsigned long flags; /* mark type of vm_map_ram area */
};

/* archs that select HAVE_ARCH_HUGE_VMAP should override one or more of these */
#ifndef arch_vmap_p4d_supported
static inline bool arch_vmap_p4d_supported(pgprot_t prot)
{
        return false;
}
#endif

#ifndef arch_vmap_pud_supported
static inline bool arch_vmap_pud_supported(pgprot_t prot)
{
        return false;
}
#endif

#ifndef arch_vmap_pmd_supported
static inline bool arch_vmap_pmd_supported(pgprot_t prot)
{
        return false;
}
#endif

#ifndef arch_vmap_pte_range_map_size
static inline unsigned long arch_vmap_pte_range_map_size(unsigned long addr, unsigned long end,
                                                          u64 pfn, unsigned int max_page_shift)
{
        return PAGE_SIZE;
}
#endif

#ifndef arch_vmap_pte_supported_shift
static inline int arch_vmap_pte_supported_shift(unsigned long size)
{
        return PAGE_SHIFT;
}
#endif

#ifndef arch_vmap_pgprot_tagged
static inline pgprot_t arch_vmap_pgprot_tagged(pgprot_t prot)
{
        return prot;
}
#endif

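/*
 * Override sketch (illustrative, not part of this header): each hook above
 * is guarded by "#ifndef <name>", and <asm/vmalloc.h> is included before
 * the guards, so an architecture can supply its own version by defining
 * the macro to itself and providing the function, roughly:
 *
 *      #define arch_vmap_pmd_supported arch_vmap_pmd_supported
 *      static inline bool arch_vmap_pmd_supported(pgprot_t prot)
 *      {
 *              return cpu_supports_pmd_size_vmap();
 *      }
 *
 * cpu_supports_pmd_size_vmap() is a hypothetical helper standing in for an
 * arch-specific feature test; anything not overridden falls back to the
 * safe defaults above.
 */
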
/*
 * Highlevel APIs for driver use
 */
extern void vm_unmap_ram(const void *mem, unsigned int count);
extern void *vm_map_ram(struct page **pages, unsigned int count, int node);
extern void vm_unmap_aliases(void);

#ifdef CONFIG_MMU
extern unsigned long vmalloc_nr_pages(void);
#else
static inline unsigned long vmalloc_nr_pages(void) { return 0; }
#endif

extern void *vmalloc_noprof(unsigned long size) __alloc_size(1);
#define vmalloc(...) alloc_hooks(vmalloc_noprof(__VA_ARGS__))

extern void *vzalloc_noprof(unsigned long size) __alloc_size(1);
#define vzalloc(...) alloc_hooks(vzalloc_noprof(__VA_ARGS__))

extern void *vmalloc_user_noprof(unsigned long size) __alloc_size(1);
#define vmalloc_user(...) alloc_hooks(vmalloc_user_noprof(__VA_ARGS__))

extern void *vmalloc_node_noprof(unsigned long size, int node) __alloc_size(1);
#define vmalloc_node(...) alloc_hooks(vmalloc_node_noprof(__VA_ARGS__))

extern void *vzalloc_node_noprof(unsigned long size, int node) __alloc_size(1);
#define vzalloc_node(...) alloc_hooks(vzalloc_node_noprof(__VA_ARGS__))

extern void *vmalloc_32_noprof(unsigned long size) __alloc_size(1);
#define vmalloc_32(...) alloc_hooks(vmalloc_32_noprof(__VA_ARGS__))

extern void *vmalloc_32_user_noprof(unsigned long size) __alloc_size(1);
#define vmalloc_32_user(...) alloc_hooks(vmalloc_32_user_noprof(__VA_ARGS__))

extern void *__vmalloc_noprof(unsigned long size, gfp_t gfp_mask) __alloc_size(1);
#define __vmalloc(...) alloc_hooks(__vmalloc_noprof(__VA_ARGS__))

extern void *__vmalloc_node_range_noprof(unsigned long size, unsigned long align,
                unsigned long start, unsigned long end, gfp_t gfp_mask,
                pgprot_t prot, unsigned long vm_flags, int node,
                const void *caller) __alloc_size(1);
#define __vmalloc_node_range(...) alloc_hooks(__vmalloc_node_range_noprof(__VA_ARGS__))

void *__vmalloc_node_noprof(unsigned long size, unsigned long align, gfp_t gfp_mask,
                int node, const void *caller) __alloc_size(1);
#define __vmalloc_node(...) alloc_hooks(__vmalloc_node_noprof(__VA_ARGS__))

void *vmalloc_huge_noprof(unsigned long size, gfp_t gfp_mask) __alloc_size(1);
#define vmalloc_huge(...) alloc_hooks(vmalloc_huge_noprof(__VA_ARGS__))

extern void *__vmalloc_array_noprof(size_t n, size_t size, gfp_t flags) __alloc_size(1, 2);
#define __vmalloc_array(...) alloc_hooks(__vmalloc_array_noprof(__VA_ARGS__))

extern void *vmalloc_array_noprof(size_t n, size_t size) __alloc_size(1, 2);
#define vmalloc_array(...) alloc_hooks(vmalloc_array_noprof(__VA_ARGS__))

extern void *__vcalloc_noprof(size_t n, size_t size, gfp_t flags) __alloc_size(1, 2);
#define __vcalloc(...) alloc_hooks(__vcalloc_noprof(__VA_ARGS__))

extern void *vcalloc_noprof(size_t n, size_t size) __alloc_size(1, 2);
#define vcalloc(...) alloc_hooks(vcalloc_noprof(__VA_ARGS__))

void * __must_check vrealloc_noprof(const void *p, size_t size, gfp_t flags)
                __realloc_size(2);
#define vrealloc(...) alloc_hooks(vrealloc_noprof(__VA_ARGS__))

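/*
 * Usage sketch (illustrative, not part of this header): the *_array and
 * *calloc variants exist so the element-count multiplication is
 * overflow-checked for the caller.  Assuming a caller-defined struct foo
 * and element count nr_entries:
 *
 *      struct foo *tbl;
 *
 *      tbl = vcalloc(nr_entries, sizeof(*tbl));
 *      if (!tbl)
 *              return -ENOMEM;
 *      ...
 *      vfree(tbl);
 *
 * Open-coding vzalloc(nr_entries * sizeof(*tbl)) would silently wrap on
 * overflow; vcalloc()/vmalloc_array() fail the allocation instead.
 */
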
extern void vfree(const void *addr);
extern void vfree_atomic(const void *addr);

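/*
 * Usage sketch (illustrative, not part of this header): the classic
 * pattern for a large, virtually contiguous buffer that need not be
 * physically contiguous.  SZ_1M is just an example size:
 *
 *      void *buf = vzalloc(SZ_1M);
 *
 *      if (!buf)
 *              return -ENOMEM;
 *      ...
 *      vfree(buf);
 *
 * vzalloc()/vfree() may sleep; from contexts where that is not allowed,
 * vfree_atomic() defers the actual freeing instead.
 */
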
extern void *vmap(struct page **pages, unsigned int count,
                        unsigned long flags, pgprot_t prot);
void *vmap_pfn(unsigned long *pfns, unsigned int count, pgprot_t prot);
extern void vunmap(const void *addr);

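/*
 * Usage sketch (illustrative, not part of this header): vmap() gives an
 * already-allocated set of pages one contiguous kernel virtual mapping.
 * Assuming "pages" holds nr_pages struct page pointers obtained elsewhere:
 *
 *      void *va = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
 *
 *      if (!va)
 *              return -ENOMEM;
 *      ...
 *      vunmap(va);
 *
 * vunmap() only tears down the mapping; the pages themselves are not freed
 * unless the area was created with VM_MAP_PUT_PAGES.
 */
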
extern int remap_vmalloc_range_partial(struct vm_area_struct *vma,
                                        unsigned long uaddr, void *kaddr,
                                        unsigned long pgoff, unsigned long size);

extern int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
                                        unsigned long pgoff);

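/*
 * Usage sketch (illustrative, not part of this header): a driver that
 * wants to share a vmalloc'ed buffer with userspace typically allocates it
 * with vmalloc_user() (which sets VM_USERMAP and zeroes the memory) and
 * then, in its ->mmap() handler, does roughly:
 *
 *      static int example_mmap(struct file *file, struct vm_area_struct *vma)
 *      {
 *              return remap_vmalloc_range(vma, example_buf, 0);
 *      }
 *
 * example_mmap and example_buf are hypothetical names; the buffer must
 * come from vmalloc_user()/vmalloc_32_user() so the VM_USERMAP check in
 * remap_vmalloc_range() passes.
 */
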
/*
 * Architectures can set this mask to a combination of PGTBL_P?D_MODIFIED values
 * and let generic vmalloc and ioremap code know when arch_sync_kernel_mappings()
 * needs to be called.
 */
#ifndef ARCH_PAGE_TABLE_SYNC_MASK
#define ARCH_PAGE_TABLE_SYNC_MASK 0
#endif

/*
 * There is no default implementation for arch_sync_kernel_mappings(). It is
 * relied upon the compiler to optimize calls out if ARCH_PAGE_TABLE_SYNC_MASK
 * is 0.
 */
void arch_sync_kernel_mappings(unsigned long start, unsigned long end);

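/*
 * Override sketch (illustrative, not part of this header): an architecture
 * whose upper page-table levels are not shared by all page tables might
 * define, in its own headers, something like:
 *
 *      #define ARCH_PAGE_TABLE_SYNC_MASK       PGTBL_PMD_MODIFIED
 *
 * and implement arch_sync_kernel_mappings(start, end) to propagate new
 * kernel mappings in that range to every page table.  With the default
 * mask of 0 above, generic code never needs the call and the compiler can
 * drop it entirely.
 */
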
/*
 * Lowlevel-APIs (not for driver use!)
 */

static inline size_t get_vm_area_size(const struct vm_struct *area)
{
        if (!(area->flags & VM_NO_GUARD))
                /* return actual size without guard page */
                return area->size - PAGE_SIZE;
        else
                return area->size;

}

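/*
 * Worked example: for an area created without VM_NO_GUARD, area->size
 * includes one trailing guard page.  A request for 3 pages therefore has
 * area->size == 4 * PAGE_SIZE, while get_vm_area_size() reports the usable
 * 3 * PAGE_SIZE; with VM_NO_GUARD the two values are identical.
 */
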
extern struct vm_struct *get_vm_area(unsigned long size, unsigned long flags);
extern struct vm_struct *get_vm_area_caller(unsigned long size,
                                        unsigned long flags, const void *caller);
extern struct vm_struct *__get_vm_area_caller(unsigned long size,
                                        unsigned long flags,
                                        unsigned long start, unsigned long end,
                                        const void *caller);
void free_vm_area(struct vm_struct *area);
extern struct vm_struct *remove_vm_area(const void *addr);
extern struct vm_struct *find_vm_area(const void *addr);
struct vmap_area *find_vmap_area(unsigned long addr);

static inline bool is_vm_area_hugepages(const void *addr)
{
        /*
         * This may not 100% tell if the area is mapped with > PAGE_SIZE
         * page table entries, if for some reason the architecture indicates
         * larger sizes are available but decides not to use them, nothing
         * prevents that. This only indicates the size of the physical page
         * allocated in the vmalloc layer.
         */
#ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
        return find_vm_area(addr)->page_order > 0;
#else
        return false;
#endif
}

#ifdef CONFIG_MMU
int vm_area_map_pages(struct vm_struct *area, unsigned long start,
                      unsigned long end, struct page **pages);
void vm_area_unmap_pages(struct vm_struct *area, unsigned long start,
                         unsigned long end);
void vunmap_range(unsigned long addr, unsigned long end);
static inline void set_vm_flush_reset_perms(void *addr)
{
        struct vm_struct *vm = find_vm_area(addr);

        if (vm)
                vm->flags |= VM_FLUSH_RESET_PERMS;
}

#else
static inline void set_vm_flush_reset_perms(void *addr)
{
}
#endif

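/*
 * Usage sketch (illustrative, not part of this header): code that changes
 * the permissions of a vmalloc'ed region, e.g. to hold generated code,
 * typically calls set_vm_flush_reset_perms() right after allocating so
 * that vfree() later restores the direct map and flushes the TLB
 * (size assumed page-aligned here):
 *
 *      void *buf = __vmalloc(size, GFP_KERNEL);
 *
 *      if (buf) {
 *              set_vm_flush_reset_perms(buf);
 *              set_memory_ro((unsigned long)buf, size >> PAGE_SHIFT);
 *              set_memory_x((unsigned long)buf, size >> PAGE_SHIFT);
 *      }
 *
 * set_memory_ro()/set_memory_x() come from <linux/set_memory.h>; as the
 * VM_FLUSH_RESET_PERMS comment above notes, such a region must not be
 * freed from atomic context.
 */
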
/* for /proc/kcore */
extern long vread_iter(struct iov_iter *iter, const char *addr, size_t count);

/*
 * Internals.  Don't use..
 */
extern __init void vm_area_add_early(struct vm_struct *vm);
extern __init void vm_area_register_early(struct vm_struct *vm, size_t align);

#ifdef CONFIG_SMP
# ifdef CONFIG_MMU
struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
                                     const size_t *sizes, int nr_vms,
                                     size_t align);

void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms);
# else
static inline struct vm_struct **
pcpu_get_vm_areas(const unsigned long *offsets,
                const size_t *sizes, int nr_vms,
                size_t align)
{
        return NULL;
}

static inline void
pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
{
}
# endif
#endif

#ifdef CONFIG_MMU
#define VMALLOC_TOTAL (VMALLOC_END - VMALLOC_START)
#else
#define VMALLOC_TOTAL 0UL
#endif

int register_vmap_purge_notifier(struct notifier_block *nb);
int unregister_vmap_purge_notifier(struct notifier_block *nb);

#if defined(CONFIG_MMU) && defined(CONFIG_PRINTK)
bool vmalloc_dump_obj(void *object);
#else
static inline bool vmalloc_dump_obj(void *object) { return false; }
#endif

#endif /* _LINUX_VMALLOC_H */