/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_VMALLOC_H
#define _LINUX_VMALLOC_H

#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/llist.h>
#include <asm/page.h>		/* pgprot_t */
#include <linux/rbtree.h>
#include <linux/overflow.h>

#include <asm/vmalloc.h>

struct vm_area_struct;		/* vma defining user mapping in mm_types.h */
struct notifier_block;		/* in notifier.h */
struct iov_iter;		/* in uio.h */

/* bits in flags of vmalloc's vm_struct below */
#define VM_IOREMAP		0x00000001	/* ioremap() and friends */
#define VM_ALLOC		0x00000002	/* vmalloc() */
#define VM_MAP			0x00000004	/* vmap()ed pages */
#define VM_USERMAP		0x00000008	/* suitable for remap_vmalloc_range */
#define VM_DMA_COHERENT		0x00000010	/* dma_alloc_coherent */
#define VM_UNINITIALIZED	0x00000020	/* vm_struct is not fully initialized */
#define VM_NO_GUARD		0x00000040	/* ***DANGEROUS*** don't add guard page */
#define VM_KASAN		0x00000080	/* has allocated kasan shadow memory */
#define VM_FLUSH_RESET_PERMS	0x00000100	/* reset direct map and flush TLB on unmap, can't be freed in atomic context */
#define VM_MAP_PUT_PAGES	0x00000200	/* put pages and free array in vfree */
#define VM_ALLOW_HUGE_VMAP	0x00000400	/* Allow for huge pages on archs with HAVE_ARCH_HUGE_VMALLOC */

#if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \
	!defined(CONFIG_KASAN_VMALLOC)
#define VM_DEFER_KMEMLEAK	0x00000800	/* defer kmemleak object creation */
#else
#define VM_DEFER_KMEMLEAK	0
#endif

/* bits [20..32] reserved for arch specific ioremap internals */

/*
 * Maximum alignment for ioremap() regions.
 * Can be overridden by arch-specific value.
 */
#ifndef IOREMAP_MAX_ORDER
#define IOREMAP_MAX_ORDER	(7 + PAGE_SHIFT)	/* 128 pages */
#endif

struct vm_struct {
	struct vm_struct	*next;
	void			*addr;
	unsigned long		size;
	unsigned long		flags;
	struct page		**pages;
#ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
	unsigned int		page_order;
#endif
	unsigned int		nr_pages;
	phys_addr_t		phys_addr;
	const void		*caller;
};

struct vmap_area {
	unsigned long va_start;
	unsigned long va_end;

	struct rb_node rb_node;		/* address sorted rbtree */
	struct list_head list;		/* address sorted list */

	/*
	 * The following two variables can be packed, because
	 * a vmap_area object can be either:
	 *    1) in "free" tree (root is free_vmap_area_root)
	 *    2) or "busy" tree (root is vmap_area_root)
	 */
	union {
		unsigned long subtree_max_size;	/* in "free" tree */
		struct vm_struct *vm;		/* in "busy" tree */
	};
	unsigned long flags;	/* mark type of vm_map_ram area */
};

/* archs that select HAVE_ARCH_HUGE_VMAP should override one or more of these */
#ifndef arch_vmap_p4d_supported
static inline bool arch_vmap_p4d_supported(pgprot_t prot)
{
	return false;
}
#endif

#ifndef arch_vmap_pud_supported
static inline bool arch_vmap_pud_supported(pgprot_t prot)
{
	return false;
}
#endif

#ifndef arch_vmap_pmd_supported
static inline bool arch_vmap_pmd_supported(pgprot_t prot)
{
	return false;
}
#endif

#ifndef arch_vmap_pte_range_map_size
static inline unsigned long arch_vmap_pte_range_map_size(unsigned long addr, unsigned long end,
							 u64 pfn, unsigned int max_page_shift)
{
	return PAGE_SIZE;
}
#endif

#ifndef arch_vmap_pte_supported_shift
static inline int arch_vmap_pte_supported_shift(unsigned long size)
{
	return PAGE_SHIFT;
}
#endif

#ifndef arch_vmap_pgprot_tagged
static inline pgprot_t arch_vmap_pgprot_tagged(pgprot_t prot)
{
	return prot;
}
#endif
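
/*
 * Example (illustrative sketch, not taken from any real architecture): an
 * arch that selects HAVE_ARCH_HUGE_VMAP overrides a hook by defining a
 * macro of the same name in its <asm/vmalloc.h>, which compiles out the
 * generic fallback above. has_hw_pmd_mappings() is hypothetical.
 *
 *	#define arch_vmap_pmd_supported arch_vmap_pmd_supported
 *	static inline bool arch_vmap_pmd_supported(pgprot_t prot)
 *	{
 *		return has_hw_pmd_mappings();
 *	}
 */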

/*
 *	Highlevel APIs for driver use
 */
extern void vm_unmap_ram(const void *mem, unsigned int count);
extern void *vm_map_ram(struct page **pages, unsigned int count, int node);
extern void vm_unmap_aliases(void);
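
/*
 * Example (illustrative sketch): vm_map_ram() creates a transient,
 * lazily-unmapped mapping over an existing page array; every call must be
 * paired with vm_unmap_ram() using the same count. "pages", "nr" and "src"
 * are hypothetical.
 *
 *	void *va = vm_map_ram(pages, nr, NUMA_NO_NODE);
 *	if (!va)
 *		return -ENOMEM;
 *	memcpy(va, src, nr * PAGE_SIZE);
 *	vm_unmap_ram(va, nr);
 */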

#ifdef CONFIG_MMU
extern unsigned long vmalloc_nr_pages(void);
#else
static inline unsigned long vmalloc_nr_pages(void) { return 0; }
#endif

extern void *vmalloc(unsigned long size) __alloc_size(1);
extern void *vzalloc(unsigned long size) __alloc_size(1);
extern void *vmalloc_user(unsigned long size) __alloc_size(1);
extern void *vmalloc_node(unsigned long size, int node) __alloc_size(1);
extern void *vzalloc_node(unsigned long size, int node) __alloc_size(1);
extern void *vmalloc_32(unsigned long size) __alloc_size(1);
extern void *vmalloc_32_user(unsigned long size) __alloc_size(1);
extern void *__vmalloc(unsigned long size, gfp_t gfp_mask) __alloc_size(1);
extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
		unsigned long start, unsigned long end, gfp_t gfp_mask,
		pgprot_t prot, unsigned long vm_flags, int node,
		const void *caller) __alloc_size(1);
void *__vmalloc_node(unsigned long size, unsigned long align, gfp_t gfp_mask,
		int node, const void *caller) __alloc_size(1);
void *vmalloc_huge(unsigned long size, gfp_t gfp_mask) __alloc_size(1);
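
/*
 * Example (illustrative sketch): vzalloc() is the usual choice for a large,
 * zeroed buffer that must be virtually but not physically contiguous.
 * "nbytes" and struct foo are hypothetical.
 *
 *	struct foo *tbl = vzalloc(nbytes);
 *	if (!tbl)
 *		return -ENOMEM;
 *	...
 *	vfree(tbl);
 */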

extern void *__vmalloc_array(size_t n, size_t size, gfp_t flags) __alloc_size(1, 2);
extern void *vmalloc_array(size_t n, size_t size) __alloc_size(1, 2);
extern void *__vcalloc(size_t n, size_t size, gfp_t flags) __alloc_size(1, 2);
extern void *vcalloc(size_t n, size_t size) __alloc_size(1, 2);

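/*
 * Example (illustrative sketch): vcalloc() checks n * size for overflow
 * (see <linux/overflow.h>) and zeroes the memory, so prefer it over an
 * open-coded vzalloc(n * size). "count" and struct item are hypothetical.
 *
 *	struct item *items = vcalloc(count, sizeof(*items));
 *	if (!items)
 *		return -ENOMEM;
 */
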
extern void vfree(const void *addr);
extern void vfree_atomic(const void *addr);

extern void *vmap(struct page **pages, unsigned int count,
			unsigned long flags, pgprot_t prot);
void *vmap_pfn(unsigned long *pfns, unsigned int count, pgprot_t prot);
extern void vunmap(const void *addr);
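
/*
 * Example (illustrative sketch): vmap() builds a long-lived contiguous
 * kernel mapping over pages the caller already holds; undo it with
 * vunmap(). "pages" and "nr" are hypothetical.
 *
 *	void *va = vmap(pages, nr, VM_MAP, PAGE_KERNEL);
 *	if (!va)
 *		return -ENOMEM;
 *	...
 *	vunmap(va);
 */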

extern int remap_vmalloc_range_partial(struct vm_area_struct *vma,
				       unsigned long uaddr, void *kaddr,
				       unsigned long pgoff, unsigned long size);

extern int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
							unsigned long pgoff);
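
/*
 * Example (illustrative sketch): a driver's ->mmap() handler can expose a
 * vmalloc_user() buffer (which sets VM_USERMAP) to userspace. foo_mmap()
 * and "buf" are hypothetical.
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return remap_vmalloc_range(vma, buf, vma->vm_pgoff);
 *	}
 */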

/*
 * Architectures can set this mask to a combination of PGTBL_P?D_MODIFIED values
 * and let generic vmalloc and ioremap code know when arch_sync_kernel_mappings()
 * needs to be called.
 */
#ifndef ARCH_PAGE_TABLE_SYNC_MASK
#define ARCH_PAGE_TABLE_SYNC_MASK 0
#endif

/*
 * There is no default implementation for arch_sync_kernel_mappings(). We rely
 * on the compiler to optimize the calls out if ARCH_PAGE_TABLE_SYNC_MASK
 * is 0.
 */
void arch_sync_kernel_mappings(unsigned long start, unsigned long end);

/*
 *	Lowlevel-APIs (not for driver use!)
 */

static inline size_t get_vm_area_size(const struct vm_struct *area)
{
	if (!(area->flags & VM_NO_GUARD))
		/* return actual size without guard page */
		return area->size - PAGE_SIZE;
	else
		return area->size;
}

extern struct vm_struct *get_vm_area(unsigned long size, unsigned long flags);
extern struct vm_struct *get_vm_area_caller(unsigned long size,
					unsigned long flags, const void *caller);
extern struct vm_struct *__get_vm_area_caller(unsigned long size,
					unsigned long flags,
					unsigned long start, unsigned long end,
					const void *caller);
void free_vm_area(struct vm_struct *area);
extern struct vm_struct *remove_vm_area(const void *addr);
extern struct vm_struct *find_vm_area(const void *addr);
struct vmap_area *find_vmap_area(unsigned long addr);
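
/*
 * Example (illustrative sketch): find_vm_area() maps a vmalloc address back
 * to its vm_struct descriptor. "addr" is a hypothetical vmalloc() result;
 * note that vm->size includes the guard page unless VM_NO_GUARD is set.
 *
 *	struct vm_struct *vm = find_vm_area(addr);
 *	if (vm)
 *		pr_info("area at %p spans %lu bytes\n", vm->addr, vm->size);
 */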

static inline bool is_vm_area_hugepages(const void *addr)
{
	/*
	 * This may not tell with 100% certainty whether the area is mapped
	 * with > PAGE_SIZE page table entries: the architecture may indicate
	 * larger sizes are available but decide not to use them, and nothing
	 * prevents that. This only reflects the size of the physical pages
	 * allocated in the vmalloc layer.
	 */
#ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
	return find_vm_area(addr)->page_order > 0;
#else
	return false;
#endif
}

#ifdef CONFIG_MMU
void vunmap_range(unsigned long addr, unsigned long end);
static inline void set_vm_flush_reset_perms(void *addr)
{
	struct vm_struct *vm = find_vm_area(addr);

	if (vm)
		vm->flags |= VM_FLUSH_RESET_PERMS;
}

#else
static inline void set_vm_flush_reset_perms(void *addr)
{
}
#endif
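
/*
 * Example (illustrative sketch): code that changes the permissions of a
 * vmalloc'ed region (e.g. a JIT making it executable) should mark the area
 * first, so vfree() knows to reset the direct map and flush the TLB.
 * set_memory_rox() is assumed available via <asm/set_memory.h>, and "size"
 * is assumed page-aligned.
 *
 *	void *p = __vmalloc(size, GFP_KERNEL);
 *	if (!p)
 *		return NULL;
 *	set_vm_flush_reset_perms(p);
 *	set_memory_rox((unsigned long)p, size >> PAGE_SHIFT);
 */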

/* for /proc/kcore */
extern long vread_iter(struct iov_iter *iter, const char *addr, size_t count);

/*
 *	Internals.  Don't use..
 */
extern struct list_head vmap_area_list;
extern __init void vm_area_add_early(struct vm_struct *vm);
extern __init void vm_area_register_early(struct vm_struct *vm, size_t align);

#ifdef CONFIG_SMP
# ifdef CONFIG_MMU
struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
				     const size_t *sizes, int nr_vms,
				     size_t align);

void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms);
# else
static inline struct vm_struct **
pcpu_get_vm_areas(const unsigned long *offsets,
		const size_t *sizes, int nr_vms,
		size_t align)
{
	return NULL;
}

static inline void
pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
{
}
# endif
#endif

#ifdef CONFIG_MMU
#define VMALLOC_TOTAL (VMALLOC_END - VMALLOC_START)
#else
#define VMALLOC_TOTAL 0UL
#endif
288 | ||
4da56b99 CW |
289 | int register_vmap_purge_notifier(struct notifier_block *nb); |
290 | int unregister_vmap_purge_notifier(struct notifier_block *nb); | |
291 | ||
5bb1bb35 | 292 | #if defined(CONFIG_MMU) && defined(CONFIG_PRINTK) |
98f18083 PM |
293 | bool vmalloc_dump_obj(void *object); |
294 | #else | |
295 | static inline bool vmalloc_dump_obj(void *object) { return false; } | |
296 | #endif | |
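
/*
 * Example (illustrative sketch): a subsystem that caches vmap mappings can
 * register a notifier to drop its cache when vmap space is purged.
 * foo_purge() and foo_drop_cached_mappings() are hypothetical.
 *
 *	static int foo_purge(struct notifier_block *nb, unsigned long action,
 *			     void *data)
 *	{
 *		foo_drop_cached_mappings();
 *		return NOTIFY_OK;
 *	}
 *	static struct notifier_block foo_nb = { .notifier_call = foo_purge };
 *
 *	register_vmap_purge_notifier(&foo_nb);
 */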

#endif /* _LINUX_VMALLOC_H */