x86: inline huge vmap supported functions
include/linux/vmalloc.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_VMALLOC_H
#define _LINUX_VMALLOC_H

#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/llist.h>
#include <asm/page.h>		/* pgprot_t */
#include <linux/rbtree.h>
#include <linux/overflow.h>

#include <asm/vmalloc.h>

struct vm_area_struct;		/* vma defining user mapping in mm_types.h */
struct notifier_block;		/* in notifier.h */

/* bits in flags of vmalloc's vm_struct below */
#define VM_IOREMAP		0x00000001	/* ioremap() and friends */
#define VM_ALLOC		0x00000002	/* vmalloc() */
#define VM_MAP			0x00000004	/* vmap()ed pages */
#define VM_USERMAP		0x00000008	/* suitable for remap_vmalloc_range */
#define VM_DMA_COHERENT		0x00000010	/* dma_alloc_coherent */
#define VM_UNINITIALIZED	0x00000020	/* vm_struct is not fully initialized */
#define VM_NO_GUARD		0x00000040	/* don't add guard page */
#define VM_KASAN		0x00000080	/* has allocated kasan shadow memory */
#define VM_FLUSH_RESET_PERMS	0x00000100	/* reset direct map and flush TLB on unmap, can't be freed in atomic context */
#define VM_MAP_PUT_PAGES	0x00000200	/* put pages and free array in vfree */

/*
 * VM_KASAN is used slightly differently depending on CONFIG_KASAN_VMALLOC.
 *
 * If IS_ENABLED(CONFIG_KASAN_VMALLOC), VM_KASAN is set on a vm_struct after
 * shadow memory has been mapped. It's used to handle allocation errors so that
 * we don't try to poison shadow on free if it was never allocated.
 *
 * Otherwise, VM_KASAN is set for kasan_module_alloc() allocations and used to
 * determine which allocations need the module shadow freed.
 */

/* bits [20..32] reserved for arch specific ioremap internals */

/*
 * Maximum alignment for ioremap() regions.
 * Can be overridden by arch-specific value.
 */
#ifndef IOREMAP_MAX_ORDER
#define IOREMAP_MAX_ORDER	(7 + PAGE_SHIFT)	/* 128 pages */
#endif

struct vm_struct {
	struct vm_struct	*next;
	void			*addr;
	unsigned long		size;
	unsigned long		flags;
	struct page		**pages;
	unsigned int		nr_pages;
	phys_addr_t		phys_addr;
	const void		*caller;
};

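/*
 * Illustrative sketch (editor's example, not part of this header): how a
 * caller might inspect the vm_struct behind a vmalloc() allocation via
 * find_vm_area(), which is declared further down in this file.
 *
 *	void *p = vmalloc(4 * PAGE_SIZE);
 *	struct vm_struct *vm = p ? find_vm_area(p) : NULL;
 *
 *	if (vm)
 *		pr_info("addr=%px size=%lu nr_pages=%u\n",
 *			vm->addr, vm->size, vm->nr_pages);
 *	vfree(p);
 */
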
struct vmap_area {
	unsigned long va_start;
	unsigned long va_end;

	struct rb_node rb_node;		/* address sorted rbtree */
	struct list_head list;		/* address sorted list */

	/*
	 * The following two variables can be packed, because
	 * a vmap_area object can be either:
	 *    1) in "free" tree (root is free_vmap_area_root)
	 *    2) or "busy" tree (root is vmap_area_root)
	 */
	union {
		unsigned long subtree_max_size;	/* in "free" tree */
		struct vm_struct *vm;		/* in "busy" tree */
	};
};

#ifndef CONFIG_HAVE_ARCH_HUGE_VMAP
static inline bool arch_vmap_p4d_supported(pgprot_t prot) { return false; }
static inline bool arch_vmap_pud_supported(pgprot_t prot) { return false; }
static inline bool arch_vmap_pmd_supported(pgprot_t prot) { return false; }
#endif

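/*
 * Hedged sketch (editor's example): an architecture that selects
 * CONFIG_HAVE_ARCH_HUGE_VMAP supplies its own inline versions of these
 * helpers in <asm/vmalloc.h> instead of the stubs above. Per the commit
 * named in the title, the x86 versions look roughly like this, gating
 * huge vmap mappings on the PSE/GBPAGES CPU features:
 *
 *	static inline bool arch_vmap_p4d_supported(pgprot_t prot)
 *	{
 *		return false;
 *	}
 *
 *	static inline bool arch_vmap_pud_supported(pgprot_t prot)
 *	{
 *		return boot_cpu_has(X86_FEATURE_GBPAGES);
 *	}
 *
 *	static inline bool arch_vmap_pmd_supported(pgprot_t prot)
 *	{
 *		return boot_cpu_has(X86_FEATURE_PSE);
 *	}
 */
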
/*
 *	High-level APIs for driver use
 */
extern void vm_unmap_ram(const void *mem, unsigned int count);
extern void *vm_map_ram(struct page **pages, unsigned int count, int node);
extern void vm_unmap_aliases(void);

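/*
 * Illustrative sketch (editor's example): a transient mapping with
 * vm_map_ram()/vm_unmap_ram(). 'pages', 'nr', 'data' and 'len' are
 * assumed to be provided by the caller.
 *
 *	void *va = vm_map_ram(pages, nr, NUMA_NO_NODE);
 *
 *	if (va) {
 *		memcpy(va, data, len);	// use the contiguous view
 *		vm_unmap_ram(va, nr);	// count must match the map call
 *	}
 */
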
#ifdef CONFIG_MMU
extern void __init vmalloc_init(void);
extern unsigned long vmalloc_nr_pages(void);
#else
static inline void vmalloc_init(void)
{
}
static inline unsigned long vmalloc_nr_pages(void) { return 0; }
#endif

extern void *vmalloc(unsigned long size);
extern void *vzalloc(unsigned long size);
extern void *vmalloc_user(unsigned long size);
extern void *vmalloc_node(unsigned long size, int node);
extern void *vzalloc_node(unsigned long size, int node);
extern void *vmalloc_32(unsigned long size);
extern void *vmalloc_32_user(unsigned long size);
extern void *__vmalloc(unsigned long size, gfp_t gfp_mask);
extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
			unsigned long start, unsigned long end, gfp_t gfp_mask,
			pgprot_t prot, unsigned long vm_flags, int node,
			const void *caller);
void *__vmalloc_node(unsigned long size, unsigned long align, gfp_t gfp_mask,
		int node, const void *caller);

extern void vfree(const void *addr);
extern void vfree_atomic(const void *addr);

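/*
 * Illustrative sketch (editor's example): a typical allocation of a
 * large, virtually contiguous table. array_size() comes from
 * <linux/overflow.h>, included above; 'struct foo' and 'nents' are
 * hypothetical, caller-provided names.
 *
 *	struct foo *tbl = vmalloc(array_size(nents, sizeof(*tbl)));
 *
 *	if (!tbl)
 *		return -ENOMEM;
 *	...
 *	vfree(tbl);	// use vfree_atomic() where sleeping is not allowed
 */
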
extern void *vmap(struct page **pages, unsigned int count,
			unsigned long flags, pgprot_t prot);
void *vmap_pfn(unsigned long *pfns, unsigned int count, pgprot_t prot);
extern void vunmap(const void *addr);

extern int remap_vmalloc_range_partial(struct vm_area_struct *vma,
				       unsigned long uaddr, void *kaddr,
				       unsigned long pgoff, unsigned long size);

extern int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
							unsigned long pgoff);

/*
 * Architectures can set this mask to a combination of PGTBL_P?D_MODIFIED values
 * and let generic vmalloc and ioremap code know when arch_sync_kernel_mappings()
 * needs to be called.
 */
#ifndef ARCH_PAGE_TABLE_SYNC_MASK
#define ARCH_PAGE_TABLE_SYNC_MASK 0
#endif

/*
 * There is no default implementation for arch_sync_kernel_mappings(). The
 * compiler is relied upon to optimize out calls to it when
 * ARCH_PAGE_TABLE_SYNC_MASK is 0.
 */
void arch_sync_kernel_mappings(unsigned long start, unsigned long end);

/*
 *	Low-level APIs (not for driver use!)
 */

static inline size_t get_vm_area_size(const struct vm_struct *area)
{
	if (!(area->flags & VM_NO_GUARD))
		/* return actual size without guard page */
		return area->size - PAGE_SIZE;
	else
		return area->size;
}

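/*
 * Worked example (editor's note): a default vmalloc() area for one
 * usable page has area->size == 2 * PAGE_SIZE (payload plus trailing
 * guard page), so get_vm_area_size() returns PAGE_SIZE. With
 * VM_NO_GUARD the two values are equal.
 */
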
extern struct vm_struct *get_vm_area(unsigned long size, unsigned long flags);
extern struct vm_struct *get_vm_area_caller(unsigned long size,
					unsigned long flags, const void *caller);
extern struct vm_struct *__get_vm_area_caller(unsigned long size,
					unsigned long flags,
					unsigned long start, unsigned long end,
					const void *caller);
void free_vm_area(struct vm_struct *area);
extern struct vm_struct *remove_vm_area(const void *addr);
extern struct vm_struct *find_vm_area(const void *addr);

#ifdef CONFIG_MMU
extern int map_kernel_range_noflush(unsigned long start, unsigned long size,
				    pgprot_t prot, struct page **pages);
int map_kernel_range(unsigned long start, unsigned long size, pgprot_t prot,
		struct page **pages);
extern void unmap_kernel_range_noflush(unsigned long addr, unsigned long size);
extern void unmap_kernel_range(unsigned long addr, unsigned long size);
static inline void set_vm_flush_reset_perms(void *addr)
{
	struct vm_struct *vm = find_vm_area(addr);

	if (vm)
		vm->flags |= VM_FLUSH_RESET_PERMS;
}
#else
static inline int
map_kernel_range_noflush(unsigned long start, unsigned long size,
			pgprot_t prot, struct page **pages)
{
	return size >> PAGE_SHIFT;
}
#define map_kernel_range map_kernel_range_noflush
static inline void
unmap_kernel_range_noflush(unsigned long addr, unsigned long size)
{
}
#define unmap_kernel_range unmap_kernel_range_noflush
static inline void set_vm_flush_reset_perms(void *addr)
{
}
#endif

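/*
 * Illustrative sketch (editor's example): marking an allocation so that
 * vfree() resets the direct map and flushes the TLB before the pages are
 * freed; the flag is set before the permissions are changed.
 * set_memory_ro()/set_memory_x() come from <asm/set_memory.h>; 'size'
 * and 'npages' are assumed to be caller-provided.
 *
 *	void *p = __vmalloc(size, GFP_KERNEL);
 *
 *	if (p) {
 *		set_vm_flush_reset_perms(p);
 *		set_memory_ro((unsigned long)p, npages);
 *		set_memory_x((unsigned long)p, npages);
 *	}
 */
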
/* for /dev/kmem */
extern long vread(char *buf, char *addr, unsigned long count);
extern long vwrite(char *buf, char *addr, unsigned long count);

/*
 *	Internals. Don't use.
 */
extern struct list_head vmap_area_list;
extern __init void vm_area_add_early(struct vm_struct *vm);
extern __init void vm_area_register_early(struct vm_struct *vm, size_t align);

#ifdef CONFIG_SMP
# ifdef CONFIG_MMU
struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
				     const size_t *sizes, int nr_vms,
				     size_t align);

void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms);
# else
static inline struct vm_struct **
pcpu_get_vm_areas(const unsigned long *offsets,
		const size_t *sizes, int nr_vms,
		size_t align)
{
	return NULL;
}

static inline void
pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
{
}
# endif
#endif

#ifdef CONFIG_MMU
#define VMALLOC_TOTAL (VMALLOC_END - VMALLOC_START)
#else
#define VMALLOC_TOTAL 0UL
#endif

int register_vmap_purge_notifier(struct notifier_block *nb);
int unregister_vmap_purge_notifier(struct notifier_block *nb);

#if defined(CONFIG_MMU) && defined(CONFIG_PRINTK)
bool vmalloc_dump_obj(void *object);
#else
static inline bool vmalloc_dump_obj(void *object) { return false; }
#endif

#endif	/* _LINUX_VMALLOC_H */