Merge tag 'drm-intel-fixes-2016-06-30' of git://anongit.freedesktop.org/drm-intel...
[linux-2.6-block.git] / mm / vmalloc.c
CommitLineData
1da177e4
LT
1/*
2 * linux/mm/vmalloc.c
3 *
4 * Copyright (C) 1993 Linus Torvalds
5 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
6 * SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
7 * Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
930fc45a 8 * Numa awareness, Christoph Lameter, SGI, June 2005
1da177e4
LT
9 */
10
db64fe02 11#include <linux/vmalloc.h>
1da177e4
LT
12#include <linux/mm.h>
13#include <linux/module.h>
14#include <linux/highmem.h>
d43c36dc 15#include <linux/sched.h>
1da177e4
LT
16#include <linux/slab.h>
17#include <linux/spinlock.h>
18#include <linux/interrupt.h>
5f6a6a9c 19#include <linux/proc_fs.h>
a10aa579 20#include <linux/seq_file.h>
3ac7fe5a 21#include <linux/debugobjects.h>
23016969 22#include <linux/kallsyms.h>
db64fe02 23#include <linux/list.h>
4da56b99 24#include <linux/notifier.h>
db64fe02
NP
25#include <linux/rbtree.h>
26#include <linux/radix-tree.h>
27#include <linux/rcupdate.h>
f0aa6617 28#include <linux/pfn.h>
89219d37 29#include <linux/kmemleak.h>
60063497 30#include <linux/atomic.h>
3b32123d 31#include <linux/compiler.h>
32fcfd40 32#include <linux/llist.h>
0f616be1 33#include <linux/bitops.h>
3b32123d 34
1da177e4
LT
35#include <asm/uaccess.h>
36#include <asm/tlbflush.h>
2dca6999 37#include <asm/shmparam.h>
1da177e4 38
dd56b046
MG
39#include "internal.h"
40
32fcfd40
AV
41struct vfree_deferred {
42 struct llist_head list;
43 struct work_struct wq;
44};
45static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);
46
47static void __vunmap(const void *, int);
48
49static void free_work(struct work_struct *w)
50{
51 struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
52 struct llist_node *llnode = llist_del_all(&p->list);
53 while (llnode) {
54 void *p = llnode;
55 llnode = llist_next(llnode);
56 __vunmap(p, 1);
57 }
58}
59
db64fe02 60/*** Page table manipulation functions ***/
b221385b 61
1da177e4
LT
62static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
63{
64 pte_t *pte;
65
66 pte = pte_offset_kernel(pmd, addr);
67 do {
68 pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
69 WARN_ON(!pte_none(ptent) && !pte_present(ptent));
70 } while (pte++, addr += PAGE_SIZE, addr != end);
71}
72
db64fe02 73static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end)
1da177e4
LT
74{
75 pmd_t *pmd;
76 unsigned long next;
77
78 pmd = pmd_offset(pud, addr);
79 do {
80 next = pmd_addr_end(addr, end);
b9820d8f
TK
81 if (pmd_clear_huge(pmd))
82 continue;
1da177e4
LT
83 if (pmd_none_or_clear_bad(pmd))
84 continue;
85 vunmap_pte_range(pmd, addr, next);
86 } while (pmd++, addr = next, addr != end);
87}
88
db64fe02 89static void vunmap_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end)
1da177e4
LT
90{
91 pud_t *pud;
92 unsigned long next;
93
94 pud = pud_offset(pgd, addr);
95 do {
96 next = pud_addr_end(addr, end);
b9820d8f
TK
97 if (pud_clear_huge(pud))
98 continue;
1da177e4
LT
99 if (pud_none_or_clear_bad(pud))
100 continue;
101 vunmap_pmd_range(pud, addr, next);
102 } while (pud++, addr = next, addr != end);
103}
104
db64fe02 105static void vunmap_page_range(unsigned long addr, unsigned long end)
1da177e4
LT
106{
107 pgd_t *pgd;
108 unsigned long next;
1da177e4
LT
109
110 BUG_ON(addr >= end);
111 pgd = pgd_offset_k(addr);
1da177e4
LT
112 do {
113 next = pgd_addr_end(addr, end);
114 if (pgd_none_or_clear_bad(pgd))
115 continue;
116 vunmap_pud_range(pgd, addr, next);
117 } while (pgd++, addr = next, addr != end);
1da177e4
LT
118}
119
120static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
db64fe02 121 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
1da177e4
LT
122{
123 pte_t *pte;
124
db64fe02
NP
125 /*
126 * nr is a running index into the array which helps higher level
127 * callers keep track of where we're up to.
128 */
129
872fec16 130 pte = pte_alloc_kernel(pmd, addr);
1da177e4
LT
131 if (!pte)
132 return -ENOMEM;
133 do {
db64fe02
NP
134 struct page *page = pages[*nr];
135
136 if (WARN_ON(!pte_none(*pte)))
137 return -EBUSY;
138 if (WARN_ON(!page))
1da177e4
LT
139 return -ENOMEM;
140 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
db64fe02 141 (*nr)++;
1da177e4
LT
142 } while (pte++, addr += PAGE_SIZE, addr != end);
143 return 0;
144}
145
db64fe02
NP
146static int vmap_pmd_range(pud_t *pud, unsigned long addr,
147 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
1da177e4
LT
148{
149 pmd_t *pmd;
150 unsigned long next;
151
152 pmd = pmd_alloc(&init_mm, pud, addr);
153 if (!pmd)
154 return -ENOMEM;
155 do {
156 next = pmd_addr_end(addr, end);
db64fe02 157 if (vmap_pte_range(pmd, addr, next, prot, pages, nr))
1da177e4
LT
158 return -ENOMEM;
159 } while (pmd++, addr = next, addr != end);
160 return 0;
161}
162
db64fe02
NP
163static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
164 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
1da177e4
LT
165{
166 pud_t *pud;
167 unsigned long next;
168
169 pud = pud_alloc(&init_mm, pgd, addr);
170 if (!pud)
171 return -ENOMEM;
172 do {
173 next = pud_addr_end(addr, end);
db64fe02 174 if (vmap_pmd_range(pud, addr, next, prot, pages, nr))
1da177e4
LT
175 return -ENOMEM;
176 } while (pud++, addr = next, addr != end);
177 return 0;
178}
179
db64fe02
NP
180/*
181 * Set up page tables in kva (addr, end). The ptes shall have prot "prot", and
182 * will have pfns corresponding to the "pages" array.
183 *
184 * Ie. pte at addr+N*PAGE_SIZE shall point to pfn corresponding to pages[N]
185 */
8fc48985
TH
186static int vmap_page_range_noflush(unsigned long start, unsigned long end,
187 pgprot_t prot, struct page **pages)
1da177e4
LT
188{
189 pgd_t *pgd;
190 unsigned long next;
2e4e27c7 191 unsigned long addr = start;
db64fe02
NP
192 int err = 0;
193 int nr = 0;
1da177e4
LT
194
195 BUG_ON(addr >= end);
196 pgd = pgd_offset_k(addr);
1da177e4
LT
197 do {
198 next = pgd_addr_end(addr, end);
db64fe02 199 err = vmap_pud_range(pgd, addr, next, prot, pages, &nr);
1da177e4 200 if (err)
bf88c8c8 201 return err;
1da177e4 202 } while (pgd++, addr = next, addr != end);
db64fe02 203
db64fe02 204 return nr;
1da177e4
LT
205}
206
8fc48985
TH
207static int vmap_page_range(unsigned long start, unsigned long end,
208 pgprot_t prot, struct page **pages)
209{
210 int ret;
211
212 ret = vmap_page_range_noflush(start, end, prot, pages);
213 flush_cache_vmap(start, end);
214 return ret;
215}
216
81ac3ad9 217int is_vmalloc_or_module_addr(const void *x)
73bdf0a6
LT
218{
219 /*
ab4f2ee1 220 * ARM, x86-64 and sparc64 put modules in a special place,
73bdf0a6
LT
221 * and fall back on vmalloc() if that fails. Others
222 * just put it in the vmalloc space.
223 */
224#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
225 unsigned long addr = (unsigned long)x;
226 if (addr >= MODULES_VADDR && addr < MODULES_END)
227 return 1;
228#endif
229 return is_vmalloc_addr(x);
230}
231
48667e7a 232/*
add688fb 233 * Walk a vmap address to the struct page it maps.
48667e7a 234 */
add688fb 235struct page *vmalloc_to_page(const void *vmalloc_addr)
48667e7a
CL
236{
237 unsigned long addr = (unsigned long) vmalloc_addr;
add688fb 238 struct page *page = NULL;
48667e7a 239 pgd_t *pgd = pgd_offset_k(addr);
48667e7a 240
7aa413de
IM
241 /*
242 * XXX we might need to change this if we add VIRTUAL_BUG_ON for
243 * architectures that do not vmalloc module space
244 */
73bdf0a6 245 VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr));
59ea7463 246
48667e7a 247 if (!pgd_none(*pgd)) {
db64fe02 248 pud_t *pud = pud_offset(pgd, addr);
48667e7a 249 if (!pud_none(*pud)) {
db64fe02 250 pmd_t *pmd = pmd_offset(pud, addr);
48667e7a 251 if (!pmd_none(*pmd)) {
db64fe02
NP
252 pte_t *ptep, pte;
253
48667e7a
CL
254 ptep = pte_offset_map(pmd, addr);
255 pte = *ptep;
256 if (pte_present(pte))
add688fb 257 page = pte_page(pte);
48667e7a
CL
258 pte_unmap(ptep);
259 }
260 }
261 }
add688fb 262 return page;
48667e7a 263}
add688fb 264EXPORT_SYMBOL(vmalloc_to_page);
48667e7a
CL
265
266/*
add688fb 267 * Map a vmalloc()-space virtual address to the physical page frame number.
48667e7a 268 */
add688fb 269unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
48667e7a 270{
add688fb 271 return page_to_pfn(vmalloc_to_page(vmalloc_addr));
48667e7a 272}
add688fb 273EXPORT_SYMBOL(vmalloc_to_pfn);
48667e7a 274
db64fe02
NP
275
276/*** Global kva allocator ***/
277
db64fe02
NP
278#define VM_VM_AREA 0x04
279
db64fe02 280static DEFINE_SPINLOCK(vmap_area_lock);
f1c4069e
JK
281/* Export for kexec only */
282LIST_HEAD(vmap_area_list);
80c4bd7a 283static LLIST_HEAD(vmap_purge_list);
89699605
NP
284static struct rb_root vmap_area_root = RB_ROOT;
285
286/* The vmap cache globals are protected by vmap_area_lock */
287static struct rb_node *free_vmap_cache;
288static unsigned long cached_hole_size;
289static unsigned long cached_vstart;
290static unsigned long cached_align;
291
ca23e405 292static unsigned long vmap_area_pcpu_hole;
db64fe02
NP
293
294static struct vmap_area *__find_vmap_area(unsigned long addr)
1da177e4 295{
db64fe02
NP
296 struct rb_node *n = vmap_area_root.rb_node;
297
298 while (n) {
299 struct vmap_area *va;
300
301 va = rb_entry(n, struct vmap_area, rb_node);
302 if (addr < va->va_start)
303 n = n->rb_left;
cef2ac3f 304 else if (addr >= va->va_end)
db64fe02
NP
305 n = n->rb_right;
306 else
307 return va;
308 }
309
310 return NULL;
311}
312
313static void __insert_vmap_area(struct vmap_area *va)
314{
315 struct rb_node **p = &vmap_area_root.rb_node;
316 struct rb_node *parent = NULL;
317 struct rb_node *tmp;
318
319 while (*p) {
170168d0 320 struct vmap_area *tmp_va;
db64fe02
NP
321
322 parent = *p;
170168d0
NK
323 tmp_va = rb_entry(parent, struct vmap_area, rb_node);
324 if (va->va_start < tmp_va->va_end)
db64fe02 325 p = &(*p)->rb_left;
170168d0 326 else if (va->va_end > tmp_va->va_start)
db64fe02
NP
327 p = &(*p)->rb_right;
328 else
329 BUG();
330 }
331
332 rb_link_node(&va->rb_node, parent, p);
333 rb_insert_color(&va->rb_node, &vmap_area_root);
334
4341fa45 335 /* address-sort this list */
db64fe02
NP
336 tmp = rb_prev(&va->rb_node);
337 if (tmp) {
338 struct vmap_area *prev;
339 prev = rb_entry(tmp, struct vmap_area, rb_node);
340 list_add_rcu(&va->list, &prev->list);
341 } else
342 list_add_rcu(&va->list, &vmap_area_list);
343}
344
345static void purge_vmap_area_lazy(void);
346
4da56b99
CW
347static BLOCKING_NOTIFIER_HEAD(vmap_notify_list);
348
db64fe02
NP
349/*
350 * Allocate a region of KVA of the specified size and alignment, within the
351 * vstart and vend.
352 */
353static struct vmap_area *alloc_vmap_area(unsigned long size,
354 unsigned long align,
355 unsigned long vstart, unsigned long vend,
356 int node, gfp_t gfp_mask)
357{
358 struct vmap_area *va;
359 struct rb_node *n;
1da177e4 360 unsigned long addr;
db64fe02 361 int purged = 0;
89699605 362 struct vmap_area *first;
db64fe02 363
7766970c 364 BUG_ON(!size);
891c49ab 365 BUG_ON(offset_in_page(size));
89699605 366 BUG_ON(!is_power_of_2(align));
db64fe02 367
4da56b99
CW
368 might_sleep_if(gfpflags_allow_blocking(gfp_mask));
369
db64fe02
NP
370 va = kmalloc_node(sizeof(struct vmap_area),
371 gfp_mask & GFP_RECLAIM_MASK, node);
372 if (unlikely(!va))
373 return ERR_PTR(-ENOMEM);
374
7f88f88f
CM
375 /*
376 * Only scan the relevant parts containing pointers to other objects
377 * to avoid false negatives.
378 */
379 kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask & GFP_RECLAIM_MASK);
380
db64fe02
NP
381retry:
382 spin_lock(&vmap_area_lock);
89699605
NP
383 /*
384 * Invalidate cache if we have more permissive parameters.
385 * cached_hole_size notes the largest hole noticed _below_
386 * the vmap_area cached in free_vmap_cache: if size fits
387 * into that hole, we want to scan from vstart to reuse
388 * the hole instead of allocating above free_vmap_cache.
389 * Note that __free_vmap_area may update free_vmap_cache
390 * without updating cached_hole_size or cached_align.
391 */
392 if (!free_vmap_cache ||
393 size < cached_hole_size ||
394 vstart < cached_vstart ||
395 align < cached_align) {
396nocache:
397 cached_hole_size = 0;
398 free_vmap_cache = NULL;
399 }
400 /* record if we encounter less permissive parameters */
401 cached_vstart = vstart;
402 cached_align = align;
403
404 /* find starting point for our search */
405 if (free_vmap_cache) {
406 first = rb_entry(free_vmap_cache, struct vmap_area, rb_node);
248ac0e1 407 addr = ALIGN(first->va_end, align);
89699605
NP
408 if (addr < vstart)
409 goto nocache;
bcb615a8 410 if (addr + size < addr)
89699605
NP
411 goto overflow;
412
413 } else {
414 addr = ALIGN(vstart, align);
bcb615a8 415 if (addr + size < addr)
89699605
NP
416 goto overflow;
417
418 n = vmap_area_root.rb_node;
419 first = NULL;
420
421 while (n) {
db64fe02
NP
422 struct vmap_area *tmp;
423 tmp = rb_entry(n, struct vmap_area, rb_node);
424 if (tmp->va_end >= addr) {
db64fe02 425 first = tmp;
89699605
NP
426 if (tmp->va_start <= addr)
427 break;
428 n = n->rb_left;
429 } else
db64fe02 430 n = n->rb_right;
89699605 431 }
db64fe02
NP
432
433 if (!first)
434 goto found;
db64fe02 435 }
89699605
NP
436
437 /* from the starting point, walk areas until a suitable hole is found */
248ac0e1 438 while (addr + size > first->va_start && addr + size <= vend) {
89699605
NP
439 if (addr + cached_hole_size < first->va_start)
440 cached_hole_size = first->va_start - addr;
248ac0e1 441 addr = ALIGN(first->va_end, align);
bcb615a8 442 if (addr + size < addr)
89699605
NP
443 goto overflow;
444
92ca922f 445 if (list_is_last(&first->list, &vmap_area_list))
89699605 446 goto found;
92ca922f 447
6219c2a2 448 first = list_next_entry(first, list);
db64fe02
NP
449 }
450
89699605
NP
451found:
452 if (addr + size > vend)
453 goto overflow;
db64fe02
NP
454
455 va->va_start = addr;
456 va->va_end = addr + size;
457 va->flags = 0;
458 __insert_vmap_area(va);
89699605 459 free_vmap_cache = &va->rb_node;
db64fe02
NP
460 spin_unlock(&vmap_area_lock);
461
61e16557 462 BUG_ON(!IS_ALIGNED(va->va_start, align));
89699605
NP
463 BUG_ON(va->va_start < vstart);
464 BUG_ON(va->va_end > vend);
465
db64fe02 466 return va;
89699605
NP
467
468overflow:
469 spin_unlock(&vmap_area_lock);
470 if (!purged) {
471 purge_vmap_area_lazy();
472 purged = 1;
473 goto retry;
474 }
4da56b99
CW
475
476 if (gfpflags_allow_blocking(gfp_mask)) {
477 unsigned long freed = 0;
478 blocking_notifier_call_chain(&vmap_notify_list, 0, &freed);
479 if (freed > 0) {
480 purged = 0;
481 goto retry;
482 }
483 }
484
89699605 485 if (printk_ratelimit())
756a025f
JP
486 pr_warn("vmap allocation for size %lu failed: use vmalloc=<size> to increase size\n",
487 size);
89699605
NP
488 kfree(va);
489 return ERR_PTR(-EBUSY);
db64fe02
NP
490}
491
4da56b99
CW
492int register_vmap_purge_notifier(struct notifier_block *nb)
493{
494 return blocking_notifier_chain_register(&vmap_notify_list, nb);
495}
496EXPORT_SYMBOL_GPL(register_vmap_purge_notifier);
497
498int unregister_vmap_purge_notifier(struct notifier_block *nb)
499{
500 return blocking_notifier_chain_unregister(&vmap_notify_list, nb);
501}
502EXPORT_SYMBOL_GPL(unregister_vmap_purge_notifier);
503
db64fe02
NP
504static void __free_vmap_area(struct vmap_area *va)
505{
506 BUG_ON(RB_EMPTY_NODE(&va->rb_node));
89699605
NP
507
508 if (free_vmap_cache) {
509 if (va->va_end < cached_vstart) {
510 free_vmap_cache = NULL;
511 } else {
512 struct vmap_area *cache;
513 cache = rb_entry(free_vmap_cache, struct vmap_area, rb_node);
514 if (va->va_start <= cache->va_start) {
515 free_vmap_cache = rb_prev(&va->rb_node);
516 /*
517 * We don't try to update cached_hole_size or
518 * cached_align, but it won't go very wrong.
519 */
520 }
521 }
522 }
db64fe02
NP
523 rb_erase(&va->rb_node, &vmap_area_root);
524 RB_CLEAR_NODE(&va->rb_node);
525 list_del_rcu(&va->list);
526
ca23e405
TH
527 /*
528 * Track the highest possible candidate for pcpu area
529 * allocation. Areas outside of vmalloc area can be returned
530 * here too, consider only end addresses which fall inside
531 * vmalloc area proper.
532 */
533 if (va->va_end > VMALLOC_START && va->va_end <= VMALLOC_END)
534 vmap_area_pcpu_hole = max(vmap_area_pcpu_hole, va->va_end);
535
14769de9 536 kfree_rcu(va, rcu_head);
db64fe02
NP
537}
538
539/*
540 * Free a region of KVA allocated by alloc_vmap_area
541 */
542static void free_vmap_area(struct vmap_area *va)
543{
544 spin_lock(&vmap_area_lock);
545 __free_vmap_area(va);
546 spin_unlock(&vmap_area_lock);
547}
548
549/*
550 * Clear the pagetable entries of a given vmap_area
551 */
552static void unmap_vmap_area(struct vmap_area *va)
553{
554 vunmap_page_range(va->va_start, va->va_end);
555}
556
cd52858c
NP
557static void vmap_debug_free_range(unsigned long start, unsigned long end)
558{
559 /*
f48d97f3
JK
560 * Unmap page tables and force a TLB flush immediately if pagealloc
561 * debugging is enabled. This catches use after free bugs similarly to
562 * those in linear kernel virtual address space after a page has been
563 * freed.
cd52858c 564 *
f48d97f3
JK
565 * All the lazy freeing logic is still retained, in order to minimise
566 * intrusiveness of this debugging feature.
cd52858c 567 *
f48d97f3
JK
568 * This is going to be *slow* (linear kernel virtual address debugging
569 * doesn't do a broadcast TLB flush so it is a lot faster).
cd52858c 570 */
f48d97f3
JK
571 if (debug_pagealloc_enabled()) {
572 vunmap_page_range(start, end);
573 flush_tlb_kernel_range(start, end);
574 }
cd52858c
NP
575}
576
db64fe02
NP
577/*
578 * lazy_max_pages is the maximum amount of virtual address space we gather up
579 * before attempting to purge with a TLB flush.
580 *
581 * There is a tradeoff here: a larger number will cover more kernel page tables
582 * and take slightly longer to purge, but it will linearly reduce the number of
583 * global TLB flushes that must be performed. It would seem natural to scale
584 * this number up linearly with the number of CPUs (because vmapping activity
585 * could also scale linearly with the number of CPUs), however it is likely
586 * that in practice, workloads might be constrained in other ways that mean
587 * vmap activity will not scale linearly with CPUs. Also, I want to be
588 * conservative and not introduce a big latency on huge systems, so go with
589 * a less aggressive log scale. It will still be an improvement over the old
590 * code, and it will be simple to change the scale factor if we find that it
591 * becomes a problem on bigger systems.
592 */
593static unsigned long lazy_max_pages(void)
594{
595 unsigned int log;
596
597 log = fls(num_online_cpus());
598
599 return log * (32UL * 1024 * 1024 / PAGE_SIZE);
600}
601
602static atomic_t vmap_lazy_nr = ATOMIC_INIT(0);
603
02b709df
NP
604/* for per-CPU blocks */
605static void purge_fragmented_blocks_allcpus(void);
606
3ee48b6a
CW
607/*
608 * called before a call to iounmap() if the caller wants vm_area_struct's
609 * immediately freed.
610 */
611void set_iounmap_nonlazy(void)
612{
613 atomic_set(&vmap_lazy_nr, lazy_max_pages()+1);
614}
615
db64fe02
NP
616/*
617 * Purges all lazily-freed vmap areas.
618 *
619 * If sync is 0 then don't purge if there is already a purge in progress.
620 * If force_flush is 1, then flush kernel TLBs between *start and *end even
621 * if we found no lazy vmap areas to unmap (callers can use this to optimise
622 * their own TLB flushing).
623 * Returns with *start = min(*start, lowest purged address)
624 * *end = max(*end, highest purged address)
625 */
626static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
627 int sync, int force_flush)
628{
46666d8a 629 static DEFINE_SPINLOCK(purge_lock);
80c4bd7a 630 struct llist_node *valist;
db64fe02 631 struct vmap_area *va;
cbb76676 632 struct vmap_area *n_va;
db64fe02
NP
633 int nr = 0;
634
635 /*
636 * If sync is 0 but force_flush is 1, we'll go sync anyway but callers
637 * should not expect such behaviour. This just simplifies locking for
638 * the case that isn't actually used at the moment anyway.
639 */
640 if (!sync && !force_flush) {
46666d8a 641 if (!spin_trylock(&purge_lock))
db64fe02
NP
642 return;
643 } else
46666d8a 644 spin_lock(&purge_lock);
db64fe02 645
02b709df
NP
646 if (sync)
647 purge_fragmented_blocks_allcpus();
648
80c4bd7a
CW
649 valist = llist_del_all(&vmap_purge_list);
650 llist_for_each_entry(va, valist, purge_list) {
651 if (va->va_start < *start)
652 *start = va->va_start;
653 if (va->va_end > *end)
654 *end = va->va_end;
655 nr += (va->va_end - va->va_start) >> PAGE_SHIFT;
db64fe02 656 }
db64fe02 657
88f50044 658 if (nr)
db64fe02 659 atomic_sub(nr, &vmap_lazy_nr);
db64fe02
NP
660
661 if (nr || force_flush)
662 flush_tlb_kernel_range(*start, *end);
663
664 if (nr) {
665 spin_lock(&vmap_area_lock);
80c4bd7a 666 llist_for_each_entry_safe(va, n_va, valist, purge_list)
db64fe02
NP
667 __free_vmap_area(va);
668 spin_unlock(&vmap_area_lock);
669 }
46666d8a 670 spin_unlock(&purge_lock);
db64fe02
NP
671}
672
496850e5
NP
673/*
674 * Kick off a purge of the outstanding lazy areas. Don't bother if somebody
675 * is already purging.
676 */
677static void try_purge_vmap_area_lazy(void)
678{
679 unsigned long start = ULONG_MAX, end = 0;
680
681 __purge_vmap_area_lazy(&start, &end, 0, 0);
682}
683
db64fe02
NP
684/*
685 * Kick off a purge of the outstanding lazy areas.
686 */
687static void purge_vmap_area_lazy(void)
688{
689 unsigned long start = ULONG_MAX, end = 0;
690
496850e5 691 __purge_vmap_area_lazy(&start, &end, 1, 0);
db64fe02
NP
692}
693
694/*
64141da5
JF
695 * Free a vmap area, caller ensuring that the area has been unmapped
696 * and flush_cache_vunmap had been called for the correct range
697 * previously.
db64fe02 698 */
64141da5 699static void free_vmap_area_noflush(struct vmap_area *va)
db64fe02 700{
80c4bd7a
CW
701 int nr_lazy;
702
703 nr_lazy = atomic_add_return((va->va_end - va->va_start) >> PAGE_SHIFT,
704 &vmap_lazy_nr);
705
706 /* After this point, we may free va at any time */
707 llist_add(&va->purge_list, &vmap_purge_list);
708
709 if (unlikely(nr_lazy > lazy_max_pages()))
496850e5 710 try_purge_vmap_area_lazy();
db64fe02
NP
711}
712
64141da5
JF
713/*
714 * Free and unmap a vmap area, caller ensuring flush_cache_vunmap had been
715 * called for the correct range previously.
716 */
717static void free_unmap_vmap_area_noflush(struct vmap_area *va)
718{
719 unmap_vmap_area(va);
720 free_vmap_area_noflush(va);
721}
722
b29acbdc
NP
723/*
724 * Free and unmap a vmap area
725 */
726static void free_unmap_vmap_area(struct vmap_area *va)
727{
728 flush_cache_vunmap(va->va_start, va->va_end);
729 free_unmap_vmap_area_noflush(va);
730}
731
db64fe02
NP
732static struct vmap_area *find_vmap_area(unsigned long addr)
733{
734 struct vmap_area *va;
735
736 spin_lock(&vmap_area_lock);
737 va = __find_vmap_area(addr);
738 spin_unlock(&vmap_area_lock);
739
740 return va;
741}
742
743static void free_unmap_vmap_area_addr(unsigned long addr)
744{
745 struct vmap_area *va;
746
747 va = find_vmap_area(addr);
748 BUG_ON(!va);
749 free_unmap_vmap_area(va);
750}
751
752
753/*** Per cpu kva allocator ***/
754
755/*
756 * vmap space is limited especially on 32 bit architectures. Ensure there is
757 * room for at least 16 percpu vmap blocks per CPU.
758 */
759/*
760 * If we had a constant VMALLOC_START and VMALLOC_END, we'd like to be able
761 * to #define VMALLOC_SPACE (VMALLOC_END-VMALLOC_START). Guess
762 * instead (we just need a rough idea)
763 */
764#if BITS_PER_LONG == 32
765#define VMALLOC_SPACE (128UL*1024*1024)
766#else
767#define VMALLOC_SPACE (128UL*1024*1024*1024)
768#endif
769
770#define VMALLOC_PAGES (VMALLOC_SPACE / PAGE_SIZE)
771#define VMAP_MAX_ALLOC BITS_PER_LONG /* 256K with 4K pages */
772#define VMAP_BBMAP_BITS_MAX 1024 /* 4MB with 4K pages */
773#define VMAP_BBMAP_BITS_MIN (VMAP_MAX_ALLOC*2)
774#define VMAP_MIN(x, y) ((x) < (y) ? (x) : (y)) /* can't use min() */
775#define VMAP_MAX(x, y) ((x) > (y) ? (x) : (y)) /* can't use max() */
f982f915
CL
776#define VMAP_BBMAP_BITS \
777 VMAP_MIN(VMAP_BBMAP_BITS_MAX, \
778 VMAP_MAX(VMAP_BBMAP_BITS_MIN, \
779 VMALLOC_PAGES / roundup_pow_of_two(NR_CPUS) / 16))
db64fe02
NP
780
781#define VMAP_BLOCK_SIZE (VMAP_BBMAP_BITS * PAGE_SIZE)
782
9b463334
JF
783static bool vmap_initialized __read_mostly = false;
784
db64fe02
NP
785struct vmap_block_queue {
786 spinlock_t lock;
787 struct list_head free;
db64fe02
NP
788};
789
790struct vmap_block {
791 spinlock_t lock;
792 struct vmap_area *va;
db64fe02 793 unsigned long free, dirty;
7d61bfe8 794 unsigned long dirty_min, dirty_max; /*< dirty range */
de560423
NP
795 struct list_head free_list;
796 struct rcu_head rcu_head;
02b709df 797 struct list_head purge;
db64fe02
NP
798};
799
800/* Queue of free and dirty vmap blocks, for allocation and flushing purposes */
801static DEFINE_PER_CPU(struct vmap_block_queue, vmap_block_queue);
802
803/*
804 * Radix tree of vmap blocks, indexed by address, to quickly find a vmap block
805 * in the free path. Could get rid of this if we change the API to return a
806 * "cookie" from alloc, to be passed to free. But no big deal yet.
807 */
808static DEFINE_SPINLOCK(vmap_block_tree_lock);
809static RADIX_TREE(vmap_block_tree, GFP_ATOMIC);
810
811/*
812 * We should probably have a fallback mechanism to allocate virtual memory
813 * out of partially filled vmap blocks. However vmap block sizing should be
814 * fairly reasonable according to the vmalloc size, so it shouldn't be a
815 * big problem.
816 */
817
818static unsigned long addr_to_vb_idx(unsigned long addr)
819{
820 addr -= VMALLOC_START & ~(VMAP_BLOCK_SIZE-1);
821 addr /= VMAP_BLOCK_SIZE;
822 return addr;
823}
824
cf725ce2
RP
825static void *vmap_block_vaddr(unsigned long va_start, unsigned long pages_off)
826{
827 unsigned long addr;
828
829 addr = va_start + (pages_off << PAGE_SHIFT);
830 BUG_ON(addr_to_vb_idx(addr) != addr_to_vb_idx(va_start));
831 return (void *)addr;
832}
833
834/**
835 * new_vmap_block - allocates new vmap_block and occupies 2^order pages in this
836 * block. Of course pages number can't exceed VMAP_BBMAP_BITS
837 * @order: how many 2^order pages should be occupied in newly allocated block
838 * @gfp_mask: flags for the page level allocator
839 *
840 * Returns: virtual address in a newly allocated block or ERR_PTR(-errno)
841 */
842static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
db64fe02
NP
843{
844 struct vmap_block_queue *vbq;
845 struct vmap_block *vb;
846 struct vmap_area *va;
847 unsigned long vb_idx;
848 int node, err;
cf725ce2 849 void *vaddr;
db64fe02
NP
850
851 node = numa_node_id();
852
853 vb = kmalloc_node(sizeof(struct vmap_block),
854 gfp_mask & GFP_RECLAIM_MASK, node);
855 if (unlikely(!vb))
856 return ERR_PTR(-ENOMEM);
857
858 va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE,
859 VMALLOC_START, VMALLOC_END,
860 node, gfp_mask);
ddf9c6d4 861 if (IS_ERR(va)) {
db64fe02 862 kfree(vb);
e7d86340 863 return ERR_CAST(va);
db64fe02
NP
864 }
865
866 err = radix_tree_preload(gfp_mask);
867 if (unlikely(err)) {
868 kfree(vb);
869 free_vmap_area(va);
870 return ERR_PTR(err);
871 }
872
cf725ce2 873 vaddr = vmap_block_vaddr(va->va_start, 0);
db64fe02
NP
874 spin_lock_init(&vb->lock);
875 vb->va = va;
cf725ce2
RP
876 /* At least something should be left free */
877 BUG_ON(VMAP_BBMAP_BITS <= (1UL << order));
878 vb->free = VMAP_BBMAP_BITS - (1UL << order);
db64fe02 879 vb->dirty = 0;
7d61bfe8
RP
880 vb->dirty_min = VMAP_BBMAP_BITS;
881 vb->dirty_max = 0;
db64fe02 882 INIT_LIST_HEAD(&vb->free_list);
db64fe02
NP
883
884 vb_idx = addr_to_vb_idx(va->va_start);
885 spin_lock(&vmap_block_tree_lock);
886 err = radix_tree_insert(&vmap_block_tree, vb_idx, vb);
887 spin_unlock(&vmap_block_tree_lock);
888 BUG_ON(err);
889 radix_tree_preload_end();
890
891 vbq = &get_cpu_var(vmap_block_queue);
db64fe02 892 spin_lock(&vbq->lock);
68ac546f 893 list_add_tail_rcu(&vb->free_list, &vbq->free);
db64fe02 894 spin_unlock(&vbq->lock);
3f04ba85 895 put_cpu_var(vmap_block_queue);
db64fe02 896
cf725ce2 897 return vaddr;
db64fe02
NP
898}
899
db64fe02
NP
900static void free_vmap_block(struct vmap_block *vb)
901{
902 struct vmap_block *tmp;
903 unsigned long vb_idx;
904
db64fe02
NP
905 vb_idx = addr_to_vb_idx(vb->va->va_start);
906 spin_lock(&vmap_block_tree_lock);
907 tmp = radix_tree_delete(&vmap_block_tree, vb_idx);
908 spin_unlock(&vmap_block_tree_lock);
909 BUG_ON(tmp != vb);
910
64141da5 911 free_vmap_area_noflush(vb->va);
22a3c7d1 912 kfree_rcu(vb, rcu_head);
db64fe02
NP
913}
914
02b709df
NP
915static void purge_fragmented_blocks(int cpu)
916{
917 LIST_HEAD(purge);
918 struct vmap_block *vb;
919 struct vmap_block *n_vb;
920 struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
921
922 rcu_read_lock();
923 list_for_each_entry_rcu(vb, &vbq->free, free_list) {
924
925 if (!(vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS))
926 continue;
927
928 spin_lock(&vb->lock);
929 if (vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS) {
930 vb->free = 0; /* prevent further allocs after releasing lock */
931 vb->dirty = VMAP_BBMAP_BITS; /* prevent purging it again */
7d61bfe8
RP
932 vb->dirty_min = 0;
933 vb->dirty_max = VMAP_BBMAP_BITS;
02b709df
NP
934 spin_lock(&vbq->lock);
935 list_del_rcu(&vb->free_list);
936 spin_unlock(&vbq->lock);
937 spin_unlock(&vb->lock);
938 list_add_tail(&vb->purge, &purge);
939 } else
940 spin_unlock(&vb->lock);
941 }
942 rcu_read_unlock();
943
944 list_for_each_entry_safe(vb, n_vb, &purge, purge) {
945 list_del(&vb->purge);
946 free_vmap_block(vb);
947 }
948}
949
02b709df
NP
950static void purge_fragmented_blocks_allcpus(void)
951{
952 int cpu;
953
954 for_each_possible_cpu(cpu)
955 purge_fragmented_blocks(cpu);
956}
957
db64fe02
NP
958static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
959{
960 struct vmap_block_queue *vbq;
961 struct vmap_block *vb;
cf725ce2 962 void *vaddr = NULL;
db64fe02
NP
963 unsigned int order;
964
891c49ab 965 BUG_ON(offset_in_page(size));
db64fe02 966 BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
aa91c4d8
JK
967 if (WARN_ON(size == 0)) {
968 /*
969 * Allocating 0 bytes isn't what caller wants since
970 * get_order(0) returns funny result. Just warn and terminate
971 * early.
972 */
973 return NULL;
974 }
db64fe02
NP
975 order = get_order(size);
976
db64fe02
NP
977 rcu_read_lock();
978 vbq = &get_cpu_var(vmap_block_queue);
979 list_for_each_entry_rcu(vb, &vbq->free, free_list) {
cf725ce2 980 unsigned long pages_off;
db64fe02
NP
981
982 spin_lock(&vb->lock);
cf725ce2
RP
983 if (vb->free < (1UL << order)) {
984 spin_unlock(&vb->lock);
985 continue;
986 }
02b709df 987
cf725ce2
RP
988 pages_off = VMAP_BBMAP_BITS - vb->free;
989 vaddr = vmap_block_vaddr(vb->va->va_start, pages_off);
02b709df
NP
990 vb->free -= 1UL << order;
991 if (vb->free == 0) {
992 spin_lock(&vbq->lock);
993 list_del_rcu(&vb->free_list);
994 spin_unlock(&vbq->lock);
995 }
cf725ce2 996
02b709df
NP
997 spin_unlock(&vb->lock);
998 break;
db64fe02 999 }
02b709df 1000
3f04ba85 1001 put_cpu_var(vmap_block_queue);
db64fe02
NP
1002 rcu_read_unlock();
1003
cf725ce2
RP
1004 /* Allocate new block if nothing was found */
1005 if (!vaddr)
1006 vaddr = new_vmap_block(order, gfp_mask);
db64fe02 1007
cf725ce2 1008 return vaddr;
db64fe02
NP
1009}
1010
1011static void vb_free(const void *addr, unsigned long size)
1012{
1013 unsigned long offset;
1014 unsigned long vb_idx;
1015 unsigned int order;
1016 struct vmap_block *vb;
1017
891c49ab 1018 BUG_ON(offset_in_page(size));
db64fe02 1019 BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
b29acbdc
NP
1020
1021 flush_cache_vunmap((unsigned long)addr, (unsigned long)addr + size);
1022
db64fe02
NP
1023 order = get_order(size);
1024
1025 offset = (unsigned long)addr & (VMAP_BLOCK_SIZE - 1);
7d61bfe8 1026 offset >>= PAGE_SHIFT;
db64fe02
NP
1027
1028 vb_idx = addr_to_vb_idx((unsigned long)addr);
1029 rcu_read_lock();
1030 vb = radix_tree_lookup(&vmap_block_tree, vb_idx);
1031 rcu_read_unlock();
1032 BUG_ON(!vb);
1033
64141da5
JF
1034 vunmap_page_range((unsigned long)addr, (unsigned long)addr + size);
1035
db64fe02 1036 spin_lock(&vb->lock);
7d61bfe8
RP
1037
1038 /* Expand dirty range */
1039 vb->dirty_min = min(vb->dirty_min, offset);
1040 vb->dirty_max = max(vb->dirty_max, offset + (1UL << order));
d086817d 1041
db64fe02
NP
1042 vb->dirty += 1UL << order;
1043 if (vb->dirty == VMAP_BBMAP_BITS) {
de560423 1044 BUG_ON(vb->free);
db64fe02
NP
1045 spin_unlock(&vb->lock);
1046 free_vmap_block(vb);
1047 } else
1048 spin_unlock(&vb->lock);
1049}
1050
1051/**
1052 * vm_unmap_aliases - unmap outstanding lazy aliases in the vmap layer
1053 *
1054 * The vmap/vmalloc layer lazily flushes kernel virtual mappings primarily
1055 * to amortize TLB flushing overheads. What this means is that any page you
1056 * have now, may, in a former life, have been mapped into kernel virtual
1057 * address by the vmap layer and so there might be some CPUs with TLB entries
1058 * still referencing that page (additional to the regular 1:1 kernel mapping).
1059 *
1060 * vm_unmap_aliases flushes all such lazy mappings. After it returns, we can
1061 * be sure that none of the pages we have control over will have any aliases
1062 * from the vmap layer.
1063 */
1064void vm_unmap_aliases(void)
1065{
1066 unsigned long start = ULONG_MAX, end = 0;
1067 int cpu;
1068 int flush = 0;
1069
9b463334
JF
1070 if (unlikely(!vmap_initialized))
1071 return;
1072
db64fe02
NP
1073 for_each_possible_cpu(cpu) {
1074 struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
1075 struct vmap_block *vb;
1076
1077 rcu_read_lock();
1078 list_for_each_entry_rcu(vb, &vbq->free, free_list) {
db64fe02 1079 spin_lock(&vb->lock);
7d61bfe8
RP
1080 if (vb->dirty) {
1081 unsigned long va_start = vb->va->va_start;
db64fe02 1082 unsigned long s, e;
b136be5e 1083
7d61bfe8
RP
1084 s = va_start + (vb->dirty_min << PAGE_SHIFT);
1085 e = va_start + (vb->dirty_max << PAGE_SHIFT);
db64fe02 1086
7d61bfe8
RP
1087 start = min(s, start);
1088 end = max(e, end);
db64fe02 1089
7d61bfe8 1090 flush = 1;
db64fe02
NP
1091 }
1092 spin_unlock(&vb->lock);
1093 }
1094 rcu_read_unlock();
1095 }
1096
1097 __purge_vmap_area_lazy(&start, &end, 1, flush);
1098}
1099EXPORT_SYMBOL_GPL(vm_unmap_aliases);
1100
1101/**
1102 * vm_unmap_ram - unmap linear kernel address space set up by vm_map_ram
1103 * @mem: the pointer returned by vm_map_ram
1104 * @count: the count passed to that vm_map_ram call (cannot unmap partial)
1105 */
1106void vm_unmap_ram(const void *mem, unsigned int count)
1107{
65ee03c4 1108 unsigned long size = (unsigned long)count << PAGE_SHIFT;
db64fe02
NP
1109 unsigned long addr = (unsigned long)mem;
1110
1111 BUG_ON(!addr);
1112 BUG_ON(addr < VMALLOC_START);
1113 BUG_ON(addr > VMALLOC_END);
a1c0b1a0 1114 BUG_ON(!PAGE_ALIGNED(addr));
db64fe02
NP
1115
1116 debug_check_no_locks_freed(mem, size);
cd52858c 1117 vmap_debug_free_range(addr, addr+size);
db64fe02
NP
1118
1119 if (likely(count <= VMAP_MAX_ALLOC))
1120 vb_free(mem, size);
1121 else
1122 free_unmap_vmap_area_addr(addr);
1123}
1124EXPORT_SYMBOL(vm_unmap_ram);
1125
1126/**
1127 * vm_map_ram - map pages linearly into kernel virtual address (vmalloc space)
1128 * @pages: an array of pointers to the pages to be mapped
1129 * @count: number of pages
1130 * @node: prefer to allocate data structures on this node
1131 * @prot: memory protection to use. PAGE_KERNEL for regular RAM
e99c97ad 1132 *
36437638
GK
1133 * If you use this function for less than VMAP_MAX_ALLOC pages, it could be
1134 * faster than vmap so it's good. But if you mix long-life and short-life
1135 * objects with vm_map_ram(), it could consume lots of address space through
1136 * fragmentation (especially on a 32bit machine). You could see failures in
1137 * the end. Please use this function for short-lived objects.
1138 *
e99c97ad 1139 * Returns: a pointer to the address that has been mapped, or %NULL on failure
db64fe02
NP
1140 */
1141void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)
1142{
65ee03c4 1143 unsigned long size = (unsigned long)count << PAGE_SHIFT;
db64fe02
NP
1144 unsigned long addr;
1145 void *mem;
1146
1147 if (likely(count <= VMAP_MAX_ALLOC)) {
1148 mem = vb_alloc(size, GFP_KERNEL);
1149 if (IS_ERR(mem))
1150 return NULL;
1151 addr = (unsigned long)mem;
1152 } else {
1153 struct vmap_area *va;
1154 va = alloc_vmap_area(size, PAGE_SIZE,
1155 VMALLOC_START, VMALLOC_END, node, GFP_KERNEL);
1156 if (IS_ERR(va))
1157 return NULL;
1158
1159 addr = va->va_start;
1160 mem = (void *)addr;
1161 }
1162 if (vmap_page_range(addr, addr + size, prot, pages) < 0) {
1163 vm_unmap_ram(mem, count);
1164 return NULL;
1165 }
1166 return mem;
1167}
1168EXPORT_SYMBOL(vm_map_ram);
1169
4341fa45 1170static struct vm_struct *vmlist __initdata;
be9b7335
NP
1171/**
1172 * vm_area_add_early - add vmap area early during boot
1173 * @vm: vm_struct to add
1174 *
1175 * This function is used to add fixed kernel vm area to vmlist before
1176 * vmalloc_init() is called. @vm->addr, @vm->size, and @vm->flags
1177 * should contain proper values and the other fields should be zero.
1178 *
1179 * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
1180 */
1181void __init vm_area_add_early(struct vm_struct *vm)
1182{
1183 struct vm_struct *tmp, **p;
1184
1185 BUG_ON(vmap_initialized);
1186 for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
1187 if (tmp->addr >= vm->addr) {
1188 BUG_ON(tmp->addr < vm->addr + vm->size);
1189 break;
1190 } else
1191 BUG_ON(tmp->addr + tmp->size > vm->addr);
1192 }
1193 vm->next = *p;
1194 *p = vm;
1195}
1196
f0aa6617
TH
1197/**
1198 * vm_area_register_early - register vmap area early during boot
1199 * @vm: vm_struct to register
c0c0a293 1200 * @align: requested alignment
f0aa6617
TH
1201 *
1202 * This function is used to register kernel vm area before
1203 * vmalloc_init() is called. @vm->size and @vm->flags should contain
1204 * proper values on entry and other fields should be zero. On return,
1205 * vm->addr contains the allocated address.
1206 *
1207 * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
1208 */
c0c0a293 1209void __init vm_area_register_early(struct vm_struct *vm, size_t align)
f0aa6617
TH
1210{
1211 static size_t vm_init_off __initdata;
c0c0a293
TH
1212 unsigned long addr;
1213
1214 addr = ALIGN(VMALLOC_START + vm_init_off, align);
1215 vm_init_off = PFN_ALIGN(addr + vm->size) - VMALLOC_START;
f0aa6617 1216
c0c0a293 1217 vm->addr = (void *)addr;
f0aa6617 1218
be9b7335 1219 vm_area_add_early(vm);
f0aa6617
TH
1220}
1221
db64fe02
NP
1222void __init vmalloc_init(void)
1223{
822c18f2
IK
1224 struct vmap_area *va;
1225 struct vm_struct *tmp;
db64fe02
NP
1226 int i;
1227
1228 for_each_possible_cpu(i) {
1229 struct vmap_block_queue *vbq;
32fcfd40 1230 struct vfree_deferred *p;
db64fe02
NP
1231
1232 vbq = &per_cpu(vmap_block_queue, i);
1233 spin_lock_init(&vbq->lock);
1234 INIT_LIST_HEAD(&vbq->free);
32fcfd40
AV
1235 p = &per_cpu(vfree_deferred, i);
1236 init_llist_head(&p->list);
1237 INIT_WORK(&p->wq, free_work);
db64fe02 1238 }
9b463334 1239
822c18f2
IK
1240 /* Import existing vmlist entries. */
1241 for (tmp = vmlist; tmp; tmp = tmp->next) {
43ebdac4 1242 va = kzalloc(sizeof(struct vmap_area), GFP_NOWAIT);
dbda591d 1243 va->flags = VM_VM_AREA;
822c18f2
IK
1244 va->va_start = (unsigned long)tmp->addr;
1245 va->va_end = va->va_start + tmp->size;
dbda591d 1246 va->vm = tmp;
822c18f2
IK
1247 __insert_vmap_area(va);
1248 }
ca23e405
TH
1249
1250 vmap_area_pcpu_hole = VMALLOC_END;
1251
9b463334 1252 vmap_initialized = true;
db64fe02
NP
1253}
1254
8fc48985
TH
1255/**
1256 * map_kernel_range_noflush - map kernel VM area with the specified pages
1257 * @addr: start of the VM area to map
1258 * @size: size of the VM area to map
1259 * @prot: page protection flags to use
1260 * @pages: pages to map
1261 *
1262 * Map PFN_UP(@size) pages at @addr. The VM area @addr and @size
1263 * specify should have been allocated using get_vm_area() and its
1264 * friends.
1265 *
1266 * NOTE:
1267 * This function does NOT do any cache flushing. The caller is
1268 * responsible for calling flush_cache_vmap() on to-be-mapped areas
1269 * before calling this function.
1270 *
1271 * RETURNS:
1272 * The number of pages mapped on success, -errno on failure.
1273 */
1274int map_kernel_range_noflush(unsigned long addr, unsigned long size,
1275 pgprot_t prot, struct page **pages)
1276{
1277 return vmap_page_range_noflush(addr, addr + size, prot, pages);
1278}
1279
1280/**
1281 * unmap_kernel_range_noflush - unmap kernel VM area
1282 * @addr: start of the VM area to unmap
1283 * @size: size of the VM area to unmap
1284 *
1285 * Unmap PFN_UP(@size) pages at @addr. The VM area @addr and @size
1286 * specify should have been allocated using get_vm_area() and its
1287 * friends.
1288 *
1289 * NOTE:
1290 * This function does NOT do any cache flushing. The caller is
1291 * responsible for calling flush_cache_vunmap() on to-be-mapped areas
1292 * before calling this function and flush_tlb_kernel_range() after.
1293 */
1294void unmap_kernel_range_noflush(unsigned long addr, unsigned long size)
1295{
1296 vunmap_page_range(addr, addr + size);
1297}
81e88fdc 1298EXPORT_SYMBOL_GPL(unmap_kernel_range_noflush);
8fc48985
TH
1299
1300/**
1301 * unmap_kernel_range - unmap kernel VM area and flush cache and TLB
1302 * @addr: start of the VM area to unmap
1303 * @size: size of the VM area to unmap
1304 *
1305 * Similar to unmap_kernel_range_noflush() but flushes vcache before
1306 * the unmapping and tlb after.
1307 */
db64fe02
NP
1308void unmap_kernel_range(unsigned long addr, unsigned long size)
1309{
1310 unsigned long end = addr + size;
f6fcba70
TH
1311
1312 flush_cache_vunmap(addr, end);
db64fe02
NP
1313 vunmap_page_range(addr, end);
1314 flush_tlb_kernel_range(addr, end);
1315}
93ef6d6c 1316EXPORT_SYMBOL_GPL(unmap_kernel_range);
db64fe02 1317
f6f8ed47 1318int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page **pages)
db64fe02
NP
1319{
1320 unsigned long addr = (unsigned long)area->addr;
762216ab 1321 unsigned long end = addr + get_vm_area_size(area);
db64fe02
NP
1322 int err;
1323
f6f8ed47 1324 err = vmap_page_range(addr, end, prot, pages);
db64fe02 1325
f6f8ed47 1326 return err > 0 ? 0 : err;
db64fe02
NP
1327}
1328EXPORT_SYMBOL_GPL(map_vm_area);
1329
f5252e00 1330static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
5e6cafc8 1331 unsigned long flags, const void *caller)
cf88c790 1332{
c69480ad 1333 spin_lock(&vmap_area_lock);
cf88c790
TH
1334 vm->flags = flags;
1335 vm->addr = (void *)va->va_start;
1336 vm->size = va->va_end - va->va_start;
1337 vm->caller = caller;
db1aecaf 1338 va->vm = vm;
cf88c790 1339 va->flags |= VM_VM_AREA;
c69480ad 1340 spin_unlock(&vmap_area_lock);
f5252e00 1341}
cf88c790 1342
20fc02b4 1343static void clear_vm_uninitialized_flag(struct vm_struct *vm)
f5252e00 1344{
d4033afd 1345 /*
20fc02b4 1346 * Before removing VM_UNINITIALIZED,
d4033afd
JK
1347 * we should make sure that vm has proper values.
1348 * Pair with smp_rmb() in show_numa_info().
1349 */
1350 smp_wmb();
20fc02b4 1351 vm->flags &= ~VM_UNINITIALIZED;
cf88c790
TH
1352}
1353
db64fe02 1354static struct vm_struct *__get_vm_area_node(unsigned long size,
2dca6999 1355 unsigned long align, unsigned long flags, unsigned long start,
5e6cafc8 1356 unsigned long end, int node, gfp_t gfp_mask, const void *caller)
db64fe02 1357{
0006526d 1358 struct vmap_area *va;
db64fe02 1359 struct vm_struct *area;
1da177e4 1360
52fd24ca 1361 BUG_ON(in_interrupt());
0f2d4a8e 1362 if (flags & VM_IOREMAP)
0f616be1
TK
1363 align = 1ul << clamp_t(int, fls_long(size),
1364 PAGE_SHIFT, IOREMAP_MAX_ORDER);
db64fe02 1365
1da177e4 1366 size = PAGE_ALIGN(size);
31be8309
OH
1367 if (unlikely(!size))
1368 return NULL;
1da177e4 1369
cf88c790 1370 area = kzalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);
1da177e4
LT
1371 if (unlikely(!area))
1372 return NULL;
1373
71394fe5
AR
1374 if (!(flags & VM_NO_GUARD))
1375 size += PAGE_SIZE;
1da177e4 1376
db64fe02
NP
1377 va = alloc_vmap_area(size, align, start, end, node, gfp_mask);
1378 if (IS_ERR(va)) {
1379 kfree(area);
1380 return NULL;
1da177e4 1381 }
1da177e4 1382
d82b1d85 1383 setup_vmalloc_vm(area, va, flags, caller);
f5252e00 1384
1da177e4 1385 return area;
1da177e4
LT
1386}
1387
930fc45a
CL
1388struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
1389 unsigned long start, unsigned long end)
1390{
00ef2d2f
DR
1391 return __get_vm_area_node(size, 1, flags, start, end, NUMA_NO_NODE,
1392 GFP_KERNEL, __builtin_return_address(0));
930fc45a 1393}
5992b6da 1394EXPORT_SYMBOL_GPL(__get_vm_area);
930fc45a 1395
c2968612
BH
1396struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
1397 unsigned long start, unsigned long end,
5e6cafc8 1398 const void *caller)
c2968612 1399{
00ef2d2f
DR
1400 return __get_vm_area_node(size, 1, flags, start, end, NUMA_NO_NODE,
1401 GFP_KERNEL, caller);
c2968612
BH
1402}
1403
1da177e4 1404/**
183ff22b 1405 * get_vm_area - reserve a contiguous kernel virtual area
1da177e4
LT
1406 * @size: size of the area
1407 * @flags: %VM_IOREMAP for I/O mappings or VM_ALLOC
1408 *
1409 * Search an area of @size in the kernel virtual mapping area,
1410 * and reserved it for out purposes. Returns the area descriptor
1411 * on success or %NULL on failure.
1412 */
1413struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
1414{
2dca6999 1415 return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
00ef2d2f
DR
1416 NUMA_NO_NODE, GFP_KERNEL,
1417 __builtin_return_address(0));
23016969
CL
1418}
1419
1420struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
5e6cafc8 1421 const void *caller)
23016969 1422{
2dca6999 1423 return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
00ef2d2f 1424 NUMA_NO_NODE, GFP_KERNEL, caller);
1da177e4
LT
1425}
1426
e9da6e99
MS
1427/**
1428 * find_vm_area - find a continuous kernel virtual area
1429 * @addr: base address
1430 *
1431 * Search for the kernel VM area starting at @addr, and return it.
1432 * It is up to the caller to do all required locking to keep the returned
1433 * pointer valid.
1434 */
1435struct vm_struct *find_vm_area(const void *addr)
83342314 1436{
db64fe02 1437 struct vmap_area *va;
83342314 1438
db64fe02
NP
1439 va = find_vmap_area((unsigned long)addr);
1440 if (va && va->flags & VM_VM_AREA)
db1aecaf 1441 return va->vm;
1da177e4 1442
1da177e4 1443 return NULL;
1da177e4
LT
1444}
1445
7856dfeb 1446/**
183ff22b 1447 * remove_vm_area - find and remove a continuous kernel virtual area
7856dfeb
AK
1448 * @addr: base address
1449 *
1450 * Search for the kernel VM area starting at @addr, and remove it.
1451 * This function returns the found VM area, but using it is NOT safe
1452 * on SMP machines, except for its size or flags.
1453 */
b3bdda02 1454struct vm_struct *remove_vm_area(const void *addr)
7856dfeb 1455{
db64fe02
NP
1456 struct vmap_area *va;
1457
1458 va = find_vmap_area((unsigned long)addr);
1459 if (va && va->flags & VM_VM_AREA) {
db1aecaf 1460 struct vm_struct *vm = va->vm;
f5252e00 1461
c69480ad
JK
1462 spin_lock(&vmap_area_lock);
1463 va->vm = NULL;
1464 va->flags &= ~VM_VM_AREA;
1465 spin_unlock(&vmap_area_lock);
1466
dd32c279 1467 vmap_debug_free_range(va->va_start, va->va_end);
a5af5aa8 1468 kasan_free_shadow(vm);
dd32c279 1469 free_unmap_vmap_area(va);
dd32c279 1470
db64fe02
NP
1471 return vm;
1472 }
1473 return NULL;
7856dfeb
AK
1474}
1475
b3bdda02 1476static void __vunmap(const void *addr, int deallocate_pages)
1da177e4
LT
1477{
1478 struct vm_struct *area;
1479
1480 if (!addr)
1481 return;
1482
e69e9d4a 1483 if (WARN(!PAGE_ALIGNED(addr), "Trying to vfree() bad address (%p)\n",
ab15d9b4 1484 addr))
1da177e4 1485 return;
1da177e4
LT
1486
1487 area = remove_vm_area(addr);
1488 if (unlikely(!area)) {
4c8573e2 1489 WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
1da177e4 1490 addr);
1da177e4
LT
1491 return;
1492 }
1493
7511c3ed
JM
1494 debug_check_no_locks_freed(addr, get_vm_area_size(area));
1495 debug_check_no_obj_freed(addr, get_vm_area_size(area));
9a11b49a 1496
1da177e4
LT
1497 if (deallocate_pages) {
1498 int i;
1499
1500 for (i = 0; i < area->nr_pages; i++) {
bf53d6f8
CL
1501 struct page *page = area->pages[i];
1502
1503 BUG_ON(!page);
37f08dda 1504 __free_kmem_pages(page, 0);
1da177e4
LT
1505 }
1506
244d63ee 1507 kvfree(area->pages);
1da177e4
LT
1508 }
1509
1510 kfree(area);
1511 return;
1512}
32fcfd40 1513
1da177e4
LT
1514/**
1515 * vfree - release memory allocated by vmalloc()
1da177e4
LT
1516 * @addr: memory base address
1517 *
183ff22b 1518 * Free the virtually continuous memory area starting at @addr, as
80e93eff
PE
1519 * obtained from vmalloc(), vmalloc_32() or __vmalloc(). If @addr is
1520 * NULL, no operation is performed.
1da177e4 1521 *
32fcfd40
AV
1522 * Must not be called in NMI context (strictly speaking, only if we don't
1523 * have CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG, but making the calling
1524 * conventions for vfree() arch-depenedent would be a really bad idea)
c9fcee51
AM
1525 *
1526 * NOTE: assumes that the object at *addr has a size >= sizeof(llist_node)
1da177e4 1527 */
b3bdda02 1528void vfree(const void *addr)
1da177e4 1529{
32fcfd40 1530 BUG_ON(in_nmi());
89219d37
CM
1531
1532 kmemleak_free(addr);
1533
32fcfd40
AV
1534 if (!addr)
1535 return;
1536 if (unlikely(in_interrupt())) {
7c8e0181 1537 struct vfree_deferred *p = this_cpu_ptr(&vfree_deferred);
59d3132f
ON
1538 if (llist_add((struct llist_node *)addr, &p->list))
1539 schedule_work(&p->wq);
32fcfd40
AV
1540 } else
1541 __vunmap(addr, 1);
1da177e4 1542}
1da177e4
LT
1543EXPORT_SYMBOL(vfree);
1544
1545/**
1546 * vunmap - release virtual mapping obtained by vmap()
1da177e4
LT
1547 * @addr: memory base address
1548 *
1549 * Free the virtually contiguous memory area starting at @addr,
1550 * which was created from the page array passed to vmap().
1551 *
80e93eff 1552 * Must not be called in interrupt context.
1da177e4 1553 */
b3bdda02 1554void vunmap(const void *addr)
1da177e4
LT
1555{
1556 BUG_ON(in_interrupt());
34754b69 1557 might_sleep();
32fcfd40
AV
1558 if (addr)
1559 __vunmap(addr, 0);
1da177e4 1560}
1da177e4
LT
1561EXPORT_SYMBOL(vunmap);
1562
1563/**
1564 * vmap - map an array of pages into virtually contiguous space
1da177e4
LT
1565 * @pages: array of page pointers
1566 * @count: number of pages to map
1567 * @flags: vm_area->flags
1568 * @prot: page protection for the mapping
1569 *
1570 * Maps @count pages from @pages into contiguous kernel virtual
1571 * space.
1572 */
1573void *vmap(struct page **pages, unsigned int count,
1574 unsigned long flags, pgprot_t prot)
1575{
1576 struct vm_struct *area;
65ee03c4 1577 unsigned long size; /* In bytes */
1da177e4 1578
34754b69
PZ
1579 might_sleep();
1580
4481374c 1581 if (count > totalram_pages)
1da177e4
LT
1582 return NULL;
1583
65ee03c4
GJM
1584 size = (unsigned long)count << PAGE_SHIFT;
1585 area = get_vm_area_caller(size, flags, __builtin_return_address(0));
1da177e4
LT
1586 if (!area)
1587 return NULL;
23016969 1588
f6f8ed47 1589 if (map_vm_area(area, prot, pages)) {
1da177e4
LT
1590 vunmap(area->addr);
1591 return NULL;
1592 }
1593
1594 return area->addr;
1595}
1da177e4
LT
1596EXPORT_SYMBOL(vmap);
1597
2dca6999
DM
1598static void *__vmalloc_node(unsigned long size, unsigned long align,
1599 gfp_t gfp_mask, pgprot_t prot,
5e6cafc8 1600 int node, const void *caller);
e31d9eb5 1601static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
3722e13c 1602 pgprot_t prot, int node)
1da177e4 1603{
22943ab1 1604 const int order = 0;
1da177e4
LT
1605 struct page **pages;
1606 unsigned int nr_pages, array_size, i;
930f036b
DR
1607 const gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;
1608 const gfp_t alloc_mask = gfp_mask | __GFP_NOWARN;
1da177e4 1609
762216ab 1610 nr_pages = get_vm_area_size(area) >> PAGE_SHIFT;
1da177e4
LT
1611 array_size = (nr_pages * sizeof(struct page *));
1612
1613 area->nr_pages = nr_pages;
1614 /* Please note that the recursion is strictly bounded. */
8757d5fa 1615 if (array_size > PAGE_SIZE) {
976d6dfb 1616 pages = __vmalloc_node(array_size, 1, nested_gfp|__GFP_HIGHMEM,
3722e13c 1617 PAGE_KERNEL, node, area->caller);
286e1ea3 1618 } else {
976d6dfb 1619 pages = kmalloc_node(array_size, nested_gfp, node);
286e1ea3 1620 }
1da177e4
LT
1621 area->pages = pages;
1622 if (!area->pages) {
1623 remove_vm_area(area->addr);
1624 kfree(area);
1625 return NULL;
1626 }
1da177e4
LT
1627
1628 for (i = 0; i < area->nr_pages; i++) {
bf53d6f8
CL
1629 struct page *page;
1630
4b90951c 1631 if (node == NUMA_NO_NODE)
37f08dda 1632 page = alloc_kmem_pages(alloc_mask, order);
930fc45a 1633 else
37f08dda 1634 page = alloc_kmem_pages_node(node, alloc_mask, order);
bf53d6f8
CL
1635
1636 if (unlikely(!page)) {
1da177e4
LT
1637 /* Successfully allocated i pages, free them in __vunmap() */
1638 area->nr_pages = i;
1639 goto fail;
1640 }
bf53d6f8 1641 area->pages[i] = page;
d0164adc 1642 if (gfpflags_allow_blocking(gfp_mask))
660654f9 1643 cond_resched();
1da177e4
LT
1644 }
1645
f6f8ed47 1646 if (map_vm_area(area, prot, pages))
1da177e4
LT
1647 goto fail;
1648 return area->addr;
1649
1650fail:
3ee9a4f0
JP
1651 warn_alloc_failed(gfp_mask, order,
1652 "vmalloc: allocation failure, allocated %ld of %ld bytes\n",
22943ab1 1653 (area->nr_pages*PAGE_SIZE), area->size);
1da177e4
LT
1654 vfree(area->addr);
1655 return NULL;
1656}
1657
1658/**
d0a21265 1659 * __vmalloc_node_range - allocate virtually contiguous memory
1da177e4 1660 * @size: allocation size
2dca6999 1661 * @align: desired alignment
d0a21265
DR
1662 * @start: vm area range start
1663 * @end: vm area range end
1da177e4
LT
1664 * @gfp_mask: flags for the page level allocator
1665 * @prot: protection mask for the allocated pages
cb9e3c29 1666 * @vm_flags: additional vm area flags (e.g. %VM_NO_GUARD)
00ef2d2f 1667 * @node: node to use for allocation or NUMA_NO_NODE
c85d194b 1668 * @caller: caller's return address
1da177e4
LT
1669 *
1670 * Allocate enough pages to cover @size from the page level
1671 * allocator with @gfp_mask flags. Map them into contiguous
1672 * kernel virtual space, using a pagetable protection of @prot.
1673 */
d0a21265
DR
1674void *__vmalloc_node_range(unsigned long size, unsigned long align,
1675 unsigned long start, unsigned long end, gfp_t gfp_mask,
cb9e3c29
AR
1676 pgprot_t prot, unsigned long vm_flags, int node,
1677 const void *caller)
1da177e4
LT
1678{
1679 struct vm_struct *area;
89219d37
CM
1680 void *addr;
1681 unsigned long real_size = size;
1da177e4
LT
1682
1683 size = PAGE_ALIGN(size);
4481374c 1684 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
de7d2b56 1685 goto fail;
1da177e4 1686
cb9e3c29
AR
1687 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED |
1688 vm_flags, start, end, node, gfp_mask, caller);
1da177e4 1689 if (!area)
de7d2b56 1690 goto fail;
1da177e4 1691
3722e13c 1692 addr = __vmalloc_area_node(area, gfp_mask, prot, node);
1368edf0 1693 if (!addr)
b82225f3 1694 return NULL;
89219d37 1695
f5252e00 1696 /*
20fc02b4
ZY
1697 * In this function, newly allocated vm_struct has VM_UNINITIALIZED
1698 * flag. It means that vm_struct is not fully initialized.
4341fa45 1699 * Now, it is fully initialized, so remove this flag here.
f5252e00 1700 */
20fc02b4 1701 clear_vm_uninitialized_flag(area);
f5252e00 1702
89219d37 1703 /*
7f88f88f
CM
1704 * A ref_count = 2 is needed because vm_struct allocated in
1705 * __get_vm_area_node() contains a reference to the virtual address of
1706 * the vmalloc'ed block.
89219d37 1707 */
7f88f88f 1708 kmemleak_alloc(addr, real_size, 2, gfp_mask);
89219d37
CM
1709
1710 return addr;
de7d2b56
JP
1711
1712fail:
1713 warn_alloc_failed(gfp_mask, 0,
1714 "vmalloc: allocation failure: %lu bytes\n",
1715 real_size);
1716 return NULL;
1da177e4
LT
1717}
1718
d0a21265
DR
1719/**
1720 * __vmalloc_node - allocate virtually contiguous memory
1721 * @size: allocation size
1722 * @align: desired alignment
1723 * @gfp_mask: flags for the page level allocator
1724 * @prot: protection mask for the allocated pages
00ef2d2f 1725 * @node: node to use for allocation or NUMA_NO_NODE
d0a21265
DR
1726 * @caller: caller's return address
1727 *
1728 * Allocate enough pages to cover @size from the page level
1729 * allocator with @gfp_mask flags. Map them into contiguous
1730 * kernel virtual space, using a pagetable protection of @prot.
1731 */
1732static void *__vmalloc_node(unsigned long size, unsigned long align,
1733 gfp_t gfp_mask, pgprot_t prot,
5e6cafc8 1734 int node, const void *caller)
d0a21265
DR
1735{
1736 return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
cb9e3c29 1737 gfp_mask, prot, 0, node, caller);
d0a21265
DR
1738}
1739
930fc45a
CL
1740void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
1741{
00ef2d2f 1742 return __vmalloc_node(size, 1, gfp_mask, prot, NUMA_NO_NODE,
23016969 1743 __builtin_return_address(0));
930fc45a 1744}
1da177e4
LT
1745EXPORT_SYMBOL(__vmalloc);
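/*
 * Illustrative sketch, not part of vmalloc.c: __vmalloc() is the entry point
 * for callers that need non-default gfp flags or protections. The helper name
 * below is hypothetical; it suppresses the allocation-failure warning because
 * the (assumed) caller has a fallback path.
 */
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

static void *example_try_big_buffer(unsigned long nbytes)
{
	/* stay quiet on failure: the caller is assumed to retry with less */
	return __vmalloc(nbytes, GFP_KERNEL | __GFP_HIGHMEM | __GFP_NOWARN,
			 PAGE_KERNEL);
}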
1746
e1ca7788
DY
1747static inline void *__vmalloc_node_flags(unsigned long size,
1748 int node, gfp_t flags)
1749{
1750 return __vmalloc_node(size, 1, flags, PAGE_KERNEL,
1751 node, __builtin_return_address(0));
1752}
1753
1da177e4
LT
1754/**
1755 * vmalloc - allocate virtually contiguous memory
1da177e4 1756 * @size: allocation size
1da177e4
LT
1757 * Allocate enough pages to cover @size from the page level
1758 * allocator and map them into contiguous kernel virtual space.
1759 *
c1c8897f 1760 * For tight control over page level allocator and protection flags
1da177e4
LT
1761 * use __vmalloc() instead.
1762 */
1763void *vmalloc(unsigned long size)
1764{
00ef2d2f
DR
1765 return __vmalloc_node_flags(size, NUMA_NO_NODE,
1766 GFP_KERNEL | __GFP_HIGHMEM);
1da177e4 1767}
1da177e4
LT
1768EXPORT_SYMBOL(vmalloc);
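/*
 * Illustrative sketch, not part of vmalloc.c: a typical vmalloc()/vfree()
 * round trip for a large table that does not need to be physically
 * contiguous. The helper names are hypothetical.
 */
#include <linux/string.h>
#include <linux/vmalloc.h>

static void *example_alloc_table(unsigned long nbytes)
{
	void *buf = vmalloc(nbytes);	/* may sleep; NULL on failure */

	if (buf)
		memset(buf, 0, nbytes);	/* vmalloc() does not zero; vzalloc() does */
	return buf;
}

static void example_free_table(void *buf)
{
	vfree(buf);			/* vfree(NULL) is a no-op */
}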
1769
e1ca7788
DY
1770/**
1771 * vzalloc - allocate virtually contiguous memory with zero fill
1772 * @size: allocation size
1773 * Allocate enough pages to cover @size from the page level
1774 * allocator and map them into contiguous kernel virtual space.
1775 * The memory allocated is set to zero.
1776 *
1777 * For tight control over page level allocator and protection flags
1778 * use __vmalloc() instead.
1779 */
1780void *vzalloc(unsigned long size)
1781{
00ef2d2f 1782 return __vmalloc_node_flags(size, NUMA_NO_NODE,
e1ca7788
DY
1783 GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
1784}
1785EXPORT_SYMBOL(vzalloc);
1786
83342314 1787/**
ead04089
REB
1788 * vmalloc_user - allocate zeroed virtually contiguous memory for userspace
1789 * @size: allocation size
83342314 1790 *
ead04089
REB
1791 * The resulting memory area is zeroed so it can be mapped to userspace
1792 * without leaking data.
83342314
NP
1793 */
1794void *vmalloc_user(unsigned long size)
1795{
1796 struct vm_struct *area;
1797 void *ret;
1798
2dca6999
DM
1799 ret = __vmalloc_node(size, SHMLBA,
1800 GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
00ef2d2f
DR
1801 PAGE_KERNEL, NUMA_NO_NODE,
1802 __builtin_return_address(0));
2b4ac44e 1803 if (ret) {
db64fe02 1804 area = find_vm_area(ret);
2b4ac44e 1805 area->flags |= VM_USERMAP;
2b4ac44e 1806 }
83342314
NP
1807 return ret;
1808}
1809EXPORT_SYMBOL(vmalloc_user);
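/*
 * Illustrative sketch, not part of vmalloc.c: a driver buffer that will later
 * be handed to remap_vmalloc_range() is allocated with vmalloc_user() so it
 * is zeroed and tagged VM_USERMAP. Names are hypothetical.
 */
#include <linux/errno.h>
#include <linux/vmalloc.h>

static void *example_user_buf;		/* assumed module-level buffer */

static int example_setup_user_buf(unsigned long nbytes)
{
	example_user_buf = vmalloc_user(nbytes);  /* size is page-aligned internally */
	return example_user_buf ? 0 : -ENOMEM;
}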
1810
930fc45a
CL
1811/**
1812 * vmalloc_node - allocate memory on a specific node
930fc45a 1813 * @size: allocation size
d44e0780 1814 * @node: numa node
930fc45a
CL
1815 *
1816 * Allocate enough pages to cover @size from the page level
1817 * allocator and map them into contiguous kernel virtual space.
1818 *
c1c8897f 1819 * For tight control over page level allocator and protection flags
930fc45a
CL
1820 * use __vmalloc() instead.
1821 */
1822void *vmalloc_node(unsigned long size, int node)
1823{
2dca6999 1824 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
23016969 1825 node, __builtin_return_address(0));
930fc45a
CL
1826}
1827EXPORT_SYMBOL(vmalloc_node);
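/*
 * Illustrative sketch, not part of vmalloc.c: node-local allocation with a
 * fallback to "any node" when the caller has no placement preference. The
 * helper name is hypothetical.
 */
#include <linux/numa.h>
#include <linux/vmalloc.h>

static void *example_alloc_near(unsigned long nbytes, int nid)
{
	/* NUMA_NO_NODE lets the page allocator choose; a valid nid prefers that node */
	return vmalloc_node(nbytes, nid >= 0 ? nid : NUMA_NO_NODE);
}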
1828
e1ca7788
DY
1829/**
1830 * vzalloc_node - allocate memory on a specific node with zero fill
1831 * @size: allocation size
1832 * @node: numa node
1833 *
1834 * Allocate enough pages to cover @size from the page level
1835 * allocator and map them into contiguous kernel virtual space.
1836 * The memory allocated is set to zero.
1837 *
1838 * For tight control over page level allocator and protection flags
1839 * use __vmalloc_node() instead.
1840 */
1841void *vzalloc_node(unsigned long size, int node)
1842{
1843 return __vmalloc_node_flags(size, node,
1844 GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
1845}
1846EXPORT_SYMBOL(vzalloc_node);
1847
4dc3b16b
PP
1848#ifndef PAGE_KERNEL_EXEC
1849# define PAGE_KERNEL_EXEC PAGE_KERNEL
1850#endif
1851
1da177e4
LT
1852/**
1853 * vmalloc_exec - allocate virtually contiguous, executable memory
1da177e4
LT
1854 * @size: allocation size
1855 *
1856 * Kernel-internal function to allocate enough pages to cover @size
1857 * from the page level allocator and map them into contiguous and
1858 * executable kernel virtual space.
1859 *
c1c8897f 1860 * For tight control over page level allocator and protection flags
1da177e4
LT
1861 * use __vmalloc() instead.
1862 */
1863
1da177e4
LT
1864void *vmalloc_exec(unsigned long size)
1865{
2dca6999 1866 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
00ef2d2f 1867 NUMA_NO_NODE, __builtin_return_address(0));
1da177e4
LT
1868}
1869
0d08e0d3 1870#if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
7ac674f5 1871#define GFP_VMALLOC32 GFP_DMA32 | GFP_KERNEL
0d08e0d3 1872#elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
7ac674f5 1873#define GFP_VMALLOC32 GFP_DMA | GFP_KERNEL
0d08e0d3
AK
1874#else
1875#define GFP_VMALLOC32 GFP_KERNEL
1876#endif
1877
1da177e4
LT
1878/**
1879 * vmalloc_32 - allocate virtually contiguous memory (32bit addressable)
1da177e4
LT
1880 * @size: allocation size
1881 *
1882 * Allocate enough 32-bit physically addressable pages to cover @size from the
1883 * page level allocator and map them into contiguous kernel virtual space.
1884 */
1885void *vmalloc_32(unsigned long size)
1886{
2dca6999 1887 return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
00ef2d2f 1888 NUMA_NO_NODE, __builtin_return_address(0));
1da177e4 1889}
1da177e4
LT
1890EXPORT_SYMBOL(vmalloc_32);
1891
83342314 1892/**
ead04089 1893 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
83342314 1894 * @size: allocation size
ead04089
REB
1895 *
1896 * The resulting memory area is 32bit addressable and zeroed so it can be
1897 * mapped to userspace without leaking data.
83342314
NP
1898 */
1899void *vmalloc_32_user(unsigned long size)
1900{
1901 struct vm_struct *area;
1902 void *ret;
1903
2dca6999 1904 ret = __vmalloc_node(size, 1, GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL,
00ef2d2f 1905 NUMA_NO_NODE, __builtin_return_address(0));
2b4ac44e 1906 if (ret) {
db64fe02 1907 area = find_vm_area(ret);
2b4ac44e 1908 area->flags |= VM_USERMAP;
2b4ac44e 1909 }
83342314
NP
1910 return ret;
1911}
1912EXPORT_SYMBOL(vmalloc_32_user);
1913
d0107eb0
KH
1914/*
1915 * Small helper routine: copy contents from @addr to @buf.
1916 * If a page is not present, fill it with zeroes.
1917 */
1918
1919static int aligned_vread(char *buf, char *addr, unsigned long count)
1920{
1921 struct page *p;
1922 int copied = 0;
1923
1924 while (count) {
1925 unsigned long offset, length;
1926
891c49ab 1927 offset = offset_in_page(addr);
d0107eb0
KH
1928 length = PAGE_SIZE - offset;
1929 if (length > count)
1930 length = count;
1931 p = vmalloc_to_page(addr);
1932 /*
1933 * To access this _mapped_ area safely we would need a lock, but
1934 * taking one here would add overhead to every vmalloc()/vfree()
1935 * call for the sake of this rarely used _debug_ interface.
1936 * Instead we use kmap() and accept a small overhead in this
1937 * access function.
1938 */
1939 if (p) {
1940 /*
1941 * we can expect USER0 is not used (see vread/vwrite's
1942 * function description)
1943 */
9b04c5fe 1944 void *map = kmap_atomic(p);
d0107eb0 1945 memcpy(buf, map + offset, length);
9b04c5fe 1946 kunmap_atomic(map);
d0107eb0
KH
1947 } else
1948 memset(buf, 0, length);
1949
1950 addr += length;
1951 buf += length;
1952 copied += length;
1953 count -= length;
1954 }
1955 return copied;
1956}
1957
1958static int aligned_vwrite(char *buf, char *addr, unsigned long count)
1959{
1960 struct page *p;
1961 int copied = 0;
1962
1963 while (count) {
1964 unsigned long offset, length;
1965
891c49ab 1966 offset = offset_in_page(addr);
d0107eb0
KH
1967 length = PAGE_SIZE - offset;
1968 if (length > count)
1969 length = count;
1970 p = vmalloc_to_page(addr);
1971 /*
1972 * To access this _mapped_ area safely we would need a lock, but
1973 * taking one here would add overhead to every vmalloc()/vfree()
1974 * call for the sake of this rarely used _debug_ interface.
1975 * Instead we use kmap() and accept a small overhead in this
1976 * access function.
1977 */
1978 if (p) {
1979 /*
1980 * we can expect USER0 is not used (see vread/vwrite's
1981 * function description)
1982 */
9b04c5fe 1983 void *map = kmap_atomic(p);
d0107eb0 1984 memcpy(map + offset, buf, length);
9b04c5fe 1985 kunmap_atomic(map);
d0107eb0
KH
1986 }
1987 addr += length;
1988 buf += length;
1989 copied += length;
1990 count -= length;
1991 }
1992 return copied;
1993}
1994
1995/**
1996 * vread() - read vmalloc area in a safe way.
1997 * @buf: buffer for reading data
1998 * @addr: vm address.
1999 * @count: number of bytes to be read.
2000 *
2001 * Returns the number of bytes by which @addr and @buf should be
2002 * increased (the same number as @count). Returns 0 if [addr...addr+count)
2003 * does not intersect any live vmalloc area.
2004 *
2005 * This function checks that @addr is a valid vmalloc'ed area and
2006 * copies data from that area to the given buffer. If the memory range
2007 * [addr...addr+count) includes some valid addresses, the data is copied
2008 * to the proper part of @buf. Memory holes are zero-filled.
2009 * IOREMAP areas are treated as memory holes and are not copied.
2010 *
2011 * If [addr...addr+count) does not intersect any live
a8e5202d 2012 * vm_struct area, returns 0. @buf should be a kernel buffer.
d0107eb0
KH
2013 *
2014 * Note: in normal operation vread() is never necessary because the caller
2015 * should know the vmalloc() area is valid and can use memcpy().
2016 * This is for routines which have to access a vmalloc area without
2017 * any prior information, such as /dev/kmem.
2018 *
2019 */
2020
1da177e4
LT
2021long vread(char *buf, char *addr, unsigned long count)
2022{
e81ce85f
JK
2023 struct vmap_area *va;
2024 struct vm_struct *vm;
1da177e4 2025 char *vaddr, *buf_start = buf;
d0107eb0 2026 unsigned long buflen = count;
1da177e4
LT
2027 unsigned long n;
2028
2029 /* Don't allow overflow */
2030 if ((unsigned long) addr + count < count)
2031 count = -(unsigned long) addr;
2032
e81ce85f
JK
2033 spin_lock(&vmap_area_lock);
2034 list_for_each_entry(va, &vmap_area_list, list) {
2035 if (!count)
2036 break;
2037
2038 if (!(va->flags & VM_VM_AREA))
2039 continue;
2040
2041 vm = va->vm;
2042 vaddr = (char *) vm->addr;
762216ab 2043 if (addr >= vaddr + get_vm_area_size(vm))
1da177e4
LT
2044 continue;
2045 while (addr < vaddr) {
2046 if (count == 0)
2047 goto finished;
2048 *buf = '\0';
2049 buf++;
2050 addr++;
2051 count--;
2052 }
762216ab 2053 n = vaddr + get_vm_area_size(vm) - addr;
d0107eb0
KH
2054 if (n > count)
2055 n = count;
e81ce85f 2056 if (!(vm->flags & VM_IOREMAP))
d0107eb0
KH
2057 aligned_vread(buf, addr, n);
2058 else /* IOREMAP area is treated as memory hole */
2059 memset(buf, 0, n);
2060 buf += n;
2061 addr += n;
2062 count -= n;
1da177e4
LT
2063 }
2064finished:
e81ce85f 2065 spin_unlock(&vmap_area_lock);
d0107eb0
KH
2066
2067 if (buf == buf_start)
2068 return 0;
2069 /* zero-fill memory holes */
2070 if (buf != buf_start + buflen)
2071 memset(buf, 0, buflen - (buf - buf_start));
2072
2073 return buflen;
1da177e4
LT
2074}
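/*
 * Illustrative sketch, not part of vmalloc.c: a /dev/kmem-style reader that
 * pulls a vmalloc range into a kernel bounce buffer with vread() before
 * copying it to userspace. All names are hypothetical.
 */
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>

static ssize_t example_read_vmalloc(char __user *ubuf, char *vaddr, size_t len)
{
	char *kbuf = kmalloc(len, GFP_KERNEL);
	ssize_t ret;
	long n;

	if (!kbuf)
		return -ENOMEM;
	n = vread(kbuf, vaddr, len);	/* holes and IOREMAP areas read back as zeroes */
	if (n && copy_to_user(ubuf, kbuf, n))
		ret = -EFAULT;
	else
		ret = n;
	kfree(kbuf);
	return ret;
}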
2075
d0107eb0
KH
2076/**
2077 * vwrite() - write vmalloc area in a safe way.
2078 * @buf: buffer for source data
2079 * @addr: vm address.
2080 * @count: number of bytes to be read.
2081 *
2082 * Returns the number of bytes by which @addr and @buf should be
2083 * increased (the same number as @count).
2084 * If [addr...addr+count) does not intersect any valid
2085 * vmalloc area, returns 0.
2086 *
2087 * This function checks that @addr is a valid vmalloc'ed area and
2088 * copies data from the buffer to that area. If the memory range
2089 * [addr...addr+count) includes some valid addresses, the data is copied
2090 * from the proper part of @buf. Nothing is copied into memory holes.
2091 * IOREMAP areas are treated as memory holes and are not copied.
2092 *
2093 * If [addr...addr+count) does not intersect any live
a8e5202d 2094 * vm_struct area, returns 0. @buf should be a kernel buffer.
d0107eb0
KH
2095 *
2096 * Note: in normal operation vwrite() is never necessary because the caller
2097 * should know the vmalloc() area is valid and can use memcpy().
2098 * This is for routines which have to access a vmalloc area without
2099 * any prior information, such as /dev/kmem.
d0107eb0
KH
2100 */
2101
1da177e4
LT
2102long vwrite(char *buf, char *addr, unsigned long count)
2103{
e81ce85f
JK
2104 struct vmap_area *va;
2105 struct vm_struct *vm;
d0107eb0
KH
2106 char *vaddr;
2107 unsigned long n, buflen;
2108 int copied = 0;
1da177e4
LT
2109
2110 /* Don't allow overflow */
2111 if ((unsigned long) addr + count < count)
2112 count = -(unsigned long) addr;
d0107eb0 2113 buflen = count;
1da177e4 2114
e81ce85f
JK
2115 spin_lock(&vmap_area_lock);
2116 list_for_each_entry(va, &vmap_area_list, list) {
2117 if (!count)
2118 break;
2119
2120 if (!(va->flags & VM_VM_AREA))
2121 continue;
2122
2123 vm = va->vm;
2124 vaddr = (char *) vm->addr;
762216ab 2125 if (addr >= vaddr + get_vm_area_size(vm))
1da177e4
LT
2126 continue;
2127 while (addr < vaddr) {
2128 if (count == 0)
2129 goto finished;
2130 buf++;
2131 addr++;
2132 count--;
2133 }
762216ab 2134 n = vaddr + get_vm_area_size(vm) - addr;
d0107eb0
KH
2135 if (n > count)
2136 n = count;
e81ce85f 2137 if (!(vm->flags & VM_IOREMAP)) {
d0107eb0
KH
2138 aligned_vwrite(buf, addr, n);
2139 copied++;
2140 }
2141 buf += n;
2142 addr += n;
2143 count -= n;
1da177e4
LT
2144 }
2145finished:
e81ce85f 2146 spin_unlock(&vmap_area_lock);
d0107eb0
KH
2147 if (!copied)
2148 return 0;
2149 return buflen;
1da177e4 2150}
83342314
NP
2151
2152/**
e69e9d4a
HD
2153 * remap_vmalloc_range_partial - map vmalloc pages to userspace
2154 * @vma: vma to cover
2155 * @uaddr: target user address to start at
2156 * @kaddr: virtual address of vmalloc kernel memory
2157 * @size: size of map area
7682486b
RD
2158 *
2159 * Returns: 0 for success, -Exxx on failure
83342314 2160 *
e69e9d4a
HD
2161 * This function checks that @kaddr is a valid vmalloc'ed area,
2162 * and that it is big enough to cover the range starting at
2163 * @uaddr in @vma. Will return failure if those criteria aren't
2164 * met.
83342314 2165 *
72fd4a35 2166 * Similar to remap_pfn_range() (see mm/memory.c)
83342314 2167 */
e69e9d4a
HD
2168int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
2169 void *kaddr, unsigned long size)
83342314
NP
2170{
2171 struct vm_struct *area;
83342314 2172
e69e9d4a
HD
2173 size = PAGE_ALIGN(size);
2174
2175 if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(kaddr))
83342314
NP
2176 return -EINVAL;
2177
e69e9d4a 2178 area = find_vm_area(kaddr);
83342314 2179 if (!area)
db64fe02 2180 return -EINVAL;
83342314
NP
2181
2182 if (!(area->flags & VM_USERMAP))
db64fe02 2183 return -EINVAL;
83342314 2184
e69e9d4a 2185 if (kaddr + size > area->addr + area->size)
db64fe02 2186 return -EINVAL;
83342314 2187
83342314 2188 do {
e69e9d4a 2189 struct page *page = vmalloc_to_page(kaddr);
db64fe02
NP
2190 int ret;
2191
83342314
NP
2192 ret = vm_insert_page(vma, uaddr, page);
2193 if (ret)
2194 return ret;
2195
2196 uaddr += PAGE_SIZE;
e69e9d4a
HD
2197 kaddr += PAGE_SIZE;
2198 size -= PAGE_SIZE;
2199 } while (size > 0);
83342314 2200
314e51b9 2201 vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
83342314 2202
db64fe02 2203 return 0;
83342314 2204}
e69e9d4a
HD
2205EXPORT_SYMBOL(remap_vmalloc_range_partial);
2206
2207/**
2208 * remap_vmalloc_range - map vmalloc pages to userspace
2209 * @vma: vma to cover (map full range of vma)
2210 * @addr: vmalloc memory
2211 * @pgoff: number of pages into addr before first page to map
2212 *
2213 * Returns: 0 for success, -Exxx on failure
2214 *
2215 * This function checks that addr is a valid vmalloc'ed area, and
2216 * that it is big enough to cover the vma. Will return failure if
2217 * those criteria aren't met.
2218 *
2219 * Similar to remap_pfn_range() (see mm/memory.c)
2220 */
2221int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
2222 unsigned long pgoff)
2223{
2224 return remap_vmalloc_range_partial(vma, vma->vm_start,
2225 addr + (pgoff << PAGE_SHIFT),
2226 vma->vm_end - vma->vm_start);
2227}
83342314
NP
2228EXPORT_SYMBOL(remap_vmalloc_range);
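/*
 * Illustrative sketch, not part of vmalloc.c: a character-device ->mmap()
 * handler exposing a buffer that was allocated with vmalloc_user(). The
 * buffer pointer and its (page-aligned) size are hypothetical.
 */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

static void *example_user_buf;			/* assumed: from vmalloc_user() */
static unsigned long example_user_buf_size;	/* assumed: page-aligned */

static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long len = vma->vm_end - vma->vm_start;

	if ((vma->vm_pgoff << PAGE_SHIFT) + len > example_user_buf_size)
		return -EINVAL;
	/* vm_pgoff selects the starting page inside the vmalloc'ed buffer */
	return remap_vmalloc_range(vma, example_user_buf, vma->vm_pgoff);
}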
2229
1eeb66a1
CH
2230/*
2231 * Implement a stub for vmalloc_sync_all() if the architecture chose not to
2232 * have one.
2233 */
3b32123d 2234void __weak vmalloc_sync_all(void)
1eeb66a1
CH
2235{
2236}
5f4352fb
JF
2237
2238
2f569afd 2239static int f(pte_t *pte, pgtable_t table, unsigned long addr, void *data)
5f4352fb 2240{
cd12909c
DV
2241 pte_t ***p = data;
2242
2243 if (p) {
2244 *(*p) = pte;
2245 (*p)++;
2246 }
5f4352fb
JF
2247 return 0;
2248}
2249
2250/**
2251 * alloc_vm_area - allocate a range of kernel address space
2252 * @size: size of the area
cd12909c 2253 * @ptes: returns the PTEs for the address space
7682486b
RD
2254 *
2255 * Returns: NULL on failure, vm_struct on success
5f4352fb
JF
2256 *
2257 * This function reserves a range of kernel address space, and
2258 * allocates pagetables to map that range. No actual mappings
cd12909c
DV
2259 * are created.
2260 *
2261 * If @ptes is non-NULL, pointers to the PTEs (in init_mm)
2262 * allocated for the VM area are returned.
5f4352fb 2263 */
cd12909c 2264struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes)
5f4352fb
JF
2265{
2266 struct vm_struct *area;
2267
23016969
CL
2268 area = get_vm_area_caller(size, VM_IOREMAP,
2269 __builtin_return_address(0));
5f4352fb
JF
2270 if (area == NULL)
2271 return NULL;
2272
2273 /*
2274 * This ensures that page tables are constructed for this region
2275 * of kernel virtual address space and mapped into init_mm.
2276 */
2277 if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
cd12909c 2278 size, f, ptes ? &ptes : NULL)) {
5f4352fb
JF
2279 free_vm_area(area);
2280 return NULL;
2281 }
2282
5f4352fb
JF
2283 return area;
2284}
2285EXPORT_SYMBOL_GPL(alloc_vm_area);
2286
2287void free_vm_area(struct vm_struct *area)
2288{
2289 struct vm_struct *ret;
2290 ret = remove_vm_area(area->addr);
2291 BUG_ON(ret != area);
2292 kfree(area);
2293}
2294EXPORT_SYMBOL_GPL(free_vm_area);
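/*
 * Illustrative sketch, not part of vmalloc.c: reserving kernel address space
 * whose PTEs are populated later by other means (the Xen grant-table style of
 * use). Names and the page count are hypothetical.
 */
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

#define EXAMPLE_NR_PAGES 4

static struct vm_struct *example_area;
static pte_t *example_ptes[EXAMPLE_NR_PAGES];	/* one PTE pointer per page */

static int example_reserve_range(void)
{
	example_area = alloc_vm_area(EXAMPLE_NR_PAGES * PAGE_SIZE, example_ptes);
	if (!example_area)
		return -ENOMEM;
	/* example_ptes[] now reference the (still empty) PTEs for the range */
	return 0;
}

static void example_release_range(void)
{
	free_vm_area(example_area);
}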
a10aa579 2295
4f8b02b4 2296#ifdef CONFIG_SMP
ca23e405
TH
2297static struct vmap_area *node_to_va(struct rb_node *n)
2298{
2299 return n ? rb_entry(n, struct vmap_area, rb_node) : NULL;
2300}
2301
2302/**
2303 * pvm_find_next_prev - find the next and prev vmap_area surrounding @end
2304 * @end: target address
2305 * @pnext: out arg for the next vmap_area
2306 * @pprev: out arg for the previous vmap_area
2307 *
2308 * Returns: %true if either or both of next and prev are found,
2309 * %false if no vmap_area exists
2310 *
2311 * Find vmap_areas whose end addresses enclose @end, i.e. if not
2312 * NULL, *pnext->va_end > @end and *pprev->va_end <= @end.
2313 */
2314static bool pvm_find_next_prev(unsigned long end,
2315 struct vmap_area **pnext,
2316 struct vmap_area **pprev)
2317{
2318 struct rb_node *n = vmap_area_root.rb_node;
2319 struct vmap_area *va = NULL;
2320
2321 while (n) {
2322 va = rb_entry(n, struct vmap_area, rb_node);
2323 if (end < va->va_end)
2324 n = n->rb_left;
2325 else if (end > va->va_end)
2326 n = n->rb_right;
2327 else
2328 break;
2329 }
2330
2331 if (!va)
2332 return false;
2333
2334 if (va->va_end > end) {
2335 *pnext = va;
2336 *pprev = node_to_va(rb_prev(&(*pnext)->rb_node));
2337 } else {
2338 *pprev = va;
2339 *pnext = node_to_va(rb_next(&(*pprev)->rb_node));
2340 }
2341 return true;
2342}
2343
2344/**
2345 * pvm_determine_end - find the highest aligned address between two vmap_areas
2346 * @pnext: in/out arg for the next vmap_area
2347 * @pprev: in/out arg for the previous vmap_area
2348 * @align: alignment
2349 *
2350 * Returns: determined end address
2351 *
2352 * Find the highest aligned address between *@pnext and *@pprev below
2353 * VMALLOC_END. *@pnext and *@pprev are adjusted so that the aligned
2354 * down address is between the end addresses of the two vmap_areas.
2355 *
2356 * Please note that the address returned by this function may fall
2357 * inside *@pnext vmap_area. The caller is responsible for checking
2358 * that.
2359 */
2360static unsigned long pvm_determine_end(struct vmap_area **pnext,
2361 struct vmap_area **pprev,
2362 unsigned long align)
2363{
2364 const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
2365 unsigned long addr;
2366
2367 if (*pnext)
2368 addr = min((*pnext)->va_start & ~(align - 1), vmalloc_end);
2369 else
2370 addr = vmalloc_end;
2371
2372 while (*pprev && (*pprev)->va_end > addr) {
2373 *pnext = *pprev;
2374 *pprev = node_to_va(rb_prev(&(*pnext)->rb_node));
2375 }
2376
2377 return addr;
2378}
2379
2380/**
2381 * pcpu_get_vm_areas - allocate vmalloc areas for percpu allocator
2382 * @offsets: array containing offset of each area
2383 * @sizes: array containing size of each area
2384 * @nr_vms: the number of areas to allocate
2385 * @align: alignment, all entries in @offsets and @sizes must be aligned to this
ca23e405
TH
2386 *
2387 * Returns: kmalloc'd vm_struct pointer array pointing to allocated
2388 * vm_structs on success, %NULL on failure
2389 *
2390 * Percpu allocator wants to use congruent vm areas so that it can
2391 * maintain the offsets among percpu areas. This function allocates
ec3f64fc
DR
2392 * congruent vmalloc areas for it with GFP_KERNEL. These areas tend to
2393 * be scattered pretty far, distance between two areas easily going up
2394 * to gigabytes. To avoid interacting with regular vmallocs, these
2395 * areas are allocated from top.
ca23e405
TH
2396 *
2397 * Despite its complicated look, this allocator is rather simple. It
2398 * does everything top-down and scans areas from the end looking for a
2399 * matching slot. While scanning, if any of the areas overlaps with an
2400 * existing vmap_area, the base address is pulled down to fit the
2401 * area. Scanning is repeated until all the areas fit, and then all
2402 * necessary data structures are inserted and the result is returned.
2403 */
2404struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
2405 const size_t *sizes, int nr_vms,
ec3f64fc 2406 size_t align)
ca23e405
TH
2407{
2408 const unsigned long vmalloc_start = ALIGN(VMALLOC_START, align);
2409 const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
2410 struct vmap_area **vas, *prev, *next;
2411 struct vm_struct **vms;
2412 int area, area2, last_area, term_area;
2413 unsigned long base, start, end, last_end;
2414 bool purged = false;
2415
ca23e405 2416 /* verify parameters and allocate data structures */
891c49ab 2417 BUG_ON(offset_in_page(align) || !is_power_of_2(align));
ca23e405
TH
2418 for (last_area = 0, area = 0; area < nr_vms; area++) {
2419 start = offsets[area];
2420 end = start + sizes[area];
2421
2422 /* is everything aligned properly? */
2423 BUG_ON(!IS_ALIGNED(offsets[area], align));
2424 BUG_ON(!IS_ALIGNED(sizes[area], align));
2425
2426 /* detect the area with the highest address */
2427 if (start > offsets[last_area])
2428 last_area = area;
2429
2430 for (area2 = 0; area2 < nr_vms; area2++) {
2431 unsigned long start2 = offsets[area2];
2432 unsigned long end2 = start2 + sizes[area2];
2433
2434 if (area2 == area)
2435 continue;
2436
2437 BUG_ON(start2 >= start && start2 < end);
2438 BUG_ON(end2 <= end && end2 > start);
2439 }
2440 }
2441 last_end = offsets[last_area] + sizes[last_area];
2442
2443 if (vmalloc_end - vmalloc_start < last_end) {
2444 WARN_ON(true);
2445 return NULL;
2446 }
2447
4d67d860
TM
2448 vms = kcalloc(nr_vms, sizeof(vms[0]), GFP_KERNEL);
2449 vas = kcalloc(nr_vms, sizeof(vas[0]), GFP_KERNEL);
ca23e405 2450 if (!vas || !vms)
f1db7afd 2451 goto err_free2;
ca23e405
TH
2452
2453 for (area = 0; area < nr_vms; area++) {
ec3f64fc
DR
2454 vas[area] = kzalloc(sizeof(struct vmap_area), GFP_KERNEL);
2455 vms[area] = kzalloc(sizeof(struct vm_struct), GFP_KERNEL);
ca23e405
TH
2456 if (!vas[area] || !vms[area])
2457 goto err_free;
2458 }
2459retry:
2460 spin_lock(&vmap_area_lock);
2461
2462 /* start scanning - we scan from the top, begin with the last area */
2463 area = term_area = last_area;
2464 start = offsets[area];
2465 end = start + sizes[area];
2466
2467 if (!pvm_find_next_prev(vmap_area_pcpu_hole, &next, &prev)) {
2468 base = vmalloc_end - last_end;
2469 goto found;
2470 }
2471 base = pvm_determine_end(&next, &prev, align) - end;
2472
2473 while (true) {
2474 BUG_ON(next && next->va_end <= base + end);
2475 BUG_ON(prev && prev->va_end > base + end);
2476
2477 /*
2478 * base might have underflowed, add last_end before
2479 * comparing.
2480 */
2481 if (base + last_end < vmalloc_start + last_end) {
2482 spin_unlock(&vmap_area_lock);
2483 if (!purged) {
2484 purge_vmap_area_lazy();
2485 purged = true;
2486 goto retry;
2487 }
2488 goto err_free;
2489 }
2490
2491 /*
2492 * If next overlaps, move base downwards so that it's
2493 * right below next and then recheck.
2494 */
2495 if (next && next->va_start < base + end) {
2496 base = pvm_determine_end(&next, &prev, align) - end;
2497 term_area = area;
2498 continue;
2499 }
2500
2501 /*
2502 * If prev overlaps, shift down next and prev and move
2503 * base so that it's right below new next and then
2504 * recheck.
2505 */
2506 if (prev && prev->va_end > base + start) {
2507 next = prev;
2508 prev = node_to_va(rb_prev(&next->rb_node));
2509 base = pvm_determine_end(&next, &prev, align) - end;
2510 term_area = area;
2511 continue;
2512 }
2513
2514 /*
2515 * This area fits, move on to the previous one. If
2516 * the previous one is the terminal one, we're done.
2517 */
2518 area = (area + nr_vms - 1) % nr_vms;
2519 if (area == term_area)
2520 break;
2521 start = offsets[area];
2522 end = start + sizes[area];
2523 pvm_find_next_prev(base + end, &next, &prev);
2524 }
2525found:
2526 /* we've found a fitting base, insert all va's */
2527 for (area = 0; area < nr_vms; area++) {
2528 struct vmap_area *va = vas[area];
2529
2530 va->va_start = base + offsets[area];
2531 va->va_end = va->va_start + sizes[area];
2532 __insert_vmap_area(va);
2533 }
2534
2535 vmap_area_pcpu_hole = base + offsets[last_area];
2536
2537 spin_unlock(&vmap_area_lock);
2538
2539 /* insert all vm's */
2540 for (area = 0; area < nr_vms; area++)
3645cb4a
ZY
2541 setup_vmalloc_vm(vms[area], vas[area], VM_ALLOC,
2542 pcpu_get_vm_areas);
ca23e405
TH
2543
2544 kfree(vas);
2545 return vms;
2546
2547err_free:
2548 for (area = 0; area < nr_vms; area++) {
f1db7afd
KC
2549 kfree(vas[area]);
2550 kfree(vms[area]);
ca23e405 2551 }
f1db7afd 2552err_free2:
ca23e405
TH
2553 kfree(vas);
2554 kfree(vms);
2555 return NULL;
2556}
2557
2558/**
2559 * pcpu_free_vm_areas - free vmalloc areas for percpu allocator
2560 * @vms: vm_struct pointer array returned by pcpu_get_vm_areas()
2561 * @nr_vms: the number of allocated areas
2562 *
2563 * Free vm_structs and the array allocated by pcpu_get_vm_areas().
2564 */
2565void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
2566{
2567 int i;
2568
2569 for (i = 0; i < nr_vms; i++)
2570 free_vm_area(vms[i]);
2571 kfree(vms);
2572}
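/*
 * Illustrative sketch, not part of vmalloc.c and only meaningful with
 * CONFIG_SMP: the percpu allocator asks for congruent areas, i.e. areas that
 * all sit at fixed offsets from one common base. The offsets and sizes below
 * are hypothetical; they must be aligned to @align and must not overlap.
 */
#include <linux/mm.h>
#include <linux/vmalloc.h>

static struct vm_struct **example_get_congruent_areas(void)
{
	static const unsigned long offsets[] = { 0, 4 * PAGE_SIZE };
	static const size_t sizes[] = { 2 * PAGE_SIZE, 2 * PAGE_SIZE };

	/* on success, vms[i]->addr == base + offsets[i] for a single base;
	 * release with pcpu_free_vm_areas(vms, 2) */
	return pcpu_get_vm_areas(offsets, sizes, 2, PAGE_SIZE);
}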
4f8b02b4 2573#endif /* CONFIG_SMP */
a10aa579
CL
2574
2575#ifdef CONFIG_PROC_FS
2576static void *s_start(struct seq_file *m, loff_t *pos)
d4033afd 2577 __acquires(&vmap_area_lock)
a10aa579
CL
2578{
2579 loff_t n = *pos;
d4033afd 2580 struct vmap_area *va;
a10aa579 2581
d4033afd 2582 spin_lock(&vmap_area_lock);
6219c2a2 2583 va = list_first_entry(&vmap_area_list, typeof(*va), list);
d4033afd 2584 while (n > 0 && &va->list != &vmap_area_list) {
a10aa579 2585 n--;
6219c2a2 2586 va = list_next_entry(va, list);
a10aa579 2587 }
d4033afd
JK
2588 if (!n && &va->list != &vmap_area_list)
2589 return va;
a10aa579
CL
2590
2591 return NULL;
2592
2593}
2594
2595static void *s_next(struct seq_file *m, void *p, loff_t *pos)
2596{
d4033afd 2597 struct vmap_area *va = p, *next;
a10aa579
CL
2598
2599 ++*pos;
6219c2a2 2600 next = list_next_entry(va, list);
d4033afd
JK
2601 if (&next->list != &vmap_area_list)
2602 return next;
2603
2604 return NULL;
a10aa579
CL
2605}
2606
2607static void s_stop(struct seq_file *m, void *p)
d4033afd 2608 __releases(&vmap_area_lock)
a10aa579 2609{
d4033afd 2610 spin_unlock(&vmap_area_lock);
a10aa579
CL
2611}
2612
a47a126a
ED
2613static void show_numa_info(struct seq_file *m, struct vm_struct *v)
2614{
e5adfffc 2615 if (IS_ENABLED(CONFIG_NUMA)) {
a47a126a
ED
2616 unsigned int nr, *counters = m->private;
2617
2618 if (!counters)
2619 return;
2620
af12346c
WL
2621 if (v->flags & VM_UNINITIALIZED)
2622 return;
7e5b528b
DV
2623 /* Pair with smp_wmb() in clear_vm_uninitialized_flag() */
2624 smp_rmb();
af12346c 2625
a47a126a
ED
2626 memset(counters, 0, nr_node_ids * sizeof(unsigned int));
2627
2628 for (nr = 0; nr < v->nr_pages; nr++)
2629 counters[page_to_nid(v->pages[nr])]++;
2630
2631 for_each_node_state(nr, N_HIGH_MEMORY)
2632 if (counters[nr])
2633 seq_printf(m, " N%u=%u", nr, counters[nr]);
2634 }
2635}
2636
a10aa579
CL
2637static int s_show(struct seq_file *m, void *p)
2638{
d4033afd
JK
2639 struct vmap_area *va = p;
2640 struct vm_struct *v;
2641
c2ce8c14
WL
2642 /*
2643 * s_show can race with remove_vm_area(): !VM_VM_AREA means the vmap
2644 * area is being torn down or belongs to a vm_map_ram allocation.
2645 */
2646 if (!(va->flags & VM_VM_AREA))
d4033afd 2647 return 0;
d4033afd
JK
2648
2649 v = va->vm;
a10aa579 2650
45ec1690 2651 seq_printf(m, "0x%pK-0x%pK %7ld",
a10aa579
CL
2652 v->addr, v->addr + v->size, v->size);
2653
62c70bce
JP
2654 if (v->caller)
2655 seq_printf(m, " %pS", v->caller);
23016969 2656
a10aa579
CL
2657 if (v->nr_pages)
2658 seq_printf(m, " pages=%d", v->nr_pages);
2659
2660 if (v->phys_addr)
ffa71f33 2661 seq_printf(m, " phys=%llx", (unsigned long long)v->phys_addr);
a10aa579
CL
2662
2663 if (v->flags & VM_IOREMAP)
f4527c90 2664 seq_puts(m, " ioremap");
a10aa579
CL
2665
2666 if (v->flags & VM_ALLOC)
f4527c90 2667 seq_puts(m, " vmalloc");
a10aa579
CL
2668
2669 if (v->flags & VM_MAP)
f4527c90 2670 seq_puts(m, " vmap");
a10aa579
CL
2671
2672 if (v->flags & VM_USERMAP)
f4527c90 2673 seq_puts(m, " user");
a10aa579 2674
244d63ee 2675 if (is_vmalloc_addr(v->pages))
f4527c90 2676 seq_puts(m, " vpages");
a10aa579 2677
a47a126a 2678 show_numa_info(m, v);
a10aa579
CL
2679 seq_putc(m, '\n');
2680 return 0;
2681}
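/*
 * Illustrative sketch, not taken from a real system: a line emitted by
 * s_show() above for /proc/vmallocinfo looks roughly like
 *
 *   0xffffc90000d00000-0xffffc90000d21000  135168 alloc_large_system_hash+0x... pages=32 vmalloc N0=32
 *
 * i.e. the virtual range, its size in bytes (data pages plus the guard page),
 * the caller, the page count, the area type and the per-node page counts.
 */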
2682
5f6a6a9c 2683static const struct seq_operations vmalloc_op = {
a10aa579
CL
2684 .start = s_start,
2685 .next = s_next,
2686 .stop = s_stop,
2687 .show = s_show,
2688};
5f6a6a9c
AD
2689
2690static int vmalloc_open(struct inode *inode, struct file *file)
2691{
703394c1
RJ
2692 if (IS_ENABLED(CONFIG_NUMA))
2693 return seq_open_private(file, &vmalloc_op,
2694 nr_node_ids * sizeof(unsigned int));
2695 else
2696 return seq_open(file, &vmalloc_op);
5f6a6a9c
AD
2697}
2698
2699static const struct file_operations proc_vmalloc_operations = {
2700 .open = vmalloc_open,
2701 .read = seq_read,
2702 .llseek = seq_lseek,
2703 .release = seq_release_private,
2704};
2705
2706static int __init proc_vmalloc_init(void)
2707{
2708 proc_create("vmallocinfo", S_IRUSR, NULL, &proc_vmalloc_operations);
2709 return 0;
2710}
2711module_init(proc_vmalloc_init);
db3808c1 2712
a10aa579
CL
2713#endif
2714