| 1 | /* |
| 2 | * linux/mm/vmalloc.c |
| 3 | * |
| 4 | * Copyright (C) 1993 Linus Torvalds |
| 5 | * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999 |
| 6 | * SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000 |
| 7 | * Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002 |
| 8 | * Numa awareness, Christoph Lameter, SGI, June 2005 |
| 9 | */ |
| 10 | |
| 11 | #include <linux/vmalloc.h> |
| 12 | #include <linux/mm.h> |
| 13 | #include <linux/module.h> |
| 14 | #include <linux/highmem.h> |
| 15 | #include <linux/sched/signal.h> |
| 16 | #include <linux/slab.h> |
| 17 | #include <linux/spinlock.h> |
| 18 | #include <linux/interrupt.h> |
| 19 | #include <linux/proc_fs.h> |
| 20 | #include <linux/seq_file.h> |
| 21 | #include <linux/debugobjects.h> |
| 22 | #include <linux/kallsyms.h> |
| 23 | #include <linux/list.h> |
| 24 | #include <linux/notifier.h> |
| 25 | #include <linux/rbtree.h> |
| 26 | #include <linux/radix-tree.h> |
| 27 | #include <linux/rcupdate.h> |
| 28 | #include <linux/pfn.h> |
| 29 | #include <linux/kmemleak.h> |
| 30 | #include <linux/atomic.h> |
| 31 | #include <linux/compiler.h> |
| 32 | #include <linux/llist.h> |
| 33 | #include <linux/bitops.h> |
| 34 | |
| 35 | #include <linux/uaccess.h> |
| 36 | #include <asm/tlbflush.h> |
| 37 | #include <asm/shmparam.h> |
| 38 | |
| 39 | #include "internal.h" |
| 40 | |
| 41 | struct vfree_deferred { |
| 42 | struct llist_head list; |
| 43 | struct work_struct wq; |
| 44 | }; |
| 45 | static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred); |
| 46 | |
| 47 | static void __vunmap(const void *, int); |
| 48 | |
| 49 | static void free_work(struct work_struct *w) |
| 50 | { |
| 51 | struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq); |
| 52 | struct llist_node *t, *llnode; |
| 53 | |
| 54 | llist_for_each_safe(llnode, t, llist_del_all(&p->list)) |
| 55 | __vunmap((void *)llnode, 1); |
| 56 | } |
| 57 | |
| 58 | /*** Page table manipulation functions ***/ |
| 59 | |
| 60 | static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end) |
| 61 | { |
| 62 | pte_t *pte; |
| 63 | |
| 64 | pte = pte_offset_kernel(pmd, addr); |
| 65 | do { |
| 66 | pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte); |
| 67 | WARN_ON(!pte_none(ptent) && !pte_present(ptent)); |
| 68 | } while (pte++, addr += PAGE_SIZE, addr != end); |
| 69 | } |
| 70 | |
| 71 | static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end) |
| 72 | { |
| 73 | pmd_t *pmd; |
| 74 | unsigned long next; |
| 75 | |
| 76 | pmd = pmd_offset(pud, addr); |
| 77 | do { |
| 78 | next = pmd_addr_end(addr, end); |
| 79 | if (pmd_clear_huge(pmd)) |
| 80 | continue; |
| 81 | if (pmd_none_or_clear_bad(pmd)) |
| 82 | continue; |
| 83 | vunmap_pte_range(pmd, addr, next); |
| 84 | } while (pmd++, addr = next, addr != end); |
| 85 | } |
| 86 | |
| 87 | static void vunmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end) |
| 88 | { |
| 89 | pud_t *pud; |
| 90 | unsigned long next; |
| 91 | |
| 92 | pud = pud_offset(p4d, addr); |
| 93 | do { |
| 94 | next = pud_addr_end(addr, end); |
| 95 | if (pud_clear_huge(pud)) |
| 96 | continue; |
| 97 | if (pud_none_or_clear_bad(pud)) |
| 98 | continue; |
| 99 | vunmap_pmd_range(pud, addr, next); |
| 100 | } while (pud++, addr = next, addr != end); |
| 101 | } |
| 102 | |
| 103 | static void vunmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end) |
| 104 | { |
| 105 | p4d_t *p4d; |
| 106 | unsigned long next; |
| 107 | |
| 108 | p4d = p4d_offset(pgd, addr); |
| 109 | do { |
| 110 | next = p4d_addr_end(addr, end); |
| 111 | if (p4d_clear_huge(p4d)) |
| 112 | continue; |
| 113 | if (p4d_none_or_clear_bad(p4d)) |
| 114 | continue; |
| 115 | vunmap_pud_range(p4d, addr, next); |
| 116 | } while (p4d++, addr = next, addr != end); |
| 117 | } |
| 118 | |
| 119 | static void vunmap_page_range(unsigned long addr, unsigned long end) |
| 120 | { |
| 121 | pgd_t *pgd; |
| 122 | unsigned long next; |
| 123 | |
| 124 | BUG_ON(addr >= end); |
| 125 | pgd = pgd_offset_k(addr); |
| 126 | do { |
| 127 | next = pgd_addr_end(addr, end); |
| 128 | if (pgd_none_or_clear_bad(pgd)) |
| 129 | continue; |
| 130 | vunmap_p4d_range(pgd, addr, next); |
| 131 | } while (pgd++, addr = next, addr != end); |
| 132 | } |
| 133 | |
| 134 | static int vmap_pte_range(pmd_t *pmd, unsigned long addr, |
| 135 | unsigned long end, pgprot_t prot, struct page **pages, int *nr) |
| 136 | { |
| 137 | pte_t *pte; |
| 138 | |
| 139 | /* |
| 140 | * nr is a running index into the array which helps higher level |
| 141 | * callers keep track of where we're up to. |
| 142 | */ |
| 143 | |
| 144 | pte = pte_alloc_kernel(pmd, addr); |
| 145 | if (!pte) |
| 146 | return -ENOMEM; |
| 147 | do { |
| 148 | struct page *page = pages[*nr]; |
| 149 | |
| 150 | if (WARN_ON(!pte_none(*pte))) |
| 151 | return -EBUSY; |
| 152 | if (WARN_ON(!page)) |
| 153 | return -ENOMEM; |
| 154 | set_pte_at(&init_mm, addr, pte, mk_pte(page, prot)); |
| 155 | (*nr)++; |
| 156 | } while (pte++, addr += PAGE_SIZE, addr != end); |
| 157 | return 0; |
| 158 | } |
| 159 | |
| 160 | static int vmap_pmd_range(pud_t *pud, unsigned long addr, |
| 161 | unsigned long end, pgprot_t prot, struct page **pages, int *nr) |
| 162 | { |
| 163 | pmd_t *pmd; |
| 164 | unsigned long next; |
| 165 | |
| 166 | pmd = pmd_alloc(&init_mm, pud, addr); |
| 167 | if (!pmd) |
| 168 | return -ENOMEM; |
| 169 | do { |
| 170 | next = pmd_addr_end(addr, end); |
| 171 | if (vmap_pte_range(pmd, addr, next, prot, pages, nr)) |
| 172 | return -ENOMEM; |
| 173 | } while (pmd++, addr = next, addr != end); |
| 174 | return 0; |
| 175 | } |
| 176 | |
| 177 | static int vmap_pud_range(p4d_t *p4d, unsigned long addr, |
| 178 | unsigned long end, pgprot_t prot, struct page **pages, int *nr) |
| 179 | { |
| 180 | pud_t *pud; |
| 181 | unsigned long next; |
| 182 | |
| 183 | pud = pud_alloc(&init_mm, p4d, addr); |
| 184 | if (!pud) |
| 185 | return -ENOMEM; |
| 186 | do { |
| 187 | next = pud_addr_end(addr, end); |
| 188 | if (vmap_pmd_range(pud, addr, next, prot, pages, nr)) |
| 189 | return -ENOMEM; |
| 190 | } while (pud++, addr = next, addr != end); |
| 191 | return 0; |
| 192 | } |
| 193 | |
| 194 | static int vmap_p4d_range(pgd_t *pgd, unsigned long addr, |
| 195 | unsigned long end, pgprot_t prot, struct page **pages, int *nr) |
| 196 | { |
| 197 | p4d_t *p4d; |
| 198 | unsigned long next; |
| 199 | |
| 200 | p4d = p4d_alloc(&init_mm, pgd, addr); |
| 201 | if (!p4d) |
| 202 | return -ENOMEM; |
| 203 | do { |
| 204 | next = p4d_addr_end(addr, end); |
| 205 | if (vmap_pud_range(p4d, addr, next, prot, pages, nr)) |
| 206 | return -ENOMEM; |
| 207 | } while (p4d++, addr = next, addr != end); |
| 208 | return 0; |
| 209 | } |
| 210 | |
| 211 | /* |
| 212 | * Set up page tables in kva (addr, end). The ptes shall have prot "prot", and |
| 213 | * will have pfns corresponding to the "pages" array. |
| 214 | * |
| 215 | * Ie. pte at addr+N*PAGE_SIZE shall point to pfn corresponding to pages[N] |
| 216 | */ |
| 217 | static int vmap_page_range_noflush(unsigned long start, unsigned long end, |
| 218 | pgprot_t prot, struct page **pages) |
| 219 | { |
| 220 | pgd_t *pgd; |
| 221 | unsigned long next; |
| 222 | unsigned long addr = start; |
| 223 | int err = 0; |
| 224 | int nr = 0; |
| 225 | |
| 226 | BUG_ON(addr >= end); |
| 227 | pgd = pgd_offset_k(addr); |
| 228 | do { |
| 229 | next = pgd_addr_end(addr, end); |
| 230 | err = vmap_p4d_range(pgd, addr, next, prot, pages, &nr); |
| 231 | if (err) |
| 232 | return err; |
| 233 | } while (pgd++, addr = next, addr != end); |
| 234 | |
| 235 | return nr; |
| 236 | } |
| 237 | |
| 238 | static int vmap_page_range(unsigned long start, unsigned long end, |
| 239 | pgprot_t prot, struct page **pages) |
| 240 | { |
| 241 | int ret; |
| 242 | |
| 243 | ret = vmap_page_range_noflush(start, end, prot, pages); |
| 244 | flush_cache_vmap(start, end); |
| 245 | return ret; |
| 246 | } |
| 247 | |
| 248 | int is_vmalloc_or_module_addr(const void *x) |
| 249 | { |
| 250 | /* |
| 251 | * ARM, x86-64 and sparc64 put modules in a special place, |
| 252 | * and fall back on vmalloc() if that fails. Others |
| 253 | * just put them in the vmalloc space. |
| 254 | */ |
| 255 | #if defined(CONFIG_MODULES) && defined(MODULES_VADDR) |
| 256 | unsigned long addr = (unsigned long)x; |
| 257 | if (addr >= MODULES_VADDR && addr < MODULES_END) |
| 258 | return 1; |
| 259 | #endif |
| 260 | return is_vmalloc_addr(x); |
| 261 | } |
| 262 | |
| 263 | /* |
| 264 | * Walk a vmap address to the struct page it maps. |
| 265 | */ |
| 266 | struct page *vmalloc_to_page(const void *vmalloc_addr) |
| 267 | { |
| 268 | unsigned long addr = (unsigned long) vmalloc_addr; |
| 269 | struct page *page = NULL; |
| 270 | pgd_t *pgd = pgd_offset_k(addr); |
| 271 | p4d_t *p4d; |
| 272 | pud_t *pud; |
| 273 | pmd_t *pmd; |
| 274 | pte_t *ptep, pte; |
| 275 | |
| 276 | /* |
| 277 | * XXX we might need to change this if we add VIRTUAL_BUG_ON for |
| 278 | * architectures that do not vmalloc module space |
| 279 | */ |
| 280 | VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr)); |
| 281 | |
| 282 | if (pgd_none(*pgd)) |
| 283 | return NULL; |
| 284 | p4d = p4d_offset(pgd, addr); |
| 285 | if (p4d_none(*p4d)) |
| 286 | return NULL; |
| 287 | pud = pud_offset(p4d, addr); |
| 288 | |
| 289 | /* |
| 290 | * Don't dereference bad PUD or PMD (below) entries. This will also |
| 291 | * identify huge mappings, which we may encounter on architectures |
| 292 | * that define CONFIG_HAVE_ARCH_HUGE_VMAP=y. Such regions will be |
| 293 | * identified as vmalloc addresses by is_vmalloc_addr(), but are |
| 294 | * not [unambiguously] associated with a struct page, so there is |
| 295 | * no correct value to return for them. |
| 296 | */ |
| 297 | WARN_ON_ONCE(pud_bad(*pud)); |
| 298 | if (pud_none(*pud) || pud_bad(*pud)) |
| 299 | return NULL; |
| 300 | pmd = pmd_offset(pud, addr); |
| 301 | WARN_ON_ONCE(pmd_bad(*pmd)); |
| 302 | if (pmd_none(*pmd) || pmd_bad(*pmd)) |
| 303 | return NULL; |
| 304 | |
| 305 | ptep = pte_offset_map(pmd, addr); |
| 306 | pte = *ptep; |
| 307 | if (pte_present(pte)) |
| 308 | page = pte_page(pte); |
| 309 | pte_unmap(ptep); |
| 310 | return page; |
| 311 | } |
| 312 | EXPORT_SYMBOL(vmalloc_to_page); |
| 313 | |
| 314 | /* |
| 315 | * Map a vmalloc()-space virtual address to the physical page frame number. |
| 316 | */ |
| 317 | unsigned long vmalloc_to_pfn(const void *vmalloc_addr) |
| 318 | { |
| 319 | return page_to_pfn(vmalloc_to_page(vmalloc_addr)); |
| 320 | } |
| 321 | EXPORT_SYMBOL(vmalloc_to_pfn); |
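| | /* |
| |  * Illustrative sketch, not part of this file: a caller that needs the |
| |  * struct page backing one page of a vmalloc'd buffer, e.g. to build a |
| |  * scatterlist. The buffer name is hypothetical. |
| |  * |
| |  *	void *buf = vmalloc(4 * PAGE_SIZE); |
| |  * |
| |  *	if (buf) { |
| |  *		struct page *pg = vmalloc_to_page(buf + 2 * PAGE_SIZE); |
| |  * |
| |  *		pr_debug("third page: pfn %lx\n", page_to_pfn(pg)); |
| |  *		vfree(buf); |
| |  *	} |
| |  */ |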
| 322 | |
| 323 | |
| 324 | /*** Global kva allocator ***/ |
| 325 | |
| 326 | #define VM_LAZY_FREE 0x02 |
| 327 | #define VM_VM_AREA 0x04 |
| 328 | |
| 329 | static DEFINE_SPINLOCK(vmap_area_lock); |
| 330 | /* Export for kexec only */ |
| 331 | LIST_HEAD(vmap_area_list); |
| 332 | static LLIST_HEAD(vmap_purge_list); |
| 333 | static struct rb_root vmap_area_root = RB_ROOT; |
| 334 | |
| 335 | /* The vmap cache globals are protected by vmap_area_lock */ |
| 336 | static struct rb_node *free_vmap_cache; |
| 337 | static unsigned long cached_hole_size; |
| 338 | static unsigned long cached_vstart; |
| 339 | static unsigned long cached_align; |
| 340 | |
| 341 | static unsigned long vmap_area_pcpu_hole; |
| 342 | |
| 343 | static struct vmap_area *__find_vmap_area(unsigned long addr) |
| 344 | { |
| 345 | struct rb_node *n = vmap_area_root.rb_node; |
| 346 | |
| 347 | while (n) { |
| 348 | struct vmap_area *va; |
| 349 | |
| 350 | va = rb_entry(n, struct vmap_area, rb_node); |
| 351 | if (addr < va->va_start) |
| 352 | n = n->rb_left; |
| 353 | else if (addr >= va->va_end) |
| 354 | n = n->rb_right; |
| 355 | else |
| 356 | return va; |
| 357 | } |
| 358 | |
| 359 | return NULL; |
| 360 | } |
| 361 | |
| 362 | static void __insert_vmap_area(struct vmap_area *va) |
| 363 | { |
| 364 | struct rb_node **p = &vmap_area_root.rb_node; |
| 365 | struct rb_node *parent = NULL; |
| 366 | struct rb_node *tmp; |
| 367 | |
| 368 | while (*p) { |
| 369 | struct vmap_area *tmp_va; |
| 370 | |
| 371 | parent = *p; |
| 372 | tmp_va = rb_entry(parent, struct vmap_area, rb_node); |
| 373 | if (va->va_start < tmp_va->va_end) |
| 374 | p = &(*p)->rb_left; |
| 375 | else if (va->va_end > tmp_va->va_start) |
| 376 | p = &(*p)->rb_right; |
| 377 | else |
| 378 | BUG(); |
| 379 | } |
| 380 | |
| 381 | rb_link_node(&va->rb_node, parent, p); |
| 382 | rb_insert_color(&va->rb_node, &vmap_area_root); |
| 383 | |
| 384 | /* address-sort this list */ |
| 385 | tmp = rb_prev(&va->rb_node); |
| 386 | if (tmp) { |
| 387 | struct vmap_area *prev; |
| 388 | prev = rb_entry(tmp, struct vmap_area, rb_node); |
| 389 | list_add_rcu(&va->list, &prev->list); |
| 390 | } else |
| 391 | list_add_rcu(&va->list, &vmap_area_list); |
| 392 | } |
| 393 | |
| 394 | static void purge_vmap_area_lazy(void); |
| 395 | |
| 396 | static BLOCKING_NOTIFIER_HEAD(vmap_notify_list); |
| 397 | |
| 398 | /* |
| 399 | * Allocate a region of KVA of the specified size and alignment, within the |
| 400 | * vstart and vend. |
| 401 | */ |
| 402 | static struct vmap_area *alloc_vmap_area(unsigned long size, |
| 403 | unsigned long align, |
| 404 | unsigned long vstart, unsigned long vend, |
| 405 | int node, gfp_t gfp_mask) |
| 406 | { |
| 407 | struct vmap_area *va; |
| 408 | struct rb_node *n; |
| 409 | unsigned long addr; |
| 410 | int purged = 0; |
| 411 | struct vmap_area *first; |
| 412 | |
| 413 | BUG_ON(!size); |
| 414 | BUG_ON(offset_in_page(size)); |
| 415 | BUG_ON(!is_power_of_2(align)); |
| 416 | |
| 417 | might_sleep(); |
| 418 | |
| 419 | va = kmalloc_node(sizeof(struct vmap_area), |
| 420 | gfp_mask & GFP_RECLAIM_MASK, node); |
| 421 | if (unlikely(!va)) |
| 422 | return ERR_PTR(-ENOMEM); |
| 423 | |
| 424 | /* |
| 425 | * Only scan the relevant parts containing pointers to other objects |
| 426 | * to avoid false negatives. |
| 427 | */ |
| 428 | kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask & GFP_RECLAIM_MASK); |
| 429 | |
| 430 | retry: |
| 431 | spin_lock(&vmap_area_lock); |
| 432 | /* |
| 433 | * Invalidate cache if we have more permissive parameters. |
| 434 | * cached_hole_size notes the largest hole noticed _below_ |
| 435 | * the vmap_area cached in free_vmap_cache: if size fits |
| 436 | * into that hole, we want to scan from vstart to reuse |
| 437 | * the hole instead of allocating above free_vmap_cache. |
| 438 | * Note that __free_vmap_area may update free_vmap_cache |
| 439 | * without updating cached_hole_size or cached_align. |
| 440 | */ |
| 441 | if (!free_vmap_cache || |
| 442 | size < cached_hole_size || |
| 443 | vstart < cached_vstart || |
| 444 | align < cached_align) { |
| 445 | nocache: |
| 446 | cached_hole_size = 0; |
| 447 | free_vmap_cache = NULL; |
| 448 | } |
| 449 | /* record if we encounter less permissive parameters */ |
| 450 | cached_vstart = vstart; |
| 451 | cached_align = align; |
| 452 | |
| 453 | /* find starting point for our search */ |
| 454 | if (free_vmap_cache) { |
| 455 | first = rb_entry(free_vmap_cache, struct vmap_area, rb_node); |
| 456 | addr = ALIGN(first->va_end, align); |
| 457 | if (addr < vstart) |
| 458 | goto nocache; |
| 459 | if (addr + size < addr) |
| 460 | goto overflow; |
| 461 | |
| 462 | } else { |
| 463 | addr = ALIGN(vstart, align); |
| 464 | if (addr + size < addr) |
| 465 | goto overflow; |
| 466 | |
| 467 | n = vmap_area_root.rb_node; |
| 468 | first = NULL; |
| 469 | |
| 470 | while (n) { |
| 471 | struct vmap_area *tmp; |
| 472 | tmp = rb_entry(n, struct vmap_area, rb_node); |
| 473 | if (tmp->va_end >= addr) { |
| 474 | first = tmp; |
| 475 | if (tmp->va_start <= addr) |
| 476 | break; |
| 477 | n = n->rb_left; |
| 478 | } else |
| 479 | n = n->rb_right; |
| 480 | } |
| 481 | |
| 482 | if (!first) |
| 483 | goto found; |
| 484 | } |
| 485 | |
| 486 | /* from the starting point, walk areas until a suitable hole is found */ |
| 487 | while (addr + size > first->va_start && addr + size <= vend) { |
| 488 | if (addr + cached_hole_size < first->va_start) |
| 489 | cached_hole_size = first->va_start - addr; |
| 490 | addr = ALIGN(first->va_end, align); |
| 491 | if (addr + size < addr) |
| 492 | goto overflow; |
| 493 | |
| 494 | if (list_is_last(&first->list, &vmap_area_list)) |
| 495 | goto found; |
| 496 | |
| 497 | first = list_next_entry(first, list); |
| 498 | } |
| 499 | |
| 500 | found: |
| 501 | if (addr + size > vend) |
| 502 | goto overflow; |
| 503 | |
| 504 | va->va_start = addr; |
| 505 | va->va_end = addr + size; |
| 506 | va->flags = 0; |
| 507 | __insert_vmap_area(va); |
| 508 | free_vmap_cache = &va->rb_node; |
| 509 | spin_unlock(&vmap_area_lock); |
| 510 | |
| 511 | BUG_ON(!IS_ALIGNED(va->va_start, align)); |
| 512 | BUG_ON(va->va_start < vstart); |
| 513 | BUG_ON(va->va_end > vend); |
| 514 | |
| 515 | return va; |
| 516 | |
| 517 | overflow: |
| 518 | spin_unlock(&vmap_area_lock); |
| 519 | if (!purged) { |
| 520 | purge_vmap_area_lazy(); |
| 521 | purged = 1; |
| 522 | goto retry; |
| 523 | } |
| 524 | |
| 525 | if (gfpflags_allow_blocking(gfp_mask)) { |
| 526 | unsigned long freed = 0; |
| 527 | blocking_notifier_call_chain(&vmap_notify_list, 0, &freed); |
| 528 | if (freed > 0) { |
| 529 | purged = 0; |
| 530 | goto retry; |
| 531 | } |
| 532 | } |
| 533 | |
| 534 | if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit()) |
| 535 | pr_warn("vmap allocation for size %lu failed: use vmalloc=<size> to increase size\n", |
| 536 | size); |
| 537 | kfree(va); |
| 538 | return ERR_PTR(-EBUSY); |
| 539 | } |
| 540 | |
| 541 | int register_vmap_purge_notifier(struct notifier_block *nb) |
| 542 | { |
| 543 | return blocking_notifier_chain_register(&vmap_notify_list, nb); |
| 544 | } |
| 545 | EXPORT_SYMBOL_GPL(register_vmap_purge_notifier); |
| 546 | |
| 547 | int unregister_vmap_purge_notifier(struct notifier_block *nb) |
| 548 | { |
| 549 | return blocking_notifier_chain_unregister(&vmap_notify_list, nb); |
| 550 | } |
| 551 | EXPORT_SYMBOL_GPL(unregister_vmap_purge_notifier); |
| 552 | |
| 553 | static void __free_vmap_area(struct vmap_area *va) |
| 554 | { |
| 555 | BUG_ON(RB_EMPTY_NODE(&va->rb_node)); |
| 556 | |
| 557 | if (free_vmap_cache) { |
| 558 | if (va->va_end < cached_vstart) { |
| 559 | free_vmap_cache = NULL; |
| 560 | } else { |
| 561 | struct vmap_area *cache; |
| 562 | cache = rb_entry(free_vmap_cache, struct vmap_area, rb_node); |
| 563 | if (va->va_start <= cache->va_start) { |
| 564 | free_vmap_cache = rb_prev(&va->rb_node); |
| 565 | /* |
| 566 | * We don't try to update cached_hole_size or |
| 567 | * cached_align, but it won't go very wrong. |
| 568 | */ |
| 569 | } |
| 570 | } |
| 571 | } |
| 572 | rb_erase(&va->rb_node, &vmap_area_root); |
| 573 | RB_CLEAR_NODE(&va->rb_node); |
| 574 | list_del_rcu(&va->list); |
| 575 | |
| 576 | /* |
| 577 | * Track the highest possible candidate for pcpu area |
| 578 | * allocation. Areas outside of the vmalloc area can be returned |
| 579 | * here too, so consider only end addresses which fall inside the |
| 580 | * vmalloc area proper. |
| 581 | */ |
| 582 | if (va->va_end > VMALLOC_START && va->va_end <= VMALLOC_END) |
| 583 | vmap_area_pcpu_hole = max(vmap_area_pcpu_hole, va->va_end); |
| 584 | |
| 585 | kfree_rcu(va, rcu_head); |
| 586 | } |
| 587 | |
| 588 | /* |
| 589 | * Free a region of KVA allocated by alloc_vmap_area |
| 590 | */ |
| 591 | static void free_vmap_area(struct vmap_area *va) |
| 592 | { |
| 593 | spin_lock(&vmap_area_lock); |
| 594 | __free_vmap_area(va); |
| 595 | spin_unlock(&vmap_area_lock); |
| 596 | } |
| 597 | |
| 598 | /* |
| 599 | * Clear the pagetable entries of a given vmap_area |
| 600 | */ |
| 601 | static void unmap_vmap_area(struct vmap_area *va) |
| 602 | { |
| 603 | vunmap_page_range(va->va_start, va->va_end); |
| 604 | } |
| 605 | |
| 606 | static void vmap_debug_free_range(unsigned long start, unsigned long end) |
| 607 | { |
| 608 | /* |
| 609 | * Unmap page tables and force a TLB flush immediately if pagealloc |
| 610 | * debugging is enabled. This catches use after free bugs similarly to |
| 611 | * those in linear kernel virtual address space after a page has been |
| 612 | * freed. |
| 613 | * |
| 614 | * All the lazy freeing logic is still retained, in order to minimise |
| 615 | * intrusiveness of this debugging feature. |
| 616 | * |
| 617 | * This is going to be *slow* (debugging of the linear kernel virtual |
| 618 | * address space doesn't need a broadcast TLB flush, so it is a lot faster). |
| 619 | */ |
| 620 | if (debug_pagealloc_enabled()) { |
| 621 | vunmap_page_range(start, end); |
| 622 | flush_tlb_kernel_range(start, end); |
| 623 | } |
| 624 | } |
| 625 | |
| 626 | /* |
| 627 | * lazy_max_pages is the maximum amount of virtual address space we gather up |
| 628 | * before attempting to purge with a TLB flush. |
| 629 | * |
| 630 | * There is a tradeoff here: a larger number will cover more kernel page tables |
| 631 | * and take slightly longer to purge, but it will linearly reduce the number of |
| 632 | * global TLB flushes that must be performed. It would seem natural to scale |
| 633 | * this number up linearly with the number of CPUs (because vmapping activity |
| 634 | * could also scale linearly with the number of CPUs), however it is likely |
| 635 | * that in practice, workloads might be constrained in other ways that mean |
| 636 | * vmap activity will not scale linearly with CPUs. Also, I want to be |
| 637 | * conservative and not introduce a big latency on huge systems, so go with |
| 638 | * a less aggressive log scale. It will still be an improvement over the old |
| 639 | * code, and it will be simple to change the scale factor if we find that it |
| 640 | * becomes a problem on bigger systems. |
| 641 | */ |
| 642 | static unsigned long lazy_max_pages(void) |
| 643 | { |
| 644 | unsigned int log; |
| 645 | |
| 646 | log = fls(num_online_cpus()); |
| 647 | |
| 648 | return log * (32UL * 1024 * 1024 / PAGE_SIZE); |
| 649 | } |
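| | /* |
| |  * Worked example (assuming 4K pages): with 16 CPUs online, fls(16) = 5, so |
| |  * lazy_max_pages() = 5 * (32MB / 4KB) = 40960 pages, i.e. roughly 160MB of |
| |  * lazily freed vmap space may accumulate before a purge is attempted. |
| |  * Doubling the CPU count to 32 only raises that to 6 * 8192 pages, which is |
| |  * the "less aggressive log scale" mentioned above. |
| |  */ |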
| 650 | |
| 651 | static atomic_t vmap_lazy_nr = ATOMIC_INIT(0); |
| 652 | |
| 653 | /* |
| 654 | * Serialize vmap purging. There is no actual critical section protected |
| 655 | * by this lock, but we want to avoid concurrent calls for performance |
| 656 | * reasons and to make pcpu_get_vm_areas more deterministic. |
| 657 | */ |
| 658 | static DEFINE_MUTEX(vmap_purge_lock); |
| 659 | |
| 660 | /* for per-CPU blocks */ |
| 661 | static void purge_fragmented_blocks_allcpus(void); |
| 662 | |
| 663 | /* |
| 664 | * Called before a call to iounmap() if the caller wants the vm_area_struct |
| 665 | * freed immediately. |
| 666 | */ |
| 667 | void set_iounmap_nonlazy(void) |
| 668 | { |
| 669 | atomic_set(&vmap_lazy_nr, lazy_max_pages()+1); |
| 670 | } |
| 671 | |
| 672 | /* |
| 673 | * Purges all lazily-freed vmap areas. |
| 674 | */ |
| 675 | static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end) |
| 676 | { |
| 677 | struct llist_node *valist; |
| 678 | struct vmap_area *va; |
| 679 | struct vmap_area *n_va; |
| 680 | bool do_free = false; |
| 681 | |
| 682 | lockdep_assert_held(&vmap_purge_lock); |
| 683 | |
| 684 | valist = llist_del_all(&vmap_purge_list); |
| 685 | llist_for_each_entry(va, valist, purge_list) { |
| 686 | if (va->va_start < start) |
| 687 | start = va->va_start; |
| 688 | if (va->va_end > end) |
| 689 | end = va->va_end; |
| 690 | do_free = true; |
| 691 | } |
| 692 | |
| 693 | if (!do_free) |
| 694 | return false; |
| 695 | |
| 696 | flush_tlb_kernel_range(start, end); |
| 697 | |
| 698 | spin_lock(&vmap_area_lock); |
| 699 | llist_for_each_entry_safe(va, n_va, valist, purge_list) { |
| 700 | int nr = (va->va_end - va->va_start) >> PAGE_SHIFT; |
| 701 | |
| 702 | __free_vmap_area(va); |
| 703 | atomic_sub(nr, &vmap_lazy_nr); |
| 704 | cond_resched_lock(&vmap_area_lock); |
| 705 | } |
| 706 | spin_unlock(&vmap_area_lock); |
| 707 | return true; |
| 708 | } |
| 709 | |
| 710 | /* |
| 711 | * Kick off a purge of the outstanding lazy areas. Don't bother if somebody |
| 712 | * is already purging. |
| 713 | */ |
| 714 | static void try_purge_vmap_area_lazy(void) |
| 715 | { |
| 716 | if (mutex_trylock(&vmap_purge_lock)) { |
| 717 | __purge_vmap_area_lazy(ULONG_MAX, 0); |
| 718 | mutex_unlock(&vmap_purge_lock); |
| 719 | } |
| 720 | } |
| 721 | |
| 722 | /* |
| 723 | * Kick off a purge of the outstanding lazy areas. |
| 724 | */ |
| 725 | static void purge_vmap_area_lazy(void) |
| 726 | { |
| 727 | mutex_lock(&vmap_purge_lock); |
| 728 | purge_fragmented_blocks_allcpus(); |
| 729 | __purge_vmap_area_lazy(ULONG_MAX, 0); |
| 730 | mutex_unlock(&vmap_purge_lock); |
| 731 | } |
| 732 | |
| 733 | /* |
| 734 | * Free a vmap area, with the caller ensuring that the area has been |
| 735 | * unmapped and that flush_cache_vunmap has been called for the correct |
| 736 | * range previously. |
| 737 | */ |
| 738 | static void free_vmap_area_noflush(struct vmap_area *va) |
| 739 | { |
| 740 | int nr_lazy; |
| 741 | |
| 742 | nr_lazy = atomic_add_return((va->va_end - va->va_start) >> PAGE_SHIFT, |
| 743 | &vmap_lazy_nr); |
| 744 | |
| 745 | /* After this point, we may free va at any time */ |
| 746 | llist_add(&va->purge_list, &vmap_purge_list); |
| 747 | |
| 748 | if (unlikely(nr_lazy > lazy_max_pages())) |
| 749 | try_purge_vmap_area_lazy(); |
| 750 | } |
| 751 | |
| 752 | /* |
| 753 | * Free and unmap a vmap area |
| 754 | */ |
| 755 | static void free_unmap_vmap_area(struct vmap_area *va) |
| 756 | { |
| 757 | flush_cache_vunmap(va->va_start, va->va_end); |
| 758 | unmap_vmap_area(va); |
| 759 | free_vmap_area_noflush(va); |
| 760 | } |
| 761 | |
| 762 | static struct vmap_area *find_vmap_area(unsigned long addr) |
| 763 | { |
| 764 | struct vmap_area *va; |
| 765 | |
| 766 | spin_lock(&vmap_area_lock); |
| 767 | va = __find_vmap_area(addr); |
| 768 | spin_unlock(&vmap_area_lock); |
| 769 | |
| 770 | return va; |
| 771 | } |
| 772 | |
| 773 | /*** Per cpu kva allocator ***/ |
| 774 | |
| 775 | /* |
| 776 | * vmap space is limited especially on 32 bit architectures. Ensure there is |
| 777 | * room for at least 16 percpu vmap blocks per CPU. |
| 778 | */ |
| 779 | /* |
| 780 | * If we had a constant VMALLOC_START and VMALLOC_END, we'd like to be able |
| 781 | * to #define VMALLOC_SPACE (VMALLOC_END-VMALLOC_START). Guess |
| 782 | * instead (we just need a rough idea) |
| 783 | */ |
| 784 | #if BITS_PER_LONG == 32 |
| 785 | #define VMALLOC_SPACE (128UL*1024*1024) |
| 786 | #else |
| 787 | #define VMALLOC_SPACE (128UL*1024*1024*1024) |
| 788 | #endif |
| 789 | |
| 790 | #define VMALLOC_PAGES (VMALLOC_SPACE / PAGE_SIZE) |
| 791 | #define VMAP_MAX_ALLOC BITS_PER_LONG /* 256K with 4K pages */ |
| 792 | #define VMAP_BBMAP_BITS_MAX 1024 /* 4MB with 4K pages */ |
| 793 | #define VMAP_BBMAP_BITS_MIN (VMAP_MAX_ALLOC*2) |
| 794 | #define VMAP_MIN(x, y) ((x) < (y) ? (x) : (y)) /* can't use min() */ |
| 795 | #define VMAP_MAX(x, y) ((x) > (y) ? (x) : (y)) /* can't use max() */ |
| 796 | #define VMAP_BBMAP_BITS \ |
| 797 | VMAP_MIN(VMAP_BBMAP_BITS_MAX, \ |
| 798 | VMAP_MAX(VMAP_BBMAP_BITS_MIN, \ |
| 799 | VMALLOC_PAGES / roundup_pow_of_two(NR_CPUS) / 16)) |
| 800 | |
| 801 | #define VMAP_BLOCK_SIZE (VMAP_BBMAP_BITS * PAGE_SIZE) |
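| | /* |
| |  * Worked sizing example (assuming 4K pages): on 64-bit with NR_CPUS = 64, |
| |  * VMALLOC_PAGES = 128GB / 4KB = 32M and 32M / 64 / 16 = 32768, which the |
| |  * clamp above caps at VMAP_BBMAP_BITS_MAX, giving VMAP_BBMAP_BITS = 1024 |
| |  * and VMAP_BLOCK_SIZE = 4MB. On 32-bit with NR_CPUS = 8 the guess yields |
| |  * 32768 / 8 / 16 = 256 bits, i.e. 1MB blocks. |
| |  */ |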
| 802 | |
| 803 | static bool vmap_initialized __read_mostly = false; |
| 804 | |
| 805 | struct vmap_block_queue { |
| 806 | spinlock_t lock; |
| 807 | struct list_head free; |
| 808 | }; |
| 809 | |
| 810 | struct vmap_block { |
| 811 | spinlock_t lock; |
| 812 | struct vmap_area *va; |
| 813 | unsigned long free, dirty; |
| 814 | unsigned long dirty_min, dirty_max; /*< dirty range */ |
| 815 | struct list_head free_list; |
| 816 | struct rcu_head rcu_head; |
| 817 | struct list_head purge; |
| 818 | }; |
| 819 | |
| 820 | /* Queue of free and dirty vmap blocks, for allocation and flushing purposes */ |
| 821 | static DEFINE_PER_CPU(struct vmap_block_queue, vmap_block_queue); |
| 822 | |
| 823 | /* |
| 824 | * Radix tree of vmap blocks, indexed by address, to quickly find a vmap block |
| 825 | * in the free path. Could get rid of this if we change the API to return a |
| 826 | * "cookie" from alloc, to be passed to free. But no big deal yet. |
| 827 | */ |
| 828 | static DEFINE_SPINLOCK(vmap_block_tree_lock); |
| 829 | static RADIX_TREE(vmap_block_tree, GFP_ATOMIC); |
| 830 | |
| 831 | /* |
| 832 | * We should probably have a fallback mechanism to allocate virtual memory |
| 833 | * out of partially filled vmap blocks. However vmap block sizing should be |
| 834 | * fairly reasonable according to the vmalloc size, so it shouldn't be a |
| 835 | * big problem. |
| 836 | */ |
| 837 | |
| 838 | static unsigned long addr_to_vb_idx(unsigned long addr) |
| 839 | { |
| 840 | addr -= VMALLOC_START & ~(VMAP_BLOCK_SIZE-1); |
| 841 | addr /= VMAP_BLOCK_SIZE; |
| 842 | return addr; |
| 843 | } |
| 844 | |
| 845 | static void *vmap_block_vaddr(unsigned long va_start, unsigned long pages_off) |
| 846 | { |
| 847 | unsigned long addr; |
| 848 | |
| 849 | addr = va_start + (pages_off << PAGE_SHIFT); |
| 850 | BUG_ON(addr_to_vb_idx(addr) != addr_to_vb_idx(va_start)); |
| 851 | return (void *)addr; |
| 852 | } |
| 853 | |
| 854 | /** |
| 855 | * new_vmap_block - allocate a new vmap_block and occupy 2^order pages in it |
| 856 | * (the number of pages, of course, cannot exceed VMAP_BBMAP_BITS) |
| 857 | * @order: how many 2^order pages should be occupied in newly allocated block |
| 858 | * @gfp_mask: flags for the page level allocator |
| 859 | * |
| 860 | * Returns: virtual address in a newly allocated block or ERR_PTR(-errno) |
| 861 | */ |
| 862 | static void *new_vmap_block(unsigned int order, gfp_t gfp_mask) |
| 863 | { |
| 864 | struct vmap_block_queue *vbq; |
| 865 | struct vmap_block *vb; |
| 866 | struct vmap_area *va; |
| 867 | unsigned long vb_idx; |
| 868 | int node, err; |
| 869 | void *vaddr; |
| 870 | |
| 871 | node = numa_node_id(); |
| 872 | |
| 873 | vb = kmalloc_node(sizeof(struct vmap_block), |
| 874 | gfp_mask & GFP_RECLAIM_MASK, node); |
| 875 | if (unlikely(!vb)) |
| 876 | return ERR_PTR(-ENOMEM); |
| 877 | |
| 878 | va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE, |
| 879 | VMALLOC_START, VMALLOC_END, |
| 880 | node, gfp_mask); |
| 881 | if (IS_ERR(va)) { |
| 882 | kfree(vb); |
| 883 | return ERR_CAST(va); |
| 884 | } |
| 885 | |
| 886 | err = radix_tree_preload(gfp_mask); |
| 887 | if (unlikely(err)) { |
| 888 | kfree(vb); |
| 889 | free_vmap_area(va); |
| 890 | return ERR_PTR(err); |
| 891 | } |
| 892 | |
| 893 | vaddr = vmap_block_vaddr(va->va_start, 0); |
| 894 | spin_lock_init(&vb->lock); |
| 895 | vb->va = va; |
| 896 | /* At least something should be left free */ |
| 897 | BUG_ON(VMAP_BBMAP_BITS <= (1UL << order)); |
| 898 | vb->free = VMAP_BBMAP_BITS - (1UL << order); |
| 899 | vb->dirty = 0; |
| 900 | vb->dirty_min = VMAP_BBMAP_BITS; |
| 901 | vb->dirty_max = 0; |
| 902 | INIT_LIST_HEAD(&vb->free_list); |
| 903 | |
| 904 | vb_idx = addr_to_vb_idx(va->va_start); |
| 905 | spin_lock(&vmap_block_tree_lock); |
| 906 | err = radix_tree_insert(&vmap_block_tree, vb_idx, vb); |
| 907 | spin_unlock(&vmap_block_tree_lock); |
| 908 | BUG_ON(err); |
| 909 | radix_tree_preload_end(); |
| 910 | |
| 911 | vbq = &get_cpu_var(vmap_block_queue); |
| 912 | spin_lock(&vbq->lock); |
| 913 | list_add_tail_rcu(&vb->free_list, &vbq->free); |
| 914 | spin_unlock(&vbq->lock); |
| 915 | put_cpu_var(vmap_block_queue); |
| 916 | |
| 917 | return vaddr; |
| 918 | } |
| 919 | |
| 920 | static void free_vmap_block(struct vmap_block *vb) |
| 921 | { |
| 922 | struct vmap_block *tmp; |
| 923 | unsigned long vb_idx; |
| 924 | |
| 925 | vb_idx = addr_to_vb_idx(vb->va->va_start); |
| 926 | spin_lock(&vmap_block_tree_lock); |
| 927 | tmp = radix_tree_delete(&vmap_block_tree, vb_idx); |
| 928 | spin_unlock(&vmap_block_tree_lock); |
| 929 | BUG_ON(tmp != vb); |
| 930 | |
| 931 | free_vmap_area_noflush(vb->va); |
| 932 | kfree_rcu(vb, rcu_head); |
| 933 | } |
| 934 | |
| 935 | static void purge_fragmented_blocks(int cpu) |
| 936 | { |
| 937 | LIST_HEAD(purge); |
| 938 | struct vmap_block *vb; |
| 939 | struct vmap_block *n_vb; |
| 940 | struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu); |
| 941 | |
| 942 | rcu_read_lock(); |
| 943 | list_for_each_entry_rcu(vb, &vbq->free, free_list) { |
| 944 | |
| 945 | if (!(vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS)) |
| 946 | continue; |
| 947 | |
| 948 | spin_lock(&vb->lock); |
| 949 | if (vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS) { |
| 950 | vb->free = 0; /* prevent further allocs after releasing lock */ |
| 951 | vb->dirty = VMAP_BBMAP_BITS; /* prevent purging it again */ |
| 952 | vb->dirty_min = 0; |
| 953 | vb->dirty_max = VMAP_BBMAP_BITS; |
| 954 | spin_lock(&vbq->lock); |
| 955 | list_del_rcu(&vb->free_list); |
| 956 | spin_unlock(&vbq->lock); |
| 957 | spin_unlock(&vb->lock); |
| 958 | list_add_tail(&vb->purge, &purge); |
| 959 | } else |
| 960 | spin_unlock(&vb->lock); |
| 961 | } |
| 962 | rcu_read_unlock(); |
| 963 | |
| 964 | list_for_each_entry_safe(vb, n_vb, &purge, purge) { |
| 965 | list_del(&vb->purge); |
| 966 | free_vmap_block(vb); |
| 967 | } |
| 968 | } |
| 969 | |
| 970 | static void purge_fragmented_blocks_allcpus(void) |
| 971 | { |
| 972 | int cpu; |
| 973 | |
| 974 | for_each_possible_cpu(cpu) |
| 975 | purge_fragmented_blocks(cpu); |
| 976 | } |
| 977 | |
| 978 | static void *vb_alloc(unsigned long size, gfp_t gfp_mask) |
| 979 | { |
| 980 | struct vmap_block_queue *vbq; |
| 981 | struct vmap_block *vb; |
| 982 | void *vaddr = NULL; |
| 983 | unsigned int order; |
| 984 | |
| 985 | BUG_ON(offset_in_page(size)); |
| 986 | BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC); |
| 987 | if (WARN_ON(size == 0)) { |
| 988 | /* |
| 989 | * Allocating 0 bytes isn't what caller wants since |
| 990 | * get_order(0) returns funny result. Just warn and terminate |
| 991 | * early. |
| 992 | */ |
| 993 | return NULL; |
| 994 | } |
| 995 | order = get_order(size); |
| 996 | |
| 997 | rcu_read_lock(); |
| 998 | vbq = &get_cpu_var(vmap_block_queue); |
| 999 | list_for_each_entry_rcu(vb, &vbq->free, free_list) { |
| 1000 | unsigned long pages_off; |
| 1001 | |
| 1002 | spin_lock(&vb->lock); |
| 1003 | if (vb->free < (1UL << order)) { |
| 1004 | spin_unlock(&vb->lock); |
| 1005 | continue; |
| 1006 | } |
| 1007 | |
| 1008 | pages_off = VMAP_BBMAP_BITS - vb->free; |
| 1009 | vaddr = vmap_block_vaddr(vb->va->va_start, pages_off); |
| 1010 | vb->free -= 1UL << order; |
| 1011 | if (vb->free == 0) { |
| 1012 | spin_lock(&vbq->lock); |
| 1013 | list_del_rcu(&vb->free_list); |
| 1014 | spin_unlock(&vbq->lock); |
| 1015 | } |
| 1016 | |
| 1017 | spin_unlock(&vb->lock); |
| 1018 | break; |
| 1019 | } |
| 1020 | |
| 1021 | put_cpu_var(vmap_block_queue); |
| 1022 | rcu_read_unlock(); |
| 1023 | |
| 1024 | /* Allocate new block if nothing was found */ |
| 1025 | if (!vaddr) |
| 1026 | vaddr = new_vmap_block(order, gfp_mask); |
| 1027 | |
| 1028 | return vaddr; |
| 1029 | } |
| 1030 | |
| 1031 | static void vb_free(const void *addr, unsigned long size) |
| 1032 | { |
| 1033 | unsigned long offset; |
| 1034 | unsigned long vb_idx; |
| 1035 | unsigned int order; |
| 1036 | struct vmap_block *vb; |
| 1037 | |
| 1038 | BUG_ON(offset_in_page(size)); |
| 1039 | BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC); |
| 1040 | |
| 1041 | flush_cache_vunmap((unsigned long)addr, (unsigned long)addr + size); |
| 1042 | |
| 1043 | order = get_order(size); |
| 1044 | |
| 1045 | offset = (unsigned long)addr & (VMAP_BLOCK_SIZE - 1); |
| 1046 | offset >>= PAGE_SHIFT; |
| 1047 | |
| 1048 | vb_idx = addr_to_vb_idx((unsigned long)addr); |
| 1049 | rcu_read_lock(); |
| 1050 | vb = radix_tree_lookup(&vmap_block_tree, vb_idx); |
| 1051 | rcu_read_unlock(); |
| 1052 | BUG_ON(!vb); |
| 1053 | |
| 1054 | vunmap_page_range((unsigned long)addr, (unsigned long)addr + size); |
| 1055 | |
| 1056 | spin_lock(&vb->lock); |
| 1057 | |
| 1058 | /* Expand dirty range */ |
| 1059 | vb->dirty_min = min(vb->dirty_min, offset); |
| 1060 | vb->dirty_max = max(vb->dirty_max, offset + (1UL << order)); |
| 1061 | |
| 1062 | vb->dirty += 1UL << order; |
| 1063 | if (vb->dirty == VMAP_BBMAP_BITS) { |
| 1064 | BUG_ON(vb->free); |
| 1065 | spin_unlock(&vb->lock); |
| 1066 | free_vmap_block(vb); |
| 1067 | } else |
| 1068 | spin_unlock(&vb->lock); |
| 1069 | } |
| 1070 | |
| 1071 | /** |
| 1072 | * vm_unmap_aliases - unmap outstanding lazy aliases in the vmap layer |
| 1073 | * |
| 1074 | * The vmap/vmalloc layer lazily flushes kernel virtual mappings primarily |
| 1075 | * to amortize TLB flushing overheads. What this means is that any page you |
| 1076 | * have now may, in a former life, have been mapped into a kernel virtual |
| 1077 | * address by the vmap layer, and so there might be some CPUs with TLB entries |
| 1078 | * still referencing that page (in addition to the regular 1:1 kernel mapping). |
| 1079 | * |
| 1080 | * vm_unmap_aliases flushes all such lazy mappings. After it returns, we can |
| 1081 | * be sure that none of the pages we have control over will have any aliases |
| 1082 | * from the vmap layer. |
| 1083 | */ |
| 1084 | void vm_unmap_aliases(void) |
| 1085 | { |
| 1086 | unsigned long start = ULONG_MAX, end = 0; |
| 1087 | int cpu; |
| 1088 | int flush = 0; |
| 1089 | |
| 1090 | if (unlikely(!vmap_initialized)) |
| 1091 | return; |
| 1092 | |
| 1093 | might_sleep(); |
| 1094 | |
| 1095 | for_each_possible_cpu(cpu) { |
| 1096 | struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu); |
| 1097 | struct vmap_block *vb; |
| 1098 | |
| 1099 | rcu_read_lock(); |
| 1100 | list_for_each_entry_rcu(vb, &vbq->free, free_list) { |
| 1101 | spin_lock(&vb->lock); |
| 1102 | if (vb->dirty) { |
| 1103 | unsigned long va_start = vb->va->va_start; |
| 1104 | unsigned long s, e; |
| 1105 | |
| 1106 | s = va_start + (vb->dirty_min << PAGE_SHIFT); |
| 1107 | e = va_start + (vb->dirty_max << PAGE_SHIFT); |
| 1108 | |
| 1109 | start = min(s, start); |
| 1110 | end = max(e, end); |
| 1111 | |
| 1112 | flush = 1; |
| 1113 | } |
| 1114 | spin_unlock(&vb->lock); |
| 1115 | } |
| 1116 | rcu_read_unlock(); |
| 1117 | } |
| 1118 | |
| 1119 | mutex_lock(&vmap_purge_lock); |
| 1120 | purge_fragmented_blocks_allcpus(); |
| 1121 | if (!__purge_vmap_area_lazy(start, end) && flush) |
| 1122 | flush_tlb_kernel_range(start, end); |
| 1123 | mutex_unlock(&vmap_purge_lock); |
| 1124 | } |
| 1125 | EXPORT_SYMBOL_GPL(vm_unmap_aliases); |
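| | /* |
| |  * Illustrative sketch (hypothetical caller): code that is about to change |
| |  * the attributes of pages it owns typically flushes stale lazy vmap aliases |
| |  * first, so no CPU is left with a TLB entry carrying the old permissions. |
| |  * set_memory_uc() here is the x86 helper, used purely as an example: |
| |  * |
| |  *	vm_unmap_aliases(); |
| |  *	set_memory_uc((unsigned long)page_address(page), 1); |
| |  */ |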
| 1126 | |
| 1127 | /** |
| 1128 | * vm_unmap_ram - unmap linear kernel address space set up by vm_map_ram |
| 1129 | * @mem: the pointer returned by vm_map_ram |
| 1130 | * @count: the count passed to that vm_map_ram call (cannot unmap partial) |
| 1131 | */ |
| 1132 | void vm_unmap_ram(const void *mem, unsigned int count) |
| 1133 | { |
| 1134 | unsigned long size = (unsigned long)count << PAGE_SHIFT; |
| 1135 | unsigned long addr = (unsigned long)mem; |
| 1136 | struct vmap_area *va; |
| 1137 | |
| 1138 | might_sleep(); |
| 1139 | BUG_ON(!addr); |
| 1140 | BUG_ON(addr < VMALLOC_START); |
| 1141 | BUG_ON(addr > VMALLOC_END); |
| 1142 | BUG_ON(!PAGE_ALIGNED(addr)); |
| 1143 | |
| 1144 | debug_check_no_locks_freed(mem, size); |
| 1145 | vmap_debug_free_range(addr, addr+size); |
| 1146 | |
| 1147 | if (likely(count <= VMAP_MAX_ALLOC)) { |
| 1148 | vb_free(mem, size); |
| 1149 | return; |
| 1150 | } |
| 1151 | |
| 1152 | va = find_vmap_area(addr); |
| 1153 | BUG_ON(!va); |
| 1154 | free_unmap_vmap_area(va); |
| 1155 | } |
| 1156 | EXPORT_SYMBOL(vm_unmap_ram); |
| 1157 | |
| 1158 | /** |
| 1159 | * vm_map_ram - map pages linearly into kernel virtual address (vmalloc space) |
| 1160 | * @pages: an array of pointers to the pages to be mapped |
| 1161 | * @count: number of pages |
| 1162 | * @node: prefer to allocate data structures on this node |
| 1163 | * @prot: memory protection to use. PAGE_KERNEL for regular RAM |
| 1164 | * |
| 1165 | * If you use this function for less than VMAP_MAX_ALLOC pages, it could be |
| 1166 | * faster than vmap so it's good. But if you mix long-lived and short-lived |
| 1167 | * objects with vm_map_ram(), it could consume lots of address space through |
| 1168 | * fragmentation (especially on a 32-bit machine). You could see failures in |
| 1169 | * the end. Please use this function for short-lived objects. |
| 1170 | * |
| 1171 | * Returns: a pointer to the address that has been mapped, or %NULL on failure |
| 1172 | */ |
| 1173 | void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot) |
| 1174 | { |
| 1175 | unsigned long size = (unsigned long)count << PAGE_SHIFT; |
| 1176 | unsigned long addr; |
| 1177 | void *mem; |
| 1178 | |
| 1179 | if (likely(count <= VMAP_MAX_ALLOC)) { |
| 1180 | mem = vb_alloc(size, GFP_KERNEL); |
| 1181 | if (IS_ERR(mem)) |
| 1182 | return NULL; |
| 1183 | addr = (unsigned long)mem; |
| 1184 | } else { |
| 1185 | struct vmap_area *va; |
| 1186 | va = alloc_vmap_area(size, PAGE_SIZE, |
| 1187 | VMALLOC_START, VMALLOC_END, node, GFP_KERNEL); |
| 1188 | if (IS_ERR(va)) |
| 1189 | return NULL; |
| 1190 | |
| 1191 | addr = va->va_start; |
| 1192 | mem = (void *)addr; |
| 1193 | } |
| 1194 | if (vmap_page_range(addr, addr + size, prot, pages) < 0) { |
| 1195 | vm_unmap_ram(mem, count); |
| 1196 | return NULL; |
| 1197 | } |
| 1198 | return mem; |
| 1199 | } |
| 1200 | EXPORT_SYMBOL(vm_map_ram); |
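| | /* |
| |  * Illustrative sketch (names hypothetical, error handling trimmed): map a |
| |  * small, short-lived group of pages and tear the mapping down again. For |
| |  * count <= VMAP_MAX_ALLOC this goes through the per-cpu vmap block path. |
| |  * |
| |  *	void *va = vm_map_ram(pages, nr, NUMA_NO_NODE, PAGE_KERNEL); |
| |  * |
| |  *	if (va) { |
| |  *		memcpy(va, src, nr * PAGE_SIZE); |
| |  *		vm_unmap_ram(va, nr); |
| |  *	} |
| |  */ |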
| 1201 | |
| 1202 | static struct vm_struct *vmlist __initdata; |
| 1203 | /** |
| 1204 | * vm_area_add_early - add vmap area early during boot |
| 1205 | * @vm: vm_struct to add |
| 1206 | * |
| 1207 | * This function is used to add fixed kernel vm area to vmlist before |
| 1208 | * vmalloc_init() is called. @vm->addr, @vm->size, and @vm->flags |
| 1209 | * should contain proper values and the other fields should be zero. |
| 1210 | * |
| 1211 | * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING. |
| 1212 | */ |
| 1213 | void __init vm_area_add_early(struct vm_struct *vm) |
| 1214 | { |
| 1215 | struct vm_struct *tmp, **p; |
| 1216 | |
| 1217 | BUG_ON(vmap_initialized); |
| 1218 | for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) { |
| 1219 | if (tmp->addr >= vm->addr) { |
| 1220 | BUG_ON(tmp->addr < vm->addr + vm->size); |
| 1221 | break; |
| 1222 | } else |
| 1223 | BUG_ON(tmp->addr + tmp->size > vm->addr); |
| 1224 | } |
| 1225 | vm->next = *p; |
| 1226 | *p = vm; |
| 1227 | } |
| 1228 | |
| 1229 | /** |
| 1230 | * vm_area_register_early - register vmap area early during boot |
| 1231 | * @vm: vm_struct to register |
| 1232 | * @align: requested alignment |
| 1233 | * |
| 1234 | * This function is used to register kernel vm area before |
| 1235 | * vmalloc_init() is called. @vm->size and @vm->flags should contain |
| 1236 | * proper values on entry and other fields should be zero. On return, |
| 1237 | * vm->addr contains the allocated address. |
| 1238 | * |
| 1239 | * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING. |
| 1240 | */ |
| 1241 | void __init vm_area_register_early(struct vm_struct *vm, size_t align) |
| 1242 | { |
| 1243 | static size_t vm_init_off __initdata; |
| 1244 | unsigned long addr; |
| 1245 | |
| 1246 | addr = ALIGN(VMALLOC_START + vm_init_off, align); |
| 1247 | vm_init_off = PFN_ALIGN(addr + vm->size) - VMALLOC_START; |
| 1248 | |
| 1249 | vm->addr = (void *)addr; |
| 1250 | |
| 1251 | vm_area_add_early(vm); |
| 1252 | } |
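| | /* |
| |  * Illustrative sketch of an early-boot user (the percpu allocator is the |
| |  * typical one): fill in size and flags, let this function pick the address, |
| |  * and populate the page tables later. Names and sizes are hypothetical. |
| |  * |
| |  *	static struct vm_struct early_vm; |
| |  * |
| |  *	early_vm.flags = VM_ALLOC; |
| |  *	early_vm.size = nr_pages * PAGE_SIZE; |
| |  *	vm_area_register_early(&early_vm, PAGE_SIZE); |
| |  * |
| |  * After this, early_vm.addr holds the reserved virtual address. |
| |  */ |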
| 1253 | |
| 1254 | void __init vmalloc_init(void) |
| 1255 | { |
| 1256 | struct vmap_area *va; |
| 1257 | struct vm_struct *tmp; |
| 1258 | int i; |
| 1259 | |
| 1260 | for_each_possible_cpu(i) { |
| 1261 | struct vmap_block_queue *vbq; |
| 1262 | struct vfree_deferred *p; |
| 1263 | |
| 1264 | vbq = &per_cpu(vmap_block_queue, i); |
| 1265 | spin_lock_init(&vbq->lock); |
| 1266 | INIT_LIST_HEAD(&vbq->free); |
| 1267 | p = &per_cpu(vfree_deferred, i); |
| 1268 | init_llist_head(&p->list); |
| 1269 | INIT_WORK(&p->wq, free_work); |
| 1270 | } |
| 1271 | |
| 1272 | /* Import existing vmlist entries. */ |
| 1273 | for (tmp = vmlist; tmp; tmp = tmp->next) { |
| 1274 | va = kzalloc(sizeof(struct vmap_area), GFP_NOWAIT); |
| 1275 | va->flags = VM_VM_AREA; |
| 1276 | va->va_start = (unsigned long)tmp->addr; |
| 1277 | va->va_end = va->va_start + tmp->size; |
| 1278 | va->vm = tmp; |
| 1279 | __insert_vmap_area(va); |
| 1280 | } |
| 1281 | |
| 1282 | vmap_area_pcpu_hole = VMALLOC_END; |
| 1283 | |
| 1284 | vmap_initialized = true; |
| 1285 | } |
| 1286 | |
| 1287 | /** |
| 1288 | * map_kernel_range_noflush - map kernel VM area with the specified pages |
| 1289 | * @addr: start of the VM area to map |
| 1290 | * @size: size of the VM area to map |
| 1291 | * @prot: page protection flags to use |
| 1292 | * @pages: pages to map |
| 1293 | * |
| 1294 | * Map PFN_UP(@size) pages at @addr. The VM area @addr and @size |
| 1295 | * specify should have been allocated using get_vm_area() and its |
| 1296 | * friends. |
| 1297 | * |
| 1298 | * NOTE: |
| 1299 | * This function does NOT do any cache flushing. The caller is |
| 1300 | * responsible for calling flush_cache_vmap() on to-be-mapped areas |
| 1301 | * before calling this function. |
| 1302 | * |
| 1303 | * RETURNS: |
| 1304 | * The number of pages mapped on success, -errno on failure. |
| 1305 | */ |
| 1306 | int map_kernel_range_noflush(unsigned long addr, unsigned long size, |
| 1307 | pgprot_t prot, struct page **pages) |
| 1308 | { |
| 1309 | return vmap_page_range_noflush(addr, addr + size, prot, pages); |
| 1310 | } |
| 1311 | |
| 1312 | /** |
| 1313 | * unmap_kernel_range_noflush - unmap kernel VM area |
| 1314 | * @addr: start of the VM area to unmap |
| 1315 | * @size: size of the VM area to unmap |
| 1316 | * |
| 1317 | * Unmap PFN_UP(@size) pages at @addr. The VM area @addr and @size |
| 1318 | * specify should have been allocated using get_vm_area() and its |
| 1319 | * friends. |
| 1320 | * |
| 1321 | * NOTE: |
| 1322 | * This function does NOT do any cache flushing. The caller is |
| 1323 | * responsible for calling flush_cache_vunmap() on to-be-unmapped areas |
| 1324 | * before calling this function and flush_tlb_kernel_range() after. |
| 1325 | */ |
| 1326 | void unmap_kernel_range_noflush(unsigned long addr, unsigned long size) |
| 1327 | { |
| 1328 | vunmap_page_range(addr, addr + size); |
| 1329 | } |
| 1330 | EXPORT_SYMBOL_GPL(unmap_kernel_range_noflush); |
| 1331 | |
| 1332 | /** |
| 1333 | * unmap_kernel_range - unmap kernel VM area and flush cache and TLB |
| 1334 | * @addr: start of the VM area to unmap |
| 1335 | * @size: size of the VM area to unmap |
| 1336 | * |
| 1337 | * Similar to unmap_kernel_range_noflush() but flushes the cache before |
| 1338 | * the unmapping and the TLB after. |
| 1339 | */ |
| 1340 | void unmap_kernel_range(unsigned long addr, unsigned long size) |
| 1341 | { |
| 1342 | unsigned long end = addr + size; |
| 1343 | |
| 1344 | flush_cache_vunmap(addr, end); |
| 1345 | vunmap_page_range(addr, end); |
| 1346 | flush_tlb_kernel_range(addr, end); |
| 1347 | } |
| 1348 | EXPORT_SYMBOL_GPL(unmap_kernel_range); |
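| | /* |
| |  * Illustrative sketch (hypothetical caller, error handling omitted): pair |
| |  * the noflush mapping helper with the cache flush it documents as the |
| |  * caller's job, and undo everything with the flushing unmap variant: |
| |  * |
| |  *	struct vm_struct *vm = get_vm_area(nr * PAGE_SIZE, VM_MAP); |
| |  *	unsigned long addr = (unsigned long)vm->addr; |
| |  * |
| |  *	flush_cache_vmap(addr, addr + nr * PAGE_SIZE); |
| |  *	map_kernel_range_noflush(addr, nr * PAGE_SIZE, PAGE_KERNEL, pages); |
| |  *	... |
| |  *	unmap_kernel_range(addr, nr * PAGE_SIZE); |
| |  */ |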
| 1349 | |
| 1350 | int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page **pages) |
| 1351 | { |
| 1352 | unsigned long addr = (unsigned long)area->addr; |
| 1353 | unsigned long end = addr + get_vm_area_size(area); |
| 1354 | int err; |
| 1355 | |
| 1356 | err = vmap_page_range(addr, end, prot, pages); |
| 1357 | |
| 1358 | return err > 0 ? 0 : err; |
| 1359 | } |
| 1360 | EXPORT_SYMBOL_GPL(map_vm_area); |
| 1361 | |
| 1362 | static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va, |
| 1363 | unsigned long flags, const void *caller) |
| 1364 | { |
| 1365 | spin_lock(&vmap_area_lock); |
| 1366 | vm->flags = flags; |
| 1367 | vm->addr = (void *)va->va_start; |
| 1368 | vm->size = va->va_end - va->va_start; |
| 1369 | vm->caller = caller; |
| 1370 | va->vm = vm; |
| 1371 | va->flags |= VM_VM_AREA; |
| 1372 | spin_unlock(&vmap_area_lock); |
| 1373 | } |
| 1374 | |
| 1375 | static void clear_vm_uninitialized_flag(struct vm_struct *vm) |
| 1376 | { |
| 1377 | /* |
| 1378 | * Before removing VM_UNINITIALIZED, |
| 1379 | * we should make sure that vm has proper values. |
| 1380 | * Pair with smp_rmb() in show_numa_info(). |
| 1381 | */ |
| 1382 | smp_wmb(); |
| 1383 | vm->flags &= ~VM_UNINITIALIZED; |
| 1384 | } |
| 1385 | |
| 1386 | static struct vm_struct *__get_vm_area_node(unsigned long size, |
| 1387 | unsigned long align, unsigned long flags, unsigned long start, |
| 1388 | unsigned long end, int node, gfp_t gfp_mask, const void *caller) |
| 1389 | { |
| 1390 | struct vmap_area *va; |
| 1391 | struct vm_struct *area; |
| 1392 | |
| 1393 | BUG_ON(in_interrupt()); |
| 1394 | size = PAGE_ALIGN(size); |
| 1395 | if (unlikely(!size)) |
| 1396 | return NULL; |
| 1397 | |
| 1398 | if (flags & VM_IOREMAP) |
| 1399 | align = 1ul << clamp_t(int, get_count_order_long(size), |
| 1400 | PAGE_SHIFT, IOREMAP_MAX_ORDER); |
| 1401 | |
| 1402 | area = kzalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node); |
| 1403 | if (unlikely(!area)) |
| 1404 | return NULL; |
| 1405 | |
| 1406 | if (!(flags & VM_NO_GUARD)) |
| 1407 | size += PAGE_SIZE; |
| 1408 | |
| 1409 | va = alloc_vmap_area(size, align, start, end, node, gfp_mask); |
| 1410 | if (IS_ERR(va)) { |
| 1411 | kfree(area); |
| 1412 | return NULL; |
| 1413 | } |
| 1414 | |
| 1415 | setup_vmalloc_vm(area, va, flags, caller); |
| 1416 | |
| 1417 | return area; |
| 1418 | } |
| 1419 | |
| 1420 | struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags, |
| 1421 | unsigned long start, unsigned long end) |
| 1422 | { |
| 1423 | return __get_vm_area_node(size, 1, flags, start, end, NUMA_NO_NODE, |
| 1424 | GFP_KERNEL, __builtin_return_address(0)); |
| 1425 | } |
| 1426 | EXPORT_SYMBOL_GPL(__get_vm_area); |
| 1427 | |
| 1428 | struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags, |
| 1429 | unsigned long start, unsigned long end, |
| 1430 | const void *caller) |
| 1431 | { |
| 1432 | return __get_vm_area_node(size, 1, flags, start, end, NUMA_NO_NODE, |
| 1433 | GFP_KERNEL, caller); |
| 1434 | } |
| 1435 | |
| 1436 | /** |
| 1437 | * get_vm_area - reserve a contiguous kernel virtual area |
| 1438 | * @size: size of the area |
| 1439 | * @flags: %VM_IOREMAP for I/O mappings or VM_ALLOC |
| 1440 | * |
| 1441 | * Search an area of @size in the kernel virtual mapping area, |
| 1442 | * and reserve it for our purposes. Returns the area descriptor |
| 1443 | * on success or %NULL on failure. |
| 1444 | */ |
| 1445 | struct vm_struct *get_vm_area(unsigned long size, unsigned long flags) |
| 1446 | { |
| 1447 | return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END, |
| 1448 | NUMA_NO_NODE, GFP_KERNEL, |
| 1449 | __builtin_return_address(0)); |
| 1450 | } |
| 1451 | |
| 1452 | struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags, |
| 1453 | const void *caller) |
| 1454 | { |
| 1455 | return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END, |
| 1456 | NUMA_NO_NODE, GFP_KERNEL, caller); |
| 1457 | } |
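| | /* |
| |  * Illustrative sketch: this is roughly the shape of an ioremap()-style user |
| |  * of get_vm_area_caller(), reserving VM_IOREMAP space and filling it with |
| |  * ioremap_page_range(). Details and error paths are trimmed: |
| |  * |
| |  *	area = get_vm_area_caller(size, VM_IOREMAP, |
| |  *				  __builtin_return_address(0)); |
| |  *	if (!area) |
| |  *		return NULL; |
| |  *	if (ioremap_page_range((unsigned long)area->addr, |
| |  *			       (unsigned long)area->addr + size, |
| |  *			       phys_addr, prot)) { |
| |  *		free_vm_area(area); |
| |  *		return NULL; |
| |  *	} |
| |  *	return area->addr; |
| |  */ |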
| 1458 | |
| 1459 | /** |
| 1460 | * find_vm_area - find a contiguous kernel virtual area |
| 1461 | * @addr: base address |
| 1462 | * |
| 1463 | * Search for the kernel VM area starting at @addr, and return it. |
| 1464 | * It is up to the caller to do all required locking to keep the returned |
| 1465 | * pointer valid. |
| 1466 | */ |
| 1467 | struct vm_struct *find_vm_area(const void *addr) |
| 1468 | { |
| 1469 | struct vmap_area *va; |
| 1470 | |
| 1471 | va = find_vmap_area((unsigned long)addr); |
| 1472 | if (va && va->flags & VM_VM_AREA) |
| 1473 | return va->vm; |
| 1474 | |
| 1475 | return NULL; |
| 1476 | } |
| 1477 | |
| 1478 | /** |
| 1479 | * remove_vm_area - find and remove a contiguous kernel virtual area |
| 1480 | * @addr: base address |
| 1481 | * |
| 1482 | * Search for the kernel VM area starting at @addr, and remove it. |
| 1483 | * This function returns the found VM area, but using it is NOT safe |
| 1484 | * on SMP machines, except for its size or flags. |
| 1485 | */ |
| 1486 | struct vm_struct *remove_vm_area(const void *addr) |
| 1487 | { |
| 1488 | struct vmap_area *va; |
| 1489 | |
| 1490 | might_sleep(); |
| 1491 | |
| 1492 | va = find_vmap_area((unsigned long)addr); |
| 1493 | if (va && va->flags & VM_VM_AREA) { |
| 1494 | struct vm_struct *vm = va->vm; |
| 1495 | |
| 1496 | spin_lock(&vmap_area_lock); |
| 1497 | va->vm = NULL; |
| 1498 | va->flags &= ~VM_VM_AREA; |
| 1499 | va->flags |= VM_LAZY_FREE; |
| 1500 | spin_unlock(&vmap_area_lock); |
| 1501 | |
| 1502 | vmap_debug_free_range(va->va_start, va->va_end); |
| 1503 | kasan_free_shadow(vm); |
| 1504 | free_unmap_vmap_area(va); |
| 1505 | |
| 1506 | return vm; |
| 1507 | } |
| 1508 | return NULL; |
| 1509 | } |
| 1510 | |
| 1511 | static void __vunmap(const void *addr, int deallocate_pages) |
| 1512 | { |
| 1513 | struct vm_struct *area; |
| 1514 | |
| 1515 | if (!addr) |
| 1516 | return; |
| 1517 | |
| 1518 | if (WARN(!PAGE_ALIGNED(addr), "Trying to vfree() bad address (%p)\n", |
| 1519 | addr)) |
| 1520 | return; |
| 1521 | |
| 1522 | area = remove_vm_area(addr); |
| 1523 | if (unlikely(!area)) { |
| 1524 | WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n", |
| 1525 | addr); |
| 1526 | return; |
| 1527 | } |
| 1528 | |
| 1529 | debug_check_no_locks_freed(addr, get_vm_area_size(area)); |
| 1530 | debug_check_no_obj_freed(addr, get_vm_area_size(area)); |
| 1531 | |
| 1532 | if (deallocate_pages) { |
| 1533 | int i; |
| 1534 | |
| 1535 | for (i = 0; i < area->nr_pages; i++) { |
| 1536 | struct page *page = area->pages[i]; |
| 1537 | |
| 1538 | BUG_ON(!page); |
| 1539 | __free_pages(page, 0); |
| 1540 | } |
| 1541 | |
| 1542 | kvfree(area->pages); |
| 1543 | } |
| 1544 | |
| 1545 | kfree(area); |
| 1546 | return; |
| 1547 | } |
| 1548 | |
| 1549 | static inline void __vfree_deferred(const void *addr) |
| 1550 | { |
| 1551 | /* |
| 1552 | * Use raw_cpu_ptr() because this can be called from preemptible |
| 1553 | * context. Preemption is absolutely fine here, because the llist_add() |
| 1554 | * implementation is lockless, so it works even if we are adding to |
| 1555 | * another cpu's list. schedule_work() should be fine with this too. |
| 1556 | */ |
| 1557 | struct vfree_deferred *p = raw_cpu_ptr(&vfree_deferred); |
| 1558 | |
| 1559 | if (llist_add((struct llist_node *)addr, &p->list)) |
| 1560 | schedule_work(&p->wq); |
| 1561 | } |
| 1562 | |
| 1563 | /** |
| 1564 | * vfree_atomic - release memory allocated by vmalloc() |
| 1565 | * @addr: memory base address |
| 1566 | * |
| 1567 | * This one is just like vfree() but can be called in any atomic context |
| 1568 | * except NMIs. |
| 1569 | */ |
| 1570 | void vfree_atomic(const void *addr) |
| 1571 | { |
| 1572 | BUG_ON(in_nmi()); |
| 1573 | |
| 1574 | kmemleak_free(addr); |
| 1575 | |
| 1576 | if (!addr) |
| 1577 | return; |
| 1578 | __vfree_deferred(addr); |
| 1579 | } |
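| | /* |
| |  * Illustrative sketch (hypothetical structure): dropping a vmalloc'd buffer |
| |  * while a spinlock is held, where a plain vfree() could sleep: |
| |  * |
| |  *	spin_lock(&obj->lock); |
| |  *	buf = obj->buf; |
| |  *	obj->buf = NULL; |
| |  *	vfree_atomic(buf); |
| |  *	spin_unlock(&obj->lock); |
| |  */ |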
| 1580 | |
| 1581 | /** |
| 1582 | * vfree - release memory allocated by vmalloc() |
| 1583 | * @addr: memory base address |
| 1584 | * |
| 1585 | * Free the virtually contiguous memory area starting at @addr, as |
| 1586 | * obtained from vmalloc(), vmalloc_32() or __vmalloc(). If @addr is |
| 1587 | * NULL, no operation is performed. |
| 1588 | * |
| 1589 | * Must not be called in NMI context (strictly speaking, only if we don't |
| 1590 | * have CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG, but making the calling |
| 1591 | * conventions for vfree() arch-dependent would be a really bad idea) |
| 1592 | * |
| 1593 | * NOTE: assumes that the object at @addr has a size >= sizeof(llist_node) |
| 1594 | */ |
| 1595 | void vfree(const void *addr) |
| 1596 | { |
| 1597 | BUG_ON(in_nmi()); |
| 1598 | |
| 1599 | kmemleak_free(addr); |
| 1600 | |
| 1601 | if (!addr) |
| 1602 | return; |
| 1603 | if (unlikely(in_interrupt())) |
| 1604 | __vfree_deferred(addr); |
| 1605 | else |
| 1606 | __vunmap(addr, 1); |
| 1607 | } |
| 1608 | EXPORT_SYMBOL(vfree); |
| 1609 | |
| 1610 | /** |
| 1611 | * vunmap - release virtual mapping obtained by vmap() |
| 1612 | * @addr: memory base address |
| 1613 | * |
| 1614 | * Free the virtually contiguous memory area starting at @addr, |
| 1615 | * which was created from the page array passed to vmap(). |
| 1616 | * |
| 1617 | * Must not be called in interrupt context. |
| 1618 | */ |
| 1619 | void vunmap(const void *addr) |
| 1620 | { |
| 1621 | BUG_ON(in_interrupt()); |
| 1622 | might_sleep(); |
| 1623 | if (addr) |
| 1624 | __vunmap(addr, 0); |
| 1625 | } |
| 1626 | EXPORT_SYMBOL(vunmap); |
| 1627 | |
| 1628 | /** |
| 1629 | * vmap - map an array of pages into virtually contiguous space |
| 1630 | * @pages: array of page pointers |
| 1631 | * @count: number of pages to map |
| 1632 | * @flags: vm_area->flags |
| 1633 | * @prot: page protection for the mapping |
| 1634 | * |
| 1635 | * Maps @count pages from @pages into contiguous kernel virtual |
| 1636 | * space. |
| 1637 | */ |
| 1638 | void *vmap(struct page **pages, unsigned int count, |
| 1639 | unsigned long flags, pgprot_t prot) |
| 1640 | { |
| 1641 | struct vm_struct *area; |
| 1642 | unsigned long size; /* In bytes */ |
| 1643 | |
| 1644 | might_sleep(); |
| 1645 | |
| 1646 | if (count > totalram_pages) |
| 1647 | return NULL; |
| 1648 | |
| 1649 | size = (unsigned long)count << PAGE_SHIFT; |
| 1650 | area = get_vm_area_caller(size, flags, __builtin_return_address(0)); |
| 1651 | if (!area) |
| 1652 | return NULL; |
| 1653 | |
| 1654 | if (map_vm_area(area, prot, pages)) { |
| 1655 | vunmap(area->addr); |
| 1656 | return NULL; |
| 1657 | } |
| 1658 | |
| 1659 | return area->addr; |
| 1660 | } |
| 1661 | EXPORT_SYMBOL(vmap); |
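| | /* |
| | * A minimal sketch (illustrative; "pages" and "nr" come from the caller, |
| | * e.g. a prior alloc_page() loop): make scattered pages virtually |
| | * contiguous, use the mapping, then drop only the mapping: |
| | * |
| | *      void *va = vmap(pages, nr, VM_MAP, PAGE_KERNEL); |
| | *      if (!va) |
| | *              return -ENOMEM; |
| | *      memset(va, 0, nr * PAGE_SIZE); |
| | *      vunmap(va);     (the pages themselves stay allocated) |
| | */ |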
| 1662 | |
| 1663 | static void *__vmalloc_node(unsigned long size, unsigned long align, |
| 1664 | gfp_t gfp_mask, pgprot_t prot, |
| 1665 | int node, const void *caller); |
| 1666 | static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask, |
| 1667 | pgprot_t prot, int node) |
| 1668 | { |
| 1669 | struct page **pages; |
| 1670 | unsigned int nr_pages, array_size, i; |
| 1671 | const gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO; |
| 1672 | const gfp_t alloc_mask = gfp_mask | __GFP_NOWARN; |
| 1673 | const gfp_t highmem_mask = (gfp_mask & (GFP_DMA | GFP_DMA32)) ? |
| 1674 | 0 : |
| 1675 | __GFP_HIGHMEM; |
| 1676 | |
| 1677 | nr_pages = get_vm_area_size(area) >> PAGE_SHIFT; |
| 1678 | array_size = (nr_pages * sizeof(struct page *)); |
| 1679 | |
| 1680 | area->nr_pages = nr_pages; |
| 1681 | /* Please note that the recursion is strictly bounded. */ |
| 1682 | if (array_size > PAGE_SIZE) { |
| 1683 | pages = __vmalloc_node(array_size, 1, nested_gfp|highmem_mask, |
| 1684 | PAGE_KERNEL, node, area->caller); |
| 1685 | } else { |
| 1686 | pages = kmalloc_node(array_size, nested_gfp, node); |
| 1687 | } |
| 1688 | area->pages = pages; |
| 1689 | if (!area->pages) { |
| 1690 | remove_vm_area(area->addr); |
| 1691 | kfree(area); |
| 1692 | return NULL; |
| 1693 | } |
| 1694 | |
| 1695 | for (i = 0; i < area->nr_pages; i++) { |
| 1696 | struct page *page; |
| 1697 | |
| 1698 | if (node == NUMA_NO_NODE) |
| 1699 | page = alloc_page(alloc_mask|highmem_mask); |
| 1700 | else |
| 1701 | page = alloc_pages_node(node, alloc_mask|highmem_mask, 0); |
| 1702 | |
| 1703 | if (unlikely(!page)) { |
| 1704 | /* Successfully allocated i pages, free them in __vunmap() */ |
| 1705 | area->nr_pages = i; |
| 1706 | goto fail; |
| 1707 | } |
| 1708 | area->pages[i] = page; |
| 1709 | if (gfpflags_allow_blocking(gfp_mask|highmem_mask)) |
| 1710 | cond_resched(); |
| 1711 | } |
| 1712 | |
| 1713 | if (map_vm_area(area, prot, pages)) |
| 1714 | goto fail; |
| 1715 | return area->addr; |
| 1716 | |
| 1717 | fail: |
| 1718 | warn_alloc(gfp_mask, NULL, |
| 1719 | "vmalloc: allocation failure, allocated %ld of %ld bytes", |
| 1720 | (area->nr_pages*PAGE_SIZE), area->size); |
| 1721 | vfree(area->addr); |
| 1722 | return NULL; |
| 1723 | } |
| 1724 | |
| 1725 | /** |
| 1726 | * __vmalloc_node_range - allocate virtually contiguous memory |
| 1727 | * @size: allocation size |
| 1728 | * @align: desired alignment |
| 1729 | * @start: vm area range start |
| 1730 | * @end: vm area range end |
| 1731 | * @gfp_mask: flags for the page level allocator |
| 1732 | * @prot: protection mask for the allocated pages |
| 1733 | * @vm_flags: additional vm area flags (e.g. %VM_NO_GUARD) |
| 1734 | * @node: node to use for allocation or NUMA_NO_NODE |
| 1735 | * @caller: caller's return address |
| 1736 | * |
| 1737 | * Allocate enough pages to cover @size from the page level |
| 1738 | * allocator with @gfp_mask flags. Map them into contiguous |
| 1739 | * kernel virtual space, using a pagetable protection of @prot. |
| 1740 | */ |
| 1741 | void *__vmalloc_node_range(unsigned long size, unsigned long align, |
| 1742 | unsigned long start, unsigned long end, gfp_t gfp_mask, |
| 1743 | pgprot_t prot, unsigned long vm_flags, int node, |
| 1744 | const void *caller) |
| 1745 | { |
| 1746 | struct vm_struct *area; |
| 1747 | void *addr; |
| 1748 | unsigned long real_size = size; |
| 1749 | |
| 1750 | size = PAGE_ALIGN(size); |
| 1751 | if (!size || (size >> PAGE_SHIFT) > totalram_pages) |
| 1752 | goto fail; |
| 1753 | |
| 1754 | area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED | |
| 1755 | vm_flags, start, end, node, gfp_mask, caller); |
| 1756 | if (!area) |
| 1757 | goto fail; |
| 1758 | |
| 1759 | addr = __vmalloc_area_node(area, gfp_mask, prot, node); |
| 1760 | if (!addr) |
| 1761 | return NULL; |
| 1762 | |
| 1763 | /* |
| 1764 | * In this function, newly allocated vm_struct has VM_UNINITIALIZED |
| 1765 | * flag. It means that vm_struct is not fully initialized. |
| 1766 | * Now, it is fully initialized, so remove this flag here. |
| 1767 | */ |
| 1768 | clear_vm_uninitialized_flag(area); |
| 1769 | |
| 1770 | kmemleak_vmalloc(area, size, gfp_mask); |
| 1771 | |
| 1772 | return addr; |
| 1773 | |
| 1774 | fail: |
| 1775 | warn_alloc(gfp_mask, NULL, |
| 1776 | "vmalloc: allocation failure: %lu bytes", real_size); |
| 1777 | return NULL; |
| 1778 | } |
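| | /* |
| | * A sketch of a typical external caller (hedged; this mirrors what many |
| | * arch module_alloc() implementations do, but the exact range and |
| | * protections are architecture-specific): |
| | * |
| | *      p = __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END, |
| | *                               GFP_KERNEL, PAGE_KERNEL_EXEC, 0, |
| | *                               NUMA_NO_NODE, __builtin_return_address(0)); |
| | */ |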
| 1779 | |
| 1780 | /** |
| 1781 | * __vmalloc_node - allocate virtually contiguous memory |
| 1782 | * @size: allocation size |
| 1783 | * @align: desired alignment |
| 1784 | * @gfp_mask: flags for the page level allocator |
| 1785 | * @prot: protection mask for the allocated pages |
| 1786 | * @node: node to use for allocation or NUMA_NO_NODE |
| 1787 | * @caller: caller's return address |
| 1788 | * |
| 1789 | * Allocate enough pages to cover @size from the page level |
| 1790 | * allocator with @gfp_mask flags. Map them into contiguous |
| 1791 | * kernel virtual space, using a pagetable protection of @prot. |
| 1792 | * |
| 1793 | * Reclaim modifiers in @gfp_mask - __GFP_NORETRY, __GFP_RETRY_MAYFAIL |
| 1794 | * and __GFP_NOFAIL are not supported |
| 1795 | * |
| 1796 | * Any use of gfp flags outside of GFP_KERNEL should be consulted |
| 1797 | * with mm people. |
| 1798 | * |
| 1799 | */ |
| 1800 | static void *__vmalloc_node(unsigned long size, unsigned long align, |
| 1801 | gfp_t gfp_mask, pgprot_t prot, |
| 1802 | int node, const void *caller) |
| 1803 | { |
| 1804 | return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END, |
| 1805 | gfp_mask, prot, 0, node, caller); |
| 1806 | } |
| 1807 | |
| 1808 | void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot) |
| 1809 | { |
| 1810 | return __vmalloc_node(size, 1, gfp_mask, prot, NUMA_NO_NODE, |
| 1811 | __builtin_return_address(0)); |
| 1812 | } |
| 1813 | EXPORT_SYMBOL(__vmalloc); |
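| | /* |
| | * A minimal sketch (illustrative): callers reach for __vmalloc() when the |
| | * default gfp or protection bits are not enough, e.g. a large optional |
| | * cache that should fail quietly instead of warning: |
| | * |
| | *      cache = __vmalloc(len, GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO, |
| | *                        PAGE_KERNEL); |
| | */ |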
| 1814 | |
| 1815 | static inline void *__vmalloc_node_flags(unsigned long size, |
| 1816 | int node, gfp_t flags) |
| 1817 | { |
| 1818 | return __vmalloc_node(size, 1, flags, PAGE_KERNEL, |
| 1819 | node, __builtin_return_address(0)); |
| 1820 | } |
| 1821 | |
| 1823 | void *__vmalloc_node_flags_caller(unsigned long size, int node, gfp_t flags, |
| 1824 | void *caller) |
| 1825 | { |
| 1826 | return __vmalloc_node(size, 1, flags, PAGE_KERNEL, node, caller); |
| 1827 | } |
| 1828 | |
| 1829 | /** |
| 1830 | * vmalloc - allocate virtually contiguous memory |
| 1831 | * @size: allocation size |
| 1832 | * Allocate enough pages to cover @size from the page level |
| 1833 | * allocator and map them into contiguous kernel virtual space. |
| 1834 | * |
| 1835 | * For tight control over page level allocator and protection flags |
| 1836 | * use __vmalloc() instead. |
| 1837 | */ |
| 1838 | void *vmalloc(unsigned long size) |
| 1839 | { |
| 1840 | return __vmalloc_node_flags(size, NUMA_NO_NODE, |
| 1841 | GFP_KERNEL); |
| 1842 | } |
| 1843 | EXPORT_SYMBOL(vmalloc); |
| 1844 | |
| 1845 | /** |
| 1846 | * vzalloc - allocate virtually contiguous memory with zero fill |
| 1847 | * @size: allocation size |
| 1848 | * Allocate enough pages to cover @size from the page level |
| 1849 | * allocator and map them into contiguous kernel virtual space. |
| 1850 | * The memory allocated is set to zero. |
| 1851 | * |
| 1852 | * For tight control over page level allocator and protection flags |
| 1853 | * use __vmalloc() instead. |
| 1854 | */ |
| 1855 | void *vzalloc(unsigned long size) |
| 1856 | { |
| 1857 | return __vmalloc_node_flags(size, NUMA_NO_NODE, |
| 1858 | GFP_KERNEL | __GFP_ZERO); |
| 1859 | } |
| 1860 | EXPORT_SYMBOL(vzalloc); |
| 1861 | |
| 1862 | /** |
| 1863 | * vmalloc_user - allocate zeroed virtually contiguous memory for userspace |
| 1864 | * @size: allocation size |
| 1865 | * |
| 1866 | * The resulting memory area is zeroed so it can be mapped to userspace |
| 1867 | * without leaking data. |
| 1868 | */ |
| 1869 | void *vmalloc_user(unsigned long size) |
| 1870 | { |
| 1871 | struct vm_struct *area; |
| 1872 | void *ret; |
| 1873 | |
| 1874 | ret = __vmalloc_node(size, SHMLBA, |
| 1875 | GFP_KERNEL | __GFP_ZERO, |
| 1876 | PAGE_KERNEL, NUMA_NO_NODE, |
| 1877 | __builtin_return_address(0)); |
| 1878 | if (ret) { |
| 1879 | area = find_vm_area(ret); |
| 1880 | area->flags |= VM_USERMAP; |
| 1881 | } |
| 1882 | return ret; |
| 1883 | } |
| 1884 | EXPORT_SYMBOL(vmalloc_user); |
| 1885 | |
| 1886 | /** |
| 1887 | * vmalloc_node - allocate memory on a specific node |
| 1888 | * @size: allocation size |
| 1889 | * @node: numa node |
| 1890 | * |
| 1891 | * Allocate enough pages to cover @size from the page level |
| 1892 | * allocator and map them into contiguous kernel virtual space. |
| 1893 | * |
| 1894 | * For tight control over page level allocator and protection flags |
| 1895 | * use __vmalloc() instead. |
| 1896 | */ |
| 1897 | void *vmalloc_node(unsigned long size, int node) |
| 1898 | { |
| 1899 | return __vmalloc_node(size, 1, GFP_KERNEL, PAGE_KERNEL, |
| 1900 | node, __builtin_return_address(0)); |
| 1901 | } |
| 1902 | EXPORT_SYMBOL(vmalloc_node); |
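| | /* |
| | * A minimal sketch (illustrative; "scratch" and SCRATCH_SIZE are assumed): |
| | * keep a per-node buffer on the node whose CPUs will touch it: |
| | * |
| | *      for_each_online_node(nid) { |
| | *              scratch[nid] = vmalloc_node(SCRATCH_SIZE, nid); |
| | *              if (!scratch[nid]) |
| | *                      goto undo; |
| | *      } |
| | */ |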
| 1903 | |
| 1904 | /** |
| 1905 | * vzalloc_node - allocate memory on a specific node with zero fill |
| 1906 | * @size: allocation size |
| 1907 | * @node: numa node |
| 1908 | * |
| 1909 | * Allocate enough pages to cover @size from the page level |
| 1910 | * allocator and map them into contiguous kernel virtual space. |
| 1911 | * The memory allocated is set to zero. |
| 1912 | * |
| 1913 | * For tight control over page level allocator and protection flags |
| 1914 | * use __vmalloc_node() instead. |
| 1915 | */ |
| 1916 | void *vzalloc_node(unsigned long size, int node) |
| 1917 | { |
| 1918 | return __vmalloc_node_flags(size, node, |
| 1919 | GFP_KERNEL | __GFP_ZERO); |
| 1920 | } |
| 1921 | EXPORT_SYMBOL(vzalloc_node); |
| 1922 | |
| 1923 | #ifndef PAGE_KERNEL_EXEC |
| 1924 | # define PAGE_KERNEL_EXEC PAGE_KERNEL |
| 1925 | #endif |
| 1926 | |
| 1927 | /** |
| 1928 | * vmalloc_exec - allocate virtually contiguous, executable memory |
| 1929 | * @size: allocation size |
| 1930 | * |
| 1931 | * Kernel-internal function to allocate enough pages to cover @size |
| 1932 | * from the page level allocator and map them into contiguous and |
| 1933 | * executable kernel virtual space. |
| 1934 | * |
| 1935 | * For tight control over page level allocator and protection flags |
| 1936 | * use __vmalloc() instead. |
| 1937 | */ |
| 1938 | |
| 1939 | void *vmalloc_exec(unsigned long size) |
| 1940 | { |
| 1941 | return __vmalloc_node(size, 1, GFP_KERNEL, PAGE_KERNEL_EXEC, |
| 1942 | NUMA_NO_NODE, __builtin_return_address(0)); |
| 1943 | } |
| 1944 | |
| 1945 | #if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32) |
| 1946 | #define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL) |
| 1947 | #elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA) |
| 1948 | #define GFP_VMALLOC32 (GFP_DMA | GFP_KERNEL) |
| 1949 | #else |
| 1950 | #define GFP_VMALLOC32 GFP_KERNEL |
| 1951 | #endif |
| 1952 | |
| 1953 | /** |
| 1954 | * vmalloc_32 - allocate virtually contiguous memory (32bit addressable) |
| 1955 | * @size: allocation size |
| 1956 | * |
| 1957 | * Allocate enough 32bit PA addressable pages to cover @size from the |
| 1958 | * page level allocator and map them into contiguous kernel virtual space. |
| 1959 | */ |
| 1960 | void *vmalloc_32(unsigned long size) |
| 1961 | { |
| 1962 | return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL, |
| 1963 | NUMA_NO_NODE, __builtin_return_address(0)); |
| 1964 | } |
| 1965 | EXPORT_SYMBOL(vmalloc_32); |
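| | /* |
| | * A minimal sketch (illustrative; loosely modeled on video-capture style |
| | * drivers): a large buffer whose pages must be 32-bit addressable because |
| | * the device scatter-gathers it one page at a time: |
| | * |
| | *      frame = vmalloc_32(frame_size); |
| | *      if (!frame) |
| | *              return -ENOMEM; |
| | *      page = vmalloc_to_page(frame + off);    (per page, when building the SG list) |
| | */ |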
| 1966 | |
| 1967 | /** |
| 1968 | * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory |
| 1969 | * @size: allocation size |
| 1970 | * |
| 1971 | * The resulting memory area is 32bit addressable and zeroed so it can be |
| 1972 | * mapped to userspace without leaking data. |
| 1973 | */ |
| 1974 | void *vmalloc_32_user(unsigned long size) |
| 1975 | { |
| 1976 | struct vm_struct *area; |
| 1977 | void *ret; |
| 1978 | |
| 1979 | ret = __vmalloc_node(size, 1, GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL, |
| 1980 | NUMA_NO_NODE, __builtin_return_address(0)); |
| 1981 | if (ret) { |
| 1982 | area = find_vm_area(ret); |
| 1983 | area->flags |= VM_USERMAP; |
| 1984 | } |
| 1985 | return ret; |
| 1986 | } |
| 1987 | EXPORT_SYMBOL(vmalloc_32_user); |
| 1988 | |
| 1989 | /* |
| 1990 | * Small helper routine: copy contents from addr to buf. |
| 1991 | * If a page is not present, the corresponding bytes are zero-filled. |
| 1992 | */ |
| 1993 | |
| 1994 | static int aligned_vread(char *buf, char *addr, unsigned long count) |
| 1995 | { |
| 1996 | struct page *p; |
| 1997 | int copied = 0; |
| 1998 | |
| 1999 | while (count) { |
| 2000 | unsigned long offset, length; |
| 2001 | |
| 2002 | offset = offset_in_page(addr); |
| 2003 | length = PAGE_SIZE - offset; |
| 2004 | if (length > count) |
| 2005 | length = count; |
| 2006 | p = vmalloc_to_page(addr); |
| 2007 | /* |
| 2008 | * To access this _mapped_ area safely we would need a lock, but |
| 2009 | * taking one here would add overhead to every vmalloc()/vfree() |
| 2010 | * call just for this rarely used _debug_ interface. Instead, we |
| 2011 | * use kmap_atomic() and accept a small overhead in this access |
| 2012 | * function. |
| 2013 | */ |
| 2014 | if (p) { |
| 2015 | /* |
| 2016 | * we can expect USER0 is not used (see vread/vwrite's |
| 2017 | * function description) |
| 2018 | */ |
| 2019 | void *map = kmap_atomic(p); |
| 2020 | memcpy(buf, map + offset, length); |
| 2021 | kunmap_atomic(map); |
| 2022 | } else |
| 2023 | memset(buf, 0, length); |
| 2024 | |
| 2025 | addr += length; |
| 2026 | buf += length; |
| 2027 | copied += length; |
| 2028 | count -= length; |
| 2029 | } |
| 2030 | return copied; |
| 2031 | } |
| 2032 | |
| 2033 | static int aligned_vwrite(char *buf, char *addr, unsigned long count) |
| 2034 | { |
| 2035 | struct page *p; |
| 2036 | int copied = 0; |
| 2037 | |
| 2038 | while (count) { |
| 2039 | unsigned long offset, length; |
| 2040 | |
| 2041 | offset = offset_in_page(addr); |
| 2042 | length = PAGE_SIZE - offset; |
| 2043 | if (length > count) |
| 2044 | length = count; |
| 2045 | p = vmalloc_to_page(addr); |
| 2046 | /* |
| 2047 | * To access this _mapped_ area safely we would need a lock, but |
| 2048 | * taking one here would add overhead to every vmalloc()/vfree() |
| 2049 | * call just for this rarely used _debug_ interface. Instead, we |
| 2050 | * use kmap_atomic() and accept a small overhead in this access |
| 2051 | * function. |
| 2052 | */ |
| 2053 | if (p) { |
| 2054 | /* |
| 2055 | * we can expect USER0 is not used (see vread/vwrite's |
| 2056 | * function description) |
| 2057 | */ |
| 2058 | void *map = kmap_atomic(p); |
| 2059 | memcpy(map + offset, buf, length); |
| 2060 | kunmap_atomic(map); |
| 2061 | } |
| 2062 | addr += length; |
| 2063 | buf += length; |
| 2064 | copied += length; |
| 2065 | count -= length; |
| 2066 | } |
| 2067 | return copied; |
| 2068 | } |
| 2069 | |
| 2070 | /** |
| 2071 | * vread() - read vmalloc area in a safe way. |
| 2072 | * @buf: buffer for reading data |
| 2073 | * @addr: vm address. |
| 2074 | * @count: number of bytes to be read. |
| 2075 | * |
| 2076 | * Returns the number of bytes by which addr and buf should be |
| 2077 | * increased (same as @count). Returns 0 if [addr...addr+count) |
| 2078 | * does not intersect any live vmalloc area. |
| 2079 | * |
| 2080 | * This function checks that addr is a valid vmalloc'ed area and |
| 2081 | * copies data from that area to the given buffer. If the memory range |
| 2082 | * [addr...addr+count) includes some valid address, data is copied to |
| 2083 | * the proper area of @buf. If there are memory holes, they'll be zero-filled. |
| 2084 | * IOREMAP areas are treated as memory holes and no copy is done. |
| 2085 | * |
| 2086 | * If [addr...addr+count) does not intersect any live |
| 2087 | * vm_struct area, returns 0. @buf should be a kernel buffer. |
| 2088 | * |
| 2089 | * Note: In usual operation, vread() is never necessary because the caller |
| 2090 | * should know the vmalloc() area is valid and can use memcpy(). |
| 2091 | * This is for routines which have to access a vmalloc area without |
| 2092 | * any prior information, such as /dev/kmem. |
| 2093 | * |
| 2094 | */ |
| 2095 | |
| 2096 | long vread(char *buf, char *addr, unsigned long count) |
| 2097 | { |
| 2098 | struct vmap_area *va; |
| 2099 | struct vm_struct *vm; |
| 2100 | char *vaddr, *buf_start = buf; |
| 2101 | unsigned long buflen = count; |
| 2102 | unsigned long n; |
| 2103 | |
| 2104 | /* Don't allow overflow */ |
| 2105 | if ((unsigned long) addr + count < count) |
| 2106 | count = -(unsigned long) addr; |
| 2107 | |
| 2108 | spin_lock(&vmap_area_lock); |
| 2109 | list_for_each_entry(va, &vmap_area_list, list) { |
| 2110 | if (!count) |
| 2111 | break; |
| 2112 | |
| 2113 | if (!(va->flags & VM_VM_AREA)) |
| 2114 | continue; |
| 2115 | |
| 2116 | vm = va->vm; |
| 2117 | vaddr = (char *) vm->addr; |
| 2118 | if (addr >= vaddr + get_vm_area_size(vm)) |
| 2119 | continue; |
| 2120 | while (addr < vaddr) { |
| 2121 | if (count == 0) |
| 2122 | goto finished; |
| 2123 | *buf = '\0'; |
| 2124 | buf++; |
| 2125 | addr++; |
| 2126 | count--; |
| 2127 | } |
| 2128 | n = vaddr + get_vm_area_size(vm) - addr; |
| 2129 | if (n > count) |
| 2130 | n = count; |
| 2131 | if (!(vm->flags & VM_IOREMAP)) |
| 2132 | aligned_vread(buf, addr, n); |
| 2133 | else /* IOREMAP area is treated as memory hole */ |
| 2134 | memset(buf, 0, n); |
| 2135 | buf += n; |
| 2136 | addr += n; |
| 2137 | count -= n; |
| 2138 | } |
| 2139 | finished: |
| 2140 | spin_unlock(&vmap_area_lock); |
| 2141 | |
| 2142 | if (buf == buf_start) |
| 2143 | return 0; |
| 2144 | /* zero-fill memory holes */ |
| 2145 | if (buf != buf_start + buflen) |
| 2146 | memset(buf, 0, buflen - (buf - buf_start)); |
| 2147 | |
| 2148 | return buflen; |
| 2149 | } |
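| | /* |
| | * A minimal sketch (illustrative, in the spirit of the /dev/kmem readers |
| | * mentioned above): bounce through a kernel buffer, since holes and |
| | * unmapped ranges come back zero-filled rather than faulting: |
| | * |
| | *      kbuf = kmalloc(count, GFP_KERNEL); |
| | *      if (!kbuf) |
| | *              return -ENOMEM; |
| | *      vread(kbuf, (char *)vaddr, count); |
| | *      err = copy_to_user(ubuf, kbuf, count) ? -EFAULT : 0; |
| | *      kfree(kbuf); |
| | */ |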
| 2150 | |
| 2151 | /** |
| 2152 | * vwrite() - write vmalloc area in a safe way. |
| 2153 | * @buf: buffer for source data |
| 2154 | * @addr: vm address. |
| 2155 | * @count: number of bytes to be written. |
| 2156 | * |
| 2157 | * Returns the number of bytes by which addr and buf should be |
| 2158 | * increased (same as @count). |
| 2159 | * If [addr...addr+count) does not intersect any valid |
| 2160 | * vmalloc area, returns 0. |
| 2161 | * |
| 2162 | * This function checks that addr is a valid vmalloc'ed area and |
| 2163 | * copies data from a buffer to the given addr. If the specified range |
| 2164 | * [addr...addr+count) includes some valid address, data is copied from |
| 2165 | * the proper area of @buf. Memory holes are skipped; nothing is copied into them. |
| 2166 | * IOREMAP areas are treated as memory holes and no copy is done. |
| 2167 | * |
| 2168 | * If [addr...addr+count) does not intersect any live |
| 2169 | * vm_struct area, returns 0. @buf should be a kernel buffer. |
| 2170 | * |
| 2171 | * Note: In usual operation, vwrite() is never necessary because the caller |
| 2172 | * should know the vmalloc() area is valid and can use memcpy(). |
| 2173 | * This is for routines which have to access a vmalloc area without |
| 2174 | * any prior information, such as /dev/kmem. |
| 2175 | */ |
| 2176 | |
| 2177 | long vwrite(char *buf, char *addr, unsigned long count) |
| 2178 | { |
| 2179 | struct vmap_area *va; |
| 2180 | struct vm_struct *vm; |
| 2181 | char *vaddr; |
| 2182 | unsigned long n, buflen; |
| 2183 | int copied = 0; |
| 2184 | |
| 2185 | /* Don't allow overflow */ |
| 2186 | if ((unsigned long) addr + count < count) |
| 2187 | count = -(unsigned long) addr; |
| 2188 | buflen = count; |
| 2189 | |
| 2190 | spin_lock(&vmap_area_lock); |
| 2191 | list_for_each_entry(va, &vmap_area_list, list) { |
| 2192 | if (!count) |
| 2193 | break; |
| 2194 | |
| 2195 | if (!(va->flags & VM_VM_AREA)) |
| 2196 | continue; |
| 2197 | |
| 2198 | vm = va->vm; |
| 2199 | vaddr = (char *) vm->addr; |
| 2200 | if (addr >= vaddr + get_vm_area_size(vm)) |
| 2201 | continue; |
| 2202 | while (addr < vaddr) { |
| 2203 | if (count == 0) |
| 2204 | goto finished; |
| 2205 | buf++; |
| 2206 | addr++; |
| 2207 | count--; |
| 2208 | } |
| 2209 | n = vaddr + get_vm_area_size(vm) - addr; |
| 2210 | if (n > count) |
| 2211 | n = count; |
| 2212 | if (!(vm->flags & VM_IOREMAP)) { |
| 2213 | aligned_vwrite(buf, addr, n); |
| 2214 | copied++; |
| 2215 | } |
| 2216 | buf += n; |
| 2217 | addr += n; |
| 2218 | count -= n; |
| 2219 | } |
| 2220 | finished: |
| 2221 | spin_unlock(&vmap_area_lock); |
| 2222 | if (!copied) |
| 2223 | return 0; |
| 2224 | return buflen; |
| 2225 | } |
| 2226 | |
| 2227 | /** |
| 2228 | * remap_vmalloc_range_partial - map vmalloc pages to userspace |
| 2229 | * @vma: vma to cover |
| 2230 | * @uaddr: target user address to start at |
| 2231 | * @kaddr: virtual address of vmalloc kernel memory |
| 2232 | * @size: size of map area |
| 2233 | * |
| 2234 | * Returns: 0 for success, -Exxx on failure |
| 2235 | * |
| 2236 | * This function checks that @kaddr is a valid vmalloc'ed area, |
| 2237 | * and that it is big enough to cover the range starting at |
| 2238 | * @uaddr in @vma. Will return failure if that criterion isn't |
| 2239 | * met. |
| 2240 | * |
| 2241 | * Similar to remap_pfn_range() (see mm/memory.c) |
| 2242 | */ |
| 2243 | int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr, |
| 2244 | void *kaddr, unsigned long size) |
| 2245 | { |
| 2246 | struct vm_struct *area; |
| 2247 | |
| 2248 | size = PAGE_ALIGN(size); |
| 2249 | |
| 2250 | if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(kaddr)) |
| 2251 | return -EINVAL; |
| 2252 | |
| 2253 | area = find_vm_area(kaddr); |
| 2254 | if (!area) |
| 2255 | return -EINVAL; |
| 2256 | |
| 2257 | if (!(area->flags & VM_USERMAP)) |
| 2258 | return -EINVAL; |
| 2259 | |
| 2260 | if (kaddr + size > area->addr + area->size) |
| 2261 | return -EINVAL; |
| 2262 | |
| 2263 | do { |
| 2264 | struct page *page = vmalloc_to_page(kaddr); |
| 2265 | int ret; |
| 2266 | |
| 2267 | ret = vm_insert_page(vma, uaddr, page); |
| 2268 | if (ret) |
| 2269 | return ret; |
| 2270 | |
| 2271 | uaddr += PAGE_SIZE; |
| 2272 | kaddr += PAGE_SIZE; |
| 2273 | size -= PAGE_SIZE; |
| 2274 | } while (size > 0); |
| 2275 | |
| 2276 | vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; |
| 2277 | |
| 2278 | return 0; |
| 2279 | } |
| 2280 | EXPORT_SYMBOL(remap_vmalloc_range_partial); |
| 2281 | |
| 2282 | /** |
| 2283 | * remap_vmalloc_range - map vmalloc pages to userspace |
| 2284 | * @vma: vma to cover (map full range of vma) |
| 2285 | * @addr: vmalloc memory |
| 2286 | * @pgoff: number of pages into addr before first page to map |
| 2287 | * |
| 2288 | * Returns: 0 for success, -Exxx on failure |
| 2289 | * |
| 2290 | * This function checks that addr is a valid vmalloc'ed area, and |
| 2291 | * that it is big enough to cover the vma. Will return failure if |
| 2292 | * that criterion isn't met. |
| 2293 | * |
| 2294 | * Similar to remap_pfn_range() (see mm/memory.c) |
| 2295 | */ |
| 2296 | int remap_vmalloc_range(struct vm_area_struct *vma, void *addr, |
| 2297 | unsigned long pgoff) |
| 2298 | { |
| 2299 | return remap_vmalloc_range_partial(vma, vma->vm_start, |
| 2300 | addr + (pgoff << PAGE_SHIFT), |
| 2301 | vma->vm_end - vma->vm_start); |
| 2302 | } |
| 2303 | EXPORT_SYMBOL(remap_vmalloc_range); |
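| | /* |
| | * A minimal sketch (illustrative; "foo" is an assumed driver object) of |
| | * the usual pairing in an mmap handler.  The buffer must have been |
| | * allocated with vmalloc_user() (or otherwise carry VM_USERMAP) or the |
| | * remap will be rejected: |
| | * |
| | *      static int foo_mmap(struct file *file, struct vm_area_struct *vma) |
| | *      { |
| | *              return remap_vmalloc_range(vma, foo->vbuf, vma->vm_pgoff); |
| | *      } |
| | */ |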
| 2304 | |
| 2305 | /* |
| 2306 | * Implement a stub for vmalloc_sync_all() if the architecture chose not to |
| 2307 | * have one. |
| 2308 | */ |
| 2309 | void __weak vmalloc_sync_all(void) |
| 2310 | { |
| 2311 | } |
| 2312 | |
| 2313 | |
| 2314 | static int f(pte_t *pte, pgtable_t table, unsigned long addr, void *data) |
| 2315 | { |
| 2316 | pte_t ***p = data; |
| 2317 | |
| 2318 | if (p) { |
| 2319 | *(*p) = pte; |
| 2320 | (*p)++; |
| 2321 | } |
| 2322 | return 0; |
| 2323 | } |
| 2324 | |
| 2325 | /** |
| 2326 | * alloc_vm_area - allocate a range of kernel address space |
| 2327 | * @size: size of the area |
| 2328 | * @ptes: returns the PTEs for the address space |
| 2329 | * |
| 2330 | * Returns: NULL on failure, vm_struct on success |
| 2331 | * |
| 2332 | * This function reserves a range of kernel address space, and |
| 2333 | * allocates pagetables to map that range. No actual mappings |
| 2334 | * are created. |
| 2335 | * |
| 2336 | * If @ptes is non-NULL, pointers to the PTEs (in init_mm) |
| 2337 | * allocated for the VM area are returned. |
| 2338 | */ |
| 2339 | struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes) |
| 2340 | { |
| 2341 | struct vm_struct *area; |
| 2342 | |
| 2343 | area = get_vm_area_caller(size, VM_IOREMAP, |
| 2344 | __builtin_return_address(0)); |
| 2345 | if (area == NULL) |
| 2346 | return NULL; |
| 2347 | |
| 2348 | /* |
| 2349 | * This ensures that page tables are constructed for this region |
| 2350 | * of kernel virtual address space and mapped into init_mm. |
| 2351 | */ |
| 2352 | if (apply_to_page_range(&init_mm, (unsigned long)area->addr, |
| 2353 | size, f, ptes ? &ptes : NULL)) { |
| 2354 | free_vm_area(area); |
| 2355 | return NULL; |
| 2356 | } |
| 2357 | |
| 2358 | return area; |
| 2359 | } |
| 2360 | EXPORT_SYMBOL_GPL(alloc_vm_area); |
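| | /* |
| | * A minimal sketch (illustrative; NR_FRAMES is assumed, and this is only |
| | * in the spirit of how Xen-style backends use the interface): reserve |
| | * kernel address space now and let the backend install translations into |
| | * the returned ptes later: |
| | * |
| | *      pte_t *ptes[NR_FRAMES]; |
| | *      struct vm_struct *vm = alloc_vm_area(NR_FRAMES * PAGE_SIZE, ptes); |
| | *      if (!vm) |
| | *              return -ENOMEM; |
| | *      (hand ptes[] and vm->addr to the backend; free_vm_area(vm) when done) |
| | */ |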
| 2361 | |
| 2362 | void free_vm_area(struct vm_struct *area) |
| 2363 | { |
| 2364 | struct vm_struct *ret; |
| 2365 | ret = remove_vm_area(area->addr); |
| 2366 | BUG_ON(ret != area); |
| 2367 | kfree(area); |
| 2368 | } |
| 2369 | EXPORT_SYMBOL_GPL(free_vm_area); |
| 2370 | |
| 2371 | #ifdef CONFIG_SMP |
| 2372 | static struct vmap_area *node_to_va(struct rb_node *n) |
| 2373 | { |
| 2374 | return rb_entry_safe(n, struct vmap_area, rb_node); |
| 2375 | } |
| 2376 | |
| 2377 | /** |
| 2378 | * pvm_find_next_prev - find the next and prev vmap_area surrounding @end |
| 2379 | * @end: target address |
| 2380 | * @pnext: out arg for the next vmap_area |
| 2381 | * @pprev: out arg for the previous vmap_area |
| 2382 | * |
| 2383 | * Returns: %true if either or both of next and prev are found, |
| 2384 | * %false if no vmap_area exists |
| 2385 | * |
| 2386 | * Find the vmap_areas whose end addresses enclose @end, i.e. if not |
| 2387 | * NULL, *pnext->va_end > @end and *pprev->va_end <= @end. |
| 2388 | */ |
| 2389 | static bool pvm_find_next_prev(unsigned long end, |
| 2390 | struct vmap_area **pnext, |
| 2391 | struct vmap_area **pprev) |
| 2392 | { |
| 2393 | struct rb_node *n = vmap_area_root.rb_node; |
| 2394 | struct vmap_area *va = NULL; |
| 2395 | |
| 2396 | while (n) { |
| 2397 | va = rb_entry(n, struct vmap_area, rb_node); |
| 2398 | if (end < va->va_end) |
| 2399 | n = n->rb_left; |
| 2400 | else if (end > va->va_end) |
| 2401 | n = n->rb_right; |
| 2402 | else |
| 2403 | break; |
| 2404 | } |
| 2405 | |
| 2406 | if (!va) |
| 2407 | return false; |
| 2408 | |
| 2409 | if (va->va_end > end) { |
| 2410 | *pnext = va; |
| 2411 | *pprev = node_to_va(rb_prev(&(*pnext)->rb_node)); |
| 2412 | } else { |
| 2413 | *pprev = va; |
| 2414 | *pnext = node_to_va(rb_next(&(*pprev)->rb_node)); |
| 2415 | } |
| 2416 | return true; |
| 2417 | } |
| 2418 | |
| 2419 | /** |
| 2420 | * pvm_determine_end - find the highest aligned address between two vmap_areas |
| 2421 | * @pnext: in/out arg for the next vmap_area |
| 2422 | * @pprev: in/out arg for the previous vmap_area |
| 2423 | * @align: alignment |
| 2424 | * |
| 2425 | * Returns: determined end address |
| 2426 | * |
| 2427 | * Find the highest aligned address between *@pnext and *@pprev below |
| 2428 | * VMALLOC_END. *@pnext and *@pprev are adjusted so that the aligned |
| 2429 | * down address is between the end addresses of the two vmap_areas. |
| 2430 | * |
| 2431 | * Please note that the address returned by this function may fall |
| 2432 | * inside *@pnext vmap_area. The caller is responsible for checking |
| 2433 | * that. |
| 2434 | */ |
| 2435 | static unsigned long pvm_determine_end(struct vmap_area **pnext, |
| 2436 | struct vmap_area **pprev, |
| 2437 | unsigned long align) |
| 2438 | { |
| 2439 | const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1); |
| 2440 | unsigned long addr; |
| 2441 | |
| 2442 | if (*pnext) |
| 2443 | addr = min((*pnext)->va_start & ~(align - 1), vmalloc_end); |
| 2444 | else |
| 2445 | addr = vmalloc_end; |
| 2446 | |
| 2447 | while (*pprev && (*pprev)->va_end > addr) { |
| 2448 | *pnext = *pprev; |
| 2449 | *pprev = node_to_va(rb_prev(&(*pnext)->rb_node)); |
| 2450 | } |
| 2451 | |
| 2452 | return addr; |
| 2453 | } |
| 2454 | |
| 2455 | /** |
| 2456 | * pcpu_get_vm_areas - allocate vmalloc areas for percpu allocator |
| 2457 | * @offsets: array containing offset of each area |
| 2458 | * @sizes: array containing size of each area |
| 2459 | * @nr_vms: the number of areas to allocate |
| 2460 | * @align: alignment, all entries in @offsets and @sizes must be aligned to this |
| 2461 | * |
| 2462 | * Returns: kmalloc'd vm_struct pointer array pointing to allocated |
| 2463 | * vm_structs on success, %NULL on failure |
| 2464 | * |
| 2465 | * Percpu allocator wants to use congruent vm areas so that it can |
| 2466 | * maintain the offsets among percpu areas. This function allocates |
| 2467 | * congruent vmalloc areas for it with GFP_KERNEL. These areas tend to |
| 2468 | * be scattered pretty far, distance between two areas easily going up |
| 2469 | * to gigabytes. To avoid interacting with regular vmallocs, these |
| 2470 | * areas are allocated from top. |
| 2471 | * |
| 2472 | * Despite its complicated look, this allocator is rather simple. It |
| 2473 | * does everything top-down and scans areas from the end looking for a |
| 2474 | * matching slot. While scanning, if any of the areas overlaps with an |
| 2475 | * existing vmap_area, the base address is pulled down to fit the |
| 2476 | * area. Scanning is repeated till all the areas fit and then all |
| 2477 | * necessary data structures are inserted and the result is returned. |
| 2478 | */ |
| 2479 | struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets, |
| 2480 | const size_t *sizes, int nr_vms, |
| 2481 | size_t align) |
| 2482 | { |
| 2483 | const unsigned long vmalloc_start = ALIGN(VMALLOC_START, align); |
| 2484 | const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1); |
| 2485 | struct vmap_area **vas, *prev, *next; |
| 2486 | struct vm_struct **vms; |
| 2487 | int area, area2, last_area, term_area; |
| 2488 | unsigned long base, start, end, last_end; |
| 2489 | bool purged = false; |
| 2490 | |
| 2491 | /* verify parameters and allocate data structures */ |
| 2492 | BUG_ON(offset_in_page(align) || !is_power_of_2(align)); |
| 2493 | for (last_area = 0, area = 0; area < nr_vms; area++) { |
| 2494 | start = offsets[area]; |
| 2495 | end = start + sizes[area]; |
| 2496 | |
| 2497 | /* is everything aligned properly? */ |
| 2498 | BUG_ON(!IS_ALIGNED(offsets[area], align)); |
| 2499 | BUG_ON(!IS_ALIGNED(sizes[area], align)); |
| 2500 | |
| 2501 | /* detect the area with the highest address */ |
| 2502 | if (start > offsets[last_area]) |
| 2503 | last_area = area; |
| 2504 | |
| 2505 | for (area2 = area + 1; area2 < nr_vms; area2++) { |
| 2506 | unsigned long start2 = offsets[area2]; |
| 2507 | unsigned long end2 = start2 + sizes[area2]; |
| 2508 | |
| 2509 | BUG_ON(start2 < end && start < end2); |
| 2510 | } |
| 2511 | } |
| 2512 | last_end = offsets[last_area] + sizes[last_area]; |
| 2513 | |
| 2514 | if (vmalloc_end - vmalloc_start < last_end) { |
| 2515 | WARN_ON(true); |
| 2516 | return NULL; |
| 2517 | } |
| 2518 | |
| 2519 | vms = kcalloc(nr_vms, sizeof(vms[0]), GFP_KERNEL); |
| 2520 | vas = kcalloc(nr_vms, sizeof(vas[0]), GFP_KERNEL); |
| 2521 | if (!vas || !vms) |
| 2522 | goto err_free2; |
| 2523 | |
| 2524 | for (area = 0; area < nr_vms; area++) { |
| 2525 | vas[area] = kzalloc(sizeof(struct vmap_area), GFP_KERNEL); |
| 2526 | vms[area] = kzalloc(sizeof(struct vm_struct), GFP_KERNEL); |
| 2527 | if (!vas[area] || !vms[area]) |
| 2528 | goto err_free; |
| 2529 | } |
| 2530 | retry: |
| 2531 | spin_lock(&vmap_area_lock); |
| 2532 | |
| 2533 | /* start scanning - we scan from the top, begin with the last area */ |
| 2534 | area = term_area = last_area; |
| 2535 | start = offsets[area]; |
| 2536 | end = start + sizes[area]; |
| 2537 | |
| 2538 | if (!pvm_find_next_prev(vmap_area_pcpu_hole, &next, &prev)) { |
| 2539 | base = vmalloc_end - last_end; |
| 2540 | goto found; |
| 2541 | } |
| 2542 | base = pvm_determine_end(&next, &prev, align) - end; |
| 2543 | |
| 2544 | while (true) { |
| 2545 | BUG_ON(next && next->va_end <= base + end); |
| 2546 | BUG_ON(prev && prev->va_end > base + end); |
| 2547 | |
| 2548 | /* |
| 2549 | * base might have underflowed, add last_end before |
| 2550 | * comparing. |
| 2551 | */ |
| 2552 | if (base + last_end < vmalloc_start + last_end) { |
| 2553 | spin_unlock(&vmap_area_lock); |
| 2554 | if (!purged) { |
| 2555 | purge_vmap_area_lazy(); |
| 2556 | purged = true; |
| 2557 | goto retry; |
| 2558 | } |
| 2559 | goto err_free; |
| 2560 | } |
| 2561 | |
| 2562 | /* |
| 2563 | * If next overlaps, move base downwards so that it's |
| 2564 | * right below next and then recheck. |
| 2565 | */ |
| 2566 | if (next && next->va_start < base + end) { |
| 2567 | base = pvm_determine_end(&next, &prev, align) - end; |
| 2568 | term_area = area; |
| 2569 | continue; |
| 2570 | } |
| 2571 | |
| 2572 | /* |
| 2573 | * If prev overlaps, shift down next and prev and move |
| 2574 | * base so that it's right below new next and then |
| 2575 | * recheck. |
| 2576 | */ |
| 2577 | if (prev && prev->va_end > base + start) { |
| 2578 | next = prev; |
| 2579 | prev = node_to_va(rb_prev(&next->rb_node)); |
| 2580 | base = pvm_determine_end(&next, &prev, align) - end; |
| 2581 | term_area = area; |
| 2582 | continue; |
| 2583 | } |
| 2584 | |
| 2585 | /* |
| 2586 | * This area fits, move on to the previous one. If |
| 2587 | * the previous one is the terminal one, we're done. |
| 2588 | */ |
| 2589 | area = (area + nr_vms - 1) % nr_vms; |
| 2590 | if (area == term_area) |
| 2591 | break; |
| 2592 | start = offsets[area]; |
| 2593 | end = start + sizes[area]; |
| 2594 | pvm_find_next_prev(base + end, &next, &prev); |
| 2595 | } |
| 2596 | found: |
| 2597 | /* we've found a fitting base, insert all va's */ |
| 2598 | for (area = 0; area < nr_vms; area++) { |
| 2599 | struct vmap_area *va = vas[area]; |
| 2600 | |
| 2601 | va->va_start = base + offsets[area]; |
| 2602 | va->va_end = va->va_start + sizes[area]; |
| 2603 | __insert_vmap_area(va); |
| 2604 | } |
| 2605 | |
| 2606 | vmap_area_pcpu_hole = base + offsets[last_area]; |
| 2607 | |
| 2608 | spin_unlock(&vmap_area_lock); |
| 2609 | |
| 2610 | /* insert all vm's */ |
| 2611 | for (area = 0; area < nr_vms; area++) |
| 2612 | setup_vmalloc_vm(vms[area], vas[area], VM_ALLOC, |
| 2613 | pcpu_get_vm_areas); |
| 2614 | |
| 2615 | kfree(vas); |
| 2616 | return vms; |
| 2617 | |
| 2618 | err_free: |
| 2619 | for (area = 0; area < nr_vms; area++) { |
| 2620 | kfree(vas[area]); |
| 2621 | kfree(vms[area]); |
| 2622 | } |
| 2623 | err_free2: |
| 2624 | kfree(vas); |
| 2625 | kfree(vms); |
| 2626 | return NULL; |
| 2627 | } |
| 2628 | |
| 2629 | /** |
| 2630 | * pcpu_free_vm_areas - free vmalloc areas for percpu allocator |
| 2631 | * @vms: vm_struct pointer array returned by pcpu_get_vm_areas() |
| 2632 | * @nr_vms: the number of allocated areas |
| 2633 | * |
| 2634 | * Free vm_structs and the array allocated by pcpu_get_vm_areas(). |
| 2635 | */ |
| 2636 | void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms) |
| 2637 | { |
| 2638 | int i; |
| 2639 | |
| 2640 | for (i = 0; i < nr_vms; i++) |
| 2641 | free_vm_area(vms[i]); |
| 2642 | kfree(vms); |
| 2643 | } |
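| | /* |
| | * A minimal sketch (illustrative; this is roughly how the percpu |
| | * allocator creates and destroys a chunk's mappings): |
| | * |
| | *      vms = pcpu_get_vm_areas(offsets, sizes, nr_units, align); |
| | *      if (!vms) |
| | *              return NULL; |
| | *      ...map pages into each vms[i]... |
| | *      pcpu_free_vm_areas(vms, nr_units); |
| | */ |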
| 2644 | #endif /* CONFIG_SMP */ |
| 2645 | |
| 2646 | #ifdef CONFIG_PROC_FS |
| 2647 | static void *s_start(struct seq_file *m, loff_t *pos) |
| 2648 | __acquires(&vmap_area_lock) |
| 2649 | { |
| 2650 | spin_lock(&vmap_area_lock); |
| 2651 | return seq_list_start(&vmap_area_list, *pos); |
| 2652 | } |
| 2653 | |
| 2654 | static void *s_next(struct seq_file *m, void *p, loff_t *pos) |
| 2655 | { |
| 2656 | return seq_list_next(p, &vmap_area_list, pos); |
| 2657 | } |
| 2658 | |
| 2659 | static void s_stop(struct seq_file *m, void *p) |
| 2660 | __releases(&vmap_area_lock) |
| 2661 | { |
| 2662 | spin_unlock(&vmap_area_lock); |
| 2663 | } |
| 2664 | |
| 2665 | static void show_numa_info(struct seq_file *m, struct vm_struct *v) |
| 2666 | { |
| 2667 | if (IS_ENABLED(CONFIG_NUMA)) { |
| 2668 | unsigned int nr, *counters = m->private; |
| 2669 | |
| 2670 | if (!counters) |
| 2671 | return; |
| 2672 | |
| 2673 | if (v->flags & VM_UNINITIALIZED) |
| 2674 | return; |
| 2675 | /* Pair with smp_wmb() in clear_vm_uninitialized_flag() */ |
| 2676 | smp_rmb(); |
| 2677 | |
| 2678 | memset(counters, 0, nr_node_ids * sizeof(unsigned int)); |
| 2679 | |
| 2680 | for (nr = 0; nr < v->nr_pages; nr++) |
| 2681 | counters[page_to_nid(v->pages[nr])]++; |
| 2682 | |
| 2683 | for_each_node_state(nr, N_HIGH_MEMORY) |
| 2684 | if (counters[nr]) |
| 2685 | seq_printf(m, " N%u=%u", nr, counters[nr]); |
| 2686 | } |
| 2687 | } |
| 2688 | |
| 2689 | static int s_show(struct seq_file *m, void *p) |
| 2690 | { |
| 2691 | struct vmap_area *va; |
| 2692 | struct vm_struct *v; |
| 2693 | |
| 2694 | va = list_entry(p, struct vmap_area, list); |
| 2695 | |
| 2696 | /* |
| 2697 | * s_show can race with remove_vm_area(): !VM_VM_AREA means the vmap |
| 2698 | * area is being torn down or belongs to a vm_map_ram allocation. |
| 2699 | */ |
| 2700 | if (!(va->flags & VM_VM_AREA)) { |
| 2701 | seq_printf(m, "0x%pK-0x%pK %7ld %s\n", |
| 2702 | (void *)va->va_start, (void *)va->va_end, |
| 2703 | va->va_end - va->va_start, |
| 2704 | va->flags & VM_LAZY_FREE ? "unpurged vm_area" : "vm_map_ram"); |
| 2705 | |
| 2706 | return 0; |
| 2707 | } |
| 2708 | |
| 2709 | v = va->vm; |
| 2710 | |
| 2711 | seq_printf(m, "0x%pK-0x%pK %7ld", |
| 2712 | v->addr, v->addr + v->size, v->size); |
| 2713 | |
| 2714 | if (v->caller) |
| 2715 | seq_printf(m, " %pS", v->caller); |
| 2716 | |
| 2717 | if (v->nr_pages) |
| 2718 | seq_printf(m, " pages=%d", v->nr_pages); |
| 2719 | |
| 2720 | if (v->phys_addr) |
| 2721 | seq_printf(m, " phys=%pa", &v->phys_addr); |
| 2722 | |
| 2723 | if (v->flags & VM_IOREMAP) |
| 2724 | seq_puts(m, " ioremap"); |
| 2725 | |
| 2726 | if (v->flags & VM_ALLOC) |
| 2727 | seq_puts(m, " vmalloc"); |
| 2728 | |
| 2729 | if (v->flags & VM_MAP) |
| 2730 | seq_puts(m, " vmap"); |
| 2731 | |
| 2732 | if (v->flags & VM_USERMAP) |
| 2733 | seq_puts(m, " user"); |
| 2734 | |
| 2735 | if (is_vmalloc_addr(v->pages)) |
| 2736 | seq_puts(m, " vpages"); |
| 2737 | |
| 2738 | show_numa_info(m, v); |
| 2739 | seq_putc(m, '\n'); |
| 2740 | return 0; |
| 2741 | } |
| 2742 | |
| 2743 | static const struct seq_operations vmalloc_op = { |
| 2744 | .start = s_start, |
| 2745 | .next = s_next, |
| 2746 | .stop = s_stop, |
| 2747 | .show = s_show, |
| 2748 | }; |
| 2749 | |
| 2750 | static int vmalloc_open(struct inode *inode, struct file *file) |
| 2751 | { |
| 2752 | if (IS_ENABLED(CONFIG_NUMA)) |
| 2753 | return seq_open_private(file, &vmalloc_op, |
| 2754 | nr_node_ids * sizeof(unsigned int)); |
| 2755 | else |
| 2756 | return seq_open(file, &vmalloc_op); |
| 2757 | } |
| 2758 | |
| 2759 | static const struct file_operations proc_vmalloc_operations = { |
| 2760 | .open = vmalloc_open, |
| 2761 | .read = seq_read, |
| 2762 | .llseek = seq_lseek, |
| 2763 | .release = seq_release_private, |
| 2764 | }; |
| 2765 | |
| 2766 | static int __init proc_vmalloc_init(void) |
| 2767 | { |
| 2768 | proc_create("vmallocinfo", S_IRUSR, NULL, &proc_vmalloc_operations); |
| 2769 | return 0; |
| 2770 | } |
| 2771 | module_init(proc_vmalloc_init); |
| 2772 | |
| 2773 | #endif |
| 2774 | |