 * Copyright (C) 1993 Linus Torvalds
 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 * SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
 * Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
 * Numa awareness, Christoph Lameter, SGI, June 2005
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/vmalloc.h>

#include <asm/uaccess.h>
#include <asm/tlbflush.h>
DEFINE_RWLOCK(vmlist_lock);
struct vm_struct *vmlist;

static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,

static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
	pte = pte_offset_kernel(pmd, addr);
		pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
		WARN_ON(!pte_none(ptent) && !pte_present(ptent));
	} while (pte++, addr += PAGE_SIZE, addr != end);
static inline void vunmap_pmd_range(pud_t *pud, unsigned long addr,
	pmd = pmd_offset(pud, addr);
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
		vunmap_pte_range(pmd, addr, next);
	} while (pmd++, addr = next, addr != end);
static inline void vunmap_pud_range(pgd_t *pgd, unsigned long addr,
	pud = pud_offset(pgd, addr);
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
		vunmap_pmd_range(pud, addr, next);
	} while (pud++, addr = next, addr != end);
void unmap_kernel_range(unsigned long addr, unsigned long size)
	unsigned long start = addr;
	unsigned long end = addr + size;
	pgd = pgd_offset_k(addr);
	flush_cache_vunmap(addr, end);
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
		vunmap_pud_range(pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
	flush_tlb_kernel_range(start, end);
static void unmap_vm_area(struct vm_struct *area)
	unmap_kernel_range((unsigned long)area->addr, area->size);
static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page ***pages)
	pte = pte_alloc_kernel(pmd, addr);
		struct page *page = **pages;
		WARN_ON(!pte_none(*pte));
		set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
	} while (pte++, addr += PAGE_SIZE, addr != end);
static inline int vmap_pmd_range(pud_t *pud, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page ***pages)
	pmd = pmd_alloc(&init_mm, pud, addr);
		next = pmd_addr_end(addr, end);
		if (vmap_pte_range(pmd, addr, next, prot, pages))
	} while (pmd++, addr = next, addr != end);
static inline int vmap_pud_range(pgd_t *pgd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page ***pages)
	pud = pud_alloc(&init_mm, pgd, addr);
		next = pud_addr_end(addr, end);
		if (vmap_pmd_range(pud, addr, next, prot, pages))
	} while (pud++, addr = next, addr != end);
int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages)
	unsigned long addr = (unsigned long) area->addr;
	unsigned long end = addr + area->size - PAGE_SIZE;
	pgd = pgd_offset_k(addr);
		next = pgd_addr_end(addr, end);
		err = vmap_pud_range(pgd, addr, next, prot, pages);
	} while (pgd++, addr = next, addr != end);
	flush_cache_vmap((unsigned long) area->addr, end);
EXPORT_SYMBOL_GPL(map_vm_area);
 * Map a vmalloc()-space virtual address to the physical page.
struct page *vmalloc_to_page(void *vmalloc_addr)
	unsigned long addr = (unsigned long) vmalloc_addr;
	struct page *page = NULL;
	pgd_t *pgd = pgd_offset_k(addr);
	if (!pgd_none(*pgd)) {
		pud = pud_offset(pgd, addr);
		if (!pud_none(*pud)) {
			pmd = pmd_offset(pud, addr);
			if (!pmd_none(*pmd)) {
				ptep = pte_offset_map(pmd, addr);
				if (pte_present(pte))
					page = pte_page(pte);
EXPORT_SYMBOL(vmalloc_to_page);
 * Map a vmalloc()-space virtual address to the physical page frame number.
unsigned long vmalloc_to_pfn(void *vmalloc_addr)
	return page_to_pfn(vmalloc_to_page(vmalloc_addr));
EXPORT_SYMBOL(vmalloc_to_pfn);
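
/*
 * Illustrative usage sketch (not part of this file's API): walk a
 * vmalloc()ed buffer page by page and look up the backing struct page and
 * pfn.  The function name and the printk are made up for the example; the
 * pages are virtually contiguous but their pfns need not be.
 */
static void example_show_backing_pfns(void *buf, unsigned long size)
{
	unsigned long offset;

	for (offset = 0; offset < size; offset += PAGE_SIZE) {
		struct page *page = vmalloc_to_page(buf + offset);

		printk(KERN_DEBUG "vaddr %p -> pfn %lu\n",
		       buf + offset, page_to_pfn(page));
	}
}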
static struct vm_struct *__get_vm_area_node(unsigned long size, unsigned long flags,
					    unsigned long start, unsigned long end,
					    int node, gfp_t gfp_mask)
	struct vm_struct **p, *tmp, *area;
	unsigned long align = 1;
	BUG_ON(in_interrupt());
	if (flags & VM_IOREMAP) {
		if (bit > IOREMAP_MAX_ORDER)
			bit = IOREMAP_MAX_ORDER;
		else if (bit < PAGE_SHIFT)
	addr = ALIGN(start, align);
	size = PAGE_ALIGN(size);
	area = kmalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);
	 * We always allocate a guard page.
	write_lock(&vmlist_lock);
	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
		if ((unsigned long)tmp->addr < addr) {
			if ((unsigned long)tmp->addr + tmp->size >= addr)
				addr = ALIGN(tmp->size +
					     (unsigned long)tmp->addr, align);
		if ((size + addr) < addr)
		if (size + addr <= (unsigned long)tmp->addr)
		addr = ALIGN(tmp->size + (unsigned long)tmp->addr, align);
		if (addr > end - size)
	area->addr = (void *)addr;
	write_unlock(&vmlist_lock);
	write_unlock(&vmlist_lock);
	if (printk_ratelimit())
		printk(KERN_WARNING "allocation failed: out of vmalloc space - use vmalloc=<size> to increase size.\n");
struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
				unsigned long start, unsigned long end)
	return __get_vm_area_node(size, flags, start, end, -1, GFP_KERNEL);
EXPORT_SYMBOL_GPL(__get_vm_area);
 * get_vm_area - reserve a contiguous kernel virtual area
 * @size: size of the area
 * @flags: %VM_IOREMAP for I/O mappings or VM_ALLOC
 * Search an area of @size in the kernel virtual mapping area,
 * and reserve it for our purposes.  Returns the area descriptor
 * on success or %NULL on failure.
struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
	return __get_vm_area(size, flags, VMALLOC_START, VMALLOC_END);
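
/*
 * Illustrative usage sketch (not part of this file): roughly how an
 * architecture's ioremap() implementation uses get_vm_area() - reserve a
 * page-aligned chunk of the vmalloc address range, then install the
 * physical mapping with ioremap_page_range() from lib/ioremap.c (assumes
 * <linux/io.h> is available for its prototype).  The function name is made
 * up and error handling is abbreviated.
 */
static void __iomem *example_simple_ioremap(unsigned long phys_addr,
					    unsigned long size, pgprot_t prot)
{
	struct vm_struct *area;
	unsigned long addr;

	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;

	addr = (unsigned long)area->addr;
	if (ioremap_page_range(addr, addr + size, phys_addr, prot)) {
		remove_vm_area(area->addr);
		kfree(area);
		return NULL;
	}
	return (void __iomem *)addr;
}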
struct vm_struct *get_vm_area_node(unsigned long size, unsigned long flags,
				   int node, gfp_t gfp_mask)
	return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END, node,

/* Caller must hold vmlist_lock */
static struct vm_struct *__find_vm_area(void *addr)
	struct vm_struct *tmp;
	for (tmp = vmlist; tmp != NULL; tmp = tmp->next) {
		if (tmp->addr == addr)

/* Caller must hold vmlist_lock */
static struct vm_struct *__remove_vm_area(void *addr)
	struct vm_struct **p, *tmp;
	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
		if (tmp->addr == addr)
	 * Remove the guard page.
	tmp->size -= PAGE_SIZE;
 * remove_vm_area - find and remove a contiguous kernel virtual area
 * @addr: base address
 * Search for the kernel VM area starting at @addr, and remove it.
 * This function returns the found VM area, but using it is NOT safe
 * on SMP machines, except for its size or flags.
struct vm_struct *remove_vm_area(void *addr)
	write_lock(&vmlist_lock);
	v = __remove_vm_area(addr);
	write_unlock(&vmlist_lock);
static void __vunmap(void *addr, int deallocate_pages)
	struct vm_struct *area;
	if ((PAGE_SIZE-1) & (unsigned long)addr) {
		printk(KERN_ERR "Trying to vfree() bad address (%p)\n", addr);
	area = remove_vm_area(addr);
	if (unlikely(!area)) {
		printk(KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
	debug_check_no_locks_freed(addr, area->size);
	if (deallocate_pages) {
		for (i = 0; i < area->nr_pages; i++) {
			BUG_ON(!area->pages[i]);
			__free_page(area->pages[i]);
		if (area->flags & VM_VPAGES)
 * vfree - release memory allocated by vmalloc()
 * @addr: memory base address
 * Free the virtually contiguous memory area starting at @addr, as
 * obtained from vmalloc(), vmalloc_32() or __vmalloc(). If @addr is
 * NULL, no operation is performed.
 * Must not be called in interrupt context.
void vfree(void *addr)
	BUG_ON(in_interrupt());
EXPORT_SYMBOL(vfree);
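
/*
 * Illustrative usage sketch (not part of this file): the usual
 * vmalloc()/vfree() pairing for a buffer that only needs to be virtually
 * contiguous.  The function name is made up for the example.
 */
static int example_use_vmalloc(void)
{
	size_t len = 128 * 1024;
	char *buf;

	buf = vmalloc(len);	/* may sleep; never call from atomic context */
	if (!buf)
		return -ENOMEM;

	memset(buf, 0, len);	/* physically scattered, virtually contiguous */

	vfree(buf);		/* likewise must not run in interrupt context */
	return 0;
}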
 * vunmap - release virtual mapping obtained by vmap()
 * @addr: memory base address
 * Free the virtually contiguous memory area starting at @addr,
 * which was created from the page array passed to vmap().
 * Must not be called in interrupt context.
void vunmap(void *addr)
	BUG_ON(in_interrupt());
EXPORT_SYMBOL(vunmap);
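
/*
 * Illustrative usage sketch (not part of this file): the vmap()/vunmap()
 * pair documented above and just below - vmap() builds a contiguous kernel
 * view of individually allocated pages, vunmap() drops that view without
 * freeing the pages.  The function names are made up for the example.
 */
static void *example_vmap_two_pages(struct page *pages[2])
{
	/* VM_MAP tags the vm_struct as a vmap()ed area */
	return vmap(pages, 2, VM_MAP, PAGE_KERNEL);
}

static void example_vunmap(void *vaddr)
{
	vunmap(vaddr);		/* the pages themselves stay allocated */
}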
 * vmap - map an array of pages into virtually contiguous space
 * @pages: array of page pointers
 * @count: number of pages to map
 * @flags: vm_area->flags
 * @prot: page protection for the mapping
 * Maps @count pages from @pages into contiguous kernel virtual space.
void *vmap(struct page **pages, unsigned int count,
		unsigned long flags, pgprot_t prot)
	struct vm_struct *area;
	if (count > num_physpages)
	area = get_vm_area((count << PAGE_SHIFT), flags);
	if (map_vm_area(area, prot, &pages)) {
void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
				pgprot_t prot, int node)
	unsigned int nr_pages, array_size, i;
	nr_pages = (area->size - PAGE_SIZE) >> PAGE_SHIFT;
	array_size = (nr_pages * sizeof(struct page *));
	area->nr_pages = nr_pages;
	/* Please note that the recursion is strictly bounded. */
	if (array_size > PAGE_SIZE) {
		pages = __vmalloc_node(array_size, gfp_mask | __GFP_ZERO,
		area->flags |= VM_VPAGES;
		pages = kmalloc_node(array_size,
				(gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO,
		remove_vm_area(area->addr);
	for (i = 0; i < area->nr_pages; i++) {
		area->pages[i] = alloc_page(gfp_mask);
		area->pages[i] = alloc_pages_node(node, gfp_mask, 0);
		if (unlikely(!area->pages[i])) {
			/* Successfully allocated i pages, free them in __vunmap() */
	if (map_vm_area(area, prot, &pages))
void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot)
	return __vmalloc_area_node(area, gfp_mask, prot, -1);
 * __vmalloc_node - allocate virtually contiguous memory
 * @size: allocation size
 * @gfp_mask: flags for the page level allocator
 * @prot: protection mask for the allocated pages
 * @node: node to use for allocation or -1
 * Allocate enough pages to cover @size from the page level
 * allocator with @gfp_mask flags.  Map them into contiguous
 * kernel virtual space, using a pagetable protection of @prot.
static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
	struct vm_struct *area;
	size = PAGE_ALIGN(size);
	if (!size || (size >> PAGE_SHIFT) > num_physpages)
	area = get_vm_area_node(size, VM_ALLOC, node, gfp_mask);
	return __vmalloc_area_node(area, gfp_mask, prot, node);

void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
	return __vmalloc_node(size, gfp_mask, prot, -1);
EXPORT_SYMBOL(__vmalloc);
 * vmalloc - allocate virtually contiguous memory
 * @size: allocation size
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
void *vmalloc(unsigned long size)
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
EXPORT_SYMBOL(vmalloc);
 * vmalloc_user - allocate zeroed virtually contiguous memory for userspace
 * @size: allocation size
 * The resulting memory area is zeroed so it can be mapped to userspace
 * without leaking data.
void *vmalloc_user(unsigned long size)
	struct vm_struct *area;
	ret = __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
	write_lock(&vmlist_lock);
	area = __find_vm_area(ret);
	area->flags |= VM_USERMAP;
	write_unlock(&vmlist_lock);
EXPORT_SYMBOL(vmalloc_user);
 * vmalloc_node - allocate memory on a specific node
 * @size: allocation size
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
void *vmalloc_node(unsigned long size, int node)
	return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL, node);
EXPORT_SYMBOL(vmalloc_node);
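
/*
 * Illustrative usage sketch (not part of this file): vmalloc_node() keeps
 * the backing pages on the NUMA node that will touch the buffer most.  The
 * function name is made up for the example.
 */
static void *example_alloc_scratch_on_node(int nid, unsigned long bytes)
{
	/* Falls back to other nodes if @nid cannot satisfy the allocation. */
	return vmalloc_node(bytes, nid);
}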
#ifndef PAGE_KERNEL_EXEC
# define PAGE_KERNEL_EXEC PAGE_KERNEL

 * vmalloc_exec - allocate virtually contiguous, executable memory
 * @size: allocation size
 * Kernel-internal function to allocate enough pages to cover @size
 * from the page level allocator and map them into contiguous and
 * executable kernel virtual space.
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
void *vmalloc_exec(unsigned long size)
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC);
#if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
#define GFP_VMALLOC32 GFP_DMA32 | GFP_KERNEL
#elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
#define GFP_VMALLOC32 GFP_DMA | GFP_KERNEL
#define GFP_VMALLOC32 GFP_KERNEL
 * vmalloc_32 - allocate virtually contiguous memory (32bit addressable)
 * @size: allocation size
 * Allocate enough 32-bit physically addressable pages to cover @size from
 * the page level allocator and map them into contiguous kernel virtual space.
void *vmalloc_32(unsigned long size)
	return __vmalloc(size, GFP_VMALLOC32, PAGE_KERNEL);
EXPORT_SYMBOL(vmalloc_32);
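
/*
 * Illustrative usage sketch (not part of this file): a driver whose device
 * can only address 32-bit physical memory, and which transfers page by page
 * (scatter-gather), can use vmalloc_32() so every backing page lies below
 * 4GB.  The buffer is still only virtually contiguous.  The function name
 * is made up for the example.
 */
static void *example_alloc_dma32_backing(unsigned long bytes)
{
	return vmalloc_32(bytes);
}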
 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
 * @size: allocation size
 * The resulting memory area is 32bit addressable and zeroed so it can be
 * mapped to userspace without leaking data.
void *vmalloc_32_user(unsigned long size)
	struct vm_struct *area;
	ret = __vmalloc(size, GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL);
	write_lock(&vmlist_lock);
	area = __find_vm_area(ret);
	area->flags |= VM_USERMAP;
	write_unlock(&vmlist_lock);
EXPORT_SYMBOL(vmalloc_32_user);
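
/*
 * vread()/vwrite() below copy data between a caller-supplied buffer and the
 * vmalloc address range.  They walk vmlist under vmlist_lock, stop short of
 * each area's guard page, and treat gaps between areas as zeroes (vread) or
 * skip them (vwrite); the /dev/kmem code is a typical caller.
 */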
long vread(char *buf, char *addr, unsigned long count)
	struct vm_struct *tmp;
	char *vaddr, *buf_start = buf;
	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;
	read_lock(&vmlist_lock);
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		vaddr = (char *) tmp->addr;
		if (addr >= vaddr + tmp->size - PAGE_SIZE)
		while (addr < vaddr) {
		n = vaddr + tmp->size - PAGE_SIZE - addr;
	read_unlock(&vmlist_lock);
	return buf - buf_start;
long vwrite(char *buf, char *addr, unsigned long count)
	struct vm_struct *tmp;
	char *vaddr, *buf_start = buf;
	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;
	read_lock(&vmlist_lock);
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		vaddr = (char *) tmp->addr;
		if (addr >= vaddr + tmp->size - PAGE_SIZE)
		while (addr < vaddr) {
		n = vaddr + tmp->size - PAGE_SIZE - addr;
	read_unlock(&vmlist_lock);
	return buf - buf_start;
 * remap_vmalloc_range - map vmalloc pages to userspace
 * @vma: vma to cover (map full range of vma)
 * @addr: vmalloc memory
 * @pgoff: number of pages into addr before first page to map
 * @returns: 0 for success, -Exxx on failure
 * This function checks that addr is a valid vmalloc'ed area, and
 * that it is big enough to cover the vma. Will return failure if
 * those criteria aren't met.
 * Similar to remap_pfn_range() (see mm/memory.c)
int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
	struct vm_struct *area;
	unsigned long uaddr = vma->vm_start;
	unsigned long usize = vma->vm_end - vma->vm_start;
	if ((PAGE_SIZE-1) & (unsigned long)addr)
	read_lock(&vmlist_lock);
	area = __find_vm_area(addr);
		goto out_einval_locked;
	if (!(area->flags & VM_USERMAP))
		goto out_einval_locked;
	if (usize + (pgoff << PAGE_SHIFT) > area->size - PAGE_SIZE)
		goto out_einval_locked;
	read_unlock(&vmlist_lock);
	addr += pgoff << PAGE_SHIFT;
		struct page *page = vmalloc_to_page(addr);
		ret = vm_insert_page(vma, uaddr, page);
	/* Prevent "things" like memory migration? VM_flags need a cleanup... */
	vma->vm_flags |= VM_RESERVED;
	read_unlock(&vmlist_lock);
EXPORT_SYMBOL(remap_vmalloc_range);
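
/*
 * Illustrative usage sketch (not part of this file): how a driver's
 * ->mmap() handler would export a vmalloc_user() buffer with
 * remap_vmalloc_range().  The helper name and buffer are made up;
 * vmalloc_user() matters here because it sets VM_USERMAP (checked above)
 * and pre-zeroes the pages.
 */
static int example_map_buf_to_user(struct vm_area_struct *vma, void *kbuf)
{
	/* kbuf must come from vmalloc_user() or vmalloc_32_user() */
	return remap_vmalloc_range(vma, kbuf, vma->vm_pgoff);
}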
 * Implement a stub for vmalloc_sync_all() if the architecture chose not to
 * have one.
void __attribute__((weak)) vmalloc_sync_all(void)

static int f(pte_t *pte, struct page *pmd_page, unsigned long addr, void *data)
	/* apply_to_page_range() does all the hard work. */
 * alloc_vm_area - allocate a range of kernel address space
 * @size: size of the area
 * @returns: NULL on failure, vm_struct on success
 * This function reserves a range of kernel address space, and
 * allocates pagetables to map that range.  No actual mappings
 * are created.  If the kernel address space is not shared
 * between processes, it syncs the pagetable across all processes.
struct vm_struct *alloc_vm_area(size_t size)
	struct vm_struct *area;
	area = get_vm_area(size, VM_IOREMAP);
	 * This ensures that page tables are constructed for this region
	 * of kernel virtual address space and mapped into init_mm.
	if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
				area->size, f, NULL)) {
	/* Make sure the pagetables are constructed in process kernel mappings. */
EXPORT_SYMBOL_GPL(alloc_vm_area);
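
/*
 * Illustrative usage sketch (not part of this file): reserving empty but
 * pagetable-backed kernel address space with alloc_vm_area(), as a
 * hypervisor backend (e.g. the Xen grant-table code) does before asking
 * the hypervisor to populate the PTEs, then releasing it again.  The
 * function name is made up for the example.
 */
static void example_reserve_and_release_va(void)
{
	struct vm_struct *area;

	area = alloc_vm_area(4 * PAGE_SIZE);
	if (!area)
		return;

	/* ... hand area->addr to whoever will install the real mappings ... */

	free_vm_area(area);	/* BUG()s if the area is no longer registered */
}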
void free_vm_area(struct vm_struct *area)
	struct vm_struct *ret;
	ret = remove_vm_area(area->addr);
EXPORT_SYMBOL_GPL(free_vm_area);