/*
 *  linux/mm/vmalloc.c
 *
 *  Copyright (C) 1993  Linus Torvalds
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
 *  Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>

#include <linux/vmalloc.h>

#include <asm/uaccess.h>
#include <asm/tlbflush.h>


DEFINE_RWLOCK(vmlist_lock);
struct vm_struct *vmlist;

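/*
 * The vunmap_*_range() helpers below walk the kernel page tables top
 * down (pgd -> pud -> pmd -> pte) and clear every leaf pte in
 * [addr, end).  Only the leaf entries are cleared; the page-table
 * pages themselves are left in place.
 */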
static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
{
	pte_t *pte;

	pte = pte_offset_kernel(pmd, addr);
	do {
		pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
		WARN_ON(!pte_none(ptent) && !pte_present(ptent));
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

static inline void vunmap_pmd_range(pud_t *pud, unsigned long addr,
						unsigned long end)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		vunmap_pte_range(pmd, addr, next);
	} while (pmd++, addr = next, addr != end);
}

static inline void vunmap_pud_range(pgd_t *pgd, unsigned long addr,
						unsigned long end)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		vunmap_pmd_range(pud, addr, next);
	} while (pud++, addr = next, addr != end);
}

void unmap_vm_area(struct vm_struct *area)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long addr = (unsigned long) area->addr;
	unsigned long end = addr + area->size;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	flush_cache_vunmap(addr, end);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		vunmap_pud_range(pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
	flush_tlb_kernel_range((unsigned long) area->addr, end);
}

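/*
 * The vmap_*_range() helpers mirror the unmap walk above, but allocate
 * any missing intermediate page-table levels on the way down and
 * install one page from *pages at each pte slot, advancing the array
 * cursor as they go.
 */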
static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page ***pages)
{
	pte_t *pte;

	pte = pte_alloc_kernel(&init_mm, pmd, addr);
	if (!pte)
		return -ENOMEM;
	do {
		struct page *page = **pages;
		WARN_ON(!pte_none(*pte));
		if (!page)
			return -ENOMEM;
		set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
		(*pages)++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	return 0;
}

static inline int vmap_pmd_range(pud_t *pud, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page ***pages)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_alloc(&init_mm, pud, addr);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);
		if (vmap_pte_range(pmd, addr, next, prot, pages))
			return -ENOMEM;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static inline int vmap_pud_range(pgd_t *pgd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page ***pages)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_alloc(&init_mm, pgd, addr);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);
		if (vmap_pmd_range(pud, addr, next, prot, pages))
			return -ENOMEM;
	} while (pud++, addr = next, addr != end);
	return 0;
}

int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long addr = (unsigned long) area->addr;
	unsigned long end = addr + area->size - PAGE_SIZE;
	int err;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	spin_lock(&init_mm.page_table_lock);
	do {
		next = pgd_addr_end(addr, end);
		err = vmap_pud_range(pgd, addr, next, prot, pages);
		if (err)
			break;
	} while (pgd++, addr = next, addr != end);
	spin_unlock(&init_mm.page_table_lock);
	flush_cache_vmap((unsigned long) area->addr, end);
	return err;
}

struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
				unsigned long start, unsigned long end)
{
	struct vm_struct **p, *tmp, *area;
	unsigned long align = 1;
	unsigned long addr;

	if (flags & VM_IOREMAP) {
		int bit = fls(size);

		if (bit > IOREMAP_MAX_ORDER)
			bit = IOREMAP_MAX_ORDER;
		else if (bit < PAGE_SHIFT)
			bit = PAGE_SHIFT;

		align = 1ul << bit;
	}
	addr = ALIGN(start, align);
	size = PAGE_ALIGN(size);

	area = kmalloc(sizeof(*area), GFP_KERNEL);
	if (unlikely(!area))
		return NULL;

	if (unlikely(!size)) {
		kfree(area);
		return NULL;
	}

	/*
	 * We always allocate a guard page after the area, so that
	 * out-of-bounds accesses fault instead of silently running
	 * into the next mapping.
	 */
	size += PAGE_SIZE;

	write_lock(&vmlist_lock);
	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
		if ((unsigned long)tmp->addr < addr) {
			if ((unsigned long)tmp->addr + tmp->size >= addr)
				addr = ALIGN(tmp->size +
					     (unsigned long)tmp->addr, align);
			continue;
		}
		if ((size + addr) < addr)
			goto out;
		if (size + addr <= (unsigned long)tmp->addr)
			goto found;
		addr = ALIGN(tmp->size + (unsigned long)tmp->addr, align);
		if (addr > end - size)
			goto out;
	}

found:
	area->next = *p;
	*p = area;

	area->flags = flags;
	area->addr = (void *)addr;
	area->size = size;
	area->pages = NULL;
	area->nr_pages = 0;
	area->phys_addr = 0;
	write_unlock(&vmlist_lock);

	return area;

out:
	write_unlock(&vmlist_lock);
	kfree(area);
	if (printk_ratelimit())
		printk(KERN_WARNING "allocation failed: out of vmalloc space - use vmalloc=<size> to increase size.\n");
	return NULL;
}

/**
 * get_vm_area - reserve a contiguous kernel virtual area
 *
 * @size: size of the area
 * @flags: %VM_IOREMAP for I/O mappings or %VM_ALLOC
 *
 * Search an area of @size in the kernel virtual mapping area,
 * and reserve it for our purposes.  Returns the area descriptor
 * on success or %NULL on failure.
 */
struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
{
	return __get_vm_area(size, flags, VMALLOC_START, VMALLOC_END);
}

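/*
 * Illustrative sketch (not part of this file): an architecture's
 * ioremap() typically pairs get_vm_area() with an arch-specific
 * page-table fill, roughly like this.  Identifiers and the mapping
 * step are hypothetical and differ per architecture:
 *
 *	struct vm_struct *area = get_vm_area(size, VM_IOREMAP);
 *	if (!area)
 *		return NULL;
 *	area->phys_addr = phys_addr;
 *	... map phys_addr..phys_addr+size at area->addr ...
 *	return area->addr;
 */
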
/* Caller must hold vmlist_lock */
struct vm_struct *__remove_vm_area(void *addr)
{
	struct vm_struct **p, *tmp;

	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
		if (tmp->addr == addr)
			goto found;
	}
	return NULL;

found:
	unmap_vm_area(tmp);
	*p = tmp->next;

	/*
	 * Remove the guard page.
	 */
	tmp->size -= PAGE_SIZE;
	return tmp;
}

/**
 * remove_vm_area - find and remove a contiguous kernel virtual area
 *
 * @addr: base address
 *
 * Search for the kernel VM area starting at @addr, and remove it.
 * This function returns the found VM area, but using it is NOT safe
 * on SMP machines, except for its size or flags.
 */
struct vm_struct *remove_vm_area(void *addr)
{
	struct vm_struct *v;
	write_lock(&vmlist_lock);
	v = __remove_vm_area(addr);
	write_unlock(&vmlist_lock);
	return v;
}

void __vunmap(void *addr, int deallocate_pages)
{
	struct vm_struct *area;

	if (!addr)
		return;

	if ((PAGE_SIZE-1) & (unsigned long)addr) {
		printk(KERN_ERR "Trying to vfree() bad address (%p)\n", addr);
		WARN_ON(1);
		return;
	}

	area = remove_vm_area(addr);
	if (unlikely(!area)) {
		printk(KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
				addr);
		WARN_ON(1);
		return;
	}

	if (deallocate_pages) {
		int i;

		for (i = 0; i < area->nr_pages; i++) {
			BUG_ON(!area->pages[i]);
			__free_page(area->pages[i]);
		}

		if (area->nr_pages > PAGE_SIZE/sizeof(struct page *))
			vfree(area->pages);
		else
			kfree(area->pages);
	}

	kfree(area);
	return;
}

/**
 * vfree - release memory allocated by vmalloc()
 *
 * @addr: memory base address
 *
 * Free the virtually contiguous memory area starting at @addr, as
 * obtained from vmalloc(), vmalloc_32() or __vmalloc().
 *
 * May not be called in interrupt context.
 */
void vfree(void *addr)
{
	BUG_ON(in_interrupt());
	__vunmap(addr, 1);
}

EXPORT_SYMBOL(vfree);

/**
 * vunmap - release virtual mapping obtained by vmap()
 *
 * @addr: memory base address
 *
 * Free the virtually contiguous memory area starting at @addr,
 * which was created from the page array passed to vmap().
 *
 * May not be called in interrupt context.
 */
void vunmap(void *addr)
{
	BUG_ON(in_interrupt());
	__vunmap(addr, 0);
}

EXPORT_SYMBOL(vunmap);

/**
 * vmap - map an array of pages into virtually contiguous space
 *
 * @pages: array of page pointers
 * @count: number of pages to map
 * @flags: vm_area->flags
 * @prot: page protection for the mapping
 *
 * Maps @count pages from @pages into contiguous kernel virtual
 * space.
 */
void *vmap(struct page **pages, unsigned int count,
		unsigned long flags, pgprot_t prot)
{
	struct vm_struct *area;

	if (count > num_physpages)
		return NULL;

	area = get_vm_area((count << PAGE_SHIFT), flags);
	if (!area)
		return NULL;
	if (map_vm_area(area, prot, &pages)) {
		vunmap(area->addr);
		return NULL;
	}

	return area->addr;
}

EXPORT_SYMBOL(vmap);

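/*
 * Illustrative sketch (not part of this file): mapping an array of
 * already-allocated pages and tearing the mapping down again.  The
 * identifiers are hypothetical, and VM_MAP is assumed here as the
 * conventional flag for vmap()ed areas; older trees may differ.
 *
 *	struct page *pages[NPAGES];	(filled by alloc_page() etc.)
 *	void *virt = vmap(pages, NPAGES, VM_MAP, PAGE_KERNEL);
 *	if (!virt)
 *		return -ENOMEM;
 *	...
 *	vunmap(virt);	(the pages themselves remain allocated)
 */
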
void *__vmalloc_area(struct vm_struct *area, unsigned int __nocast gfp_mask, pgprot_t prot)
{
	struct page **pages;
	unsigned int nr_pages, array_size, i;

	nr_pages = (area->size - PAGE_SIZE) >> PAGE_SHIFT;
	array_size = (nr_pages * sizeof(struct page *));

	area->nr_pages = nr_pages;
	/* Please note that the recursion is strictly bounded. */
	if (array_size > PAGE_SIZE)
		pages = __vmalloc(array_size, gfp_mask, PAGE_KERNEL);
	else
		pages = kmalloc(array_size, (gfp_mask & ~__GFP_HIGHMEM));
	area->pages = pages;
	if (!area->pages) {
		remove_vm_area(area->addr);
		kfree(area);
		return NULL;
	}
	memset(area->pages, 0, array_size);

	for (i = 0; i < area->nr_pages; i++) {
		area->pages[i] = alloc_page(gfp_mask);
		if (unlikely(!area->pages[i])) {
			/* Successfully allocated i pages, free them in __vunmap() */
			area->nr_pages = i;
			goto fail;
		}
	}

	if (map_vm_area(area, prot, &pages))
		goto fail;
	return area->addr;

fail:
	vfree(area->addr);
	return NULL;
}

/**
 * __vmalloc - allocate virtually contiguous memory
 *
 * @size: allocation size
 * @gfp_mask: flags for the page level allocator
 * @prot: protection mask for the allocated pages
 *
 * Allocate enough pages to cover @size from the page level
 * allocator with @gfp_mask flags.  Map them into contiguous
 * kernel virtual space, using a pagetable protection of @prot.
 */
void *__vmalloc(unsigned long size, unsigned int __nocast gfp_mask, pgprot_t prot)
{
	struct vm_struct *area;

	size = PAGE_ALIGN(size);
	if (!size || (size >> PAGE_SHIFT) > num_physpages)
		return NULL;

	area = get_vm_area(size, VM_ALLOC);
	if (!area)
		return NULL;

	return __vmalloc_area(area, gfp_mask, prot);
}

EXPORT_SYMBOL(__vmalloc);

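/*
 * Illustrative sketch (not part of this file): a caller that must not
 * recurse into the filesystem under memory pressure might allocate
 * with a restricted gfp mask, e.g.:
 *
 *	buf = __vmalloc(len, GFP_NOFS | __GFP_HIGHMEM, PAGE_KERNEL);
 */
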
/**
 * vmalloc - allocate virtually contiguous memory
 *
 * @size: allocation size
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vmalloc(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
}

EXPORT_SYMBOL(vmalloc);

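/*
 * Illustrative sketch (not part of this file): vmalloc() is the usual
 * choice for large buffers that need to be virtually, but not
 * physically, contiguous.  The identifiers are hypothetical:
 *
 *	struct foo *table = vmalloc(nentries * sizeof(*table));
 *	if (!table)
 *		return -ENOMEM;
 *	memset(table, 0, nentries * sizeof(*table));
 *	...
 *	vfree(table);
 */
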
#ifndef PAGE_KERNEL_EXEC
# define PAGE_KERNEL_EXEC PAGE_KERNEL
#endif

/**
 * vmalloc_exec - allocate virtually contiguous, executable memory
 *
 * @size: allocation size
 *
 * Kernel-internal function to allocate enough pages to cover @size
 * from the page level allocator and map them into contiguous and
 * executable kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vmalloc_exec(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC);
}

/**
 * vmalloc_32 - allocate virtually contiguous memory (32-bit addressable)
 *
 * @size: allocation size
 *
 * Allocate enough 32-bit physically addressable pages to cover @size
 * from the page level allocator and map them into contiguous kernel
 * virtual space.
 */
void *vmalloc_32(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL);
}

EXPORT_SYMBOL(vmalloc_32);

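/*
 * vread() copies up to @count bytes starting at kernel virtual address
 * @addr into @buf.  Gaps between vmalloc areas (including the guard
 * pages) read back as zeroes.  Callers such as /dev/kmem use this to
 * give a byte-level view of the vmalloc region.
 */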
long vread(char *buf, char *addr, unsigned long count)
{
	struct vm_struct *tmp;
	char *vaddr, *buf_start = buf;
	unsigned long n;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	read_lock(&vmlist_lock);
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		vaddr = (char *) tmp->addr;
		if (addr >= vaddr + tmp->size - PAGE_SIZE)
			continue;
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			*buf = '\0';
			buf++;
			addr++;
			count--;
		}
		n = vaddr + tmp->size - PAGE_SIZE - addr;
		do {
			if (count == 0)
				goto finished;
			*buf = *addr;
			buf++;
			addr++;
			count--;
		} while (--n > 0);
	}
finished:
	read_unlock(&vmlist_lock);
	return buf - buf_start;
}

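/*
 * vwrite() is the write-side counterpart of vread(): it copies up to
 * @count bytes from @buf into the vmalloc areas starting at @addr.
 * Bytes that fall into gaps between areas are silently skipped rather
 * than written.
 */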
long vwrite(char *buf, char *addr, unsigned long count)
{
	struct vm_struct *tmp;
	char *vaddr, *buf_start = buf;
	unsigned long n;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	read_lock(&vmlist_lock);
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		vaddr = (char *) tmp->addr;
		if (addr >= vaddr + tmp->size - PAGE_SIZE)
			continue;
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			buf++;
			addr++;
			count--;
		}
		n = vaddr + tmp->size - PAGE_SIZE - addr;
		do {
			if (count == 0)
				goto finished;
			*addr = *buf;
			buf++;
			addr++;
			count--;
		} while (--n > 0);
	}
finished:
	read_unlock(&vmlist_lock);
	return buf - buf_start;
}