nommu: Fix up vmalloc_node() symbol export regression.
1 /*
2  *  linux/mm/nommu.c
3  *
4  *  Replacement code for mm functions to support CPUs that don't
5  *  have any form of memory management unit (thus no virtual memory).
6  *
7  *  See Documentation/nommu-mmap.txt
8  *
9  *  Copyright (c) 2004-2008 David Howells <dhowells@redhat.com>
10  *  Copyright (c) 2000-2003 David McCullough <davidm@snapgear.com>
11  *  Copyright (c) 2000-2001 D Jeff Dionne <jeff@uClinux.org>
12  *  Copyright (c) 2002      Greg Ungerer <gerg@snapgear.com>
13  *  Copyright (c) 2007-2009 Paul Mundt <lethal@linux-sh.org>
14  */
15
16 #include <linux/module.h>
17 #include <linux/mm.h>
18 #include <linux/mman.h>
19 #include <linux/swap.h>
20 #include <linux/file.h>
21 #include <linux/highmem.h>
22 #include <linux/pagemap.h>
23 #include <linux/slab.h>
24 #include <linux/vmalloc.h>
25 #include <linux/tracehook.h>
26 #include <linux/blkdev.h>
27 #include <linux/backing-dev.h>
28 #include <linux/mount.h>
29 #include <linux/personality.h>
30 #include <linux/security.h>
31 #include <linux/syscalls.h>
32 #include <linux/audit.h>
33
34 #include <asm/uaccess.h>
35 #include <asm/tlb.h>
36 #include <asm/tlbflush.h>
37 #include <asm/mmu_context.h>
38 #include "internal.h"
39
40 #if 0
41 #define kenter(FMT, ...) \
42         printk(KERN_DEBUG "==> %s("FMT")\n", __func__, ##__VA_ARGS__)
43 #define kleave(FMT, ...) \
44         printk(KERN_DEBUG "<== %s()"FMT"\n", __func__, ##__VA_ARGS__)
45 #define kdebug(FMT, ...) \
46         printk(KERN_DEBUG "xxx" FMT"yyy\n", ##__VA_ARGS__)
47 #else
48 #define kenter(FMT, ...) \
49         no_printk(KERN_DEBUG "==> %s("FMT")\n", __func__, ##__VA_ARGS__)
50 #define kleave(FMT, ...) \
51         no_printk(KERN_DEBUG "<== %s()"FMT"\n", __func__, ##__VA_ARGS__)
52 #define kdebug(FMT, ...) \
53         no_printk(KERN_DEBUG FMT"\n", ##__VA_ARGS__)
54 #endif
55
56 void *high_memory;
57 struct page *mem_map;
58 unsigned long max_mapnr;
59 unsigned long num_physpages;
60 unsigned long highest_memmap_pfn;
61 struct percpu_counter vm_committed_as;
62 int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
63 int sysctl_overcommit_ratio = 50; /* default is 50% */
64 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
65 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
66 int heap_stack_gap = 0;
67
68 atomic_long_t mmap_pages_allocated;
69
70 EXPORT_SYMBOL(mem_map);
71 EXPORT_SYMBOL(num_physpages);
72
73 /* list of mapped, potentially shareable regions */
74 static struct kmem_cache *vm_region_jar;
75 struct rb_root nommu_region_tree = RB_ROOT;
76 DECLARE_RWSEM(nommu_region_sem);
77
78 const struct vm_operations_struct generic_file_vm_ops = {
79 };
80
81 /*
82  * Return the total memory allocated for this pointer, not
83  * just what the caller asked for.
84  *
85  * Doesn't have to be accurate, i.e. may have races.
86  */
87 unsigned int kobjsize(const void *objp)
88 {
89         struct page *page;
90
91         /*
92          * If the object we have should not have ksize performed on it,
93          * return size of 0
94          */
95         if (!objp || !virt_addr_valid(objp))
96                 return 0;
97
98         page = virt_to_head_page(objp);
99
100         /*
101          * If the allocator sets PageSlab, we know the pointer came from
102          * kmalloc().
103          */
104         if (PageSlab(page))
105                 return ksize(objp);
106
107         /*
108          * If it's not a compound page, see if we have a matching VMA
109          * region. This test is intentionally done in reverse order,
110          * so if there's no VMA, we still fall through and hand back
111          * PAGE_SIZE for 0-order pages.
112          */
113         if (!PageCompound(page)) {
114                 struct vm_area_struct *vma;
115
116                 vma = find_vma(current->mm, (unsigned long)objp);
117                 if (vma)
118                         return vma->vm_end - vma->vm_start;
119         }
120
121         /*
122          * The ksize() function is only guaranteed to work for pointers
123          * returned by kmalloc(). So handle arbitrary pointers here.
124          */
125         return PAGE_SIZE << compound_order(page);
126 }
127
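/*
 * Illustrative sketch (not part of the original file): kobjsize() reports the
 * size of the underlying allocation, which may be larger than what the caller
 * asked kmalloc() for.  The printed value is an assumption - it depends on the
 * slab allocator's size classes.
 *
 *	void *p = kmalloc(100, GFP_KERNEL);
 *	if (p)
 *		pr_debug("asked for 100, kobjsize() = %u\n", kobjsize(p));
 *	// typically reports the slab object size (e.g. 128), not 100
 */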
128 int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
129                      unsigned long start, int nr_pages, unsigned int foll_flags,
130                      struct page **pages, struct vm_area_struct **vmas)
131 {
132         struct vm_area_struct *vma;
133         unsigned long vm_flags;
134         int i;
135
136         /* calculate required read or write permissions.
137          * If FOLL_FORCE is set, we only require the "MAY" flags.
138          */
139         vm_flags  = (foll_flags & FOLL_WRITE) ?
140                         (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
141         vm_flags &= (foll_flags & FOLL_FORCE) ?
142                         (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
143
144         for (i = 0; i < nr_pages; i++) {
145                 vma = find_vma(mm, start);
146                 if (!vma)
147                         goto finish_or_fault;
148
149                 /* protect what we can, including chardevs */
150                 if ((vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
151                     !(vm_flags & vma->vm_flags))
152                         goto finish_or_fault;
153
154                 if (pages) {
155                         pages[i] = virt_to_page(start);
156                         if (pages[i])
157                                 page_cache_get(pages[i]);
158                 }
159                 if (vmas)
160                         vmas[i] = vma;
161                 start = (start + PAGE_SIZE) & PAGE_MASK;
162         }
163
164         return i;
165
166 finish_or_fault:
167         return i ? : -EFAULT;
168 }
169
170 /*
171  * get a list of pages in an address range belonging to the specified process
172  * and indicate the VMA that covers each page
173  * - this is potentially dodgy as we may end up incrementing the page count of a
174  *   slab page or a secondary page from a compound page
175  * - don't permit access to VMAs that don't support it, such as I/O mappings
176  */
177 int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
178         unsigned long start, int nr_pages, int write, int force,
179         struct page **pages, struct vm_area_struct **vmas)
180 {
181         int flags = 0;
182
183         if (write)
184                 flags |= FOLL_WRITE;
185         if (force)
186                 flags |= FOLL_FORCE;
187
188         return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas);
189 }
190 EXPORT_SYMBOL(get_user_pages);
191
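/*
 * Illustrative sketch (not part of the original file): pinning one page of the
 * calling process for reading.  'addr' is a hypothetical user address and the
 * error handling is minimal.
 *
 *	struct page *page;
 *	int ret;
 *
 *	down_read(&current->mm->mmap_sem);
 *	ret = get_user_pages(current, current->mm, addr, 1, 0, 0, &page, NULL);
 *	up_read(&current->mm->mmap_sem);
 *	if (ret == 1) {
 *		// ... use page_address(page) ...
 *		page_cache_release(page);
 *	}
 */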
192 /**
193  * follow_pfn - look up PFN at a user virtual address
194  * @vma: memory mapping
195  * @address: user virtual address
196  * @pfn: location to store found PFN
197  *
198  * Only IO mappings and raw PFN mappings are allowed.
199  *
200  * Returns zero and the pfn at @pfn on success, -ve otherwise.
201  */
202 int follow_pfn(struct vm_area_struct *vma, unsigned long address,
203         unsigned long *pfn)
204 {
205         if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
206                 return -EINVAL;
207
208         *pfn = address >> PAGE_SHIFT;
209         return 0;
210 }
211 EXPORT_SYMBOL(follow_pfn);
212
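/*
 * Illustrative sketch (not part of the original file): how a driver might use
 * follow_pfn() on a VMA it knows to be VM_IO/VM_PFNMAP.  'vma' and 'uaddr'
 * are hypothetical.
 *
 *	unsigned long pfn;
 *
 *	if (follow_pfn(vma, uaddr, &pfn) == 0)
 *		pr_debug("user address %lx maps to pfn %lx\n", uaddr, pfn);
 */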
213 DEFINE_RWLOCK(vmlist_lock);
214 struct vm_struct *vmlist;
215
216 void vfree(const void *addr)
217 {
218         kfree(addr);
219 }
220 EXPORT_SYMBOL(vfree);
221
222 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
223 {
224         /*
225          *  You can't specify __GFP_HIGHMEM with kmalloc() since kmalloc()
226          * returns only a logical address.
227          */
228         return kmalloc(size, (gfp_mask | __GFP_COMP) & ~__GFP_HIGHMEM);
229 }
230 EXPORT_SYMBOL(__vmalloc);
231
232 void *vmalloc_user(unsigned long size)
233 {
234         void *ret;
235
236         ret = __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
237                         PAGE_KERNEL);
238         if (ret) {
239                 struct vm_area_struct *vma;
240
241                 down_write(&current->mm->mmap_sem);
242                 vma = find_vma(current->mm, (unsigned long)ret);
243                 if (vma)
244                         vma->vm_flags |= VM_USERMAP;
245                 up_write(&current->mm->mmap_sem);
246         }
247
248         return ret;
249 }
250 EXPORT_SYMBOL(vmalloc_user);
251
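/*
 * Illustrative sketch (not part of the original file): a driver allocating a
 * buffer that will later be mapped to userspace.  vmalloc_user() zeroes the
 * memory and marks the covering VMA with VM_USERMAP so that
 * remap_vmalloc_range() will accept it.  The size below is arbitrary.
 *
 *	void *buf = vmalloc_user(PAGE_SIZE * 4);
 *	if (!buf)
 *		return -ENOMEM;
 *	// ... later, in the driver's mmap handler:
 *	// ret = remap_vmalloc_range(vma, buf, 0);
 */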
252 struct page *vmalloc_to_page(const void *addr)
253 {
254         return virt_to_page(addr);
255 }
256 EXPORT_SYMBOL(vmalloc_to_page);
257
258 unsigned long vmalloc_to_pfn(const void *addr)
259 {
260         return page_to_pfn(virt_to_page(addr));
261 }
262 EXPORT_SYMBOL(vmalloc_to_pfn);
263
264 long vread(char *buf, char *addr, unsigned long count)
265 {
266         memcpy(buf, addr, count);
267         return count;
268 }
269
270 long vwrite(char *buf, char *addr, unsigned long count)
271 {
272         /* Don't allow overflow */
273         if ((unsigned long) addr + count < count)
274                 count = -(unsigned long) addr;
275
276         memcpy(addr, buf, count);
277         return(count);
278 }
279
280 /*
281  *      vmalloc  -  allocate virtually contiguous memory
282  *
283  *      @size:          allocation size
284  *
285  *      Allocate enough pages to cover @size from the page level
286  *      allocator and map them into contiguous kernel virtual space.
287  *
288  *      For tight control over page level allocator and protection flags
289  *      use __vmalloc() instead.
290  */
291 void *vmalloc(unsigned long size)
292 {
293        return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
294 }
295 EXPORT_SYMBOL(vmalloc);
296
297 /*
298  *      vzalloc - allocate virtually contiguous memory with zero fill
299  *
300  *      @size:          allocation size
301  *
302  *      Allocate enough pages to cover @size from the page level
303  *      allocator and map them into contiguous kernel virtual space.
304  *      The memory allocated is set to zero.
305  *
306  *      For tight control over page level allocator and protection flags
307  *      use __vmalloc() instead.
308  */
309 void *vzalloc(unsigned long size)
310 {
311         return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
312                         PAGE_KERNEL);
313 }
314 EXPORT_SYMBOL(vzalloc);
315
316 /**
317  * vmalloc_node - allocate memory on a specific node
318  * @size:       allocation size
319  * @node:       numa node
320  *
321  * Allocate enough pages to cover @size from the page level
322  * allocator and map them into contiguous kernel virtual space.
323  *
324  * For tight control over page level allocator and protection flags
325  * use __vmalloc() instead.
326  */
327 void *vmalloc_node(unsigned long size, int node)
328 {
329         return vmalloc(size);
330 }
331 EXPORT_SYMBOL(vmalloc_node);
332
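/*
 * Illustrative sketch (not part of the original file): on !MMU vmalloc_node()
 * just falls back to vmalloc(), but the EXPORT_SYMBOL above is still required
 * so that modular users link.  A hypothetical module allocation:
 *
 *	struct foo *table;
 *
 *	table = vmalloc_node(nr_entries * sizeof(*table), numa_node_id());
 *	if (!table)
 *		return -ENOMEM;
 *	...
 *	vfree(table);
 */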
333 /**
334  * vzalloc_node - allocate memory on a specific node with zero fill
335  * @size:       allocation size
336  * @node:       numa node
337  *
338  * Allocate enough pages to cover @size from the page level
339  * allocator and map them into contiguous kernel virtual space.
340  * The memory allocated is set to zero.
341  *
342  * For tight control over page level allocator and protection flags
343  * use __vmalloc() instead.
344  */
345 void *vzalloc_node(unsigned long size, int node)
346 {
347         return vzalloc(size);
348 }
349 EXPORT_SYMBOL(vzalloc_node);
350
351 #ifndef PAGE_KERNEL_EXEC
352 # define PAGE_KERNEL_EXEC PAGE_KERNEL
353 #endif
354
355 /**
356  *      vmalloc_exec  -  allocate virtually contiguous, executable memory
357  *      @size:          allocation size
358  *
359  *      Kernel-internal function to allocate enough pages to cover @size
360  *      from the page level allocator and map them into contiguous and
361  *      executable kernel virtual space.
362  *
363  *      For tight control over page level allocator and protection flags
364  *      use __vmalloc() instead.
365  */
366
367 void *vmalloc_exec(unsigned long size)
368 {
369         return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC);
370 }
371
372 /**
373  * vmalloc_32  -  allocate virtually contiguous memory (32bit addressable)
374  *      @size:          allocation size
375  *
376  *      Allocate enough 32-bit physically addressable pages to cover @size from
377  *      the page level allocator and map them into contiguous kernel virtual space.
378  */
379 void *vmalloc_32(unsigned long size)
380 {
381         return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL);
382 }
383 EXPORT_SYMBOL(vmalloc_32);
384
385 /**
386  * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
387  *      @size:          allocation size
388  *
389  * The resulting memory area is 32bit addressable and zeroed so it can be
390  * mapped to userspace without leaking data.
391  *
392  * VM_USERMAP is set on the corresponding VMA so that subsequent calls to
393  * remap_vmalloc_range() are permissible.
394  */
395 void *vmalloc_32_user(unsigned long size)
396 {
397         /*
398          * We'll have to sort out the ZONE_DMA bits for 64-bit,
399          * but for now this can simply use vmalloc_user() directly.
400          */
401         return vmalloc_user(size);
402 }
403 EXPORT_SYMBOL(vmalloc_32_user);
404
405 void *vmap(struct page **pages, unsigned int count, unsigned long flags, pgprot_t prot)
406 {
407         BUG();
408         return NULL;
409 }
410 EXPORT_SYMBOL(vmap);
411
412 void vunmap(const void *addr)
413 {
414         BUG();
415 }
416 EXPORT_SYMBOL(vunmap);
417
418 void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)
419 {
420         BUG();
421         return NULL;
422 }
423 EXPORT_SYMBOL(vm_map_ram);
424
425 void vm_unmap_ram(const void *mem, unsigned int count)
426 {
427         BUG();
428 }
429 EXPORT_SYMBOL(vm_unmap_ram);
430
431 void vm_unmap_aliases(void)
432 {
433 }
434 EXPORT_SYMBOL_GPL(vm_unmap_aliases);
435
436 /*
437  * Implement a stub for vmalloc_sync_all() if the architecture chose not to
438  * have one.
439  */
440 void  __attribute__((weak)) vmalloc_sync_all(void)
441 {
442 }
443
444 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
445                    struct page *page)
446 {
447         return -EINVAL;
448 }
449 EXPORT_SYMBOL(vm_insert_page);
450
451 /*
452  *  sys_brk() for the most part doesn't need the global kernel
453  *  lock, except when an application is doing something nasty
454  *  like trying to un-brk an area that has already been mapped
455  *  to a regular file.  In this case, the unmapping will need
456  *  to invoke file system routines that need the global lock.
457  */
458 SYSCALL_DEFINE1(brk, unsigned long, brk)
459 {
460         struct mm_struct *mm = current->mm;
461
462         if (brk < mm->start_brk || brk > mm->context.end_brk)
463                 return mm->brk;
464
465         if (mm->brk == brk)
466                 return mm->brk;
467
468         /*
469          * Always allow shrinking brk
470          */
471         if (brk <= mm->brk) {
472                 mm->brk = brk;
473                 return brk;
474         }
475
476         /*
477          * Ok, looks good - let it rip.
478          */
479         flush_icache_range(mm->brk, brk);
480         return mm->brk = brk;
481 }
482
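/*
 * Illustrative sketch (not part of the original file): on !MMU the break can
 * only move within the window reserved at exec time (mm->start_brk up to
 * mm->context.end_brk); a request outside it simply returns the current brk.
 * A hypothetical userspace fragment:
 *
 *	void *cur  = sbrk(0);		// current break
 *	void *more = sbrk(4096);	// succeeds only if the new break still
 *					// fits inside the reserved brk region
 */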
483 /*
484  * initialise the VMA and region record slabs
485  */
486 void __init mmap_init(void)
487 {
488         int ret;
489
490         ret = percpu_counter_init(&vm_committed_as, 0);
491         VM_BUG_ON(ret);
492         vm_region_jar = KMEM_CACHE(vm_region, SLAB_PANIC);
493 }
494
495 /*
496  * validate the region tree
497  * - the caller must hold the region lock
498  */
499 #ifdef CONFIG_DEBUG_NOMMU_REGIONS
500 static noinline void validate_nommu_regions(void)
501 {
502         struct vm_region *region, *last;
503         struct rb_node *p, *lastp;
504
505         lastp = rb_first(&nommu_region_tree);
506         if (!lastp)
507                 return;
508
509         last = rb_entry(lastp, struct vm_region, vm_rb);
510         BUG_ON(unlikely(last->vm_end <= last->vm_start));
511         BUG_ON(unlikely(last->vm_top < last->vm_end));
512
513         while ((p = rb_next(lastp))) {
514                 region = rb_entry(p, struct vm_region, vm_rb);
515                 last = rb_entry(lastp, struct vm_region, vm_rb);
516
517                 BUG_ON(unlikely(region->vm_end <= region->vm_start));
518                 BUG_ON(unlikely(region->vm_top < region->vm_end));
519                 BUG_ON(unlikely(region->vm_start < last->vm_top));
520
521                 lastp = p;
522         }
523 }
524 #else
525 static void validate_nommu_regions(void)
526 {
527 }
528 #endif
529
530 /*
531  * add a region into the global tree
532  */
533 static void add_nommu_region(struct vm_region *region)
534 {
535         struct vm_region *pregion;
536         struct rb_node **p, *parent;
537
538         validate_nommu_regions();
539
540         parent = NULL;
541         p = &nommu_region_tree.rb_node;
542         while (*p) {
543                 parent = *p;
544                 pregion = rb_entry(parent, struct vm_region, vm_rb);
545                 if (region->vm_start < pregion->vm_start)
546                         p = &(*p)->rb_left;
547                 else if (region->vm_start > pregion->vm_start)
548                         p = &(*p)->rb_right;
549                 else if (pregion == region)
550                         return;
551                 else
552                         BUG();
553         }
554
555         rb_link_node(&region->vm_rb, parent, p);
556         rb_insert_color(&region->vm_rb, &nommu_region_tree);
557
558         validate_nommu_regions();
559 }
560
561 /*
562  * delete a region from the global tree
563  */
564 static void delete_nommu_region(struct vm_region *region)
565 {
566         BUG_ON(!nommu_region_tree.rb_node);
567
568         validate_nommu_regions();
569         rb_erase(&region->vm_rb, &nommu_region_tree);
570         validate_nommu_regions();
571 }
572
573 /*
574  * free a contiguous series of pages
575  */
576 static void free_page_series(unsigned long from, unsigned long to)
577 {
578         for (; from < to; from += PAGE_SIZE) {
579                 struct page *page = virt_to_page(from);
580
581                 kdebug("- free %lx", from);
582                 atomic_long_dec(&mmap_pages_allocated);
583                 if (page_count(page) != 1)
584                         kdebug("free page %p: refcount not one: %d",
585                                page, page_count(page));
586                 put_page(page);
587         }
588 }
589
590 /*
591  * release a reference to a region
592  * - the caller must hold the region semaphore for writing, which this releases
593  * - the region may not have been added to the tree yet, in which case vm_top
594  *   will equal vm_start
595  */
596 static void __put_nommu_region(struct vm_region *region)
597         __releases(nommu_region_sem)
598 {
599         kenter("%p{%d}", region, region->vm_usage);
600
601         BUG_ON(!nommu_region_tree.rb_node);
602
603         if (--region->vm_usage == 0) {
604                 if (region->vm_top > region->vm_start)
605                         delete_nommu_region(region);
606                 up_write(&nommu_region_sem);
607
608                 if (region->vm_file)
609                         fput(region->vm_file);
610
611                 /* IO memory and memory shared directly out of the pagecache
612                  * from ramfs/tmpfs mustn't be released here */
613                 if (region->vm_flags & VM_MAPPED_COPY) {
614                         kdebug("free series");
615                         free_page_series(region->vm_start, region->vm_top);
616                 }
617                 kmem_cache_free(vm_region_jar, region);
618         } else {
619                 up_write(&nommu_region_sem);
620         }
621 }
622
623 /*
624  * release a reference to a region
625  */
626 static void put_nommu_region(struct vm_region *region)
627 {
628         down_write(&nommu_region_sem);
629         __put_nommu_region(region);
630 }
631
632 /*
633  * update protection on a vma
634  */
635 static void protect_vma(struct vm_area_struct *vma, unsigned long flags)
636 {
637 #ifdef CONFIG_MPU
638         struct mm_struct *mm = vma->vm_mm;
639         long start = vma->vm_start & PAGE_MASK;
640         while (start < vma->vm_end) {
641                 protect_page(mm, start, flags);
642                 start += PAGE_SIZE;
643         }
644         update_protections(mm);
645 #endif
646 }
647
648 /*
649  * add a VMA into a process's mm_struct in the appropriate place in the list
650  * and tree, and add it to the address space's page tree as well if it is
651  * not an anonymous page
652  * - should be called with mm->mmap_sem held writelocked
653  */
654 static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
655 {
656         struct vm_area_struct *pvma, **pp, *next;
657         struct address_space *mapping;
658         struct rb_node **p, *parent;
659
660         kenter(",%p", vma);
661
662         BUG_ON(!vma->vm_region);
663
664         mm->map_count++;
665         vma->vm_mm = mm;
666
667         protect_vma(vma, vma->vm_flags);
668
669         /* add the VMA to the mapping */
670         if (vma->vm_file) {
671                 mapping = vma->vm_file->f_mapping;
672
673                 flush_dcache_mmap_lock(mapping);
674                 vma_prio_tree_insert(vma, &mapping->i_mmap);
675                 flush_dcache_mmap_unlock(mapping);
676         }
677
678         /* add the VMA to the tree */
679         parent = NULL;
680         p = &mm->mm_rb.rb_node;
681         while (*p) {
682                 parent = *p;
683                 pvma = rb_entry(parent, struct vm_area_struct, vm_rb);
684
685                 /* sort by: start addr, end addr, VMA struct addr in that order
686                  * (the latter is necessary as we may get identical VMAs) */
687                 if (vma->vm_start < pvma->vm_start)
688                         p = &(*p)->rb_left;
689                 else if (vma->vm_start > pvma->vm_start)
690                         p = &(*p)->rb_right;
691                 else if (vma->vm_end < pvma->vm_end)
692                         p = &(*p)->rb_left;
693                 else if (vma->vm_end > pvma->vm_end)
694                         p = &(*p)->rb_right;
695                 else if (vma < pvma)
696                         p = &(*p)->rb_left;
697                 else if (vma > pvma)
698                         p = &(*p)->rb_right;
699                 else
700                         BUG();
701         }
702
703         rb_link_node(&vma->vm_rb, parent, p);
704         rb_insert_color(&vma->vm_rb, &mm->mm_rb);
705
706         /* add VMA to the VMA list also */
707         for (pp = &mm->mmap; (pvma = *pp); pp = &(*pp)->vm_next) {
708                 if (pvma->vm_start > vma->vm_start)
709                         break;
710                 if (pvma->vm_start < vma->vm_start)
711                         continue;
712                 if (pvma->vm_end < vma->vm_end)
713                         break;
714         }
715
716         next = *pp;
717         *pp = vma;
718         vma->vm_next = next;
719         if (next)
720                 next->vm_prev = vma;
721 }
722
723 /*
724  * delete a VMA from its owning mm_struct and address space
725  */
726 static void delete_vma_from_mm(struct vm_area_struct *vma)
727 {
728         struct vm_area_struct **pp;
729         struct address_space *mapping;
730         struct mm_struct *mm = vma->vm_mm;
731
732         kenter("%p", vma);
733
734         protect_vma(vma, 0);
735
736         mm->map_count--;
737         if (mm->mmap_cache == vma)
738                 mm->mmap_cache = NULL;
739
740         /* remove the VMA from the mapping */
741         if (vma->vm_file) {
742                 mapping = vma->vm_file->f_mapping;
743
744                 flush_dcache_mmap_lock(mapping);
745                 vma_prio_tree_remove(vma, &mapping->i_mmap);
746                 flush_dcache_mmap_unlock(mapping);
747         }
748
749         /* remove from the MM's tree and list */
750         rb_erase(&vma->vm_rb, &mm->mm_rb);
751         for (pp = &mm->mmap; *pp; pp = &(*pp)->vm_next) {
752                 if (*pp == vma) {
753                         *pp = vma->vm_next;
754                         break;
755                 }
756         }
757
758         vma->vm_mm = NULL;
759 }
760
761 /*
762  * destroy a VMA record
763  */
764 static void delete_vma(struct mm_struct *mm, struct vm_area_struct *vma)
765 {
766         kenter("%p", vma);
767         if (vma->vm_ops && vma->vm_ops->close)
768                 vma->vm_ops->close(vma);
769         if (vma->vm_file) {
770                 fput(vma->vm_file);
771                 if (vma->vm_flags & VM_EXECUTABLE)
772                         removed_exe_file_vma(mm);
773         }
774         put_nommu_region(vma->vm_region);
775         kmem_cache_free(vm_area_cachep, vma);
776 }
777
778 /*
779  * look up the first VMA in which addr resides, NULL if none
780  * - should be called with mm->mmap_sem at least held readlocked
781  */
782 struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
783 {
784         struct vm_area_struct *vma;
785         struct rb_node *n = mm->mm_rb.rb_node;
786
787         /* check the cache first */
788         vma = mm->mmap_cache;
789         if (vma && vma->vm_start <= addr && vma->vm_end > addr)
790                 return vma;
791
792         /* trawl the tree (there may be multiple mappings in which addr
793          * resides) */
794         for (n = rb_first(&mm->mm_rb); n; n = rb_next(n)) {
795                 vma = rb_entry(n, struct vm_area_struct, vm_rb);
796                 if (vma->vm_start > addr)
797                         return NULL;
798                 if (vma->vm_end > addr) {
799                         mm->mmap_cache = vma;
800                         return vma;
801                 }
802         }
803
804         return NULL;
805 }
806 EXPORT_SYMBOL(find_vma);
807
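/*
 * Illustrative sketch (not part of the original file): a typical in-kernel
 * lookup of the VMA covering a user address.  'mm' and 'addr' are
 * hypothetical; the read lock on mmap_sem follows the rule stated above.
 *
 *	struct vm_area_struct *vma;
 *
 *	down_read(&mm->mmap_sem);
 *	vma = find_vma(mm, addr);
 *	if (vma)
 *		pr_debug("%lx lies within %lx-%lx\n",
 *			 addr, vma->vm_start, vma->vm_end);
 *	up_read(&mm->mmap_sem);
 */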
808 /*
809  * find a VMA
810  * - we don't extend stack VMAs under NOMMU conditions
811  */
812 struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
813 {
814         return find_vma(mm, addr);
815 }
816
817 /*
818  * expand a stack to a given address
819  * - not supported under NOMMU conditions
820  */
821 int expand_stack(struct vm_area_struct *vma, unsigned long address)
822 {
823         return -ENOMEM;
824 }
825
826 /*
827  * look up the first VMA that exactly matches addr
828  * - should be called with mm->mmap_sem at least held readlocked
829  */
830 static struct vm_area_struct *find_vma_exact(struct mm_struct *mm,
831                                              unsigned long addr,
832                                              unsigned long len)
833 {
834         struct vm_area_struct *vma;
835         struct rb_node *n = mm->mm_rb.rb_node;
836         unsigned long end = addr + len;
837
838         /* check the cache first */
839         vma = mm->mmap_cache;
840         if (vma && vma->vm_start == addr && vma->vm_end == end)
841                 return vma;
842
843         /* trawl the tree (there may be multiple mappings in which addr
844          * resides) */
845         for (n = rb_first(&mm->mm_rb); n; n = rb_next(n)) {
846                 vma = rb_entry(n, struct vm_area_struct, vm_rb);
847                 if (vma->vm_start < addr)
848                         continue;
849                 if (vma->vm_start > addr)
850                         return NULL;
851                 if (vma->vm_end == end) {
852                         mm->mmap_cache = vma;
853                         return vma;
854                 }
855         }
856
857         return NULL;
858 }
859
860 /*
861  * determine whether a mapping should be permitted and, if so, what sort of
862  * mapping we're capable of supporting
863  */
864 static int validate_mmap_request(struct file *file,
865                                  unsigned long addr,
866                                  unsigned long len,
867                                  unsigned long prot,
868                                  unsigned long flags,
869                                  unsigned long pgoff,
870                                  unsigned long *_capabilities)
871 {
872         unsigned long capabilities, rlen;
873         unsigned long reqprot = prot;
874         int ret;
875
876         /* do the simple checks first */
877         if (flags & MAP_FIXED) {
878                 printk(KERN_DEBUG
879                        "%d: Can't do fixed-address/overlay mmap of RAM\n",
880                        current->pid);
881                 return -EINVAL;
882         }
883
884         if ((flags & MAP_TYPE) != MAP_PRIVATE &&
885             (flags & MAP_TYPE) != MAP_SHARED)
886                 return -EINVAL;
887
888         if (!len)
889                 return -EINVAL;
890
891         /* Careful about overflows.. */
892         rlen = PAGE_ALIGN(len);
893         if (!rlen || rlen > TASK_SIZE)
894                 return -ENOMEM;
895
896         /* offset overflow? */
897         if ((pgoff + (rlen >> PAGE_SHIFT)) < pgoff)
898                 return -EOVERFLOW;
899
900         if (file) {
901                 /* validate file mapping requests */
902                 struct address_space *mapping;
903
904                 /* files must support mmap */
905                 if (!file->f_op || !file->f_op->mmap)
906                         return -ENODEV;
907
908                 /* work out if what we've got could possibly be shared
909                  * - we support chardevs that provide their own "memory"
910                  * - we support files/blockdevs that are memory backed
911                  */
912                 mapping = file->f_mapping;
913                 if (!mapping)
914                         mapping = file->f_path.dentry->d_inode->i_mapping;
915
916                 capabilities = 0;
917                 if (mapping && mapping->backing_dev_info)
918                         capabilities = mapping->backing_dev_info->capabilities;
919
920                 if (!capabilities) {
921                         /* no explicit capabilities set, so assume some
922                          * defaults */
923                         switch (file->f_path.dentry->d_inode->i_mode & S_IFMT) {
924                         case S_IFREG:
925                         case S_IFBLK:
926                                 capabilities = BDI_CAP_MAP_COPY;
927                                 break;
928
929                         case S_IFCHR:
930                                 capabilities =
931                                         BDI_CAP_MAP_DIRECT |
932                                         BDI_CAP_READ_MAP |
933                                         BDI_CAP_WRITE_MAP;
934                                 break;
935
936                         default:
937                                 return -EINVAL;
938                         }
939                 }
940
941                 /* eliminate any capabilities that we can't support on this
942                  * device */
943                 if (!file->f_op->get_unmapped_area)
944                         capabilities &= ~BDI_CAP_MAP_DIRECT;
945                 if (!file->f_op->read)
946                         capabilities &= ~BDI_CAP_MAP_COPY;
947
948                 /* The file shall have been opened with read permission. */
949                 if (!(file->f_mode & FMODE_READ))
950                         return -EACCES;
951
952                 if (flags & MAP_SHARED) {
953                         /* do checks for writing, appending and locking */
954                         if ((prot & PROT_WRITE) &&
955                             !(file->f_mode & FMODE_WRITE))
956                                 return -EACCES;
957
958                         if (IS_APPEND(file->f_path.dentry->d_inode) &&
959                             (file->f_mode & FMODE_WRITE))
960                                 return -EACCES;
961
962                         if (locks_verify_locked(file->f_path.dentry->d_inode))
963                                 return -EAGAIN;
964
965                         if (!(capabilities & BDI_CAP_MAP_DIRECT))
966                                 return -ENODEV;
967
968                         /* we mustn't privatise shared mappings */
969                         capabilities &= ~BDI_CAP_MAP_COPY;
970                 }
971                 else {
972                         /* we're going to read the file into private memory we
973                          * allocate */
974                         if (!(capabilities & BDI_CAP_MAP_COPY))
975                                 return -ENODEV;
976
977                         /* we don't permit a private writable mapping to be
978                          * shared with the backing device */
979                         if (prot & PROT_WRITE)
980                                 capabilities &= ~BDI_CAP_MAP_DIRECT;
981                 }
982
983                 if (capabilities & BDI_CAP_MAP_DIRECT) {
984                         if (((prot & PROT_READ)  && !(capabilities & BDI_CAP_READ_MAP))  ||
985                             ((prot & PROT_WRITE) && !(capabilities & BDI_CAP_WRITE_MAP)) ||
986                             ((prot & PROT_EXEC)  && !(capabilities & BDI_CAP_EXEC_MAP))
987                             ) {
988                                 capabilities &= ~BDI_CAP_MAP_DIRECT;
989                                 if (flags & MAP_SHARED) {
990                                         printk(KERN_WARNING
991                                                "MAP_SHARED not completely supported on !MMU\n");
992                                         return -EINVAL;
993                                 }
994                         }
995                 }
996
997                 /* handle executable mappings and implied executable
998                  * mappings */
999                 if (file->f_path.mnt->mnt_flags & MNT_NOEXEC) {
1000                         if (prot & PROT_EXEC)
1001                                 return -EPERM;
1002                 }
1003                 else if ((prot & PROT_READ) && !(prot & PROT_EXEC)) {
1004                         /* handle implication of PROT_EXEC by PROT_READ */
1005                         if (current->personality & READ_IMPLIES_EXEC) {
1006                                 if (capabilities & BDI_CAP_EXEC_MAP)
1007                                         prot |= PROT_EXEC;
1008                         }
1009                 }
1010                 else if ((prot & PROT_READ) &&
1011                          (prot & PROT_EXEC) &&
1012                          !(capabilities & BDI_CAP_EXEC_MAP)
1013                          ) {
1014                         /* backing file is not executable, try to copy */
1015                         capabilities &= ~BDI_CAP_MAP_DIRECT;
1016                 }
1017         }
1018         else {
1019                 /* anonymous mappings are always memory backed and can be
1020                  * privately mapped
1021                  */
1022                 capabilities = BDI_CAP_MAP_COPY;
1023
1024                 /* handle PROT_EXEC implication by PROT_READ */
1025                 if ((prot & PROT_READ) &&
1026                     (current->personality & READ_IMPLIES_EXEC))
1027                         prot |= PROT_EXEC;
1028         }
1029
1030         /* allow the security API to have its say */
1031         ret = security_file_mmap(file, reqprot, prot, flags, addr, 0);
1032         if (ret < 0)
1033                 return ret;
1034
1035         /* looks okay */
1036         *_capabilities = capabilities;
1037         return 0;
1038 }
1039
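/*
 * Illustrative sketch (not part of the original file): the checks above mean
 * that a MAP_FIXED request is always rejected on !MMU.  A hypothetical
 * userspace call (address chosen arbitrarily):
 *
 *	void *p = mmap((void *) 0x100000, 4096, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
 *	// p == MAP_FAILED and errno == EINVAL
 */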
1040 /*
1041  * we've determined that we can make the mapping, now translate what we
1042  * now know into VMA flags
1043  */
1044 static unsigned long determine_vm_flags(struct file *file,
1045                                         unsigned long prot,
1046                                         unsigned long flags,
1047                                         unsigned long capabilities)
1048 {
1049         unsigned long vm_flags;
1050
1051         vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags);
1052         /* vm_flags |= mm->def_flags; */
1053
1054         if (!(capabilities & BDI_CAP_MAP_DIRECT)) {
1055                 /* attempt to share read-only copies of mapped file chunks */
1056                 vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
1057                 if (file && !(prot & PROT_WRITE))
1058                         vm_flags |= VM_MAYSHARE;
1059         } else {
1060                 /* overlay a shareable mapping on the backing device or inode
1061                  * if possible - used for chardevs, ramfs/tmpfs/shmfs and
1062                  * romfs/cramfs */
1063                 vm_flags |= VM_MAYSHARE | (capabilities & BDI_CAP_VMFLAGS);
1064                 if (flags & MAP_SHARED)
1065                         vm_flags |= VM_SHARED;
1066         }
1067
1068         /* refuse to let anyone share private mappings with this process if
1069          * it's being traced - otherwise breakpoints set in it may interfere
1070          * with another untraced process
1071          */
1072         if ((flags & MAP_PRIVATE) && tracehook_expect_breakpoints(current))
1073                 vm_flags &= ~VM_MAYSHARE;
1074
1075         return vm_flags;
1076 }
1077
1078 /*
1079  * set up a shared mapping on a file (the driver or filesystem provides and
1080  * pins the storage)
1081  */
1082 static int do_mmap_shared_file(struct vm_area_struct *vma)
1083 {
1084         int ret;
1085
1086         ret = vma->vm_file->f_op->mmap(vma->vm_file, vma);
1087         if (ret == 0) {
1088                 vma->vm_region->vm_top = vma->vm_region->vm_end;
1089                 return 0;
1090         }
1091         if (ret != -ENOSYS)
1092                 return ret;
1093
1094         /* getting -ENOSYS indicates that direct mmap isn't possible (as
1095          * opposed to tried but failed) so we can only give a suitable error as
1096          * it's not possible to make a private copy if MAP_SHARED was given */
1097         return -ENODEV;
1098 }
1099
1100 /*
1101  * set up a private mapping or an anonymous shared mapping
1102  */
1103 static int do_mmap_private(struct vm_area_struct *vma,
1104                            struct vm_region *region,
1105                            unsigned long len,
1106                            unsigned long capabilities)
1107 {
1108         struct page *pages;
1109         unsigned long total, point, n, rlen;
1110         void *base;
1111         int ret, order;
1112
1113         /* invoke the file's mapping function so that it can keep track of
1114          * shared mappings on devices or memory
1115          * - VM_MAYSHARE will be set if it may attempt to share
1116          */
1117         if (capabilities & BDI_CAP_MAP_DIRECT) {
1118                 ret = vma->vm_file->f_op->mmap(vma->vm_file, vma);
1119                 if (ret == 0) {
1120                         /* shouldn't return success if we're not sharing */
1121                         BUG_ON(!(vma->vm_flags & VM_MAYSHARE));
1122                         vma->vm_region->vm_top = vma->vm_region->vm_end;
1123                         return 0;
1124                 }
1125                 if (ret != -ENOSYS)
1126                         return ret;
1127
1128                 /* getting an ENOSYS error indicates that direct mmap isn't
1129                  * possible (as opposed to tried but failed) so we'll try to
1130                  * make a private copy of the data and map that instead */
1131         }
1132
1133         rlen = PAGE_ALIGN(len);
1134
1135         /* allocate some memory to hold the mapping
1136          * - note that this may not return a page-aligned address if the object
1137          *   we're allocating is smaller than a page
1138          */
1139         order = get_order(rlen);
1140         kdebug("alloc order %d for %lx", order, len);
1141
1142         pages = alloc_pages(GFP_KERNEL, order);
1143         if (!pages)
1144                 goto enomem;
1145
1146         total = 1 << order;
1147         atomic_long_add(total, &mmap_pages_allocated);
1148
1149         point = rlen >> PAGE_SHIFT;
1150
1151         /* we allocated a power-of-2 sized page set, so we may want to trim off
1152          * the excess */
1153         if (sysctl_nr_trim_pages && total - point >= sysctl_nr_trim_pages) {
1154                 while (total > point) {
1155                         order = ilog2(total - point);
1156                         n = 1 << order;
1157                         kdebug("shave %lu/%lu @%lu", n, total - point, total);
1158                         atomic_long_sub(n, &mmap_pages_allocated);
1159                         total -= n;
1160                         set_page_refcounted(pages + total);
1161                         __free_pages(pages + total, order);
1162                 }
1163         }
1164
1165         for (point = 1; point < total; point++)
1166                 set_page_refcounted(&pages[point]);
1167
1168         base = page_address(pages);
1169         region->vm_flags = vma->vm_flags |= VM_MAPPED_COPY;
1170         region->vm_start = (unsigned long) base;
1171         region->vm_end   = region->vm_start + rlen;
1172         region->vm_top   = region->vm_start + (total << PAGE_SHIFT);
1173
1174         vma->vm_start = region->vm_start;
1175         vma->vm_end   = region->vm_start + len;
1176
1177         if (vma->vm_file) {
1178                 /* read the contents of a file into the copy */
1179                 mm_segment_t old_fs;
1180                 loff_t fpos;
1181
1182                 fpos = vma->vm_pgoff;
1183                 fpos <<= PAGE_SHIFT;
1184
1185                 old_fs = get_fs();
1186                 set_fs(KERNEL_DS);
1187                 ret = vma->vm_file->f_op->read(vma->vm_file, base, rlen, &fpos);
1188                 set_fs(old_fs);
1189
1190                 if (ret < 0)
1191                         goto error_free;
1192
1193                 /* clear the last little bit */
1194                 if (ret < rlen)
1195                         memset(base + ret, 0, rlen - ret);
1196
1197         }
1198
1199         return 0;
1200
1201 error_free:
1202         free_page_series(region->vm_start, region->vm_end);
1203         region->vm_start = vma->vm_start = 0;
1204         region->vm_end   = vma->vm_end = 0;
1205         region->vm_top   = 0;
1206         return ret;
1207
1208 enomem:
1209         printk("Allocation of length %lu from process %d (%s) failed\n",
1210                len, current->pid, current->comm);
1211         show_free_areas();
1212         return -ENOMEM;
1213 }
1214
1215 /*
1216  * handle mapping creation for uClinux
1217  */
1218 unsigned long do_mmap_pgoff(struct file *file,
1219                             unsigned long addr,
1220                             unsigned long len,
1221                             unsigned long prot,
1222                             unsigned long flags,
1223                             unsigned long pgoff)
1224 {
1225         struct vm_area_struct *vma;
1226         struct vm_region *region;
1227         struct rb_node *rb;
1228         unsigned long capabilities, vm_flags, result;
1229         int ret;
1230
1231         kenter(",%lx,%lx,%lx,%lx,%lx", addr, len, prot, flags, pgoff);
1232
1233         /* decide whether we should attempt the mapping, and if so what sort of
1234          * mapping */
1235         ret = validate_mmap_request(file, addr, len, prot, flags, pgoff,
1236                                     &capabilities);
1237         if (ret < 0) {
1238                 kleave(" = %d [val]", ret);
1239                 return ret;
1240         }
1241
1242         /* we ignore the address hint */
1243         addr = 0;
1244
1245         /* we've determined that we can make the mapping, now translate what we
1246          * now know into VMA flags */
1247         vm_flags = determine_vm_flags(file, prot, flags, capabilities);
1248
1249         /* we're going to need to record the mapping */
1250         region = kmem_cache_zalloc(vm_region_jar, GFP_KERNEL);
1251         if (!region)
1252                 goto error_getting_region;
1253
1254         vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
1255         if (!vma)
1256                 goto error_getting_vma;
1257
1258         region->vm_usage = 1;
1259         region->vm_flags = vm_flags;
1260         region->vm_pgoff = pgoff;
1261
1262         INIT_LIST_HEAD(&vma->anon_vma_chain);
1263         vma->vm_flags = vm_flags;
1264         vma->vm_pgoff = pgoff;
1265
1266         if (file) {
1267                 region->vm_file = file;
1268                 get_file(file);
1269                 vma->vm_file = file;
1270                 get_file(file);
1271                 if (vm_flags & VM_EXECUTABLE) {
1272                         added_exe_file_vma(current->mm);
1273                         vma->vm_mm = current->mm;
1274                 }
1275         }
1276
1277         down_write(&nommu_region_sem);
1278
1279         /* if we want to share, we need to check for regions created by other
1280          * mmap() calls that overlap with our proposed mapping
1281          * - we can only share with a superset match on most regular files
1282          * - shared mappings on character devices and memory backed files are
1283  *   permitted to overlap inexactly as far as we are concerned, since in
1284  *   these cases sharing is handled in the driver or filesystem rather
1285  *   than here
1286          */
1287         if (vm_flags & VM_MAYSHARE) {
1288                 struct vm_region *pregion;
1289                 unsigned long pglen, rpglen, pgend, rpgend, start;
1290
1291                 pglen = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
1292                 pgend = pgoff + pglen;
1293
1294                 for (rb = rb_first(&nommu_region_tree); rb; rb = rb_next(rb)) {
1295                         pregion = rb_entry(rb, struct vm_region, vm_rb);
1296
1297                         if (!(pregion->vm_flags & VM_MAYSHARE))
1298                                 continue;
1299
1300                         /* search for overlapping mappings on the same file */
1301                         if (pregion->vm_file->f_path.dentry->d_inode !=
1302                             file->f_path.dentry->d_inode)
1303                                 continue;
1304
1305                         if (pregion->vm_pgoff >= pgend)
1306                                 continue;
1307
1308                         rpglen = pregion->vm_end - pregion->vm_start;
1309                         rpglen = (rpglen + PAGE_SIZE - 1) >> PAGE_SHIFT;
1310                         rpgend = pregion->vm_pgoff + rpglen;
1311                         if (pgoff >= rpgend)
1312                                 continue;
1313
1314                         /* handle inexactly overlapping matches between
1315                          * mappings */
1316                         if ((pregion->vm_pgoff != pgoff || rpglen != pglen) &&
1317                             !(pgoff >= pregion->vm_pgoff && pgend <= rpgend)) {
1318                                 /* new mapping is not a subset of the region */
1319                                 if (!(capabilities & BDI_CAP_MAP_DIRECT))
1320                                         goto sharing_violation;
1321                                 continue;
1322                         }
1323
1324                         /* we've found a region we can share */
1325                         pregion->vm_usage++;
1326                         vma->vm_region = pregion;
1327                         start = pregion->vm_start;
1328                         start += (pgoff - pregion->vm_pgoff) << PAGE_SHIFT;
1329                         vma->vm_start = start;
1330                         vma->vm_end = start + len;
1331
1332                         if (pregion->vm_flags & VM_MAPPED_COPY) {
1333                                 kdebug("share copy");
1334                                 vma->vm_flags |= VM_MAPPED_COPY;
1335                         } else {
1336                                 kdebug("share mmap");
1337                                 ret = do_mmap_shared_file(vma);
1338                                 if (ret < 0) {
1339                                         vma->vm_region = NULL;
1340                                         vma->vm_start = 0;
1341                                         vma->vm_end = 0;
1342                                         pregion->vm_usage--;
1343                                         pregion = NULL;
1344                                         goto error_just_free;
1345                                 }
1346                         }
1347                         fput(region->vm_file);
1348                         kmem_cache_free(vm_region_jar, region);
1349                         region = pregion;
1350                         result = start;
1351                         goto share;
1352                 }
1353
1354                 /* obtain the address at which to make a shared mapping
1355                  * - this is the hook for quasi-memory character devices to
1356                  *   tell us the location of a shared mapping
1357                  */
1358                 if (capabilities & BDI_CAP_MAP_DIRECT) {
1359                         addr = file->f_op->get_unmapped_area(file, addr, len,
1360                                                              pgoff, flags);
1361                         if (IS_ERR((void *) addr)) {
1362                                 ret = addr;
1363                                 if (ret != (unsigned long) -ENOSYS)
1364                                         goto error_just_free;
1365
1366                                 /* the driver refused to tell us where to site
1367                                  * the mapping so we'll have to attempt to copy
1368                                  * it */
1369                                 ret = (unsigned long) -ENODEV;
1370                                 if (!(capabilities & BDI_CAP_MAP_COPY))
1371                                         goto error_just_free;
1372
1373                                 capabilities &= ~BDI_CAP_MAP_DIRECT;
1374                         } else {
1375                                 vma->vm_start = region->vm_start = addr;
1376                                 vma->vm_end = region->vm_end = addr + len;
1377                         }
1378                 }
1379         }
1380
1381         vma->vm_region = region;
1382
1383         /* set up the mapping
1384          * - the region is filled in if BDI_CAP_MAP_DIRECT is still set
1385          */
1386         if (file && vma->vm_flags & VM_SHARED)
1387                 ret = do_mmap_shared_file(vma);
1388         else
1389                 ret = do_mmap_private(vma, region, len, capabilities);
1390         if (ret < 0)
1391                 goto error_just_free;
1392         add_nommu_region(region);
1393
1394         /* clear anonymous mappings that don't ask for uninitialized data */
1395         if (!vma->vm_file && !(flags & MAP_UNINITIALIZED))
1396                 memset((void *)region->vm_start, 0,
1397                        region->vm_end - region->vm_start);
1398
1399         /* okay... we have a mapping; now we have to register it */
1400         result = vma->vm_start;
1401
1402         current->mm->total_vm += len >> PAGE_SHIFT;
1403
1404 share:
1405         add_vma_to_mm(current->mm, vma);
1406
1407         /* we flush the region from the icache only when the first executable
1408          * mapping of it is made  */
1409         if (vma->vm_flags & VM_EXEC && !region->vm_icache_flushed) {
1410                 flush_icache_range(region->vm_start, region->vm_end);
1411                 region->vm_icache_flushed = true;
1412         }
1413
1414         up_write(&nommu_region_sem);
1415
1416         kleave(" = %lx", result);
1417         return result;
1418
1419 error_just_free:
1420         up_write(&nommu_region_sem);
1421 error:
1422         if (region->vm_file)
1423                 fput(region->vm_file);
1424         kmem_cache_free(vm_region_jar, region);
1425         if (vma->vm_file)
1426                 fput(vma->vm_file);
1427         if (vma->vm_flags & VM_EXECUTABLE)
1428                 removed_exe_file_vma(vma->vm_mm);
1429         kmem_cache_free(vm_area_cachep, vma);
1430         kleave(" = %d", ret);
1431         return ret;
1432
1433 sharing_violation:
1434         up_write(&nommu_region_sem);
1435         printk(KERN_WARNING "Attempt to share mismatched mappings\n");
1436         ret = -EINVAL;
1437         goto error;
1438
1439 error_getting_vma:
1440         kmem_cache_free(vm_region_jar, region);
1441         printk(KERN_WARNING "Allocation of vma for %lu byte allocation"
1442                " from process %d failed\n",
1443                len, current->pid);
1444         show_free_areas();
1445         return -ENOMEM;
1446
1447 error_getting_region:
1448         printk(KERN_WARNING "Allocation of vm region for %lu byte allocation"
1449                " from process %d failed\n",
1450                len, current->pid);
1451         show_free_areas();
1452         return -ENOMEM;
1453 }
1454 EXPORT_SYMBOL(do_mmap_pgoff);
1455
1456 SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
1457                 unsigned long, prot, unsigned long, flags,
1458                 unsigned long, fd, unsigned long, pgoff)
1459 {
1460         struct file *file = NULL;
1461         unsigned long retval = -EBADF;
1462
1463         audit_mmap_fd(fd, flags);
1464         if (!(flags & MAP_ANONYMOUS)) {
1465                 file = fget(fd);
1466                 if (!file)
1467                         goto out;
1468         }
1469
1470         flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
1471
1472         down_write(&current->mm->mmap_sem);
1473         retval = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
1474         up_write(&current->mm->mmap_sem);
1475
1476         if (file)
1477                 fput(file);
1478 out:
1479         return retval;
1480 }
1481
1482 #ifdef __ARCH_WANT_SYS_OLD_MMAP
1483 struct mmap_arg_struct {
1484         unsigned long addr;
1485         unsigned long len;
1486         unsigned long prot;
1487         unsigned long flags;
1488         unsigned long fd;
1489         unsigned long offset;
1490 };
1491
1492 SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg)
1493 {
1494         struct mmap_arg_struct a;
1495
1496         if (copy_from_user(&a, arg, sizeof(a)))
1497                 return -EFAULT;
1498         if (a.offset & ~PAGE_MASK)
1499                 return -EINVAL;
1500
1501         return sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
1502                               a.offset >> PAGE_SHIFT);
1503 }
1504 #endif /* __ARCH_WANT_SYS_OLD_MMAP */
1505
1506 /*
1507  * split a vma into two pieces at address 'addr'; a new vma is allocated for
1508  * either the first part or the tail.
1509  */
1510 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
1511               unsigned long addr, int new_below)
1512 {
1513         struct vm_area_struct *new;
1514         struct vm_region *region;
1515         unsigned long npages;
1516
1517         kenter("");
1518
1519         /* we're only permitted to split anonymous regions (these should have
1520          * only a single usage on the region) */
1521         if (vma->vm_file)
1522                 return -ENOMEM;
1523
1524         if (mm->map_count >= sysctl_max_map_count)
1525                 return -ENOMEM;
1526
1527         region = kmem_cache_alloc(vm_region_jar, GFP_KERNEL);
1528         if (!region)
1529                 return -ENOMEM;
1530
1531         new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
1532         if (!new) {
1533                 kmem_cache_free(vm_region_jar, region);
1534                 return -ENOMEM;
1535         }
1536
1537         /* most fields are the same, copy all, and then fixup */
1538         *new = *vma;
1539         *region = *vma->vm_region;
1540         new->vm_region = region;
1541
1542         npages = (addr - vma->vm_start) >> PAGE_SHIFT;
1543
1544         if (new_below) {
1545                 region->vm_top = region->vm_end = new->vm_end = addr;
1546         } else {
1547                 region->vm_start = new->vm_start = addr;
1548                 region->vm_pgoff = new->vm_pgoff += npages;
1549         }
1550
1551         if (new->vm_ops && new->vm_ops->open)
1552                 new->vm_ops->open(new);
1553
1554         delete_vma_from_mm(vma);
1555         down_write(&nommu_region_sem);
1556         delete_nommu_region(vma->vm_region);
1557         if (new_below) {
1558                 vma->vm_region->vm_start = vma->vm_start = addr;
1559                 vma->vm_region->vm_pgoff = vma->vm_pgoff += npages;
1560         } else {
1561                 vma->vm_region->vm_end = vma->vm_end = addr;
1562                 vma->vm_region->vm_top = addr;
1563         }
1564         add_nommu_region(vma->vm_region);
1565         add_nommu_region(new->vm_region);
1566         up_write(&nommu_region_sem);
1567         add_vma_to_mm(mm, vma);
1568         add_vma_to_mm(mm, new);
1569         return 0;
1570 }
1571
1572 /*
1573  * shrink a VMA by removing the specified chunk from either the beginning or
1574  * the end
1575  */
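/*
 * The chunk [from, to) is expected to lie flush against one end of the VMA:
 * do_munmap() splits the VMA first if the range falls in the middle, so only
 * the head or the tail is ever trimmed here.
 */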
1576 static int shrink_vma(struct mm_struct *mm,
1577                       struct vm_area_struct *vma,
1578                       unsigned long from, unsigned long to)
1579 {
1580         struct vm_region *region;
1581
1582         kenter("");
1583
1584         /* adjust the VMA's pointers, which may reposition it in the MM's tree
1585          * and list */
1586         delete_vma_from_mm(vma);
1587         if (from > vma->vm_start)
1588                 vma->vm_end = from;
1589         else
1590                 vma->vm_start = to;
1591         add_vma_to_mm(mm, vma);
1592
1593         /* cut the backing region down to size */
1594         region = vma->vm_region;
1595         BUG_ON(region->vm_usage != 1);
1596
1597         down_write(&nommu_region_sem);
1598         delete_nommu_region(region);
1599         if (from > region->vm_start) {
1600                 to = region->vm_top;
1601                 region->vm_top = region->vm_end = from;
1602         } else {
1603                 region->vm_start = to;
1604         }
1605         add_nommu_region(region);
1606         up_write(&nommu_region_sem);
1607
1608         free_page_series(from, to);
1609         return 0;
1610 }
1611
1612 /*
1613  * release a mapping
1614  * - under NOMMU conditions the chunk to be unmapped must be backed by a single
1615  *   VMA, though it need not cover the whole VMA
1616  */
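/*
 * For instance (illustrative only): munmapping pages from the middle of an
 * anonymous mapping first splits the VMA at 'start' and then shrinks the
 * piece that still overlaps the range, whereas unmapping anything other than
 * the whole of a file-backed VMA fails with -EINVAL.
 */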
1617 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
1618 {
1619         struct vm_area_struct *vma;
1620         struct rb_node *rb;
1621         unsigned long end = start + len;
1622         int ret;
1623
1624         kenter(",%lx,%zx", start, len);
1625
1626         if (len == 0)
1627                 return -EINVAL;
1628
1629         /* find the first potentially overlapping VMA */
1630         vma = find_vma(mm, start);
1631         if (!vma) {
1632                 static int limit;
1633                 if (limit < 5) {
1634                         printk(KERN_WARNING
1635                                "munmap of memory not mmapped by process %d"
1636                                " (%s): 0x%lx-0x%lx\n",
1637                                current->pid, current->comm,
1638                                start, start + len - 1);
1639                         limit++;
1640                 }
1641                 return -EINVAL;
1642         }
1643
1644         /* we're allowed to split an anonymous VMA but not a file-backed one */
1645         if (vma->vm_file) {
1646                 do {
1647                         if (start > vma->vm_start) {
1648                                 kleave(" = -EINVAL [miss]");
1649                                 return -EINVAL;
1650                         }
1651                         if (end == vma->vm_end)
1652                                 goto erase_whole_vma;
1653                         rb = rb_next(&vma->vm_rb);
1654                         vma = rb_entry(rb, struct vm_area_struct, vm_rb);
1655                 } while (rb);
1656                 kleave(" = -EINVAL [split file]");
1657                 return -EINVAL;
1658         } else {
1659                 /* the chunk must be a subset of the VMA found */
1660                 if (start == vma->vm_start && end == vma->vm_end)
1661                         goto erase_whole_vma;
1662                 if (start < vma->vm_start || end > vma->vm_end) {
1663                         kleave(" = -EINVAL [superset]");
1664                         return -EINVAL;
1665                 }
1666                 if (start & ~PAGE_MASK) {
1667                         kleave(" = -EINVAL [unaligned start]");
1668                         return -EINVAL;
1669                 }
1670                 if (end != vma->vm_end && end & ~PAGE_MASK) {
1671                         kleave(" = -EINVAL [unaligned split]");
1672                         return -EINVAL;
1673                 }
1674                 if (start != vma->vm_start && end != vma->vm_end) {
1675                         ret = split_vma(mm, vma, start, 1);
1676                         if (ret < 0) {
1677                                 kleave(" = %d [split]", ret);
1678                                 return ret;
1679                         }
1680                 }
1681                 return shrink_vma(mm, vma, start, end);
1682         }
1683
1684 erase_whole_vma:
1685         delete_vma_from_mm(vma);
1686         delete_vma(mm, vma);
1687         kleave(" = 0");
1688         return 0;
1689 }
1690 EXPORT_SYMBOL(do_munmap);
1691
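/*
 * sys_munmap(): thin wrapper that calls do_munmap() with mmap_sem held for
 * writing.
 */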
1692 SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
1693 {
1694         int ret;
1695         struct mm_struct *mm = current->mm;
1696
1697         down_write(&mm->mmap_sem);
1698         ret = do_munmap(mm, addr, len);
1699         up_write(&mm->mmap_sem);
1700         return ret;
1701 }
1702
1703 /*
1704  * release all the mappings made in a process's VM space
1705  */
1706 void exit_mmap(struct mm_struct *mm)
1707 {
1708         struct vm_area_struct *vma;
1709
1710         if (!mm)
1711                 return;
1712
1713         kenter("");
1714
1715         mm->total_vm = 0;
1716
1717         while ((vma = mm->mmap)) {
1718                 mm->mmap = vma->vm_next;
1719                 delete_vma_from_mm(vma);
1720                 delete_vma(mm, vma);
1721                 cond_resched();
1722         }
1723
1724         kleave("");
1725 }
1726
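/*
 * brk-style expansion of a mapping is not supported on NOMMU, so do_brk()
 * unconditionally fails with -ENOMEM.
 */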
1727 unsigned long do_brk(unsigned long addr, unsigned long len)
1728 {
1729         return -ENOMEM;
1730 }
1731
1732 /*
1733  * expand (or shrink) an existing mapping, potentially moving it at the same
1734  * time (controlled by the MREMAP_MAYMOVE flag and available VM space)
1735  *
1736  * under NOMMU conditions, we only permit changing a mapping's size, and only
1737  * as long as it stays within the region allocated by do_mmap_private() and the
1738  * block is not shareable
1739  *
1740  * MREMAP_FIXED is not supported under NOMMU conditions
1741  */
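/*
 * Example (illustrative only): for a private mapping exactly 16KB long,
 * do_mremap(addr, 16384, 8192, 0, 0) shrinks it in place, pulling vm_end
 * back and returning the unchanged vm_start; growing succeeds only while
 * new_len still fits within the backing region, and MREMAP_MAYMOVE cannot
 * help because nothing is ever moved here.
 */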
1742 unsigned long do_mremap(unsigned long addr,
1743                         unsigned long old_len, unsigned long new_len,
1744                         unsigned long flags, unsigned long new_addr)
1745 {
1746         struct vm_area_struct *vma;
1747
1748         /* insanity checks first */
1749         if (old_len == 0 || new_len == 0)
1750                 return (unsigned long) -EINVAL;
1751
1752         if (addr & ~PAGE_MASK)
1753                 return -EINVAL;
1754
1755         if (flags & MREMAP_FIXED && new_addr != addr)
1756                 return (unsigned long) -EINVAL;
1757
1758         vma = find_vma_exact(current->mm, addr, old_len);
1759         if (!vma)
1760                 return (unsigned long) -EINVAL;
1761
1762         if (vma->vm_end != vma->vm_start + old_len)
1763                 return (unsigned long) -EFAULT;
1764
1765         if (vma->vm_flags & VM_MAYSHARE)
1766                 return (unsigned long) -EPERM;
1767
1768         if (new_len > vma->vm_region->vm_end - vma->vm_region->vm_start)
1769                 return (unsigned long) -ENOMEM;
1770
1771         /* all checks complete - do it */
1772         vma->vm_end = vma->vm_start + new_len;
1773         return vma->vm_start;
1774 }
1775 EXPORT_SYMBOL(do_mremap);
1776
1777 SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
1778                 unsigned long, new_len, unsigned long, flags,
1779                 unsigned long, new_addr)
1780 {
1781         unsigned long ret;
1782
1783         down_write(&current->mm->mmap_sem);
1784         ret = do_mremap(addr, old_len, new_len, flags, new_addr);
1785         up_write(&current->mm->mmap_sem);
1786         return ret;
1787 }
1788
1789 struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
1790                         unsigned int foll_flags)
1791 {
1792         return NULL;
1793 }
1794
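/*
 * With no MMU there are no page tables to populate: remap_pfn_range() just
 * points the VMA at the physical range described by vm_pgoff and reports
 * success.
 */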
1795 int remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
1796                 unsigned long to, unsigned long size, pgprot_t prot)
1797 {
1798         vma->vm_start = vma->vm_pgoff << PAGE_SHIFT;
1799         return 0;
1800 }
1801 EXPORT_SYMBOL(remap_pfn_range);
1802
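/*
 * remap_vmalloc_range(): map a vmalloc'd buffer into userspace.  The caller
 * must have set VM_USERMAP on the VMA; the VMA is then pointed directly at
 * the kernel buffer, since kernel and user share a single address space here.
 */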
1803 int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
1804                         unsigned long pgoff)
1805 {
1806         unsigned int size = vma->vm_end - vma->vm_start;
1807
1808         if (!(vma->vm_flags & VM_USERMAP))
1809                 return -EINVAL;
1810
1811         vma->vm_start = (unsigned long)(addr + (pgoff << PAGE_SHIFT));
1812         vma->vm_end = vma->vm_start + size;
1813
1814         return 0;
1815 }
1816 EXPORT_SYMBOL(remap_vmalloc_range);
1817
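/*
 * The next few helpers exist only to satisfy callers: swap_unplug_io_fn(),
 * arch_unmap_area() and unmap_mapping_range() have nothing to do without an
 * MMU, and arch_get_unmapped_area() simply fails with -ENOMEM as there is no
 * free-area search to perform.
 */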
1818 void swap_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
1819 {
1820 }
1821
1822 unsigned long arch_get_unmapped_area(struct file *file, unsigned long addr,
1823         unsigned long len, unsigned long pgoff, unsigned long flags)
1824 {
1825         return -ENOMEM;
1826 }
1827
1828 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
1829 {
1830 }
1831
1832 void unmap_mapping_range(struct address_space *mapping,
1833                          loff_t const holebegin, loff_t const holelen,
1834                          int even_cows)
1835 {
1836 }
1837 EXPORT_SYMBOL(unmap_mapping_range);
1838
1839 /*
1840  * Check that a process has enough memory to allocate a new virtual
1841  * mapping. 0 means there is enough memory for the allocation to
1842  * succeed and -ENOMEM implies there is not.
1843  *
1844  * We currently support three overcommit policies, which are set via the
1845  * vm.overcommit_memory sysctl.  See Documentation/vm/overcommit-accounting
1846  *
1847  * Strict overcommit modes added 2002 Feb 26 by Alan Cox.
1848  * Additional code 2002 Jul 20 by Robert Love.
1849  *
1850  * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise.
1851  *
1852  * Note this is a helper function intended to be used by LSMs which
1853  * wish to use this logic.
1854  */
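/*
 * Worked example (illustrative only, assuming the default 50% ratio and no
 * swap): in OVERCOMMIT_NEVER mode on a machine with 16384 pages of RAM, a
 * non-root process gets allowed = 16384 * 50 / 100 = 8192 pages, minus
 * 8192 / 32 = 256 pages kept back for root and minus this process's
 * total_vm / 32, i.e. a little under 7936 pages of committed address space
 * before further allocations see -ENOMEM.
 */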
1855 int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
1856 {
1857         unsigned long free, allowed;
1858
1859         vm_acct_memory(pages);
1860
1861         /*
1862          * Sometimes we want to use more memory than we have
1863          */
1864         if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
1865                 return 0;
1866
1867         if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
1868                 unsigned long n;
1869
1870                 free = global_page_state(NR_FILE_PAGES);
1871                 free += nr_swap_pages;
1872
1873                 /*
1874                  * Any slabs which are created with the
1875                  * SLAB_RECLAIM_ACCOUNT flag claim to have contents
1876                  * which are reclaimable under pressure.  The dentry
1877                  * cache and most inode caches should fall into this category.
1878                  */
1879                 free += global_page_state(NR_SLAB_RECLAIMABLE);
1880
1881                 /*
1882                  * Leave the last 3% (1/32) for root
1883                  */
1884                 if (!cap_sys_admin)
1885                         free -= free / 32;
1886
1887                 if (free > pages)
1888                         return 0;
1889
1890                 /*
1891                  * nr_free_pages() is very expensive on large systems,
1892                  * so only call it if we're about to fail.
1893                  */
1894                 n = nr_free_pages();
1895
1896                 /*
1897                  * Leave out the reserved pages; they are not available for anonymous use.
1898                  */
1899                 if (n <= totalreserve_pages)
1900                         goto error;
1901                 else
1902                         n -= totalreserve_pages;
1903
1904                 /*
1905                  * Leave the last 3% (1/32) for root
1906                  */
1907                 if (!cap_sys_admin)
1908                         n -= n / 32;
1909                 free += n;
1910
1911                 if (free > pages)
1912                         return 0;
1913
1914                 goto error;
1915         }
1916
1917         allowed = totalram_pages * sysctl_overcommit_ratio / 100;
1918         /*
1919          * Leave the last 3% (1/32) for root
1920          */
1921         if (!cap_sys_admin)
1922                 allowed -= allowed / 32;
1923         allowed += total_swap_pages;
1924
1925         /* Don't let a single process grow too big:
1926            leave 3% (1/32) of this process's size for other processes */
1927         if (mm)
1928                 allowed -= mm->total_vm / 32;
1929
1930         if (percpu_counter_read_positive(&vm_committed_as) < allowed)
1931                 return 0;
1932
1933 error:
1934         vm_unacct_memory(pages);
1935
1936         return -ENOMEM;
1937 }
1938
1939 int in_gate_area_no_task(unsigned long addr)
1940 {
1941         return 0;
1942 }
1943
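/*
 * filemap_fault() should never run on NOMMU as there are no page faults to
 * service, so reaching it is treated as a bug.
 */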
1944 int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1945 {
1946         BUG();
1947         return 0;
1948 }
1949 EXPORT_SYMBOL(filemap_fault);
1950
1951 /*
1952  * Access another process' address space.
1953  * - source/target buffer must be kernel space
1954  */
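/*
 * Usage sketch (illustrative only): a ptrace-style read of 16 bytes from a
 * traced task would be access_process_vm(child, addr, buf, 16, 0); the
 * return value is the number of bytes copied, clamped to the end of the
 * mapping containing addr, or 0 if addr is unmapped or the mapping forbids
 * the access.
 */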
1955 int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
1956 {
1957         struct vm_area_struct *vma;
1958         struct mm_struct *mm;
1959
1960         if (addr + len < addr)
1961                 return 0;
1962
1963         mm = get_task_mm(tsk);
1964         if (!mm)
1965                 return 0;
1966
1967         down_read(&mm->mmap_sem);
1968
1969         /* the access must start within one of the target process's mappings */
1970         vma = find_vma(mm, addr);
1971         if (vma) {
1972                 /* don't overrun this mapping */
1973                 if (addr + len >= vma->vm_end)
1974                         len = vma->vm_end - addr;
1975
1976                 /* only read or write mappings where it is permitted */
1977                 if (write && vma->vm_flags & VM_MAYWRITE)
1978                         copy_to_user_page(vma, NULL, addr,
1979                                          (void *) addr, buf, len);
1980                 else if (!write && vma->vm_flags & VM_MAYREAD)
1981                         copy_from_user_page(vma, NULL, addr,
1982                                             buf, (void *) addr, len);
1983                 else
1984                         len = 0;
1985         } else {
1986                 len = 0;
1987         }
1988
1989         up_read(&mm->mmap_sem);
1990         mmput(mm);
1991         return len;
1992 }
1993
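/*
 * In practice (illustrative only), nommu_shrink_inode_mappings() below means
 * that truncating a file which still has a MAP_SHARED mapping inside the
 * to-be-discarded range is refused with -ETXTBSY, while any other shared
 * regions extending past the new EOF are simply clipped back to it.
 */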
1994 /**
1995  * nommu_shrink_inode_mappings - Shrink the shared mappings on an inode
1996  * @inode: The inode to check
1997  * @size: The current filesize of the inode
1998  * @newsize: The proposed filesize of the inode
1999  *
2000  * Check the shared mappings on an inode on behalf of a shrinking truncate to
2001  * make sure that any outstanding VMAs aren't broken and then shrink the
2002  * vm_regions that extend beyond that so that do_mmap_pgoff() doesn't
2003  * automatically grant mappings that are too large.
2004  */
2005 int nommu_shrink_inode_mappings(struct inode *inode, size_t size,
2006                                 size_t newsize)
2007 {
2008         struct vm_area_struct *vma;
2009         struct prio_tree_iter iter;
2010         struct vm_region *region;
2011         pgoff_t low, high;
2012         size_t r_size, r_top;
2013
2014         low = newsize >> PAGE_SHIFT;
2015         high = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
2016
2017         down_write(&nommu_region_sem);
2018
2019         /* search for VMAs that fall within the dead zone */
2020         vma_prio_tree_foreach(vma, &iter, &inode->i_mapping->i_mmap,
2021                               low, high) {
2022                 /* found one - only interested if it's shared out of the page
2023                  * cache */
2024                 if (vma->vm_flags & VM_SHARED) {
2025                         up_write(&nommu_region_sem);
2026                         return -ETXTBSY; /* not quite true, but near enough */
2027                 }
2028         }
2029
2030         /* reduce any regions that overlap the dead zone - if any exist, they
2031          * will be pointed to by VMAs that don't overlap the dead zone
2032          *
2033          * we don't check for any regions that start beyond the EOF as there
2034          * shouldn't be any
2035          */
2036         vma_prio_tree_foreach(vma, &iter, &inode->i_mapping->i_mmap,
2037                               0, ULONG_MAX) {
2038                 if (!(vma->vm_flags & VM_SHARED))
2039                         continue;
2040
2041                 region = vma->vm_region;
2042                 r_size = region->vm_top - region->vm_start;
2043                 r_top = (region->vm_pgoff << PAGE_SHIFT) + r_size;
2044
2045                 if (r_top > newsize) {
2046                         region->vm_top -= r_top - newsize;
2047                         if (region->vm_end > region->vm_top)
2048                                 region->vm_end = region->vm_top;
2049                 }
2050         }
2051
2052         up_write(&nommu_region_sem);
2053         return 0;
2054 }