// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/mm/nommu.c
 *
 *  Replacement code for mm functions to support CPUs that don't
 *  have any form of memory management unit (thus no virtual memory).
 *
 *  See Documentation/admin-guide/mm/nommu-mmap.rst
 *
 *  Copyright (c) 2004-2008 David Howells <dhowells@redhat.com>
 *  Copyright (c) 2000-2003 David McCullough <davidm@snapgear.com>
 *  Copyright (c) 2000-2001 D Jeff Dionne <jeff@uClinux.org>
 *  Copyright (c) 2002      Greg Ungerer <gerg@snapgear.com>
 *  Copyright (c) 2007-2010 Paul Mundt <lethal@linux-sh.org>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/file.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/backing-dev.h>
#include <linux/compiler.h>
#include <linux/mount.h>
#include <linux/personality.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/printk.h>

#include <linux/uaccess.h>
#include <linux/uio.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include "internal.h"

void *high_memory;
EXPORT_SYMBOL(high_memory);
struct page *mem_map;
unsigned long max_mapnr;
EXPORT_SYMBOL(max_mapnr);
unsigned long highest_memmap_pfn;
int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
int heap_stack_gap = 0;

atomic_long_t mmap_pages_allocated;

EXPORT_SYMBOL(mem_map);

/* list of mapped, potentially shareable regions */
static struct kmem_cache *vm_region_jar;
struct rb_root nommu_region_tree = RB_ROOT;
DECLARE_RWSEM(nommu_region_sem);

const struct vm_operations_struct generic_file_vm_ops = {
};

/*
 * Return the total memory allocated for this pointer, not
 * just what the caller asked for.
 *
 * Doesn't have to be accurate, i.e. may have races.
 */
unsigned int kobjsize(const void *objp)
{
        struct page *page;

        /*
         * If the object we have should not have ksize performed on it,
         * return size of 0
         */
        if (!objp || !virt_addr_valid(objp))
                return 0;

        page = virt_to_head_page(objp);

        /*
         * If the allocator sets PageSlab, we know the pointer came from
         * kmalloc().
         */
        if (PageSlab(page))
                return ksize(objp);

        /*
         * If it's not a compound page, see if we have a matching VMA
         * region. This test is intentionally done in reverse order,
         * so if there's no VMA, we still fall through and hand back
         * PAGE_SIZE for 0-order pages.
         */
        if (!PageCompound(page)) {
                struct vm_area_struct *vma;

                vma = find_vma(current->mm, (unsigned long)objp);
                if (vma)
                        return vma->vm_end - vma->vm_start;
        }

        /*
         * The ksize() function is only guaranteed to work for pointers
         * returned by kmalloc(). So handle arbitrary pointers here.
         */
        return page_size(page);
}
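
/*
 * Illustrative sketch (not part of the original file): kobjsize() reports
 * the size of the backing allocation, which may exceed what was asked for:
 *
 *        char *p = kmalloc(13, GFP_KERNEL);        // hypothetical caller
 *        if (p)
 *                pr_debug("backed by %u bytes\n", kobjsize(p));        // >= 13
 *        kfree(p);
 */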

/**
 * follow_pfn - look up PFN at a user virtual address
 * @vma: memory mapping
 * @address: user virtual address
 * @pfn: location to store found PFN
 *
 * Only IO mappings and raw PFN mappings are allowed.
 *
 * Returns zero and the pfn at @pfn on success, -ve otherwise.
 */
int follow_pfn(struct vm_area_struct *vma, unsigned long address,
        unsigned long *pfn)
{
        if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
                return -EINVAL;

        *pfn = address >> PAGE_SHIFT;
        return 0;
}
EXPORT_SYMBOL(follow_pfn);
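
/*
 * Illustrative note (an assumption, not from the original file): with no
 * MMU a user virtual address is already a physical address, which is why
 * the PFN above is just the address shifted down.  A hypothetical caller:
 *
 *        unsigned long pfn;
 *
 *        if (!follow_pfn(vma, address, &pfn))
 *                pr_debug("address %lx -> pfn %lx\n", address, pfn);
 */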

void vfree(const void *addr)
{
        kfree(addr);
}
EXPORT_SYMBOL(vfree);

void *__vmalloc(unsigned long size, gfp_t gfp_mask)
{
        /*
         *  You can't specify __GFP_HIGHMEM with kmalloc() since kmalloc()
         * returns only a logical address.
         */
        return kmalloc(size, (gfp_mask | __GFP_COMP) & ~__GFP_HIGHMEM);
}
EXPORT_SYMBOL(__vmalloc);
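
/*
 * Illustrative sketch (not part of the original file): on !MMU every
 * __vmalloc() allocation is really a kmalloc() allocation, so the result
 * is physically contiguous and vfree() above simply kfree()s it:
 *
 *        void *buf = __vmalloc(4 * PAGE_SIZE, GFP_KERNEL | __GFP_ZERO);
 *
 *        if (!buf)
 *                return -ENOMEM;
 *        ...
 *        vfree(buf);
 */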

void *__vmalloc_node_range(unsigned long size, unsigned long align,
                unsigned long start, unsigned long end, gfp_t gfp_mask,
                pgprot_t prot, unsigned long vm_flags, int node,
                const void *caller)
{
        return __vmalloc(size, gfp_mask);
}

void *__vmalloc_node(unsigned long size, unsigned long align, gfp_t gfp_mask,
                int node, const void *caller)
{
        return __vmalloc(size, gfp_mask);
}

static void *__vmalloc_user_flags(unsigned long size, gfp_t flags)
{
        void *ret;

        ret = __vmalloc(size, flags);
        if (ret) {
                struct vm_area_struct *vma;

                mmap_write_lock(current->mm);
                vma = find_vma(current->mm, (unsigned long)ret);
                if (vma)
                        vm_flags_set(vma, VM_USERMAP);
                mmap_write_unlock(current->mm);
        }

        return ret;
}

void *vmalloc_user(unsigned long size)
{
        return __vmalloc_user_flags(size, GFP_KERNEL | __GFP_ZERO);
}
EXPORT_SYMBOL(vmalloc_user);
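
/*
 * Illustrative sketch (an assumption, not from the original file): the
 * VM_USERMAP flag set above is what later lets a driver hand the buffer
 * to userspace with remap_vmalloc_range() from its ->mmap() handler:
 *
 *        void *buf = vmalloc_user(SZ_64K);        // zeroed, VMA marked VM_USERMAP
 *        ...
 *        err = remap_vmalloc_range(vma, buf, 0);
 */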

struct page *vmalloc_to_page(const void *addr)
{
        return virt_to_page(addr);
}
EXPORT_SYMBOL(vmalloc_to_page);

unsigned long vmalloc_to_pfn(const void *addr)
{
        return page_to_pfn(virt_to_page(addr));
}
EXPORT_SYMBOL(vmalloc_to_pfn);

long vread_iter(struct iov_iter *iter, const char *addr, size_t count)
{
        /* Don't allow overflow */
        if ((unsigned long) addr + count < count)
                count = -(unsigned long) addr;

        return copy_to_iter(addr, count, iter);
}

/*
 *      vmalloc  -  allocate virtually contiguous memory
 *
 *      @size:          allocation size
 *
 *      Allocate enough pages to cover @size from the page level
 *      allocator and map them into contiguous kernel virtual space.
 *
 *      For tight control over page level allocator and protection flags
 *      use __vmalloc() instead.
 */
void *vmalloc(unsigned long size)
{
        return __vmalloc(size, GFP_KERNEL);
}
EXPORT_SYMBOL(vmalloc);

void *vmalloc_huge(unsigned long size, gfp_t gfp_mask) __weak __alias(__vmalloc);

/*
 *      vzalloc - allocate virtually contiguous memory with zero fill
 *
 *      @size:          allocation size
 *
 *      Allocate enough pages to cover @size from the page level
 *      allocator and map them into contiguous kernel virtual space.
 *      The memory allocated is set to zero.
 *
 *      For tight control over page level allocator and protection flags
 *      use __vmalloc() instead.
 */
void *vzalloc(unsigned long size)
{
        return __vmalloc(size, GFP_KERNEL | __GFP_ZERO);
}
EXPORT_SYMBOL(vzalloc);

/**
 * vmalloc_node - allocate memory on a specific node
 * @size:       allocation size
 * @node:       numa node
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vmalloc_node(unsigned long size, int node)
{
        return vmalloc(size);
}
EXPORT_SYMBOL(vmalloc_node);

/**
 * vzalloc_node - allocate memory on a specific node with zero fill
 * @size:       allocation size
 * @node:       numa node
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 * The memory allocated is set to zero.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vzalloc_node(unsigned long size, int node)
{
        return vzalloc(size);
}
EXPORT_SYMBOL(vzalloc_node);

/**
 * vmalloc_32  -  allocate virtually contiguous memory (32bit addressable)
 *      @size:          allocation size
 *
 *      Allocate enough 32bit PA addressable pages to cover @size from the
 *      page level allocator and map them into contiguous kernel virtual space.
 */
void *vmalloc_32(unsigned long size)
{
        return __vmalloc(size, GFP_KERNEL);
}
EXPORT_SYMBOL(vmalloc_32);

/**
 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
 *      @size:          allocation size
 *
 * The resulting memory area is 32bit addressable and zeroed so it can be
 * mapped to userspace without leaking data.
 *
 * VM_USERMAP is set on the corresponding VMA so that subsequent calls to
 * remap_vmalloc_range() are permissible.
 */
void *vmalloc_32_user(unsigned long size)
{
        /*
         * We'll have to sort out the ZONE_DMA bits for 64-bit,
         * but for now this can simply use vmalloc_user() directly.
         */
        return vmalloc_user(size);
}
EXPORT_SYMBOL(vmalloc_32_user);

void *vmap(struct page **pages, unsigned int count, unsigned long flags, pgprot_t prot)
{
        BUG();
        return NULL;
}
EXPORT_SYMBOL(vmap);

void vunmap(const void *addr)
{
        BUG();
}
EXPORT_SYMBOL(vunmap);

void *vm_map_ram(struct page **pages, unsigned int count, int node)
{
        BUG();
        return NULL;
}
EXPORT_SYMBOL(vm_map_ram);

void vm_unmap_ram(const void *mem, unsigned int count)
{
        BUG();
}
EXPORT_SYMBOL(vm_unmap_ram);

void vm_unmap_aliases(void)
{
}
EXPORT_SYMBOL_GPL(vm_unmap_aliases);

void free_vm_area(struct vm_struct *area)
{
        BUG();
}
EXPORT_SYMBOL_GPL(free_vm_area);

int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
                   struct page *page)
{
        return -EINVAL;
}
EXPORT_SYMBOL(vm_insert_page);

int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr,
                        struct page **pages, unsigned long *num)
{
        return -EINVAL;
}
EXPORT_SYMBOL(vm_insert_pages);

int vm_map_pages(struct vm_area_struct *vma, struct page **pages,
                        unsigned long num)
{
        return -EINVAL;
}
EXPORT_SYMBOL(vm_map_pages);

int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages,
                                unsigned long num)
{
        return -EINVAL;
}
EXPORT_SYMBOL(vm_map_pages_zero);

/*
 *  sys_brk() for the most part doesn't need the global kernel
 *  lock, except when an application is doing something nasty
 *  like trying to un-brk an area that has already been mapped
 *  to a regular file.  In this case, the unmapping will need
 *  to invoke file system routines that need the global lock.
 */
SYSCALL_DEFINE1(brk, unsigned long, brk)
{
        struct mm_struct *mm = current->mm;

        if (brk < mm->start_brk || brk > mm->context.end_brk)
                return mm->brk;

        if (mm->brk == brk)
                return mm->brk;

        /*
         * Always allow shrinking brk
         */
        if (brk <= mm->brk) {
                mm->brk = brk;
                return brk;
        }

        /*
         * Ok, looks good - let it rip.
         */
        flush_icache_user_range(mm->brk, brk);
        return mm->brk = brk;
}
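
/*
 * Illustrative note (an assumption, not from the original file): with no
 * MMU the heap cannot be grown beyond the space reserved at setup time,
 * so brk() only moves the break within [start_brk, context.end_brk):
 *
 *        brk(mm->start_brk);                // shrink: always allowed
 *        brk(mm->context.end_brk);        // grow up to the reserved limit: ok
 *        brk(mm->context.end_brk + 1);        // beyond the window: old brk returned
 */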

/*
 * initialise the percpu counter for VM and region record slabs
 */
void __init mmap_init(void)
{
        int ret;

        ret = percpu_counter_init(&vm_committed_as, 0, GFP_KERNEL);
        VM_BUG_ON(ret);
        vm_region_jar = KMEM_CACHE(vm_region, SLAB_PANIC|SLAB_ACCOUNT);
}

/*
 * validate the region tree
 * - the caller must hold the region lock
 */
#ifdef CONFIG_DEBUG_NOMMU_REGIONS
static noinline void validate_nommu_regions(void)
{
        struct vm_region *region, *last;
        struct rb_node *p, *lastp;

        lastp = rb_first(&nommu_region_tree);
        if (!lastp)
                return;

        last = rb_entry(lastp, struct vm_region, vm_rb);
        BUG_ON(last->vm_end <= last->vm_start);
        BUG_ON(last->vm_top < last->vm_end);

        while ((p = rb_next(lastp))) {
                region = rb_entry(p, struct vm_region, vm_rb);
                last = rb_entry(lastp, struct vm_region, vm_rb);

                BUG_ON(region->vm_end <= region->vm_start);
                BUG_ON(region->vm_top < region->vm_end);
                BUG_ON(region->vm_start < last->vm_top);

                lastp = p;
        }
}
#else
static void validate_nommu_regions(void)
{
}
#endif

/*
 * add a region into the global tree
 */
static void add_nommu_region(struct vm_region *region)
{
        struct vm_region *pregion;
        struct rb_node **p, *parent;

        validate_nommu_regions();

        parent = NULL;
        p = &nommu_region_tree.rb_node;
        while (*p) {
                parent = *p;
                pregion = rb_entry(parent, struct vm_region, vm_rb);
                if (region->vm_start < pregion->vm_start)
                        p = &(*p)->rb_left;
                else if (region->vm_start > pregion->vm_start)
                        p = &(*p)->rb_right;
                else if (pregion == region)
                        return;
                else
                        BUG();
        }

        rb_link_node(&region->vm_rb, parent, p);
        rb_insert_color(&region->vm_rb, &nommu_region_tree);

        validate_nommu_regions();
}

/*
 * delete a region from the global tree
 */
static void delete_nommu_region(struct vm_region *region)
{
        BUG_ON(!nommu_region_tree.rb_node);

        validate_nommu_regions();
        rb_erase(&region->vm_rb, &nommu_region_tree);
        validate_nommu_regions();
}

/*
 * free a contiguous series of pages
 */
static void free_page_series(unsigned long from, unsigned long to)
{
        for (; from < to; from += PAGE_SIZE) {
                struct page *page = virt_to_page((void *)from);

                atomic_long_dec(&mmap_pages_allocated);
                put_page(page);
        }
}

/*
 * release a reference to a region
 * - the caller must hold the region semaphore for writing, which this releases
 * - the region may not have been added to the tree yet, in which case vm_top
 *   will equal vm_start
 */
static void __put_nommu_region(struct vm_region *region)
        __releases(nommu_region_sem)
{
        BUG_ON(!nommu_region_tree.rb_node);

        if (--region->vm_usage == 0) {
                if (region->vm_top > region->vm_start)
                        delete_nommu_region(region);
                up_write(&nommu_region_sem);

                if (region->vm_file)
                        fput(region->vm_file);

                /* IO memory and memory shared directly out of the pagecache
                 * from ramfs/tmpfs mustn't be released here */
                if (region->vm_flags & VM_MAPPED_COPY)
                        free_page_series(region->vm_start, region->vm_top);
                kmem_cache_free(vm_region_jar, region);
        } else {
                up_write(&nommu_region_sem);
        }
}

/*
 * release a reference to a region
 */
static void put_nommu_region(struct vm_region *region)
{
        down_write(&nommu_region_sem);
        __put_nommu_region(region);
}

static void setup_vma_to_mm(struct vm_area_struct *vma, struct mm_struct *mm)
{
        vma->vm_mm = mm;

        /* add the VMA to the mapping */
        if (vma->vm_file) {
                struct address_space *mapping = vma->vm_file->f_mapping;

                i_mmap_lock_write(mapping);
                flush_dcache_mmap_lock(mapping);
                vma_interval_tree_insert(vma, &mapping->i_mmap);
                flush_dcache_mmap_unlock(mapping);
                i_mmap_unlock_write(mapping);
        }
}

static void cleanup_vma_from_mm(struct vm_area_struct *vma)
{
        vma->vm_mm->map_count--;
        /* remove the VMA from the mapping */
        if (vma->vm_file) {
                struct address_space *mapping;
                mapping = vma->vm_file->f_mapping;

                i_mmap_lock_write(mapping);
                flush_dcache_mmap_lock(mapping);
                vma_interval_tree_remove(vma, &mapping->i_mmap);
                flush_dcache_mmap_unlock(mapping);
                i_mmap_unlock_write(mapping);
        }
}

/*
 * delete a VMA from its owning mm_struct and address space
 */
static int delete_vma_from_mm(struct vm_area_struct *vma)
{
        VMA_ITERATOR(vmi, vma->vm_mm, vma->vm_start);

        vma_iter_config(&vmi, vma->vm_start, vma->vm_end);
        if (vma_iter_prealloc(&vmi, vma)) {
                pr_warn("Allocation of vma tree for process %d failed\n",
                       current->pid);
                return -ENOMEM;
        }
        cleanup_vma_from_mm(vma);

        /* remove from the MM's tree and list */
        vma_iter_clear(&vmi);
        return 0;
}
/*
 * destroy a VMA record
 */
static void delete_vma(struct mm_struct *mm, struct vm_area_struct *vma)
{
        if (vma->vm_ops && vma->vm_ops->close)
                vma->vm_ops->close(vma);
        if (vma->vm_file)
                fput(vma->vm_file);
        put_nommu_region(vma->vm_region);
        vm_area_free(vma);
}

struct vm_area_struct *find_vma_intersection(struct mm_struct *mm,
                                             unsigned long start_addr,
                                             unsigned long end_addr)
{
        unsigned long index = start_addr;

        mmap_assert_locked(mm);
        return mt_find(&mm->mm_mt, &index, end_addr - 1);
}
EXPORT_SYMBOL(find_vma_intersection);

/*
 * look up the first VMA in which addr resides, NULL if none
 * - should be called with mm->mmap_lock at least held readlocked
 */
struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
{
        VMA_ITERATOR(vmi, mm, addr);

        return vma_iter_load(&vmi);
}
EXPORT_SYMBOL(find_vma);
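
/*
 * Illustrative sketch (not part of the original file): callers take the
 * mmap lock themselves around the lookup, e.g.
 *
 *        mmap_read_lock(mm);
 *        vma = find_vma(mm, addr);
 *        if (vma)
 *                ...        // addr lies inside [vma->vm_start, vma->vm_end)
 *        mmap_read_unlock(mm);
 */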

/*
 * At least xtensa ends up having protection faults even with no
 * MMU. No stack expansion, at least.
 */
struct vm_area_struct *lock_mm_and_find_vma(struct mm_struct *mm,
                        unsigned long addr, struct pt_regs *regs)
{
        struct vm_area_struct *vma;

        mmap_read_lock(mm);
        vma = vma_lookup(mm, addr);
        if (!vma)
                mmap_read_unlock(mm);
        return vma;
}

/*
 * expand a stack to a given address
 * - not supported under NOMMU conditions
 */
int expand_stack_locked(struct vm_area_struct *vma, unsigned long addr)
{
        return -ENOMEM;
}

struct vm_area_struct *expand_stack(struct mm_struct *mm, unsigned long addr)
{
        mmap_read_unlock(mm);
        return NULL;
}

/*
 * look up the first VMA that exactly matches addr
 * - should be called with mm->mmap_lock at least held readlocked
 */
static struct vm_area_struct *find_vma_exact(struct mm_struct *mm,
                                             unsigned long addr,
                                             unsigned long len)
{
        struct vm_area_struct *vma;
        unsigned long end = addr + len;
        VMA_ITERATOR(vmi, mm, addr);

        vma = vma_iter_load(&vmi);
        if (!vma)
                return NULL;
        if (vma->vm_start != addr)
                return NULL;
        if (vma->vm_end != end)
                return NULL;

        return vma;
}

/*
 * determine whether a mapping should be permitted and, if so, what sort of
 * mapping we're capable of supporting
 */
static int validate_mmap_request(struct file *file,
                                 unsigned long addr,
                                 unsigned long len,
                                 unsigned long prot,
                                 unsigned long flags,
                                 unsigned long pgoff,
                                 unsigned long *_capabilities)
{
        unsigned long capabilities, rlen;
        int ret;

        /* do the simple checks first */
        if (flags & MAP_FIXED)
                return -EINVAL;

        if ((flags & MAP_TYPE) != MAP_PRIVATE &&
            (flags & MAP_TYPE) != MAP_SHARED)
                return -EINVAL;

        if (!len)
                return -EINVAL;

        /* Careful about overflows.. */
        rlen = PAGE_ALIGN(len);
        if (!rlen || rlen > TASK_SIZE)
                return -ENOMEM;

        /* offset overflow? */
        if ((pgoff + (rlen >> PAGE_SHIFT)) < pgoff)
                return -EOVERFLOW;

        if (file) {
                /* files must support mmap */
                if (!file->f_op->mmap)
                        return -ENODEV;

                /* work out if what we've got could possibly be shared
                 * - we support chardevs that provide their own "memory"
                 * - we support files/blockdevs that are memory backed
                 */
                if (file->f_op->mmap_capabilities) {
                        capabilities = file->f_op->mmap_capabilities(file);
                } else {
                        /* no explicit capabilities set, so assume some
                         * defaults */
                        switch (file_inode(file)->i_mode & S_IFMT) {
                        case S_IFREG:
                        case S_IFBLK:
                                capabilities = NOMMU_MAP_COPY;
                                break;

                        case S_IFCHR:
                                capabilities =
                                        NOMMU_MAP_DIRECT |
                                        NOMMU_MAP_READ |
                                        NOMMU_MAP_WRITE;
                                break;

                        default:
                                return -EINVAL;
                        }
                }

                /* eliminate any capabilities that we can't support on this
                 * device */
                if (!file->f_op->get_unmapped_area)
                        capabilities &= ~NOMMU_MAP_DIRECT;
                if (!(file->f_mode & FMODE_CAN_READ))
                        capabilities &= ~NOMMU_MAP_COPY;

                /* The file shall have been opened with read permission. */
                if (!(file->f_mode & FMODE_READ))
                        return -EACCES;

                if (flags & MAP_SHARED) {
                        /* do checks for writing, appending and locking */
                        if ((prot & PROT_WRITE) &&
                            !(file->f_mode & FMODE_WRITE))
                                return -EACCES;

                        if (IS_APPEND(file_inode(file)) &&
                            (file->f_mode & FMODE_WRITE))
                                return -EACCES;

                        if (!(capabilities & NOMMU_MAP_DIRECT))
                                return -ENODEV;

                        /* we mustn't privatise shared mappings */
                        capabilities &= ~NOMMU_MAP_COPY;
                } else {
                        /* we're going to read the file into private memory we
                         * allocate */
                        if (!(capabilities & NOMMU_MAP_COPY))
                                return -ENODEV;

                        /* we don't permit a private writable mapping to be
                         * shared with the backing device */
                        if (prot & PROT_WRITE)
                                capabilities &= ~NOMMU_MAP_DIRECT;
                }

                if (capabilities & NOMMU_MAP_DIRECT) {
                        if (((prot & PROT_READ)  && !(capabilities & NOMMU_MAP_READ))  ||
                            ((prot & PROT_WRITE) && !(capabilities & NOMMU_MAP_WRITE)) ||
                            ((prot & PROT_EXEC)  && !(capabilities & NOMMU_MAP_EXEC))
                            ) {
                                capabilities &= ~NOMMU_MAP_DIRECT;
                                if (flags & MAP_SHARED) {
                                        pr_warn("MAP_SHARED not completely supported on !MMU\n");
                                        return -EINVAL;
                                }
                        }
                }

                /* handle executable mappings and implied executable
                 * mappings */
                if (path_noexec(&file->f_path)) {
                        if (prot & PROT_EXEC)
                                return -EPERM;
                } else if ((prot & PROT_READ) && !(prot & PROT_EXEC)) {
                        /* handle implication of PROT_EXEC by PROT_READ */
                        if (current->personality & READ_IMPLIES_EXEC) {
                                if (capabilities & NOMMU_MAP_EXEC)
                                        prot |= PROT_EXEC;
                        }
                } else if ((prot & PROT_READ) &&
                         (prot & PROT_EXEC) &&
                         !(capabilities & NOMMU_MAP_EXEC)
                         ) {
                        /* backing file is not executable, try to copy */
                        capabilities &= ~NOMMU_MAP_DIRECT;
                }
        } else {
                /* anonymous mappings are always memory backed and can be
                 * privately mapped
                 */
                capabilities = NOMMU_MAP_COPY;

                /* handle PROT_EXEC implication by PROT_READ */
                if ((prot & PROT_READ) &&
                    (current->personality & READ_IMPLIES_EXEC))
                        prot |= PROT_EXEC;
        }

        /* allow the security API to have its say */
        ret = security_mmap_addr(addr);
        if (ret < 0)
                return ret;

        /* looks okay */
        *_capabilities = capabilities;
        return 0;
}

/*
 * we've determined that we can make the mapping, now translate what we
 * now know into VMA flags
 */
static unsigned long determine_vm_flags(struct file *file,
                                        unsigned long prot,
                                        unsigned long flags,
                                        unsigned long capabilities)
{
        unsigned long vm_flags;

        vm_flags = calc_vm_prot_bits(prot, 0) | calc_vm_flag_bits(flags);

        if (!file) {
                /*
                 * MAP_ANONYMOUS. MAP_SHARED is mapped to MAP_PRIVATE, because
                 * there is no fork().
                 */
                vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
        } else if (flags & MAP_PRIVATE) {
                /* MAP_PRIVATE file mapping */
                if (capabilities & NOMMU_MAP_DIRECT)
                        vm_flags |= (capabilities & NOMMU_VMFLAGS);
                else
                        vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;

                if (!(prot & PROT_WRITE) && !current->ptrace)
                        /*
                         * R/O private file mapping which cannot be used to
                         * modify memory, especially also not via active ptrace
                         * (e.g., set breakpoints) or later by upgrading
                         * permissions (no mprotect()). We can try overlaying
                         * the file mapping, which will work e.g., on chardevs,
                         * ramfs/tmpfs/shmfs and romfs/cramfs.
                         */
                        vm_flags |= VM_MAYOVERLAY;
        } else {
                /* MAP_SHARED file mapping: NOMMU_MAP_DIRECT is set. */
                vm_flags |= VM_SHARED | VM_MAYSHARE |
                            (capabilities & NOMMU_VMFLAGS);
        }

        return vm_flags;
}

/*
 * set up a shared mapping on a file (the driver or filesystem provides and
 * pins the storage)
 */
static int do_mmap_shared_file(struct vm_area_struct *vma)
{
        int ret;

        ret = call_mmap(vma->vm_file, vma);
        if (ret == 0) {
                vma->vm_region->vm_top = vma->vm_region->vm_end;
                return 0;
        }
        if (ret != -ENOSYS)
                return ret;

        /* getting -ENOSYS indicates that direct mmap isn't possible (as
         * opposed to tried but failed) so we can only give a suitable error as
         * it's not possible to make a private copy if MAP_SHARED was given */
        return -ENODEV;
}

/*
 * set up a private mapping or an anonymous shared mapping
 */
static int do_mmap_private(struct vm_area_struct *vma,
                           struct vm_region *region,
                           unsigned long len,
                           unsigned long capabilities)
{
        unsigned long total, point;
        void *base;
        int ret, order;

        /*
         * Invoke the file's mapping function so that it can keep track of
         * shared mappings on devices or memory. VM_MAYOVERLAY will be set if
         * it may attempt to share, which will make is_nommu_shared_mapping()
         * happy.
         */
        if (capabilities & NOMMU_MAP_DIRECT) {
                ret = call_mmap(vma->vm_file, vma);
                /* shouldn't return success if we're not sharing */
                if (WARN_ON_ONCE(!is_nommu_shared_mapping(vma->vm_flags)))
                        ret = -ENOSYS;
                if (ret == 0) {
                        vma->vm_region->vm_top = vma->vm_region->vm_end;
                        return 0;
                }
                if (ret != -ENOSYS)
                        return ret;

                /* getting an ENOSYS error indicates that direct mmap isn't
                 * possible (as opposed to tried but failed) so we'll try to
                 * make a private copy of the data and map that instead */
        }


        /* allocate some memory to hold the mapping
         * - note that this may not return a page-aligned address if the object
         *   we're allocating is smaller than a page
         */
        order = get_order(len);
        total = 1 << order;
        point = len >> PAGE_SHIFT;

        /* we don't want to allocate a power-of-2 sized page set */
        if (sysctl_nr_trim_pages && total - point >= sysctl_nr_trim_pages)
                total = point;

        base = alloc_pages_exact(total << PAGE_SHIFT, GFP_KERNEL);
        if (!base)
                goto enomem;

        atomic_long_add(total, &mmap_pages_allocated);

        vm_flags_set(vma, VM_MAPPED_COPY);
        region->vm_flags = vma->vm_flags;
        region->vm_start = (unsigned long) base;
        region->vm_end   = region->vm_start + len;
        region->vm_top   = region->vm_start + (total << PAGE_SHIFT);

        vma->vm_start = region->vm_start;
        vma->vm_end   = region->vm_start + len;

        if (vma->vm_file) {
                /* read the contents of a file into the copy */
                loff_t fpos;

                fpos = vma->vm_pgoff;
                fpos <<= PAGE_SHIFT;

                ret = kernel_read(vma->vm_file, base, len, &fpos);
                if (ret < 0)
                        goto error_free;

                /* clear the last little bit */
                if (ret < len)
                        memset(base + ret, 0, len - ret);

        } else {
                vma_set_anonymous(vma);
        }

        return 0;

error_free:
        free_page_series(region->vm_start, region->vm_top);
        region->vm_start = vma->vm_start = 0;
        region->vm_end   = vma->vm_end = 0;
        region->vm_top   = 0;
        return ret;

enomem:
        pr_err("Allocation of length %lu from process %d (%s) failed\n",
               len, current->pid, current->comm);
        show_mem();
        return -ENOMEM;
}
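
/*
 * Illustrative worked example (not part of the original file): for a
 * 5-page request, get_order() rounds up to order 3, so total = 8 pages
 * while only point = 5 pages are needed.  With the default
 * sysctl_nr_trim_pages of 1 (CONFIG_NOMMU_INITIAL_TRIM_EXCESS),
 * total - point = 3 >= 1, so the request is trimmed back to an exact
 * 5-page allocation via alloc_pages_exact(), and vm_top records where
 * the real allocation ends.
 */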

/*
 * handle mapping creation for uClinux
 */
unsigned long do_mmap(struct file *file,
                        unsigned long addr,
                        unsigned long len,
                        unsigned long prot,
                        unsigned long flags,
                        vm_flags_t vm_flags,
                        unsigned long pgoff,
                        unsigned long *populate,
                        struct list_head *uf)
{
        struct vm_area_struct *vma;
        struct vm_region *region;
        struct rb_node *rb;
        unsigned long capabilities, result;
        int ret;
        VMA_ITERATOR(vmi, current->mm, 0);

        *populate = 0;

        /* decide whether we should attempt the mapping, and if so what sort of
         * mapping */
        ret = validate_mmap_request(file, addr, len, prot, flags, pgoff,
                                    &capabilities);
        if (ret < 0)
                return ret;

        /* we ignore the address hint */
        addr = 0;
        len = PAGE_ALIGN(len);

        /* we've determined that we can make the mapping, now translate what we
         * now know into VMA flags */
        vm_flags |= determine_vm_flags(file, prot, flags, capabilities);


        /* we're going to need to record the mapping */
        region = kmem_cache_zalloc(vm_region_jar, GFP_KERNEL);
        if (!region)
                goto error_getting_region;

        vma = vm_area_alloc(current->mm);
        if (!vma)
                goto error_getting_vma;

        region->vm_usage = 1;
        region->vm_flags = vm_flags;
        region->vm_pgoff = pgoff;

        vm_flags_init(vma, vm_flags);
        vma->vm_pgoff = pgoff;

        if (file) {
                region->vm_file = get_file(file);
                vma->vm_file = get_file(file);
        }

        down_write(&nommu_region_sem);

        /* if we want to share, we need to check for regions created by other
         * mmap() calls that overlap with our proposed mapping
         * - we can only share with a superset match on most regular files
         * - shared mappings on character devices and memory backed files are
         *   permitted to overlap inexactly as far as we are concerned, for in
         *   these cases sharing is handled in the driver or filesystem rather
         *   than here
         */
        if (is_nommu_shared_mapping(vm_flags)) {
                struct vm_region *pregion;
                unsigned long pglen, rpglen, pgend, rpgend, start;

                pglen = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
                pgend = pgoff + pglen;

                for (rb = rb_first(&nommu_region_tree); rb; rb = rb_next(rb)) {
                        pregion = rb_entry(rb, struct vm_region, vm_rb);

                        if (!is_nommu_shared_mapping(pregion->vm_flags))
                                continue;

                        /* search for overlapping mappings on the same file */
                        if (file_inode(pregion->vm_file) !=
                            file_inode(file))
                                continue;

                        if (pregion->vm_pgoff >= pgend)
                                continue;

                        rpglen = pregion->vm_end - pregion->vm_start;
                        rpglen = (rpglen + PAGE_SIZE - 1) >> PAGE_SHIFT;
                        rpgend = pregion->vm_pgoff + rpglen;
                        if (pgoff >= rpgend)
                                continue;

                        /* handle inexactly overlapping matches between
                         * mappings */
                        if ((pregion->vm_pgoff != pgoff || rpglen != pglen) &&
                            !(pgoff >= pregion->vm_pgoff && pgend <= rpgend)) {
                                /* new mapping is not a subset of the region */
                                if (!(capabilities & NOMMU_MAP_DIRECT))
                                        goto sharing_violation;
                                continue;
                        }

                        /* we've found a region we can share */
                        pregion->vm_usage++;
                        vma->vm_region = pregion;
                        start = pregion->vm_start;
                        start += (pgoff - pregion->vm_pgoff) << PAGE_SHIFT;
                        vma->vm_start = start;
                        vma->vm_end = start + len;

                        if (pregion->vm_flags & VM_MAPPED_COPY)
                                vm_flags_set(vma, VM_MAPPED_COPY);
                        else {
                                ret = do_mmap_shared_file(vma);
                                if (ret < 0) {
                                        vma->vm_region = NULL;
                                        vma->vm_start = 0;
                                        vma->vm_end = 0;
                                        pregion->vm_usage--;
                                        pregion = NULL;
                                        goto error_just_free;
                                }
                        }
                        fput(region->vm_file);
                        kmem_cache_free(vm_region_jar, region);
                        region = pregion;
                        result = start;
                        goto share;
                }

                /* obtain the address at which to make a shared mapping
                 * - this is the hook for quasi-memory character devices to
                 *   tell us the location of a shared mapping
                 */
                if (capabilities & NOMMU_MAP_DIRECT) {
                        addr = file->f_op->get_unmapped_area(file, addr, len,
                                                             pgoff, flags);
                        if (IS_ERR_VALUE(addr)) {
                                ret = addr;
                                if (ret != -ENOSYS)
                                        goto error_just_free;

                                /* the driver refused to tell us where to site
                                 * the mapping so we'll have to attempt to copy
                                 * it */
                                ret = -ENODEV;
                                if (!(capabilities & NOMMU_MAP_COPY))
                                        goto error_just_free;

                                capabilities &= ~NOMMU_MAP_DIRECT;
                        } else {
                                vma->vm_start = region->vm_start = addr;
                                vma->vm_end = region->vm_end = addr + len;
                        }
                }
        }

        vma->vm_region = region;

        /* set up the mapping
         * - the region is filled in if NOMMU_MAP_DIRECT is still set
         */
        if (file && vma->vm_flags & VM_SHARED)
                ret = do_mmap_shared_file(vma);
        else
                ret = do_mmap_private(vma, region, len, capabilities);
        if (ret < 0)
                goto error_just_free;
        add_nommu_region(region);

        /* clear anonymous mappings that don't ask for uninitialized data */
        if (!vma->vm_file &&
            (!IS_ENABLED(CONFIG_MMAP_ALLOW_UNINITIALIZED) ||
             !(flags & MAP_UNINITIALIZED)))
                memset((void *)region->vm_start, 0,
                       region->vm_end - region->vm_start);

        /* okay... we have a mapping; now we have to register it */
        result = vma->vm_start;

        current->mm->total_vm += len >> PAGE_SHIFT;

share:
        BUG_ON(!vma->vm_region);
        vma_iter_config(&vmi, vma->vm_start, vma->vm_end);
        if (vma_iter_prealloc(&vmi, vma))
                goto error_just_free;

        setup_vma_to_mm(vma, current->mm);
        current->mm->map_count++;
        /* add the VMA to the tree */
        vma_iter_store(&vmi, vma);

        /* we flush the region from the icache only when the first executable
         * mapping of it is made  */
        if (vma->vm_flags & VM_EXEC && !region->vm_icache_flushed) {
                flush_icache_user_range(region->vm_start, region->vm_end);
                region->vm_icache_flushed = true;
        }

        up_write(&nommu_region_sem);

        return result;

error_just_free:
        up_write(&nommu_region_sem);
error:
        vma_iter_free(&vmi);
        if (region->vm_file)
                fput(region->vm_file);
        kmem_cache_free(vm_region_jar, region);
        if (vma->vm_file)
                fput(vma->vm_file);
        vm_area_free(vma);
        return ret;

sharing_violation:
        up_write(&nommu_region_sem);
        pr_warn("Attempt to share mismatched mappings\n");
        ret = -EINVAL;
        goto error;

error_getting_vma:
        kmem_cache_free(vm_region_jar, region);
        pr_warn("Allocation of vma for %lu byte allocation from process %d failed\n",
                        len, current->pid);
        show_mem();
        return -ENOMEM;

error_getting_region:
        pr_warn("Allocation of vm region for %lu byte allocation from process %d failed\n",
                        len, current->pid);
        show_mem();
        return -ENOMEM;
}

unsigned long ksys_mmap_pgoff(unsigned long addr, unsigned long len,
                              unsigned long prot, unsigned long flags,
                              unsigned long fd, unsigned long pgoff)
{
        struct file *file = NULL;
        unsigned long retval = -EBADF;

        audit_mmap_fd(fd, flags);
        if (!(flags & MAP_ANONYMOUS)) {
                file = fget(fd);
                if (!file)
                        goto out;
        }

        retval = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff);

        if (file)
                fput(file);
out:
        return retval;
}

SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
                unsigned long, prot, unsigned long, flags,
                unsigned long, fd, unsigned long, pgoff)
{
        return ksys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
}
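
/*
 * Illustrative note (an assumption, not from the original file): the
 * address hint is ignored by do_mmap() and MAP_FIXED is rejected
 * outright, so a typical !MMU mapping from userspace looks like:
 *
 *        p = mmap(NULL, len, PROT_READ, MAP_PRIVATE, fd, 0);
 *        // the kernel picks the address; a private file mapping is
 *        // usually satisfied by reading the file into a private copy
 */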

#ifdef __ARCH_WANT_SYS_OLD_MMAP
struct mmap_arg_struct {
        unsigned long addr;
        unsigned long len;
        unsigned long prot;
        unsigned long flags;
        unsigned long fd;
        unsigned long offset;
};

SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg)
{
        struct mmap_arg_struct a;

        if (copy_from_user(&a, arg, sizeof(a)))
                return -EFAULT;
        if (offset_in_page(a.offset))
                return -EINVAL;

        return ksys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
                               a.offset >> PAGE_SHIFT);
}
#endif /* __ARCH_WANT_SYS_OLD_MMAP */

/*
 * split a vma into two pieces at address 'addr'; a new vma is allocated
 * either for the first part or the tail.
 */
static int split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
                     unsigned long addr, int new_below)
{
        struct vm_area_struct *new;
        struct vm_region *region;
        unsigned long npages;
        struct mm_struct *mm;

        /* we're only permitted to split anonymous regions (these should have
         * only a single usage on the region) */
        if (vma->vm_file)
                return -ENOMEM;

        mm = vma->vm_mm;
        if (mm->map_count >= sysctl_max_map_count)
                return -ENOMEM;

        region = kmem_cache_alloc(vm_region_jar, GFP_KERNEL);
        if (!region)
                return -ENOMEM;

        new = vm_area_dup(vma);
        if (!new)
                goto err_vma_dup;

        /* most fields are the same, copy all, and then fixup */
        *region = *vma->vm_region;
        new->vm_region = region;

        npages = (addr - vma->vm_start) >> PAGE_SHIFT;

        if (new_below) {
                region->vm_top = region->vm_end = new->vm_end = addr;
        } else {
                region->vm_start = new->vm_start = addr;
                region->vm_pgoff = new->vm_pgoff += npages;
        }

        vma_iter_config(vmi, new->vm_start, new->vm_end);
        if (vma_iter_prealloc(vmi, vma)) {
                pr_warn("Allocation of vma tree for process %d failed\n",
                        current->pid);
                goto err_vmi_preallocate;
        }

        if (new->vm_ops && new->vm_ops->open)
                new->vm_ops->open(new);

        down_write(&nommu_region_sem);
        delete_nommu_region(vma->vm_region);
        if (new_below) {
                vma->vm_region->vm_start = vma->vm_start = addr;
                vma->vm_region->vm_pgoff = vma->vm_pgoff += npages;
        } else {
                vma->vm_region->vm_end = vma->vm_end = addr;
                vma->vm_region->vm_top = addr;
        }
        add_nommu_region(vma->vm_region);
        add_nommu_region(new->vm_region);
        up_write(&nommu_region_sem);

        setup_vma_to_mm(vma, mm);
        setup_vma_to_mm(new, mm);
        vma_iter_store(vmi, new);
        mm->map_count++;
        return 0;

err_vmi_preallocate:
        vm_area_free(new);
err_vma_dup:
        kmem_cache_free(vm_region_jar, region);
        return -ENOMEM;
}

/*
 * shrink a VMA by removing the specified chunk from either the beginning or
 * the end
 */
static int vmi_shrink_vma(struct vma_iterator *vmi,
                      struct vm_area_struct *vma,
                      unsigned long from, unsigned long to)
{
        struct vm_region *region;

        /* adjust the VMA's pointers, which may reposition it in the MM's tree
         * and list */
        if (from > vma->vm_start) {
                if (vma_iter_clear_gfp(vmi, from, vma->vm_end, GFP_KERNEL))
                        return -ENOMEM;
                vma->vm_end = from;
        } else {
                if (vma_iter_clear_gfp(vmi, vma->vm_start, to, GFP_KERNEL))
                        return -ENOMEM;
                vma->vm_start = to;
        }

        /* cut the backing region down to size */
        region = vma->vm_region;
        BUG_ON(region->vm_usage != 1);

        down_write(&nommu_region_sem);
        delete_nommu_region(region);
        if (from > region->vm_start) {
                to = region->vm_top;
                region->vm_top = region->vm_end = from;
        } else {
                region->vm_start = to;
        }
        add_nommu_region(region);
        up_write(&nommu_region_sem);

        free_page_series(from, to);
        return 0;
}

/*
 * release a mapping
 * - under NOMMU conditions the chunk to be unmapped must be backed by a single
 *   VMA, though it need not cover the whole VMA
 */
int do_munmap(struct mm_struct *mm, unsigned long start, size_t len, struct list_head *uf)
{
        VMA_ITERATOR(vmi, mm, start);
        struct vm_area_struct *vma;
        unsigned long end;
        int ret = 0;

        len = PAGE_ALIGN(len);
        if (len == 0)
                return -EINVAL;

        end = start + len;

        /* find the first potentially overlapping VMA */
        vma = vma_find(&vmi, end);
        if (!vma) {
                static int limit;
                if (limit < 5) {
                        pr_warn("munmap of memory not mmapped by process %d (%s): 0x%lx-0x%lx\n",
                                        current->pid, current->comm,
                                        start, start + len - 1);
                        limit++;
                }
                return -EINVAL;
        }

        /* we're allowed to split an anonymous VMA but not a file-backed one */
        if (vma->vm_file) {
                do {
                        if (start > vma->vm_start)
                                return -EINVAL;
                        if (end == vma->vm_end)
                                goto erase_whole_vma;
                        vma = vma_find(&vmi, end);
                } while (vma);
                return -EINVAL;
        } else {
                /* the chunk must be a subset of the VMA found */
                if (start == vma->vm_start && end == vma->vm_end)
                        goto erase_whole_vma;
                if (start < vma->vm_start || end > vma->vm_end)
                        return -EINVAL;
                if (offset_in_page(start))
                        return -EINVAL;
                if (end != vma->vm_end && offset_in_page(end))
                        return -EINVAL;
                if (start != vma->vm_start && end != vma->vm_end) {
                        ret = split_vma(&vmi, vma, start, 1);
                        if (ret < 0)
                                return ret;
                }
                return vmi_shrink_vma(&vmi, vma, start, end);
        }

erase_whole_vma:
        if (delete_vma_from_mm(vma))
                ret = -ENOMEM;
        else
                delete_vma(mm, vma);
        return ret;
}

int vm_munmap(unsigned long addr, size_t len)
{
        struct mm_struct *mm = current->mm;
        int ret;

        mmap_write_lock(mm);
        ret = do_munmap(mm, addr, len, NULL);
        mmap_write_unlock(mm);
        return ret;
}
EXPORT_SYMBOL(vm_munmap);
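
/*
 * Illustrative sketch (an assumption, not from the original file): since
 * an unmap on !MMU must stay within a single VMA, and only anonymous
 * VMAs may be split, a partial unmap might look like:
 *
 *        addr = vm_mmap(NULL, 0, 3 * PAGE_SIZE, PROT_READ | PROT_WRITE,
 *                       MAP_PRIVATE | MAP_ANONYMOUS, 0);
 *        vm_munmap(addr + PAGE_SIZE, PAGE_SIZE);        // middle page: split + shrink
 */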

SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
{
        return vm_munmap(addr, len);
}

/*
 * release all the mappings made in a process's VM space
 */
void exit_mmap(struct mm_struct *mm)
{
        VMA_ITERATOR(vmi, mm, 0);
        struct vm_area_struct *vma;

        if (!mm)
                return;

        mm->total_vm = 0;

        /*
         * Lock the mm to avoid assert complaining even though this is the only
         * user of the mm
         */
        mmap_write_lock(mm);
        for_each_vma(vmi, vma) {
                cleanup_vma_from_mm(vma);
                delete_vma(mm, vma);
                cond_resched();
        }
        __mt_destroy(&mm->mm_mt);
        mmap_write_unlock(mm);
}

/*
 * expand (or shrink) an existing mapping, potentially moving it at the same
 * time (controlled by the MREMAP_MAYMOVE flag and available VM space)
 *
 * under NOMMU conditions, we only permit changing a mapping's size, and only
 * as long as it stays within the region allocated by do_mmap_private() and the
 * block is not shareable
 *
 * MREMAP_FIXED is not supported under NOMMU conditions
 */
static unsigned long do_mremap(unsigned long addr,
                        unsigned long old_len, unsigned long new_len,
                        unsigned long flags, unsigned long new_addr)
{
        struct vm_area_struct *vma;

        /* insanity checks first */
        old_len = PAGE_ALIGN(old_len);
        new_len = PAGE_ALIGN(new_len);
        if (old_len == 0 || new_len == 0)
                return (unsigned long) -EINVAL;

        if (offset_in_page(addr))
                return -EINVAL;

        if (flags & MREMAP_FIXED && new_addr != addr)
                return (unsigned long) -EINVAL;

        vma = find_vma_exact(current->mm, addr, old_len);
        if (!vma)
                return (unsigned long) -EINVAL;

        if (vma->vm_end != vma->vm_start + old_len)
                return (unsigned long) -EFAULT;

        if (is_nommu_shared_mapping(vma->vm_flags))
                return (unsigned long) -EPERM;

        if (new_len > vma->vm_region->vm_end - vma->vm_region->vm_start)
                return (unsigned long) -ENOMEM;

        /* all checks complete - do it */
        vma->vm_end = vma->vm_start + new_len;
        return vma->vm_start;
}

SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
                unsigned long, new_len, unsigned long, flags,
                unsigned long, new_addr)
{
        unsigned long ret;

        mmap_write_lock(current->mm);
        ret = do_mremap(addr, old_len, new_len, flags, new_addr);
        mmap_write_unlock(current->mm);
        return ret;
}
1596
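/*
 * Example (illustrative sketch): NOMMU mremap() never moves a mapping; it can
 * only resize one in place, and growing works only while new_len still fits
 * inside the region that do_mmap_private() originally allocated. From
 * userspace, assuming 4 KiB pages:
 *
 *	void *p = mmap(NULL, 2 * 4096, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *
 *	p = mremap(p, 2 * 4096, 4096, 0);	// shrink in place: OK
 *	p = mremap(p, 4096, 2 * 4096, 0);	// grow back into the region: OK
 *	p = mremap(p, 2 * 4096, 4 * 4096, 0);	// typically fails: ENOMEM
 */
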
/*
 * There are no page tables to walk on NOMMU, so there is never a struct page
 * to hand back.
 */
struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
			 unsigned int foll_flags)
{
	return NULL;
}

int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
		unsigned long pfn, unsigned long size, pgprot_t prot)
{
	/*
	 * With no MMU there is nothing to remap: the request can only succeed
	 * if the virtual address already is the physical address.
	 */
	if (addr != (pfn << PAGE_SHIFT))
		return -EINVAL;

	vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
	return 0;
}
EXPORT_SYMBOL(remap_pfn_range);

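/*
 * Example (hedged sketch, names hypothetical): a NOMMU driver's mmap handler
 * typically just validates the identity mapping:
 *
 *	static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		unsigned long pfn = vma->vm_pgoff +
 *				    (MYDEV_PHYS_BASE >> PAGE_SHIFT);
 *
 *		return remap_pfn_range(vma, vma->vm_start, pfn,
 *				       vma->vm_end - vma->vm_start,
 *				       vma->vm_page_prot);
 *	}
 *
 * This succeeds only when vma->vm_start == pfn << PAGE_SHIFT, i.e. when the
 * driver's get_unmapped_area handler already placed the VMA over the device
 * memory.
 */
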
int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len)
{
	unsigned long pfn = start >> PAGE_SHIFT;
	unsigned long vm_len = vma->vm_end - vma->vm_start;

	/* map the whole VMA, honouring the caller's vm_pgoff offset */
	pfn += vma->vm_pgoff;
	return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot);
}
EXPORT_SYMBOL(vm_iomap_memory);

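/*
 * Example (sketch, MYDEV_* names hypothetical): vm_iomap_memory() is the
 * convenience wrapper for the above - the driver hands over its physical
 * window and lets the helper fold in vma->vm_pgoff itself:
 *
 *	return vm_iomap_memory(vma, MYDEV_PHYS_BASE, MYDEV_WIN_SIZE);
 */
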
int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
			unsigned long pgoff)
{
	unsigned int size = vma->vm_end - vma->vm_start;

	if (!(vma->vm_flags & VM_USERMAP))
		return -EINVAL;

	/* on NOMMU, userspace shares the kernel buffer directly */
	vma->vm_start = (unsigned long)(addr + (pgoff << PAGE_SHIFT));
	vma->vm_end = vma->vm_start + size;

	return 0;
}
EXPORT_SYMBOL(remap_vmalloc_range);

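/*
 * Example (sketch, names hypothetical; assumes the core mmap code left
 * VM_USERMAP set on the VMA): a driver exporting a vmalloc'd buffer to
 * userspace allocates it with vmalloc_user() and then repoints the VMA at
 * the kernel buffer from its mmap handler:
 *
 *	buf = vmalloc_user(MYDEV_BUF_SIZE);	// at probe/init time
 *
 *	static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return remap_vmalloc_range(vma, buf, 0);
 *	}
 */
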
/*
 * Demand paging does not exist on NOMMU; these entry points must never be
 * reached.
 */
vm_fault_t filemap_fault(struct vm_fault *vmf)
{
	BUG();
	return 0;
}
EXPORT_SYMBOL(filemap_fault);

vm_fault_t filemap_map_pages(struct vm_fault *vmf,
		pgoff_t start_pgoff, pgoff_t end_pgoff)
{
	BUG();
	return 0;
}
EXPORT_SYMBOL(filemap_map_pages);

static int __access_remote_vm(struct mm_struct *mm, unsigned long addr,
			      void *buf, int len, unsigned int gup_flags)
{
	struct vm_area_struct *vma;
	int write = gup_flags & FOLL_WRITE;

	if (mmap_read_lock_killable(mm))
		return 0;

	/* the access must start within one of the target process's mappings */
	vma = find_vma(mm, addr);
	if (vma) {
		/* don't overrun this mapping */
		if (addr + len >= vma->vm_end)
			len = vma->vm_end - addr;

		/* only read or write mappings where it is permitted */
		if (write && (vma->vm_flags & VM_MAYWRITE))
			copy_to_user_page(vma, NULL, addr,
					 (void *) addr, buf, len);
		else if (!write && (vma->vm_flags & VM_MAYREAD))
			copy_from_user_page(vma, NULL, addr,
					    buf, (void *) addr, len);
		else
			len = 0;
	} else {
		len = 0;
	}

	mmap_read_unlock(mm);

	return len;
}

/**
 * access_remote_vm - access another process' address space
 * @mm:		the mm_struct of the target address space
 * @addr:	start address to access
 * @buf:	source or destination buffer
 * @len:	number of bytes to transfer
 * @gup_flags:	flags modifying lookup behaviour
 *
 * The caller must hold a reference on @mm.
 *
 * Return: the number of bytes transferred, or 0 if nothing could be copied.
 */
int access_remote_vm(struct mm_struct *mm, unsigned long addr,
		void *buf, int len, unsigned int gup_flags)
{
	return __access_remote_vm(mm, addr, buf, len, gup_flags);
}

/*
 * Access another process' address space.
 * - source/target buffer must be kernel space
 */
int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len,
		unsigned int gup_flags)
{
	struct mm_struct *mm;

	/* reject ranges that wrap the address space */
	if (addr + len < addr)
		return 0;

	mm = get_task_mm(tsk);
	if (!mm)
		return 0;

	len = __access_remote_vm(mm, addr, buf, len, gup_flags);

	mmput(mm);
	return len;
}
EXPORT_SYMBOL_GPL(access_process_vm);

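/*
 * Example (sketch): this is the primitive behind ptrace() peeks and
 * /proc/<pid>/mem. A kernel-side caller reading one word out of another
 * task might look like this (error handling trimmed):
 *
 *	unsigned long word;
 *
 *	if (access_process_vm(child, addr, &word, sizeof(word),
 *			      FOLL_FORCE) != sizeof(word))
 *		return -EIO;
 */
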
/**
 * nommu_shrink_inode_mappings - Shrink the shared mappings on an inode
 * @inode: The inode to check
 * @size: The current filesize of the inode
 * @newsize: The proposed filesize of the inode
 *
 * Check the shared mappings on an inode on behalf of a shrinking truncate to
 * make sure that any outstanding VMAs aren't broken, then shrink any
 * vm_regions that extend beyond the new size so that do_mmap() doesn't
 * automatically grant mappings that are too large.
 */
int nommu_shrink_inode_mappings(struct inode *inode, size_t size,
				size_t newsize)
{
	struct vm_area_struct *vma;
	struct vm_region *region;
	pgoff_t low, high;
	size_t r_size, r_top;

	low = newsize >> PAGE_SHIFT;
	high = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;

	down_write(&nommu_region_sem);
	i_mmap_lock_read(inode->i_mapping);

	/* search for VMAs that fall within the dead zone */
	vma_interval_tree_foreach(vma, &inode->i_mapping->i_mmap, low, high) {
		/* found one - only interested if it's shared out of the page
		 * cache */
		if (vma->vm_flags & VM_SHARED) {
			i_mmap_unlock_read(inode->i_mapping);
			up_write(&nommu_region_sem);
			return -ETXTBSY; /* not quite true, but near enough */
		}
	}

	/* reduce any regions that overlap the dead zone - if they exist, they
	 * will be pointed to by VMAs that don't overlap the dead zone
	 *
	 * we don't check for any regions that start beyond the EOF as there
	 * shouldn't be any
	 */
	vma_interval_tree_foreach(vma, &inode->i_mapping->i_mmap, 0, ULONG_MAX) {
		if (!(vma->vm_flags & VM_SHARED))
			continue;

		region = vma->vm_region;
		r_size = region->vm_top - region->vm_start;
		r_top = (region->vm_pgoff << PAGE_SHIFT) + r_size;

		if (r_top > newsize) {
			region->vm_top -= r_top - newsize;
			if (region->vm_end > region->vm_top)
				region->vm_end = region->vm_top;
		}
	}

	i_mmap_unlock_read(inode->i_mapping);
	up_write(&nommu_region_sem);
	return 0;
}

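/*
 * Example (sketch): a NOMMU-capable filesystem is expected to call this from
 * its shrinking-truncate path before any pages are actually freed, roughly
 * along the lines of ramfs' resize helper:
 *
 *	ret = nommu_shrink_inode_mappings(inode, inode->i_size, newsize);
 *	if (ret < 0)
 *		return ret;
 *	truncate_setsize(inode, newsize);
 */
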
/*
 * Initialise sysctl_user_reserve_kbytes.
 *
 * This is intended to prevent a user from starting a single memory hogging
 * process, such that they cannot recover (kill the hog) in OVERCOMMIT_NEVER
 * mode.
 *
 * The default value is min(1/32 (~3%) of free memory, 128MB).
 * 128MB is enough to recover with sshd/login, bash, and top/kill.
 */
static int __meminit init_user_reserve(void)
{
	unsigned long free_kbytes;

	free_kbytes = K(global_zone_page_state(NR_FREE_PAGES));

	/* 1UL << 17 kbytes == 128MB */
	sysctl_user_reserve_kbytes = min(free_kbytes / 32, 1UL << 17);
	return 0;
}
subsys_initcall(init_user_reserve);

/*
 * Initialise sysctl_admin_reserve_kbytes.
 *
 * The purpose of sysctl_admin_reserve_kbytes is to allow the sys admin
 * to log in and kill a memory hogging process.
 *
 * Systems with more than 256MB free will reserve 8MB, enough to recover
 * with sshd, bash, and top in OVERCOMMIT_GUESS. Smaller systems will
 * only reserve 1/32 (~3%) of free pages by default.
 */
static int __meminit init_admin_reserve(void)
{
	unsigned long free_kbytes;

	free_kbytes = K(global_zone_page_state(NR_FREE_PAGES));

	/* 1UL << 13 kbytes == 8MB */
	sysctl_admin_reserve_kbytes = min(free_kbytes / 32, 1UL << 13);
	return 0;
}
subsys_initcall(init_admin_reserve);
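
/*
 * Worked example for the two reserves above (illustrative arithmetic only):
 * with 1 GiB free at boot, free_kbytes = 1048576, so free_kbytes / 32 =
 * 32768 kB = 32 MiB, giving
 *
 *	sysctl_user_reserve_kbytes  = min(32 MiB, 128 MiB) = 32 MiB
 *	sysctl_admin_reserve_kbytes = min(32 MiB,   8 MiB) =  8 MiB
 *
 * The admin reserve only drops below 8 MiB when less than 256 MiB is free
 * (256 MiB / 32 = 8 MiB), matching the comment above init_admin_reserve().
 */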