mm: use mm_populate() for mremap() of VM_LOCKED vmas
[linux-2.6-block.git] / mm / mmap.c
1/*
2 * mm/mmap.c
3 *
4 * Written by obz.
5 *
6 * Address space accounting code <alan@lxorguk.ukuu.org.uk>
7 */
8
9#include <linux/slab.h>
10#include <linux/backing-dev.h>
11#include <linux/mm.h>
12#include <linux/shm.h>
13#include <linux/mman.h>
14#include <linux/pagemap.h>
15#include <linux/swap.h>
16#include <linux/syscalls.h>
17#include <linux/capability.h>
18#include <linux/init.h>
19#include <linux/file.h>
20#include <linux/fs.h>
21#include <linux/personality.h>
22#include <linux/security.h>
23#include <linux/hugetlb.h>
24#include <linux/profile.h>
25#include <linux/export.h>
26#include <linux/mount.h>
27#include <linux/mempolicy.h>
28#include <linux/rmap.h>
29#include <linux/mmu_notifier.h>
30#include <linux/perf_event.h>
31#include <linux/audit.h>
32#include <linux/khugepaged.h>
33#include <linux/uprobes.h>
34#include <linux/rbtree_augmented.h>
35#include <linux/sched/sysctl.h>
36
37#include <asm/uaccess.h>
38#include <asm/cacheflush.h>
39#include <asm/tlb.h>
40#include <asm/mmu_context.h>
41
42#include "internal.h"
43
44#ifndef arch_mmap_check
45#define arch_mmap_check(addr, len, flags) (0)
46#endif
47
48#ifndef arch_rebalance_pgtables
49#define arch_rebalance_pgtables(addr, len) (addr)
50#endif
51
52static void unmap_region(struct mm_struct *mm,
53 struct vm_area_struct *vma, struct vm_area_struct *prev,
54 unsigned long start, unsigned long end);
55
56/* description of effects of mapping type and prot in current implementation.
57 * this is due to the limited x86 page protection hardware. The expected
58 * behavior is in parens:
59 *
60 * map_type prot
61 * PROT_NONE PROT_READ PROT_WRITE PROT_EXEC
62 * MAP_SHARED r: (no) no r: (yes) yes r: (no) yes r: (no) yes
63 * w: (no) no w: (no) no w: (yes) yes w: (no) no
64 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
65 *
66 * MAP_PRIVATE r: (no) no r: (yes) yes r: (no) yes r: (no) yes
67 * w: (no) no w: (no) no w: (copy) copy w: (no) no
68 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
69 *
70 */
71pgprot_t protection_map[16] = {
72 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
73 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
74};
75
76pgprot_t vm_get_page_prot(unsigned long vm_flags)
77{
78 return __pgprot(pgprot_val(protection_map[vm_flags &
79 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
80 pgprot_val(arch_vm_get_page_prot(vm_flags)));
81}
82EXPORT_SYMBOL(vm_get_page_prot);
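/*
 * Illustrative aside (not part of the original file): a minimal sketch of
 * how the protection_map[] table above gets used.  Only the low
 * VM_READ|VM_WRITE|VM_EXEC|VM_SHARED bits of vm_flags select an entry, so a
 * private read/write mapping resolves to __P011 while its MAP_SHARED
 * counterpart resolves to __S011; mmap_region() later in this file follows
 * the same pattern when it initialises a new vma.
 */
static inline void example_set_page_prot(struct vm_area_struct *vma,
					 unsigned long vm_flags)
{
	vma->vm_flags = vm_flags;
	vma->vm_page_prot = vm_get_page_prot(vm_flags);
}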
83
84int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS; /* heuristic overcommit */
85int sysctl_overcommit_ratio __read_mostly = 50; /* default is 50% */
86int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
87/*
88 * Make sure vm_committed_as is in one cacheline and does not share it with
89 * other variables. It can be updated by several CPUs frequently.
90 */
91struct percpu_counter vm_committed_as ____cacheline_aligned_in_smp;
92
997071bc
S
93/*
94 * The global memory commitment made in the system can be a metric
95 * that can be used to drive ballooning decisions when Linux is hosted
96 * as a guest. On Hyper-V, the host implements a policy engine for dynamically
97 * balancing memory across competing virtual machines that are hosted.
98 * Several metrics drive this policy engine including the guest reported
99 * memory commitment.
100 */
101unsigned long vm_memory_committed(void)
102{
103 return percpu_counter_read_positive(&vm_committed_as);
104}
105EXPORT_SYMBOL_GPL(vm_memory_committed);
106
1da177e4
LT
107/*
108 * Check that a process has enough memory to allocate a new virtual
109 * mapping. 0 means there is enough memory for the allocation to
110 * succeed and -ENOMEM implies there is not.
111 *
112 * We currently support three overcommit policies, which are set via the
113 * vm.overcommit_memory sysctl. See Documentation/vm/overcommit-accounting
114 *
115 * Strict overcommit modes added 2002 Feb 26 by Alan Cox.
116 * Additional code 2002 Jul 20 by Robert Love.
117 *
118 * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise.
119 *
120 * Note this is a helper function intended to be used by LSMs which
121 * wish to use this logic.
122 */
34b4e4aa 123int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
1da177e4
LT
124{
125 unsigned long free, allowed;
126
127 vm_acct_memory(pages);
128
129 /*
130 * Sometimes we want to use more memory than we have
131 */
132 if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
133 return 0;
134
135 if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
c15bef30
DF
136 free = global_page_state(NR_FREE_PAGES);
137 free += global_page_state(NR_FILE_PAGES);
138
139 /*
140 * shmem pages shouldn't be counted as free in this
141 * case, they can't be purged, only swapped out, and
142 * that won't affect the overall amount of available
143 * memory in the system.
144 */
145 free -= global_page_state(NR_SHMEM);
1da177e4 146
1da177e4
LT
147 free += nr_swap_pages;
148
149 /*
150 * Any slabs which are created with the
151 * SLAB_RECLAIM_ACCOUNT flag claim to have contents
152 * which are reclaimable, under pressure. The dentry
153 * cache and most inode caches should fall into this
154 */
972d1a7b 155 free += global_page_state(NR_SLAB_RECLAIMABLE);
1da177e4 156
6d9f7839
HA
157 /*
158 * Leave reserved pages. The pages are not for anonymous pages.
159 */
c15bef30 160 if (free <= totalreserve_pages)
6d9f7839
HA
161 goto error;
162 else
c15bef30 163 free -= totalreserve_pages;
6d9f7839
HA
164
165 /*
166 * Leave the last 3% for root
167 */
1da177e4 168 if (!cap_sys_admin)
c15bef30 169 free -= free / 32;
1da177e4
LT
170
171 if (free > pages)
172 return 0;
6d9f7839
HA
173
174 goto error;
1da177e4
LT
175 }
176
177 allowed = (totalram_pages - hugetlb_total_pages())
178 * sysctl_overcommit_ratio / 100;
179 /*
180 * Leave the last 3% for root
181 */
182 if (!cap_sys_admin)
183 allowed -= allowed / 32;
184 allowed += total_swap_pages;
185
186 /* Don't let a single process grow too big:
187 leave 3% of the size of this process for other processes */
731572d3
AC
188 if (mm)
189 allowed -= mm->total_vm / 32;
1da177e4 190
00a62ce9 191 if (percpu_counter_read_positive(&vm_committed_as) < allowed)
1da177e4 192 return 0;
6d9f7839 193error:
1da177e4
LT
194 vm_unacct_memory(pages);
195
196 return -ENOMEM;
197}
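/*
 * Illustrative aside (not part of the original file): the OVERCOMMIT_NEVER
 * branch above reduces to the arithmetic sketched below.  With hypothetical
 * numbers -- 1024 MB of RAM, no hugetlb pages, the default 50% ratio,
 * 512 MB of swap and a non-root caller -- the commit limit is roughly
 * 512 MB - 3% + 512 MB ~= 1009 MB, further reduced by 3% of the calling
 * process's own total_vm.
 */
static unsigned long example_never_overcommit_limit(struct mm_struct *mm,
						    int cap_sys_admin)
{
	unsigned long allowed;

	allowed = (totalram_pages - hugetlb_total_pages())
		* sysctl_overcommit_ratio / 100;
	if (!cap_sys_admin)
		allowed -= allowed / 32;	/* keep the last 3% for root */
	allowed += total_swap_pages;
	if (mm)
		allowed -= mm->total_vm / 32;	/* leave 3% for other tasks */
	return allowed;
}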
198
1da177e4 199/*
3d48ae45 200 * Requires inode->i_mapping->i_mmap_mutex
1da177e4
LT
201 */
202static void __remove_shared_vm_struct(struct vm_area_struct *vma,
203 struct file *file, struct address_space *mapping)
204{
205 if (vma->vm_flags & VM_DENYWRITE)
d3ac7f89 206 atomic_inc(&file->f_path.dentry->d_inode->i_writecount);
1da177e4
LT
207 if (vma->vm_flags & VM_SHARED)
208 mapping->i_mmap_writable--;
209
210 flush_dcache_mmap_lock(mapping);
211 if (unlikely(vma->vm_flags & VM_NONLINEAR))
6b2dbba8 212 list_del_init(&vma->shared.nonlinear);
1da177e4 213 else
6b2dbba8 214 vma_interval_tree_remove(vma, &mapping->i_mmap);
1da177e4
LT
215 flush_dcache_mmap_unlock(mapping);
216}
217
218/*
6b2dbba8 219 * Unlink a file-based vm structure from its interval tree, to hide
a8fb5618 220 * vma from rmap and vmtruncate before freeing its page tables.
1da177e4 221 */
a8fb5618 222void unlink_file_vma(struct vm_area_struct *vma)
1da177e4
LT
223{
224 struct file *file = vma->vm_file;
225
1da177e4
LT
226 if (file) {
227 struct address_space *mapping = file->f_mapping;
3d48ae45 228 mutex_lock(&mapping->i_mmap_mutex);
1da177e4 229 __remove_shared_vm_struct(vma, file, mapping);
3d48ae45 230 mutex_unlock(&mapping->i_mmap_mutex);
1da177e4 231 }
a8fb5618
HD
232}
233
234/*
235 * Close a vm structure and free it, returning the next.
236 */
237static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
238{
239 struct vm_area_struct *next = vma->vm_next;
240
a8fb5618 241 might_sleep();
1da177e4
LT
242 if (vma->vm_ops && vma->vm_ops->close)
243 vma->vm_ops->close(vma);
e9714acf 244 if (vma->vm_file)
a8fb5618 245 fput(vma->vm_file);
f0be3d32 246 mpol_put(vma_policy(vma));
1da177e4 247 kmem_cache_free(vm_area_cachep, vma);
a8fb5618 248 return next;
1da177e4
LT
249}
250
e4eb1ff6
LT
251static unsigned long do_brk(unsigned long addr, unsigned long len);
252
6a6160a7 253SYSCALL_DEFINE1(brk, unsigned long, brk)
1da177e4
LT
254{
255 unsigned long rlim, retval;
256 unsigned long newbrk, oldbrk;
257 struct mm_struct *mm = current->mm;
a5b4592c 258 unsigned long min_brk;
128557ff 259 bool populate;
1da177e4
LT
260
261 down_write(&mm->mmap_sem);
262
a5b4592c 263#ifdef CONFIG_COMPAT_BRK
5520e894
JK
264 /*
265 * CONFIG_COMPAT_BRK can still be overridden by setting
266 * randomize_va_space to 2, which will still cause mm->start_brk
267 * to be arbitrarily shifted
268 */
4471a675 269 if (current->brk_randomized)
5520e894
JK
270 min_brk = mm->start_brk;
271 else
272 min_brk = mm->end_data;
a5b4592c
JK
273#else
274 min_brk = mm->start_brk;
275#endif
276 if (brk < min_brk)
1da177e4 277 goto out;
1e624196
RG
278
279 /*
280 * Check against rlimit here. If this check is done later after the test
281 * of oldbrk with newbrk then it can escape the test and let the data
282 * segment grow beyond its set limit in the case where the limit is
283 * not page aligned -Ram Gupta
284 */
59e99e5b 285 rlim = rlimit(RLIMIT_DATA);
c1d171a0
JK
286 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
287 (mm->end_data - mm->start_data) > rlim)
1e624196
RG
288 goto out;
289
1da177e4
LT
290 newbrk = PAGE_ALIGN(brk);
291 oldbrk = PAGE_ALIGN(mm->brk);
292 if (oldbrk == newbrk)
293 goto set_brk;
294
295 /* Always allow shrinking brk. */
296 if (brk <= mm->brk) {
297 if (!do_munmap(mm, newbrk, oldbrk-newbrk))
298 goto set_brk;
299 goto out;
300 }
301
1da177e4
LT
302 /* Check against existing mmap mappings. */
303 if (find_vma_intersection(mm, oldbrk, newbrk+PAGE_SIZE))
304 goto out;
305
306 /* Ok, looks good - let it rip. */
307 if (do_brk(oldbrk, newbrk-oldbrk) != oldbrk)
308 goto out;
128557ff 309
1da177e4
LT
310set_brk:
311 mm->brk = brk;
128557ff
ML
312 populate = newbrk > oldbrk && (mm->def_flags & VM_LOCKED) != 0;
313 up_write(&mm->mmap_sem);
314 if (populate)
315 mm_populate(oldbrk, newbrk - oldbrk);
316 return brk;
317
1da177e4
LT
318out:
319 retval = mm->brk;
320 up_write(&mm->mmap_sem);
321 return retval;
322}
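/*
 * Illustrative aside (not part of the original file): the mm_populate()
 * call above only runs when the heap grows while mm->def_flags carries
 * VM_LOCKED, which is what mlockall(MCL_FUTURE) arranges.  A hedged
 * userspace sketch of a caller that would take that path:
 *
 *	#include <sys/mman.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		mlockall(MCL_CURRENT | MCL_FUTURE);
 *		sbrk(1 << 20);	// growth is faulted in before brk() returns
 *		return 0;
 *	}
 */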
323
d3737187
ML
324static long vma_compute_subtree_gap(struct vm_area_struct *vma)
325{
326 unsigned long max, subtree_gap;
327 max = vma->vm_start;
328 if (vma->vm_prev)
329 max -= vma->vm_prev->vm_end;
330 if (vma->vm_rb.rb_left) {
331 subtree_gap = rb_entry(vma->vm_rb.rb_left,
332 struct vm_area_struct, vm_rb)->rb_subtree_gap;
333 if (subtree_gap > max)
334 max = subtree_gap;
335 }
336 if (vma->vm_rb.rb_right) {
337 subtree_gap = rb_entry(vma->vm_rb.rb_right,
338 struct vm_area_struct, vm_rb)->rb_subtree_gap;
339 if (subtree_gap > max)
340 max = subtree_gap;
341 }
342 return max;
343}
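/*
 * Illustrative aside (not part of the original file): a worked example of
 * the computation above.  For a vma spanning [0x3000, 0x4000) whose
 * predecessor ends at 0x1000, the gap in front of the vma is 0x2000; if the
 * left child's subtree advertises rb_subtree_gap 0x5000 and the right
 * child's 0x1000, this node's rb_subtree_gap becomes
 * max(0x2000, 0x5000, 0x1000) = 0x5000.
 */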
344
ed8ea815 345#ifdef CONFIG_DEBUG_VM_RB
1da177e4
LT
346static int browse_rb(struct rb_root *root)
347{
5a0768f6 348 int i = 0, j, bug = 0;
1da177e4
LT
349 struct rb_node *nd, *pn = NULL;
350 unsigned long prev = 0, pend = 0;
351
352 for (nd = rb_first(root); nd; nd = rb_next(nd)) {
353 struct vm_area_struct *vma;
354 vma = rb_entry(nd, struct vm_area_struct, vm_rb);
5a0768f6
ML
355 if (vma->vm_start < prev) {
356 printk("vm_start %lx prev %lx\n", vma->vm_start, prev);
357 bug = 1;
358 }
359 if (vma->vm_start < pend) {
1da177e4 360 printk("vm_start %lx pend %lx\n", vma->vm_start, pend);
5a0768f6
ML
361 bug = 1;
362 }
363 if (vma->vm_start > vma->vm_end) {
364 printk("vm_end %lx < vm_start %lx\n",
365 vma->vm_end, vma->vm_start);
366 bug = 1;
367 }
368 if (vma->rb_subtree_gap != vma_compute_subtree_gap(vma)) {
369 printk("free gap %lx, correct %lx\n",
370 vma->rb_subtree_gap,
371 vma_compute_subtree_gap(vma));
372 bug = 1;
373 }
1da177e4
LT
374 i++;
375 pn = nd;
d1af65d1
DM
376 prev = vma->vm_start;
377 pend = vma->vm_end;
1da177e4
LT
378 }
379 j = 0;
5a0768f6 380 for (nd = pn; nd; nd = rb_prev(nd))
1da177e4 381 j++;
5a0768f6
ML
382 if (i != j) {
383 printk("backwards %d, forwards %d\n", j, i);
384 bug = 1;
1da177e4 385 }
5a0768f6 386 return bug ? -1 : i;
1da177e4
LT
387}
388
d3737187
ML
389static void validate_mm_rb(struct rb_root *root, struct vm_area_struct *ignore)
390{
391 struct rb_node *nd;
392
393 for (nd = rb_first(root); nd; nd = rb_next(nd)) {
394 struct vm_area_struct *vma;
395 vma = rb_entry(nd, struct vm_area_struct, vm_rb);
396 BUG_ON(vma != ignore &&
397 vma->rb_subtree_gap != vma_compute_subtree_gap(vma));
1da177e4 398 }
1da177e4
LT
399}
400
401void validate_mm(struct mm_struct *mm)
402{
403 int bug = 0;
404 int i = 0;
5a0768f6 405 unsigned long highest_address = 0;
ed8ea815
ML
406 struct vm_area_struct *vma = mm->mmap;
407 while (vma) {
408 struct anon_vma_chain *avc;
63c3b902 409 vma_lock_anon_vma(vma);
ed8ea815
ML
410 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
411 anon_vma_interval_tree_verify(avc);
63c3b902 412 vma_unlock_anon_vma(vma);
5a0768f6 413 highest_address = vma->vm_end;
ed8ea815 414 vma = vma->vm_next;
1da177e4
LT
415 i++;
416 }
5a0768f6
ML
417 if (i != mm->map_count) {
418 printk("map_count %d vm_next %d\n", mm->map_count, i);
419 bug = 1;
420 }
421 if (highest_address != mm->highest_vm_end) {
422 printk("mm->highest_vm_end %lx, found %lx\n",
423 mm->highest_vm_end, highest_address);
424 bug = 1;
425 }
1da177e4 426 i = browse_rb(&mm->mm_rb);
5a0768f6
ML
427 if (i != mm->map_count) {
428 printk("map_count %d rb %d\n", mm->map_count, i);
429 bug = 1;
430 }
46a350ef 431 BUG_ON(bug);
1da177e4
LT
432}
433#else
d3737187 434#define validate_mm_rb(root, ignore) do { } while (0)
1da177e4
LT
435#define validate_mm(mm) do { } while (0)
436#endif
437
d3737187
ML
438RB_DECLARE_CALLBACKS(static, vma_gap_callbacks, struct vm_area_struct, vm_rb,
439 unsigned long, rb_subtree_gap, vma_compute_subtree_gap)
440
441/*
442 * Update augmented rbtree rb_subtree_gap values after vma->vm_start or
443 * vma->vm_prev->vm_end values changed, without modifying the vma's position
444 * in the rbtree.
445 */
446static void vma_gap_update(struct vm_area_struct *vma)
447{
448 /*
449 * As it turns out, RB_DECLARE_CALLBACKS() already created a callback
450 * function that does exactly what we want.
451 */
452 vma_gap_callbacks_propagate(&vma->vm_rb, NULL);
453}
454
455static inline void vma_rb_insert(struct vm_area_struct *vma,
456 struct rb_root *root)
457{
458 /* All rb_subtree_gap values must be consistent prior to insertion */
459 validate_mm_rb(root, NULL);
460
461 rb_insert_augmented(&vma->vm_rb, root, &vma_gap_callbacks);
462}
463
464static void vma_rb_erase(struct vm_area_struct *vma, struct rb_root *root)
465{
466 /*
467 * All rb_subtree_gap values must be consistent prior to erase,
468 * with the possible exception of the vma being erased.
469 */
470 validate_mm_rb(root, vma);
471
472 /*
473 * Note rb_erase_augmented is a fairly large inline function,
474 * so make sure we instantiate it only once with our desired
475 * augmented rbtree callbacks.
476 */
477 rb_erase_augmented(&vma->vm_rb, root, &vma_gap_callbacks);
478}
479
bf181b9f
ML
480/*
481 * vma has some anon_vma assigned, and is already inserted on that
482 * anon_vma's interval trees.
483 *
484 * Before updating the vma's vm_start / vm_end / vm_pgoff fields, the
485 * vma must be removed from the anon_vma's interval trees using
486 * anon_vma_interval_tree_pre_update_vma().
487 *
488 * After the update, the vma will be reinserted using
489 * anon_vma_interval_tree_post_update_vma().
490 *
491 * The entire update must be protected by exclusive mmap_sem and by
492 * the root anon_vma's mutex.
493 */
494static inline void
495anon_vma_interval_tree_pre_update_vma(struct vm_area_struct *vma)
496{
497 struct anon_vma_chain *avc;
498
499 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
500 anon_vma_interval_tree_remove(avc, &avc->anon_vma->rb_root);
501}
502
503static inline void
504anon_vma_interval_tree_post_update_vma(struct vm_area_struct *vma)
505{
506 struct anon_vma_chain *avc;
507
508 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
509 anon_vma_interval_tree_insert(avc, &avc->anon_vma->rb_root);
510}
511
6597d783
HD
512static int find_vma_links(struct mm_struct *mm, unsigned long addr,
513 unsigned long end, struct vm_area_struct **pprev,
514 struct rb_node ***rb_link, struct rb_node **rb_parent)
1da177e4 515{
6597d783 516 struct rb_node **__rb_link, *__rb_parent, *rb_prev;
1da177e4
LT
517
518 __rb_link = &mm->mm_rb.rb_node;
519 rb_prev = __rb_parent = NULL;
1da177e4
LT
520
521 while (*__rb_link) {
522 struct vm_area_struct *vma_tmp;
523
524 __rb_parent = *__rb_link;
525 vma_tmp = rb_entry(__rb_parent, struct vm_area_struct, vm_rb);
526
527 if (vma_tmp->vm_end > addr) {
6597d783
HD
528 /* Fail if an existing vma overlaps the area */
529 if (vma_tmp->vm_start < end)
530 return -ENOMEM;
1da177e4
LT
531 __rb_link = &__rb_parent->rb_left;
532 } else {
533 rb_prev = __rb_parent;
534 __rb_link = &__rb_parent->rb_right;
535 }
536 }
537
538 *pprev = NULL;
539 if (rb_prev)
540 *pprev = rb_entry(rb_prev, struct vm_area_struct, vm_rb);
541 *rb_link = __rb_link;
542 *rb_parent = __rb_parent;
6597d783 543 return 0;
1da177e4
LT
544}
545
1da177e4
LT
546void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma,
547 struct rb_node **rb_link, struct rb_node *rb_parent)
548{
d3737187
ML
549 /* Update tracking information for the gap following the new vma. */
550 if (vma->vm_next)
551 vma_gap_update(vma->vm_next);
552 else
553 mm->highest_vm_end = vma->vm_end;
554
555 /*
556 * vma->vm_prev wasn't known when we followed the rbtree to find the
557 * correct insertion point for that vma. As a result, we could not
558 * update the vma vm_rb parents rb_subtree_gap values on the way down.
559 * So, we first insert the vma with a zero rb_subtree_gap value
560 * (to be consistent with what we did on the way down), and then
561 * immediately update the gap to the correct value. Finally we
562 * rebalance the rbtree after all augmented values have been set.
563 */
1da177e4 564 rb_link_node(&vma->vm_rb, rb_parent, rb_link);
d3737187
ML
565 vma->rb_subtree_gap = 0;
566 vma_gap_update(vma);
567 vma_rb_insert(vma, &mm->mm_rb);
1da177e4
LT
568}
569
cb8f488c 570static void __vma_link_file(struct vm_area_struct *vma)
1da177e4 571{
48aae425 572 struct file *file;
1da177e4
LT
573
574 file = vma->vm_file;
575 if (file) {
576 struct address_space *mapping = file->f_mapping;
577
578 if (vma->vm_flags & VM_DENYWRITE)
d3ac7f89 579 atomic_dec(&file->f_path.dentry->d_inode->i_writecount);
1da177e4
LT
580 if (vma->vm_flags & VM_SHARED)
581 mapping->i_mmap_writable++;
582
583 flush_dcache_mmap_lock(mapping);
584 if (unlikely(vma->vm_flags & VM_NONLINEAR))
585 vma_nonlinear_insert(vma, &mapping->i_mmap_nonlinear);
586 else
6b2dbba8 587 vma_interval_tree_insert(vma, &mapping->i_mmap);
1da177e4
LT
588 flush_dcache_mmap_unlock(mapping);
589 }
590}
591
592static void
593__vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
594 struct vm_area_struct *prev, struct rb_node **rb_link,
595 struct rb_node *rb_parent)
596{
597 __vma_link_list(mm, vma, prev, rb_parent);
598 __vma_link_rb(mm, vma, rb_link, rb_parent);
1da177e4
LT
599}
600
601static void vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
602 struct vm_area_struct *prev, struct rb_node **rb_link,
603 struct rb_node *rb_parent)
604{
605 struct address_space *mapping = NULL;
606
607 if (vma->vm_file)
608 mapping = vma->vm_file->f_mapping;
609
97a89413 610 if (mapping)
3d48ae45 611 mutex_lock(&mapping->i_mmap_mutex);
1da177e4
LT
612
613 __vma_link(mm, vma, prev, rb_link, rb_parent);
614 __vma_link_file(vma);
615
1da177e4 616 if (mapping)
3d48ae45 617 mutex_unlock(&mapping->i_mmap_mutex);
1da177e4
LT
618
619 mm->map_count++;
620 validate_mm(mm);
621}
622
623/*
88f6b4c3 624 * Helper for vma_adjust() in the split_vma insert case: insert a vma into the
6b2dbba8 625 * mm's list and rbtree. It has already been inserted into the interval tree.
1da177e4 626 */
48aae425 627static void __insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
1da177e4 628{
6597d783 629 struct vm_area_struct *prev;
48aae425 630 struct rb_node **rb_link, *rb_parent;
1da177e4 631
6597d783
HD
632 if (find_vma_links(mm, vma->vm_start, vma->vm_end,
633 &prev, &rb_link, &rb_parent))
634 BUG();
1da177e4
LT
635 __vma_link(mm, vma, prev, rb_link, rb_parent);
636 mm->map_count++;
637}
638
639static inline void
640__vma_unlink(struct mm_struct *mm, struct vm_area_struct *vma,
641 struct vm_area_struct *prev)
642{
d3737187 643 struct vm_area_struct *next;
297c5eee 644
d3737187
ML
645 vma_rb_erase(vma, &mm->mm_rb);
646 prev->vm_next = next = vma->vm_next;
297c5eee
LT
647 if (next)
648 next->vm_prev = prev;
1da177e4
LT
649 if (mm->mmap_cache == vma)
650 mm->mmap_cache = prev;
651}
652
653/*
654 * We cannot adjust vm_start, vm_end, vm_pgoff fields of a vma that
655 * is already present in an i_mmap tree without adjusting the tree.
656 * The following helper function should be used when such adjustments
657 * are necessary. The "insert" vma (if any) is to be inserted
658 * before we drop the necessary locks.
659 */
5beb4930 660int vma_adjust(struct vm_area_struct *vma, unsigned long start,
1da177e4
LT
661 unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert)
662{
663 struct mm_struct *mm = vma->vm_mm;
664 struct vm_area_struct *next = vma->vm_next;
665 struct vm_area_struct *importer = NULL;
666 struct address_space *mapping = NULL;
6b2dbba8 667 struct rb_root *root = NULL;
012f1800 668 struct anon_vma *anon_vma = NULL;
1da177e4 669 struct file *file = vma->vm_file;
d3737187 670 bool start_changed = false, end_changed = false;
1da177e4
LT
671 long adjust_next = 0;
672 int remove_next = 0;
673
674 if (next && !insert) {
287d97ac
LT
675 struct vm_area_struct *exporter = NULL;
676
1da177e4
LT
677 if (end >= next->vm_end) {
678 /*
679 * vma expands, overlapping all the next, and
680 * perhaps the one after too (mprotect case 6).
681 */
682again: remove_next = 1 + (end > next->vm_end);
683 end = next->vm_end;
287d97ac 684 exporter = next;
1da177e4
LT
685 importer = vma;
686 } else if (end > next->vm_start) {
687 /*
688 * vma expands, overlapping part of the next:
689 * mprotect case 5 shifting the boundary up.
690 */
691 adjust_next = (end - next->vm_start) >> PAGE_SHIFT;
287d97ac 692 exporter = next;
1da177e4
LT
693 importer = vma;
694 } else if (end < vma->vm_end) {
695 /*
696 * vma shrinks, and !insert tells it's not
697 * split_vma inserting another: so it must be
698 * mprotect case 4 shifting the boundary down.
699 */
700 adjust_next = - ((vma->vm_end - end) >> PAGE_SHIFT);
287d97ac 701 exporter = vma;
1da177e4
LT
702 importer = next;
703 }
1da177e4 704
5beb4930
RR
705 /*
706 * Easily overlooked: when mprotect shifts the boundary,
707 * make sure the expanding vma has anon_vma set if the
708 * shrinking vma had, to cover any anon pages imported.
709 */
287d97ac
LT
710 if (exporter && exporter->anon_vma && !importer->anon_vma) {
711 if (anon_vma_clone(importer, exporter))
5beb4930 712 return -ENOMEM;
287d97ac 713 importer->anon_vma = exporter->anon_vma;
5beb4930
RR
714 }
715 }
716
1da177e4
LT
717 if (file) {
718 mapping = file->f_mapping;
682968e0 719 if (!(vma->vm_flags & VM_NONLINEAR)) {
1da177e4 720 root = &mapping->i_mmap;
cbc91f71 721 uprobe_munmap(vma, vma->vm_start, vma->vm_end);
682968e0
SD
722
723 if (adjust_next)
cbc91f71
SD
724 uprobe_munmap(next, next->vm_start,
725 next->vm_end);
682968e0
SD
726 }
727
3d48ae45 728 mutex_lock(&mapping->i_mmap_mutex);
1da177e4 729 if (insert) {
1da177e4 730 /*
6b2dbba8 731 * Put into interval tree now, so instantiated pages
1da177e4
LT
732 * are visible to arm/parisc __flush_dcache_page
733 * throughout; but we cannot insert into address
734 * space until vma start or end is updated.
735 */
736 __vma_link_file(insert);
737 }
738 }
739
94fcc585
AA
740 vma_adjust_trans_huge(vma, start, end, adjust_next);
741
bf181b9f
ML
742 anon_vma = vma->anon_vma;
743 if (!anon_vma && adjust_next)
744 anon_vma = next->anon_vma;
745 if (anon_vma) {
ca42b26a
ML
746 VM_BUG_ON(adjust_next && next->anon_vma &&
747 anon_vma != next->anon_vma);
4fc3f1d6 748 anon_vma_lock_write(anon_vma);
bf181b9f
ML
749 anon_vma_interval_tree_pre_update_vma(vma);
750 if (adjust_next)
751 anon_vma_interval_tree_pre_update_vma(next);
752 }
012f1800 753
1da177e4
LT
754 if (root) {
755 flush_dcache_mmap_lock(mapping);
6b2dbba8 756 vma_interval_tree_remove(vma, root);
1da177e4 757 if (adjust_next)
6b2dbba8 758 vma_interval_tree_remove(next, root);
1da177e4
LT
759 }
760
d3737187
ML
761 if (start != vma->vm_start) {
762 vma->vm_start = start;
763 start_changed = true;
764 }
765 if (end != vma->vm_end) {
766 vma->vm_end = end;
767 end_changed = true;
768 }
1da177e4
LT
769 vma->vm_pgoff = pgoff;
770 if (adjust_next) {
771 next->vm_start += adjust_next << PAGE_SHIFT;
772 next->vm_pgoff += adjust_next;
773 }
774
775 if (root) {
776 if (adjust_next)
6b2dbba8
ML
777 vma_interval_tree_insert(next, root);
778 vma_interval_tree_insert(vma, root);
1da177e4
LT
779 flush_dcache_mmap_unlock(mapping);
780 }
781
782 if (remove_next) {
783 /*
784 * vma_merge has merged next into vma, and needs
785 * us to remove next before dropping the locks.
786 */
787 __vma_unlink(mm, next, vma);
788 if (file)
789 __remove_shared_vm_struct(next, file, mapping);
1da177e4
LT
790 } else if (insert) {
791 /*
792 * split_vma has split insert from vma, and needs
793 * us to insert it before dropping the locks
794 * (it may either follow vma or precede it).
795 */
796 __insert_vm_struct(mm, insert);
d3737187
ML
797 } else {
798 if (start_changed)
799 vma_gap_update(vma);
800 if (end_changed) {
801 if (!next)
802 mm->highest_vm_end = end;
803 else if (!adjust_next)
804 vma_gap_update(next);
805 }
1da177e4
LT
806 }
807
bf181b9f
ML
808 if (anon_vma) {
809 anon_vma_interval_tree_post_update_vma(vma);
810 if (adjust_next)
811 anon_vma_interval_tree_post_update_vma(next);
012f1800 812 anon_vma_unlock(anon_vma);
bf181b9f 813 }
1da177e4 814 if (mapping)
3d48ae45 815 mutex_unlock(&mapping->i_mmap_mutex);
1da177e4 816
2b144498 817 if (root) {
7b2d81d4 818 uprobe_mmap(vma);
2b144498
SD
819
820 if (adjust_next)
7b2d81d4 821 uprobe_mmap(next);
2b144498
SD
822 }
823
1da177e4 824 if (remove_next) {
925d1c40 825 if (file) {
cbc91f71 826 uprobe_munmap(next, next->vm_start, next->vm_end);
1da177e4 827 fput(file);
925d1c40 828 }
5beb4930
RR
829 if (next->anon_vma)
830 anon_vma_merge(vma, next);
1da177e4 831 mm->map_count--;
f0be3d32 832 mpol_put(vma_policy(next));
1da177e4
LT
833 kmem_cache_free(vm_area_cachep, next);
834 /*
835 * In mprotect's case 6 (see comments on vma_merge),
836 * we must remove another next too. It would clutter
837 * up the code too much to do both in one go.
838 */
d3737187
ML
839 next = vma->vm_next;
840 if (remove_next == 2)
1da177e4 841 goto again;
d3737187
ML
842 else if (next)
843 vma_gap_update(next);
844 else
845 mm->highest_vm_end = end;
1da177e4 846 }
2b144498 847 if (insert && file)
7b2d81d4 848 uprobe_mmap(insert);
1da177e4
LT
849
850 validate_mm(mm);
5beb4930
RR
851
852 return 0;
1da177e4
LT
853}
854
855/*
856 * If the vma has a ->close operation then the driver probably needs to release
857 * per-vma resources, so we don't attempt to merge those.
858 */
1da177e4
LT
859static inline int is_mergeable_vma(struct vm_area_struct *vma,
860 struct file *file, unsigned long vm_flags)
861{
0b173bc4 862 if (vma->vm_flags ^ vm_flags)
1da177e4
LT
863 return 0;
864 if (vma->vm_file != file)
865 return 0;
866 if (vma->vm_ops && vma->vm_ops->close)
867 return 0;
868 return 1;
869}
870
871static inline int is_mergeable_anon_vma(struct anon_vma *anon_vma1,
965f55de
SL
872 struct anon_vma *anon_vma2,
873 struct vm_area_struct *vma)
1da177e4 874{
965f55de
SL
875 /*
876 * The list_is_singular() test is to avoid merging VMA cloned from
877 * parents. This can improve scalability caused by anon_vma lock.
878 */
879 if ((!anon_vma1 || !anon_vma2) && (!vma ||
880 list_is_singular(&vma->anon_vma_chain)))
881 return 1;
882 return anon_vma1 == anon_vma2;
1da177e4
LT
883}
884
885/*
886 * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
887 * in front of (at a lower virtual address and file offset than) the vma.
888 *
889 * We cannot merge two vmas if they have differently assigned (non-NULL)
890 * anon_vmas, nor if same anon_vma is assigned but offsets incompatible.
891 *
892 * We don't check here for the merged mmap wrapping around the end of pagecache
893 * indices (16TB on ia32) because do_mmap_pgoff() does not permit mmap's which
894 * wrap, nor mmaps which cover the final page at index -1UL.
895 */
896static int
897can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
898 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
899{
900 if (is_mergeable_vma(vma, file, vm_flags) &&
965f55de 901 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
1da177e4
LT
902 if (vma->vm_pgoff == vm_pgoff)
903 return 1;
904 }
905 return 0;
906}
907
908/*
909 * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
910 * beyond (at a higher virtual address and file offset than) the vma.
911 *
912 * We cannot merge two vmas if they have differently assigned (non-NULL)
913 * anon_vmas, nor if same anon_vma is assigned but offsets incompatible.
914 */
915static int
916can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
917 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
918{
919 if (is_mergeable_vma(vma, file, vm_flags) &&
965f55de 920 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
1da177e4
LT
921 pgoff_t vm_pglen;
922 vm_pglen = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
923 if (vma->vm_pgoff + vm_pglen == vm_pgoff)
924 return 1;
925 }
926 return 0;
927}
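/*
 * Illustrative aside (not part of the original file): a worked example of
 * the pgoff checks above.  Let vma map file pages at [0x10000, 0x12000)
 * with vm_pgoff 4, so vm_pglen is 2 (4K pages).  A new two-page request
 * starting at 0x12000 satisfies can_vma_merge_after() only if its pgoff is
 * 6 (4 + 2 == 6), and a new two-page request ending at 0x10000 satisfies
 * can_vma_merge_before() only if its pgoff is 2 (2 + 2 == 4), so the merged
 * vma always maps one contiguous run of the file.
 */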
928
929/*
930 * Given a mapping request (addr,end,vm_flags,file,pgoff), figure out
931 * whether that can be merged with its predecessor or its successor.
932 * Or both (it neatly fills a hole).
933 *
934 * In most cases - when called for mmap, brk or mremap - [addr,end) is
935 * certain not to be mapped by the time vma_merge is called; but when
936 * called for mprotect, it is certain to be already mapped (either at
937 * an offset within prev, or at the start of next), and the flags of
938 * this area are about to be changed to vm_flags - and the no-change
939 * case has already been eliminated.
940 *
941 * The following mprotect cases have to be considered, where AAAA is
942 * the area passed down from mprotect_fixup, never extending beyond one
943 * vma, PPPPPP is the prev vma specified, and NNNNNN the next vma after:
944 *
945 * AAAA AAAA AAAA AAAA
946 * PPPPPPNNNNNN PPPPPPNNNNNN PPPPPPNNNNNN PPPPNNNNXXXX
947 * cannot merge might become might become might become
948 * PPNNNNNNNNNN PPPPPPPPPPNN PPPPPPPPPPPP 6 or
949 * mmap, brk or case 4 below case 5 below PPPPPPPPXXXX 7 or
950 * mremap move: PPPPNNNNNNNN 8
951 * AAAA
952 * PPPP NNNN PPPPPPPPPPPP PPPPPPPPNNNN PPPPNNNNNNNN
953 * might become case 1 below case 2 below case 3 below
954 *
955 * Odd one out? Case 8, because it extends NNNN but needs flags of XXXX:
956 * mprotect_fixup updates vm_flags & vm_page_prot on successful return.
957 */
958struct vm_area_struct *vma_merge(struct mm_struct *mm,
959 struct vm_area_struct *prev, unsigned long addr,
960 unsigned long end, unsigned long vm_flags,
961 struct anon_vma *anon_vma, struct file *file,
962 pgoff_t pgoff, struct mempolicy *policy)
963{
964 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
965 struct vm_area_struct *area, *next;
5beb4930 966 int err;
1da177e4
LT
967
968 /*
969 * We later require that vma->vm_flags == vm_flags,
970 * so this tests vma->vm_flags & VM_SPECIAL, too.
971 */
972 if (vm_flags & VM_SPECIAL)
973 return NULL;
974
975 if (prev)
976 next = prev->vm_next;
977 else
978 next = mm->mmap;
979 area = next;
980 if (next && next->vm_end == end) /* cases 6, 7, 8 */
981 next = next->vm_next;
982
983 /*
984 * Can it merge with the predecessor?
985 */
986 if (prev && prev->vm_end == addr &&
987 mpol_equal(vma_policy(prev), policy) &&
988 can_vma_merge_after(prev, vm_flags,
989 anon_vma, file, pgoff)) {
990 /*
991 * OK, it can. Can we now merge in the successor as well?
992 */
993 if (next && end == next->vm_start &&
994 mpol_equal(policy, vma_policy(next)) &&
995 can_vma_merge_before(next, vm_flags,
996 anon_vma, file, pgoff+pglen) &&
997 is_mergeable_anon_vma(prev->anon_vma,
965f55de 998 next->anon_vma, NULL)) {
1da177e4 999 /* cases 1, 6 */
5beb4930 1000 err = vma_adjust(prev, prev->vm_start,
1da177e4
LT
1001 next->vm_end, prev->vm_pgoff, NULL);
1002 } else /* cases 2, 5, 7 */
5beb4930 1003 err = vma_adjust(prev, prev->vm_start,
1da177e4 1004 end, prev->vm_pgoff, NULL);
5beb4930
RR
1005 if (err)
1006 return NULL;
b15d00b6 1007 khugepaged_enter_vma_merge(prev);
1da177e4
LT
1008 return prev;
1009 }
1010
1011 /*
1012 * Can this new request be merged in front of next?
1013 */
1014 if (next && end == next->vm_start &&
1015 mpol_equal(policy, vma_policy(next)) &&
1016 can_vma_merge_before(next, vm_flags,
1017 anon_vma, file, pgoff+pglen)) {
1018 if (prev && addr < prev->vm_end) /* case 4 */
5beb4930 1019 err = vma_adjust(prev, prev->vm_start,
1da177e4
LT
1020 addr, prev->vm_pgoff, NULL);
1021 else /* cases 3, 8 */
5beb4930 1022 err = vma_adjust(area, addr, next->vm_end,
1da177e4 1023 next->vm_pgoff - pglen, NULL);
5beb4930
RR
1024 if (err)
1025 return NULL;
b15d00b6 1026 khugepaged_enter_vma_merge(area);
1da177e4
LT
1027 return area;
1028 }
1029
1030 return NULL;
1031}
1032
d0e9fe17
LT
1033/*
1034 * Rough compatibility check to quickly see if it's even worth looking
1035 * at sharing an anon_vma.
1036 *
1037 * They need to have the same vm_file, and the flags can only differ
1038 * in things that mprotect may change.
1039 *
1040 * NOTE! The fact that we share an anon_vma doesn't _have_ to mean that
1041 * we can merge the two vma's. For example, we refuse to merge a vma if
1042 * there is a vm_ops->close() function, because that indicates that the
1043 * driver is doing some kind of reference counting. But that doesn't
1044 * really matter for the anon_vma sharing case.
1045 */
1046static int anon_vma_compatible(struct vm_area_struct *a, struct vm_area_struct *b)
1047{
1048 return a->vm_end == b->vm_start &&
1049 mpol_equal(vma_policy(a), vma_policy(b)) &&
1050 a->vm_file == b->vm_file &&
1051 !((a->vm_flags ^ b->vm_flags) & ~(VM_READ|VM_WRITE|VM_EXEC)) &&
1052 b->vm_pgoff == a->vm_pgoff + ((b->vm_start - a->vm_start) >> PAGE_SHIFT);
1053}
1054
1055/*
1056 * Do some basic sanity checking to see if we can re-use the anon_vma
1057 * from 'old'. The 'a'/'b' vma's are in VM order - one of them will be
1058 * the same as 'old', the other will be the new one that is trying
1059 * to share the anon_vma.
1060 *
1061 * NOTE! This runs with mm_sem held for reading, so it is possible that
1062 * the anon_vma of 'old' is concurrently in the process of being set up
1063 * by another page fault trying to merge _that_. But that's ok: if it
1064 * is being set up, that automatically means that it will be a singleton
1065 * acceptable for merging, so we can do all of this optimistically. But
1066 * we do that ACCESS_ONCE() to make sure that we never re-load the pointer.
1067 *
1068 * IOW: that the "list_is_singular()" test on the anon_vma_chain only
1069 * matters for the 'stable anon_vma' case (ie the thing we want to avoid
1070 * is to return an anon_vma that is "complex" due to having gone through
1071 * a fork).
1072 *
1073 * We also make sure that the two vma's are compatible (adjacent,
1074 * and with the same memory policies). That's all stable, even with just
1075 * a read lock on the mm_sem.
1076 */
1077static struct anon_vma *reusable_anon_vma(struct vm_area_struct *old, struct vm_area_struct *a, struct vm_area_struct *b)
1078{
1079 if (anon_vma_compatible(a, b)) {
1080 struct anon_vma *anon_vma = ACCESS_ONCE(old->anon_vma);
1081
1082 if (anon_vma && list_is_singular(&old->anon_vma_chain))
1083 return anon_vma;
1084 }
1085 return NULL;
1086}
1087
1da177e4
LT
1088/*
1089 * find_mergeable_anon_vma is used by anon_vma_prepare, to check
1090 * neighbouring vmas for a suitable anon_vma, before it goes off
1091 * to allocate a new anon_vma. It checks because a repetitive
1092 * sequence of mprotects and faults may otherwise lead to distinct
1093 * anon_vmas being allocated, preventing vma merge in subsequent
1094 * mprotect.
1095 */
1096struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma)
1097{
d0e9fe17 1098 struct anon_vma *anon_vma;
1da177e4 1099 struct vm_area_struct *near;
1da177e4
LT
1100
1101 near = vma->vm_next;
1102 if (!near)
1103 goto try_prev;
1104
d0e9fe17
LT
1105 anon_vma = reusable_anon_vma(near, vma, near);
1106 if (anon_vma)
1107 return anon_vma;
1da177e4 1108try_prev:
9be34c9d 1109 near = vma->vm_prev;
1da177e4
LT
1110 if (!near)
1111 goto none;
1112
d0e9fe17
LT
1113 anon_vma = reusable_anon_vma(near, near, vma);
1114 if (anon_vma)
1115 return anon_vma;
1da177e4
LT
1116none:
1117 /*
1118 * There's no absolute need to look only at touching neighbours:
1119 * we could search further afield for "compatible" anon_vmas.
1120 * But it would probably just be a waste of time searching,
1121 * or lead to too many vmas hanging off the same anon_vma.
1122 * We're trying to allow mprotect remerging later on,
1123 * not trying to minimize memory used for anon_vmas.
1124 */
1125 return NULL;
1126}
1127
1128#ifdef CONFIG_PROC_FS
ab50b8ed 1129void vm_stat_account(struct mm_struct *mm, unsigned long flags,
1da177e4
LT
1130 struct file *file, long pages)
1131{
1132 const unsigned long stack_flags
1133 = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
1134
44de9d0c
HS
1135 mm->total_vm += pages;
1136
1da177e4
LT
1137 if (file) {
1138 mm->shared_vm += pages;
1139 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
1140 mm->exec_vm += pages;
1141 } else if (flags & stack_flags)
1142 mm->stack_vm += pages;
1da177e4
LT
1143}
1144#endif /* CONFIG_PROC_FS */
1145
40401530
AV
1146/*
1147 * If a hint addr is less than mmap_min_addr change hint to be as
1148 * low as possible but still greater than mmap_min_addr
1149 */
1150static inline unsigned long round_hint_to_min(unsigned long hint)
1151{
1152 hint &= PAGE_MASK;
1153 if (((void *)hint != NULL) &&
1154 (hint < mmap_min_addr))
1155 return PAGE_ALIGN(mmap_min_addr);
1156 return hint;
1157}
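/*
 * Illustrative aside (not part of the original file): with a hypothetical
 * mmap_min_addr of 0x10000, a non-NULL hint of 0x1234 is first masked down
 * to 0x1000 and then rounded up to PAGE_ALIGN(0x10000) = 0x10000, whereas a
 * NULL hint is returned unchanged so get_unmapped_area() may pick any
 * address it likes.
 */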
1158
1da177e4 1159/*
27f5de79 1160 * The caller must hold down_write(&current->mm->mmap_sem).
1da177e4
LT
1161 */
1162
e3fc629d 1163unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
1da177e4 1164 unsigned long len, unsigned long prot,
bebeb3d6
ML
1165 unsigned long flags, unsigned long pgoff,
1166 bool *populate)
1da177e4
LT
1167{
1168 struct mm_struct * mm = current->mm;
1da177e4 1169 struct inode *inode;
ca16d140 1170 vm_flags_t vm_flags;
1da177e4 1171
bebeb3d6
ML
1172 *populate = false;
1173
1da177e4
LT
1174 /*
1175 * Does the application expect PROT_READ to imply PROT_EXEC?
1176 *
1177 * (the exception is when the underlying filesystem is noexec
1178 * mounted, in which case we don't add PROT_EXEC.)
1179 */
1180 if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
d3ac7f89 1181 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
1da177e4
LT
1182 prot |= PROT_EXEC;
1183
1184 if (!len)
1185 return -EINVAL;
1186
7cd94146
EP
1187 if (!(flags & MAP_FIXED))
1188 addr = round_hint_to_min(addr);
1189
1da177e4
LT
1190 /* Careful about overflows.. */
1191 len = PAGE_ALIGN(len);
9206de95 1192 if (!len)
1da177e4
LT
1193 return -ENOMEM;
1194
1195 /* offset overflow? */
1196 if ((pgoff + (len >> PAGE_SHIFT)) < pgoff)
1197 return -EOVERFLOW;
1198
1199 /* Too many mappings? */
1200 if (mm->map_count > sysctl_max_map_count)
1201 return -ENOMEM;
1202
1203 /* Obtain the address to map to. we verify (or select) it and ensure
1204 * that it represents a valid section of the address space.
1205 */
1206 addr = get_unmapped_area(file, addr, len, pgoff, flags);
1207 if (addr & ~PAGE_MASK)
1208 return addr;
1209
1210 /* Do simple checking here so the lower-level routines won't have
1211 * to. we assume access permissions have been handled by the open
1212 * of the memory object, so we don't do any here.
1213 */
1214 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
1215 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
1216
cdf7b341 1217 if (flags & MAP_LOCKED)
1da177e4
LT
1218 if (!can_do_mlock())
1219 return -EPERM;
ba470de4 1220
1da177e4
LT
1221 /* mlock MCL_FUTURE? */
1222 if (vm_flags & VM_LOCKED) {
1223 unsigned long locked, lock_limit;
93ea1d0a
CW
1224 locked = len >> PAGE_SHIFT;
1225 locked += mm->locked_vm;
59e99e5b 1226 lock_limit = rlimit(RLIMIT_MEMLOCK);
93ea1d0a 1227 lock_limit >>= PAGE_SHIFT;
1da177e4
LT
1228 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
1229 return -EAGAIN;
1230 }
1231
d3ac7f89 1232 inode = file ? file->f_path.dentry->d_inode : NULL;
1da177e4
LT
1233
1234 if (file) {
1235 switch (flags & MAP_TYPE) {
1236 case MAP_SHARED:
1237 if ((prot&PROT_WRITE) && !(file->f_mode&FMODE_WRITE))
1238 return -EACCES;
1239
1240 /*
1241 * Make sure we don't allow writing to an append-only
1242 * file..
1243 */
1244 if (IS_APPEND(inode) && (file->f_mode & FMODE_WRITE))
1245 return -EACCES;
1246
1247 /*
1248 * Make sure there are no mandatory locks on the file.
1249 */
1250 if (locks_verify_locked(inode))
1251 return -EAGAIN;
1252
1253 vm_flags |= VM_SHARED | VM_MAYSHARE;
1254 if (!(file->f_mode & FMODE_WRITE))
1255 vm_flags &= ~(VM_MAYWRITE | VM_SHARED);
1256
1257 /* fall through */
1258 case MAP_PRIVATE:
1259 if (!(file->f_mode & FMODE_READ))
1260 return -EACCES;
d3ac7f89 1261 if (file->f_path.mnt->mnt_flags & MNT_NOEXEC) {
80c5606c
LT
1262 if (vm_flags & VM_EXEC)
1263 return -EPERM;
1264 vm_flags &= ~VM_MAYEXEC;
1265 }
80c5606c
LT
1266
1267 if (!file->f_op || !file->f_op->mmap)
1268 return -ENODEV;
1da177e4
LT
1269 break;
1270
1271 default:
1272 return -EINVAL;
1273 }
1274 } else {
1275 switch (flags & MAP_TYPE) {
1276 case MAP_SHARED:
ce363942
TH
1277 /*
1278 * Ignore pgoff.
1279 */
1280 pgoff = 0;
1da177e4
LT
1281 vm_flags |= VM_SHARED | VM_MAYSHARE;
1282 break;
1283 case MAP_PRIVATE:
1284 /*
1285 * Set pgoff according to addr for anon_vma.
1286 */
1287 pgoff = addr >> PAGE_SHIFT;
1288 break;
1289 default:
1290 return -EINVAL;
1291 }
1292 }
1293
bebeb3d6
ML
1294 addr = mmap_region(file, addr, len, flags, vm_flags, pgoff);
1295 if (!IS_ERR_VALUE(addr) &&
1296 ((vm_flags & VM_LOCKED) ||
1297 (flags & (MAP_POPULATE | MAP_NONBLOCK)) == MAP_POPULATE))
1298 *populate = true;
1299 return addr;
0165ab44 1300}
6be5ceb0 1301
66f0dc48
HD
1302SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
1303 unsigned long, prot, unsigned long, flags,
1304 unsigned long, fd, unsigned long, pgoff)
1305{
1306 struct file *file = NULL;
1307 unsigned long retval = -EBADF;
1308
1309 if (!(flags & MAP_ANONYMOUS)) {
120a795d 1310 audit_mmap_fd(fd, flags);
66f0dc48
HD
1311 if (unlikely(flags & MAP_HUGETLB))
1312 return -EINVAL;
1313 file = fget(fd);
1314 if (!file)
1315 goto out;
1316 } else if (flags & MAP_HUGETLB) {
1317 struct user_struct *user = NULL;
1318 /*
1319 * VM_NORESERVE is used because the reservations will be
1320 * taken when vm_ops->mmap() is called
1321 * A dummy user value is used because we are not locking
1322 * memory so no accounting is necessary
1323 */
40716e29 1324 file = hugetlb_file_setup(HUGETLB_ANON_FILE, addr, len,
42d7395f
AK
1325 VM_NORESERVE,
1326 &user, HUGETLB_ANONHUGE_INODE,
1327 (flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK);
66f0dc48
HD
1328 if (IS_ERR(file))
1329 return PTR_ERR(file);
1330 }
1331
1332 flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
1333
eb36c587 1334 retval = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff);
66f0dc48
HD
1335 if (file)
1336 fput(file);
1337out:
1338 return retval;
1339}
1340
a4679373
CH
1341#ifdef __ARCH_WANT_SYS_OLD_MMAP
1342struct mmap_arg_struct {
1343 unsigned long addr;
1344 unsigned long len;
1345 unsigned long prot;
1346 unsigned long flags;
1347 unsigned long fd;
1348 unsigned long offset;
1349};
1350
1351SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg)
1352{
1353 struct mmap_arg_struct a;
1354
1355 if (copy_from_user(&a, arg, sizeof(a)))
1356 return -EFAULT;
1357 if (a.offset & ~PAGE_MASK)
1358 return -EINVAL;
1359
1360 return sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
1361 a.offset >> PAGE_SHIFT);
1362}
1363#endif /* __ARCH_WANT_SYS_OLD_MMAP */
1364
4e950f6f
AD
1365/*
1366 * Some shared mappings will want the pages marked read-only
1367 * to track write events. If so, we'll downgrade vm_page_prot
1368 * to the private version (using protection_map[] without the
1369 * VM_SHARED bit).
1370 */
1371int vma_wants_writenotify(struct vm_area_struct *vma)
1372{
ca16d140 1373 vm_flags_t vm_flags = vma->vm_flags;
4e950f6f
AD
1374
1375 /* If it was private or non-writable, the write bit is already clear */
1376 if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
1377 return 0;
1378
1379 /* The backer wishes to know when pages are first written to? */
1380 if (vma->vm_ops && vma->vm_ops->page_mkwrite)
1381 return 1;
1382
1383 /* The open routine did something to the protections already? */
1384 if (pgprot_val(vma->vm_page_prot) !=
3ed75eb8 1385 pgprot_val(vm_get_page_prot(vm_flags)))
4e950f6f
AD
1386 return 0;
1387
1388 /* Specialty mapping? */
4b6e1e37 1389 if (vm_flags & VM_PFNMAP)
4e950f6f
AD
1390 return 0;
1391
1392 /* Can the mapping track the dirty pages? */
1393 return vma->vm_file && vma->vm_file->f_mapping &&
1394 mapping_cap_account_dirty(vma->vm_file->f_mapping);
1395}
1396
fc8744ad
LT
1397/*
1398 * We account for memory if it's a private writeable mapping,
5a6fe125 1399 * not hugepages and VM_NORESERVE wasn't set.
fc8744ad 1400 */
ca16d140 1401static inline int accountable_mapping(struct file *file, vm_flags_t vm_flags)
fc8744ad 1402{
5a6fe125
MG
1403 /*
1404 * hugetlb has its own accounting separate from the core VM;
1405 * VM_HUGETLB may not be set yet so we cannot check for that flag.
1406 */
1407 if (file && is_file_hugepages(file))
1408 return 0;
1409
fc8744ad
LT
1410 return (vm_flags & (VM_NORESERVE | VM_SHARED | VM_WRITE)) == VM_WRITE;
1411}
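/*
 * Illustrative aside (not part of the original file): the test above means
 * only private, writable, non-VM_NORESERVE mappings get charged.  An
 * anonymous MAP_PRIVATE | PROT_WRITE mapping is therefore accounted, while
 * a MAP_SHARED mapping, a read-only file mapping, or a MAP_NORESERVE
 * mapping (when the overcommit policy honours it) is not.
 */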
1412
0165ab44
MS
1413unsigned long mmap_region(struct file *file, unsigned long addr,
1414 unsigned long len, unsigned long flags,
ca16d140 1415 vm_flags_t vm_flags, unsigned long pgoff)
0165ab44
MS
1416{
1417 struct mm_struct *mm = current->mm;
1418 struct vm_area_struct *vma, *prev;
1419 int correct_wcount = 0;
1420 int error;
1421 struct rb_node **rb_link, *rb_parent;
1422 unsigned long charged = 0;
1423 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
1424
1da177e4
LT
1425 /* Clear old maps */
1426 error = -ENOMEM;
1427munmap_back:
6597d783 1428 if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
1da177e4
LT
1429 if (do_munmap(mm, addr, len))
1430 return -ENOMEM;
1431 goto munmap_back;
1432 }
1433
1434 /* Check against address space limit. */
119f657c 1435 if (!may_expand_vm(mm, len >> PAGE_SHIFT))
1da177e4
LT
1436 return -ENOMEM;
1437
fc8744ad
LT
1438 /*
1439 * Set 'VM_NORESERVE' if we should not account for the
5a6fe125 1440 * memory use of this mapping.
fc8744ad 1441 */
5a6fe125
MG
1442 if ((flags & MAP_NORESERVE)) {
1443 /* We honor MAP_NORESERVE if allowed to overcommit */
1444 if (sysctl_overcommit_memory != OVERCOMMIT_NEVER)
1445 vm_flags |= VM_NORESERVE;
1446
1447 /* hugetlb applies strict overcommit unless MAP_NORESERVE */
1448 if (file && is_file_hugepages(file))
1449 vm_flags |= VM_NORESERVE;
1450 }
cdfd4325 1451
fc8744ad
LT
1452 /*
1453 * Private writable mapping: check memory availability
1454 */
5a6fe125 1455 if (accountable_mapping(file, vm_flags)) {
fc8744ad 1456 charged = len >> PAGE_SHIFT;
191c5424 1457 if (security_vm_enough_memory_mm(mm, charged))
fc8744ad
LT
1458 return -ENOMEM;
1459 vm_flags |= VM_ACCOUNT;
1da177e4
LT
1460 }
1461
1462 /*
de33c8db 1463 * Can we just expand an old mapping?
1da177e4 1464 */
de33c8db
LT
1465 vma = vma_merge(mm, prev, addr, addr + len, vm_flags, NULL, file, pgoff, NULL);
1466 if (vma)
1467 goto out;
1da177e4
LT
1468
1469 /*
1470 * Determine the object being mapped and call the appropriate
1471 * specific mapper. The address has already been validated, but
1472 * not unmapped; the maps are removed from the list.
1473 */
c5e3b83e 1474 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
1da177e4
LT
1475 if (!vma) {
1476 error = -ENOMEM;
1477 goto unacct_error;
1478 }
1da177e4
LT
1479
1480 vma->vm_mm = mm;
1481 vma->vm_start = addr;
1482 vma->vm_end = addr + len;
1483 vma->vm_flags = vm_flags;
3ed75eb8 1484 vma->vm_page_prot = vm_get_page_prot(vm_flags);
1da177e4 1485 vma->vm_pgoff = pgoff;
5beb4930 1486 INIT_LIST_HEAD(&vma->anon_vma_chain);
1da177e4 1487
ce8fea7a
HD
1488 error = -EINVAL; /* when rejecting VM_GROWSDOWN|VM_GROWSUP */
1489
1da177e4 1490 if (file) {
1da177e4
LT
1491 if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
1492 goto free_vma;
1493 if (vm_flags & VM_DENYWRITE) {
1494 error = deny_write_access(file);
1495 if (error)
1496 goto free_vma;
1497 correct_wcount = 1;
1498 }
cb0942b8 1499 vma->vm_file = get_file(file);
1da177e4
LT
1500 error = file->f_op->mmap(file, vma);
1501 if (error)
1502 goto unmap_and_free_vma;
f8dbf0a7
HS
1503
1504 /* Can addr have changed??
1505 *
1506 * Answer: Yes, several device drivers can do it in their
1507 * f_op->mmap method. -DaveM
2897b4d2
JK
1508 * Bug: If addr is changed, prev, rb_link, rb_parent should
1509 * be updated for vma_link()
f8dbf0a7 1510 */
2897b4d2
JK
1511 WARN_ON_ONCE(addr != vma->vm_start);
1512
f8dbf0a7
HS
1513 addr = vma->vm_start;
1514 pgoff = vma->vm_pgoff;
1515 vm_flags = vma->vm_flags;
1da177e4 1516 } else if (vm_flags & VM_SHARED) {
835ee797
AV
1517 if (unlikely(vm_flags & (VM_GROWSDOWN|VM_GROWSUP)))
1518 goto free_vma;
1da177e4
LT
1519 error = shmem_zero_setup(vma);
1520 if (error)
1521 goto free_vma;
1522 }
1523
c9d0bf24
MD
1524 if (vma_wants_writenotify(vma)) {
1525 pgprot_t pprot = vma->vm_page_prot;
1526
1527 /* Can vma->vm_page_prot have changed??
1528 *
1529 * Answer: Yes, drivers may have changed it in their
1530 * f_op->mmap method.
1531 *
1532 * Ensures that vmas marked as uncached stay that way.
1533 */
1ddd439e 1534 vma->vm_page_prot = vm_get_page_prot(vm_flags & ~VM_SHARED);
c9d0bf24
MD
1535 if (pgprot_val(pprot) == pgprot_val(pgprot_noncached(pprot)))
1536 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
1537 }
d08b3851 1538
de33c8db
LT
1539 vma_link(mm, vma, prev, rb_link, rb_parent);
1540 file = vma->vm_file;
4d3d5b41
ON
1541
1542 /* Once vma denies write, undo our temporary denial count */
1543 if (correct_wcount)
1544 atomic_inc(&inode->i_writecount);
1545out:
cdd6c482 1546 perf_event_mmap(vma);
0a4a9391 1547
ab50b8ed 1548 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
1da177e4 1549 if (vm_flags & VM_LOCKED) {
bebeb3d6
ML
1550 if (!((vm_flags & VM_SPECIAL) || is_vm_hugetlb_page(vma) ||
1551 vma == get_gate_vma(current->mm)))
06f9d8c2 1552 mm->locked_vm += (len >> PAGE_SHIFT);
bebeb3d6
ML
1553 else
1554 vma->vm_flags &= ~VM_LOCKED;
1555 }
2b144498 1556
c7a3a88c
ON
1557 if (file)
1558 uprobe_mmap(vma);
2b144498 1559
1da177e4
LT
1560 return addr;
1561
1562unmap_and_free_vma:
1563 if (correct_wcount)
1564 atomic_inc(&inode->i_writecount);
1565 vma->vm_file = NULL;
1566 fput(file);
1567
1568 /* Undo any partial mapping done by a device driver. */
e0da382c
HD
1569 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
1570 charged = 0;
1da177e4
LT
1571free_vma:
1572 kmem_cache_free(vm_area_cachep, vma);
1573unacct_error:
1574 if (charged)
1575 vm_unacct_memory(charged);
1576 return error;
1577}
1578
db4fbfb9
ML
1579unsigned long unmapped_area(struct vm_unmapped_area_info *info)
1580{
1581 /*
1582 * We implement the search by looking for an rbtree node that
1583 * immediately follows a suitable gap. That is,
1584 * - gap_start = vma->vm_prev->vm_end <= info->high_limit - length;
1585 * - gap_end = vma->vm_start >= info->low_limit + length;
1586 * - gap_end - gap_start >= length
1587 */
1588
1589 struct mm_struct *mm = current->mm;
1590 struct vm_area_struct *vma;
1591 unsigned long length, low_limit, high_limit, gap_start, gap_end;
1592
1593 /* Adjust search length to account for worst case alignment overhead */
1594 length = info->length + info->align_mask;
1595 if (length < info->length)
1596 return -ENOMEM;
1597
1598 /* Adjust search limits by the desired length */
1599 if (info->high_limit < length)
1600 return -ENOMEM;
1601 high_limit = info->high_limit - length;
1602
1603 if (info->low_limit > high_limit)
1604 return -ENOMEM;
1605 low_limit = info->low_limit + length;
1606
1607 /* Check if rbtree root looks promising */
1608 if (RB_EMPTY_ROOT(&mm->mm_rb))
1609 goto check_highest;
1610 vma = rb_entry(mm->mm_rb.rb_node, struct vm_area_struct, vm_rb);
1611 if (vma->rb_subtree_gap < length)
1612 goto check_highest;
1613
1614 while (true) {
1615 /* Visit left subtree if it looks promising */
1616 gap_end = vma->vm_start;
1617 if (gap_end >= low_limit && vma->vm_rb.rb_left) {
1618 struct vm_area_struct *left =
1619 rb_entry(vma->vm_rb.rb_left,
1620 struct vm_area_struct, vm_rb);
1621 if (left->rb_subtree_gap >= length) {
1622 vma = left;
1623 continue;
1624 }
1625 }
1626
1627 gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
1628check_current:
1629 /* Check if current node has a suitable gap */
1630 if (gap_start > high_limit)
1631 return -ENOMEM;
1632 if (gap_end >= low_limit && gap_end - gap_start >= length)
1633 goto found;
1634
1635 /* Visit right subtree if it looks promising */
1636 if (vma->vm_rb.rb_right) {
1637 struct vm_area_struct *right =
1638 rb_entry(vma->vm_rb.rb_right,
1639 struct vm_area_struct, vm_rb);
1640 if (right->rb_subtree_gap >= length) {
1641 vma = right;
1642 continue;
1643 }
1644 }
1645
1646 /* Go back up the rbtree to find next candidate node */
1647 while (true) {
1648 struct rb_node *prev = &vma->vm_rb;
1649 if (!rb_parent(prev))
1650 goto check_highest;
1651 vma = rb_entry(rb_parent(prev),
1652 struct vm_area_struct, vm_rb);
1653 if (prev == vma->vm_rb.rb_left) {
1654 gap_start = vma->vm_prev->vm_end;
1655 gap_end = vma->vm_start;
1656 goto check_current;
1657 }
1658 }
1659 }
1660
1661check_highest:
1662 /* Check highest gap, which does not precede any rbtree node */
1663 gap_start = mm->highest_vm_end;
1664 gap_end = ULONG_MAX; /* Only for VM_BUG_ON below */
1665 if (gap_start > high_limit)
1666 return -ENOMEM;
1667
1668found:
1669 /* We found a suitable gap. Clip it with the original low_limit. */
1670 if (gap_start < info->low_limit)
1671 gap_start = info->low_limit;
1672
1673 /* Adjust gap address to the desired alignment */
1674 gap_start += (info->align_offset - gap_start) & info->align_mask;
1675
1676 VM_BUG_ON(gap_start + info->length > info->high_limit);
1677 VM_BUG_ON(gap_start + info->length > gap_end);
1678 return gap_start;
1679}
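/*
 * Illustrative aside (not part of the original file): the same bottom-up
 * first-fit search, sketched over a plain sorted array of mapped
 * [start, end) ranges instead of the augmented rbtree, with the alignment
 * handling omitted.  The struct and function names are hypothetical.  It
 * applies the same limit adjustments as above but walks every gap, which is
 * exactly the O(n) scan that rb_subtree_gap lets unmapped_area() prune.
 */
struct example_range { unsigned long start, end; };

static unsigned long example_first_fit_gap(const struct example_range *map,
					   int nr, unsigned long length,
					   unsigned long low_limit,
					   unsigned long high_limit)
{
	unsigned long gap_start = 0, gap_end;
	int i;

	if (high_limit < length)
		return -ENOMEM;
	for (i = 0; i <= nr; i++) {
		/* gap i lies before mapping i; the last gap has no upper vma */
		gap_end = (i < nr) ? map[i].start : ULONG_MAX;
		if (gap_start < low_limit)
			gap_start = low_limit;
		if (gap_start > high_limit - length)
			return -ENOMEM;	/* no later gap can fit either */
		if (gap_end >= gap_start && gap_end - gap_start >= length)
			return gap_start;
		if (i < nr)
			gap_start = map[i].end;
	}
	return -ENOMEM;
}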
1680
1681unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
1682{
1683 struct mm_struct *mm = current->mm;
1684 struct vm_area_struct *vma;
1685 unsigned long length, low_limit, high_limit, gap_start, gap_end;
1686
1687 /* Adjust search length to account for worst case alignment overhead */
1688 length = info->length + info->align_mask;
1689 if (length < info->length)
1690 return -ENOMEM;
1691
1692 /*
1693 * Adjust search limits by the desired length.
1694 * See implementation comment at top of unmapped_area().
1695 */
1696 gap_end = info->high_limit;
1697 if (gap_end < length)
1698 return -ENOMEM;
1699 high_limit = gap_end - length;
1700
1701 if (info->low_limit > high_limit)
1702 return -ENOMEM;
1703 low_limit = info->low_limit + length;
1704
1705 /* Check highest gap, which does not precede any rbtree node */
1706 gap_start = mm->highest_vm_end;
1707 if (gap_start <= high_limit)
1708 goto found_highest;
1709
1710 /* Check if rbtree root looks promising */
1711 if (RB_EMPTY_ROOT(&mm->mm_rb))
1712 return -ENOMEM;
1713 vma = rb_entry(mm->mm_rb.rb_node, struct vm_area_struct, vm_rb);
1714 if (vma->rb_subtree_gap < length)
1715 return -ENOMEM;
1716
1717 while (true) {
1718 /* Visit right subtree if it looks promising */
1719 gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
1720 if (gap_start <= high_limit && vma->vm_rb.rb_right) {
1721 struct vm_area_struct *right =
1722 rb_entry(vma->vm_rb.rb_right,
1723 struct vm_area_struct, vm_rb);
1724 if (right->rb_subtree_gap >= length) {
1725 vma = right;
1726 continue;
1727 }
1728 }
1729
1730check_current:
1731 /* Check if current node has a suitable gap */
1732 gap_end = vma->vm_start;
1733 if (gap_end < low_limit)
1734 return -ENOMEM;
1735 if (gap_start <= high_limit && gap_end - gap_start >= length)
1736 goto found;
1737
1738 /* Visit left subtree if it looks promising */
1739 if (vma->vm_rb.rb_left) {
1740 struct vm_area_struct *left =
1741 rb_entry(vma->vm_rb.rb_left,
1742 struct vm_area_struct, vm_rb);
1743 if (left->rb_subtree_gap >= length) {
1744 vma = left;
1745 continue;
1746 }
1747 }
1748
1749 /* Go back up the rbtree to find next candidate node */
1750 while (true) {
1751 struct rb_node *prev = &vma->vm_rb;
1752 if (!rb_parent(prev))
1753 return -ENOMEM;
1754 vma = rb_entry(rb_parent(prev),
1755 struct vm_area_struct, vm_rb);
1756 if (prev == vma->vm_rb.rb_right) {
1757 gap_start = vma->vm_prev ?
1758 vma->vm_prev->vm_end : 0;
1759 goto check_current;
1760 }
1761 }
1762 }
1763
1764found:
1765 /* We found a suitable gap. Clip it with the original high_limit. */
1766 if (gap_end > info->high_limit)
1767 gap_end = info->high_limit;
1768
1769found_highest:
1770 /* Compute highest gap address at the desired alignment */
1771 gap_end -= info->length;
1772 gap_end -= (gap_end - info->align_offset) & info->align_mask;
1773
1774 VM_BUG_ON(gap_end < info->low_limit);
1775 VM_BUG_ON(gap_end < gap_start);
1776 return gap_end;
1777}
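Both searches above are driven entirely by the vm_unmapped_area_info descriptor, with align_mask/align_offset deciding where inside a gap the returned address lands. As a hedged sketch (not taken from any particular architecture; the function name is made up), a caller that wants mappings coloured on a 64KiB boundary could fill the descriptor like this:

/*
 * Minimal sketch of a vm_unmapped_area() caller asking for a
 * 64KiB-coloured gap: the result satisfies
 * (addr & align_mask) == (align_offset & align_mask).
 */
static unsigned long example_get_area_coloured(struct file *filp,
		unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct vm_unmapped_area_info info;

	info.flags = 0;				/* bottom-up search */
	info.length = len;
	info.low_limit = TASK_UNMAPPED_BASE;
	info.high_limit = TASK_SIZE;
	info.align_mask = (64UL << 10) - 1;	/* 64KiB colouring (sketch) */
	info.align_offset = pgoff << PAGE_SHIFT;
	return vm_unmapped_area(&info);
}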
1778
1da177e4
LT
1779/* Get an address range which is currently unmapped.
1780 * For shmat() with addr=0.
1781 *
1782 * Ugly calling convention alert:
1783 * Return value with the low bits set means error value,
 1784 * i.e.
1785 * if (ret & ~PAGE_MASK)
1786 * error = ret;
1787 *
1788 * This function "knows" that -ENOMEM has the bits set.
1789 */
1790#ifndef HAVE_ARCH_UNMAPPED_AREA
1791unsigned long
1792arch_get_unmapped_area(struct file *filp, unsigned long addr,
1793 unsigned long len, unsigned long pgoff, unsigned long flags)
1794{
1795 struct mm_struct *mm = current->mm;
1796 struct vm_area_struct *vma;
db4fbfb9 1797 struct vm_unmapped_area_info info;
1da177e4
LT
1798
1799 if (len > TASK_SIZE)
1800 return -ENOMEM;
1801
06abdfb4
BH
1802 if (flags & MAP_FIXED)
1803 return addr;
1804
1da177e4
LT
1805 if (addr) {
1806 addr = PAGE_ALIGN(addr);
1807 vma = find_vma(mm, addr);
1808 if (TASK_SIZE - len >= addr &&
1809 (!vma || addr + len <= vma->vm_start))
1810 return addr;
1811 }
1da177e4 1812
db4fbfb9
ML
1813 info.flags = 0;
1814 info.length = len;
1815 info.low_limit = TASK_UNMAPPED_BASE;
1816 info.high_limit = TASK_SIZE;
1817 info.align_mask = 0;
1818 return vm_unmapped_area(&info);
1da177e4
LT
1819}
1820#endif
1821
1363c3cd 1822void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
1da177e4
LT
1823{
1824 /*
1825 * Is this a new hole at the lowest possible address?
1826 */
f44d2198 1827 if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache)
1363c3cd 1828 mm->free_area_cache = addr;
1da177e4
LT
1829}
1830
1831/*
1832 * This mmap-allocator allocates new areas top-down from below the
1833 * stack's low limit (the base):
1834 */
1835#ifndef HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
1836unsigned long
1837arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
1838 const unsigned long len, const unsigned long pgoff,
1839 const unsigned long flags)
1840{
1841 struct vm_area_struct *vma;
1842 struct mm_struct *mm = current->mm;
db4fbfb9
ML
1843 unsigned long addr = addr0;
1844 struct vm_unmapped_area_info info;
1da177e4
LT
1845
1846 /* requested length too big for entire address space */
1847 if (len > TASK_SIZE)
1848 return -ENOMEM;
1849
06abdfb4
BH
1850 if (flags & MAP_FIXED)
1851 return addr;
1852
1da177e4
LT
1853 /* requesting a specific address */
1854 if (addr) {
1855 addr = PAGE_ALIGN(addr);
1856 vma = find_vma(mm, addr);
1857 if (TASK_SIZE - len >= addr &&
1858 (!vma || addr + len <= vma->vm_start))
1859 return addr;
1860 }
1861
db4fbfb9
ML
1862 info.flags = VM_UNMAPPED_AREA_TOPDOWN;
1863 info.length = len;
1864 info.low_limit = PAGE_SIZE;
1865 info.high_limit = mm->mmap_base;
1866 info.align_mask = 0;
1867 addr = vm_unmapped_area(&info);
b716ad95 1868
1da177e4
LT
1869 /*
1870 * A failed mmap() very likely causes application failure,
1871 * so fall back to the bottom-up function here. This scenario
1872 * can happen with large stack limits and large mmap()
1873 * allocations.
1874 */
db4fbfb9
ML
1875 if (addr & ~PAGE_MASK) {
1876 VM_BUG_ON(addr != -ENOMEM);
1877 info.flags = 0;
1878 info.low_limit = TASK_UNMAPPED_BASE;
1879 info.high_limit = TASK_SIZE;
1880 addr = vm_unmapped_area(&info);
1881 }
1da177e4
LT
1882
1883 return addr;
1884}
1885#endif
1886
1363c3cd 1887void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
1da177e4
LT
1888{
1889 /*
1890 * Is this a new hole at the highest possible address?
1891 */
1363c3cd
WW
1892 if (addr > mm->free_area_cache)
1893 mm->free_area_cache = addr;
1da177e4
LT
1894
 1895 /* don't allow allocations above the current base */
1363c3cd
WW
1896 if (mm->free_area_cache > mm->mmap_base)
1897 mm->free_area_cache = mm->mmap_base;
1da177e4
LT
1898}
1899
1900unsigned long
1901get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
1902 unsigned long pgoff, unsigned long flags)
1903{
06abdfb4
BH
1904 unsigned long (*get_area)(struct file *, unsigned long,
1905 unsigned long, unsigned long, unsigned long);
1906
9206de95
AV
1907 unsigned long error = arch_mmap_check(addr, len, flags);
1908 if (error)
1909 return error;
1910
1911 /* Careful about overflows.. */
1912 if (len > TASK_SIZE)
1913 return -ENOMEM;
1914
06abdfb4
BH
1915 get_area = current->mm->get_unmapped_area;
1916 if (file && file->f_op && file->f_op->get_unmapped_area)
1917 get_area = file->f_op->get_unmapped_area;
1918 addr = get_area(file, addr, len, pgoff, flags);
1919 if (IS_ERR_VALUE(addr))
1920 return addr;
1da177e4 1921
07ab67c8
LT
1922 if (addr > TASK_SIZE - len)
1923 return -ENOMEM;
1924 if (addr & ~PAGE_MASK)
1925 return -EINVAL;
06abdfb4 1926
9ac4ed4b
AV
1927 addr = arch_rebalance_pgtables(addr, len);
1928 error = security_mmap_addr(addr);
1929 return error ? error : addr;
1da177e4
LT
1930}
1931
1932EXPORT_SYMBOL(get_unmapped_area);
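When a file supplies its own ->get_unmapped_area hook it takes precedence over the mm's default policy, as the code above shows. A hedged, module-style sketch of wiring such a hook into file_operations (the device name and the trivial fallback policy are hypothetical):

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>

/* Placement hook for a hypothetical character device. */
static unsigned long exdev_get_unmapped_area(struct file *filp,
		unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	/* For this sketch, simply defer to the mm's default policy. */
	return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags);
}

static const struct file_operations exdev_fops = {
	.owner			= THIS_MODULE,
	.get_unmapped_area	= exdev_get_unmapped_area,
};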
1933
1934/* Look up the first VMA which satisfies addr < vm_end, NULL if none. */
48aae425 1935struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
1da177e4
LT
1936{
1937 struct vm_area_struct *vma = NULL;
1938
841e31e5
RM
1939 if (WARN_ON_ONCE(!mm)) /* Remove this in linux-3.6 */
1940 return NULL;
1941
1942 /* Check the cache first. */
1943 /* (Cache hit rate is typically around 35%.) */
1944 vma = mm->mmap_cache;
1945 if (!(vma && vma->vm_end > addr && vma->vm_start <= addr)) {
1946 struct rb_node *rb_node;
1947
1948 rb_node = mm->mm_rb.rb_node;
1949 vma = NULL;
1950
1951 while (rb_node) {
1952 struct vm_area_struct *vma_tmp;
1953
1954 vma_tmp = rb_entry(rb_node,
1955 struct vm_area_struct, vm_rb);
1956
1957 if (vma_tmp->vm_end > addr) {
1958 vma = vma_tmp;
1959 if (vma_tmp->vm_start <= addr)
1960 break;
1961 rb_node = rb_node->rb_left;
1962 } else
1963 rb_node = rb_node->rb_right;
1da177e4 1964 }
841e31e5
RM
1965 if (vma)
1966 mm->mmap_cache = vma;
1da177e4
LT
1967 }
1968 return vma;
1969}
1970
1971EXPORT_SYMBOL(find_vma);
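Because find_vma() only guarantees addr < vma->vm_end, the address may still fall in a hole below vma->vm_start; callers that need to know whether an address is actually mapped must check both bounds, and must hold mmap_sem across the lookup. A minimal sketch of that pattern (the helper name is made up):

/* Return true only if addr lies inside some vma, not merely below one. */
static bool example_addr_is_mapped(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma;
	bool mapped = false;

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, addr);
	if (vma && vma->vm_start <= addr)	/* addr < vma->vm_end is implied */
		mapped = true;
	up_read(&mm->mmap_sem);

	return mapped;
}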
1972
6bd4837d
KM
1973/*
1974 * Same as find_vma, but also return a pointer to the previous VMA in *pprev.
6bd4837d 1975 */
1da177e4
LT
1976struct vm_area_struct *
1977find_vma_prev(struct mm_struct *mm, unsigned long addr,
1978 struct vm_area_struct **pprev)
1979{
6bd4837d 1980 struct vm_area_struct *vma;
1da177e4 1981
6bd4837d 1982 vma = find_vma(mm, addr);
83cd904d
MP
1983 if (vma) {
1984 *pprev = vma->vm_prev;
1985 } else {
1986 struct rb_node *rb_node = mm->mm_rb.rb_node;
1987 *pprev = NULL;
1988 while (rb_node) {
1989 *pprev = rb_entry(rb_node, struct vm_area_struct, vm_rb);
1990 rb_node = rb_node->rb_right;
1991 }
1992 }
6bd4837d 1993 return vma;
1da177e4
LT
1994}
1995
1996/*
1997 * Verify that the stack growth is acceptable and
1998 * update accounting. This is shared with both the
1999 * grow-up and grow-down cases.
2000 */
48aae425 2001static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, unsigned long grow)
1da177e4
LT
2002{
2003 struct mm_struct *mm = vma->vm_mm;
2004 struct rlimit *rlim = current->signal->rlim;
0d59a01b 2005 unsigned long new_start;
1da177e4
LT
2006
2007 /* address space limit tests */
119f657c 2008 if (!may_expand_vm(mm, grow))
1da177e4
LT
2009 return -ENOMEM;
2010
2011 /* Stack limit test */
59e99e5b 2012 if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
1da177e4
LT
2013 return -ENOMEM;
2014
2015 /* mlock limit tests */
2016 if (vma->vm_flags & VM_LOCKED) {
2017 unsigned long locked;
2018 unsigned long limit;
2019 locked = mm->locked_vm + grow;
59e99e5b
JS
2020 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
2021 limit >>= PAGE_SHIFT;
1da177e4
LT
2022 if (locked > limit && !capable(CAP_IPC_LOCK))
2023 return -ENOMEM;
2024 }
2025
0d59a01b
AL
2026 /* Check to ensure the stack will not grow into a hugetlb-only region */
2027 new_start = (vma->vm_flags & VM_GROWSUP) ? vma->vm_start :
2028 vma->vm_end - size;
2029 if (is_hugepage_only_range(vma->vm_mm, new_start, size))
2030 return -EFAULT;
2031
1da177e4
LT
2032 /*
2033 * Overcommit.. This must be the final test, as it will
2034 * update security statistics.
2035 */
05fa199d 2036 if (security_vm_enough_memory_mm(mm, grow))
1da177e4
LT
2037 return -ENOMEM;
2038
2039 /* Ok, everything looks good - let it rip */
1da177e4
LT
2040 if (vma->vm_flags & VM_LOCKED)
2041 mm->locked_vm += grow;
ab50b8ed 2042 vm_stat_account(mm, vma->vm_flags, vma->vm_file, grow);
1da177e4
LT
2043 return 0;
2044}
2045
46dea3d0 2046#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
1da177e4 2047/*
46dea3d0
HD
2048 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
2049 * vma is the last one with address > vma->vm_end. Have to extend vma.
1da177e4 2050 */
46dea3d0 2051int expand_upwards(struct vm_area_struct *vma, unsigned long address)
1da177e4
LT
2052{
2053 int error;
2054
2055 if (!(vma->vm_flags & VM_GROWSUP))
2056 return -EFAULT;
2057
2058 /*
2059 * We must make sure the anon_vma is allocated
2060 * so that the anon_vma locking is not a noop.
2061 */
2062 if (unlikely(anon_vma_prepare(vma)))
2063 return -ENOMEM;
bb4a340e 2064 vma_lock_anon_vma(vma);
1da177e4
LT
2065
2066 /*
2067 * vma->vm_start/vm_end cannot change under us because the caller
2068 * is required to hold the mmap_sem in read mode. We need the
2069 * anon_vma lock to serialize against concurrent expand_stacks.
06b32f3a 2070 * Also guard against wrapping around to address 0.
1da177e4 2071 */
06b32f3a
HD
2072 if (address < PAGE_ALIGN(address+4))
2073 address = PAGE_ALIGN(address+4);
2074 else {
bb4a340e 2075 vma_unlock_anon_vma(vma);
06b32f3a
HD
2076 return -ENOMEM;
2077 }
1da177e4
LT
2078 error = 0;
2079
2080 /* Somebody else might have raced and expanded it already */
2081 if (address > vma->vm_end) {
2082 unsigned long size, grow;
2083
2084 size = address - vma->vm_start;
2085 grow = (address - vma->vm_end) >> PAGE_SHIFT;
2086
42c36f63
HD
2087 error = -ENOMEM;
2088 if (vma->vm_pgoff + (size >> PAGE_SHIFT) >= vma->vm_pgoff) {
2089 error = acct_stack_growth(vma, size, grow);
2090 if (!error) {
4128997b
ML
2091 /*
2092 * vma_gap_update() doesn't support concurrent
2093 * updates, but we only hold a shared mmap_sem
2094 * lock here, so we need to protect against
2095 * concurrent vma expansions.
2096 * vma_lock_anon_vma() doesn't help here, as
2097 * we don't guarantee that all growable vmas
2098 * in a mm share the same root anon vma.
2099 * So, we reuse mm->page_table_lock to guard
2100 * against concurrent vma expansions.
2101 */
2102 spin_lock(&vma->vm_mm->page_table_lock);
bf181b9f 2103 anon_vma_interval_tree_pre_update_vma(vma);
42c36f63 2104 vma->vm_end = address;
bf181b9f 2105 anon_vma_interval_tree_post_update_vma(vma);
d3737187
ML
2106 if (vma->vm_next)
2107 vma_gap_update(vma->vm_next);
2108 else
2109 vma->vm_mm->highest_vm_end = address;
4128997b
ML
2110 spin_unlock(&vma->vm_mm->page_table_lock);
2111
42c36f63
HD
2112 perf_event_mmap(vma);
2113 }
3af9e859 2114 }
1da177e4 2115 }
bb4a340e 2116 vma_unlock_anon_vma(vma);
b15d00b6 2117 khugepaged_enter_vma_merge(vma);
ed8ea815 2118 validate_mm(vma->vm_mm);
1da177e4
LT
2119 return error;
2120}
46dea3d0
HD
2121#endif /* CONFIG_STACK_GROWSUP || CONFIG_IA64 */
2122
1da177e4
LT
2123/*
2124 * vma is the first one with address < vma->vm_start. Have to extend vma.
2125 */
d05f3169 2126int expand_downwards(struct vm_area_struct *vma,
b6a2fea3 2127 unsigned long address)
1da177e4
LT
2128{
2129 int error;
2130
2131 /*
2132 * We must make sure the anon_vma is allocated
2133 * so that the anon_vma locking is not a noop.
2134 */
2135 if (unlikely(anon_vma_prepare(vma)))
2136 return -ENOMEM;
8869477a
EP
2137
2138 address &= PAGE_MASK;
e5467859 2139 error = security_mmap_addr(address);
8869477a
EP
2140 if (error)
2141 return error;
2142
bb4a340e 2143 vma_lock_anon_vma(vma);
1da177e4
LT
2144
2145 /*
2146 * vma->vm_start/vm_end cannot change under us because the caller
2147 * is required to hold the mmap_sem in read mode. We need the
2148 * anon_vma lock to serialize against concurrent expand_stacks.
2149 */
1da177e4
LT
2150
2151 /* Somebody else might have raced and expanded it already */
2152 if (address < vma->vm_start) {
2153 unsigned long size, grow;
2154
2155 size = vma->vm_end - address;
2156 grow = (vma->vm_start - address) >> PAGE_SHIFT;
2157
a626ca6a
LT
2158 error = -ENOMEM;
2159 if (grow <= vma->vm_pgoff) {
2160 error = acct_stack_growth(vma, size, grow);
2161 if (!error) {
4128997b
ML
2162 /*
2163 * vma_gap_update() doesn't support concurrent
2164 * updates, but we only hold a shared mmap_sem
2165 * lock here, so we need to protect against
2166 * concurrent vma expansions.
2167 * vma_lock_anon_vma() doesn't help here, as
2168 * we don't guarantee that all growable vmas
2169 * in a mm share the same root anon vma.
2170 * So, we reuse mm->page_table_lock to guard
2171 * against concurrent vma expansions.
2172 */
2173 spin_lock(&vma->vm_mm->page_table_lock);
bf181b9f 2174 anon_vma_interval_tree_pre_update_vma(vma);
a626ca6a
LT
2175 vma->vm_start = address;
2176 vma->vm_pgoff -= grow;
bf181b9f 2177 anon_vma_interval_tree_post_update_vma(vma);
d3737187 2178 vma_gap_update(vma);
4128997b
ML
2179 spin_unlock(&vma->vm_mm->page_table_lock);
2180
a626ca6a
LT
2181 perf_event_mmap(vma);
2182 }
1da177e4
LT
2183 }
2184 }
bb4a340e 2185 vma_unlock_anon_vma(vma);
b15d00b6 2186 khugepaged_enter_vma_merge(vma);
ed8ea815 2187 validate_mm(vma->vm_mm);
1da177e4
LT
2188 return error;
2189}
2190
b6a2fea3
OW
2191#ifdef CONFIG_STACK_GROWSUP
2192int expand_stack(struct vm_area_struct *vma, unsigned long address)
2193{
2194 return expand_upwards(vma, address);
2195}
2196
2197struct vm_area_struct *
2198find_extend_vma(struct mm_struct *mm, unsigned long addr)
2199{
2200 struct vm_area_struct *vma, *prev;
2201
2202 addr &= PAGE_MASK;
2203 vma = find_vma_prev(mm, addr, &prev);
2204 if (vma && (vma->vm_start <= addr))
2205 return vma;
1c127185 2206 if (!prev || expand_stack(prev, addr))
b6a2fea3 2207 return NULL;
ba470de4 2208 if (prev->vm_flags & VM_LOCKED) {
c58267c3 2209 mlock_vma_pages_range(prev, addr, prev->vm_end);
ba470de4 2210 }
b6a2fea3
OW
2211 return prev;
2212}
2213#else
2214int expand_stack(struct vm_area_struct *vma, unsigned long address)
2215{
2216 return expand_downwards(vma, address);
2217}
2218
1da177e4
LT
2219struct vm_area_struct *
2220find_extend_vma(struct mm_struct * mm, unsigned long addr)
2221{
2222 struct vm_area_struct * vma;
2223 unsigned long start;
2224
2225 addr &= PAGE_MASK;
2226 vma = find_vma(mm,addr);
2227 if (!vma)
2228 return NULL;
2229 if (vma->vm_start <= addr)
2230 return vma;
2231 if (!(vma->vm_flags & VM_GROWSDOWN))
2232 return NULL;
2233 start = vma->vm_start;
2234 if (expand_stack(vma, addr))
2235 return NULL;
ba470de4 2236 if (vma->vm_flags & VM_LOCKED) {
c58267c3 2237 mlock_vma_pages_range(vma, addr, start);
ba470de4 2238 }
1da177e4
LT
2239 return vma;
2240}
2241#endif
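Fault handlers are the usual consumers of expand_stack(): on a miss they look up the nearest vma above the faulting address and, if it is a GROWSDOWN stack vma, grow it downwards to cover the fault. A condensed, hedged sketch of that open-coded pattern, essentially what find_extend_vma() does minus the mlock handling (caller holds mmap_sem for read; the helper name is hypothetical):

/* Return the vma covering @address, growing the stack to it if allowed. */
static struct vm_area_struct *example_vma_for_fault(struct mm_struct *mm,
						    unsigned long address)
{
	struct vm_area_struct *vma;

	vma = find_vma(mm, address);
	if (!vma)
		return NULL;			/* above every mapping */
	if (vma->vm_start <= address)
		return vma;			/* already inside the vma */
	if (!(vma->vm_flags & VM_GROWSDOWN))
		return NULL;			/* hole below a non-stack vma */
	if (expand_stack(vma, address))
		return NULL;			/* rlimit hit or -ENOMEM */
	return vma;
}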
2242
1da177e4 2243/*
2c0b3814 2244 * Ok - we have the memory areas we should free on the vma list,
1da177e4 2245 * so release them, and do the vma updates.
2c0b3814
HD
2246 *
2247 * Called with the mm semaphore held.
1da177e4 2248 */
2c0b3814 2249static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
1da177e4 2250{
4f74d2c8
LT
2251 unsigned long nr_accounted = 0;
2252
365e9c87
HD
2253 /* Update high watermark before we lower total_vm */
2254 update_hiwater_vm(mm);
1da177e4 2255 do {
2c0b3814
HD
2256 long nrpages = vma_pages(vma);
2257
4f74d2c8
LT
2258 if (vma->vm_flags & VM_ACCOUNT)
2259 nr_accounted += nrpages;
2c0b3814 2260 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
a8fb5618 2261 vma = remove_vma(vma);
146425a3 2262 } while (vma);
4f74d2c8 2263 vm_unacct_memory(nr_accounted);
1da177e4
LT
2264 validate_mm(mm);
2265}
2266
2267/*
2268 * Get rid of page table information in the indicated region.
2269 *
f10df686 2270 * Called with the mm semaphore held.
1da177e4
LT
2271 */
2272static void unmap_region(struct mm_struct *mm,
e0da382c
HD
2273 struct vm_area_struct *vma, struct vm_area_struct *prev,
2274 unsigned long start, unsigned long end)
1da177e4 2275{
e0da382c 2276 struct vm_area_struct *next = prev? prev->vm_next: mm->mmap;
d16dfc55 2277 struct mmu_gather tlb;
1da177e4
LT
2278
2279 lru_add_drain();
d16dfc55 2280 tlb_gather_mmu(&tlb, mm, 0);
365e9c87 2281 update_hiwater_rss(mm);
4f74d2c8 2282 unmap_vmas(&tlb, vma, start, end);
d16dfc55
PZ
2283 free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
2284 next ? next->vm_start : 0);
2285 tlb_finish_mmu(&tlb, start, end);
1da177e4
LT
2286}
2287
2288/*
 2289 * Create a list of vmas touched by the unmap, removing them from the mm's
 2290 * vma list as we go.
2291 */
2292static void
2293detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
2294 struct vm_area_struct *prev, unsigned long end)
2295{
2296 struct vm_area_struct **insertion_point;
2297 struct vm_area_struct *tail_vma = NULL;
1363c3cd 2298 unsigned long addr;
1da177e4
LT
2299
2300 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
297c5eee 2301 vma->vm_prev = NULL;
1da177e4 2302 do {
d3737187 2303 vma_rb_erase(vma, &mm->mm_rb);
1da177e4
LT
2304 mm->map_count--;
2305 tail_vma = vma;
2306 vma = vma->vm_next;
2307 } while (vma && vma->vm_start < end);
2308 *insertion_point = vma;
d3737187 2309 if (vma) {
297c5eee 2310 vma->vm_prev = prev;
d3737187
ML
2311 vma_gap_update(vma);
2312 } else
2313 mm->highest_vm_end = prev ? prev->vm_end : 0;
1da177e4 2314 tail_vma->vm_next = NULL;
1363c3cd
WW
2315 if (mm->unmap_area == arch_unmap_area)
2316 addr = prev ? prev->vm_end : mm->mmap_base;
2317 else
2318 addr = vma ? vma->vm_start : mm->mmap_base;
2319 mm->unmap_area(mm, addr);
1da177e4
LT
2320 mm->mmap_cache = NULL; /* Kill the cache. */
2321}
2322
2323/*
659ace58
KM
2324 * __split_vma() bypasses sysctl_max_map_count checking. We use this on the
2325 * munmap path where it doesn't make sense to fail.
1da177e4 2326 */
659ace58 2327static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
1da177e4
LT
2328 unsigned long addr, int new_below)
2329{
2330 struct mempolicy *pol;
2331 struct vm_area_struct *new;
5beb4930 2332 int err = -ENOMEM;
1da177e4 2333
a5516438
AK
2334 if (is_vm_hugetlb_page(vma) && (addr &
2335 ~(huge_page_mask(hstate_vma(vma)))))
1da177e4
LT
2336 return -EINVAL;
2337
e94b1766 2338 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
1da177e4 2339 if (!new)
5beb4930 2340 goto out_err;
1da177e4
LT
2341
2342 /* most fields are the same, copy all, and then fixup */
2343 *new = *vma;
2344
5beb4930
RR
2345 INIT_LIST_HEAD(&new->anon_vma_chain);
2346
1da177e4
LT
2347 if (new_below)
2348 new->vm_end = addr;
2349 else {
2350 new->vm_start = addr;
2351 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
2352 }
2353
846a16bf 2354 pol = mpol_dup(vma_policy(vma));
1da177e4 2355 if (IS_ERR(pol)) {
5beb4930
RR
2356 err = PTR_ERR(pol);
2357 goto out_free_vma;
1da177e4
LT
2358 }
2359 vma_set_policy(new, pol);
2360
5beb4930
RR
2361 if (anon_vma_clone(new, vma))
2362 goto out_free_mpol;
2363
e9714acf 2364 if (new->vm_file)
1da177e4
LT
2365 get_file(new->vm_file);
2366
2367 if (new->vm_ops && new->vm_ops->open)
2368 new->vm_ops->open(new);
2369
2370 if (new_below)
5beb4930 2371 err = vma_adjust(vma, addr, vma->vm_end, vma->vm_pgoff +
1da177e4
LT
2372 ((addr - new->vm_start) >> PAGE_SHIFT), new);
2373 else
5beb4930 2374 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
1da177e4 2375
5beb4930
RR
2376 /* Success. */
2377 if (!err)
2378 return 0;
2379
2380 /* Clean everything up if vma_adjust failed. */
58927533
RR
2381 if (new->vm_ops && new->vm_ops->close)
2382 new->vm_ops->close(new);
e9714acf 2383 if (new->vm_file)
5beb4930 2384 fput(new->vm_file);
2aeadc30 2385 unlink_anon_vmas(new);
5beb4930
RR
2386 out_free_mpol:
2387 mpol_put(pol);
2388 out_free_vma:
2389 kmem_cache_free(vm_area_cachep, new);
2390 out_err:
2391 return err;
1da177e4
LT
2392}
2393
659ace58
KM
2394/*
2395 * Split a vma into two pieces at address 'addr', a new vma is allocated
2396 * either for the first part or the tail.
2397 */
2398int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
2399 unsigned long addr, int new_below)
2400{
2401 if (mm->map_count >= sysctl_max_map_count)
2402 return -ENOMEM;
2403
2404 return __split_vma(mm, vma, addr, new_below);
2405}
2406
1da177e4
LT
2407/* Munmap is split into 2 main parts -- this part which finds
2408 * what needs doing, and the areas themselves, which do the
2409 * work. This now handles partial unmappings.
2410 * Jeremy Fitzhardinge <jeremy@goop.org>
2411 */
2412int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
2413{
2414 unsigned long end;
146425a3 2415 struct vm_area_struct *vma, *prev, *last;
1da177e4
LT
2416
2417 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
2418 return -EINVAL;
2419
2420 if ((len = PAGE_ALIGN(len)) == 0)
2421 return -EINVAL;
2422
2423 /* Find the first overlapping VMA */
9be34c9d 2424 vma = find_vma(mm, start);
146425a3 2425 if (!vma)
1da177e4 2426 return 0;
9be34c9d 2427 prev = vma->vm_prev;
146425a3 2428 /* we have start < vma->vm_end */
1da177e4
LT
2429
2430 /* if it doesn't overlap, we have nothing.. */
2431 end = start + len;
146425a3 2432 if (vma->vm_start >= end)
1da177e4
LT
2433 return 0;
2434
2435 /*
2436 * If we need to split any vma, do it now to save pain later.
2437 *
2438 * Note: mremap's move_vma VM_ACCOUNT handling assumes a partially
2439 * unmapped vm_area_struct will remain in use: so lower split_vma
2440 * places tmp vma above, and higher split_vma places tmp vma below.
2441 */
146425a3 2442 if (start > vma->vm_start) {
659ace58
KM
2443 int error;
2444
2445 /*
2446 * Make sure that map_count on return from munmap() will
2447 * not exceed its limit; but let map_count go just above
2448 * its limit temporarily, to help free resources as expected.
2449 */
2450 if (end < vma->vm_end && mm->map_count >= sysctl_max_map_count)
2451 return -ENOMEM;
2452
2453 error = __split_vma(mm, vma, start, 0);
1da177e4
LT
2454 if (error)
2455 return error;
146425a3 2456 prev = vma;
1da177e4
LT
2457 }
2458
2459 /* Does it split the last one? */
2460 last = find_vma(mm, end);
2461 if (last && end > last->vm_start) {
659ace58 2462 int error = __split_vma(mm, last, end, 1);
1da177e4
LT
2463 if (error)
2464 return error;
2465 }
146425a3 2466 vma = prev? prev->vm_next: mm->mmap;
1da177e4 2467
ba470de4
RR
2468 /*
2469 * unlock any mlock()ed ranges before detaching vmas
2470 */
2471 if (mm->locked_vm) {
2472 struct vm_area_struct *tmp = vma;
2473 while (tmp && tmp->vm_start < end) {
2474 if (tmp->vm_flags & VM_LOCKED) {
2475 mm->locked_vm -= vma_pages(tmp);
2476 munlock_vma_pages_all(tmp);
2477 }
2478 tmp = tmp->vm_next;
2479 }
2480 }
2481
1da177e4
LT
2482 /*
2483 * Remove the vma's, and unmap the actual pages
2484 */
146425a3
HD
2485 detach_vmas_to_be_unmapped(mm, vma, prev, end);
2486 unmap_region(mm, vma, prev, start, end);
1da177e4
LT
2487
2488 /* Fix up all other VM information */
2c0b3814 2489 remove_vma_list(mm, vma);
1da177e4
LT
2490
2491 return 0;
2492}
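The splitting logic above is easy to observe from userspace: unmapping the middle of a mapping forces __split_vma() at both edges and leaves two vmas with a hole between them. A small, self-contained demonstration (assumes the page size reported by sysconf):

#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	char *p = mmap(NULL, 3 * page, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		return 1;

	/*
	 * Unmap only the middle page: the kernel splits the vma at
	 * p + page and at p + 2 * page, then removes the middle piece.
	 * /proc/self/maps now shows two vmas with a one-page hole.
	 */
	if (munmap(p + page, page))
		return 1;

	printf("hole punched at %p\n", (void *)(p + page));
	return 0;
}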
1da177e4 2493
bfce281c 2494int vm_munmap(unsigned long start, size_t len)
1da177e4
LT
2495{
2496 int ret;
bfce281c 2497 struct mm_struct *mm = current->mm;
1da177e4
LT
2498
2499 down_write(&mm->mmap_sem);
a46ef99d 2500 ret = do_munmap(mm, start, len);
1da177e4
LT
2501 up_write(&mm->mmap_sem);
2502 return ret;
2503}
a46ef99d
LT
2504EXPORT_SYMBOL(vm_munmap);
2505
2506SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
2507{
2508 profile_munmap(addr);
bfce281c 2509 return vm_munmap(addr, len);
a46ef99d 2510}
1da177e4
LT
2511
2512static inline void verify_mm_writelocked(struct mm_struct *mm)
2513{
a241ec65 2514#ifdef CONFIG_DEBUG_VM
1da177e4
LT
2515 if (unlikely(down_read_trylock(&mm->mmap_sem))) {
2516 WARN_ON(1);
2517 up_read(&mm->mmap_sem);
2518 }
2519#endif
2520}
2521
2522/*
2523 * this is really a simplified "do_mmap". it only handles
2524 * anonymous maps. eventually we may be able to do some
2525 * brk-specific accounting here.
2526 */
e4eb1ff6 2527static unsigned long do_brk(unsigned long addr, unsigned long len)
1da177e4
LT
2528{
2529 struct mm_struct * mm = current->mm;
2530 struct vm_area_struct * vma, * prev;
2531 unsigned long flags;
2532 struct rb_node ** rb_link, * rb_parent;
2533 pgoff_t pgoff = addr >> PAGE_SHIFT;
3a459756 2534 int error;
1da177e4
LT
2535
2536 len = PAGE_ALIGN(len);
2537 if (!len)
2538 return addr;
2539
3a459756
KK
2540 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
2541
2c6a1016
AV
2542 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
2543 if (error & ~PAGE_MASK)
3a459756
KK
2544 return error;
2545
1da177e4
LT
2546 /*
2547 * mlock MCL_FUTURE?
2548 */
2549 if (mm->def_flags & VM_LOCKED) {
2550 unsigned long locked, lock_limit;
93ea1d0a
CW
2551 locked = len >> PAGE_SHIFT;
2552 locked += mm->locked_vm;
59e99e5b 2553 lock_limit = rlimit(RLIMIT_MEMLOCK);
93ea1d0a 2554 lock_limit >>= PAGE_SHIFT;
1da177e4
LT
2555 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
2556 return -EAGAIN;
2557 }
2558
2559 /*
2560 * mm->mmap_sem is required to protect against another thread
2561 * changing the mappings in case we sleep.
2562 */
2563 verify_mm_writelocked(mm);
2564
2565 /*
2566 * Clear old maps. this also does some error checking for us
2567 */
2568 munmap_back:
6597d783 2569 if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
1da177e4
LT
2570 if (do_munmap(mm, addr, len))
2571 return -ENOMEM;
2572 goto munmap_back;
2573 }
2574
2575 /* Check against address space limits *after* clearing old maps... */
119f657c 2576 if (!may_expand_vm(mm, len >> PAGE_SHIFT))
1da177e4
LT
2577 return -ENOMEM;
2578
2579 if (mm->map_count > sysctl_max_map_count)
2580 return -ENOMEM;
2581
191c5424 2582 if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
1da177e4
LT
2583 return -ENOMEM;
2584
1da177e4 2585 /* Can we just expand an old private anonymous mapping? */
ba470de4
RR
2586 vma = vma_merge(mm, prev, addr, addr + len, flags,
2587 NULL, NULL, pgoff, NULL);
2588 if (vma)
1da177e4
LT
2589 goto out;
2590
2591 /*
2592 * create a vma struct for an anonymous mapping
2593 */
c5e3b83e 2594 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
1da177e4
LT
2595 if (!vma) {
2596 vm_unacct_memory(len >> PAGE_SHIFT);
2597 return -ENOMEM;
2598 }
1da177e4 2599
5beb4930 2600 INIT_LIST_HEAD(&vma->anon_vma_chain);
1da177e4
LT
2601 vma->vm_mm = mm;
2602 vma->vm_start = addr;
2603 vma->vm_end = addr + len;
2604 vma->vm_pgoff = pgoff;
2605 vma->vm_flags = flags;
3ed75eb8 2606 vma->vm_page_prot = vm_get_page_prot(flags);
1da177e4
LT
2607 vma_link(mm, vma, prev, rb_link, rb_parent);
2608out:
3af9e859 2609 perf_event_mmap(vma);
1da177e4 2610 mm->total_vm += len >> PAGE_SHIFT;
128557ff
ML
2611 if (flags & VM_LOCKED)
2612 mm->locked_vm += (len >> PAGE_SHIFT);
1da177e4
LT
2613 return addr;
2614}
2615
e4eb1ff6
LT
2616unsigned long vm_brk(unsigned long addr, unsigned long len)
2617{
2618 struct mm_struct *mm = current->mm;
2619 unsigned long ret;
128557ff 2620 bool populate;
e4eb1ff6
LT
2621
2622 down_write(&mm->mmap_sem);
2623 ret = do_brk(addr, len);
128557ff 2624 populate = ((mm->def_flags & VM_LOCKED) != 0);
e4eb1ff6 2625 up_write(&mm->mmap_sem);
128557ff
ML
2626 if (populate)
2627 mm_populate(addr, len);
e4eb1ff6
LT
2628 return ret;
2629}
2630EXPORT_SYMBOL(vm_brk);
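vm_brk() is the in-kernel way to set up an anonymous, zero-filled region on behalf of the current process (binary loaders use it for bss-style segments), and as the code above shows it also populates the range when mm->def_flags carries VM_LOCKED. A hedged sketch of a loader-style caller (bounds and helper name are hypothetical); errors come back with the low bits set, like get_unmapped_area():

/* Map a zero-filled, bss-like range [start, end) into current->mm. */
static int example_map_bss(unsigned long start, unsigned long end)
{
	unsigned long addr;

	start = PAGE_ALIGN(start);
	end = PAGE_ALIGN(end);
	if (end <= start)
		return 0;

	addr = vm_brk(start, end - start);	/* takes mmap_sem itself */
	if (addr & ~PAGE_MASK)			/* error value in the low bits */
		return (int)addr;
	return 0;
}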
1da177e4
LT
2631
2632/* Release all mmaps. */
2633void exit_mmap(struct mm_struct *mm)
2634{
d16dfc55 2635 struct mmu_gather tlb;
ba470de4 2636 struct vm_area_struct *vma;
1da177e4
LT
2637 unsigned long nr_accounted = 0;
2638
d6dd61c8 2639 /* mm's last user has gone, and it's about to be pulled down */
cddb8a5c 2640 mmu_notifier_release(mm);
d6dd61c8 2641
ba470de4
RR
2642 if (mm->locked_vm) {
2643 vma = mm->mmap;
2644 while (vma) {
2645 if (vma->vm_flags & VM_LOCKED)
2646 munlock_vma_pages_all(vma);
2647 vma = vma->vm_next;
2648 }
2649 }
9480c53e
JF
2650
2651 arch_exit_mmap(mm);
2652
ba470de4 2653 vma = mm->mmap;
9480c53e
JF
2654 if (!vma) /* Can happen if dup_mmap() received an OOM */
2655 return;
2656
1da177e4 2657 lru_add_drain();
1da177e4 2658 flush_cache_mm(mm);
d16dfc55 2659 tlb_gather_mmu(&tlb, mm, 1);
901608d9 2660 /* update_hiwater_rss(mm) here? but nobody should be looking */
e0da382c 2661 /* Use -1 here to ensure all VMAs in the mm are unmapped */
4f74d2c8 2662 unmap_vmas(&tlb, vma, 0, -1);
9ba69294 2663
d16dfc55 2664 free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, 0);
853f5e26 2665 tlb_finish_mmu(&tlb, 0, -1);
1da177e4 2666
1da177e4 2667 /*
8f4f8c16
HD
2668 * Walk the list again, actually closing and freeing it,
2669 * with preemption enabled, without holding any MM locks.
1da177e4 2670 */
4f74d2c8
LT
2671 while (vma) {
2672 if (vma->vm_flags & VM_ACCOUNT)
2673 nr_accounted += vma_pages(vma);
a8fb5618 2674 vma = remove_vma(vma);
4f74d2c8
LT
2675 }
2676 vm_unacct_memory(nr_accounted);
e0da382c 2677
f9aed62a 2678 WARN_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
1da177e4
LT
2679}
2680
2681/* Insert vm structure into process list sorted by address
2682 * and into the inode's i_mmap tree. If vm_file is non-NULL
3d48ae45 2683 * then i_mmap_mutex is taken here.
1da177e4 2684 */
6597d783 2685int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
1da177e4 2686{
6597d783
HD
2687 struct vm_area_struct *prev;
2688 struct rb_node **rb_link, *rb_parent;
1da177e4
LT
2689
2690 /*
2691 * The vm_pgoff of a purely anonymous vma should be irrelevant
2692 * until its first write fault, when page's anon_vma and index
2693 * are set. But now set the vm_pgoff it will almost certainly
2694 * end up with (unless mremap moves it elsewhere before that
2695 * first wfault), so /proc/pid/maps tells a consistent story.
2696 *
2697 * By setting it to reflect the virtual start address of the
2698 * vma, merges and splits can happen in a seamless way, just
2699 * using the existing file pgoff checks and manipulations.
2700 * Similarly in do_mmap_pgoff and in do_brk.
2701 */
2702 if (!vma->vm_file) {
2703 BUG_ON(vma->anon_vma);
2704 vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT;
2705 }
6597d783
HD
2706 if (find_vma_links(mm, vma->vm_start, vma->vm_end,
2707 &prev, &rb_link, &rb_parent))
1da177e4 2708 return -ENOMEM;
2fd4ef85 2709 if ((vma->vm_flags & VM_ACCOUNT) &&
34b4e4aa 2710 security_vm_enough_memory_mm(mm, vma_pages(vma)))
2fd4ef85 2711 return -ENOMEM;
2b144498 2712
1da177e4
LT
2713 vma_link(mm, vma, prev, rb_link, rb_parent);
2714 return 0;
2715}
2716
2717/*
2718 * Copy the vma structure to a new location in the same mm,
2719 * prior to moving page table entries, to effect an mremap move.
2720 */
2721struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
38a76013
ML
2722 unsigned long addr, unsigned long len, pgoff_t pgoff,
2723 bool *need_rmap_locks)
1da177e4
LT
2724{
2725 struct vm_area_struct *vma = *vmap;
2726 unsigned long vma_start = vma->vm_start;
2727 struct mm_struct *mm = vma->vm_mm;
2728 struct vm_area_struct *new_vma, *prev;
2729 struct rb_node **rb_link, *rb_parent;
2730 struct mempolicy *pol;
948f017b 2731 bool faulted_in_anon_vma = true;
1da177e4
LT
2732
2733 /*
2734 * If anonymous vma has not yet been faulted, update new pgoff
2735 * to match new location, to increase its chance of merging.
2736 */
948f017b 2737 if (unlikely(!vma->vm_file && !vma->anon_vma)) {
1da177e4 2738 pgoff = addr >> PAGE_SHIFT;
948f017b
AA
2739 faulted_in_anon_vma = false;
2740 }
1da177e4 2741
6597d783
HD
2742 if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent))
2743 return NULL; /* should never get here */
1da177e4
LT
2744 new_vma = vma_merge(mm, prev, addr, addr + len, vma->vm_flags,
2745 vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma));
2746 if (new_vma) {
2747 /*
2748 * Source vma may have been merged into new_vma
2749 */
948f017b
AA
2750 if (unlikely(vma_start >= new_vma->vm_start &&
2751 vma_start < new_vma->vm_end)) {
2752 /*
2753 * The only way we can get a vma_merge with
2754 * self during an mremap is if the vma hasn't
2755 * been faulted in yet and we were allowed to
2756 * reset the dst vma->vm_pgoff to the
2757 * destination address of the mremap to allow
2758 * the merge to happen. mremap must change the
2759 * vm_pgoff linearity between src and dst vmas
2760 * (in turn preventing a vma_merge) to be
2761 * safe. It is only safe to keep the vm_pgoff
2762 * linear if there are no pages mapped yet.
2763 */
2764 VM_BUG_ON(faulted_in_anon_vma);
38a76013 2765 *vmap = vma = new_vma;
108d6642 2766 }
38a76013 2767 *need_rmap_locks = (new_vma->vm_pgoff <= vma->vm_pgoff);
1da177e4 2768 } else {
e94b1766 2769 new_vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
1da177e4
LT
2770 if (new_vma) {
2771 *new_vma = *vma;
523d4e20
ML
2772 new_vma->vm_start = addr;
2773 new_vma->vm_end = addr + len;
2774 new_vma->vm_pgoff = pgoff;
846a16bf 2775 pol = mpol_dup(vma_policy(vma));
5beb4930
RR
2776 if (IS_ERR(pol))
2777 goto out_free_vma;
523d4e20 2778 vma_set_policy(new_vma, pol);
5beb4930
RR
2779 INIT_LIST_HEAD(&new_vma->anon_vma_chain);
2780 if (anon_vma_clone(new_vma, vma))
2781 goto out_free_mempol;
e9714acf 2782 if (new_vma->vm_file)
1da177e4
LT
2783 get_file(new_vma->vm_file);
2784 if (new_vma->vm_ops && new_vma->vm_ops->open)
2785 new_vma->vm_ops->open(new_vma);
2786 vma_link(mm, new_vma, prev, rb_link, rb_parent);
38a76013 2787 *need_rmap_locks = false;
1da177e4
LT
2788 }
2789 }
2790 return new_vma;
5beb4930
RR
2791
2792 out_free_mempol:
2793 mpol_put(pol);
2794 out_free_vma:
2795 kmem_cache_free(vm_area_cachep, new_vma);
2796 return NULL;
1da177e4 2797}
119f657c 2798
2799/*
2800 * Return true if the calling process may expand its vm space by the passed
2801 * number of pages
2802 */
2803int may_expand_vm(struct mm_struct *mm, unsigned long npages)
2804{
2805 unsigned long cur = mm->total_vm; /* pages */
2806 unsigned long lim;
2807
59e99e5b 2808 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
119f657c 2809
2810 if (cur + npages > lim)
2811 return 0;
2812 return 1;
2813}
fa5dc22f
RM
2814
2815
b1d0e4f5
NP
2816static int special_mapping_fault(struct vm_area_struct *vma,
2817 struct vm_fault *vmf)
fa5dc22f 2818{
b1d0e4f5 2819 pgoff_t pgoff;
fa5dc22f
RM
2820 struct page **pages;
2821
b1d0e4f5
NP
2822 /*
2823 * special mappings have no vm_file, and in that case, the mm
2824 * uses vm_pgoff internally. So we have to subtract it from here.
2825 * We are allowed to do this because we are the mm; do not copy
2826 * this code into drivers!
2827 */
2828 pgoff = vmf->pgoff - vma->vm_pgoff;
fa5dc22f 2829
b1d0e4f5
NP
2830 for (pages = vma->vm_private_data; pgoff && *pages; ++pages)
2831 pgoff--;
fa5dc22f
RM
2832
2833 if (*pages) {
2834 struct page *page = *pages;
2835 get_page(page);
b1d0e4f5
NP
2836 vmf->page = page;
2837 return 0;
fa5dc22f
RM
2838 }
2839
b1d0e4f5 2840 return VM_FAULT_SIGBUS;
fa5dc22f
RM
2841}
2842
2843/*
2844 * Having a close hook prevents vma merging regardless of flags.
2845 */
2846static void special_mapping_close(struct vm_area_struct *vma)
2847{
2848}
2849
f0f37e2f 2850static const struct vm_operations_struct special_mapping_vmops = {
fa5dc22f 2851 .close = special_mapping_close,
b1d0e4f5 2852 .fault = special_mapping_fault,
fa5dc22f
RM
2853};
2854
2855/*
2856 * Called with mm->mmap_sem held for writing.
2857 * Insert a new vma covering the given region, with the given flags.
2858 * Its pages are supplied by the given array of struct page *.
2859 * The array can be shorter than len >> PAGE_SHIFT if it's null-terminated.
2860 * The region past the last page supplied will always produce SIGBUS.
2861 * The array pointer and the pages it points to are assumed to stay alive
2862 * for as long as this mapping might exist.
2863 */
2864int install_special_mapping(struct mm_struct *mm,
2865 unsigned long addr, unsigned long len,
2866 unsigned long vm_flags, struct page **pages)
2867{
462e635e 2868 int ret;
fa5dc22f
RM
2869 struct vm_area_struct *vma;
2870
2871 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
2872 if (unlikely(vma == NULL))
2873 return -ENOMEM;
2874
5beb4930 2875 INIT_LIST_HEAD(&vma->anon_vma_chain);
fa5dc22f
RM
2876 vma->vm_mm = mm;
2877 vma->vm_start = addr;
2878 vma->vm_end = addr + len;
2879
2f98735c 2880 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
3ed75eb8 2881 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
fa5dc22f
RM
2882
2883 vma->vm_ops = &special_mapping_vmops;
2884 vma->vm_private_data = pages;
2885
462e635e
TO
2886 ret = insert_vm_struct(mm, vma);
2887 if (ret)
2888 goto out;
fa5dc22f
RM
2889
2890 mm->total_vm += len >> PAGE_SHIFT;
2891
cdd6c482 2892 perf_event_mmap(vma);
089dd79d 2893
fa5dc22f 2894 return 0;
462e635e
TO
2895
2896out:
2897 kmem_cache_free(vm_area_cachep, vma);
2898 return ret;
fa5dc22f 2899}
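A hedged sketch of how install_special_mapping() is typically used, modelled loosely on vdso-style setup but with entirely hypothetical names: the NULL-terminated page array must stay alive for the lifetime of the mapping, and the caller holds mmap_sem for writing as the comment above requires.

/* One real page plus the NULL terminator; must outlive the mapping. */
static struct page *example_special_pages[2];

static int example_install_special(struct mm_struct *mm, unsigned long addr)
{
	if (!example_special_pages[0]) {
		example_special_pages[0] = alloc_page(GFP_KERNEL | __GFP_ZERO);
		if (!example_special_pages[0])
			return -ENOMEM;
	}

	/* caller holds mm->mmap_sem for writing */
	return install_special_mapping(mm, addr, PAGE_SIZE,
				       VM_READ | VM_EXEC |
				       VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
				       example_special_pages);
}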
7906d00c
AA
2900
2901static DEFINE_MUTEX(mm_all_locks_mutex);
2902
454ed842 2903static void vm_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma)
7906d00c 2904{
bf181b9f 2905 if (!test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_node)) {
7906d00c
AA
2906 /*
2907 * The LSB of head.next can't change from under us
2908 * because we hold the mm_all_locks_mutex.
2909 */
572043c9 2910 down_write_nest_lock(&anon_vma->root->rwsem, &mm->mmap_sem);
7906d00c
AA
2911 /*
2912 * We can safely modify head.next after taking the
5a505085 2913 * anon_vma->root->rwsem. If some other vma in this mm shares
7906d00c
AA
2914 * the same anon_vma we won't take it again.
2915 *
2916 * No need of atomic instructions here, head.next
2917 * can't change from under us thanks to the
5a505085 2918 * anon_vma->root->rwsem.
7906d00c
AA
2919 */
2920 if (__test_and_set_bit(0, (unsigned long *)
bf181b9f 2921 &anon_vma->root->rb_root.rb_node))
7906d00c
AA
2922 BUG();
2923 }
2924}
2925
454ed842 2926static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping)
7906d00c
AA
2927{
2928 if (!test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) {
2929 /*
2930 * AS_MM_ALL_LOCKS can't change from under us because
2931 * we hold the mm_all_locks_mutex.
2932 *
2933 * Operations on ->flags have to be atomic because
2934 * even if AS_MM_ALL_LOCKS is stable thanks to the
2935 * mm_all_locks_mutex, there may be other cpus
2936 * changing other bitflags in parallel to us.
2937 */
2938 if (test_and_set_bit(AS_MM_ALL_LOCKS, &mapping->flags))
2939 BUG();
3d48ae45 2940 mutex_lock_nest_lock(&mapping->i_mmap_mutex, &mm->mmap_sem);
7906d00c
AA
2941 }
2942}
2943
2944/*
2945 * This operation locks against the VM for all pte/vma/mm related
2946 * operations that could ever happen on a certain mm. This includes
2947 * vmtruncate, try_to_unmap, and all page faults.
2948 *
2949 * The caller must take the mmap_sem in write mode before calling
2950 * mm_take_all_locks(). The caller isn't allowed to release the
2951 * mmap_sem until mm_drop_all_locks() returns.
2952 *
2953 * mmap_sem in write mode is required in order to block all operations
2954 * that could modify pagetables and free pages without need of
2955 * altering the vma layout (for example populate_range() with
2956 * nonlinear vmas). It's also needed in write mode to avoid new
2957 * anon_vmas to be associated with existing vmas.
2958 *
2959 * A single task can't take more than one mm_take_all_locks() in a row
2960 * or it would deadlock.
2961 *
bf181b9f 2962 * The LSB in anon_vma->rb_root.rb_node and the AS_MM_ALL_LOCKS bitflag in
7906d00c
AA
 2963 * mapping->flags avoid taking the same lock twice, if more than one
2964 * vma in this mm is backed by the same anon_vma or address_space.
2965 *
2966 * We can take all the locks in random order because the VM code
631b0cfd 2967 * taking i_mmap_mutex or anon_vma->rwsem outside the mmap_sem never
7906d00c
AA
2968 * takes more than one of them in a row. Secondly we're protected
2969 * against a concurrent mm_take_all_locks() by the mm_all_locks_mutex.
2970 *
 2971 * mm_take_all_locks() and mm_drop_all_locks() are expensive operations
 2972 * that may have to take thousands of locks.
2973 *
2974 * mm_take_all_locks() can fail if it's interrupted by signals.
2975 */
2976int mm_take_all_locks(struct mm_struct *mm)
2977{
2978 struct vm_area_struct *vma;
5beb4930 2979 struct anon_vma_chain *avc;
7906d00c
AA
2980
2981 BUG_ON(down_read_trylock(&mm->mmap_sem));
2982
2983 mutex_lock(&mm_all_locks_mutex);
2984
2985 for (vma = mm->mmap; vma; vma = vma->vm_next) {
2986 if (signal_pending(current))
2987 goto out_unlock;
7906d00c 2988 if (vma->vm_file && vma->vm_file->f_mapping)
454ed842 2989 vm_lock_mapping(mm, vma->vm_file->f_mapping);
7906d00c 2990 }
7cd5a02f
PZ
2991
2992 for (vma = mm->mmap; vma; vma = vma->vm_next) {
2993 if (signal_pending(current))
2994 goto out_unlock;
2995 if (vma->anon_vma)
5beb4930
RR
2996 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
2997 vm_lock_anon_vma(mm, avc->anon_vma);
7906d00c 2998 }
7cd5a02f 2999
584cff54 3000 return 0;
7906d00c
AA
3001
3002out_unlock:
584cff54
KC
3003 mm_drop_all_locks(mm);
3004 return -EINTR;
7906d00c
AA
3005}
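The calling convention spelled out above pairs mm_take_all_locks() with mm_drop_all_locks() inside a single mmap_sem write section; mmu notifier registration is the in-tree user of this. A condensed, hedged sketch of that pairing (the work in the middle is elided and the function name is made up):

static int example_freeze_mm(struct mm_struct *mm)
{
	int ret;

	down_write(&mm->mmap_sem);
	ret = mm_take_all_locks(mm);	/* -EINTR if a signal arrived */
	if (ret)
		goto out;		/* partial locks already dropped for us */

	/* ... every anon_vma and address_space of this mm is now locked ... */

	mm_drop_all_locks(mm);
out:
	up_write(&mm->mmap_sem);
	return ret;
}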
3006
3007static void vm_unlock_anon_vma(struct anon_vma *anon_vma)
3008{
bf181b9f 3009 if (test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_node)) {
7906d00c
AA
3010 /*
3011 * The LSB of head.next can't change to 0 from under
3012 * us because we hold the mm_all_locks_mutex.
3013 *
3014 * We must however clear the bitflag before unlocking
bf181b9f 3015 * the vma so the users using the anon_vma->rb_root will
7906d00c
AA
3016 * never see our bitflag.
3017 *
3018 * No need of atomic instructions here, head.next
3019 * can't change from under us until we release the
5a505085 3020 * anon_vma->root->rwsem.
7906d00c
AA
3021 */
3022 if (!__test_and_clear_bit(0, (unsigned long *)
bf181b9f 3023 &anon_vma->root->rb_root.rb_node))
7906d00c 3024 BUG();
cba48b98 3025 anon_vma_unlock(anon_vma);
7906d00c
AA
3026 }
3027}
3028
3029static void vm_unlock_mapping(struct address_space *mapping)
3030{
3031 if (test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) {
3032 /*
3033 * AS_MM_ALL_LOCKS can't change to 0 from under us
3034 * because we hold the mm_all_locks_mutex.
3035 */
3d48ae45 3036 mutex_unlock(&mapping->i_mmap_mutex);
7906d00c
AA
3037 if (!test_and_clear_bit(AS_MM_ALL_LOCKS,
3038 &mapping->flags))
3039 BUG();
3040 }
3041}
3042
3043/*
3044 * The mmap_sem cannot be released by the caller until
3045 * mm_drop_all_locks() returns.
3046 */
3047void mm_drop_all_locks(struct mm_struct *mm)
3048{
3049 struct vm_area_struct *vma;
5beb4930 3050 struct anon_vma_chain *avc;
7906d00c
AA
3051
3052 BUG_ON(down_read_trylock(&mm->mmap_sem));
3053 BUG_ON(!mutex_is_locked(&mm_all_locks_mutex));
3054
3055 for (vma = mm->mmap; vma; vma = vma->vm_next) {
3056 if (vma->anon_vma)
5beb4930
RR
3057 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
3058 vm_unlock_anon_vma(avc->anon_vma);
7906d00c
AA
3059 if (vma->vm_file && vma->vm_file->f_mapping)
3060 vm_unlock_mapping(vma->vm_file->f_mapping);
3061 }
3062
3063 mutex_unlock(&mm_all_locks_mutex);
3064}
8feae131
DH
3065
3066/*
3067 * initialise the VMA slab
3068 */
3069void __init mmap_init(void)
3070{
00a62ce9
KM
3071 int ret;
3072
3073 ret = percpu_counter_init(&vm_committed_as, 0);
3074 VM_BUG_ON(ret);
8feae131 3075}