Commit | Line | Data |
---|---|---|
ac41aaee | 1 | // SPDX-License-Identifier: GPL-2.0+ |
1da177e4 | 2 | /* |
1da177e4 LT |
3 | * flexible mmap layout support |
4 | * | |
5 | * Copyright 2003-2004 Red Hat Inc., Durham, North Carolina. | |
6 | * All Rights Reserved. | |
7 | * | |
1da177e4 LT |
8 | * Started by Ingo Molnar <mingo@elte.hu> |
9 | */ | |
10 | ||
ca21872e | 11 | #include <linux/elf-randomize.h> |
1da177e4 LT |
12 | #include <linux/personality.h> |
13 | #include <linux/mm.h> | |
638ad34a | 14 | #include <linux/mman.h> |
3f07c014 | 15 | #include <linux/sched/signal.h> |
01042607 | 16 | #include <linux/sched/mm.h> |
df1ca53c | 17 | #include <linux/random.h> |
048cd4e5 | 18 | #include <linux/compat.h> |
1f6b83e5 | 19 | #include <linux/security.h> |
ff24b07a | 20 | #include <asm/elf.h> |
1da177e4 | 21 | |
9046e401 HC |
22 | static unsigned long stack_maxrandom_size(void) |
23 | { | |
24 | if (!(current->flags & PF_RANDOMIZE)) | |
25 | return 0; | |
9046e401 HC |
26 | return STACK_RND_MASK << PAGE_SHIFT; |
27 | } | |
28 | ||
8f2af155 | 29 | static inline int mmap_is_legacy(struct rlimit *rlim_stack) |
1060f62e HC |
30 | { |
31 | if (current->personality & ADDR_COMPAT_LAYOUT) | |
32 | return 1; | |
8f2af155 | 33 | if (rlim_stack->rlim_cur == RLIM_INFINITY) |
1060f62e HC |
34 | return 1; |
35 | return sysctl_legacy_va_layout; | |
36 | } | |
37 | ||
2b68f6ca | 38 | unsigned long arch_mmap_rnd(void) |
df1ca53c | 39 | { |
a251c17a | 40 | return (get_random_u32() & MMAP_RND_MASK) << PAGE_SHIFT; |
df1ca53c HC |
41 | } |
42 | ||
8e89a356 | 43 | static unsigned long mmap_base_legacy(unsigned long rnd) |
7aba842f | 44 | { |
8e89a356 | 45 | return TASK_UNMAPPED_BASE + rnd; |
7aba842f HC |
46 | } |
47 | ||
8f2af155 KC |
48 | static inline unsigned long mmap_base(unsigned long rnd, |
49 | struct rlimit *rlim_stack) | |
1da177e4 | 50 | { |
8f2af155 | 51 | unsigned long gap = rlim_stack->rlim_cur; |
a0308c13 MS |
52 | unsigned long pad = stack_maxrandom_size() + stack_guard_gap; |
53 | unsigned long gap_min, gap_max; | |
54 | ||
55 | /* Values close to RLIM_INFINITY can overflow. */ | |
56 | if (gap + pad > gap) | |
57 | gap += pad; | |
58 | ||
59 | /* | |
60 | * Top of mmap area (just below the process stack). | |
f2f47d0e | 61 | * Leave at least a ~128 MB hole. |
a0308c13 | 62 | */ |
f2f47d0e | 63 | gap_min = SZ_128M; |
a0308c13 MS |
64 | gap_max = (STACK_TOP / 6) * 5; |
65 | ||
66 | if (gap < gap_min) | |
67 | gap = gap_min; | |
68 | else if (gap > gap_max) | |
69 | gap = gap_max; | |
1da177e4 | 70 | |
a0308c13 | 71 | return PAGE_ALIGN(STACK_TOP - gap - rnd); |
1da177e4 LT |
72 | } |
73 | ||
86f48f92 SS |
74 | static int get_align_mask(struct file *filp, unsigned long flags) |
75 | { | |
76 | if (!(current->flags & PF_RANDOMIZE)) | |
77 | return 0; | |
78 | if (filp || (flags & MAP_SHARED)) | |
79 | return MMAP_ALIGN_MASK << PAGE_SHIFT; | |
80 | return 0; | |
81 | } | |
82 | ||
712fa5f2 AG |
83 | unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, |
84 | unsigned long len, unsigned long pgoff, | |
85 | unsigned long flags) | |
1f6b83e5 MS |
86 | { |
87 | struct mm_struct *mm = current->mm; | |
88 | struct vm_area_struct *vma; | |
89 | struct vm_unmapped_area_info info; | |
1f6b83e5 | 90 | |
9b11c791 | 91 | if (len > TASK_SIZE - mmap_min_addr) |
1f6b83e5 MS |
92 | return -ENOMEM; |
93 | ||
94 | if (flags & MAP_FIXED) | |
9b11c791 | 95 | goto check_asce_limit; |
1f6b83e5 MS |
96 | |
97 | if (addr) { | |
98 | addr = PAGE_ALIGN(addr); | |
99 | vma = find_vma(mm, addr); | |
9b11c791 | 100 | if (TASK_SIZE - len >= addr && addr >= mmap_min_addr && |
1be7107f | 101 | (!vma || addr + len <= vm_start_gap(vma))) |
9b11c791 | 102 | goto check_asce_limit; |
1f6b83e5 MS |
103 | } |
104 | ||
1f6b83e5 MS |
105 | info.flags = 0; |
106 | info.length = len; | |
107 | info.low_limit = mm->mmap_base; | |
9b11c791 | 108 | info.high_limit = TASK_SIZE; |
86f48f92 | 109 | info.align_mask = get_align_mask(filp, flags); |
1f6b83e5 | 110 | info.align_offset = pgoff << PAGE_SHIFT; |
9b11c791 | 111 | addr = vm_unmapped_area(&info); |
712fa5f2 | 112 | if (offset_in_page(addr)) |
9b11c791 MS |
113 | return addr; |
114 | ||
115 | check_asce_limit: | |
712fa5f2 | 116 | return check_asce_limit(mm, addr, len); |
1f6b83e5 MS |
117 | } |
118 | ||
712fa5f2 AG |
119 | unsigned long arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr, |
120 | unsigned long len, unsigned long pgoff, | |
121 | unsigned long flags) | |
1f6b83e5 MS |
122 | { |
123 | struct vm_area_struct *vma; | |
124 | struct mm_struct *mm = current->mm; | |
1f6b83e5 | 125 | struct vm_unmapped_area_info info; |
1f6b83e5 MS |
126 | |
127 | /* requested length too big for entire address space */ | |
9b11c791 | 128 | if (len > TASK_SIZE - mmap_min_addr) |
1f6b83e5 MS |
129 | return -ENOMEM; |
130 | ||
131 | if (flags & MAP_FIXED) | |
9b11c791 | 132 | goto check_asce_limit; |
1f6b83e5 MS |
133 | |
134 | /* requesting a specific address */ | |
135 | if (addr) { | |
136 | addr = PAGE_ALIGN(addr); | |
137 | vma = find_vma(mm, addr); | |
9b11c791 | 138 | if (TASK_SIZE - len >= addr && addr >= mmap_min_addr && |
1be7107f | 139 | (!vma || addr + len <= vm_start_gap(vma))) |
9b11c791 | 140 | goto check_asce_limit; |
1f6b83e5 MS |
141 | } |
142 | ||
1f6b83e5 MS |
143 | info.flags = VM_UNMAPPED_AREA_TOPDOWN; |
144 | info.length = len; | |
6b008640 | 145 | info.low_limit = PAGE_SIZE; |
1f6b83e5 | 146 | info.high_limit = mm->mmap_base; |
86f48f92 | 147 | info.align_mask = get_align_mask(filp, flags); |
1f6b83e5 MS |
148 | info.align_offset = pgoff << PAGE_SHIFT; |
149 | addr = vm_unmapped_area(&info); | |
150 | ||
151 | /* | |
152 | * A failed mmap() very likely causes application failure, | |
153 | * so fall back to the bottom-up function here. This scenario | |
154 | * can happen with large stack limits and large mmap() | |
155 | * allocations. | |
156 | */ | |
712fa5f2 | 157 | if (offset_in_page(addr)) { |
1f6b83e5 MS |
158 | VM_BUG_ON(addr != -ENOMEM); |
159 | info.flags = 0; | |
160 | info.low_limit = TASK_UNMAPPED_BASE; | |
9b11c791 | 161 | info.high_limit = TASK_SIZE; |
1f6b83e5 | 162 | addr = vm_unmapped_area(&info); |
712fa5f2 | 163 | if (offset_in_page(addr)) |
9b11c791 | 164 | return addr; |
1f6b83e5 MS |
165 | } |
166 | ||
9b11c791 | 167 | check_asce_limit: |
712fa5f2 | 168 | return check_asce_limit(mm, addr, len); |
6252d702 | 169 | } |
9b11c791 | 170 | |
6252d702 MS |
171 | /* |
172 | * This function, called very early during the creation of a new | |
173 | * process VM image, sets up which VM layout function to use: | |
174 | */ | |
8f2af155 | 175 | void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack) |
6252d702 | 176 | { |
8e89a356 KC |
177 | unsigned long random_factor = 0UL; |
178 | ||
179 | if (current->flags & PF_RANDOMIZE) | |
2b68f6ca | 180 | random_factor = arch_mmap_rnd(); |
8e89a356 | 181 | |
6252d702 MS |
182 | /* |
183 | * Fall back to the standard layout if the personality | |
184 | * bit is set, or if the expected stack growth is unlimited: | |
185 | */ | |
8f2af155 | 186 | if (mmap_is_legacy(rlim_stack)) { |
8e89a356 | 187 | mm->mmap_base = mmap_base_legacy(random_factor); |
9b11c791 | 188 | mm->get_unmapped_area = arch_get_unmapped_area; |
6252d702 | 189 | } else { |
8f2af155 | 190 | mm->mmap_base = mmap_base(random_factor, rlim_stack); |
9b11c791 | 191 | mm->get_unmapped_area = arch_get_unmapped_area_topdown; |
6252d702 MS |
192 | } |
193 | } | |
/*
 * VM_* access-bit combinations mapped to s390 page protections.
 * Private (non-shared) writable mappings get read-only protections
 * here; copy-on-write provides the writable copy. Consumed by the
 * generic vm_get_page_prot() emitted below.
 */
static const pgprot_t protection_map[16] = {
	[VM_NONE]					= PAGE_NONE,
	[VM_READ]					= PAGE_RO,
	[VM_WRITE]					= PAGE_RO,
	[VM_WRITE | VM_READ]				= PAGE_RO,
	[VM_EXEC]					= PAGE_RX,
	[VM_EXEC | VM_READ]				= PAGE_RX,
	[VM_EXEC | VM_WRITE]				= PAGE_RX,
	[VM_EXEC | VM_WRITE | VM_READ]			= PAGE_RX,
	[VM_SHARED]					= PAGE_NONE,
	[VM_SHARED | VM_READ]				= PAGE_RO,
	[VM_SHARED | VM_WRITE]				= PAGE_RW,
	[VM_SHARED | VM_WRITE | VM_READ]		= PAGE_RW,
	[VM_SHARED | VM_EXEC]				= PAGE_RX,
	[VM_SHARED | VM_EXEC | VM_READ]			= PAGE_RX,
	[VM_SHARED | VM_EXEC | VM_WRITE]		= PAGE_RWX,
	[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ]	= PAGE_RWX
};
DECLARE_VM_GET_PAGE_PROT