// SPDX-License-Identifier: GPL-2.0
#include <linux/compat.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/syscalls.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/utsname.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/uaccess.h>
#include <linux/elf.h>

#include <asm/elf.h>
#include <asm/ia32.h>

/*
 * Align a virtual address to avoid aliasing in the I$ on AMD F15h.
 */
static unsigned long get_align_mask(void)
{
	/* handle 32- and 64-bit case with a single conditional */
	if (va_align.flags < 0 || !(va_align.flags & (2 - mmap_is_ia32())))
		return 0;

	if (!(current->flags & PF_RANDOMIZE))
		return 0;

	return va_align.mask;
}

/*
 * To avoid aliasing in the I$ on AMD F15h, the bits defined by
 * va_align.bits, [12:upper_bit), are set to a random value instead of
 * being zeroed. This random value is computed once per boot. This form
 * of ASLR is known as "per-boot ASLR".
 *
 * To achieve this, the random value is added to the info.align_offset
 * value before calling vm_unmapped_area() or ORed directly into the
 * address.
 */
static unsigned long get_align_bits(void)
{
	return va_align.bits & get_align_mask();
}

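/*
 * Parse the "align_va_addr=" kernel command-line option, e.g.
 * "align_va_addr=on". Accepted values: "32", "64", "on", "off".
 */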
static int __init control_va_addr_alignment(char *str)
{
	/* guard against enabling this on other CPU families */
	if (va_align.flags < 0)
		return 1;

	if (*str == 0)
		return 1;

	if (!strcmp(str, "32"))
		va_align.flags = ALIGN_VA_32;
	else if (!strcmp(str, "64"))
		va_align.flags = ALIGN_VA_64;
	else if (!strcmp(str, "off"))
		va_align.flags = 0;
	else if (!strcmp(str, "on"))
		va_align.flags = ALIGN_VA_32 | ALIGN_VA_64;
	else
		pr_warn("invalid option value: 'align_va_addr=%s'\n", str);

	return 1;
}
__setup("align_va_addr=", control_va_addr_alignment);

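/*
 * The x86-64 mmap() syscall entry: the offset is passed in bytes and
 * must be page-aligned; it is converted to a page offset for
 * ksys_mmap_pgoff(). A userspace call such as
 *
 *	mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *	     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *
 * ends up here.
 */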
SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
		unsigned long, prot, unsigned long, flags,
		unsigned long, fd, unsigned long, off)
{
	if (off & ~PAGE_MASK)
		return -EINVAL;

	return ksys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
}

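/*
 * Pick the [begin, end) window searched for a free area: MAP_32BIT
 * mappings get a 1GB window below 2GB, everything else gets the range
 * from the mmap base up to the task size.
 */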
static void find_start_end(unsigned long addr, unsigned long flags,
		unsigned long *begin, unsigned long *end)
{
	if (!in_32bit_syscall() && (flags & MAP_32BIT)) {
		/* This is usually needed to map code in the small
		   code model, so it needs to be in the first 31 bits.
		   Limit it to that. This means we need to move the
		   unmapped base down for this case. This can give
		   conflicts with the heap, but we assume that glibc
		   malloc knows how to fall back to mmap. Give it 1GB
		   of playground for now. -AK */
		*begin = 0x40000000;
		*end = 0x80000000;
		if (current->flags & PF_RANDOMIZE) {
			*begin = randomize_page(*begin, 0x02000000);
		}
		return;
	}

	*begin = get_mmap_base(1);
	if (in_32bit_syscall())
		*end = task_size_32bit();
	else
		*end = task_size_64bit(addr > DEFAULT_MAP_WINDOW);
}

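/*
 * Shadow stack mappings get an extra guard page in front of them, so
 * request a PAGE_SIZE start gap for VM_SHADOW_STACK.
 */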
static inline unsigned long stack_guard_placement(vm_flags_t vm_flags)
{
	if (vm_flags & VM_SHADOW_STACK)
		return PAGE_SIZE;

	return 0;
}

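/*
 * Bottom-up search for an unmapped area: honour MAP_FIXED and a usable
 * hint address first, then let vm_unmapped_area() scan the window
 * computed by find_start_end().
 */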
unsigned long
arch_get_unmapped_area_vmflags(struct file *filp, unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags, vm_flags_t vm_flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct vm_unmapped_area_info info = {};
	unsigned long begin, end;

	if (flags & MAP_FIXED)
		return addr;

	find_start_end(addr, flags, &begin, &end);

	if (len > end)
		return -ENOMEM;

	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (end - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

	info.length = len;
	info.low_limit = begin;
	info.high_limit = end;
	info.align_offset = pgoff << PAGE_SHIFT;
	info.start_gap = stack_guard_placement(vm_flags);
	if (filp) {
		info.align_mask = get_align_mask();
		info.align_offset += get_align_bits();
	}
	return vm_unmapped_area(&info);
}

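/*
 * Top-down search used for the default (non-legacy) mmap layout; falls
 * back to the bottom-up variant when the top-down search fails.
 */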
unsigned long
arch_get_unmapped_area_topdown_vmflags(struct file *filp, unsigned long addr0,
		unsigned long len, unsigned long pgoff,
		unsigned long flags, vm_flags_t vm_flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;
	struct vm_unmapped_area_info info = {};

	/* requested length too big for entire address space */
	if (len > TASK_SIZE)
		return -ENOMEM;

	/* No address checking. See comment at mmap_address_hint_valid() */
	if (flags & MAP_FIXED)
		return addr;

	/* for MAP_32BIT mappings we force the legacy mmap base */
	if (!in_32bit_syscall() && (flags & MAP_32BIT))
		goto bottomup;

	/* requesting a specific address */
	if (addr) {
		addr &= PAGE_MASK;
		if (!mmap_address_hint_valid(addr, len))
			goto get_unmapped_area;

		vma = find_vma(mm, addr);
		if (!vma || addr + len <= vm_start_gap(vma))
			return addr;
	}
get_unmapped_area:

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	if (!in_32bit_syscall() && (flags & MAP_ABOVE4G))
		info.low_limit = SZ_4G;
	else
		info.low_limit = PAGE_SIZE;

	info.high_limit = get_mmap_base(0);
	info.start_gap = stack_guard_placement(vm_flags);

	/*
	 * If the hint address is above DEFAULT_MAP_WINDOW, look for an
	 * unmapped area in the full address space.
	 *
	 * The !in_32bit_syscall() check avoids high addresses for x32
	 * (and makes this a no-op on native i386).
	 */
	if (addr > DEFAULT_MAP_WINDOW && !in_32bit_syscall())
		info.high_limit += TASK_SIZE_MAX - DEFAULT_MAP_WINDOW;

	info.align_offset = pgoff << PAGE_SHIFT;
	if (filp) {
		info.align_mask = get_align_mask();
		info.align_offset += get_align_bits();
	}
	addr = vm_unmapped_area(&info);
	if (!(addr & ~PAGE_MASK))
		return addr;
	VM_BUG_ON(addr != -ENOMEM);

bottomup:
	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	return arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
}
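
/*
 * Wrappers for the common arch_get_unmapped_area*() entry points; they
 * pass vm_flags == 0 to the *_vmflags() variants above.
 */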
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	return arch_get_unmapped_area_vmflags(filp, addr, len, pgoff, flags, 0);
}

unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr,
		const unsigned long len, const unsigned long pgoff,
		const unsigned long flags)
{
	return arch_get_unmapped_area_topdown_vmflags(filp, addr, len, pgoff, flags, 0);
}