/*
 * mpx.c - Memory Protection eXtensions
 *
 * Copyright (c) 2014, Intel Corporation.
 * Qiaowei Ren <qiaowei.ren@intel.com>
 * Dave Hansen <dave.hansen@intel.com>
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/sched/sysctl.h>

#include <asm/i387.h>
#include <asm/insn.h>
#include <asm/mman.h>
#include <asm/mmu_context.h>
#include <asm/mpx.h>
#include <asm/processor.h>
#include <asm/fpu-internal.h>

static const char *mpx_mapping_name(struct vm_area_struct *vma)
{
	return "[mpx]";
}

static struct vm_operations_struct mpx_vma_ops = {
	.name = mpx_mapping_name,
};

static int is_mpx_vma(struct vm_area_struct *vma)
{
	return (vma->vm_ops == &mpx_vma_ops);
}

/*
 * This is really a simplified "vm_mmap". It only handles MPX
 * bounds tables (the bounds directory is user-allocated).
 *
 * Later on, we use the vma->vm_ops to uniquely identify these
 * VMAs.
 */
static unsigned long mpx_mmap(unsigned long len)
{
	unsigned long ret;
	unsigned long addr, pgoff;
	struct mm_struct *mm = current->mm;
	vm_flags_t vm_flags;
	struct vm_area_struct *vma;

	/* Only bounds table and bounds directory can be allocated here */
	if (len != MPX_BD_SIZE_BYTES && len != MPX_BT_SIZE_BYTES)
		return -EINVAL;

	down_write(&mm->mmap_sem);

	/* Too many mappings? */
	if (mm->map_count > sysctl_max_map_count) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * Obtain the address to map to. We verify (or select) it and ensure
	 * that it represents a valid section of the address space.
	 */
	addr = get_unmapped_area(NULL, 0, len, 0, MAP_ANONYMOUS | MAP_PRIVATE);
	if (addr & ~PAGE_MASK) {
		ret = addr;
		goto out;
	}

	vm_flags = VM_READ | VM_WRITE | VM_MPX |
			mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;

	/* Set pgoff according to addr for anon_vma */
	pgoff = addr >> PAGE_SHIFT;

	ret = mmap_region(NULL, addr, len, vm_flags, pgoff);
	if (IS_ERR_VALUE(ret))
		goto out;

	vma = find_vma(mm, ret);
	if (!vma) {
		ret = -ENOMEM;
		goto out;
	}
	vma->vm_ops = &mpx_vma_ops;

	if (vm_flags & VM_LOCKED) {
		up_write(&mm->mmap_sem);
		mm_populate(ret, len);
		return ret;
	}

out:
	up_write(&mm->mmap_sem);
	return ret;
}

enum reg_type {
	REG_TYPE_RM = 0,
	REG_TYPE_INDEX,
	REG_TYPE_BASE,
};

static int get_reg_offset(struct insn *insn, struct pt_regs *regs,
			  enum reg_type type)
{
	int regno = 0;

	static const int regoff[] = {
		offsetof(struct pt_regs, ax),
		offsetof(struct pt_regs, cx),
		offsetof(struct pt_regs, dx),
		offsetof(struct pt_regs, bx),
		offsetof(struct pt_regs, sp),
		offsetof(struct pt_regs, bp),
		offsetof(struct pt_regs, si),
		offsetof(struct pt_regs, di),
#ifdef CONFIG_X86_64
		offsetof(struct pt_regs, r8),
		offsetof(struct pt_regs, r9),
		offsetof(struct pt_regs, r10),
		offsetof(struct pt_regs, r11),
		offsetof(struct pt_regs, r12),
		offsetof(struct pt_regs, r13),
		offsetof(struct pt_regs, r14),
		offsetof(struct pt_regs, r15),
#endif
	};
	int nr_registers = ARRAY_SIZE(regoff);
	/*
	 * Don't decode a 32-bit instruction as one that reads
	 * a 64-bit-only register.
	 */
	if (IS_ENABLED(CONFIG_X86_64) && !insn->x86_64)
		nr_registers -= 8;

	switch (type) {
	case REG_TYPE_RM:
		regno = X86_MODRM_RM(insn->modrm.value);
		if (X86_REX_B(insn->rex_prefix.value) == 1)
			regno += 8;
		break;

	case REG_TYPE_INDEX:
		regno = X86_SIB_INDEX(insn->sib.value);
		if (X86_REX_X(insn->rex_prefix.value) == 1)
			regno += 8;
		break;

	case REG_TYPE_BASE:
		regno = X86_SIB_BASE(insn->sib.value);
		if (X86_REX_B(insn->rex_prefix.value) == 1)
			regno += 8;
		break;

	default:
		pr_err("invalid register type");
		BUG();
		break;
	}

	/* '>=', not '>': regno == nr_registers would index past the array */
	if (regno >= nr_registers) {
		WARN_ONCE(1, "decoded an instruction with an invalid register");
		return -EINVAL;
	}
	return regoff[regno];
}

/*
 * Return the address being referenced by the instruction.
 * For rm==3, return the content of the rm register.
 * For rm!=3, calculate the address using the SIB byte and
 * the displacement.
 */
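/*
 * Worked example (illustrative, not part of the original source): for
 * a memory operand like (%rax,%rcx,4) with a displacement, the decoder
 * below computes
 *
 *	addr = regs->ax + regs->cx * (1 << 2) + insn->displacement.value;
 *
 * i.e. base + index * 2^scale + displacement, with the base and index
 * registers fetched from 'regs' via the offsets returned by
 * get_reg_offset().
 */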
static void __user *mpx_get_addr_ref(struct insn *insn, struct pt_regs *regs)
{
	unsigned long addr, base, indx;
	int addr_offset, base_offset, indx_offset;
	insn_byte_t sib;

	insn_get_modrm(insn);
	insn_get_sib(insn);
	sib = insn->sib.value;

	if (X86_MODRM_MOD(insn->modrm.value) == 3) {
		addr_offset = get_reg_offset(insn, regs, REG_TYPE_RM);
		if (addr_offset < 0)
			goto out_err;
		addr = regs_get_register(regs, addr_offset);
	} else {
		if (insn->sib.nbytes) {
			base_offset = get_reg_offset(insn, regs, REG_TYPE_BASE);
			if (base_offset < 0)
				goto out_err;

			indx_offset = get_reg_offset(insn, regs, REG_TYPE_INDEX);
			if (indx_offset < 0)
				goto out_err;

			base = regs_get_register(regs, base_offset);
			indx = regs_get_register(regs, indx_offset);
			addr = base + indx * (1 << X86_SIB_SCALE(sib));
		} else {
			addr_offset = get_reg_offset(insn, regs, REG_TYPE_RM);
			if (addr_offset < 0)
				goto out_err;
			addr = regs_get_register(regs, addr_offset);
		}
		addr += insn->displacement.value;
	}
	return (void __user *)addr;
out_err:
	return (void __user *)-1;
}
215 | ||
216 | static int mpx_insn_decode(struct insn *insn, | |
217 | struct pt_regs *regs) | |
218 | { | |
219 | unsigned char buf[MAX_INSN_SIZE]; | |
220 | int x86_64 = !test_thread_flag(TIF_IA32); | |
221 | int not_copied; | |
222 | int nr_copied; | |
223 | ||
224 | not_copied = copy_from_user(buf, (void __user *)regs->ip, sizeof(buf)); | |
225 | nr_copied = sizeof(buf) - not_copied; | |
226 | /* | |
227 | * The decoder _should_ fail nicely if we pass it a short buffer. | |
228 | * But, let's not depend on that implementation detail. If we | |
229 | * did not get anything, just error out now. | |
230 | */ | |
231 | if (!nr_copied) | |
232 | return -EFAULT; | |
233 | insn_init(insn, buf, nr_copied, x86_64); | |
234 | insn_get_length(insn); | |
235 | /* | |
236 | * copy_from_user() tries to get as many bytes as we could see in | |
237 | * the largest possible instruction. If the instruction we are | |
238 | * after is shorter than that _and_ we attempt to copy from | |
239 | * something unreadable, we might get a short read. This is OK | |
240 | * as long as the read did not stop in the middle of the | |
241 | * instruction. Check to see if we got a partial instruction. | |
242 | */ | |
243 | if (nr_copied < insn->length) | |
244 | return -EFAULT; | |
245 | ||
246 | insn_get_opcode(insn); | |
247 | /* | |
248 | * We only _really_ need to decode bndcl/bndcn/bndcu | |
249 | * Error out on anything else. | |
250 | */ | |
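	/*
	 * For reference (from the Intel SDM, not the original comment):
	 * all MPX instructions share the 0x0f 0x1a / 0x0f 0x1b opcode
	 * space and are distinguished by mandatory prefixes, e.g.
	 * BNDCL is f3 0f 1a and BNDCU is f2 0f 1a, so matching on the
	 * two opcode bytes below is sufficient here.
	 */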
	if (insn->opcode.bytes[0] != 0x0f)
		goto bad_opcode;
	if ((insn->opcode.bytes[1] != 0x1a) &&
	    (insn->opcode.bytes[1] != 0x1b))
		goto bad_opcode;

	return 0;
bad_opcode:
	return -EINVAL;
}
261 | ||
262 | /* | |
263 | * If a bounds overflow occurs then a #BR is generated. This | |
264 | * function decodes MPX instructions to get violation address | |
265 | * and set this address into extended struct siginfo. | |
266 | * | |
267 | * Note that this is not a super precise way of doing this. | |
268 | * Userspace could have, by the time we get here, written | |
269 | * anything it wants in to the instructions. We can not | |
270 | * trust anything about it. They might not be valid | |
271 | * instructions or might encode invalid registers, etc... | |
272 | * | |
273 | * The caller is expected to kfree() the returned siginfo_t. | |
274 | */ | |
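/*
 * (Context, not in the original comment: in this era of the kernel
 * the caller is the #BR exception handler, do_bounds() in
 * arch/x86/kernel/traps.c, which attaches the returned siginfo to
 * the SIGSEGV it delivers.)
 */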
siginfo_t *mpx_generate_siginfo(struct pt_regs *regs,
				struct xsave_struct *xsave_buf)
{
	struct bndreg *bndregs, *bndreg;
	siginfo_t *info = NULL;
	struct insn insn;
	uint8_t bndregno;
	int err;

	err = mpx_insn_decode(&insn, regs);
	if (err)
		goto err_out;

	/*
	 * We know at this point that we are only dealing with
	 * MPX instructions.
	 */
	insn_get_modrm(&insn);
	bndregno = X86_MODRM_REG(insn.modrm.value);
	if (bndregno > 3) {
		err = -EINVAL;
		goto err_out;
	}
	/* get the bndregs _area_ of the xsave structure */
	bndregs = get_xsave_addr(xsave_buf, XSTATE_BNDREGS);
	if (!bndregs) {
		err = -EINVAL;
		goto err_out;
	}
	/* now go select the individual register in the set of 4 */
	bndreg = &bndregs[bndregno];

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		err = -ENOMEM;
		goto err_out;
	}
	/*
	 * The registers are always 64-bit, but the upper 32
	 * bits are ignored in 32-bit mode. Also, note that the
	 * upper bounds are architecturally represented in 1's
	 * complement form.
	 *
	 * The 'unsigned long' cast is because the compiler
	 * complains when casting from integers to different-size
	 * pointers.
	 */
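	/*
	 * Illustrative example (not from the original source): a bndreg
	 * upper bound of 0x401fff is held by the hardware as ~0x401fff,
	 * so the ~bndreg->upper_bound below recovers the value that
	 * userspace actually set.
	 */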
	info->si_lower = (void __user *)(unsigned long)bndreg->lower_bound;
	info->si_upper = (void __user *)(unsigned long)~bndreg->upper_bound;
	info->si_addr_lsb = 0;
	info->si_signo = SIGSEGV;
	info->si_errno = 0;
	info->si_code = SEGV_BNDERR;
	info->si_addr = mpx_get_addr_ref(&insn, regs);
	/*
	 * We were not able to extract an address from the instruction,
	 * probably because there was something invalid in it.
	 */
	if (info->si_addr == (void *)-1) {
		err = -EINVAL;
		goto err_out;
	}
	return info;
err_out:
	/* info might be NULL, but kfree() handles that */
	kfree(info);
	return ERR_PTR(err);
}

static __user void *task_get_bounds_dir(struct task_struct *tsk)
{
	struct bndcsr *bndcsr;

	if (!cpu_feature_enabled(X86_FEATURE_MPX))
		return MPX_INVALID_BOUNDS_DIR;

	/*
	 * 32-bit binaries on 64-bit kernels are currently
	 * unsupported.
	 */
	if (IS_ENABLED(CONFIG_X86_64) && test_thread_flag(TIF_IA32))
		return MPX_INVALID_BOUNDS_DIR;
	/*
	 * The bounds directory pointer is stored in a register
	 * only accessible if we first do an xsave.
	 */
	fpu_save_init(&tsk->thread.fpu);
	bndcsr = get_xsave_addr(&tsk->thread.fpu.state->xsave, XSTATE_BNDCSR);
	if (!bndcsr)
		return MPX_INVALID_BOUNDS_DIR;

	/*
	 * Make sure the register looks valid by checking the
	 * enable bit.
	 */
	if (!(bndcsr->bndcfgu & MPX_BNDCFG_ENABLE_FLAG))
		return MPX_INVALID_BOUNDS_DIR;

	/*
	 * Lastly, mask off the low bits used for configuration
	 * flags, and return the address of the bounds directory.
	 */
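	/*
	 * For reference (Intel SDM layout, not the original comment):
	 * BNDCFGU holds the enable flag in bit 0, BNDPRESERVE in bit 1,
	 * and the 4KB-aligned bounds directory base in the upper bits,
	 * which is why masking with MPX_BNDCFG_ADDR_MASK is enough to
	 * recover the base address.
	 */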
	return (void __user *)(unsigned long)
		(bndcsr->bndcfgu & MPX_BNDCFG_ADDR_MASK);
}

int mpx_enable_management(struct task_struct *tsk)
{
	void __user *bd_base = MPX_INVALID_BOUNDS_DIR;
	struct mm_struct *mm = tsk->mm;
	int ret = 0;

	/*
	 * The userspace runtime will be responsible for allocating the
	 * bounds directory. It will then save the base of the bounds
	 * directory into the XSAVE/XRSTOR save area and enable MPX
	 * through the XRSTOR instruction.
	 *
	 * fpu_xsave() is expected to be very expensive. Storing the bounds
	 * directory here means that we do not have to do xsave in the unmap
	 * path; we can just use mm->bd_addr instead.
	 */
	bd_base = task_get_bounds_dir(tsk);
	down_write(&mm->mmap_sem);
	mm->bd_addr = bd_base;
	if (mm->bd_addr == MPX_INVALID_BOUNDS_DIR)
		ret = -ENXIO;

	up_write(&mm->mmap_sem);
	return ret;
}

int mpx_disable_management(struct task_struct *tsk)
{
	struct mm_struct *mm = current->mm;

	if (!cpu_feature_enabled(X86_FEATURE_MPX))
		return -ENXIO;

	down_write(&mm->mmap_sem);
	mm->bd_addr = MPX_INVALID_BOUNDS_DIR;
	up_write(&mm->mmap_sem);
	return 0;
}

/*
 * In 32-bit mode, the bounds directory (MPX_BD_SIZE_BYTES) is 4MB and
 * each bounds table (MPX_BT_SIZE_BYTES) is 16KB. In 64-bit mode, the
 * bounds directory is 2GB and each bounds table is 4MB.
 */
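/*
 * Where those sizes come from (illustrative arithmetic based on the
 * MPX layout in the Intel SDM, not part of the original comment): in
 * 64-bit mode a pointer's bits 47:20 index the directory, so it has
 * 2^28 eight-byte entries (2GB), and bits 19:3 index a table, so each
 * table has 2^17 thirty-two-byte entries (4MB). In 32-bit mode bits
 * 31:12 index the directory (2^20 * 4 bytes = 4MB) and bits 11:2
 * index a table (2^10 * 16 bytes = 16KB).
 */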
static int allocate_bt(long __user *bd_entry)
{
	unsigned long expected_old_val = 0;
	unsigned long actual_old_val = 0;
	unsigned long bt_addr;
	int ret = 0;

	/*
	 * Carve the virtual space out of userspace for the new
	 * bounds table:
	 */
	bt_addr = mpx_mmap(MPX_BT_SIZE_BYTES);
	if (IS_ERR((void *)bt_addr))
		return PTR_ERR((void *)bt_addr);
	/*
	 * Set the valid flag (kinda like _PAGE_PRESENT in a pte)
	 */
	bt_addr = bt_addr | MPX_BD_ENTRY_VALID_FLAG;

	/*
	 * Go poke the address of the new bounds table in to the
	 * bounds directory entry out in userspace memory. Note:
	 * we may race with another CPU instantiating the same table.
	 * In that case the cmpxchg will see an unexpected
	 * 'actual_old_val'.
	 *
	 * This can fault, but that's OK because we do not hold
	 * mmap_sem at this point, unlike some of the other parts
	 * of the MPX code that have to pagefault_disable().
	 */
	ret = user_atomic_cmpxchg_inatomic(&actual_old_val, bd_entry,
					   expected_old_val, bt_addr);
	if (ret)
		goto out_unmap;

	/*
	 * The user_atomic_cmpxchg_inatomic() will only return nonzero
	 * for faults, *not* if the cmpxchg itself fails. Now we must
	 * verify that the cmpxchg itself completed successfully.
	 */
	/*
	 * We expected an empty 'expected_old_val', but instead found
	 * an apparently valid entry. Assume we raced with another
	 * thread to instantiate this table and declare success.
	 */
	if (actual_old_val & MPX_BD_ENTRY_VALID_FLAG) {
		ret = 0;
		goto out_unmap;
	}
	/*
	 * We found a non-empty bd_entry but it did not have the
	 * VALID_FLAG set. Return an error which will result in
	 * a SEGV since this probably means that somebody scribbled
	 * some invalid data in to a bounds table.
	 */
	if (expected_old_val != actual_old_val) {
		ret = -EINVAL;
		goto out_unmap;
	}
	return 0;
out_unmap:
	vm_munmap(bt_addr & MPX_BT_ADDR_MASK, MPX_BT_SIZE_BYTES);
	return ret;
}

/*
 * When a BNDSTX instruction attempts to save bounds to a bounds
 * table, it will first attempt to look up the table in the
 * first-level bounds directory. If it does not find a table in
 * the directory, a #BR is generated and we get here in order to
 * allocate a new table.
 *
 * In 32-bit mode, the size of the BD is 4MB, and the size of each
 * bounds table is 16KB. In 64-bit mode, the size of the BD is 2GB,
 * and the size of each bounds table is 4MB.
 */
static int do_mpx_bt_fault(struct xsave_struct *xsave_buf)
{
	unsigned long bd_entry, bd_base;
	struct bndcsr *bndcsr;

	bndcsr = get_xsave_addr(xsave_buf, XSTATE_BNDCSR);
	if (!bndcsr)
		return -EINVAL;
	/*
	 * Mask off the preserve and enable bits
	 */
	bd_base = bndcsr->bndcfgu & MPX_BNDCFG_ADDR_MASK;
	/*
	 * The hardware provides the address of the missing or invalid
	 * entry via BNDSTATUS, so we don't have to go look it up.
	 */
	bd_entry = bndcsr->bndstatus & MPX_BNDSTA_ADDR_MASK;
	/*
	 * Make sure the directory entry is within where we think
	 * the directory is.
	 */
	if ((bd_entry < bd_base) ||
	    (bd_entry >= bd_base + MPX_BD_SIZE_BYTES))
		return -EINVAL;

	return allocate_bt((long __user *)bd_entry);
}

int mpx_handle_bd_fault(struct xsave_struct *xsave_buf)
{
	/*
	 * Userspace never asked us to manage the bounds tables,
	 * so refuse to help.
	 */
	if (!kernel_managing_mpx_tables(current->mm))
		return -EINVAL;

	if (do_mpx_bt_fault(xsave_buf)) {
		force_sig(SIGSEGV, current);
		/*
		 * The force_sig() is essentially "handling" this
		 * exception, so we do not pass up the error
		 * from do_mpx_bt_fault().
		 */
	}
	return 0;
}

/*
 * A thin wrapper around get_user_pages(). Returns 0 if the
 * fault was resolved or -errno if not.
 */
static int mpx_resolve_fault(long __user *addr, int write)
{
	long gup_ret;
	int nr_pages = 1;
	int force = 0;

	gup_ret = get_user_pages(current, current->mm, (unsigned long)addr,
				 nr_pages, write, force, NULL, NULL);
	/*
	 * get_user_pages() returns number of pages gotten.
	 * 0 means we failed to fault in and get anything,
	 * probably because 'addr' is bad.
	 */
	if (!gup_ret)
		return -EFAULT;
	/* Other error, return it */
	if (gup_ret < 0)
		return gup_ret;
	/* must have gup'd a page and gup_ret > 0, success */
	return 0;
}
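
/*
 * (Descriptive note, not in the original: the callers below follow a
 * common pattern -- attempt the user access under pagefault_disable(),
 * and when it returns -EFAULT, call mpx_resolve_fault() to fault the
 * page in with get_user_pages() and then retry the access.)
 */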
574 | ||
575 | /* | |
576 | * Get the base of bounds tables pointed by specific bounds | |
577 | * directory entry. | |
578 | */ | |
static int get_bt_addr(struct mm_struct *mm,
		       long __user *bd_entry, unsigned long *bt_addr)
{
	int ret;
	int valid_bit;

	if (!access_ok(VERIFY_READ, (bd_entry), sizeof(*bd_entry)))
		return -EFAULT;

	while (1) {
		int need_write = 0;

		pagefault_disable();
		ret = get_user(*bt_addr, bd_entry);
		pagefault_enable();
		if (!ret)
			break;
		if (ret == -EFAULT)
			ret = mpx_resolve_fault(bd_entry, need_write);
		/*
		 * If we could not resolve the fault, consider it
		 * userspace's fault and error out.
		 */
		if (ret)
			return ret;
	}

	valid_bit = *bt_addr & MPX_BD_ENTRY_VALID_FLAG;
	*bt_addr &= MPX_BT_ADDR_MASK;

	/*
	 * When the kernel is managing bounds tables, a bounds directory
	 * entry will either have a valid address (plus the valid bit)
	 * *OR* be completely empty. If we see a !valid entry *and* some
	 * data in the address field, we know something is wrong. This
	 * -EINVAL return will cause a SIGSEGV.
	 */
	if (!valid_bit && *bt_addr)
		return -EINVAL;
	/*
	 * Do we have a completely zeroed bt entry? That is OK. It
	 * just means there was no bounds table for this memory. Make
	 * sure to distinguish this from -EINVAL, which will cause
	 * a SEGV.
	 */
	if (!valid_bit)
		return -ENOENT;

	return 0;
}
629 | ||
630 | /* | |
631 | * Free the backing physical pages of bounds table 'bt_addr'. | |
632 | * Assume start...end is within that bounds table. | |
633 | */ | |
634 | static int zap_bt_entries(struct mm_struct *mm, | |
635 | unsigned long bt_addr, | |
636 | unsigned long start, unsigned long end) | |
637 | { | |
638 | struct vm_area_struct *vma; | |
639 | unsigned long addr, len; | |
640 | ||
641 | /* | |
642 | * Find the first overlapping vma. If vma->vm_start > start, there | |
643 | * will be a hole in the bounds table. This -EINVAL return will | |
644 | * cause a SIGSEGV. | |
645 | */ | |
646 | vma = find_vma(mm, start); | |
647 | if (!vma || vma->vm_start > start) | |
648 | return -EINVAL; | |
649 | ||
	/*
	 * A NUMA policy on a VM_MPX VMA could cause this bounds table to
	 * be split. So we need to look across the entire 'start -> end'
	 * range of this bounds table, find all of the VM_MPX VMAs, and
	 * zap only those.
	 */
	addr = start;
	while (vma && vma->vm_start < end) {
		/*
		 * We followed a bounds directory entry down
		 * here. If we find a non-MPX VMA, that's bad,
		 * so stop immediately and return an error. This
		 * probably results in a SIGSEGV.
		 */
		if (!is_mpx_vma(vma))
			return -EINVAL;

		len = min(vma->vm_end, end) - addr;
		zap_page_range(vma, addr, len, NULL);

		/* advance, taking care not to dereference a NULL vm_next */
		vma = vma->vm_next;
		if (vma)
			addr = vma->vm_start;
	}

	return 0;
}
676 | ||
677 | static int unmap_single_bt(struct mm_struct *mm, | |
678 | long __user *bd_entry, unsigned long bt_addr) | |
679 | { | |
680 | unsigned long expected_old_val = bt_addr | MPX_BD_ENTRY_VALID_FLAG; | |
681 | unsigned long actual_old_val = 0; | |
682 | int ret; | |
683 | ||
684 | while (1) { | |
685 | int need_write = 1; | |
686 | ||
687 | pagefault_disable(); | |
688 | ret = user_atomic_cmpxchg_inatomic(&actual_old_val, bd_entry, | |
689 | expected_old_val, 0); | |
690 | pagefault_enable(); | |
691 | if (!ret) | |
692 | break; | |
693 | if (ret == -EFAULT) | |
694 | ret = mpx_resolve_fault(bd_entry, need_write); | |
695 | /* | |
696 | * If we could not resolve the fault, consider it | |
697 | * userspace's fault and error out. | |
698 | */ | |
699 | if (ret) | |
700 | return ret; | |
701 | } | |
702 | /* | |
703 | * The cmpxchg was performed, check the results. | |
704 | */ | |
705 | if (actual_old_val != expected_old_val) { | |
706 | /* | |
707 | * Someone else raced with us to unmap the table. | |
708 | * There was no bounds table pointed to by the | |
709 | * directory, so declare success. Somebody freed | |
710 | * it. | |
711 | */ | |
712 | if (!actual_old_val) | |
713 | return 0; | |
714 | /* | |
715 | * Something messed with the bounds directory | |
716 | * entry. We hold mmap_sem for read or write | |
717 | * here, so it could not be a _new_ bounds table | |
718 | * that someone just allocated. Something is | |
719 | * wrong, so pass up the error and SIGSEGV. | |
720 | */ | |
721 | return -EINVAL; | |
722 | } | |
723 | ||
	/*
	 * Note, we are likely being called under do_munmap() already. To
	 * avoid recursion, do_munmap() checks the VM_MPX flag to see
	 * whether it is operating on a bounds table itself.
	 */
	return do_munmap(mm, bt_addr, MPX_BT_SIZE_BYTES);
}

/*
 * If the bounds table pointed to by bounds directory 'bd_entry' is
 * not shared, unmap this whole bounds table. Otherwise, only free
 * the backing physical pages of the bounds table entries covered
 * by this virtual address region start...end.
 */
static int unmap_shared_bt(struct mm_struct *mm,
			   long __user *bd_entry, unsigned long start,
			   unsigned long end, bool prev_shared, bool next_shared)
{
	unsigned long bt_addr;
	int ret;

	ret = get_bt_addr(mm, bd_entry, &bt_addr);
	/*
	 * We could see an "error" ret for not-present bounds
	 * tables (not really an error), or actual errors, but
	 * stop unmapping either way.
	 */
	if (ret)
		return ret;

	if (prev_shared && next_shared)
		ret = zap_bt_entries(mm, bt_addr,
				     bt_addr + MPX_GET_BT_ENTRY_OFFSET(start),
				     bt_addr + MPX_GET_BT_ENTRY_OFFSET(end));
	else if (prev_shared)
		ret = zap_bt_entries(mm, bt_addr,
				     bt_addr + MPX_GET_BT_ENTRY_OFFSET(start),
				     bt_addr + MPX_BT_SIZE_BYTES);
	else if (next_shared)
		ret = zap_bt_entries(mm, bt_addr, bt_addr,
				     bt_addr + MPX_GET_BT_ENTRY_OFFSET(end));
	else
		ret = unmap_single_bt(mm, bd_entry, bt_addr);

	return ret;
}

/*
 * A virtual address region being munmap()ed might share a bounds table
 * with adjacent VMAs. We only need to free the backing physical
 * memory of the shared bounds table entries covered by this virtual
 * address region.
 */
static int unmap_edge_bts(struct mm_struct *mm,
			  unsigned long start, unsigned long end)
{
	int ret;
	long __user *bde_start, *bde_end;
	struct vm_area_struct *prev, *next;
	bool prev_shared = false, next_shared = false;

	bde_start = mm->bd_addr + MPX_GET_BD_ENTRY_OFFSET(start);
	bde_end = mm->bd_addr + MPX_GET_BD_ENTRY_OFFSET(end - 1);

	/*
	 * Check whether bde_start and bde_end are shared with adjacent
	 * VMAs.
	 *
	 * We already unlinked the VMAs from the mm's rbtree so 'start'
	 * is guaranteed to be in a hole. This gets us the first VMA
	 * before the hole in to 'prev' and the next VMA after the hole
	 * in to 'next'.
	 */
	next = find_vma_prev(mm, start, &prev);
	if (prev && (mm->bd_addr + MPX_GET_BD_ENTRY_OFFSET(prev->vm_end - 1))
			== bde_start)
		prev_shared = true;
	if (next && (mm->bd_addr + MPX_GET_BD_ENTRY_OFFSET(next->vm_start))
			== bde_end)
		next_shared = true;

	/*
	 * This virtual address region being munmap()ed is only
	 * covered by one bounds table.
	 *
	 * In this case, if this table is also shared with adjacent
	 * VMAs, only part of the backing physical memory of the bounds
	 * table needs to be freed. Otherwise the whole bounds table
	 * needs to be unmapped.
	 */
	if (bde_start == bde_end) {
		return unmap_shared_bt(mm, bde_start, start, end,
				       prev_shared, next_shared);
	}
818 | ||
819 | /* | |
820 | * If more than one bounds tables are covered in this virtual | |
821 | * address region being munmap()ed, we need to separately check | |
822 | * whether bde_start and bde_end are shared with adjacent VMAs. | |
823 | */ | |
824 | ret = unmap_shared_bt(mm, bde_start, start, end, prev_shared, false); | |
825 | if (ret) | |
826 | return ret; | |
827 | ret = unmap_shared_bt(mm, bde_end, start, end, false, next_shared); | |
828 | if (ret) | |
829 | return ret; | |
830 | ||
831 | return 0; | |
832 | } | |
833 | ||
834 | static int mpx_unmap_tables(struct mm_struct *mm, | |
835 | unsigned long start, unsigned long end) | |
836 | { | |
837 | int ret; | |
838 | long __user *bd_entry, *bde_start, *bde_end; | |
839 | unsigned long bt_addr; | |
840 | ||
841 | /* | |
842 | * "Edge" bounds tables are those which are being used by the region | |
843 | * (start -> end), but that may be shared with adjacent areas. If they | |
844 | * turn out to be completely unshared, they will be freed. If they are | |
845 | * shared, we will free the backing store (like an MADV_DONTNEED) for | |
846 | * areas used by this region. | |
847 | */ | |
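	/*
	 * Hypothetical layout (illustration, not from the original
	 * source) -- '=' is the region being unmapped and BT1/BT2/BT3
	 * are the bounds tables covering it:
	 *
	 *	   BT1          BT2          BT3
	 *	[---|====][==========][====|-----]
	 *	    start                  end
	 *
	 * BT1 and BT3 are "edge" tables that may be shared with the
	 * neighboring '-' areas; BT2 is fully covered and is handled
	 * by the loop further down.
	 */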
	ret = unmap_edge_bts(mm, start, end);
	switch (ret) {
		/* non-present tables are OK */
		case 0:
		case -ENOENT:
			/* Success, or no tables to unmap */
			break;
		case -EINVAL:
		case -EFAULT:
		default:
			return ret;
	}

	/*
	 * Only unmap the bounds tables that are
	 * 1. fully covered
	 * 2. not at the edges of the mapping, even if fully aligned
	 */
	bde_start = mm->bd_addr + MPX_GET_BD_ENTRY_OFFSET(start);
	bde_end = mm->bd_addr + MPX_GET_BD_ENTRY_OFFSET(end - 1);
	for (bd_entry = bde_start + 1; bd_entry < bde_end; bd_entry++) {
		ret = get_bt_addr(mm, bd_entry, &bt_addr);
		switch (ret) {
			case 0:
				break;
			case -ENOENT:
				/* No table here, try the next one */
				continue;
			case -EINVAL:
			case -EFAULT:
			default:
				/*
				 * Note: we are being strict here.
				 * Any time we run in to an issue
				 * unmapping tables, we stop and
				 * SIGSEGV.
				 */
				return ret;
		}

		ret = unmap_single_bt(mm, bd_entry, bt_addr);
		if (ret)
			return ret;
	}

	return 0;
}
895 | ||
896 | /* | |
897 | * Free unused bounds tables covered in a virtual address region being | |
898 | * munmap()ed. Assume end > start. | |
899 | * | |
900 | * This function will be called by do_munmap(), and the VMAs covering | |
901 | * the virtual address region start...end have already been split if | |
902 | * necessary, and the 'vma' is the first vma in this range (start -> end). | |
903 | */ | |
904 | void mpx_notify_unmap(struct mm_struct *mm, struct vm_area_struct *vma, | |
905 | unsigned long start, unsigned long end) | |
906 | { | |
907 | int ret; | |
908 | ||
	/*
	 * Refuse to do anything unless userspace has asked
	 * the kernel to help manage the bounds tables.
	 */
	if (!kernel_managing_mpx_tables(current->mm))
		return;
	/*
	 * This will look across the entire 'start -> end' range,
	 * and find all of the non-VM_MPX VMAs.
	 *
	 * To avoid recursion, if a VM_MPX vma is found in the range
	 * (start -> end), we will not continue the follow-up work. This
	 * recursion represents having bounds tables for bounds tables,
	 * which should not occur normally. Being strict about it here
	 * helps ensure that we do not have an exploitable stack overflow.
	 */
	do {
		if (vma->vm_flags & VM_MPX)
			return;
		vma = vma->vm_next;
	} while (vma && vma->vm_start < end);

	ret = mpx_unmap_tables(mm, start, end);
	if (ret)
		force_sig(SIGSEGV, current);
}