Commit | Line | Data |
---|---|---|
b2441318 | 1 | // SPDX-License-Identifier: GPL-2.0 |
1da177e4 | 2 | /* |
1da177e4 | 3 | * Copyright (C) 1995 Linus Torvalds |
2d4a7167 | 4 | * Copyright (C) 2001, 2002 Andi Kleen, SuSE Labs. |
f8eeb2e6 | 5 | * Copyright (C) 2008-2009, Red Hat Inc., Ingo Molnar |
1da177e4 | 6 | */ |
a2bcd473 | 7 | #include <linux/sched.h> /* test_thread_flag(), ... */ |
68db0cf1 | 8 | #include <linux/sched/task_stack.h> /* task_stack_*(), ... */ |
a2bcd473 | 9 | #include <linux/kdebug.h> /* oops_begin/end, ... */ |
4cdf8dbe | 10 | #include <linux/extable.h> /* search_exception_tables */ |
57c8a661 | 11 | #include <linux/memblock.h> /* max_low_pfn */ |
9326638c | 12 | #include <linux/kprobes.h> /* NOKPROBE_SYMBOL, ... */ |
a2bcd473 | 13 | #include <linux/mmiotrace.h> /* kmmio_handler, ... */ |
cdd6c482 | 14 | #include <linux/perf_event.h> /* perf_sw_event */ |
f672b49b | 15 | #include <linux/hugetlb.h> /* hstate_index_to_shift */ |
268bb0ce | 16 | #include <linux/prefetch.h> /* prefetchw */ |
56dd9470 | 17 | #include <linux/context_tracking.h> /* exception_enter(), ... */ |
70ffdb93 | 18 | #include <linux/uaccess.h> /* faulthandler_disabled() */ |
3425d934 | 19 | #include <linux/efi.h> /* efi_recover_from_page_fault()*/ |
50a7ca3c | 20 | #include <linux/mm_types.h> |
2d4a7167 | 21 | |
019132ff | 22 | #include <asm/cpufeature.h> /* boot_cpu_has, ... */ |
a2bcd473 | 23 | #include <asm/traps.h> /* dotraplinkage, ... */ |
a2bcd473 | 24 | #include <asm/pgalloc.h> /* pgd_*(), ... */ |
f40c3300 | 25 | #include <asm/fixmap.h> /* VSYSCALL_ADDR */ |
f40c3300 | 26 | #include <asm/vsyscall.h> /* emulate_vsyscall */ |
ba3e127e | 27 | #include <asm/vm86.h> /* struct vm86 */ |
019132ff | 28 | #include <asm/mmu_context.h> /* vma_pkey() */ |
3425d934 | 29 | #include <asm/efi.h> /* efi_recover_from_page_fault()*/ |
a1a371c4 | 30 | #include <asm/desc.h> /* store_idt(), ... */ |
1da177e4 | 31 | |
d34603b0 | 32 | #define CREATE_TRACE_POINTS |
d34603b0 | 33 | #include <asm/trace/exceptions.h> |
d34603b0 | 34 | |
b814d41f | 35 | /* |
b319eed0 | 36 | * Returns 0 if mmiotrace is disabled, or if the fault is not |
b319eed0 | 37 | * handled by mmiotrace: |
b814d41f | 38 | */ |
9326638c | 39 | static nokprobe_inline int |
62c9295f | 40 | kmmio_fault(struct pt_regs *regs, unsigned long addr) |
86069782 | 41 | { |
0fd0e3da | 42 | if (unlikely(is_kmmio_active())) |
0fd0e3da | 43 | if (kmmio_handler(regs, addr) == 1) |
0fd0e3da | 44 | return -1; |
0fd0e3da | 45 | return 0; |
86069782 | 46 | } |
86069782 | 47 | |
9326638c | 48 | static nokprobe_inline int kprobes_fault(struct pt_regs *regs) |
1bd858a5 | 49 | { |
a980c0ef | 50 | if (!kprobes_built_in()) |
a980c0ef | 51 | return 0; |
a980c0ef | 52 | if (user_mode(regs)) |
a980c0ef | 53 | return 0; |
a980c0ef | 54 | /* |
a980c0ef | 55 | * To be potentially processing a kprobe fault and to be allowed to call |
a980c0ef | 56 | * kprobe_running(), we have to be non-preemptible. |
a980c0ef | 57 | */ |
a980c0ef | 58 | if (preemptible()) |
a980c0ef | 59 | return 0; |
a980c0ef | 60 | if (!kprobe_running()) |
a980c0ef | 61 | return 0; |
a980c0ef | 62 | return kprobe_fault_handler(regs, X86_TRAP_PF); |
33cb5243 | 63 | } |
1bd858a5 | 64 | |
1dc85be0 | 65 | /* |
2d4a7167 | 66 | * Prefetch quirks: |
2d4a7167 | 67 | * |
2d4a7167 | 68 | * 32-bit mode: |
2d4a7167 | 69 | * |
2d4a7167 | 70 | * Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch. |
2d4a7167 | 71 | * Check that here and ignore it. |
1dc85be0 | 72 | * |
2d4a7167 | 73 | * 64-bit mode: |
1dc85be0 | 74 | * |
2d4a7167 | 75 | * Sometimes the CPU reports invalid exceptions on prefetch. |
2d4a7167 | 76 | * Check that here and ignore it. |
2d4a7167 | 77 | * |
2d4a7167 | 78 | * Opcode checker based on code by Richard Brunner. |
1dc85be0 | 79 | */ |
107a0367 | 80 | static inline int |
107a0367 | 81 | check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr, |
107a0367 | 82 | unsigned char opcode, int *prefetch) |
107a0367 | 83 | { |
107a0367 | 84 | unsigned char instr_hi = opcode & 0xf0; |
107a0367 | 85 | unsigned char instr_lo = opcode & 0x0f; |
107a0367 | 86 | |
107a0367 | 87 | switch (instr_hi) { |
107a0367 | 88 | case 0x20: |
107a0367 | 89 | case 0x30: |
107a0367 | 90 | /* |
107a0367 | 91 | * Values 0x26,0x2E,0x36,0x3E are valid x86 prefixes. |
107a0367 | 92 | * In X86_64 long mode, the CPU will signal invalid |
107a0367 | 93 | * opcode if some of these prefixes are present so |
107a0367 | 94 | * X86_64 will never get here anyway |
107a0367 | 95 | */ |
107a0367 | 96 | return ((instr_lo & 7) == 0x6); |
107a0367 | 97 | #ifdef CONFIG_X86_64 |
107a0367 | 98 | case 0x40: |
107a0367 | 99 | /* |
107a0367 | 100 | * In AMD64 long mode 0x40..0x4F are valid REX prefixes |
107a0367 | 101 | * Need to figure out under what instruction mode the |
107a0367 | 102 | * instruction was issued. Could check the LDT for lm, |
107a0367 | 103 | * but for now it's good enough to assume that long |
107a0367 | 104 | * mode only uses well known segments or kernel. |
107a0367 | 105 | */ |
318f5a2a | 106 | return (!user_mode(regs) || user_64bit_mode(regs)); |
107a0367 | 107 | #endif |
107a0367 | 108 | case 0x60: |
107a0367 | 109 | /* 0x64 thru 0x67 are valid prefixes in all modes. */ |
107a0367 | 110 | return (instr_lo & 0xC) == 0x4; |
107a0367 | 111 | case 0xF0: |
107a0367 | 112 | /* 0xF0, 0xF2, 0xF3 are valid prefixes in all modes. */ |
107a0367 | 113 | return !instr_lo || (instr_lo>>1) == 1; |
107a0367 | 114 | case 0x00: |
107a0367 | 115 | /* Prefetch instruction is 0x0F0D or 0x0F18 */ |
107a0367 | 116 | if (probe_kernel_address(instr, opcode)) |
107a0367 | 117 | return 0; |
107a0367 | 118 | |
107a0367 | 119 | *prefetch = (instr_lo == 0xF) && |
107a0367 | 120 | (opcode == 0x0D || opcode == 0x18); |
107a0367 | 121 | return 0; |
107a0367 | 122 | default: |
107a0367 | 123 | return 0; |
107a0367 | 124 | } |
107a0367 | 125 | } |
107a0367 | 126 | |
2d4a7167 | 127 | static int |
2d4a7167 | 128 | is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr) |
33cb5243 | 129 | { |
2d4a7167 | 130 | unsigned char *max_instr; |
ab2bf0c1 | 131 | unsigned char *instr; |
33cb5243 | 132 | int prefetch = 0; |
1da177e4 | 133 | |
3085354d | 134 | /* |
3085354d | 135 | * If it was an exec (instruction fetch) fault on an NX page, then |
3085354d | 136 | * do not ignore the fault: |
3085354d | 137 | */ |
1067f030 | 138 | if (error_code & X86_PF_INSTR) |
1da177e4 | 139 | return 0; |
1dc85be0 | 140 | |
107a0367 | 141 | instr = (void *)convert_ip_to_linear(current, regs); |
f1290ec9 | 142 | max_instr = instr + 15; |
1da177e4 | 143 | |
d31bf07f | 144 | if (user_mode(regs) && instr >= (unsigned char *)TASK_SIZE_MAX) |
1da177e4 | 145 | return 0; |
1da177e4 | 146 | |
107a0367 | 147 | while (instr < max_instr) { |
2d4a7167 | 148 | unsigned char opcode; |
1da177e4 | 149 | |
ab2bf0c1 | 150 | if (probe_kernel_address(instr, opcode)) |
33cb5243 | 151 | break; |
1da177e4 | 152 | |
1da177e4 | 153 | instr++; |
1da177e4 | 154 | |
107a0367 | 155 | if (!check_prefetch_opcode(regs, instr, opcode, &prefetch)) |
1da177e4 | 156 | break; |
1da177e4 | 157 | } |
1da177e4 | 158 | return prefetch; |
1da177e4 | 159 | } |
1da177e4 | 160 | |
f2f13a85 | 161 | DEFINE_SPINLOCK(pgd_lock); |
f2f13a85 | 162 | LIST_HEAD(pgd_list); |
f2f13a85 | 163 | |
f2f13a85 | 164 | #ifdef CONFIG_X86_32 |
f2f13a85 | 165 | static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address) |
33cb5243 | 166 | { |
f2f13a85 | 167 | unsigned index = pgd_index(address); |
f2f13a85 | 168 | pgd_t *pgd_k; |
e0c4f675 | 169 | p4d_t *p4d, *p4d_k; |
f2f13a85 | 170 | pud_t *pud, *pud_k; |
f2f13a85 | 171 | pmd_t *pmd, *pmd_k; |
2d4a7167 | 172 | |
f2f13a85 | 173 | pgd += index; |
f2f13a85 | 174 | pgd_k = init_mm.pgd + index; |
f2f13a85 | 175 | |
f2f13a85 | 176 | if (!pgd_present(*pgd_k)) |
f2f13a85 | 177 | return NULL; |
f2f13a85 | 178 | |
f2f13a85 | 179 | /* |
f2f13a85 | 180 | * set_pgd(pgd, *pgd_k); here would be useless on PAE |
f2f13a85 | 181 | * and redundant with the set_pmd() on non-PAE. As would |
e0c4f675 | 182 | * set_p4d/set_pud. |
f2f13a85 | 183 | */ |
e0c4f675 | 184 | p4d = p4d_offset(pgd, address); |
e0c4f675 | 185 | p4d_k = p4d_offset(pgd_k, address); |
e0c4f675 | 186 | if (!p4d_present(*p4d_k)) |
e0c4f675 | 187 | return NULL; |
e0c4f675 | 188 | |
e0c4f675 | 189 | pud = pud_offset(p4d, address); |
e0c4f675 | 190 | pud_k = pud_offset(p4d_k, address); |
f2f13a85 | 191 | if (!pud_present(*pud_k)) |
f2f13a85 | 192 | return NULL; |
f2f13a85 | 193 | |
f2f13a85 | 194 | pmd = pmd_offset(pud, address); |
f2f13a85 | 195 | pmd_k = pmd_offset(pud_k, address); |
f2f13a85 | 196 | if (!pmd_present(*pmd_k)) |
f2f13a85 | 197 | return NULL; |
f2f13a85 | 198 | |
b8bcfe99 | 199 | if (!pmd_present(*pmd)) |
f2f13a85 | 200 | set_pmd(pmd, *pmd_k); |
b8bcfe99 | 201 | else |
f2f13a85 | 202 | BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k)); |
f2f13a85 | 203 | |
f2f13a85 | 204 | return pmd_k; |
f2f13a85 | 205 | } |
f2f13a85 | 206 | |
f2f13a85 | 207 | void vmalloc_sync_all(void) |
f2f13a85 | 208 | { |
f2f13a85 | 209 | unsigned long address; |
f2f13a85 | 210 | |
f2f13a85 | 211 | if (SHARED_KERNEL_PMD) |
f2f13a85 | 212 | return; |
f2f13a85 | 213 | |
f2f13a85 | 214 | for (address = VMALLOC_START & PMD_MASK; |
dc4fac84 | 215 | address >= TASK_SIZE_MAX && address < FIXADDR_TOP; |
f2f13a85 | 216 | address += PMD_SIZE) { |
f2f13a85 | 217 | struct page *page; |
f2f13a85 | 218 | |
a79e53d8 | 219 | spin_lock(&pgd_lock); |
f2f13a85 | 220 | list_for_each_entry(page, &pgd_list, lru) { |
617d34d9 | 221 | spinlock_t *pgt_lock; |
f01f7c56 | 222 | pmd_t *ret; |
617d34d9 | 223 | |
a79e53d8 | 224 | /* the pgt_lock only for Xen */ |
617d34d9 | 225 | pgt_lock = &pgd_page_get_mm(page)->page_table_lock; |
617d34d9 | 226 | |
617d34d9 | 227 | spin_lock(pgt_lock); |
617d34d9 | 228 | ret = vmalloc_sync_one(page_address(page), address); |
617d34d9 | 229 | spin_unlock(pgt_lock); |
617d34d9 | 230 | |
617d34d9 | 231 | if (!ret) |
f2f13a85 | 232 | break; |
f2f13a85 | 233 | } |
a79e53d8 | 234 | spin_unlock(&pgd_lock); |
f2f13a85 | 235 | } |
f2f13a85 | 236 | } |
f2f13a85 | 237 | |
f2f13a85 | 238 | /* |
f2f13a85 | 239 | * 32-bit: |
f2f13a85 | 240 | * |
f2f13a85 | 241 | * Handle a fault on the vmalloc or module mapping area |
f2f13a85 | 242 | */ |
9326638c | 243 | static noinline int vmalloc_fault(unsigned long address) |
f2f13a85 | 244 | { |
f2f13a85 | 245 | unsigned long pgd_paddr; |
f2f13a85 | 246 | pmd_t *pmd_k; |
f2f13a85 | 247 | pte_t *pte_k; |
f2f13a85 | 248 | |
f2f13a85 | 249 | /* Make sure we are in vmalloc area: */ |
f2f13a85 | 250 | if (!(address >= VMALLOC_START && address < VMALLOC_END)) |
f2f13a85 | 251 | return -1; |
f2f13a85 | 252 | |
f2f13a85 | 253 | /* |
f2f13a85 | 254 | * Synchronize this task's top level page-table |
f2f13a85 | 255 | * with the 'reference' page table. |
f2f13a85 | 256 | * |
f2f13a85 | 257 | * Do _not_ use "current" here. We might be inside |
f2f13a85 | 258 | * an interrupt in the middle of a task switch.. |
f2f13a85 | 259 | */ |
6c690ee1 | 260 | pgd_paddr = read_cr3_pa(); |
f2f13a85 | 261 | pmd_k = vmalloc_sync_one(__va(pgd_paddr), address); |
f2f13a85 | 262 | if (!pmd_k) |
f2f13a85 | 263 | return -1; |
f2f13a85 | 264 | |
18a95521 | 265 | if (pmd_large(*pmd_k)) |
f4eafd8b | 266 | return 0; |
f4eafd8b | 267 | |
f2f13a85 | 268 | pte_k = pte_offset_kernel(pmd_k, address); |
f2f13a85 | 269 | if (!pte_present(*pte_k)) |
f2f13a85 | 270 | return -1; |
f2f13a85 | 271 | |
f2f13a85 | 272 | return 0; |
f2f13a85 | 273 | } |
9326638c | 274 | NOKPROBE_SYMBOL(vmalloc_fault); |
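The scheme is easier to see stripped of the paging types: a task's top-level table may lag behind init_mm's master table, and the fault handler copies the missing top-level entry on first touch instead of updating every table eagerly. A toy user-space model of that lazy sync (all names hypothetical; this is an analogy, not kernel API):

```c
#include <stdio.h>

#define SLOTS 8

/* Toy model of lazy top-level sync: NULL means "not yet copied". */
static int *master[SLOTS];		/* init_mm.pgd analogue  */
static int *task_tbl[SLOTS];		/* per-task pgd analogue */

static int toy_vmalloc_fault(int slot)
{
	if (!master[slot])		/* not mapped in master: real fault */
		return -1;
	if (!task_tbl[slot])		/* stale task table: copy and retry */
		task_tbl[slot] = master[slot];
	return 0;
}

int main(void)
{
	static int payload = 42;

	master[3] = &payload;		/* "kernel" maps a new vmalloc region */
	printf("%d\n", toy_vmalloc_fault(3));	/* 0: synced from master */
	printf("%d\n", toy_vmalloc_fault(5));	/* -1: genuinely unmapped */
	return 0;
}
```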
f2f13a85 | 275 | |
f2f13a85 | 276 | /* |
f2f13a85 | 277 | * Did it hit the DOS screen memory VA from vm86 mode? |
f2f13a85 | 278 | */ |
f2f13a85 | 279 | static inline void |
f2f13a85 | 280 | check_v8086_mode(struct pt_regs *regs, unsigned long address, |
f2f13a85 | 281 | struct task_struct *tsk) |
f2f13a85 | 282 | { |
9fda6a06 | 283 | #ifdef CONFIG_VM86 |
f2f13a85 | 284 | unsigned long bit; |
f2f13a85 | 285 | |
9fda6a06 | 286 | if (!v8086_mode(regs) || !tsk->thread.vm86) |
f2f13a85 | 287 | return; |
f2f13a85 | 288 | |
f2f13a85 | 289 | bit = (address - 0xA0000) >> PAGE_SHIFT; |
f2f13a85 | 290 | if (bit < 32) |
9fda6a06 | 291 | tsk->thread.vm86->screen_bitmap |= 1 << bit; |
9fda6a06 | 292 | #endif |
33cb5243 | 293 | } |
1da177e4 | 294 | |
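The bit arithmetic above maps the VGA window starting at 0xA0000 onto a 32-bit bitmap, one bit per 4 KiB page. The math in isolation (standalone C; PAGE_SHIFT of 12 assumed, address chosen for illustration):

```c
#include <stdio.h>

#define PAGE_SHIFT 12	/* 4 KiB pages, as on x86 */

int main(void)
{
	unsigned long address = 0xB8000;	/* classic VGA text buffer */
	unsigned long bit = (address - 0xA0000) >> PAGE_SHIFT;

	/* 0xB8000 is 0x18000 bytes past 0xA0000: page 24 of the window */
	if (bit < 32)
		printf("screen_bitmap bit %lu\n", bit);	/* prints 24 */
	return 0;
}
```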
087975b0 | 295 | static bool low_pfn(unsigned long pfn) |
1da177e4 | 296 | { |
087975b0 | 297 | return pfn < max_low_pfn; |
087975b0 | 298 | } |
1156e098 | 299 | |
087975b0 | 300 | static void dump_pagetable(unsigned long address) |
087975b0 | 301 | { |
6c690ee1 | 302 | pgd_t *base = __va(read_cr3_pa()); |
087975b0 | 303 | pgd_t *pgd = &base[pgd_index(address)]; |
e0c4f675 | 304 | p4d_t *p4d; |
e0c4f675 | 305 | pud_t *pud; |
087975b0 | 306 | pmd_t *pmd; |
087975b0 | 307 | pte_t *pte; |
2d4a7167 | 308 | |
1156e098 | 309 | #ifdef CONFIG_X86_PAE |
39e48d9b | 310 | pr_info("*pdpt = %016Lx ", pgd_val(*pgd)); |
087975b0 | 311 | if (!low_pfn(pgd_val(*pgd) >> PAGE_SHIFT) || !pgd_present(*pgd)) |
087975b0 | 312 | goto out; |
39e48d9b | 313 | #define pr_pde pr_cont |
39e48d9b | 314 | #else |
39e48d9b | 315 | #define pr_pde pr_info |
1156e098 | 316 | #endif |
e0c4f675 | 317 | p4d = p4d_offset(pgd, address); |
e0c4f675 | 318 | pud = pud_offset(p4d, address); |
e0c4f675 | 319 | pmd = pmd_offset(pud, address); |
39e48d9b | 320 | pr_pde("*pde = %0*Lx ", sizeof(*pmd) * 2, (u64)pmd_val(*pmd)); |
39e48d9b | 321 | #undef pr_pde |
1156e098 | 322 | |
1156e098 | 323 | /* |
1156e098 | 324 | * We must not directly access the pte in the highpte |
1156e098 | 325 | * case if the page table is located in highmem. |
1156e098 | 326 | * And let's rather not kmap-atomic the pte, just in case |
2d4a7167 | 327 | * it's allocated already: |
1156e098 | 328 | */ |
087975b0 | 329 | if (!low_pfn(pmd_pfn(*pmd)) || !pmd_present(*pmd) || pmd_large(*pmd)) |
087975b0 | 330 | goto out; |
1156e098 | 331 | |
087975b0 | 332 | pte = pte_offset_kernel(pmd, address); |
39e48d9b | 333 | pr_cont("*pte = %0*Lx ", sizeof(*pte) * 2, (u64)pte_val(*pte)); |
087975b0 | 334 | out: |
39e48d9b | 335 | pr_cont("\n"); |
f2f13a85 | 336 | } |
f2f13a85 | 337 | |
f2f13a85 | 338 | #else /* CONFIG_X86_64: */ |
f2f13a85 | 339 | |
f2f13a85 | 340 | void vmalloc_sync_all(void) |
f2f13a85 | 341 | { |
5372e155 | 342 | sync_global_pgds(VMALLOC_START & PGDIR_MASK, VMALLOC_END); |
f2f13a85 | 343 | } |
f2f13a85 | 344 | |
f2f13a85 | 345 | /* |
f2f13a85 | 346 | * 64-bit: |
f2f13a85 | 347 | * |
f2f13a85 | 348 | * Handle a fault on the vmalloc area |
f2f13a85 | 349 | */ |
9326638c | 350 | static noinline int vmalloc_fault(unsigned long address) |
f2f13a85 | 351 | { |
565977a3 | 352 | pgd_t *pgd, *pgd_k; |
565977a3 | 353 | p4d_t *p4d, *p4d_k; |
565977a3 | 354 | pud_t *pud; |
565977a3 | 355 | pmd_t *pmd; |
565977a3 | 356 | pte_t *pte; |
f2f13a85 | 357 | |
f2f13a85 | 358 | /* Make sure we are in vmalloc area: */ |
f2f13a85 | 359 | if (!(address >= VMALLOC_START && address < VMALLOC_END)) |
f2f13a85 | 360 | return -1; |
f2f13a85 | 361 | |
ebc8827f | 362 | WARN_ON_ONCE(in_nmi()); |
ebc8827f | 363 | |
f2f13a85 | 364 | /* |
f2f13a85 | 365 | * Copy kernel mappings over when needed. This can also |
f2f13a85 | 366 | * happen within a race in page table update. In the latter |
f2f13a85 | 367 | * case just flush: |
f2f13a85 | 368 | */ |
6c690ee1 | 369 | pgd = (pgd_t *)__va(read_cr3_pa()) + pgd_index(address); |
565977a3 | 370 | pgd_k = pgd_offset_k(address); |
565977a3 | 371 | if (pgd_none(*pgd_k)) |
f2f13a85 | 372 | return -1; |
f2f13a85 | 373 | |
ed7588d5 | 374 | if (pgtable_l5_enabled()) { |
36b3a772 | 375 | if (pgd_none(*pgd)) { |
565977a3 | 376 | set_pgd(pgd, *pgd_k); |
36b3a772 | 377 | arch_flush_lazy_mmu_mode(); |
36b3a772 | 378 | } else { |
565977a3 | 379 | BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_k)); |
36b3a772 | 380 | } |
1160c277 | 381 | } |
f2f13a85 | 382 | |
b50858ce | 383 | /* With 4-level paging, copying happens on the p4d level. */ |
b50858ce | 384 | p4d = p4d_offset(pgd, address); |
565977a3 | 385 | p4d_k = p4d_offset(pgd_k, address); |
565977a3 | 386 | if (p4d_none(*p4d_k)) |
b50858ce | 387 | return -1; |
b50858ce | 388 | |
ed7588d5 | 389 | if (p4d_none(*p4d) && !pgtable_l5_enabled()) { |
565977a3 | 390 | set_p4d(p4d, *p4d_k); |
b50858ce | 391 | arch_flush_lazy_mmu_mode(); |
b50858ce | 392 | } else { |
565977a3 | 393 | BUG_ON(p4d_pfn(*p4d) != p4d_pfn(*p4d_k)); |
b50858ce | 394 | } |
b50858ce | 395 | |
36b3a772 | 396 | BUILD_BUG_ON(CONFIG_PGTABLE_LEVELS < 4); |
f2f13a85 | 397 | |
b50858ce | 398 | pud = pud_offset(p4d, address); |
565977a3 | 399 | if (pud_none(*pud)) |
f2f13a85 | 400 | return -1; |
f2f13a85 | 401 | |
18a95521 | 402 | if (pud_large(*pud)) |
f4eafd8b | 403 | return 0; |
f4eafd8b | 404 | |
f2f13a85 | 405 | pmd = pmd_offset(pud, address); |
565977a3 | 406 | if (pmd_none(*pmd)) |
f2f13a85 | 407 | return -1; |
f2f13a85 | 408 | |
18a95521 | 409 | if (pmd_large(*pmd)) |
f4eafd8b | 410 | return 0; |
f4eafd8b | 411 | |
f2f13a85 | 412 | pte = pte_offset_kernel(pmd, address); |
565977a3 | 413 | if (!pte_present(*pte)) |
565977a3 | 414 | return -1; |
f2f13a85 | 415 | |
f2f13a85 | 416 | return 0; |
f2f13a85 | 417 | } |
9326638c | 418 | NOKPROBE_SYMBOL(vmalloc_fault); |
f2f13a85 | 419 | |
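The 64-bit walk repeats one pattern per level: bail out if the entry is empty, return early if it is a huge mapping. The indexing itself is plain bit slicing; a standalone sketch of how a 4-level x86-64 virtual address decomposes (9 bits per level over a 12-bit page offset; illustrative only, not kernel code):

```c
#include <stdio.h>

/* 4-level x86-64 split: 9 bits per level, 12-bit page offset. */
#define PAGE_SHIFT 12
#define LEVEL_BITS 9
#define LEVEL_MASK ((1UL << LEVEL_BITS) - 1)

int main(void)
{
	unsigned long addr = 0xffffc90000002345UL;	/* a typical vmalloc address */

	printf("pgd index: %lu\n", (addr >> (PAGE_SHIFT + 3 * LEVEL_BITS)) & LEVEL_MASK);
	printf("pud index: %lu\n", (addr >> (PAGE_SHIFT + 2 * LEVEL_BITS)) & LEVEL_MASK);
	printf("pmd index: %lu\n", (addr >> (PAGE_SHIFT + 1 * LEVEL_BITS)) & LEVEL_MASK);
	printf("pte index: %lu\n", (addr >> PAGE_SHIFT) & LEVEL_MASK);
	printf("offset   : 0x%lx\n", addr & ((1UL << PAGE_SHIFT) - 1));
	return 0;
}
```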
e05139f2 | 420 | #ifdef CONFIG_CPU_SUP_AMD |
f2f13a85 | 421 | static const char errata93_warning[] = |
ad361c98 | 422 | KERN_ERR |
ad361c98 | 423 | "******* Your BIOS seems to not contain a fix for K8 errata #93\n" |
ad361c98 | 424 | "******* Working around it, but it may cause SEGVs or burn power.\n" |
ad361c98 | 425 | "******* Please consider a BIOS update.\n" |
ad361c98 | 426 | "******* Disabling USB legacy in the BIOS may also help.\n"; |
e05139f2 | 427 | #endif |
f2f13a85 | 428 | |
f2f13a85 | 429 | /* |
f2f13a85 | 430 | * No vm86 mode in 64-bit mode: |
f2f13a85 | 431 | */ |
f2f13a85 | 432 | static inline void |
f2f13a85 | 433 | check_v8086_mode(struct pt_regs *regs, unsigned long address, |
f2f13a85 | 434 | struct task_struct *tsk) |
f2f13a85 | 435 | { |
f2f13a85 | 436 | } |
f2f13a85 | 437 | |
f2f13a85 | 438 | static int bad_address(void *p) |
f2f13a85 | 439 | { |
f2f13a85 | 440 | unsigned long dummy; |
f2f13a85 | 441 | |
f2f13a85 | 442 | return probe_kernel_address((unsigned long *)p, dummy); |
f2f13a85 | 443 | } |
f2f13a85 | 444 | |
f2f13a85 | 445 | static void dump_pagetable(unsigned long address) |
f2f13a85 | 446 | { |
6c690ee1 | 447 | pgd_t *base = __va(read_cr3_pa()); |
087975b0 | 448 | pgd_t *pgd = base + pgd_index(address); |
e0c4f675 | 449 | p4d_t *p4d; |
1da177e4 | 450 | pud_t *pud; |
1da177e4 | 451 | pmd_t *pmd; |
1da177e4 | 452 | pte_t *pte; |
1da177e4 | 453 | |
2d4a7167 | 454 | if (bad_address(pgd)) |
2d4a7167 | 455 | goto bad; |
2d4a7167 | 456 | |
39e48d9b | 457 | pr_info("PGD %lx ", pgd_val(*pgd)); |
2d4a7167 | 458 | |
2d4a7167 | 459 | if (!pgd_present(*pgd)) |
2d4a7167 | 460 | goto out; |
1da177e4 | 461 | |
e0c4f675 | 462 | p4d = p4d_offset(pgd, address); |
e0c4f675 | 463 | if (bad_address(p4d)) |
e0c4f675 | 464 | goto bad; |
e0c4f675 | 465 | |
39e48d9b | 466 | pr_cont("P4D %lx ", p4d_val(*p4d)); |
e0c4f675 | 467 | if (!p4d_present(*p4d) || p4d_large(*p4d)) |
e0c4f675 | 468 | goto out; |
e0c4f675 | 469 | |
e0c4f675 | 470 | pud = pud_offset(p4d, address); |
2d4a7167 | 471 | if (bad_address(pud)) |
2d4a7167 | 472 | goto bad; |
2d4a7167 | 473 | |
39e48d9b | 474 | pr_cont("PUD %lx ", pud_val(*pud)); |
b5360222 | 475 | if (!pud_present(*pud) || pud_large(*pud)) |
2d4a7167 | 476 | goto out; |
1da177e4 | 477 | |
1da177e4 | 478 | pmd = pmd_offset(pud, address); |
2d4a7167 | 479 | if (bad_address(pmd)) |
2d4a7167 | 480 | goto bad; |
2d4a7167 | 481 | |
39e48d9b | 482 | pr_cont("PMD %lx ", pmd_val(*pmd)); |
2d4a7167 | 483 | if (!pmd_present(*pmd) || pmd_large(*pmd)) |
2d4a7167 | 484 | goto out; |
1da177e4 | 485 | |
1da177e4 | 486 | pte = pte_offset_kernel(pmd, address); |
2d4a7167 | 487 | if (bad_address(pte)) |
2d4a7167 | 488 | goto bad; |
2d4a7167 | 489 | |
39e48d9b | 490 | pr_cont("PTE %lx", pte_val(*pte)); |
2d4a7167 | 491 | out: |
39e48d9b | 492 | pr_cont("\n"); |
1da177e4 | 493 | return; |
1da177e4 | 494 | bad: |
39e48d9b | 495 | pr_info("BAD\n"); |
8c938f9f | 496 | } |
8c938f9f | 497 | |
f2f13a85 | 498 | #endif /* CONFIG_X86_64 */ |
1da177e4 | 499 | |
2d4a7167 | 500 | /* |
2d4a7167 | 501 | * Workaround for K8 erratum #93 & buggy BIOS. |
2d4a7167 | 502 | * |
2d4a7167 | 503 | * BIOS SMM functions are required to use a specific workaround |
2d4a7167 | 504 | * to avoid corruption of the 64bit RIP register on C stepping K8. |
2d4a7167 | 505 | * |
2d4a7167 | 506 | * A lot of BIOSes that didn't get tested properly miss this. |
2d4a7167 | 507 | * |
2d4a7167 | 508 | * The OS sees this as a page fault with the upper 32bits of RIP cleared. |
2d4a7167 | 509 | * Try to work around it here. |
2d4a7167 | 510 | * |
2d4a7167 | 511 | * Note we only handle faults in kernel here. |
2d4a7167 | 512 | * Does nothing on 32-bit. |
fdfe8aa8 | 513 | */ |
33cb5243 | 514 | static int is_errata93(struct pt_regs *regs, unsigned long address) |
1da177e4 | 515 | { |
e05139f2 | 516 | #if defined(CONFIG_X86_64) && defined(CONFIG_CPU_SUP_AMD) |
e05139f2 | 517 | if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD |
e05139f2 | 518 | || boot_cpu_data.x86 != 0xf) |
e05139f2 | 519 | return 0; |
e05139f2 | 520 | |
65ea5b03 | 521 | if (address != regs->ip) |
1da177e4 | 522 | return 0; |
2d4a7167 | 523 | |
33cb5243 | 524 | if ((address >> 32) != 0) |
1da177e4 | 525 | return 0; |
2d4a7167 | 526 | |
1da177e4 | 527 | address |= 0xffffffffUL << 32; |
33cb5243 | 528 | if ((address >= (u64)_stext && address <= (u64)_etext) || |
33cb5243 | 529 | (address >= MODULES_VADDR && address <= MODULES_END)) { |
a454ab31 | 530 | printk_once(errata93_warning); |
65ea5b03 | 531 | regs->ip = address; |
1da177e4 | 532 | return 1; |
1da177e4 | 533 | } |
fdfe8aa8 | 534 | #endif |
1da177e4 | 535 | return 0; |
33cb5243 | 536 | } |
1da177e4 | 537 | |
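The erratum #93 fixup assumes the upper 32 bits of RIP were zeroed by buggy SMM code, so it ORs them back in and checks whether the result lands in kernel text or module space. The recovery step on its own (standalone C; the address is illustrative):

```c
#include <stdio.h>

int main(void)
{
	/* A 64-bit kernel text address whose upper 32 bits got cleared: */
	unsigned long good_rip = 0xffffffff81234567UL;
	unsigned long faulting = good_rip & 0xffffffffUL;	/* 0x81234567 */

	/* The erratum #93 fixup: put the upper bits back. */
	faulting |= 0xffffffffUL << 32;

	printf("recovered RIP: 0x%lx\n", faulting);	/* 0xffffffff81234567 */
	return 0;
}
```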
35f3266f | 538 | /* |
2d4a7167 | 539 | * Work around K8 erratum #100: K8 in compat mode occasionally jumps |
2d4a7167 | 540 | * to illegal addresses >4GB. |
2d4a7167 | 541 | * |
2d4a7167 | 542 | * We catch this in the page fault handler because these addresses |
2d4a7167 | 543 | * are not reachable. Just detect this case and return. Any code |
35f3266f | 544 | * segment in LDT is compatibility mode. |
35f3266f | 545 | */ |
35f3266f | 546 | static int is_errata100(struct pt_regs *regs, unsigned long address) |
35f3266f | 547 | { |
35f3266f | 548 | #ifdef CONFIG_X86_64 |
2d4a7167 | 549 | if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32)) |
35f3266f | 550 | return 1; |
35f3266f | 551 | #endif |
35f3266f | 552 | return 0; |
35f3266f | 553 | } |
35f3266f | 554 | |
29caf2f9 | 555 | static int is_f00f_bug(struct pt_regs *regs, unsigned long address) |
29caf2f9 | 556 | { |
29caf2f9 | 557 | #ifdef CONFIG_X86_F00F_BUG |
29caf2f9 | 558 | unsigned long nr; |
2d4a7167 | 559 | |
29caf2f9 | 560 | /* |
2d4a7167 | 561 | * Pentium F0 0F C7 C8 bug workaround: |
29caf2f9 | 562 | */ |
e2604b49 | 563 | if (boot_cpu_has_bug(X86_BUG_F00F)) { |
29caf2f9 | 564 | nr = (address - idt_descr.address) >> 3; |
29caf2f9 | 565 | |
29caf2f9 | 566 | if (nr == 6) { |
29caf2f9 | 567 | do_invalid_op(regs, 0); |
29caf2f9 | 568 | return 1; |
29caf2f9 | 569 | } |
29caf2f9 | 570 | } |
29caf2f9 | 571 | #endif |
29caf2f9 | 572 | return 0; |
29caf2f9 | 573 | } |
29caf2f9 | 574 | |
a1a371c4 | 575 | static void show_ldttss(const struct desc_ptr *gdt, const char *name, u16 index) |
a1a371c4 | 576 | { |
a1a371c4 | 577 | u32 offset = (index >> 3) * sizeof(struct desc_struct); |
a1a371c4 | 578 | unsigned long addr; |
a1a371c4 | 579 | struct ldttss_desc desc; |
a1a371c4 | 580 | |
a1a371c4 | 581 | if (index == 0) { |
a1a371c4 | 582 | pr_alert("%s: NULL\n", name); |
a1a371c4 | 583 | return; |
a1a371c4 | 584 | } |
a1a371c4 | 585 | |
a1a371c4 | 586 | if (offset + sizeof(struct ldttss_desc) >= gdt->size) { |
a1a371c4 | 587 | pr_alert("%s: 0x%hx -- out of bounds\n", name, index); |
a1a371c4 | 588 | return; |
a1a371c4 | 589 | } |
a1a371c4 | 590 | |
a1a371c4 | 591 | if (probe_kernel_read(&desc, (void *)(gdt->address + offset), |
a1a371c4 | 592 | sizeof(struct ldttss_desc))) { |
a1a371c4 | 593 | pr_alert("%s: 0x%hx -- GDT entry is not readable\n", |
a1a371c4 | 594 | name, index); |
a1a371c4 | 595 | return; |
a1a371c4 | 596 | } |
a1a371c4 | 597 | |
5ccd3528 | 598 | addr = desc.base0 | (desc.base1 << 16) | ((unsigned long)desc.base2 << 24); |
a1a371c4 | 599 | #ifdef CONFIG_X86_64 |
a1a371c4 | 600 | addr |= ((u64)desc.base3 << 32); |
a1a371c4 | 601 | #endif |
a1a371c4 | 602 | pr_alert("%s: 0x%hx -- base=0x%lx limit=0x%x\n", |
a1a371c4 | 603 | name, index, addr, (desc.limit0 | (desc.limit1 << 16))); |
a1a371c4 | 604 | } |
a1a371c4 | 605 | |
a2aa52ab | 606 | /* |
a2aa52ab | 607 | * This helper function transforms the #PF error_code bits into |
a2aa52ab | 608 | * "[PROT] [USER]" type of descriptive, almost human-readable error strings: |
a2aa52ab | 609 | */ |
a2aa52ab | 610 | static void err_str_append(unsigned long error_code, char *buf, unsigned long mask, const char *txt) |
a1a371c4 | 611 | { |
a2aa52ab | 612 | if (error_code & mask) { |
a1a371c4 | 613 | if (buf[0]) |
a1a371c4 | 614 | strcat(buf, " "); |
a1a371c4 | 615 | strcat(buf, txt); |
a1a371c4 | 616 | } |
a1a371c4 | 617 | } |
a1a371c4 | 618 | |
2d4a7167 | 619 | static void |
a2aa52ab | 620 | show_fault_oops(struct pt_regs *regs, unsigned long error_code, unsigned long address) |
b3279c7f | 621 | { |
a2aa52ab | 622 | char err_txt[64]; |
a1a371c4 | 623 | |
1156e098 | 624 | if (!oops_may_print()) |
1156e098 | 625 | return; |
1156e098 | 626 | |
1067f030 | 627 | if (error_code & X86_PF_INSTR) { |
93809be8 | 628 | unsigned int level; |
426e34cc | 629 | pgd_t *pgd; |
426e34cc | 630 | pte_t *pte; |
2d4a7167 | 631 | |
6c690ee1 | 632 | pgd = __va(read_cr3_pa()); |
426e34cc | 633 | pgd += pgd_index(address); |
426e34cc | 634 | |
426e34cc | 635 | pte = lookup_address_in_pgd(pgd, address, &level); |
1156e098 | 636 | |
8f766149 | 637 | if (pte && pte_present(*pte) && !pte_exec(*pte)) |
d79d0d8a | 638 | pr_crit("kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n", |
d79d0d8a | 639 | from_kuid(&init_user_ns, current_uid())); |
eff50c34 | 640 | if (pte && pte_present(*pte) && pte_exec(*pte) && |
eff50c34 | 641 | (pgd_flags(*pgd) & _PAGE_USER) && |
1e02ce4c | 642 | (__read_cr4() & X86_CR4_SMEP)) |
d79d0d8a | 643 | pr_crit("unable to execute userspace code (SMEP?) (uid: %d)\n", |
d79d0d8a | 644 | from_kuid(&init_user_ns, current_uid())); |
1156e098 | 645 | } |
1156e098 | 646 | |
4188f063 | 647 | pr_alert("BUG: unable to handle kernel %s at %px\n", |
4188f063 | 648 | address < PAGE_SIZE ? "NULL pointer dereference" : "paging request", |
4188f063 | 649 | (void *)address); |
2d4a7167 | 650 | |
a2aa52ab | 651 | err_txt[0] = 0; |
a2aa52ab | 652 | |
a2aa52ab | 653 | /* |
a2aa52ab | 654 | * Note: length of these appended strings including the separation space and the |
a2aa52ab | 655 | * zero delimiter must fit into err_txt[]. |
a2aa52ab | 656 | */ |
a2aa52ab | 657 | err_str_append(error_code, err_txt, X86_PF_PROT, "[PROT]" ); |
a2aa52ab | 658 | err_str_append(error_code, err_txt, X86_PF_WRITE, "[WRITE]"); |
a2aa52ab | 659 | err_str_append(error_code, err_txt, X86_PF_USER, "[USER]" ); |
a2aa52ab | 660 | err_str_append(error_code, err_txt, X86_PF_RSVD, "[RSVD]" ); |
a2aa52ab | 661 | err_str_append(error_code, err_txt, X86_PF_INSTR, "[INSTR]"); |
a2aa52ab | 662 | err_str_append(error_code, err_txt, X86_PF_PK, "[PK]" ); |
a2aa52ab | 663 | |
a2aa52ab | 664 | pr_alert("#PF error: %s\n", error_code ? err_txt : "[normal kernel read fault]"); |
a2aa52ab | 665 | |
a1a371c4 | 666 | if (!(error_code & X86_PF_USER) && user_mode(regs)) { |
a1a371c4 | 667 | struct desc_ptr idt, gdt; |
a1a371c4 | 668 | u16 ldtr, tr; |
a1a371c4 | 669 | |
a1a371c4 | 670 | pr_alert("This was a system access from user code\n"); |
a1a371c4 | 671 | |
a1a371c4 | 672 | /* |
a1a371c4 | 673 | * This can happen for quite a few reasons. The more obvious |
a1a371c4 | 674 | * ones are faults accessing the GDT, or LDT. Perhaps |
a1a371c4 | 675 | * surprisingly, if the CPU tries to deliver a benign or |
a1a371c4 | 676 | * contributory exception from user code and gets a page fault |
a1a371c4 | 677 | * during delivery, the page fault can be delivered as though |
a1a371c4 | 678 | * it originated directly from user code. This could happen |
a1a371c4 | 679 | * due to wrong permissions on the IDT, GDT, LDT, TSS, or |
a1a371c4 | 680 | * kernel or IST stack. |
a1a371c4 | 681 | */ |
a1a371c4 | 682 | store_idt(&idt); |
a1a371c4 | 683 | |
a1a371c4 | 684 | /* Usable even on Xen PV -- it's just slow. */ |
a1a371c4 | 685 | native_store_gdt(&gdt); |
a1a371c4 | 686 | |
a1a371c4 | 687 | pr_alert("IDT: 0x%lx (limit=0x%hx) GDT: 0x%lx (limit=0x%hx)\n", |
a1a371c4 | 688 | idt.address, idt.size, gdt.address, gdt.size); |
a1a371c4 | 689 | |
a1a371c4 | 690 | store_ldt(ldtr); |
a1a371c4 | 691 | show_ldttss(&gdt, "LDTR", ldtr); |
a1a371c4 | 692 | |
a1a371c4 | 693 | store_tr(tr); |
a1a371c4 | 694 | show_ldttss(&gdt, "TR", tr); |
a1a371c4 | 695 | } |
a1a371c4 | 696 | |
b3279c7f | 697 | dump_pagetable(address); |
b3279c7f | 698 | } |
b3279c7f | 699 | |
2d4a7167 | 700 | static noinline void |
2d4a7167 | 701 | pgtable_bad(struct pt_regs *regs, unsigned long error_code, |
2d4a7167 | 702 | unsigned long address) |
1da177e4 | 703 | { |
2d4a7167 | 704 | struct task_struct *tsk; |
2d4a7167 | 705 | unsigned long flags; |
2d4a7167 | 706 | int sig; |
2d4a7167 | 707 | |
2d4a7167 | 708 | flags = oops_begin(); |
2d4a7167 | 709 | tsk = current; |
2d4a7167 | 710 | sig = SIGKILL; |
1209140c | 711 | |
1da177e4 | 712 | printk(KERN_ALERT "%s: Corrupted page table at address %lx\n", |
92181f19 | 713 | tsk->comm, address); |
1da177e4 | 714 | dump_pagetable(address); |
2d4a7167 | 715 | |
22f5991c | 716 | if (__die("Bad pagetable", regs, error_code)) |
874d93d1 | 717 | sig = 0; |
2d4a7167 | 718 | |
874d93d1 | 719 | oops_end(flags, regs, sig); |
1da177e4 | 720 | } |
1da177e4 | 721 | |
e49d3cbe | 722 | static void set_signal_archinfo(unsigned long address, |
e49d3cbe | 723 | unsigned long error_code) |
e49d3cbe | 724 | { |
e49d3cbe | 725 | struct task_struct *tsk = current; |
e49d3cbe | 726 | |
e49d3cbe | 727 | /* |
e49d3cbe | 728 | * To avoid leaking information about the kernel page |
e49d3cbe | 729 | * table layout, pretend that user-mode accesses to |
e49d3cbe | 730 | * kernel addresses are always protection faults. |
e49d3cbe | 731 | */ |
e49d3cbe | 732 | if (address >= TASK_SIZE_MAX) |
e49d3cbe | 733 | error_code |= X86_PF_PROT; |
e49d3cbe | 734 | |
e49d3cbe | 735 | tsk->thread.trap_nr = X86_TRAP_PF; |
e49d3cbe | 736 | tsk->thread.error_code = error_code | X86_PF_USER; |
e49d3cbe | 737 | tsk->thread.cr2 = address; |
e49d3cbe | 738 | } |
e49d3cbe | 739 | |
2d4a7167 | 740 | static noinline void |
2d4a7167 | 741 | no_context(struct pt_regs *regs, unsigned long error_code, |
4fc34901 | 742 | unsigned long address, int signal, int si_code) |
92181f19 | 743 | { |
92181f19 | 744 | struct task_struct *tsk = current; |
92181f19 | 745 | unsigned long flags; |
92181f19 | 746 | int sig; |
92181f19 | 747 | |
ebb53e25 | 748 | if (user_mode(regs)) { |
ebb53e25 | 749 | /* |
ebb53e25 | 750 | * This is an implicit supervisor-mode access from user |
ebb53e25 | 751 | * mode. Bypass all the kernel-mode recovery code and just |
ebb53e25 | 752 | * OOPS. |
ebb53e25 | 753 | */ |
ebb53e25 | 754 | goto oops; |
ebb53e25 | 755 | } |
ebb53e25 | 756 | |
2d4a7167 | 757 | /* Are we prepared to handle this kernel fault? */ |
81fd9c18 | 758 | if (fixup_exception(regs, X86_TRAP_PF, error_code, address)) { |
c026b359 | 759 | /* |
c026b359 | 760 | * Any interrupt that takes a fault gets the fixup. This makes |
c026b359 | 761 | * the below recursive fault logic only apply to faults from |
c026b359 | 762 | * task context. |
c026b359 | 763 | */ |
c026b359 | 764 | if (in_interrupt()) |
c026b359 | 765 | return; |
c026b359 | 766 | |
c026b359 | 767 | /* |
c026b359 | 768 | * Per the above we're !in_interrupt(), aka. task context. |
c026b359 | 769 | * |
c026b359 | 770 | * In this case we need to make sure we're not recursively |
c026b359 | 771 | * faulting through the emulate_vsyscall() logic. |
c026b359 | 772 | */ |
2a53ccbc | 773 | if (current->thread.sig_on_uaccess_err && signal) { |
e49d3cbe | 774 | set_signal_archinfo(address, error_code); |
4fc34901 | 775 | |
4fc34901 | 776 | /* XXX: hwpoison faults will set the wrong code. */ |
b4fd52f2 | 777 | force_sig_fault(signal, si_code, (void __user *)address, |
b4fd52f2 | 778 | tsk); |
4fc34901 | 779 | } |
c026b359 | 780 | |
c026b359 | 781 | /* |
c026b359 | 782 | * Barring that, we can do the fixup and be happy. |
c026b359 | 783 | */ |
92181f19 | 784 | return; |
4fc34901 | 785 | } |
92181f19 | 786 | |
6271cfdf | 787 | #ifdef CONFIG_VMAP_STACK |
6271cfdf | 788 | /* |
6271cfdf | 789 | * Stack overflow? During boot, we can fault near the initial |
6271cfdf | 790 | * stack in the direct map, but that's not an overflow -- check |
6271cfdf | 791 | * that we're in vmalloc space to avoid this. |
6271cfdf | 792 | */ |
6271cfdf | 793 | if (is_vmalloc_addr((void *)address) && |
6271cfdf | 794 | (((unsigned long)tsk->stack - 1 - address < PAGE_SIZE) || |
6271cfdf | 795 | address - ((unsigned long)tsk->stack + THREAD_SIZE) < PAGE_SIZE)) { |
6271cfdf | 796 | unsigned long stack = this_cpu_read(orig_ist.ist[DOUBLEFAULT_STACK]) - sizeof(void *); |
6271cfdf | 797 | /* |
6271cfdf | 798 | * We're likely to be running with very little stack space |
6271cfdf | 799 | * left. It's plausible that we'd hit this condition but |
6271cfdf | 800 | * double-fault even before we get this far, in which case |
6271cfdf | 801 | * we're fine: the double-fault handler will deal with it. |
6271cfdf | 802 | * |
6271cfdf | 803 | * We don't want to make it all the way into the oops code |
6271cfdf | 804 | * and then double-fault, though, because we're likely to |
6271cfdf | 805 | * break the console driver and lose most of the stack dump. |
6271cfdf | 806 | */ |
6271cfdf | 807 | asm volatile ("movq %[stack], %%rsp\n\t" |
6271cfdf | 808 | "call handle_stack_overflow\n\t" |
6271cfdf | 809 | "1: jmp 1b" |
f5caf621 | 810 | : ASM_CALL_CONSTRAINT |
6271cfdf | 811 | : "D" ("kernel stack overflow (page fault)"), |
6271cfdf | 812 | "S" (regs), "d" (address), |
6271cfdf | 813 | [stack] "rm" (stack)); |
6271cfdf | 814 | unreachable(); |
6271cfdf | 815 | } |
6271cfdf | 816 | #endif |
6271cfdf | 817 | |
92181f19 | 818 | /* |
2d4a7167 | 819 | * 32-bit: |
2d4a7167 | 820 | * |
2d4a7167 | 821 | * Valid to do another page fault here, because if this fault |
2d4a7167 | 822 | * had been triggered by is_prefetch fixup_exception would have |
2d4a7167 | 823 | * handled it. |
2d4a7167 | 824 | * |
2d4a7167 | 825 | * 64-bit: |
92181f19 | 826 | * |
2d4a7167 | 827 | * Hall of shame of CPU/BIOS bugs. |
92181f19 | 828 | */ |
92181f19 | 829 | if (is_prefetch(regs, error_code, address)) |
92181f19 | 830 | return; |
92181f19 | 831 | |
92181f19 | 832 | if (is_errata93(regs, address)) |
92181f19 | 833 | return; |
92181f19 | 834 | |
3425d934 | 835 | /* |
3425d934 | 836 | * Buggy firmware could access regions which might page fault, try to |
3425d934 | 837 | * recover from such faults. |
3425d934 | 838 | */ |
3425d934 | 839 | if (IS_ENABLED(CONFIG_EFI)) |
3425d934 | 840 | efi_recover_from_page_fault(address); |
3425d934 | 841 | |
ebb53e25 | 842 | oops: |
92181f19 | 843 | /* |
92181f19 | 844 | * Oops. The kernel tried to access some bad page. We'll have to |
2d4a7167 | 845 | * terminate things with extreme prejudice: |
92181f19 | 846 | */ |
92181f19 | 847 | flags = oops_begin(); |
92181f19 | 848 | |
92181f19 | 849 | show_fault_oops(regs, error_code, address); |
92181f19 | 850 | |
a70857e4 | 851 | if (task_stack_end_corrupted(tsk)) |
b0f4c4b3 | 852 | printk(KERN_EMERG "Thread overran stack, or stack corrupted\n"); |
19803078 | 853 | |
92181f19 | 854 | sig = SIGKILL; |
92181f19 | 855 | if (__die("Oops", regs, error_code)) |
92181f19 | 856 | sig = 0; |
2d4a7167 | 857 | |
92181f19 | 858 | /* Executive summary in case the body of the oops scrolled away */ |
b0f4c4b3 | 859 | printk(KERN_DEFAULT "CR2: %016lx\n", address); |
2d4a7167 | 860 | |
92181f19 | 861 | oops_end(flags, regs, sig); |
92181f19 | 862 | } |
92181f19 | 863 | |
2d4a7167 | 864 | /* |
2d4a7167 | 865 | * Print out info about fatal segfaults, if the show_unhandled_signals |
2d4a7167 | 866 | * sysctl is set: |
2d4a7167 | 867 | */ |
2d4a7167 | 868 | static inline void |
2d4a7167 | 869 | show_signal_msg(struct pt_regs *regs, unsigned long error_code, |
2d4a7167 | 870 | unsigned long address, struct task_struct *tsk) |
2d4a7167 | 871 | { |
ba54d856 | 872 | const char *loglvl = task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG; |
ba54d856 | 873 | |
2d4a7167 | 874 | if (!unhandled_signal(tsk, SIGSEGV)) |
2d4a7167 | 875 | return; |
2d4a7167 | 876 | |
2d4a7167 | 877 | if (!printk_ratelimit()) |
2d4a7167 | 878 | return; |
2d4a7167 | 879 | |
10a7e9d8 | 880 | printk("%s%s[%d]: segfault at %lx ip %px sp %px error %lx", |
ba54d856 | 881 | loglvl, tsk->comm, task_pid_nr(tsk), address, |
2d4a7167 | 882 | (void *)regs->ip, (void *)regs->sp, error_code); |
2d4a7167 | 883 | |
2d4a7167 | 884 | print_vma_addr(KERN_CONT " in ", regs->ip); |
2d4a7167 | 885 | |
2d4a7167 | 886 | printk(KERN_CONT "\n"); |
ba54d856 | 887 | |
342db04a | 888 | show_opcodes(regs, loglvl); |
2d4a7167 | 889 | } |
2d4a7167 | 890 | |
02e983b7 | 891 | /* |
02e983b7 | 892 | * The (legacy) vsyscall page is the lone page in the kernel portion |
02e983b7 | 893 | * of the address space that has user-accessible permissions. |
02e983b7 | 894 | */ |
02e983b7 | 895 | static bool is_vsyscall_vaddr(unsigned long vaddr) |
02e983b7 | 896 | { |
3ae0ad92 | 897 | return unlikely((vaddr & PAGE_MASK) == VSYSCALL_ADDR); |
02e983b7 | 898 | } |
02e983b7 | 899 | |
2d4a7167 | 900 | static void |
2d4a7167 | 901 | __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code, |
419ceeb1 | 902 | unsigned long address, u32 pkey, int si_code) |
92181f19 | 903 | { |
92181f19 | 904 | struct task_struct *tsk = current; |
92181f19 | 905 | |
92181f19 | 906 | /* User mode accesses just cause a SIGSEGV */ |
6ea59b07 | 907 | if (user_mode(regs) && (error_code & X86_PF_USER)) { |
92181f19 | 908 | /* |
2d4a7167 | 909 | * It's possible to have interrupts off here: |
92181f19 | 910 | */ |
92181f19 | 911 | local_irq_enable(); |
92181f19 | 912 | |
92181f19 | 913 | /* |
92181f19 | 914 | * Valid to do another page fault here because this one came |
2d4a7167 | 915 | * from user space: |
92181f19 | 916 | */ |
92181f19 | 917 | if (is_prefetch(regs, error_code, address)) |
92181f19 | 918 | return; |
92181f19 | 919 | |
92181f19 | 920 | if (is_errata100(regs, address)) |
92181f19 | 921 | return; |
92181f19 | 922 | |
dc4fac84 | 923 | /* |
dc4fac84 | 924 | * To avoid leaking information about the kernel page table |
dc4fac84 | 925 | * layout, pretend that user-mode accesses to kernel addresses |
dc4fac84 | 926 | * are always protection faults. |
dc4fac84 | 927 | */ |
dc4fac84 | 928 | if (address >= TASK_SIZE_MAX) |
1067f030 | 929 | error_code |= X86_PF_PROT; |
3ae36655 | 930 | |
e575a86f | 931 | if (likely(show_unhandled_signals)) |
2d4a7167 | 932 | show_signal_msg(regs, error_code, address, tsk); |
2d4a7167 | 933 | |
e49d3cbe | 934 | set_signal_archinfo(address, error_code); |
92181f19 | 935 | |
9db812db | 936 | if (si_code == SEGV_PKUERR) |
419ceeb1 | 937 | force_sig_pkuerr((void __user *)address, pkey); |
9db812db | 938 | |
b4fd52f2 | 939 | force_sig_fault(SIGSEGV, si_code, (void __user *)address, tsk); |
2d4a7167 | 940 | |
92181f19 | 941 | return; |
92181f19 | 942 | } |
92181f19 | 943 | |
92181f19 | 944 | if (is_f00f_bug(regs, address)) |
92181f19 | 945 | return; |
92181f19 | 946 | |
4fc34901 | 947 | no_context(regs, error_code, address, SIGSEGV, si_code); |
92181f19 | 948 | } |
92181f19 | 949 | |
2d4a7167 | 950 | static noinline void |
2d4a7167 | 951 | bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code, |
768fd9c6 | 952 | unsigned long address) |
92181f19 | 953 | { |
419ceeb1 | 954 | __bad_area_nosemaphore(regs, error_code, address, 0, SEGV_MAPERR); |
92181f19 | 955 | } |
92181f19 | 956 | |
2d4a7167 | 957 | static void |
2d4a7167 | 958 | __bad_area(struct pt_regs *regs, unsigned long error_code, |
419ceeb1 | 959 | unsigned long address, u32 pkey, int si_code) |
92181f19 | 960 | { |
92181f19 | 961 | struct mm_struct *mm = current->mm; |
92181f19 | 962 | /* |
92181f19 | 963 | * Something tried to access memory that isn't in our memory map.. |
92181f19 | 964 | * Fix it, but check if it's kernel or user first.. |
92181f19 | 965 | */ |
92181f19 | 966 | up_read(&mm->mmap_sem); |
92181f19 | 967 | |
aba1ecd3 | 968 | __bad_area_nosemaphore(regs, error_code, address, pkey, si_code); |
92181f19 | 969 | } |
92181f19 | 970 | |
2d4a7167 | 971 | static noinline void |
2d4a7167 | 972 | bad_area(struct pt_regs *regs, unsigned long error_code, unsigned long address) |
92181f19 | 973 | { |
419ceeb1 | 974 | __bad_area(regs, error_code, address, 0, SEGV_MAPERR); |
92181f19 | 975 | } |
92181f19 | 976 | |
33a709b2 | 977 | static inline bool bad_area_access_from_pkeys(unsigned long error_code, |
33a709b2 | 978 | struct vm_area_struct *vma) |
33a709b2 | 979 | { |
07f146f5 | 980 | /* This code is always called on the current mm */ |
07f146f5 | 981 | bool foreign = false; |
07f146f5 | 982 | |
33a709b2 | 983 | if (!boot_cpu_has(X86_FEATURE_OSPKE)) |
33a709b2 | 984 | return false; |
1067f030 | 985 | if (error_code & X86_PF_PK) |
33a709b2 | 986 | return true; |
07f146f5 | 987 | /* this checks permission keys on the VMA: */ |
1067f030 | 988 | if (!arch_vma_access_permitted(vma, (error_code & X86_PF_WRITE), |
1067f030 | 989 | (error_code & X86_PF_INSTR), foreign)) |
07f146f5 | 990 | return true; |
33a709b2 | 991 | return false; |
92181f19 | 992 | } |
92181f19 | 993 | |
2d4a7167 | 994 | static noinline void |
2d4a7167 | 995 | bad_area_access_error(struct pt_regs *regs, unsigned long error_code, |
7b2d0dba | 996 | unsigned long address, struct vm_area_struct *vma) |
92181f19 | 997 | { |
019132ff | 998 | /* |
019132ff | 999 | * This OSPKE check is not strictly necessary at runtime. |
019132ff | 1000 | * But, doing it this way allows compiler optimizations |
019132ff | 1001 | * if pkeys are compiled out. |
019132ff | 1002 | */ |
aba1ecd3 | 1003 | if (bad_area_access_from_pkeys(error_code, vma)) { |
9db812db | 1004 | /* |
9db812db | 1005 | * A protection key fault means that the PKRU value did not allow |
9db812db | 1006 | * access to some PTE. Userspace can figure out what PKRU was |
9db812db | 1007 | * from the XSAVE state. This function captures the pkey from |
9db812db | 1008 | * the vma and passes it to userspace so userspace can discover |
9db812db | 1009 | * which protection key was set on the PTE. |
9db812db | 1010 | * |
9db812db | 1011 | * If we get here, we know that the hardware signaled an X86_PF_PK |
9db812db | 1012 | * fault and that there was a VMA once we got in the fault |
9db812db | 1013 | * handler. It does *not* guarantee that the VMA we find here |
9db812db | 1014 | * was the one that we faulted on. |
9db812db | 1015 | * |
9db812db | 1016 | * 1. T1 : mprotect_key(foo, PAGE_SIZE, pkey=4); |
9db812db | 1017 | * 2. T1 : set PKRU to deny access to pkey=4, touches page |
9db812db | 1018 | * 3. T1 : faults... |
9db812db | 1019 | * 4. T2: mprotect_key(foo, PAGE_SIZE, pkey=5); |
9db812db | 1020 | * 5. T1 : enters fault handler, takes mmap_sem, etc... |
9db812db | 1021 | * 6. T1 : reaches here, sees vma_pkey(vma)=5, when we really |
9db812db | 1022 | * faulted on a pte with its pkey=4. |
9db812db | 1023 | */ |
aba1ecd3 | 1024 | u32 pkey = vma_pkey(vma); |
9db812db | 1025 | |
419ceeb1 | 1026 | __bad_area(regs, error_code, address, pkey, SEGV_PKUERR); |
aba1ecd3 | 1027 | } else { |
419ceeb1 | 1028 | __bad_area(regs, error_code, address, 0, SEGV_ACCERR); |
aba1ecd3 | 1029 | } |
92181f19 | 1030 | } |
92181f19 | 1031 | |
2d4a7167 | 1032 | static void |
a6e04aa9 | 1033 | do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address, |
3d353901 | 1034 | vm_fault_t fault) |
92181f19 | 1035 | { |
92181f19 | 1036 | struct task_struct *tsk = current; |
92181f19 | 1037 | |
2d4a7167 | 1038 | /* Kernel mode? Handle exceptions or die: */ |
1067f030 | 1039 | if (!(error_code & X86_PF_USER)) { |
4fc34901 | 1040 | no_context(regs, error_code, address, SIGBUS, BUS_ADRERR); |
96054569 | 1041 | return; |
96054569 | 1042 | } |
2d4a7167 | 1043 | |
cd1b68f0 | 1044 | /* User-space => ok to do another page fault: */ |
92181f19 | 1045 | if (is_prefetch(regs, error_code, address)) |
92181f19 | 1046 | return; |
2d4a7167 | 1047 | |
e49d3cbe | 1048 | set_signal_archinfo(address, error_code); |
2d4a7167 | 1049 | |
a6e04aa9 | 1050 | #ifdef CONFIG_MEMORY_FAILURE |
f672b49b | 1051 | if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) { |
40e55394 | 1052 | unsigned lsb = 0; |
40e55394 | 1053 | |
40e55394 | 1054 | pr_err( |
a6e04aa9 | 1055 | "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n", |
a6e04aa9 | 1056 | tsk->comm, tsk->pid, address); |
40e55394 | 1057 | if (fault & VM_FAULT_HWPOISON_LARGE) |
40e55394 | 1058 | lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault)); |
40e55394 | 1059 | if (fault & VM_FAULT_HWPOISON) |
40e55394 | 1060 | lsb = PAGE_SHIFT; |
40e55394 | 1061 | force_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, lsb, tsk); |
40e55394 | 1062 | return; |
a6e04aa9 | 1063 | } |
a6e04aa9 | 1064 | #endif |
b4fd52f2 | 1065 | force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address, tsk); |
92181f19 | 1066 | } |
92181f19 | 1067 | |
3a13c4d7 | 1068 | static noinline void |
2d4a7167 | 1069 | mm_fault_error(struct pt_regs *regs, unsigned long error_code, |
25c102d8 | 1070 | unsigned long address, vm_fault_t fault) |
92181f19 | 1071 | { |
1067f030 | 1072 | if (fatal_signal_pending(current) && !(error_code & X86_PF_USER)) { |
3a13c4d7 | 1073 | no_context(regs, error_code, address, 0, 0); |
3a13c4d7 | 1074 | return; |
b80ef10e | 1075 | } |
b80ef10e | 1076 | |
2d4a7167 | 1077 | if (fault & VM_FAULT_OOM) { |
f8626854 | 1078 | /* Kernel mode? Handle exceptions or die: */ |
1067f030 | 1079 | if (!(error_code & X86_PF_USER)) { |
4fc34901 | 1080 | no_context(regs, error_code, address, |
4fc34901 | 1081 | SIGSEGV, SEGV_MAPERR); |
3a13c4d7 | 1082 | return; |
f8626854 | 1083 | } |
f8626854 | 1084 | |
c2d23f91 | 1085 | /* |
c2d23f91 | 1086 | * We ran out of memory, call the OOM killer, and return to |
c2d23f91 | 1087 | * userspace (which will retry the fault, or kill us if we got |
c2d23f91 | 1088 | * oom-killed): |
c2d23f91 | 1089 | */ |
c2d23f91 | 1090 | pagefault_out_of_memory(); |
2d4a7167 | 1091 | } else { |
f672b49b | 1092 | if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON| |
f672b49b | 1093 | VM_FAULT_HWPOISON_LARGE)) |
27274f73 | 1094 | do_sigbus(regs, error_code, address, fault); |
33692f27 | 1095 | else if (fault & VM_FAULT_SIGSEGV) |
768fd9c6 | 1096 | bad_area_nosemaphore(regs, error_code, address); |
2d4a7167 | 1097 | else |
2d4a7167 | 1098 | BUG(); |
2d4a7167 | 1099 | } |
92181f19 | 1100 | } |
92181f19 | 1101 | |
8fed6200 | 1102 | static int spurious_kernel_fault_check(unsigned long error_code, pte_t *pte) |
d8b57bb7 | 1103 | { |
1067f030 | 1104 | if ((error_code & X86_PF_WRITE) && !pte_write(*pte)) |
d8b57bb7 | 1105 | return 0; |
2d4a7167 | 1106 | |
1067f030 | 1107 | if ((error_code & X86_PF_INSTR) && !pte_exec(*pte)) |
d8b57bb7 | 1108 | return 0; |
d8b57bb7 | 1109 | |
d8b57bb7 | 1110 | return 1; |
d8b57bb7 | 1111 | } |
d8b57bb7 | 1112 | |
5b727a3b | 1113 | /* |
2d4a7167 | 1114 | * Handle a spurious fault caused by a stale TLB entry. |
2d4a7167 | 1115 | * |
2d4a7167 | 1116 | * This allows us to lazily refresh the TLB when increasing the |
2d4a7167 | 1117 | * permissions of a kernel page (RO -> RW or NX -> X). Doing it |
2d4a7167 | 1118 | * eagerly is very expensive since that implies doing a full |
2d4a7167 | 1119 | * cross-processor TLB flush, even if no stale TLB entries exist |
2d4a7167 | 1120 | * on other processors. |
2d4a7167 | 1121 | * |
31668511 | 1122 | * Spurious faults may only occur if the TLB contains an entry with |
31668511 | 1123 | * fewer permissions than the page table entry. Non-present (P = 0) |
31668511 | 1124 | * and reserved bit (R = 1) faults are never spurious. |
31668511 | 1125 | * |
5b727a3b | 1126 | * There are no security implications to leaving a stale TLB when |
5b727a3b | 1127 | * increasing the permissions on a page. |
31668511 | 1128 | * |
31668511 | 1129 | * Returns non-zero if a spurious fault was handled, zero otherwise. |
31668511 | 1130 | * |
31668511 | 1131 | * See Intel Developer's Manual Vol 3 Section 4.10.4.3, bullet 3 |
31668511 | 1132 | * (Optional Invalidation). |
5b727a3b | 1133 | */ |
9326638c | 1134 | static noinline int |
8fed6200 | 1135 | spurious_kernel_fault(unsigned long error_code, unsigned long address) |
5b727a3b | 1136 | { |
5b727a3b | 1137 | pgd_t *pgd; |
e0c4f675 | 1138 | p4d_t *p4d; |
5b727a3b | 1139 | pud_t *pud; |
5b727a3b | 1140 | pmd_t *pmd; |
5b727a3b | 1141 | pte_t *pte; |
3c3e5694 | 1142 | int ret; |
5b727a3b | 1143 | |
31668511 | 1144 | /* |
31668511 | 1145 | * Only writes to RO or instruction fetches from NX may cause |
31668511 | 1146 | * spurious faults. |
31668511 | 1147 | * |
31668511 | 1148 | * These could be from user or supervisor accesses but the TLB |
31668511 | 1149 | * is only lazily flushed after a kernel mapping protection |
31668511 | 1150 | * change, so user accesses are not expected to cause spurious |
31668511 | 1151 | * faults. |
31668511 | 1152 | */ |
1067f030 | 1153 | if (error_code != (X86_PF_WRITE | X86_PF_PROT) && |
1067f030 | 1154 | error_code != (X86_PF_INSTR | X86_PF_PROT)) |
5b727a3b | 1155 | return 0; |
5b727a3b | 1156 | |
5b727a3b | 1157 | pgd = init_mm.pgd + pgd_index(address); |
5b727a3b | 1158 | if (!pgd_present(*pgd)) |
5b727a3b | 1159 | return 0; |
5b727a3b | 1160 | |
e0c4f675 | 1161 | p4d = p4d_offset(pgd, address); |
e0c4f675 | 1162 | if (!p4d_present(*p4d)) |
e0c4f675 | 1163 | return 0; |
e0c4f675 | 1164 | |
e0c4f675 | 1165 | if (p4d_large(*p4d)) |
8fed6200 | 1166 | return spurious_kernel_fault_check(error_code, (pte_t *) p4d); |
e0c4f675 | 1167 | |
e0c4f675 | 1168 | pud = pud_offset(p4d, address); |
5b727a3b | 1169 | if (!pud_present(*pud)) |
5b727a3b | 1170 | return 0; |
5b727a3b | 1171 | |
d8b57bb7 | 1172 | if (pud_large(*pud)) |
8fed6200 | 1173 | return spurious_kernel_fault_check(error_code, (pte_t *) pud); |
d8b57bb7 | 1174 | |
5b727a3b | 1175 | pmd = pmd_offset(pud, address); |
5b727a3b | 1176 | if (!pmd_present(*pmd)) |
5b727a3b | 1177 | return 0; |
5b727a3b | 1178 | |
d8b57bb7 | 1179 | if (pmd_large(*pmd)) |
8fed6200 | 1180 | return spurious_kernel_fault_check(error_code, (pte_t *) pmd); |
d8b57bb7 | 1181 | |
5b727a3b | 1182 | pte = pte_offset_kernel(pmd, address); |
954f8571 | 1183 | if (!pte_present(*pte)) |
5b727a3b | 1184 | return 0; |
5b727a3b | 1185 | |
8fed6200 | 1186 | ret = spurious_kernel_fault_check(error_code, pte); |
3c3e5694 | 1187 | if (!ret) |
3c3e5694 | 1188 | return 0; |
3c3e5694 | 1189 | |
3c3e5694 | 1190 | /* |
2d4a7167 | 1191 | * Make sure we have permissions in PMD. |
2d4a7167 | 1192 | * If not, then there's a bug in the page tables: |
3c3e5694 | 1193 | */ |
8fed6200 | 1194 | ret = spurious_kernel_fault_check(error_code, (pte_t *) pmd); |
3c3e5694 | 1195 | WARN_ONCE(!ret, "PMD has incorrect permission bits\n"); |
2d4a7167 | 1196 | |
3c3e5694 | 1197 | return ret; |
5b727a3b | 1198 | } |
8fed6200 | 1199 | NOKPROBE_SYMBOL(spurious_kernel_fault); |
5b727a3b | 1200 | |
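spurious_kernel_fault_check() reduces to two bit tests: the fault is spurious only if the page-table entry already grants the access that faulted. The same decision standalone (a toy PTE with just the two relevant permission bits; illustrative only):

```c
#include <stdio.h>
#include <stdbool.h>

#define X86_PF_PROT  0x01
#define X86_PF_WRITE 0x02
#define X86_PF_INSTR 0x10

/* Toy PTE: just the two permissions the check cares about. */
struct toy_pte { bool writable, executable; };

static int toy_spurious_check(unsigned long error_code, struct toy_pte pte)
{
	if ((error_code & X86_PF_WRITE) && !pte.writable)
		return 0;	/* write fault and PTE really is RO: genuine */
	if ((error_code & X86_PF_INSTR) && !pte.executable)
		return 0;	/* fetch fault and PTE really is NX: genuine */
	return 1;		/* PTE allows the access: stale TLB, spurious */
}

int main(void)
{
	struct toy_pte rw_nx = { true, false };

	/* Write to a page that is now RW -> spurious (stale RO TLB entry): */
	printf("%d\n", toy_spurious_check(X86_PF_WRITE | X86_PF_PROT, rw_nx)); /* 1 */
	/* Instruction fetch from an NX page -> genuine fault: */
	printf("%d\n", toy_spurious_check(X86_PF_INSTR | X86_PF_PROT, rw_nx)); /* 0 */
	return 0;
}
```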
abd4f750 | 1201 | int show_unhandled_signals = 1; |
1da177e4 | 1202 | |
2d4a7167 | 1203 | static inline int |
68da336a | 1204 | access_error(unsigned long error_code, struct vm_area_struct *vma) |
92181f19 | 1205 | { |
07f146f5 | 1206 | /* This is only called for the current mm, so: */ |
07f146f5 | 1207 | bool foreign = false; |
e8c6226d | 1208 | |
e8c6226d | 1209 | /* |
e8c6226d | 1210 | * Read or write was blocked by protection keys. This is |
e8c6226d | 1211 | * always an unconditional error and can never result in |
e8c6226d | 1212 | * a follow-up action to resolve the fault, like a COW. |
e8c6226d | 1213 | */ |
1067f030 | 1214 | if (error_code & X86_PF_PK) |
e8c6226d | 1215 | return 1; |
e8c6226d | 1216 | |
07f146f5 | 1217 | /* |
07f146f5 | 1218 | * Make sure to check the VMA so that we do not perform |
1067f030 | 1219 | * faults just to hit an X86_PF_PK as soon as we fill in a |
07f146f5 | 1220 | * page. |
07f146f5 | 1221 | */ |
1067f030 | 1222 | if (!arch_vma_access_permitted(vma, (error_code & X86_PF_WRITE), |
1067f030 | 1223 | (error_code & X86_PF_INSTR), foreign)) |
07f146f5 | 1224 | return 1; |
33a709b2 | 1225 | |
1067f030 | 1226 | if (error_code & X86_PF_WRITE) { |
2d4a7167 | 1227 | /* write, present and write, not present: */ |
92181f19 | 1228 | if (unlikely(!(vma->vm_flags & VM_WRITE))) |
92181f19 | 1229 | return 1; |
2d4a7167 | 1230 | return 0; |
92181f19 | 1231 | } |
92181f19 | 1232 | |
2d4a7167 | 1233 | /* read, present: */ |
1067f030 | 1234 | if (unlikely(error_code & X86_PF_PROT)) |
2d4a7167 | 1235 | return 1; |
2d4a7167 | 1236 | |
2d4a7167 | 1237 | /* read, not present: */ |
2d4a7167 | 1238 | if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))) |
2d4a7167 | 1239 | return 1; |
2d4a7167 | 1240 | |
92181f19 | 1241 | return 0; |
92181f19 | 1242 | } |
92181f19 | 1243 | |
0973a06c | 1244 | static int fault_in_kernel_space(unsigned long address) |
0973a06c | 1245 | { |
3ae0ad92 | 1246 | /* |
3ae0ad92 | 1247 | * On 64-bit systems, the vsyscall page is at an address above |
3ae0ad92 | 1248 | * TASK_SIZE_MAX, but is not considered part of the kernel |
3ae0ad92 | 1249 | * address space. |
3ae0ad92 | 1250 | */ |
3ae0ad92 | 1251 | if (IS_ENABLED(CONFIG_X86_64) && is_vsyscall_vaddr(address)) |
3ae0ad92 | 1252 | return false; |
3ae0ad92 | 1253 | |
d9517346 | 1254 | return address >= TASK_SIZE_MAX; |
0973a06c | 1255 | } |
0973a06c | 1256 | |
1da177e4 | 1257 | /* |
8fed6200 | 1258 | * Called for all faults where 'address' is part of the kernel address |
8fed6200 | 1259 | * space. Might get called for faults that originate from *code* that |
8fed6200 | 1260 | * ran in userspace or the kernel. |
1da177e4 | 1261 | */ |
8fed6200 | 1262 | static void |
8fed6200 | 1263 | do_kern_addr_fault(struct pt_regs *regs, unsigned long hw_error_code, |
8fed6200 | 1264 | unsigned long address) |
1da177e4 | 1265 | { |
367e3f1d | 1266 | /* |
367e3f1d | 1267 | * Protection keys exceptions only happen on user pages. We |
367e3f1d | 1268 | * have no user pages in the kernel portion of the address |
367e3f1d | 1269 | * space, so do not expect them here. |
367e3f1d | 1270 | */ |
367e3f1d | 1271 | WARN_ON_ONCE(hw_error_code & X86_PF_PK); |
1da177e4 | 1272 | |
1da177e4 | 1273 | /* |
8fed6200 | 1274 | * We can fault-in kernel-space virtual memory on-demand. The |
1da177e4 | 1275 | * 'reference' page table is init_mm.pgd. |
1da177e4 | 1276 | * |
1da177e4 | 1277 | * NOTE! We MUST NOT take any locks for this case. We may |
1da177e4 | 1278 | * be in an interrupt or a critical region, and should |
1da177e4 | 1279 | * only copy the information from the master page table, |
1da177e4 | 1280 | * nothing more. |
1da177e4 | 1281 | * |
8fed6200 | 1282 | * Before doing this on-demand faulting, ensure that the |
8fed6200 | 1283 | * fault is not any of the following: |
8fed6200 | 1284 | * 1. A fault on a PTE with a reserved bit set. |
8fed6200 | 1285 | * 2. A fault caused by a user-mode access. (Do not demand- |
8fed6200 | 1286 | * fault kernel memory due to user-mode accesses). |
8fed6200 | 1287 | * 3. A fault caused by a page-level protection violation. |
8fed6200 | 1288 | * (A demand fault would be on a non-present page which |
8fed6200 | 1289 | * would have X86_PF_PROT==0). |
1da177e4 | 1290 | */ |
8fed6200 | 1291 | if (!(hw_error_code & (X86_PF_RSVD | X86_PF_USER | X86_PF_PROT))) { |
8fed6200 | 1292 | if (vmalloc_fault(address) >= 0) |
5b727a3b | 1293 | return; |
8fed6200 | 1294 | } |
5b727a3b | 1295 | |
8fed6200 | 1296 | /* Was the fault spurious, caused by lazy TLB invalidation? */ |
8fed6200 | 1297 | if (spurious_kernel_fault(hw_error_code, address)) |
8fed6200 | 1298 | return; |
2d4a7167 | 1299 | |
8fed6200 | 1300 | /* kprobes don't want to hook the spurious faults: */ |
8fed6200 | 1301 | if (kprobes_fault(regs)) |
92181f19 | 1302 | return; |
8fed6200 | 1303 | |
8fed6200 | 1304 | /* |
8fed6200 | 1305 | * Note, despite being a "bad area", there are quite a few |
8fed6200 | 1306 | * acceptable reasons to get here, such as erratum fixups |
8fed6200 | 1307 | * and handling kernel code that can fault, like get_user(). |
8fed6200 | 1308 | * |
8fed6200 | 1309 | * Don't take the mm semaphore here. If we fixup a prefetch |
8fed6200 | 1310 | * fault we could otherwise deadlock: |
8fed6200 | 1311 | */ |
ba9f6f89 | 1312 | bad_area_nosemaphore(regs, hw_error_code, address); |
8fed6200 | 1313 | } |
8fed6200 | 1314 | NOKPROBE_SYMBOL(do_kern_addr_fault); |
8fed6200 | 1315 | |
aa37c51b | 1316 | /* Handle faults in the user portion of the address space */ |
aa37c51b | 1317 | static inline |
aa37c51b | 1318 | void do_user_addr_fault(struct pt_regs *regs, |
aa37c51b | 1319 | unsigned long hw_error_code, |
aa37c51b | 1320 | unsigned long address) |
1da177e4 | 1321 | { |
2d4a7167 | 1322 | struct vm_area_struct *vma; |
1da177e4 | 1323 | struct task_struct *tsk; |
1da177e4 | 1324 | struct mm_struct *mm; |
50a7ca3c | 1325 | vm_fault_t fault, major = 0; |
759496ba | 1326 | unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; |
1da177e4 | 1327 | |
a9ba9a3b | 1328 | tsk = current; |
a9ba9a3b | 1329 | mm = tsk->mm; |
f8c2ee22 | 1330 | |
2d4a7167 | 1331 | /* kprobes don't want to hook the spurious faults: */ |
e00b12e6 | 1332 | if (unlikely(kprobes_fault(regs))) |
9be260a6 | 1333 | return; |
8c914cb7 | 1334 | |
5b0c2cac | 1335 | /* |
5b0c2cac | 1336 | * Reserved bits are never expected to be set on |
5b0c2cac | 1337 | * entries in the user portion of the page tables. |
5b0c2cac | 1338 | */ |
164477c2 | 1339 | if (unlikely(hw_error_code & X86_PF_RSVD)) |
164477c2 | 1340 | pgtable_bad(regs, hw_error_code, address); |
1da177e4 | 1341 | |
5b0c2cac | 1342 | /* |
e50928d7 | 1343 | * If SMAP is on, check for invalid kernel (supervisor) access to user |
e50928d7 | 1344 | * pages in the user address space. The odd case here is WRUSS, |
e50928d7 | 1345 | * which, according to the preliminary documentation, does not respect |
e50928d7 | 1346 | * SMAP and will have the USER bit set so, in all cases, SMAP |
e50928d7 | 1347 | * enforcement appears to be consistent with the USER bit. |
5b0c2cac | 1348 | */ |
a15781b5 | 1349 | if (unlikely(cpu_feature_enabled(X86_FEATURE_SMAP) && |
a15781b5 | 1350 | !(hw_error_code & X86_PF_USER) && |
e50928d7 | 1351 | !(regs->flags & X86_EFLAGS_AC))) |
a15781b5 | 1352 | { |
ba9f6f89 | 1353 | bad_area_nosemaphore(regs, hw_error_code, address); |
4640c7ee | 1354 | return; |
40d3cd66 | 1355 | } |
40d3cd66 | 1356 | |
1da177e4 | 1357 | /* |
2d4a7167 | 1358 | * If we're in an interrupt, have no user context or are running |
70ffdb93 | 1359 | * in a region with pagefaults disabled then we must not take the fault |
1da177e4 | 1360 | */ |
70ffdb93 | 1361 | if (unlikely(faulthandler_disabled() || !mm)) { |
ba9f6f89 | 1362 | bad_area_nosemaphore(regs, hw_error_code, address); |
92181f19 | 1363 | return; |
92181f19 | 1364 | } |
1da177e4 | 1365 | |
e00b12e6 | 1366 | /* |
e00b12e6 | 1367 | * It's safe to allow irq's after cr2 has been saved and the |
e00b12e6 | 1368 | * vmalloc fault has been handled. |
e00b12e6 | 1369 | * |
e00b12e6 | 1370 | * User-mode registers count as a user access even for any |
e00b12e6 | 1371 | * potential system fault or CPU buglet: |
e00b12e6 | 1372 | */ |
f39b6f0e | 1373 | if (user_mode(regs)) { |
e00b12e6 | 1374 | local_irq_enable(); |
e00b12e6 | 1375 | flags |= FAULT_FLAG_USER; |
e00b12e6 | 1376 | } else { |
e00b12e6 | 1377 | if (regs->flags & X86_EFLAGS_IF) |
e00b12e6 | 1378 | local_irq_enable(); |
e00b12e6 | 1379 | } |
e00b12e6 | 1380 | |
e00b12e6 | 1381 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); |
e00b12e6 | 1382 | |
0ed32f1a | 1383 | if (hw_error_code & X86_PF_WRITE) |
759496ba | 1384 | flags |= FAULT_FLAG_WRITE; |
0ed32f1a | 1385 | if (hw_error_code & X86_PF_INSTR) |
d61172b4 | 1386 | flags |= FAULT_FLAG_INSTRUCTION; |
759496ba | 1387 | |
3ae0ad92 | 1388 | #ifdef CONFIG_X86_64 |
3a1dfe6e | 1389 | /* |
3ae0ad92 | 1390 | * Instruction fetch faults in the vsyscall page might need |
3ae0ad92 | 1391 | * emulation. The vsyscall page is at a high address |
3ae0ad92 | 1392 | * (>PAGE_OFFSET), but is considered to be part of the user |
3ae0ad92 | 1393 | * address space. |
1da177e4 | 1394 | * |
3ae0ad92 | 1395 | * The vsyscall page does not have a "real" VMA, so do this |
3ae0ad92 | 1396 | * emulation before we go searching for VMAs. |
3ae0ad92 | 1397 | */ |
0ed32f1a | 1398 | if ((hw_error_code & X86_PF_INSTR) && is_vsyscall_vaddr(address)) { |
3ae0ad92 | 1399 | if (emulate_vsyscall(regs, address)) |
3ae0ad92 | 1400 | return; |
3ae0ad92 | 1401 | } |
3ae0ad92 | 1402 | #endif |
3ae0ad92 | 1403 | |
3a1dfe6e | 1404 | /* |
88259744 | 1405 | * Kernel-mode access to the user address space should only occur |
88259744 | 1406 | * on well-defined single instructions listed in the exception |
88259744 | 1407 | * tables. But, an erroneous kernel fault occurring outside one of |
88259744 | 1408 | * those areas which also holds mmap_sem might deadlock attempting |
88259744 | 1409 | * to validate the fault against the address space. |
1da177e4 | 1410 | * |
88259744 | 1411 | * Only do the expensive exception table search when we might be at |
88259744 | 1412 | * risk of a deadlock. This happens if we |
88259744 | 1413 | * 1. Failed to acquire mmap_sem, and |
6344be60 | 1414 | * 2. The access did not originate in userspace. |
1da177e4 | 1415 | */ |
92181f19 | 1416 | if (unlikely(!down_read_trylock(&mm->mmap_sem))) { |
6344be60 | 1417 | if (!user_mode(regs) && !search_exception_tables(regs->ip)) { |
88259744 | 1418 | /* |
88259744 | 1419 | * Fault from code in kernel from |
88259744 | 1420 | * which we do not expect faults. |
88259744 | 1421 | */ |
0ed32f1a | 1422 | bad_area_nosemaphore(regs, hw_error_code, address); |
92181f19 | 1423 | return; |
92181f19 | 1424 | } |
d065bd81 | 1425 | retry: |
1da177e4 | 1426 | down_read(&mm->mmap_sem); |
01006074 | 1427 | } else { |
01006074 | 1428 | /* |
2d4a7167 | 1429 | * The above down_read_trylock() might have succeeded in |
2d4a7167 | 1430 | * which case we'll have missed the might_sleep() from |
2d4a7167 | 1431 | * down_read(): |
01006074 | 1432 | */ |
01006074 | 1433 | might_sleep(); |
1da177e4 | 1434 | } |
1da177e4 | 1435 | |
1da177e4 | 1436 | vma = find_vma(mm, address); |
92181f19 | 1437 | if (unlikely(!vma)) { |
0ed32f1a | 1438 | bad_area(regs, hw_error_code, address); |
92181f19 | 1439 | return; |
92181f19 | 1440 | } |
92181f19 | 1441 | if (likely(vma->vm_start <= address)) |
1da177e4 | 1442 | goto good_area; |
92181f19 | 1443 | if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) { |
0ed32f1a | 1444 | bad_area(regs, hw_error_code, address); |
92181f19 | 1445 | return; |
92181f19 | 1446 | } |
92181f19 | 1447 | if (unlikely(expand_stack(vma, address))) { |
0ed32f1a | 1448 | bad_area(regs, hw_error_code, address); |
92181f19 | 1449 | return; |
92181f19 | 1450 | } |
92181f19 | 1451 | |
92181f19 | 1452 | /* |
92181f19 | 1453 | * Ok, we have a good vm_area for this memory access, so |
92181f19 | 1454 | * we can handle it.. |
92181f19 | 1455 | */ |
1da177e4 | 1456 | good_area: |
0ed32f1a | 1457 | if (unlikely(access_error(hw_error_code, vma))) { |
0ed32f1a | 1458 | bad_area_access_error(regs, hw_error_code, address, vma); |
92181f19 | 1459 | return; |
1da177e4 | 1460 | } |
1da177e4 | 1461 | |
1da177e4 | 1462 | /* |
1da177e4 | 1463 | * If for any reason at all we couldn't handle the fault, |
1da177e4 | 1464 | * make sure we exit gracefully rather than endlessly redo |
9a95f3cf | 1465 | * the fault. Since we never set FAULT_FLAG_RETRY_NOWAIT, if |
9a95f3cf | 1466 | * we get VM_FAULT_RETRY back, the mmap_sem has been unlocked. |
cb0631fd | 1467 | * |
cb0631fd | 1468 | * Note that handle_userfault() may also release and reacquire mmap_sem |
cb0631fd | 1469 | * (and not return with VM_FAULT_RETRY), when returning to userland to |
cb0631fd | 1470 | * repeat the page fault later with a VM_FAULT_NOPAGE retval |
cb0631fd | 1471 | * (potentially after handling any pending signal during the return to |
cb0631fd | 1472 | * userland). The return to userland is identified whenever |
cb0631fd | 1473 | * FAULT_FLAG_USER|FAULT_FLAG_KILLABLE are both set in flags. |
1da177e4 | 1474 | */ |
dcddffd4 | 1475 | fault = handle_mm_fault(vma, address, flags); |
26178ec1 | 1476 | major |= fault & VM_FAULT_MAJOR; |
2d4a7167 | 1477 | |
3a13c4d7 | 1478 | /* |
26178ec1 | 1479 | * If we need to retry the mmap_sem has already been released, |
26178ec1 | 1480 | * and if there is a fatal signal pending there is no guarantee |
26178ec1 | 1481 | * that we made any progress. Handle this case first. |
3a13c4d7 | 1482 | */ |
26178ec1 | 1483 | if (unlikely(fault & VM_FAULT_RETRY)) { |
26178ec1 | 1484 | /* Retry at most once */ |
26178ec1 | 1485 | if (flags & FAULT_FLAG_ALLOW_RETRY) { |
26178ec1 | 1486 | flags &= ~FAULT_FLAG_ALLOW_RETRY; |
26178ec1 | 1487 | flags |= FAULT_FLAG_TRIED; |
26178ec1 | 1488 | if (!fatal_signal_pending(tsk)) |
26178ec1 | 1489 | goto retry; |
26178ec1 | 1490 | } |
26178ec1 | 1491 | |
26178ec1 | 1492 | /* User mode? Just return to handle the fatal exception */ |
cf3c0a15 | 1493 | if (flags & FAULT_FLAG_USER) |
26178ec1 | 1494 | return; |
26178ec1 | 1495 | |
26178ec1 | 1496 | /* Not returning to user mode? Handle exceptions or die: */ |
0ed32f1a | 1497 | no_context(regs, hw_error_code, address, SIGBUS, BUS_ADRERR); |
3a13c4d7 | 1498 | return; |
26178ec1 | 1499 | } |
3a13c4d7 | 1500 | |
26178ec1 | 1501 | up_read(&mm->mmap_sem); |
3a13c4d7 | 1502 | if (unlikely(fault & VM_FAULT_ERROR)) { |
0ed32f1a | 1503 | mm_fault_error(regs, hw_error_code, address, fault); |
3a13c4d7 | 1504 | return; |
37b23e05 | 1505 | } |
37b23e05 | 1506 | |
d065bd81 | 1507 | /* |
26178ec1 | 1508 | * Major/minor page fault accounting. If any of the events |
26178ec1 | 1509 | * returned VM_FAULT_MAJOR, we account it as a major fault. |
d065bd81 | 1510 | */ |
26178ec1 | 1511 | if (major) { |
26178ec1 | 1512 | tsk->maj_flt++; |
26178ec1 | 1513 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address); |
26178ec1 | 1514 | } else { |
26178ec1 | 1515 | tsk->min_flt++; |
26178ec1 | 1516 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address); |
ac17dc8e | 1517 | } |
d729ab35 | 1518 | |
8c938f9f | 1519 | check_v8086_mode(regs, address, tsk); |
1da177e4 | 1520 | } |
aa37c51b | 1521 | NOKPROBE_SYMBOL(do_user_addr_fault); |
aa37c51b | 1522 | |
aa37c51b | 1523 | /* |
aa37c51b | 1524 | * This routine handles page faults. It determines the address, |
aa37c51b | 1525 | * and the problem, and then passes it off to one of the appropriate |
aa37c51b | 1526 | * routines. |
aa37c51b | 1527 | */ |
aa37c51b | 1528 | static noinline void |
aa37c51b | 1529 | __do_page_fault(struct pt_regs *regs, unsigned long hw_error_code, |
aa37c51b | 1530 | unsigned long address) |
aa37c51b | 1531 | { |
aa37c51b | 1532 | prefetchw(&current->mm->mmap_sem); |
aa37c51b | 1533 | |
aa37c51b | 1534 | if (unlikely(kmmio_fault(regs, address))) |
aa37c51b | 1535 | return; |
aa37c51b | 1536 | |
aa37c51b | 1537 | /* Was the fault on kernel-controlled part of the address space? */ |
aa37c51b | 1538 | if (unlikely(fault_in_kernel_space(address))) |
aa37c51b | 1539 | do_kern_addr_fault(regs, hw_error_code, address); |
aa37c51b | 1540 | else |
aa37c51b | 1541 | do_user_addr_fault(regs, hw_error_code, address); |
aa37c51b | 1542 | } |
9326638c | 1543 | NOKPROBE_SYMBOL(__do_page_fault); |
6ba3c97a | 1544 | |
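The top-level split hinges on fault_in_kernel_space(), whose only subtlety is the vsyscall carve-out. The address-range decision as a standalone check (TASK_SIZE_MAX and VSYSCALL_ADDR shown with their 4-level x86-64 values; illustrative only):

```c
#include <stdio.h>
#include <stdbool.h>

/* Illustrative x86-64 (4-level) constants: */
#define PAGE_MASK      (~0xfffUL)
#define TASK_SIZE_MAX  0x00007ffffffff000UL
#define VSYSCALL_ADDR  0xffffffffff600000UL

static bool toy_fault_in_kernel_space(unsigned long address)
{
	/* The vsyscall page sits above TASK_SIZE_MAX but is handled
	 * by the user-address path: */
	if ((address & PAGE_MASK) == VSYSCALL_ADDR)
		return false;
	return address >= TASK_SIZE_MAX;
}

int main(void)
{
	printf("%d\n", toy_fault_in_kernel_space(0x400000));             /* 0: user text */
	printf("%d\n", toy_fault_in_kernel_space(0xffffffff81000000UL)); /* 1: kernel */
	printf("%d\n", toy_fault_in_kernel_space(VSYSCALL_ADDR + 0x10)); /* 0: vsyscall */
	return 0;
}
```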
9326638c | 1545 | static nokprobe_inline void |
9326638c | 1546 | trace_page_fault_entries(unsigned long address, struct pt_regs *regs, |
9326638c | 1547 | unsigned long error_code) |
d34603b0 | 1548 | { |
d34603b0 | 1549 | if (user_mode(regs)) |
d4078e23 | 1550 | trace_page_fault_user(address, regs, error_code); |
d34603b0 | 1551 | else |
d4078e23 | 1552 | trace_page_fault_kernel(address, regs, error_code); |
d34603b0 | 1553 | } |
d34603b0 | 1554 | |
11a7ffb0 | 1555 | /* |
11a7ffb0 | 1556 | * We must have this function blacklisted from kprobes, tagged with notrace |
11a7ffb0 | 1557 | * and call read_cr2() before calling anything else. To avoid calling any |
11a7ffb0 | 1558 | * kind of tracing machinery before we've observed the CR2 value. |
11a7ffb0 | 1559 | * |
11a7ffb0 | 1560 | * exception_{enter,exit}() contains all sorts of tracepoints. |
11a7ffb0 | 1561 | */ |
9326638c | 1562 | dotraplinkage void notrace |
11a7ffb0 | 1563 | do_page_fault(struct pt_regs *regs, unsigned long error_code) |
25c74b10 | 1564 | { |
11a7ffb0 | 1565 | unsigned long address = read_cr2(); /* Get the faulting address */ |
d4078e23 | 1566 | enum ctx_state prev_state; |
25c74b10 | 1567 | |
25c74b10 | 1568 | prev_state = exception_enter(); |
80954747 | 1569 | if (trace_pagefault_enabled()) |
11a7ffb0 | 1570 | trace_page_fault_entries(address, regs, error_code); |
11a7ffb0 | 1571 | |
0ac09f9f | 1572 | __do_page_fault(regs, error_code, address); |
25c74b10 | 1573 | exception_exit(prev_state); |
25c74b10 | 1574 | } |
11a7ffb0 | 1575 | NOKPROBE_SYMBOL(do_page_fault); |