// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Derived from "arch/i386/mm/fault.c"
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * Modified by Cort Dougan and Paul Mackerras.
 *
 * Modified for PPC64 by Dave Engebretsen (engebret@ibm.com)
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/string_choices.h>
#include <linux/types.h>
#include <linux/pagemap.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/highmem.h>
#include <linux/extable.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/perf_event.h>
#include <linux/ratelimit.h>
#include <linux/context_tracking.h>
#include <linux/hugetlb.h>
#include <linux/uaccess.h>
#include <linux/kfence.h>
#include <linux/pkeys.h>

#include <asm/firmware.h>
#include <asm/interrupt.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/siginfo.h>
#include <asm/debug.h>
#include <asm/kup.h>
#include <asm/inst.h>


/*
 * do_page_fault error handling helpers
 */

static int
__bad_area_nosemaphore(struct pt_regs *regs, unsigned long address, int si_code)
{
	/*
	 * If we are in kernel mode, bail out with a SEGV, this will
	 * be caught by the assembly which will restore the non-volatile
	 * registers before calling bad_page_fault()
	 */
	if (!user_mode(regs))
		return SIGSEGV;

	_exception(SIGSEGV, regs, si_code, address);

	return 0;
}

static noinline int bad_area_nosemaphore(struct pt_regs *regs, unsigned long address)
{
	return __bad_area_nosemaphore(regs, address, SEGV_MAPERR);
}

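/*
 * Drop whichever lock the fault path was holding (mmap_lock when mm is
 * given, the per-VMA read lock otherwise) before delivering the signal.
 */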
static int __bad_area(struct pt_regs *regs, unsigned long address, int si_code,
		      struct mm_struct *mm, struct vm_area_struct *vma)
{
	/*
	 * Something tried to access memory that isn't in our memory map.
	 * Fix it, but check whether it's a kernel or user address first.
	 */
	if (mm)
		mmap_read_unlock(mm);
	else
		vma_end_read(vma);

	return __bad_area_nosemaphore(regs, address, si_code);
}

static noinline int bad_access_pkey(struct pt_regs *regs, unsigned long address,
				    struct mm_struct *mm,
				    struct vm_area_struct *vma)
{
	int pkey;

	/*
	 * We don't try to fetch the pkey from the page table because reading
	 * the page table without locking doesn't guarantee a stable pte value.
	 * Hence the pkey value that we return to userspace can be different
	 * from the pkey that actually caused the access error.
	 *
	 * It does *not* guarantee that the VMA we find here
	 * was the one that we faulted on.
	 *
	 * 1. T1   : mprotect_key(foo, PAGE_SIZE, pkey=4);
	 * 2. T1   : set AMR to deny access to pkey=4, touches page
	 * 3. T1   : faults...
	 * 4.    T2: mprotect_key(foo, PAGE_SIZE, pkey=5);
	 * 5. T1   : enters fault handler, takes mmap_lock, etc...
	 * 6. T1   : reaches here, sees vma_pkey(vma)=5, when we really
	 *	     faulted on a pte with its pkey=4.
	 */
	pkey = vma_pkey(vma);

	if (mm)
		mmap_read_unlock(mm);
	else
		vma_end_read(vma);

	/*
	 * If we are in kernel mode, bail out with a SEGV, this will
	 * be caught by the assembly which will restore the non-volatile
	 * registers before calling bad_page_fault()
	 */
	if (!user_mode(regs))
		return SIGSEGV;

	_exception_pkey(regs, address, pkey);

	return 0;
}

static noinline int bad_access(struct pt_regs *regs, unsigned long address,
			       struct mm_struct *mm, struct vm_area_struct *vma)
{
	return __bad_area(regs, address, SEGV_ACCERR, mm, vma);
}

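/*
 * Report a bus error to userspace. For hardware memory-corruption
 * (hwpoison) faults, the signal carries the granularity (lsb) of the
 * poisoned region, derived from the huge page size where applicable.
 */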
static int do_sigbus(struct pt_regs *regs, unsigned long address,
		     vm_fault_t fault)
{
	if (!user_mode(regs))
		return SIGBUS;

	current->thread.trap_nr = BUS_ADRERR;
#ifdef CONFIG_MEMORY_FAILURE
	if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
		unsigned int lsb = 0; /* silence gcc's uninitialized warning */

		pr_err("MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
			current->comm, current->pid, address);

		if (fault & VM_FAULT_HWPOISON_LARGE)
			lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault));
		if (fault & VM_FAULT_HWPOISON)
			lsb = PAGE_SHIFT;

		force_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, lsb);
		return 0;
	}

#endif
	force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address);
	return 0;
}

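/*
 * Convert an error returned by handle_mm_fault() into a signal for
 * userspace, or into a signal number for the kernel-fault path to act on.
 */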
static int mm_fault_error(struct pt_regs *regs, unsigned long addr,
			  vm_fault_t fault)
{
	/*
	 * Kernel page fault interrupted by SIGKILL. We have no reason to
	 * continue processing.
	 */
	if (fatal_signal_pending(current) && !user_mode(regs))
		return SIGKILL;

	/* Out of memory */
	if (fault & VM_FAULT_OOM) {
		/*
		 * We ran out of memory, or some other thing happened to us
		 * that made us unable to handle the page fault gracefully.
		 */
		if (!user_mode(regs))
			return SIGSEGV;
		pagefault_out_of_memory();
	} else {
		if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|
			     VM_FAULT_HWPOISON_LARGE))
			return do_sigbus(regs, addr, fault);
		else if (fault & VM_FAULT_SIGSEGV)
			return bad_area_nosemaphore(regs, addr);
		else
			BUG();
	}
	return 0;
}

/* Is this a bad kernel fault? */
static bool bad_kernel_fault(struct pt_regs *regs, unsigned long error_code,
			     unsigned long address, bool is_write)
{
	int is_exec = TRAP(regs) == INTERRUPT_INST_STORAGE;

	if (is_exec) {
		pr_crit_ratelimited("kernel tried to execute %s page (%lx) - exploit attempt? (uid: %d)\n",
				    address >= TASK_SIZE ? "exec-protected" : "user",
				    address,
				    from_kuid(&init_user_ns, current_uid()));

		// Kernel exec fault is always bad
		return true;
	}

	// Kernel fault on kernel address is bad
	if (address >= TASK_SIZE)
		return true;

	// Read/write fault blocked by KUAP is bad, it can never succeed.
	if (bad_kuap_fault(regs, address, is_write)) {
		pr_crit_ratelimited("Kernel attempted to %s user page (%lx) - exploit attempt? (uid: %d)\n",
				    str_write_read(is_write), address,
				    from_kuid(&init_user_ns, current_uid()));

		// Fault on user outside of certain regions (eg. copy_tofrom_user()) is bad
		if (!search_exception_tables(regs->nip))
			return true;

		// Read/write fault in a valid region (the exception table search passed
		// above), but blocked by KUAP is bad, it can never succeed.
		return WARN(true, "Bug: %s fault blocked by KUAP!", is_write ? "Write" : "Read");
	}

	// What's left? Kernel fault on user and allowed by KUAP in the faulting context.
	return false;
}

static bool access_pkey_error(bool is_write, bool is_exec, bool is_pkey,
			      struct vm_area_struct *vma)
{
	/*
	 * Make sure to check the VMA so that we do not perform
	 * faults just to hit a pkey fault as soon as we fill in a
	 * page. Only called for current mm, hence foreign == 0
	 */
	if (!arch_vma_access_permitted(vma, is_write, is_exec, 0))
		return true;

	return false;
}

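/* Does the VMA permit this kind of access (exec, write or read)? */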
static bool access_error(bool is_write, bool is_exec, struct vm_area_struct *vma)
{
	/*
	 * Allow execution from readable areas if the MMU does not
	 * provide separate controls over reading and executing.
	 *
	 * Note: this check used not to be enabled for 4xx/BookE.
	 * It is now, as I/D cache coherency for these is done at
	 * set_pte_at() time and I see no reason why the test
	 * below wouldn't be valid on those processors. This -may-
	 * break programs compiled with a really old ABI though.
	 */
	if (is_exec) {
		return !(vma->vm_flags & VM_EXEC) &&
			(cpu_has_feature(CPU_FTR_NOEXECUTE) ||
			 !(vma->vm_flags & (VM_READ | VM_WRITE)));
	}

	if (is_write) {
		if (unlikely(!(vma->vm_flags & VM_WRITE)))
			return true;
		return false;
	}

	/*
	 * VM_READ, VM_WRITE and VM_EXEC may imply read permissions, as
	 * defined in protection_map[]. In that case read faults can only be
	 * caused by a PROT_NONE mapping. However a non-exec access on a
	 * VM_EXEC-only mapping is invalid anyway, so report it as such.
	 */
	if (unlikely(!vma_is_accessible(vma)))
		return true;

	if ((vma->vm_flags & VM_ACCESS_FLAGS) == VM_EXEC)
		return true;

	/*
	 * We should ideally do the vma pkey access check here. But in the
	 * fault path, handle_mm_fault() also does the same check. To avoid
	 * these multiple checks, we skip it here and handle access error due
	 * to pkeys later.
	 */
	return false;
}

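/*
 * On Cooperative Memory Overcommit (CMO) platforms, major faults are
 * counted in the lppaca's page_ins field for the platform's paging
 * statistics.
 */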
#ifdef CONFIG_PPC_SMLPAR
static inline void cmo_account_page_fault(void)
{
	if (firmware_has_feature(FW_FEATURE_CMO)) {
		u32 page_ins;

		preempt_disable();
		page_ins = be32_to_cpu(get_lppaca()->page_ins);
		page_ins += 1 << PAGE_FACTOR;
		get_lppaca()->page_ins = cpu_to_be32(page_ins);
		preempt_enable();
	}
}
#else
static inline void cmo_account_page_fault(void) { }
#endif /* CONFIG_PPC_SMLPAR */

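/*
 * Warn about fault/error-code combinations that should be impossible for
 * the MMU in use; see the detailed discussion of PROTFAULT below.
 */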
static void sanity_check_fault(bool is_write, bool is_user,
			       unsigned long error_code, unsigned long address)
{
	/*
	 * Userspace trying to access kernel address, we get PROTFAULT for that.
	 */
	if (is_user && address >= TASK_SIZE) {
		if ((long)address == -1)
			return;

		pr_crit_ratelimited("%s[%d]: User access of kernel address (%lx) - exploit attempt? (uid: %d)\n",
				    current->comm, current->pid, address,
				    from_kuid(&init_user_ns, current_uid()));
		return;
	}

	if (!IS_ENABLED(CONFIG_PPC_BOOK3S))
		return;

	/*
	 * For hash translation mode, we should never get a
	 * PROTFAULT. Any update to a pte that reduces access will result in
	 * us removing the hash page table entry, thus resulting in a
	 * DSISR_NOHPTE fault instead of DSISR_PROTFAULT.
	 *
	 * A pte update to relax the access will not result in a hash page
	 * table entry invalidate and hence can result in DSISR_PROTFAULT.
	 * ptep_set_access_flags() doesn't do a hpte flush. This is why we
	 * have the special !is_write check in the conditional below.
	 *
	 * For platforms that don't support a coherent icache but do support
	 * a per-page noexec bit, we set things up such that the D/I cache
	 * sync is done via a fault. But that is handled by the low level
	 * hash fault code (hash_page_do_lazy_icache()) and we should not
	 * reach here in that case.
	 *
	 * For wrong accesses that can result in PROTFAULT, the above
	 * vma->vm_flags check should handle those and hence we should fall
	 * through to the bad_area handling correctly.
	 *
	 * For embedded processors with per-page exec support but without a
	 * coherent icache, we do get PROTFAULT and handle that D/I cache
	 * sync in set_pte_at() while taking the noexec/prot fault. Hence
	 * this WARN_ON is conditional on the server MMU.
	 *
	 * For radix, we can get a prot fault in the autonuma case, because
	 * the radix page table will have pages marked no-access for user.
	 */
	if (radix_enabled() || is_write)
		return;

	WARN_ON_ONCE(error_code & DSISR_PROTFAULT);
}

/*
 * Define the correct "is_write" bit in error_code based
 * on the processor family
 */
#ifdef CONFIG_BOOKE
#define page_fault_is_write(__err)	((__err) & ESR_DST)
#else
#define page_fault_is_write(__err)	((__err) & DSISR_ISSTORE)
#endif

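/*
 * Which error_code bits mark a fault the MMU should never legitimately
 * raise for this family; such faults are reported as a bus error by the
 * caller rather than handled as a page fault.
 */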
#ifdef CONFIG_BOOKE
#define page_fault_is_bad(__err)	(0)
#elif defined(CONFIG_PPC_8xx)
#define page_fault_is_bad(__err)	((__err) & DSISR_NOEXEC_OR_G)
#elif defined(CONFIG_PPC64)
static int page_fault_is_bad(unsigned long err)
{
	unsigned long flag = DSISR_BAD_FAULT_64S;

	/*
	 * PAPR+ v2.11 § 14.15.3.4.1 (unreleased)
	 * If byte 0, bit 3 of pi-attribute-specifier-type in
	 * ibm,pi-features property is defined, ignore the DSI error
	 * which is caused by the paste instruction on the
	 * suspended NX window.
	 */
	if (mmu_has_feature(MMU_FTR_NX_DSI))
		flag &= ~DSISR_BAD_COPYPASTE;

	return err & flag;
}
#else
#define page_fault_is_bad(__err)	((__err) & DSISR_BAD_FAULT_32S)
#endif

/*
 * For 600- and 800-family processors, the error_code parameter is DSISR
 * for a data fault, SRR1 for an instruction fault.
 * For 400-family processors the error_code parameter is ESR for a data fault,
 * 0 for an instruction fault.
 * For 64-bit processors, the error_code parameter is DSISR for a data access
 * fault, SRR1 & 0x08000000 for an instruction access fault.
 *
 * The return value is 0 if the fault was handled, or the signal
 * number if this is a kernel fault that can't be handled here.
 */
static int ___do_page_fault(struct pt_regs *regs, unsigned long address,
			    unsigned long error_code)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned int flags = FAULT_FLAG_DEFAULT;
	int is_exec = TRAP(regs) == INTERRUPT_INST_STORAGE;
	int is_user = user_mode(regs);
	int is_write = page_fault_is_write(error_code);
	vm_fault_t fault, major = 0;
	bool kprobe_fault = kprobe_page_fault(regs, 11);

	if (unlikely(debugger_fault_handler(regs) || kprobe_fault))
		return 0;

	if (unlikely(page_fault_is_bad(error_code))) {
		if (is_user) {
			_exception(SIGBUS, regs, BUS_OBJERR, address);
			return 0;
		}
		return SIGBUS;
	}

	/* Additional sanity check(s) */
	sanity_check_fault(is_write, is_user, error_code, address);

	/*
	 * The kernel should never take an execute fault nor should it
	 * take a page fault to a kernel address or a page fault to a user
	 * address outside of dedicated places.
	 *
	 * Rather than kfence directly reporting false negatives, search whether
	 * the NIP belongs to the fixup table for cases where fault could come
	 * from functions like copy_from_kernel_nofault().
	 */
	if (unlikely(!is_user && bad_kernel_fault(regs, error_code, address, is_write))) {
		if (is_kfence_address((void *)address) &&
		    !search_exception_tables(instruction_pointer(regs)) &&
		    kfence_handle_page_fault(address, is_write, regs))
			return 0;

		return SIGSEGV;
	}

	/*
	 * If we're in an interrupt, have no user context or are running
	 * in a region with pagefaults disabled then we must not take the fault
	 */
	if (unlikely(faulthandler_disabled() || !mm)) {
		if (is_user)
			printk_ratelimited(KERN_ERR "Page fault in user mode"
					   " with faulthandler_disabled()=%d"
					   " mm=%p\n",
					   faulthandler_disabled(), mm);
		return bad_area_nosemaphore(regs, address);
	}

	interrupt_cond_local_irq_enable(regs);

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

	/*
	 * We want to do this outside mmap_lock, because reading code around nip
	 * can result in fault, which will cause a deadlock when called with
	 * mmap_lock held
	 */
	if (is_user)
		flags |= FAULT_FLAG_USER;
	if (is_write)
		flags |= FAULT_FLAG_WRITE;
	if (is_exec)
		flags |= FAULT_FLAG_INSTRUCTION;

	if (!(flags & FAULT_FLAG_USER))
		goto lock_mmap;

	vma = lock_vma_under_rcu(mm, address);
	if (!vma)
		goto lock_mmap;

	if (unlikely(access_pkey_error(is_write, is_exec,
				       (error_code & DSISR_KEYFAULT), vma))) {
		count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
		return bad_access_pkey(regs, address, NULL, vma);
	}

	if (unlikely(access_error(is_write, is_exec, vma))) {
		count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
		return bad_access(regs, address, NULL, vma);
	}

	fault = handle_mm_fault(vma, address, flags | FAULT_FLAG_VMA_LOCK, regs);
	if (!(fault & (VM_FAULT_RETRY | VM_FAULT_COMPLETED)))
		vma_end_read(vma);

	if (!(fault & VM_FAULT_RETRY)) {
		count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
		goto done;
	}
	count_vm_vma_lock_event(VMA_LOCK_RETRY);
	if (fault & VM_FAULT_MAJOR)
		flags |= FAULT_FLAG_TRIED;

	if (fault_signal_pending(fault, regs))
		return user_mode(regs) ? 0 : SIGBUS;

lock_mmap:

	/* When running in the kernel we expect faults to occur only to
	 * addresses in user space. All other faults represent errors in the
	 * kernel and should generate an OOPS. Unfortunately, in the case of an
	 * erroneous fault occurring in a code path which already holds mmap_lock
	 * we will deadlock attempting to validate the fault against the
	 * address space. Luckily the kernel only validly references user
	 * space from well defined areas of code, which are listed in the
	 * exceptions table. lock_mm_and_find_vma() handles that logic.
	 */
retry:
	vma = lock_mm_and_find_vma(mm, address, regs);
	if (unlikely(!vma))
		return bad_area_nosemaphore(regs, address);

	if (unlikely(access_pkey_error(is_write, is_exec,
				       (error_code & DSISR_KEYFAULT), vma)))
		return bad_access_pkey(regs, address, mm, vma);

	if (unlikely(access_error(is_write, is_exec, vma)))
		return bad_access(regs, address, mm, vma);

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, address, flags, regs);

	major |= fault & VM_FAULT_MAJOR;

	if (fault_signal_pending(fault, regs))
		return user_mode(regs) ? 0 : SIGBUS;

	/* The fault is fully completed (including releasing mmap lock) */
	if (fault & VM_FAULT_COMPLETED)
		goto out;

	/*
	 * Handle the retry right now, the mmap_lock has been released in that
	 * case.
	 */
	if (unlikely(fault & VM_FAULT_RETRY)) {
		flags |= FAULT_FLAG_TRIED;
		goto retry;
	}

	mmap_read_unlock(current->mm);

done:
	if (unlikely(fault & VM_FAULT_ERROR))
		return mm_fault_error(regs, address, fault);

out:
	/*
	 * Major/minor page fault accounting.
	 */
	if (major)
		cmo_account_page_fault();

	return 0;
}
NOKPROBE_SYMBOL(___do_page_fault);

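/*
 * Common entry: pull the faulting address (DAR) and fault reason (DSISR)
 * out of the saved registers, and hand any unhandled kernel fault to
 * bad_page_fault() for an exception-table fixup or an oops.
 */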
static __always_inline void __do_page_fault(struct pt_regs *regs)
{
	long err;

	err = ___do_page_fault(regs, regs->dar, regs->dsisr);
	if (unlikely(err))
		bad_page_fault(regs, err);
}

DEFINE_INTERRUPT_HANDLER(do_page_fault)
{
	__do_page_fault(regs);
}

#ifdef CONFIG_PPC_BOOK3S_64
/* Same as do_page_fault but interrupt entry has already run in do_hash_fault */
void hash__do_page_fault(struct pt_regs *regs)
{
	__do_page_fault(regs);
}
NOKPROBE_SYMBOL(hash__do_page_fault);
#endif

/*
 * bad_page_fault is called when we have a bad access from the kernel.
 * It is called from the DSI and ISI handlers in head.S and from some
 * of the procedures in traps.c.
 */
static void __bad_page_fault(struct pt_regs *regs, int sig)
{
	int is_write = page_fault_is_write(regs->dsisr);
	const char *msg;

	/* kernel has accessed a bad area */

	if (regs->dar < PAGE_SIZE)
		msg = "Kernel NULL pointer dereference";
	else
		msg = "Unable to handle kernel data access";

	switch (TRAP(regs)) {
	case INTERRUPT_DATA_STORAGE:
	case INTERRUPT_H_DATA_STORAGE:
		pr_alert("BUG: %s on %s at 0x%08lx\n", msg,
			 str_write_read(is_write), regs->dar);
		break;
	case INTERRUPT_DATA_SEGMENT:
		pr_alert("BUG: %s at 0x%08lx\n", msg, regs->dar);
		break;
	case INTERRUPT_INST_STORAGE:
	case INTERRUPT_INST_SEGMENT:
		pr_alert("BUG: Unable to handle kernel instruction fetch%s",
			 regs->nip < PAGE_SIZE ? " (NULL pointer?)\n" : "\n");
		break;
	case INTERRUPT_ALIGNMENT:
		pr_alert("BUG: Unable to handle kernel unaligned access at 0x%08lx\n",
			 regs->dar);
		break;
	default:
		pr_alert("BUG: Unable to handle unknown paging fault at 0x%08lx\n",
			 regs->dar);
		break;
	}
	printk(KERN_ALERT "Faulting instruction address: 0x%08lx\n",
		regs->nip);

	if (task_stack_end_corrupted(current))
		printk(KERN_ALERT "Thread overran stack, or stack corrupted\n");

	die("Kernel access of bad area", regs, sig);
}

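/*
 * Kernel bad-access entry point: apply an exception table fixup when one
 * exists for the faulting instruction, otherwise oops.
 */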
void bad_page_fault(struct pt_regs *regs, int sig)
{
	const struct exception_table_entry *entry;

	/* Are we prepared to handle this fault? */
	entry = search_exception_tables(instruction_pointer(regs));
	if (entry)
		instruction_pointer_set(regs, extable_fixup(entry));
	else
		__bad_page_fault(regs, sig);
}

#ifdef CONFIG_PPC_BOOK3S_64
DEFINE_INTERRUPT_HANDLER(do_bad_page_fault_segv)
{
	bad_page_fault(regs, SIGSEGV);
}

/*
 * In radix, segment interrupts indicate the EA is not addressable by the
 * page table geometry, so they are always sent here.
 *
 * In hash, this is called if do_slb_fault returns error. Typically it is
 * because the EA was outside the region allowed by software.
 */
DEFINE_INTERRUPT_HANDLER(do_bad_segment_interrupt)
{
	int err = regs->result;

	if (err == -EFAULT) {
		if (user_mode(regs))
			_exception(SIGSEGV, regs, SEGV_BNDERR, regs->dar);
		else
			bad_page_fault(regs, SIGSEGV);
	} else if (err == -EINVAL) {
		unrecoverable_exception(regs);
	} else {
		BUG();
	}
}
#endif