ia64: fix livelock in uaccess
[linux-2.6-block.git] arch/ia64/mm/fault.c
// SPDX-License-Identifier: GPL-2.0
/*
 * MMU fault handling support.
 *
 * Copyright (C) 1998-2002 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <linux/sched/signal.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/extable.h>
#include <linux/interrupt.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/prefetch.h>
#include <linux/uaccess.h>
#include <linux/perf_event.h>

#include <asm/processor.h>
#include <asm/exception.h>

extern int die(char *, struct pt_regs *, long);

/*
 * Return TRUE if ADDRESS points at a page in the kernel's mapped segment
 * (inside region 5, on ia64) and that page is present.
 */
static int
mapped_kernel_page_is_present (unsigned long address)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep, pte;
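
	/*
	 * Walk the kernel page tables level by level: pgd -> p4d -> pud
	 * -> pmd -> pte.  On configurations with fewer paging levels the
	 * intermediate *_offset() helpers fold into the level above, so
	 * the walk below is valid either way.
	 */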
	pgd = pgd_offset_k(address);
	if (pgd_none(*pgd) || pgd_bad(*pgd))
		return 0;

	p4d = p4d_offset(pgd, address);
	if (p4d_none(*p4d) || p4d_bad(*p4d))
		return 0;

	pud = pud_offset(p4d, address);
	if (pud_none(*pud) || pud_bad(*pud))
		return 0;

	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return 0;

	ptep = pte_offset_kernel(pmd, address);
	if (!ptep)
		return 0;

	pte = *ptep;
	return pte_present(pte);
}

#	define VM_READ_BIT	0
#	define VM_WRITE_BIT	1
#	define VM_EXEC_BIT	2
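
/*
 * Note: the VM_*_BIT values above must line up with VM_READ, VM_WRITE
 * and VM_EXEC in <linux/mm.h>; the preprocessor check inside
 * ia64_do_page_fault() enforces this at build time.
 */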

void __kprobes
ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
{
	int signal = SIGSEGV, code = SEGV_MAPERR;
	struct vm_area_struct *vma, *prev_vma;
	struct mm_struct *mm = current->mm;
	unsigned long mask;
	vm_fault_t fault;
	unsigned int flags = FAULT_FLAG_DEFAULT;
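
	/*
	 * Decode the access type from the ISR: the X and W bits report
	 * an instruction fetch or a write, and are shifted into the
	 * matching VM_EXEC/VM_WRITE flag positions.
	 */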
	mask = ((((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
		| (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));

	/* mmap_lock is performance critical.... */
	prefetchw(&mm->mmap_lock);

	/*
	 * If we're in an interrupt or have no user context, we must not take the fault.
	 */
	if (faulthandler_disabled() || !mm)
		goto no_context;

	/*
	 * Handle kprobes set on user-space access instructions.
	 */
	if (kprobe_page_fault(regs, TRAP_BRKPT))
		return;

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
	if (mask & VM_WRITE)
		flags |= FAULT_FLAG_WRITE;
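
	/* Count this fault for the perf software event before taking mmap_lock. */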
	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
retry:
	mmap_read_lock(mm);

	vma = find_vma_prev(mm, address, &prev_vma);
	if (!vma && !prev_vma)
		goto bad_area;

	/*
	 * find_vma_prev() returns the vma such that address < vma->vm_end, or NULL.
	 *
	 * We may find no vma at all, yet the last vm area could be the
	 * register backing store that needs to expand upwards; in that
	 * case vma will be NULL but prev_vma will be non-NULL.
	 */
	if ((!vma && prev_vma) || (address < vma->vm_start))
		goto check_expansion;

  good_area:
	code = SEGV_ACCERR;

	/* OK, we've got a good vm_area for this memory area.  Check the access permissions: */

#	if (((1 << VM_READ_BIT) != VM_READ || (1 << VM_WRITE_BIT) != VM_WRITE) \
	    || (1 << VM_EXEC_BIT) != VM_EXEC)
#		error File is out of sync with <linux/mm.h>.  Please update.
#	endif
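
	/*
	 * A read access is refused only when the vma permits neither
	 * reads nor writes; a read fault on a write-only mapping is let
	 * through to handle_mm_fault().
	 */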
	if (((isr >> IA64_ISR_R_BIT) & 1UL) && (!(vma->vm_flags & (VM_READ | VM_WRITE))))
		goto bad_area;

	if ((vma->vm_flags & mask) != mask)
		goto bad_area;

	/*
	 * If for any reason at all we couldn't handle the fault, make
	 * sure we exit gracefully rather than endlessly redo the
	 * fault.
	 */
	fault = handle_mm_fault(vma, address, flags, regs);
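
	/*
	 * If a signal is pending and we faulted from kernel mode (i.e.
	 * in a user accessor), simply returning would re-execute the
	 * access and livelock; take the no_context path so the
	 * exception-table fixup can handle it instead.
	 */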
	if (fault_signal_pending(fault, regs)) {
		if (!user_mode(regs))
			goto no_context;
		return;
	}

	/* The fault is fully completed (including releasing mmap lock) */
	if (fault & VM_FAULT_COMPLETED)
		return;

	if (unlikely(fault & VM_FAULT_ERROR)) {
		/*
		 * We ran out of memory, or some other thing happened
		 * to us that made us unable to handle the page fault
		 * gracefully.
		 */
		if (fault & VM_FAULT_OOM) {
			goto out_of_memory;
		} else if (fault & VM_FAULT_SIGSEGV) {
			goto bad_area;
		} else if (fault & VM_FAULT_SIGBUS) {
			signal = SIGBUS;
			goto bad_area;
		}
		BUG();
	}

	if (fault & VM_FAULT_RETRY) {
		flags |= FAULT_FLAG_TRIED;

		/* No need to mmap_read_unlock(mm) as we would
		 * have already released it in __lock_page_or_retry
		 * in mm/filemap.c.
		 */

		goto retry;
	}

	mmap_read_unlock(mm);
	return;
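
	/*
	 * No vma covers the address.  Two kinds of expansion may apply:
	 * an ordinary stack growing down into the gap below vma, or the
	 * ia64 register backing store growing up from the end of
	 * prev_vma.
	 */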
  check_expansion:
	if (!(prev_vma && (prev_vma->vm_flags & VM_GROWSUP) && (address == prev_vma->vm_end))) {
		if (!vma)
			goto bad_area;
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto bad_area;
		if (REGION_NUMBER(address) != REGION_NUMBER(vma->vm_start)
		    || REGION_OFFSET(address) >= RGN_MAP_LIMIT)
			goto bad_area;
		if (expand_stack(vma, address))
			goto bad_area;
	} else {
		vma = prev_vma;
		if (REGION_NUMBER(address) != REGION_NUMBER(vma->vm_start)
		    || REGION_OFFSET(address) >= RGN_MAP_LIMIT)
			goto bad_area;
		/*
		 * Since the register backing store is accessed sequentially,
		 * we disallow growing it by more than a page at a time.
		 */
		if (address > vma->vm_end + PAGE_SIZE - sizeof(long))
			goto bad_area;
		if (expand_upwards(vma, address))
			goto bad_area;
	}
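	/* The vma now covers the faulting address; redo the permission checks. */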
	goto good_area;

  bad_area:
	mmap_read_unlock(mm);
	if ((isr & IA64_ISR_SP)
	    || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH))
	{
		/*
		 * This fault was due to a speculative load or lfetch.fault; set the "ed"
		 * bit in the psr to ensure forward progress.  (The target register will
		 * get a NaT for ld.s, and the lfetch will be canceled.)
		 */
		ia64_psr(regs)->ed = 1;
		return;
	}
	if (user_mode(regs)) {
		force_sig_fault(signal, code, (void __user *) address,
				0, __ISR_VALID, isr);
		return;
	}

  no_context:
	if ((isr & IA64_ISR_SP)
	    || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH))
	{
		/*
		 * This fault was due to a speculative load or lfetch.fault; set the "ed"
		 * bit in the psr to ensure forward progress.  (The target register will
		 * get a NaT for ld.s, and the lfetch will be canceled.)
		 */
		ia64_psr(regs)->ed = 1;
		return;
	}

	/*
	 * Since we have no vmas for region 5, we might get here even if the address is
	 * valid, due to the VHPT walker inserting a non-present translation that becomes
	 * stale.  If that happens, the non-present fault handler already purged the stale
	 * translation, which fixed the problem.  So, we check to see if the translation is
	 * valid, and return if it is.
	 */
	if (REGION_NUMBER(address) == 5 && mapped_kernel_page_is_present(address))
		return;
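
	/*
	 * Consult the exception tables: if the faulting instruction has
	 * a fixup entry, resume at the fixup instead of oopsing.
	 */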
	if (ia64_done_with_exception(regs))
		return;

	/*
	 * Oops.  The kernel tried to access some bad page.  We'll have to terminate
	 * things with extreme prejudice.
	 */
	bust_spinlocks(1);

	if (address < PAGE_SIZE)
		printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference (address %016lx)\n", address);
	else
		printk(KERN_ALERT "Unable to handle kernel paging request at "
		       "virtual address %016lx\n", address);
	if (die("Oops", regs, isr))
		regs = NULL;
	bust_spinlocks(0);
	if (regs)
		make_task_dead(SIGKILL);
	return;

  out_of_memory:
	mmap_read_unlock(mm);
	if (!user_mode(regs))
		goto no_context;
	pagefault_out_of_memory();
}