signal: Remove the task parameter from force_sig_fault
arch/ia64/mm/fault.c (linux-2.6-block.git)
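
This commit drops the explicit task argument from force_sig_fault(); the only
caller in this file is the one in ia64_do_page_fault() below. A minimal
before/after sketch of that call site, assuming the removed final argument was
the usual explicit 'current' pointer:

-	force_sig_fault(signal, code, (void __user *) address,
-			0, __ISR_VALID, isr, current);
+	force_sig_fault(signal, code, (void __user *) address,
+			0, __ISR_VALID, isr);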

// SPDX-License-Identifier: GPL-2.0
/*
 * MMU fault handling support.
 *
 * Copyright (C) 1998-2002 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <linux/sched/signal.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/extable.h>
#include <linux/interrupt.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/prefetch.h>
#include <linux/uaccess.h>

#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/exception.h>

extern int die(char *, struct pt_regs *, long);
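
/*
 * Give a registered kprobe fault handler first crack at a kernel-mode
 * fault.  Returns 1 if a kprobe handled the fault, 0 otherwise (and
 * always 0 when kprobes are compiled out).
 */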
#ifdef CONFIG_KPROBES
static inline int notify_page_fault(struct pt_regs *regs, int trap)
{
	int ret = 0;

	if (!user_mode(regs)) {
		/* kprobe_running() needs smp_processor_id() */
		preempt_disable();
		if (kprobe_running() && kprobe_fault_handler(regs, trap))
			ret = 1;
		preempt_enable();
	}

	return ret;
}
#else
static inline int notify_page_fault(struct pt_regs *regs, int trap)
{
	return 0;
}
#endif

/*
 * Return TRUE if ADDRESS points at a page in the kernel's mapped segment
 * (inside region 5, on ia64) and that page is present.
 */
static int
mapped_kernel_page_is_present (unsigned long address)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep, pte;

	pgd = pgd_offset_k(address);
	if (pgd_none(*pgd) || pgd_bad(*pgd))
		return 0;

	pud = pud_offset(pgd, address);
	if (pud_none(*pud) || pud_bad(*pud))
		return 0;

	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return 0;

	ptep = pte_offset_kernel(pmd, address);
	if (!ptep)
		return 0;

	pte = *ptep;
	return pte_present(pte);
}
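
/*
 * Bit positions of VM_READ, VM_WRITE and VM_EXEC within vma->vm_flags;
 * the build-time check in ia64_do_page_fault() below keeps these in
 * sync with <linux/mm.h>.
 */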
#	define VM_READ_BIT	0
#	define VM_WRITE_BIT	1
#	define VM_EXEC_BIT	2

void __kprobes
ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
{
	int signal = SIGSEGV, code = SEGV_MAPERR;
	struct vm_area_struct *vma, *prev_vma;
	struct mm_struct *mm = current->mm;
	unsigned long mask;
	vm_fault_t fault;
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
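
	/*
	 * Translate the execute (X) and write (W) bits of the ISR into
	 * the corresponding VM_EXEC/VM_WRITE positions, so the required
	 * access can be checked directly against vma->vm_flags.
	 */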
	mask = ((((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
		| (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));

	/* mmap_sem is performance critical.... */
	prefetchw(&mm->mmap_sem);

	/*
	 * If we're in an interrupt or have no user context, we must not take the fault.
	 */
	if (faulthandler_disabled() || !mm)
		goto no_context;

#ifdef CONFIG_VIRTUAL_MEM_MAP
	/*
	 * If fault is in region 5 and we are in the kernel, we may already
	 * have the mmap_sem (pfn_valid macro is called during mmap). There
	 * is no vma for region 5 addresses anyway, so skip getting the
	 * semaphore and go directly to the exception handling code.
	 */
	if ((REGION_NUMBER(address) == 5) && !user_mode(regs))
		goto bad_area_no_up;
#endif

	/*
	 * This is to handle kprobes on user-space access instructions.
	 */
	if (notify_page_fault(regs, TRAP_BRKPT))
		return;
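
	/*
	 * Tell handle_mm_fault() whether this is a user-mode access and
	 * whether write permission is required.
	 */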
	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
	if (mask & VM_WRITE)
		flags |= FAULT_FLAG_WRITE;
retry:
	down_read(&mm->mmap_sem);

	vma = find_vma_prev(mm, address, &prev_vma);
	if (!vma && !prev_vma)
		goto bad_area;

	/*
	 * find_vma_prev() returns a vma such that address < vma->vm_end, or NULL.
	 *
	 * It may find no vma at all when the last vm area is the register
	 * backing store that needs to expand upwards; in that case vma will
	 * be NULL but prev_vma will be non-NULL.
	 */
	if ((!vma && prev_vma) || (address < vma->vm_start))
		goto check_expansion;

  good_area:
	code = SEGV_ACCERR;

	/* OK, we've got a good vm_area for this memory area.  Check the access permissions: */

#	if (((1 << VM_READ_BIT) != VM_READ || (1 << VM_WRITE_BIT) != VM_WRITE) \
	    || (1 << VM_EXEC_BIT) != VM_EXEC)
#		error File is out of sync with <linux/mm.h>.  Please update.
#	endif
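
	/*
	 * A read fault (ISR.r) is rejected only if the vma allows neither
	 * reading nor writing; the execute/write requirements are checked
	 * against vm_flags via 'mask' below.
	 */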
	if (((isr >> IA64_ISR_R_BIT) & 1UL) && (!(vma->vm_flags & (VM_READ | VM_WRITE))))
		goto bad_area;

	if ((vma->vm_flags & mask) != mask)
		goto bad_area;

	/*
	 * If for any reason at all we couldn't handle the fault, make
	 * sure we exit gracefully rather than endlessly redo the
	 * fault.
	 */
	fault = handle_mm_fault(vma, address, flags);

	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
		return;

	if (unlikely(fault & VM_FAULT_ERROR)) {
		/*
		 * We ran out of memory, or some other thing happened
		 * to us that made us unable to handle the page fault
		 * gracefully.
		 */
		if (fault & VM_FAULT_OOM) {
			goto out_of_memory;
		} else if (fault & VM_FAULT_SIGSEGV) {
			goto bad_area;
		} else if (fault & VM_FAULT_SIGBUS) {
			signal = SIGBUS;
			goto bad_area;
		}
		BUG();
	}
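
	/*
	 * Fault accounting plus at most one retry: on VM_FAULT_RETRY the
	 * mmap_sem has already been dropped, so clear ALLOW_RETRY, mark
	 * the attempt with FAULT_FLAG_TRIED and take the fault once more.
	 */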
	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR)
			current->maj_flt++;
		else
			current->min_flt++;
		if (fault & VM_FAULT_RETRY) {
			flags &= ~FAULT_FLAG_ALLOW_RETRY;
			flags |= FAULT_FLAG_TRIED;

			/* No need to up_read(&mm->mmap_sem) as we would
			 * have already released it in __lock_page_or_retry
			 * in mm/filemap.c.
			 */

			goto retry;
		}
	}

	up_read(&mm->mmap_sem);
	return;
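
	/*
	 * No vma covered the address.  Either the register backing store
	 * (a VM_GROWSUP vma ending exactly at this address) must expand
	 * upwards, or a stack vma above it (VM_GROWSDOWN) must expand
	 * downwards.
	 */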
  check_expansion:
	if (!(prev_vma && (prev_vma->vm_flags & VM_GROWSUP) && (address == prev_vma->vm_end))) {
		if (!vma)
			goto bad_area;
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto bad_area;
		if (REGION_NUMBER(address) != REGION_NUMBER(vma->vm_start)
		    || REGION_OFFSET(address) >= RGN_MAP_LIMIT)
			goto bad_area;
		if (expand_stack(vma, address))
			goto bad_area;
	} else {
		vma = prev_vma;
		if (REGION_NUMBER(address) != REGION_NUMBER(vma->vm_start)
		    || REGION_OFFSET(address) >= RGN_MAP_LIMIT)
			goto bad_area;
		/*
		 * Since the register backing store is accessed sequentially,
		 * we disallow growing it by more than a page at a time.
		 */
		if (address > vma->vm_end + PAGE_SIZE - sizeof(long))
			goto bad_area;
		if (expand_upwards(vma, address))
			goto bad_area;
	}
	goto good_area;

  bad_area:
	up_read(&mm->mmap_sem);
#ifdef CONFIG_VIRTUAL_MEM_MAP
  bad_area_no_up:
#endif
	if ((isr & IA64_ISR_SP)
	    || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH))
	{
		/*
		 * This fault was due to a speculative load or lfetch.fault; set the "ed"
		 * bit in the psr to ensure forward progress.  (Target register will get a
		 * NaT for ld.s, lfetch will be canceled.)
		 */
		ia64_psr(regs)->ed = 1;
		return;
	}
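
	/*
	 * Deliver the fault signal; the ia64 force_sig_fault() carries the
	 * extra si_imm/si_flags/si_isr fields, and with the task parameter
	 * removed it implicitly targets current.
	 */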
	if (user_mode(regs)) {
		force_sig_fault(signal, code, (void __user *) address,
				0, __ISR_VALID, isr);
		return;
	}

  no_context:
	if ((isr & IA64_ISR_SP)
	    || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH))
	{
		/*
		 * This fault was due to a speculative load or lfetch.fault; set the "ed"
		 * bit in the psr to ensure forward progress.  (Target register will get a
		 * NaT for ld.s, lfetch will be canceled.)
		 */
		ia64_psr(regs)->ed = 1;
		return;
	}

	/*
	 * Since we have no vma's for region 5, we might get here even if the address is
	 * valid, due to the VHPT walker inserting a non-present translation that becomes
	 * stale.  If that happens, the non-present fault handler already purged the stale
	 * translation, which fixed the problem.  So, we check to see if the translation is
	 * valid, and return if it is.
	 */
	if (REGION_NUMBER(address) == 5 && mapped_kernel_page_is_present(address))
		return;

	if (ia64_done_with_exception(regs))
		return;

	/*
	 * Oops.  The kernel tried to access some bad page.  We'll have to terminate things
	 * with extreme prejudice.
	 */
	bust_spinlocks(1);

	if (address < PAGE_SIZE)
		printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference (address %016lx)\n", address);
	else
		printk(KERN_ALERT "Unable to handle kernel paging request at "
		       "virtual address %016lx\n", address);
	if (die("Oops", regs, isr))
		regs = NULL;
	bust_spinlocks(0);
	if (regs)
		do_exit(SIGKILL);
	return;
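
	/*
	 * OOM from kernel mode falls back to the exception tables via
	 * no_context; user mode defers to the OOM killer.
	 */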
300 out_of_memory:
301 up_read(&mm->mmap_sem);
0c3b96e4 302 if (!user_mode(regs))
303 goto no_context;
304 pagefault_out_of_memory();
1da177e4 305}