mm: do page fault accounting in handle_mm_fault
arch/ia64/mm/fault.c
// SPDX-License-Identifier: GPL-2.0
/*
 * MMU fault handling support.
 *
 * Copyright (C) 1998-2002 Hewlett-Packard Co
 *      David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <linux/sched/signal.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/extable.h>
#include <linux/interrupt.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/prefetch.h>
#include <linux/uaccess.h>

#include <asm/processor.h>
#include <asm/exception.h>

extern int die(char *, struct pt_regs *, long);

/*
 * Return TRUE if ADDRESS points at a page in the kernel's mapped segment
 * (inside region 5, on ia64) and that page is present.
 */
static int
mapped_kernel_page_is_present (unsigned long address)
{
        pgd_t *pgd;
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *ptep, pte;

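        /*
         * Walk the kernel page tables top down; a missing or bad entry
         * at any level means the page cannot be present.
         */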
        pgd = pgd_offset_k(address);
        if (pgd_none(*pgd) || pgd_bad(*pgd))
                return 0;

        p4d = p4d_offset(pgd, address);
        if (p4d_none(*p4d) || p4d_bad(*p4d))
                return 0;

        pud = pud_offset(p4d, address);
        if (pud_none(*pud) || pud_bad(*pud))
                return 0;

        pmd = pmd_offset(pud, address);
        if (pmd_none(*pmd) || pmd_bad(*pmd))
                return 0;

        ptep = pte_offset_kernel(pmd, address);
        if (!ptep)
                return 0;

        pte = *ptep;
        return pte_present(pte);
}

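/*
 * Bit positions of VM_READ, VM_WRITE and VM_EXEC, as used in the "mask"
 * computed in ia64_do_page_fault() below; the preprocessor check there
 * verifies that they stay in sync with <linux/mm.h>.
 */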
# define VM_READ_BIT	0
# define VM_WRITE_BIT	1
# define VM_EXEC_BIT	2

void __kprobes
ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
{
        int signal = SIGSEGV, code = SEGV_MAPERR;
        struct vm_area_struct *vma, *prev_vma;
        struct mm_struct *mm = current->mm;
        unsigned long mask;
        vm_fault_t fault;
        unsigned int flags = FAULT_FLAG_DEFAULT;

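        /*
         * Decode the fault type from the ISR: the X bit is set for an
         * instruction fetch, the W bit for a write.  Fold these into a
         * VM_EXEC/VM_WRITE mask to check against the vma's permissions.
         */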
        mask = ((((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
                | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));

        /* mmap_lock is performance critical.... */
        prefetchw(&mm->mmap_lock);

        /*
         * If we're in an interrupt or have no user context, we must not take the fault.
         */
        if (faulthandler_disabled() || !mm)
                goto no_context;

#ifdef CONFIG_VIRTUAL_MEM_MAP
        /*
         * If the fault is in region 5 and we are in the kernel, we may already
         * hold the mmap_lock (the pfn_valid macro is called during mmap).  There
         * are no vmas for region 5 addresses anyway, so skip taking the lock
         * and go directly to the exception handling code.
         */

        if ((REGION_NUMBER(address) == 5) && !user_mode(regs))
                goto bad_area_no_up;
#endif

        /*
         * Handle kprobes on user-space access instructions.
         */
        if (kprobe_page_fault(regs, TRAP_BRKPT))
                return;

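        /*
         * Classify the access for the core fault handler: user-mode vs.
         * kernel-mode, and write vs. read.
         */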
        if (user_mode(regs))
                flags |= FAULT_FLAG_USER;
        if (mask & VM_WRITE)
                flags |= FAULT_FLAG_WRITE;
retry:
        mmap_read_lock(mm);

        vma = find_vma_prev(mm, address, &prev_vma);
        if (!vma && !prev_vma)
                goto bad_area;

        /*
         * find_vma_prev() returns a vma such that address < vma->vm_end, or NULL.
         *
         * It may find no vma at all: the last vm area could be the register
         * backing store, which needs to expand upwards; in that case vma will
         * be NULL, but prev_vma will be non-NULL.
         */
        if ((!vma && prev_vma) || (address < vma->vm_start))
                goto check_expansion;

  good_area:
        code = SEGV_ACCERR;

        /* OK, we've got a good vm_area for this memory area.  Check the access permissions: */

# if (((1 << VM_READ_BIT) != VM_READ || (1 << VM_WRITE_BIT) != VM_WRITE) \
     || (1 << VM_EXEC_BIT) != VM_EXEC)
#  error File is out of sync with <linux/mm.h>.  Please update.
# endif

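        /*
         * A read fault is allowed if the vma is readable or writable; this
         * assumes that write access implies read access here.
         */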
        if (((isr >> IA64_ISR_R_BIT) & 1UL) && (!(vma->vm_flags & (VM_READ | VM_WRITE))))
                goto bad_area;

        if ((vma->vm_flags & mask) != mask)
                goto bad_area;

        /*
         * If for any reason at all we couldn't handle the fault, make
         * sure we exit gracefully rather than endlessly redo the
         * fault.
         */
        fault = handle_mm_fault(vma, address, flags, NULL);
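        /*
         * The fourth argument is the pt_regs pointer added by the
         * "mm: do page fault accounting in handle_mm_fault" change;
         * passing NULL here presumably keeps the fault accounting in
         * this arch handler (see the maj_flt/min_flt updates below)
         * rather than delegating it to the core.
         */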

        if (fault_signal_pending(fault, regs))
                return;

        if (unlikely(fault & VM_FAULT_ERROR)) {
                /*
                 * We ran out of memory, or some other thing happened
                 * to us that made us unable to handle the page fault
                 * gracefully.
                 */
                if (fault & VM_FAULT_OOM) {
                        goto out_of_memory;
                } else if (fault & VM_FAULT_SIGSEGV) {
                        goto bad_area;
                } else if (fault & VM_FAULT_SIGBUS) {
                        signal = SIGBUS;
                        goto bad_area;
                }
                BUG();
        }

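        /*
         * Fault accounting: VM_FAULT_MAJOR means servicing the fault
         * required I/O (e.g. reading the page in from disk); anything
         * else counts as a minor fault.
         */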
        if (flags & FAULT_FLAG_ALLOW_RETRY) {
                if (fault & VM_FAULT_MAJOR)
                        current->maj_flt++;
                else
                        current->min_flt++;
                if (fault & VM_FAULT_RETRY) {
                        flags |= FAULT_FLAG_TRIED;

                        /*
                         * No need to mmap_read_unlock(mm) as we would have
                         * already released it in __lock_page_or_retry in
                         * mm/filemap.c.
                         */

                        goto retry;
                }
        }

        mmap_read_unlock(mm);
        return;

  check_expansion:
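        /*
         * Two stack-like vmas can grow on fault here: a normal stack grows
         * downward (VM_GROWSDOWN), while the ia64 register backing store
         * grows upward (VM_GROWSUP).  Decide which case applies.
         */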
        if (!(prev_vma && (prev_vma->vm_flags & VM_GROWSUP) && (address == prev_vma->vm_end))) {
                if (!vma)
                        goto bad_area;
                if (!(vma->vm_flags & VM_GROWSDOWN))
                        goto bad_area;
                if (REGION_NUMBER(address) != REGION_NUMBER(vma->vm_start)
                    || REGION_OFFSET(address) >= RGN_MAP_LIMIT)
                        goto bad_area;
                if (expand_stack(vma, address))
                        goto bad_area;
        } else {
                vma = prev_vma;
                if (REGION_NUMBER(address) != REGION_NUMBER(vma->vm_start)
                    || REGION_OFFSET(address) >= RGN_MAP_LIMIT)
                        goto bad_area;
                /*
                 * Since the register backing store is accessed sequentially,
                 * we disallow growing it by more than a page at a time.
                 */
                if (address > vma->vm_end + PAGE_SIZE - sizeof(long))
                        goto bad_area;
                if (expand_upwards(vma, address))
                        goto bad_area;
        }
        goto good_area;

  bad_area:
        mmap_read_unlock(mm);
#ifdef CONFIG_VIRTUAL_MEM_MAP
  bad_area_no_up:
#endif
        if ((isr & IA64_ISR_SP)
            || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH))
        {
                /*
                 * This fault was due to a speculative load or lfetch.fault; set the "ed"
                 * bit in the psr to ensure forward progress.  (The target register will
                 * get a NaT for ld.s, and lfetch will be canceled.)
                 */
                ia64_psr(regs)->ed = 1;
                return;
        }
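        /* Deliver the signal chosen above (SIGSEGV or SIGBUS) to user space. */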
        if (user_mode(regs)) {
                force_sig_fault(signal, code, (void __user *) address,
                                0, __ISR_VALID, isr);
                return;
        }

  no_context:
        if ((isr & IA64_ISR_SP)
            || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH))
        {
                /*
                 * This fault was due to a speculative load or lfetch.fault; set the "ed"
                 * bit in the psr to ensure forward progress.  (The target register will
                 * get a NaT for ld.s, and lfetch will be canceled.)
                 */
                ia64_psr(regs)->ed = 1;
                return;
        }

        /*
         * Since we have no vmas for region 5, we might get here even if the address is
         * valid, due to the VHPT walker inserting a non-present translation that becomes
         * stale.  If that happens, the non-present fault handler already purged the stale
         * translation, which fixed the problem.  So, we check to see if the translation is
         * valid, and return if it is.
         */
        if (REGION_NUMBER(address) == 5 && mapped_kernel_page_is_present(address))
                return;

        if (ia64_done_with_exception(regs))
                return;

        /*
         * Oops.  The kernel tried to access some bad page.  We'll have to terminate things
         * with extreme prejudice.
         */
        bust_spinlocks(1);

        if (address < PAGE_SIZE)
                printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference (address %016lx)\n", address);
        else
                printk(KERN_ALERT "Unable to handle kernel paging request at "
                       "virtual address %016lx\n", address);
        if (die("Oops", regs, isr))
                regs = NULL;
        bust_spinlocks(0);
        if (regs)
                do_exit(SIGKILL);
        return;

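        /*
         * Out of memory: for kernel-mode faults, fall back to the no_context
         * path above; for user mode, defer to the core OOM handling, which
         * may choose a task to kill.
         */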
  out_of_memory:
        mmap_read_unlock(mm);
        if (!user_mode(regs))
                goto no_context;
        pagefault_out_of_memory();
}