/*
 * MMU fault handling support.
 *
 * Copyright (C) 1998-2002 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp_lock.h>
#include <linux/interrupt.h>
#include <linux/kprobes.h>

#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/kdebug.h>

extern void die (char *, struct pt_regs *, long);

#ifdef CONFIG_KPROBES
ATOMIC_NOTIFIER_HEAD(notify_page_fault_chain);

/* Hook to register for page fault notifications */
int register_page_fault_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&notify_page_fault_chain, nb);
}

int unregister_page_fault_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&notify_page_fault_chain, nb);
}

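/*
 * A minimal usage sketch (hypothetical names: my_pf_handler, my_pf_nb and
 * my_probe_hit are illustrations, not part of this file).  A client such
 * as kprobes would hook the chain roughly like this:
 *
 *	static int my_pf_handler(struct notifier_block *nb,
 *				 unsigned long val, void *data)
 *	{
 *		struct die_args *args = data;
 *
 *		if (val == DIE_PAGE_FAULT && my_probe_hit(args->regs))
 *			return NOTIFY_STOP;	(fault consumed by client)
 *		return NOTIFY_DONE;		(let normal handling run)
 *	}
 *
 *	static struct notifier_block my_pf_nb = { .notifier_call = my_pf_handler };
 *
 *	register_page_fault_notifier(&my_pf_nb);
 */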
static inline int notify_page_fault(enum die_val val, const char *str,
			struct pt_regs *regs, long err, int trap, int sig)
{
	struct die_args args = {
		.regs = regs,
		.str = str,
		.err = err,
		.trapnr = trap,
		.signr = sig
	};
	return atomic_notifier_call_chain(&notify_page_fault_chain, val, &args);
}
#else
static inline int notify_page_fault(enum die_val val, const char *str,
			struct pt_regs *regs, long err, int trap, int sig)
{
	return NOTIFY_DONE;
}
#endif

/*
 * Return TRUE if ADDRESS points at a page in the kernel's mapped segment
 * (inside region 5, on ia64) and that page is present.
 */
static int
mapped_kernel_page_is_present (unsigned long address)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep, pte;

	pgd = pgd_offset_k(address);
	if (pgd_none(*pgd) || pgd_bad(*pgd))
		return 0;

	pud = pud_offset(pgd, address);
	if (pud_none(*pud) || pud_bad(*pud))
		return 0;

	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return 0;

	ptep = pte_offset_kernel(pmd, address);
	if (!ptep)
		return 0;

	pte = *ptep;
	return pte_present(pte);
}

void __kprobes
ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
{
	int signal = SIGSEGV, code = SEGV_MAPERR;
	struct vm_area_struct *vma, *prev_vma;
	struct mm_struct *mm = current->mm;
	struct siginfo si;
	unsigned long mask;

	/* mmap_sem is performance critical.... */
	prefetchw(&mm->mmap_sem);

	/*
	 * If we're in an interrupt or have no user context, we must not take the fault.
	 */
	if (in_atomic() || !mm)
		goto no_context;

#ifdef CONFIG_VIRTUAL_MEM_MAP
	/*
	 * If the fault is in region 5 and we are in the kernel, we may already
	 * have the mmap_sem (the pfn_valid macro is called during mmap).  There
	 * is no vma for region 5 addresses anyway, so skip getting the semaphore
	 * and go directly to the exception handling code.
	 */
	if ((REGION_NUMBER(address) == 5) && !user_mode(regs))
		goto bad_area_no_up;
#endif

	/*
	 * This is to handle kprobes on user space access instructions.
	 */
	if (notify_page_fault(DIE_PAGE_FAULT, "page fault", regs, code, TRAP_BRKPT,
			      SIGSEGV) == NOTIFY_STOP)
		return;

	down_read(&mm->mmap_sem);

	vma = find_vma_prev(mm, address, &prev_vma);
	if (!vma)
		goto bad_area;

	/* find_vma_prev() returns a vma such that address < vma->vm_end, or NULL */
	if (address < vma->vm_start)
		goto check_expansion;

  good_area:
	code = SEGV_ACCERR;

	/* OK, we've got a good vm_area for this memory area.  Check the access permissions: */

#	define VM_READ_BIT	0
#	define VM_WRITE_BIT	1
#	define VM_EXEC_BIT	2

#	if (((1 << VM_READ_BIT) != VM_READ || (1 << VM_WRITE_BIT) != VM_WRITE) \
	    || (1 << VM_EXEC_BIT) != VM_EXEC)
#		error File is out of sync with <linux/mm.h>.  Please update.
#	endif

	mask = (  (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
		| (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT)
		| (((isr >> IA64_ISR_R_BIT) & 1UL) << VM_READ_BIT));

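	/*
	 * Each ISR bit records which kind of access faulted: execute (X),
	 * write (W), or read (R).  Shifting the bit down to position 0 and
	 * back up to the matching VM_*_BIT turns the ISR into a vm_flags
	 * mask.  For a plain store, for example, only IA64_ISR_W_BIT is set,
	 * so mask == VM_WRITE and the test below demands a writable vma.
	 */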
	if ((vma->vm_flags & mask) != mask)
		goto bad_area;

  survive:
	/*
	 * If for any reason at all we couldn't handle the fault, make
	 * sure we exit gracefully rather than endlessly redo the
	 * fault.
	 */
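	/*
	 * VM_FAULT_MINOR means the fault was resolved without reading the
	 * page in from backing store; VM_FAULT_MAJOR means it had to be
	 * brought in (e.g. from swap or a file), which is why the two
	 * counters are accounted separately.
	 */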
	switch (handle_mm_fault(mm, vma, address, (mask & VM_WRITE) != 0)) {
	case VM_FAULT_MINOR:
		++current->min_flt;
		break;
	case VM_FAULT_MAJOR:
		++current->maj_flt;
		break;
	case VM_FAULT_SIGBUS:
		/*
		 * We ran out of memory, or some other thing happened
		 * to us that made us unable to handle the page fault
		 * gracefully.
		 */
		signal = SIGBUS;
		goto bad_area;
	case VM_FAULT_OOM:
		goto out_of_memory;
	default:
		BUG();
	}
	up_read(&mm->mmap_sem);
	return;

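	/*
	 * The address falls outside every existing vma, so see whether a
	 * neighboring stack vma may grow to cover it.  Two directions are
	 * possible: an ordinary stack grows downward (VM_GROWSDOWN), while
	 * on ia64 the register backing store grows upward (VM_GROWSUP),
	 * since the RSE spills registers to it at increasing addresses.
	 */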
  check_expansion:
	if (!(prev_vma && (prev_vma->vm_flags & VM_GROWSUP) && (address == prev_vma->vm_end))) {
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto bad_area;
		if (REGION_NUMBER(address) != REGION_NUMBER(vma->vm_start)
		    || REGION_OFFSET(address) >= RGN_MAP_LIMIT)
			goto bad_area;
		if (expand_stack(vma, address))
			goto bad_area;
	} else {
		vma = prev_vma;
		if (REGION_NUMBER(address) != REGION_NUMBER(vma->vm_start)
		    || REGION_OFFSET(address) >= RGN_MAP_LIMIT)
			goto bad_area;
		/*
		 * Since the register backing store is accessed sequentially,
		 * we disallow growing it by more than a page at a time.
		 */
		if (address > vma->vm_end + PAGE_SIZE - sizeof(long))
			goto bad_area;
		if (expand_upwards(vma, address))
			goto bad_area;
	}
	goto good_area;

  bad_area:
	up_read(&mm->mmap_sem);
#ifdef CONFIG_VIRTUAL_MEM_MAP
  bad_area_no_up:
#endif
	if ((isr & IA64_ISR_SP)
	    || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH))
	{
		/*
		 * This fault was due to a speculative load or lfetch.fault; set the "ed"
		 * bit in the psr to ensure forward progress.  (Target register will get a
		 * NaT for ld.s, lfetch will be canceled.)
		 */
		ia64_psr(regs)->ed = 1;
		return;
	}
	if (user_mode(regs)) {
		si.si_signo = signal;
		si.si_errno = 0;
		si.si_code = code;
		si.si_addr = (void __user *) address;
		si.si_isr = isr;
		si.si_flags = __ISR_VALID;
		force_sig_info(signal, &si, current);
		return;
	}

  no_context:
	if ((isr & IA64_ISR_SP)
	    || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH))
	{
		/*
		 * This fault was due to a speculative load or lfetch.fault; set the "ed"
		 * bit in the psr to ensure forward progress.  (Target register will get a
		 * NaT for ld.s, lfetch will be canceled.)
		 */
		ia64_psr(regs)->ed = 1;
		return;
	}

	/*
	 * Since we have no vmas for region 5, we might get here even if the address is
	 * valid, due to the VHPT walker inserting a non-present translation that becomes
	 * stale.  If that happens, the non-present fault handler already purged the stale
	 * translation, which fixed the problem.  So, we check to see if the translation is
	 * valid, and return if it is.
	 */
	if (REGION_NUMBER(address) == 5 && mapped_kernel_page_is_present(address))
		return;

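	/*
	 * ia64_done_with_exception() consults the kernel exception table;
	 * if the faulting instruction has a fixup entry (as the uaccess
	 * helpers do), it redirects execution there and returns non-zero,
	 * and the fault is considered handled.
	 */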
	if (ia64_done_with_exception(regs))
		return;

	/*
	 * Oops.  The kernel tried to access some bad page.  We'll have to terminate
	 * things with extreme prejudice.
	 */
	bust_spinlocks(1);

	if (address < PAGE_SIZE)
		printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference (address %016lx)\n", address);
	else
		printk(KERN_ALERT "Unable to handle kernel paging request at "
		       "virtual address %016lx\n", address);
	die("Oops", regs, isr);
	bust_spinlocks(0);
	do_exit(SIGKILL);
	return;

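	/*
	 * handle_mm_fault() reported VM_FAULT_OOM.  init (pid 1) yields the
	 * CPU and retries via the "survive" path above, since killing it
	 * would take the whole system down; any other task is killed if the
	 * fault came from user mode, otherwise we fall back to the
	 * kernel-fault path at no_context.
	 */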
  out_of_memory:
	up_read(&mm->mmap_sem);
	if (current->pid == 1) {
		yield();
		down_read(&mm->mmap_sem);
		goto survive;
	}
	printk(KERN_CRIT "VM: killing process %s\n", current->comm);
	if (user_mode(regs))
		do_exit(SIGKILL);
	goto no_context;
}