/*
 * MMU fault handling support.
 *
 * Copyright (C) 1998-2002 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>

#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
extern int die(char *, struct pt_regs *, long);

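/*
 * When kprobes is configured in, give an active kprobe the first chance to
 * handle a kernel-mode fault (typically one raised while executing a probed
 * instruction).  Returns 1 if the kprobe fault handler consumed the fault,
 * 0 otherwise.  Preemption is disabled around the check because
 * kprobe_running() relies on smp_processor_id().
 */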
#ifdef CONFIG_KPROBES
static inline int notify_page_fault(struct pt_regs *regs, int trap)
{
	int ret = 0;

	if (!user_mode(regs)) {
		/* kprobe_running() needs smp_processor_id() */
		preempt_disable();
		if (kprobe_running() && kprobe_fault_handler(regs, trap))
			ret = 1;
		preempt_enable();
	}

	return ret;
}
#else
static inline int notify_page_fault(struct pt_regs *regs, int trap)
{
	return 0;
}
#endif

/*
 * Return TRUE if ADDRESS points at a page in the kernel's mapped segment
 * (inside region 5, on ia64) and that page is present.
 */
static int
mapped_kernel_page_is_present (unsigned long address)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep, pte;

	pgd = pgd_offset_k(address);
	if (pgd_none(*pgd) || pgd_bad(*pgd))
		return 0;

	pud = pud_offset(pgd, address);
	if (pud_none(*pud) || pud_bad(*pud))
		return 0;

	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return 0;

	ptep = pte_offset_kernel(pmd, address);
	if (!ptep)
		return 0;

	pte = *ptep;
	return pte_present(pte);
}

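/*
 * Main ia64 page-fault entry point.  'address' is the faulting virtual
 * address, 'isr' is the interruption status register (which encodes the
 * attempted access: read, write, or execute), and 'regs' is the saved
 * register state.  The fast path looks up the vma, validates the access
 * against vma->vm_flags, and calls handle_mm_fault(); stack/RBS expansion,
 * speculative loads, and kernel-mode faults are handled in the slow paths
 * below.
 */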
void __kprobes
ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
{
	int signal = SIGSEGV, code = SEGV_MAPERR;
	struct vm_area_struct *vma, *prev_vma;
	struct mm_struct *mm = current->mm;
	struct siginfo si;
	unsigned long mask;
	int fault;

	/* mmap_sem is performance critical... */
	prefetchw(&mm->mmap_sem);

	/*
	 * If we're in an interrupt or have no user context, we must not take the fault.
	 */
	if (in_atomic() || !mm)
		goto no_context;

#ifdef CONFIG_VIRTUAL_MEM_MAP
	/*
	 * If the fault is in region 5 and we are in the kernel, we may already
	 * have the mmap_sem (the pfn_valid macro is called during mmap).  There
	 * is no vma for region 5 addresses anyway, so skip getting the semaphore
	 * and go directly to the exception handling code.
	 */

	if ((REGION_NUMBER(address) == 5) && !user_mode(regs))
		goto bad_area_no_up;
#endif

	/*
	 * This is to handle kprobes on user-space access instructions.
	 */
	if (notify_page_fault(regs, TRAP_BRKPT))
		return;

	down_read(&mm->mmap_sem);

	vma = find_vma_prev(mm, address, &prev_vma);
	if (!vma && !prev_vma)
		goto bad_area;

	/*
	 * find_vma_prev() returns a vma such that address < vma->vm_end, or NULL.
	 *
	 * We may find no vma at all: the last vm area could be the register
	 * backing store, which needs to expand upwards; in that case vma will
	 * be NULL, but prev_vma will be non-NULL.
	 */
	if ((!vma && prev_vma) || (address < vma->vm_start))
		goto check_expansion;

  good_area:
	code = SEGV_ACCERR;

	/* OK, we've got a good vm_area for this memory area.  Check the access permissions: */

#	define VM_READ_BIT	0
#	define VM_WRITE_BIT	1
#	define VM_EXEC_BIT	2

#	if (((1 << VM_READ_BIT) != VM_READ || (1 << VM_WRITE_BIT) != VM_WRITE) \
	    || (1 << VM_EXEC_BIT) != VM_EXEC)
#		error File is out of sync with <linux/mm.h>.  Please update.
#	endif

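	/*
	 * The #if above guarantees that the VM_* permission flags are single
	 * bits at the positions named by VM_*_BIT, so the ISR access bits can
	 * be shifted straight into a vm_flags-compatible mask.  For example, a
	 * store fault has the W bit set in the ISR, yielding mask == VM_WRITE,
	 * which the vma must then have set in vma->vm_flags.  A faulting read
	 * (ISR R bit) is allowed on any mapping that is readable or writable.
	 */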
	if (((isr >> IA64_ISR_R_BIT) & 1UL) && (!(vma->vm_flags & (VM_READ | VM_WRITE))))
		goto bad_area;

	mask = ((((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
		| (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));

	if ((vma->vm_flags & mask) != mask)
		goto bad_area;

	/*
	 * If for any reason at all we couldn't handle the fault, make
	 * sure we exit gracefully rather than endlessly redo the
	 * fault.
	 */
	fault = handle_mm_fault(mm, vma, address, (mask & VM_WRITE) ? FAULT_FLAG_WRITE : 0);
	if (unlikely(fault & VM_FAULT_ERROR)) {
		/*
		 * We ran out of memory, or some other thing happened
		 * to us that made us unable to handle the page fault
		 * gracefully.
		 */
		if (fault & VM_FAULT_OOM) {
			goto out_of_memory;
		} else if (fault & VM_FAULT_SIGBUS) {
			signal = SIGBUS;
			goto bad_area;
		}
		BUG();
	}
	if (fault & VM_FAULT_MAJOR)
		current->maj_flt++;
	else
		current->min_flt++;
	up_read(&mm->mmap_sem);
	return;

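	/*
	 * The faulting address fell outside every vma.  Two legitimate growth
	 * cases are handled here: a normal stack that grows downward
	 * (VM_GROWSDOWN, the vma just above the address) and the ia64 register
	 * backing store, which grows upward (VM_GROWSUP, the vma just below
	 * the address).  Either way, the address must stay within the same
	 * region and below RGN_MAP_LIMIT.
	 */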
  check_expansion:
	if (!(prev_vma && (prev_vma->vm_flags & VM_GROWSUP) && (address == prev_vma->vm_end))) {
		if (!vma)
			goto bad_area;
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto bad_area;
		if (REGION_NUMBER(address) != REGION_NUMBER(vma->vm_start)
		    || REGION_OFFSET(address) >= RGN_MAP_LIMIT)
			goto bad_area;
		if (expand_stack(vma, address))
			goto bad_area;
	} else {
		vma = prev_vma;
		if (REGION_NUMBER(address) != REGION_NUMBER(vma->vm_start)
		    || REGION_OFFSET(address) >= RGN_MAP_LIMIT)
			goto bad_area;
		/*
		 * Since the register backing store is accessed sequentially,
		 * we disallow growing it by more than a page at a time.
		 */
		if (address > vma->vm_end + PAGE_SIZE - sizeof(long))
			goto bad_area;
		if (expand_upwards(vma, address))
			goto bad_area;
	}
	goto good_area;

  bad_area:
	up_read(&mm->mmap_sem);
#ifdef CONFIG_VIRTUAL_MEM_MAP
  bad_area_no_up:
#endif
	if ((isr & IA64_ISR_SP)
	    || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH))
	{
		/*
		 * This fault was due to a speculative load or lfetch.fault; set the
		 * "ed" bit in the psr to ensure forward progress.  (The target
		 * register will get a NaT for ld.s; lfetch will be canceled.)
		 */
		ia64_psr(regs)->ed = 1;
		return;
	}
	if (user_mode(regs)) {
		si.si_signo = signal;
		si.si_errno = 0;
		si.si_code = code;
		si.si_addr = (void __user *) address;
		si.si_isr = isr;
		si.si_flags = __ISR_VALID;
		force_sig_info(signal, &si, current);
		return;
	}

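	/*
	 * Kernel-mode fault with no vma to resolve it.  In order: let
	 * speculative loads and lfetches fail forward via psr.ed, tolerate
	 * stale region-5 translations that have already been purged, honor
	 * exception-table fixups, and only then declare an oops.
	 */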
  no_context:
	if ((isr & IA64_ISR_SP)
	    || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH))
	{
		/*
		 * This fault was due to a speculative load or lfetch.fault; set the
		 * "ed" bit in the psr to ensure forward progress.  (The target
		 * register will get a NaT for ld.s; lfetch will be canceled.)
		 */
		ia64_psr(regs)->ed = 1;
		return;
	}

	/*
	 * Since we have no vmas for region 5, we might get here even if the
	 * address is valid, due to the VHPT walker inserting a non-present
	 * translation that becomes stale.  If that happens, the non-present
	 * fault handler already purged the stale translation, which fixed the
	 * problem.  So we check whether the translation is valid, and return
	 * if it is.
	 */
	if (REGION_NUMBER(address) == 5 && mapped_kernel_page_is_present(address))
		return;

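	/*
	 * If the faulting instruction has an exception-table entry (as the
	 * uaccess helpers do), ia64_done_with_exception() rewrites the saved
	 * state to resume at the fixup, and we can simply return.
	 */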
	if (ia64_done_with_exception(regs))
		return;

	/*
	 * Oops.  The kernel tried to access some bad page.  We'll have to
	 * terminate things with extreme prejudice.
	 */
	bust_spinlocks(1);

	if (address < PAGE_SIZE)
		printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference (address %016lx)\n", address);
	else
		printk(KERN_ALERT "Unable to handle kernel paging request at "
		       "virtual address %016lx\n", address);
	if (die("Oops", regs, isr))
		regs = NULL;
	bust_spinlocks(0);
	if (regs)
		do_exit(SIGKILL);
	return;

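	/*
	 * handle_mm_fault() reported VM_FAULT_OOM.  Drop mmap_sem, then:
	 * kernel faults fall back to the no_context path; user faults defer
	 * to pagefault_out_of_memory(), which may invoke the OOM killer.
	 */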
  out_of_memory:
	up_read(&mm->mmap_sem);
	if (!user_mode(regs))
		goto no_context;
	pagefault_out_of_memory();
}