// TODO VM_EXEC flag work-around, cache aliasing
/*
 * arch/xtensa/mm/fault.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2010 Tensilica Inc.
 *
 * Chris Zankel <chris@zankel.net>
 * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
 */

#include <linux/mm.h>
#include <linux/extable.h>
#include <linux/hardirq.h>
#include <linux/perf_event.h>
#include <linux/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/hardirq.h>

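/*
 * Per-CPU ASID allocator state: the most recent address-space ID
 * generation handed out on each CPU. The MMU context-switch code in
 * asm/mmu_context.h reads and advances this when a mm needs a fresh
 * ASID.
 */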
DEFINE_PER_CPU(unsigned long, asid_cache) = ASID_USER_FIRST;
void bad_page_fault(struct pt_regs *, unsigned long, int);

/*
 * This routine handles page faults. It determines the address and the
 * problem, and then passes it off to one of the appropriate routines.
 *
 * Note: does not handle Miss and MultiHit.
 */

void do_page_fault(struct pt_regs *regs)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned int exccause = regs->exccause;
	unsigned int address = regs->excvaddr;
	int code;

	int is_write, is_exec;
	vm_fault_t fault;
	unsigned int flags = FAULT_FLAG_DEFAULT;

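	/* code is the si_code delivered with SIGSEGV: SEGV_MAPERR until
	 * a vma covering the faulting address is found, SEGV_ACCERR
	 * (valid mapping, bad permissions) after that.
	 */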
	code = SEGV_MAPERR;

	/* We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 */
	if (address >= TASK_SIZE && !user_mode(regs))
		goto vmalloc_fault;

	/* If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (faulthandler_disabled() || !mm) {
		bad_page_fault(regs, address, SIGSEGV);
		return;
	}

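	/* Classify the access from the Xtensa exception cause: prohibited
	 * stores arrive as EXCCAUSE_STORE_CACHE_ATTRIBUTE, instruction
	 * fetches as ITLB privilege/miss or fetch-attribute exceptions,
	 * and anything else is treated as a read.
	 */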
	is_write = (exccause == EXCCAUSE_STORE_CACHE_ATTRIBUTE) ? 1 : 0;
	is_exec = (exccause == EXCCAUSE_ITLB_PRIVILEGE ||
		   exccause == EXCCAUSE_ITLB_MISS ||
		   exccause == EXCCAUSE_FETCH_CACHE_ATTRIBUTE) ? 1 : 0;

	pr_debug("[%s:%d:%08x:%d:%08lx:%s%s]\n",
		 current->comm, current->pid,
		 address, exccause, regs->pc,
		 is_write ? "w" : "", is_exec ? "x" : "");

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

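	/* Look up the vma under mmap_lock (read). An address just below a
	 * VM_GROWSDOWN vma is still valid if expand_stack() can grow the
	 * stack vma down to cover it.
	 */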
retry:
	mmap_read_lock(mm);
	vma = find_vma(mm, address);

	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;

	/* Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */

good_area:
	code = SEGV_ACCERR;

	if (is_write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		flags |= FAULT_FLAG_WRITE;
	} else if (is_exec) {
		if (!(vma->vm_flags & VM_EXEC))
			goto bad_area;
	} else	/* Allow read even from write-only pages. */
		if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
			goto bad_area;

	/* If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, address, flags, regs);

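	/* If a fatal signal arrived while the fault was being serviced,
	 * handle_mm_fault() may have bailed out early. User space will
	 * take the signal, but a kernel-mode fault must still go through
	 * the exception-table fixup path.
	 */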
	if (fault_signal_pending(fault, regs)) {
		if (!user_mode(regs))
			goto bad_page_fault;
		return;
	}

	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGSEGV)
			goto bad_area;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}

	if (fault & VM_FAULT_RETRY) {
		flags |= FAULT_FLAG_TRIED;

		/* No need to mmap_read_unlock(mm) as we would
		 * have already released it in __lock_page_or_retry
		 * in mm/filemap.c.
		 */

		goto retry;
	}

	mmap_read_unlock(mm);
	return;

	/* Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
bad_area:
	mmap_read_unlock(mm);
	if (user_mode(regs)) {
		current->thread.bad_vaddr = address;
		current->thread.error_code = is_write;
		force_sig_fault(SIGSEGV, code, (void *) address);
		return;
	}
	bad_page_fault(regs, address, SIGSEGV);
	return;


	/* We ran out of memory, or some other thing happened to us that made
	 * us unable to handle the page fault gracefully.
	 */
out_of_memory:
	mmap_read_unlock(mm);
	if (!user_mode(regs))
		bad_page_fault(regs, address, SIGKILL);
	else
		pagefault_out_of_memory();
	return;

do_sigbus:
	mmap_read_unlock(mm);

	/* Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
	current->thread.bad_vaddr = address;
	force_sig_fault(SIGBUS, BUS_ADRERR, (void *) address);

	/* Kernel mode? Handle exceptions or die */
	if (!user_mode(regs))
		bad_page_fault(regs, address, SIGBUS);
	return;

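	/* Kernel faults in the vmalloc region never reach handle_mm_fault():
	 * they are fixed up by copying the affected entries from the
	 * 'reference' page table (init_mm.pgd) into the current task's page
	 * table, walking down one level at a time.
	 */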
vmalloc_fault:
	{
		/* Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 */
		struct mm_struct *act_mm = current->active_mm;
		int index = pgd_index(address);
		pgd_t *pgd, *pgd_k;
		p4d_t *p4d, *p4d_k;
		pud_t *pud, *pud_k;
		pmd_t *pmd, *pmd_k;
		pte_t *pte_k;

		if (act_mm == NULL)
			goto bad_page_fault;

		pgd = act_mm->pgd + index;
		pgd_k = init_mm.pgd + index;

		if (!pgd_present(*pgd_k))
			goto bad_page_fault;

		pgd_val(*pgd) = pgd_val(*pgd_k);

		p4d = p4d_offset(pgd, address);
		p4d_k = p4d_offset(pgd_k, address);
		if (!p4d_present(*p4d) || !p4d_present(*p4d_k))
			goto bad_page_fault;

		pud = pud_offset(p4d, address);
		pud_k = pud_offset(p4d_k, address);
		if (!pud_present(*pud) || !pud_present(*pud_k))
			goto bad_page_fault;

		pmd = pmd_offset(pud, address);
		pmd_k = pmd_offset(pud_k, address);
		if (!pmd_present(*pmd) || !pmd_present(*pmd_k))
			goto bad_page_fault;

		pmd_val(*pmd) = pmd_val(*pmd_k);
		pte_k = pte_offset_kernel(pmd_k, address);

		if (!pte_present(*pte_k))
			goto bad_page_fault;
		return;
	}
bad_page_fault:
	bad_page_fault(regs, address, SIGKILL);
	return;
}

void
bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
{
	extern void __noreturn die(const char *, struct pt_regs *, long);
	const struct exception_table_entry *entry;

	/* Are we prepared to handle this kernel fault? */
	if ((entry = search_exception_tables(regs->pc)) != NULL) {
		pr_debug("%s: Exception at pc=%#010lx (%lx)\n",
			 current->comm, regs->pc, entry->fixup);
		current->thread.bad_uaddr = address;
		regs->pc = entry->fixup;
		return;
	}
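
	/* No fixup entry: the faulting instruction is not one of the
	 * whitelisted user accessors (get_user()/put_user() and friends),
	 * so this is a genuine kernel bug.
	 */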
	/* Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	pr_alert("Unable to handle kernel paging request at virtual "
		 "address %08lx\n pc = %08lx, ra = %08lx\n",
		 address, regs->pc, regs->areg[0]);
	die("Oops", regs, sig);
}