/*
 * linux/arch/alpha/mm/fault.c
 *
 * Copyright (C) 1995 Linus Torvalds
 */

#include <linux/config.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <asm/io.h>

#define __EXTERN_INLINE inline
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#undef __EXTERN_INLINE

#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/interrupt.h>
#include <linux/module.h>

#include <asm/system.h>
#include <asm/uaccess.h>

extern void die_if_kernel(char *,struct pt_regs *,long, unsigned long *);


/*
 * Force a new ASN for a task.
 */

#ifndef CONFIG_SMP
unsigned long last_asn = ASN_FIRST_VERSION;
#endif

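/*
 * Allocate a fresh ASN (address space number) for NEXT_MM on this CPU,
 * record it in the per-CPU context slot, and update the HWPCB so the
 * new ASN and page-table base take effect: pcb->ptbr is a page frame
 * number, computed from the pgd's kernel-virtual address minus the
 * direct-map base IDENT_ADDR.  __reload_thread() hands the updated PCB
 * to PALcode (presumably via the swpctx call) to load it immediately.
 */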
void
__load_new_mm_context(struct mm_struct *next_mm)
{
	unsigned long mmc;
	struct pcb_struct *pcb;

	mmc = __get_new_mm_context(next_mm, smp_processor_id());
	next_mm->context[smp_processor_id()] = mmc;

	pcb = &current_thread_info()->pcb;
	pcb->asn = mmc & HARDWARE_ASN_MASK;
	pcb->ptbr = ((unsigned long) next_mm->pgd - IDENT_ADDR) >> PAGE_SHIFT;

	__reload_thread(pcb);
}


/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to handle_mm_fault().
 *
 * mmcsr:
 *	0 = translation not valid
 *	1 = access violation
 *	2 = fault-on-read
 *	3 = fault-on-execute
 *	4 = fault-on-write
 *
 * cause:
 *	-1 = instruction fetch
 *	0 = load
 *	1 = store
 *
 * Registers $9 through $15 are saved in a block just prior to `regs' and
 * are saved and restored around the call to allow exception code to
 * modify them.
 */

/* Macro for exception fixup code to access integer registers. */
#define dpf_reg(r) \
	(((unsigned long *)regs)[(r) <= 8 ? (r) : (r) <= 15 ? (r)-16 : \
				 (r) <= 18 ? (r)+10 : (r)-10])
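/* The index math follows the Alpha pt_regs layout: $0-$8 sit at the
   start of the frame, $19-$28 follow them (hence (r)-10), $16-$18
   occupy the last three slots of pt_regs (hence (r)+10), and $9-$15
   live in the seven-word block saved immediately below `regs' (see the
   comment above), reached with the negative index (r)-16.  */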

asmlinkage void
do_page_fault(unsigned long address, unsigned long mmcsr,
	      long cause, struct pt_regs *regs)
{
	struct vm_area_struct * vma;
	struct mm_struct *mm = current->mm;
	const struct exception_table_entry *fixup;
	int fault, si_code = SEGV_MAPERR;
	siginfo_t info;

	/* As of EV6, a load into $31/$f31 is a prefetch, and never faults
	   (or is suppressed by the PALcode).  Support that for older CPUs
	   by ignoring such an instruction.  */
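	/* Decoding: bits <31:26> of an Alpha instruction are the major
	   opcode and bits <25:21> are Ra, the destination register of a
	   load.  Register 31 always reads as zero, so such a load has no
	   architecturally visible effect and can simply be skipped by
	   advancing the PC.  The 64-bit mask below has one bit set per
	   load opcode listed in the comment.  */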
	if (cause == 0) {
		unsigned int insn;
		__get_user(insn, (unsigned int __user *)regs->pc);
		if ((insn >> 21 & 0x1f) == 0x1f &&
		    /* ldq ldl ldt lds ldg ldf ldwu ldbu */
		    (1ul << (insn >> 26) & 0x30f00001400ul)) {
			regs->pc += 4;
			return;
		}
	}

	/* If we're in an interrupt context, or have no user context,
	   we must not take the fault.  */
	if (!mm || in_interrupt())
		goto no_context;

#ifdef CONFIG_ALPHA_LARGE_VMALLOC
	if (address >= TASK_SIZE)
		goto vmalloc_fault;
#endif

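	/* find_vma() returns the first vma that ends above the faulting
	   address, so the address may still lie below vma->vm_start; in
	   that case it is only valid if the vma is a downward-growing
	   stack that expand_stack() can extend to cover it.  */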
	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;

	/* Ok, we have a good vm_area for this memory access, so
	   we can handle it.  */
 good_area:
	si_code = SEGV_ACCERR;
	if (cause < 0) {
		if (!(vma->vm_flags & VM_EXEC))
			goto bad_area;
	} else if (!cause) {
		/* Allow reads even for write-only mappings */
		if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
			goto bad_area;
	} else {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	}

 survive:
	/* If for any reason at all we couldn't handle the fault,
	   make sure we exit gracefully rather than endlessly redo
	   the fault.  */
	fault = handle_mm_fault(mm, vma, address, cause > 0);
	up_read(&mm->mmap_sem);

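	/* handle_mm_fault() reports how the fault was resolved:
	   VM_FAULT_MINOR means the page was already resident and only
	   the page tables needed updating, VM_FAULT_MAJOR means I/O was
	   required, VM_FAULT_SIGBUS covers accesses the mapping cannot
	   satisfy (e.g. beyond the end of a mapped file), and
	   VM_FAULT_OOM means an allocation failed.  */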
	switch (fault) {
	case VM_FAULT_MINOR:
		current->min_flt++;
		break;
	case VM_FAULT_MAJOR:
		current->maj_flt++;
		break;
	case VM_FAULT_SIGBUS:
		goto do_sigbus;
	case VM_FAULT_OOM:
		goto out_of_memory;
	default:
		BUG();
	}
	return;

	/* Something tried to access memory that isn't in our memory map.
	   Fix it, but check if it's kernel or user first.  */
 bad_area:
	up_read(&mm->mmap_sem);

	if (user_mode(regs))
		goto do_sigsegv;

 no_context:
	/* Are we prepared to handle this fault as an exception?  */
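	/* search_exception_tables() looks the faulting PC up in the
	   kernel and module exception tables; the uaccess helpers (e.g.
	   __get_user above) register their user-space accesses there.
	   On Alpha the fixup record names a continuation PC and the
	   registers to load with 0 and -EFAULT, which fixup_exception()
	   pokes through the dpf_reg() mapping.  */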
	if ((fixup = search_exception_tables(regs->pc)) != 0) {
		unsigned long newpc;
		newpc = fixup_exception(dpf_reg, fixup, regs->pc);
		regs->pc = newpc;
		return;
	}

	/* Oops. The kernel tried to access some bad page. We'll have to
	   terminate things with extreme prejudice.  */
	printk(KERN_ALERT "Unable to handle kernel paging request at "
	       "virtual address %016lx\n", address);
	die_if_kernel("Oops", regs, cause, (unsigned long*)regs - 16);
	do_exit(SIGKILL);

	/* We ran out of memory, or some other thing happened to us that
	   made us unable to handle the page fault gracefully.  */
 out_of_memory:
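	/* Don't kill init over an allocation failure: let pid 1 yield
	   the CPU and retry the fault instead of being terminated.  */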
	if (current->pid == 1) {
		yield();
		down_read(&mm->mmap_sem);
		goto survive;
	}
	printk(KERN_ALERT "VM: killing process %s(%d)\n",
	       current->comm, current->pid);
	if (!user_mode(regs))
		goto no_context;
	do_exit(SIGKILL);

 do_sigbus:
	/* Send a sigbus, regardless of whether we were in kernel
	   or user mode.  */
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_ADRERR;
	info.si_addr = (void __user *) address;
	force_sig_info(SIGBUS, &info, current);
	if (!user_mode(regs))
		goto no_context;
	return;

 do_sigsegv:
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code = si_code;
	info.si_addr = (void __user *) address;
	force_sig_info(SIGSEGV, &info, current);
	return;

#ifdef CONFIG_ALPHA_LARGE_VMALLOC
 vmalloc_fault:
	if (user_mode(regs))
		goto do_sigsegv;
	else {
		/* Synchronize this task's top level page-table
		   with the "reference" page table from init.  */
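		/* Kernel (vmalloc) mappings are created in init's page
		   tables, swapper_pg_dir; with CONFIG_ALPHA_LARGE_VMALLOC
		   they may span more than one top-level entry, so other
		   tasks pick up a missing pgd slot lazily here.  Copying
		   the single pgd entry suffices because the lower-level
		   tables themselves are shared.  */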
		long index = pgd_index(address);
		pgd_t *pgd, *pgd_k;

		pgd = current->active_mm->pgd + index;
		pgd_k = swapper_pg_dir + index;
		if (!pgd_present(*pgd) && pgd_present(*pgd_k)) {
			pgd_val(*pgd) = pgd_val(*pgd_k);
			return;
		}
		goto no_context;
	}
#endif
}