// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2005-2017 Andes Technology Corporation

#include <linux/extable.h>
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/ptrace.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/perf_event.h>

#include <asm/tlbflush.h>

extern void __noreturn die(const char *str, struct pt_regs *regs, long err);

/*
 * This is useful to dump out the page tables associated with
 * 'addr' in mm 'mm'.
 */
void show_pte(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;

	if (!mm)
		mm = &init_mm;

	pr_alert("pgd = %p\n", mm->pgd);
	pgd = pgd_offset(mm, addr);
	pr_alert("[%08lx] *pgd=%08lx", addr, pgd_val(*pgd));

	do {
		p4d_t *p4d;
		pud_t *pud;
		pmd_t *pmd;
		pte_t *pte;

		if (pgd_none(*pgd) || pgd_bad(*pgd))
			break;

		p4d = p4d_offset(pgd, addr);
		pud = pud_offset(p4d, addr);
		pmd = pmd_offset(pud, addr);
		pr_alert(", *pmd=%08lx", pmd_val(*pmd));

		if (pmd_none(*pmd) || pmd_bad(*pmd))
			break;

		if (IS_ENABLED(CONFIG_HIGHMEM))
			break;

		/* We must not map this if we have highmem enabled */
		pte = pte_offset_map(pmd, addr);
		pr_alert(", *pte=%08lx", pte_val(*pte));
		pte_unmap(pte);
	} while (0);

	pr_alert("\n");
}
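
/*
 * Example output of show_pte() with hypothetical values, for a fault at
 * 0xbf012345 (on this two-level layout *pgd and *pmd show the same entry):
 *
 *	pgd = c0004000
 *	[bf012345] *pgd=8140040b, *pmd=8140040b, *pte=00000000
 *
 * A missing or bad level truncates the line at that entry.
 */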

void do_page_fault(unsigned long entry, unsigned long addr,
		   unsigned int error_code, struct pt_regs *regs)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	int si_code;
	vm_fault_t fault;
	unsigned int mask = VM_ACCESS_FLAGS;
	unsigned int flags = FAULT_FLAG_DEFAULT;

	error_code = error_code & (ITYPE_mskINST | ITYPE_mskETYPE);
	tsk = current;
	mm = tsk->mm;
	si_code = SEGV_MAPERR;
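
	/*
	 * 'entry' says which exception vector fired (PTE not present,
	 * TLB-misc protection fault, or a general exception) and the masked
	 * 'error_code' keeps only the instruction/etype bits describing the
	 * access; everything below dispatches on this pair.
	 */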

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	if (addr >= TASK_SIZE) {
		if (user_mode(regs))
			goto bad_area_nosemaphore;

		if (addr >= TASK_SIZE && addr < VMALLOC_END
		    && (entry == ENTRY_PTE_NOT_PRESENT))
			goto vmalloc_fault;
		else
			goto no_context;
	}
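
	/*
	 * Note that a user-mode access to a kernel address was rejected
	 * above without taking mmap_lock; only a missing vmalloc
	 * translation is repaired in-kernel, via vmalloc_fault below.
	 */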

	/* Send a signal to the task for handling the unaligned access. */
	if (entry == ENTRY_GENERAL_EXCPETION
	    && error_code == ETYPE_ALIGNMENT_CHECK) {
		if (user_mode(regs))
			goto bad_area_nosemaphore;
		else
			goto no_context;
	}
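
	/*
	 * (ENTRY_GENERAL_EXCPETION is spelled exactly as the nds32 headers
	 *  spell it; the misspelling is in the macro name itself.)
	 */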

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (unlikely(faulthandler_disabled() || !mm))
		goto no_context;

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
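	/* This event is what 'perf stat -e page-faults' ends up counting. */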

	/*
	 * As per x86, we may deadlock here. However, since the kernel only
	 * validly references user space from well defined areas of the code,
	 * we can bug out early if this is from code which shouldn't.
	 */
	if (unlikely(!mmap_read_trylock(mm))) {
		if (!user_mode(regs) &&
		    !search_exception_tables(instruction_pointer(regs)))
			goto no_context;
retry:
		mmap_read_lock(mm);
	} else {
		/*
		 * The above mmap_read_trylock() might have succeeded in
		 * which case, we'll have missed the might_sleep() from
		 * mmap_read_lock().
		 */
		might_sleep();
		if (IS_ENABLED(CONFIG_DEBUG_VM)) {
			if (!user_mode(regs) &&
			    !search_exception_tables(instruction_pointer(regs)))
				goto no_context;
		}
	}
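
	/*
	 * The deadlock avoided above: a kernel path that already holds
	 * mmap_lock and then faults on a user address (say, a buggy
	 * copy_to_user()) would block forever on mmap_read_lock(). The
	 * trylock plus exception-table check routes such faults to the
	 * fixup path instead.
	 */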

	vma = find_vma(mm, addr);
	if (unlikely(!vma))
		goto bad_area;

	if (vma->vm_start <= addr)
		goto good_area;

	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN)))
		goto bad_area;

	if (unlikely(expand_stack(vma, addr)))
		goto bad_area;

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
good_area:
	si_code = SEGV_ACCERR;
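
	/*
	 * From here on a mapping exists, so any remaining failure is a
	 * permission problem: report SEGV_ACCERR rather than SEGV_MAPERR.
	 */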

	/* first do some preliminary protection checks */
	if (entry == ENTRY_PTE_NOT_PRESENT) {
		if (error_code & ITYPE_mskINST)
			mask = VM_EXEC;
		else
			mask = VM_READ | VM_WRITE;
	} else if (entry == ENTRY_TLB_MISC) {
		switch (error_code & ITYPE_mskETYPE) {
		/*
		 * NOTE: the case labels below are a reconstruction; the
		 * exact ETYPE names were lost from this excerpt. What is
		 * certain is that each case selects the permission needed
		 * in 'mask', and that the two write-type faults also set
		 * FAULT_FLAG_WRITE.
		 */
		case RD_PROT:
			mask = VM_READ;
			break;
		case WRT_PROT:
			mask = VM_WRITE;
			flags |= FAULT_FLAG_WRITE;
			break;
		case NOEXEC:
			mask = VM_EXEC;
			break;
		case PAGE_MODIFY:
			mask = VM_WRITE;
			flags |= FAULT_FLAG_WRITE;
			break;
		default:
			break;
		}
	}

	if (!(vma->vm_flags & mask))
		goto bad_area;
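
	/*
	 * Example: a store to a read-only mapping arrives as a write-type
	 * fault, 'mask' becomes VM_WRITE, the test above fails, and the
	 * task gets SIGSEGV with si_code == SEGV_ACCERR.
	 */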

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, addr, flags, regs);

	/*
	 * If we need to retry but a fatal signal is pending, handle the
	 * signal first. We do not need to release the mmap_lock because it
	 * would already be released in __lock_page_or_retry in mm/filemap.c.
	 */
	if (fault_signal_pending(fault, regs)) {
		if (!user_mode(regs))
			goto no_context;
		return;
	}

	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		else
			goto bad_area;
	}

	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_RETRY) {
			flags |= FAULT_FLAG_TRIED;

			/* No need to mmap_read_unlock(mm) as we would
			 * have already released it in __lock_page_or_retry
			 * in mm/filemap.c.
			 */
			goto retry;
		}
	}

	mmap_read_unlock(mm);
	return;

	/*
	 * Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
bad_area:
	mmap_read_unlock(mm);

bad_area_nosemaphore:

	/* User mode accesses just cause a SIGSEGV */
	if (user_mode(regs)) {
		tsk->thread.address = addr;
		tsk->thread.error_code = error_code;
		tsk->thread.trap_no = entry;
		force_sig_fault(SIGSEGV, si_code, (void __user *)addr);
		return;
	}

no_context:

	/* Are we prepared to handle this kernel fault?
	 *
	 * (The kernel has valid exception-points in the source
	 *  when it accesses user-memory. When it fails in one
	 *  of those points, we find it in a table and do a jump
	 *  to some fixup code that loads an appropriate error
	 *  code)
	 */
	{
		const struct exception_table_entry *entry;

		if ((entry =
		     search_exception_tables(instruction_pointer(regs))) !=
		    NULL) {
			/* Adjust the instruction pointer in the stackframe */
			instruction_pointer(regs) = entry->fixup;
			return;
		}
	}
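
	/*
	 * (The local 'entry' above intentionally shadows the function's
	 *  'entry' argument; only the exception-table entry is meant.)
	 *
	 * Sketch of where such an entry comes from - the generic uaccess
	 * pattern, not literal nds32 assembly: a user-space load is paired
	 * with a fixup through the __ex_table section, e.g.
	 *
	 *	1:	<load from user address>	; may fault
	 *	.section __ex_table, "a"
	 *		.long	1b, <fixup that yields -EFAULT>
	 *	.previous
	 *
	 * search_exception_tables() looks the faulting PC up in that table
	 * so we resume at the fixup instead of dying.
	 */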

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	bust_spinlocks(1);
	pr_alert("Unable to handle kernel %s at virtual address %08lx\n",
		 (addr < PAGE_SIZE) ? "NULL pointer dereference" :
		 "paging request", addr);

	show_pte(mm, addr);
	die("Oops", regs, error_code);

	/*
	 * We ran out of memory, or some other thing happened to us that made
	 * us unable to handle the page fault gracefully.
	 */
out_of_memory:
	mmap_read_unlock(mm);
	if (!user_mode(regs))
		goto no_context;
	pagefault_out_of_memory();
	return;
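
	/*
	 * (pagefault_out_of_memory() above defers to the core OOM machinery;
	 *  if the task survives, the return retries the faulting access.)
	 */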

do_sigbus:
	mmap_read_unlock(mm);

	/* Kernel mode? Handle exceptions or die */
	if (!user_mode(regs))
		goto no_context;

	/*
	 * Send a sigbus
	 */
	tsk->thread.address = addr;
	tsk->thread.error_code = error_code;
	tsk->thread.trap_no = entry;
	force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)addr);
	return;
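
	/*
	 * As on the SIGSEGV path, the address/error_code/trap_no triple is
	 * stashed in the thread struct so the cause of the last fault can
	 * be inspected later (e.g. by debuggers).
	 */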

vmalloc_fault:
	{
		/*
		 * Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 *
		 * Use current_pgd instead of tsk->active_mm->pgd
		 * since the latter might be unavailable if this
		 * code is executed in an unfortunately timed irq
		 * (like inside schedule() between switch_mm and
		 *  switch_to...).
		 */

		unsigned int index = pgd_index(addr);
		pgd_t *pgd, *pgd_k;
		p4d_t *p4d, *p4d_k;
		pud_t *pud, *pud_k;
		pmd_t *pmd, *pmd_k;
		pte_t *pte_k;

		pgd = (pgd_t *) __va(__nds32__mfsr(NDS32_SR_L1_PPTB)) + index;
		pgd_k = init_mm.pgd + index;

		if (!pgd_present(*pgd_k))
			goto no_context;

		p4d = p4d_offset(pgd, addr);
		p4d_k = p4d_offset(pgd_k, addr);
		if (!p4d_present(*p4d_k))
			goto no_context;

		pud = pud_offset(p4d, addr);
		pud_k = pud_offset(p4d_k, addr);
		if (!pud_present(*pud_k))
			goto no_context;

		pmd = pmd_offset(pud, addr);
		pmd_k = pmd_offset(pud_k, addr);
		if (!pmd_present(*pmd_k))
			goto no_context;

		if (!pmd_present(*pmd))
			set_pmd(pmd, *pmd_k);
		else
			BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));
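
		/*
		 * On this two-level layout the p4d and pud levels are
		 * folded onto the pgd, so copying the pmd entry above
		 * effectively copies the top-level entry from init_mm into
		 * the page table the hardware walks (read back from the
		 * L1_PPTB system register).
		 */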

		/*
		 * Since the vmalloc area is global, we don't
		 * need to copy individual PTE's, it is enough to
		 * copy the pgd pointer into the pte page of the
		 * root task. If that is there, we'll find our pte if
		 * it exists.
		 */

		/* Make sure the actual PTE exists as well to
		 * catch kernel vmalloc-area accesses to non-mapped
		 * addresses. If we don't do this, this will just
		 * silently loop forever.
		 */
		pte_k = pte_offset_kernel(pmd_k, addr);
		if (!pte_present(*pte_k))
			goto no_context;

		return;
	}
}