// SPDX-License-Identifier: GPL-2.0-only
/* Page Fault Handling for ARC (TLB Miss / ProtV)
 *
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 */

#include <linux/signal.h>
#include <linux/interrupt.h>
#include <linux/sched/signal.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>
#include <linux/perf_event.h>
#include <linux/mm_types.h>
#include <asm/entry.h>
#include <asm/mmu.h>

/*
 * Handle a fault on a kernel virtual address: such addresses are needed to
 * implement vmalloc/pkmap/fixmap (refer to asm/processor.h for the System
 * Memory Map).
 *
 * This simply copies the PMD entry (pointer to 2nd level page table or
 * hugepage) from the swapper pgdir to the task pgdir. The 2nd level
 * table/page is thus shared.
 */
noinline static int handle_kernel_vaddr_fault(unsigned long address)
{
	/*
	 * Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 */
	pgd_t *pgd, *pgd_k;
	p4d_t *p4d, *p4d_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;

	pgd = pgd_offset(current->active_mm, address);
	pgd_k = pgd_offset_k(address);

	if (pgd_none(*pgd_k))
		goto bad_area;
	if (!pgd_present(*pgd))
		set_pgd(pgd, *pgd_k);

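	/*
	 * Repeat the same check-and-copy at each level below: if the kernel
	 * (swapper) table has an entry that the task's table lacks, copy it
	 * over so both point at the same lower-level table.
	 */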
	p4d = p4d_offset(pgd, address);
	p4d_k = p4d_offset(pgd_k, address);
	if (p4d_none(*p4d_k))
		goto bad_area;
	if (!p4d_present(*p4d))
		set_p4d(p4d, *p4d_k);

	pud = pud_offset(p4d, address);
	pud_k = pud_offset(p4d_k, address);
	if (pud_none(*pud_k))
		goto bad_area;
	if (!pud_present(*pud))
		set_pud(pud, *pud_k);

	pmd = pmd_offset(pud, address);
	pmd_k = pmd_offset(pud_k, address);
	if (pmd_none(*pmd_k))
		goto bad_area;
	if (!pmd_present(*pmd))
		set_pmd(pmd, *pmd_k);

	/* XXX: create the TLB entry here */
	return 0;

bad_area:
	return 1;
}

void do_page_fault(unsigned long address, struct pt_regs *regs)
{
	struct vm_area_struct *vma = NULL;
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	int sig, si_code = SEGV_MAPERR;
	unsigned int write = 0, exec = 0, mask;
	vm_fault_t fault = VM_FAULT_SIGSEGV;	/* handle_mm_fault() output */
	unsigned int flags;			/* handle_mm_fault() input */

	/*
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	if (address >= VMALLOC_START && !user_mode(regs)) {
		if (unlikely(handle_kernel_vaddr_fault(address)))
			goto no_context;
		else
			return;
	}

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault.
	 */
	if (faulthandler_disabled() || !mm)
		goto no_context;

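	/*
	 * Decode the exception cause register: a ProtV store/exclusive access
	 * is a write fault, a ProtV instruction fetch is an exec fault;
	 * anything else is treated as a read.
	 */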
	if (regs->ecr.cause & ECR_C_PROTV_STORE)	/* ST/EX */
		write = 1;
	else if ((regs->ecr.vec == ECR_V_PROTV) &&
		 (regs->ecr.cause == ECR_C_PROTV_INST_FETCH))
		exec = 1;

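	/* Tell handle_mm_fault() whether this was a user-mode and/or write access */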
	flags = FAULT_FLAG_DEFAULT;
	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
	if (write)
		flags |= FAULT_FLAG_WRITE;

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
retry:
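	/*
	 * lock_mm_and_find_vma() takes the mmap read lock and returns the VMA
	 * covering @address (expanding the stack if needed); on failure it
	 * returns NULL with the lock already dropped.
	 */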
	vma = lock_mm_and_find_vma(mm, address, regs);
	if (!vma)
		goto bad_area_nosemaphore;

	/*
	 * vm_area is good, now check permissions for this memory access
	 */
	mask = VM_READ;
	if (write)
		mask = VM_WRITE;
	if (exec)
		mask = VM_EXEC;

	if (!(vma->vm_flags & mask)) {
		si_code = SEGV_ACCERR;
		goto bad_area;
	}

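	/*
	 * handle_mm_fault() does the real work: it may allocate and map the
	 * page, accounts the major/minor fault against @regs, and can drop
	 * the mmap lock itself (VM_FAULT_COMPLETED / VM_FAULT_RETRY).
	 */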
	fault = handle_mm_fault(vma, address, flags, regs);

	/* Quick path to respond to signals */
	if (fault_signal_pending(fault, regs)) {
		if (!user_mode(regs))
			goto no_context;
		return;
	}

	/* The fault is fully completed (including releasing mmap lock) */
	if (fault & VM_FAULT_COMPLETED)
		return;

	/*
	 * Fault retry nuances, mmap_lock already relinquished by core mm
	 */
	if (unlikely(fault & VM_FAULT_RETRY)) {
		flags |= FAULT_FLAG_TRIED;
		goto retry;
	}

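	/*
	 * Control falls through here on both success and failure, with the
	 * mmap lock still held; despite the label name this is also the
	 * normal completion path once the lock is dropped.
	 */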
bad_area:
	mmap_read_unlock(mm);

bad_area_nosemaphore:
	/*
	 * Major/minor page fault accounting
	 * (in case of retry we only land here once)
	 */
	if (likely(!(fault & VM_FAULT_ERROR)))
		/* Normal return path: fault handled gracefully */
		return;

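	/*
	 * The fault could not be serviced: kernel-mode faults go through
	 * exception fixup (or die), user-mode faults either hit the OOM
	 * path or are delivered SIGBUS/SIGSEGV below.
	 */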
	if (!user_mode(regs))
		goto no_context;

	if (fault & VM_FAULT_OOM) {
		pagefault_out_of_memory();
		return;
	}

	if (fault & VM_FAULT_SIGBUS) {
		sig = SIGBUS;
		si_code = BUS_ADRERR;
	} else {
		sig = SIGSEGV;
	}

	tsk->thread.fault_address = address;
	force_sig_fault(sig, si_code, (void __user *)address);
	return;

no_context:
	if (fixup_exception(regs))
		return;

	die("Oops", regs, address);
}