ARC: mm: do_page_fault refactor #8: release mmap_sem sooner
arch/arc/mm/fault.c
// SPDX-License-Identifier: GPL-2.0-only
/* Page Fault Handling for ARC (TLB Miss / ProtV)
 *
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 */

#include <linux/signal.h>
#include <linux/interrupt.h>
#include <linux/sched/signal.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>
#include <linux/perf_event.h>
#include <linux/mm_types.h>
#include <asm/pgalloc.h>
#include <asm/mmu.h>

/*
 * kernel virtual address is required to implement vmalloc/pkmap/fixmap
 * Refer to asm/processor.h for System Memory Map
 *
 * It simply copies the PMD entry (pointer to 2nd level page table or hugepage)
 * from swapper pgdir to task pgdir. The 2nd level table/page is thus shared
 */
noinline static int handle_kernel_vaddr_fault(unsigned long address)
{
	/*
	 * Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 */
	pgd_t *pgd, *pgd_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;

	pgd = pgd_offset_fast(current->active_mm, address);
	pgd_k = pgd_offset_k(address);

	if (!pgd_present(*pgd_k))
		goto bad_area;

	pud = pud_offset(pgd, address);
	pud_k = pud_offset(pgd_k, address);
	if (!pud_present(*pud_k))
		goto bad_area;

	pmd = pmd_offset(pud, address);
	pmd_k = pmd_offset(pud_k, address);
	if (!pmd_present(*pmd_k))
		goto bad_area;

	set_pmd(pmd, *pmd_k);

	/* XXX: create the TLB entry here */
	return 0;

bad_area:
	return 1;
}

void do_page_fault(unsigned long address, struct pt_regs *regs)
{
	struct vm_area_struct *vma = NULL;
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	int sig, si_code = SEGV_MAPERR;
	unsigned int write = 0, exec = 0, mask;
	vm_fault_t fault = VM_FAULT_SIGSEGV;	/* handle_mm_fault() output */
	unsigned int flags;			/* handle_mm_fault() input */

	/*
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	if (address >= VMALLOC_START && !user_mode(regs)) {
		if (unlikely(handle_kernel_vaddr_fault(address)))
			goto no_context;
		else
			return;
	}

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (faulthandler_disabled() || !mm)
		goto no_context;

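	/*
	 * Classify the access from the Exception Cause Register:
	 * a ProtV store (ST/EX) is a write fault, a ProtV instruction
	 * fetch is an execute fault, anything else is treated as a read.
	 */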
	if (regs->ecr_cause & ECR_C_PROTV_STORE)	/* ST/EX */
		write = 1;
	else if ((regs->ecr_vec == ECR_V_PROTV) &&
		 (regs->ecr_cause == ECR_C_PROTV_INST_FETCH))
		exec = 1;

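	/*
	 * ALLOW_RETRY lets the core VM drop mmap_sem and request a retry
	 * (e.g. while waiting for page I/O); KILLABLE lets a fatal signal
	 * interrupt that wait.
	 */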
	flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
	if (write)
		flags |= FAULT_FLAG_WRITE;

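	/*
	 * The VMA lookup and handle_mm_fault() run with mmap_sem held for
	 * read; in the VM_FAULT_RETRY case the core VM drops it for us.
	 */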
retry:
	down_read(&mm->mmap_sem);

	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (unlikely(address < vma->vm_start)) {
		if (!(vma->vm_flags & VM_GROWSDOWN) || expand_stack(vma, address))
			goto bad_area;
	}

	/*
	 * vm_area is good, now check permissions for this memory access
	 */
	mask = VM_READ;
	if (write)
		mask = VM_WRITE;
	if (exec)
		mask = VM_EXEC;

	if (!(vma->vm_flags & mask)) {
		si_code = SEGV_ACCERR;
		goto bad_area;
	}

	fault = handle_mm_fault(vma, address, flags);

	/*
	 * Fault retry nuances
	 */
	if (unlikely(fault & VM_FAULT_RETRY)) {

		/*
		 * If fault needs to be retried, handle any pending signals
		 * first (by returning to user mode).
		 * mmap_sem already relinquished by core mm for RETRY case
		 */
		if (fatal_signal_pending(current)) {
			if (!user_mode(regs))
				goto no_context;
			return;
		}
		/*
		 * retry state machine
		 */
		if (flags & FAULT_FLAG_ALLOW_RETRY) {
			flags &= ~FAULT_FLAG_ALLOW_RETRY;
			flags |= FAULT_FLAG_TRIED;
			goto retry;
		}
	}

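	/*
	 * Common exit: the handled-fault fall-through and the error gotos
	 * all converge here, so mmap_sem is released before fault
	 * accounting and signal delivery ("release mmap_sem sooner").
	 */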
bad_area:
	up_read(&mm->mmap_sem);

	/*
	 * Major/minor page fault accounting
	 * (in case of retry we only land here once)
	 */
	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

	if (likely(!(fault & VM_FAULT_ERROR))) {
		if (fault & VM_FAULT_MAJOR) {
			tsk->maj_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
				      regs, address);
		} else {
			tsk->min_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
				      regs, address);
		}

		/* Normal return path: fault Handled Gracefully */
		return;
	}

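	/*
	 * The fault could not be handled: kernel mode faults go to the
	 * exception fixup / oops path below, user mode faults end in OOM
	 * handling or a SIGBUS/SIGSEGV.
	 */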
	if (!user_mode(regs))
		goto no_context;

	if (fault & VM_FAULT_OOM) {
		pagefault_out_of_memory();
		return;
	}

	if (fault & VM_FAULT_SIGBUS) {
		sig = SIGBUS;
		si_code = BUS_ADRERR;
	} else {
		sig = SIGSEGV;
	}

	tsk->thread.fault_address = address;
	force_sig_fault(sig, si_code, (void __user *)address, tsk);
	return;

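/*
 * Kernel-mode fault: fixup_exception() looks up the faulting instruction
 * in the exception table (uaccess fixups); if no fixup exists, oops.
 */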
no_context:
	if (fixup_exception(regs))
		return;

	die("Oops", regs, address);
}