// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/arch/m68k/mm/fault.c
 *
 *  Copyright (C) 1995  Hamish Macdonald
 */

#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/uaccess.h>

#include <asm/setup.h>
#include <asm/traps.h>
#include <asm/pgalloc.h>

extern void die_if_kernel(char *, struct pt_regs *, long);

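/*
 * Deliver the fault signal recorded in current->thread (signo, code,
 * faddr) to the current task.  For kernel-mode faults, try an exception
 * fixup first; if none applies, report an Oops and kill the task.
 */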
int send_fault_sig(struct pt_regs *regs)
{
        siginfo_t siginfo = { 0, 0, 0, };

        siginfo.si_signo = current->thread.signo;
        siginfo.si_code = current->thread.code;
        siginfo.si_addr = (void *)current->thread.faddr;
        pr_debug("send_fault_sig: %p,%d,%d\n", siginfo.si_addr,
                 siginfo.si_signo, siginfo.si_code);

        if (user_mode(regs)) {
                force_sig_info(siginfo.si_signo,
                               &siginfo, current);
        } else {
                if (fixup_exception(regs))
                        return -1;

                //if (siginfo.si_signo == SIGBUS)
                //      force_sig_info(siginfo.si_signo,
                //                     &siginfo, current);

                /*
                 * Oops. The kernel tried to access some bad page. We'll have to
                 * terminate things with extreme prejudice.
                 */
                if ((unsigned long)siginfo.si_addr < PAGE_SIZE)
                        pr_alert("Unable to handle kernel NULL pointer dereference");
                else
                        pr_alert("Unable to handle kernel access");
                pr_cont(" at virtual address %p\n", siginfo.si_addr);
                die_if_kernel("Oops", regs, 0 /*error_code*/);
                do_exit(SIGKILL);
        }

        return 1;
}

/*
 * This routine handles page faults.  It determines the problem, and
 * then passes it off to one of the appropriate routines.
 *
 * error_code:
 *      bit 0 == 0 means no page found, 1 means protection fault
 *      bit 1 == 0 means read, 1 means write
 *
 * If this routine detects a bad access, it returns 1, otherwise it
 * returns 0.
 */
int do_page_fault(struct pt_regs *regs, unsigned long address,
                  unsigned long error_code)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        int fault;
        unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

        pr_debug("do page fault:\nregs->sr=%#x, regs->pc=%#lx, address=%#lx, %ld, %p\n",
                 regs->sr, regs->pc, address, error_code, mm ? mm->pgd : NULL);

        /*
         * If we're in an interrupt or have no user
         * context, we must not take the fault..
         */
        if (faulthandler_disabled() || !mm)
                goto no_context;

        if (user_mode(regs))
                flags |= FAULT_FLAG_USER;
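
/*
 * Grab mmap_sem and look up the VMA.  If handle_mm_fault() later asks for
 * a retry, mmap_sem has already been dropped and we come back here with
 * FAULT_FLAG_ALLOW_RETRY cleared.
 */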
retry:
        down_read(&mm->mmap_sem);

        vma = find_vma(mm, address);
        if (!vma)
                goto map_err;
        if (vma->vm_flags & VM_IO)
                goto acc_err;
        if (vma->vm_start <= address)
                goto good_area;
        if (!(vma->vm_flags & VM_GROWSDOWN))
                goto map_err;
        if (user_mode(regs)) {
                /* Accessing the stack below usp is always a bug.  The
                   "+ 256" is there due to some instructions doing
                   pre-decrement on the stack and that doesn't show up
                   until later.  */
                if (address + 256 < rdusp())
                        goto map_err;
        }
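        /* Grow the stack VMA downwards so that it covers the faulting address. */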
        if (expand_stack(vma, address))
                goto map_err;

/*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it..
 */
good_area:
        pr_debug("do_page_fault: good_area\n");
        switch (error_code & 3) {
                default:        /* 3: write, present */
                        /* fall through */
                case 2:         /* write, not present */
                        if (!(vma->vm_flags & VM_WRITE))
                                goto acc_err;
                        flags |= FAULT_FLAG_WRITE;
                        break;
                case 1:         /* read, present */
                        goto acc_err;
                case 0:         /* read, not present */
                        if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
                                goto acc_err;
        }

        /*
         * If for any reason at all we couldn't handle the fault,
         * make sure we exit gracefully rather than endlessly redo
         * the fault.
         */

        fault = handle_mm_fault(vma, address, flags);
        pr_debug("handle_mm_fault returns %d\n", fault);

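        /*
         * If we were told to retry but a fatal signal is already pending,
         * give up: mmap_sem has already been dropped in that case, and the
         * signal takes precedence over completing the fault.
         */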
        if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
                return 0;

        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
                else if (fault & VM_FAULT_SIGSEGV)
                        goto map_err;
                else if (fault & VM_FAULT_SIGBUS)
                        goto bus_err;
                BUG();
        }

        /*
         * Major/minor page fault accounting is only done on the
         * initial attempt. If we go through a retry, it is extremely
         * likely that the page will be found in page cache at that point.
         */
        if (flags & FAULT_FLAG_ALLOW_RETRY) {
                if (fault & VM_FAULT_MAJOR)
                        current->maj_flt++;
                else
                        current->min_flt++;
                if (fault & VM_FAULT_RETRY) {
                        /* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
                         * of starvation. */
                        flags &= ~FAULT_FLAG_ALLOW_RETRY;
                        flags |= FAULT_FLAG_TRIED;

                        /*
                         * No need to up_read(&mm->mmap_sem) as we would
                         * have already released it in __lock_page_or_retry
                         * in mm/filemap.c.
                         */

                        goto retry;
                }
        }

        up_read(&mm->mmap_sem);
        return 0;

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
        up_read(&mm->mmap_sem);
        if (!user_mode(regs))
                goto no_context;
        pagefault_out_of_memory();
        return 0;

no_context:
        current->thread.signo = SIGBUS;
        current->thread.faddr = address;
        return send_fault_sig(regs);

bus_err:
        current->thread.signo = SIGBUS;
        current->thread.code = BUS_ADRERR;
        current->thread.faddr = address;
        goto send_sig;

map_err:
        current->thread.signo = SIGSEGV;
        current->thread.code = SEGV_MAPERR;
        current->thread.faddr = address;
        goto send_sig;

acc_err:
        current->thread.signo = SIGSEGV;
        current->thread.code = SEGV_ACCERR;
        current->thread.faddr = address;

send_sig:
        up_read(&mm->mmap_sem);
        return send_fault_sig(regs);
}