kernel/entry/common.c
// SPDX-License-Identifier: GPL-2.0

#include <linux/context_tracking.h>
#include <linux/entry-common.h>
#include <linux/resume_user_mode.h>
#include <linux/highmem.h>
#include <linux/jump_label.h>
#include <linux/kmsan.h>
#include <linux/livepatch.h>
#include <linux/audit.h>
#include <linux/tick.h>

#include "common.h"

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

static inline void syscall_enter_audit(struct pt_regs *regs, long syscall)
{
	if (unlikely(audit_context())) {
		unsigned long args[6];

		syscall_get_arguments(current, regs, args);
		audit_syscall_entry(syscall, args[0], args[1], args[2], args[3]);
	}
}

long syscall_trace_enter(struct pt_regs *regs, long syscall,
				unsigned long work)
{
	long ret = 0;

	/*
	 * Handle Syscall User Dispatch.  This must come first, since
	 * the ABI here can be something that doesn't make sense for
	 * other syscall_work features.
	 */
	if (work & SYSCALL_WORK_SYSCALL_USER_DISPATCH) {
		if (syscall_user_dispatch(regs))
			return -1L;
	}

	/* Handle ptrace */
	if (work & (SYSCALL_WORK_SYSCALL_TRACE | SYSCALL_WORK_SYSCALL_EMU)) {
		ret = ptrace_report_syscall_entry(regs);
		if (ret || (work & SYSCALL_WORK_SYSCALL_EMU))
			return -1L;
	}

	/* Do seccomp after ptrace, to catch any tracer changes. */
	if (work & SYSCALL_WORK_SECCOMP) {
		ret = __secure_computing();
		if (ret == -1L)
			return ret;
	}

	/* Either of the above might have changed the syscall number */
	syscall = syscall_get_nr(current, regs);

	if (unlikely(work & SYSCALL_WORK_SYSCALL_TRACEPOINT)) {
		trace_sys_enter(regs, syscall);
		/*
		 * Probes or BPF hooks in the tracepoint may have changed the
		 * system call number as well.
		 */
		syscall = syscall_get_nr(current, regs);
	}

	syscall_enter_audit(regs, syscall);

	return ret ? : syscall;
}
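
/*
 * Illustration only, not part of this file: a minimal sketch of how an
 * architecture is expected to drive syscall_trace_enter() from its
 * syscall entry point. arch_syscall_handler(), arch_sys_call_table[]
 * and the regs->ax return value slot are hypothetical, x86-like
 * stand-ins for the architecture specific pieces.
 */
#if 0
__visible noinstr void arch_syscall_handler(struct pt_regs *regs)
{
	long nr = syscall_get_nr(current, regs);

	/* Calls syscall_trace_enter() above when SYSCALL_WORK flags are set */
	nr = syscall_enter_from_user_mode(regs, nr);

	instrumentation_begin();
	if (nr >= 0 && nr < NR_syscalls)
		regs->ax = arch_sys_call_table[nr](regs);
	instrumentation_end();

	/* Runs syscall_exit_work() plus the exit-to-user work loop */
	syscall_exit_to_user_mode(regs);
}
#endif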

noinstr void syscall_enter_from_user_mode_prepare(struct pt_regs *regs)
{
	enter_from_user_mode(regs);
	instrumentation_begin();
	local_irq_enable();
	instrumentation_end();
}
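
/*
 * Note: this is the first half of the two-step entry variant. An
 * architecture which has to touch user memory before the entry work
 * runs, e.g. to fetch a stashed syscall argument, which can fault and
 * therefore needs interrupts enabled, calls this first and then feeds
 * the syscall number into syscall_enter_from_user_mode_work().
 */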

/* Workaround to allow gradual conversion of architecture code */
void __weak arch_do_signal_or_restart(struct pt_regs *regs) { }

/**
 * exit_to_user_mode_loop - do any pending work before leaving to user space
 * @regs:	Pointer to pt_regs on entry stack
 * @ti_work:	TIF work flags as read by the caller
 */
__always_inline unsigned long exit_to_user_mode_loop(struct pt_regs *regs,
						     unsigned long ti_work)
{
	/*
	 * Before returning to user space ensure that all pending work
	 * items have been completed.
	 */
	while (ti_work & EXIT_TO_USER_MODE_WORK) {

		local_irq_enable_exit_to_user(ti_work);

		if (ti_work & (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY))
			schedule();

		if (ti_work & _TIF_UPROBE)
			uprobe_notify_resume(regs);

		if (ti_work & _TIF_PATCH_PENDING)
			klp_update_patch_state(current);

		if (ti_work & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
			arch_do_signal_or_restart(regs);

		if (ti_work & _TIF_NOTIFY_RESUME)
			resume_user_mode_work(regs);

		/* Architecture specific TIF work */
		arch_exit_to_user_mode_work(regs, ti_work);

		/*
		 * Disable interrupts and reevaluate the work flags as they
		 * might have changed while interrupts and preemption were
		 * enabled above.
		 */
		local_irq_disable_exit_to_user();

		/* Check if any of the above work has queued a deferred wakeup */
		tick_nohz_user_enter_prepare();

		ti_work = read_thread_flags();
	}

	/* Return the latest work state for arch_exit_to_user_mode() */
	return ti_work;
}
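
/*
 * Illustration only: a condensed sketch of the caller side of the loop
 * above, as exit_to_user_mode_prepare() in the entry headers roughly
 * looks. Details are simplified and may differ between kernel versions.
 */
#if 0
static __always_inline void exit_to_user_mode_prepare(struct pt_regs *regs)
{
	unsigned long ti_work;

	lockdep_assert_irqs_disabled();

	/* Flush pending rcuog wakeups before the last need_resched() check */
	tick_nohz_user_enter_prepare();

	ti_work = read_thread_flags();
	if (unlikely(ti_work & EXIT_TO_USER_MODE_WORK))
		ti_work = exit_to_user_mode_loop(regs, ti_work);

	arch_exit_to_user_mode_prepare(regs, ti_work);
}
#endif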

/*
 * If SYSCALL_EMU is set, then the only reason to report is when
 * SINGLESTEP is set (i.e. PTRACE_SYSEMU_SINGLESTEP).  That syscall
 * instruction has already been reported in syscall_enter_from_user_mode().
 */
static inline bool report_single_step(unsigned long work)
{
	if (work & SYSCALL_WORK_SYSCALL_EMU)
		return false;

	return work & SYSCALL_WORK_SYSCALL_EXIT_TRAP;
}
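
/*
 * Example: PTRACE_SYSEMU_SINGLESTEP sets both SYSCALL_WORK_SYSCALL_EMU
 * and SYSCALL_WORK_SYSCALL_EXIT_TRAP; the stop was already taken at
 * syscall entry, hence the early return above. A plain single step
 * across a syscall instruction sets only SYSCALL_EXIT_TRAP and is
 * reported from syscall_exit_work() below.
 */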

void syscall_exit_work(struct pt_regs *regs, unsigned long work)
{
	bool step;

	/*
	 * If the syscall was rolled back due to syscall user dispatching,
	 * then the tracers below are not invoked, for the same reason the
	 * entry side was not invoked in syscall_trace_enter(): the ABI
	 * of these syscalls is unknown.
	 */
	if (work & SYSCALL_WORK_SYSCALL_USER_DISPATCH) {
		if (unlikely(current->syscall_dispatch.on_dispatch)) {
			current->syscall_dispatch.on_dispatch = false;
			return;
		}
	}

	audit_syscall_exit(regs);

	if (work & SYSCALL_WORK_SYSCALL_TRACEPOINT)
		trace_sys_exit(regs, syscall_get_return_value(current, regs));

	step = report_single_step(work);
	if (step || work & SYSCALL_WORK_SYSCALL_TRACE)
		ptrace_report_syscall_exit(regs, step);
}

noinstr void irqentry_enter_from_user_mode(struct pt_regs *regs)
{
	enter_from_user_mode(regs);
}

noinstr void irqentry_exit_to_user_mode(struct pt_regs *regs)
{
	instrumentation_begin();
	exit_to_user_mode_prepare(regs);
	instrumentation_end();
	exit_to_user_mode();
}

noinstr irqentry_state_t irqentry_enter(struct pt_regs *regs)
{
	irqentry_state_t ret = {
		.exit_rcu = false,
	};

	if (user_mode(regs)) {
		irqentry_enter_from_user_mode(regs);
		return ret;
	}

	/*
	 * If this entry hit the idle task, invoke ct_irq_enter() whether
	 * RCU is watching or not.
	 *
	 * Interrupts can nest when the first interrupt invokes softirq
	 * processing on return, which enables interrupts.
	 *
	 * Scheduler ticks in the idle task can mark quiescent state and
	 * terminate a grace period, if and only if the timer interrupt is
	 * not nested into another interrupt.
	 *
	 * Checking for rcu_is_watching() here would prevent the nesting
	 * interrupt from invoking ct_irq_enter(). If that nested interrupt
	 * is the tick then rcu_flavor_sched_clock_irq() would wrongfully
	 * assume that it is the first interrupt and eventually claim
	 * quiescent state and end grace periods prematurely.
	 *
	 * Unconditionally invoke ct_irq_enter() so RCU state stays
	 * consistent.
	 *
	 * TINY_RCU does not support EQS, so let the compiler eliminate
	 * this part when enabled.
	 */
	if (!IS_ENABLED(CONFIG_TINY_RCU) && is_idle_task(current)) {
		/*
		 * If RCU is not watching then the same careful
		 * sequence vs. lockdep and tracing is required
		 * as in irqentry_enter_from_user_mode().
		 */
		lockdep_hardirqs_off(CALLER_ADDR0);
		ct_irq_enter();
		instrumentation_begin();
		kmsan_unpoison_entry_regs(regs);
		trace_hardirqs_off_finish();
		instrumentation_end();

		ret.exit_rcu = true;
		return ret;
	}

	/*
	 * If RCU is watching then RCU only wants to check whether it needs
	 * to restart the tick in NOHZ mode. rcu_irq_enter_check_tick()
	 * already contains a warning when RCU is not watching, so no point
	 * in having another one here.
	 */
	lockdep_hardirqs_off(CALLER_ADDR0);
	instrumentation_begin();
	kmsan_unpoison_entry_regs(regs);
	rcu_irq_enter_check_tick();
	trace_hardirqs_off_finish();
	instrumentation_end();

	return ret;
}
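
/*
 * Illustration only: the canonical pairing of irqentry_enter() and
 * irqentry_exit(), which is what e.g. the x86 IDT entry macros expand
 * to. arch_handle_irq() and handle_my_irq() are hypothetical stand-ins.
 */
#if 0
__visible noinstr void arch_handle_irq(struct pt_regs *regs)
{
	irqentry_state_t state = irqentry_enter(regs);

	instrumentation_begin();
	handle_my_irq(regs);	/* The instrumentable C level handler */
	instrumentation_end();

	irqentry_exit(regs, state);
}
#endif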

void raw_irqentry_exit_cond_resched(void)
{
	if (!preempt_count()) {
		/* Sanity check RCU and thread stack */
		rcu_irq_exit_check_preempt();
		if (IS_ENABLED(CONFIG_DEBUG_ENTRY))
			WARN_ON_ONCE(!on_thread_stack());
		if (need_resched())
			preempt_schedule_irq();
	}
}
#ifdef CONFIG_PREEMPT_DYNAMIC
#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
DEFINE_STATIC_CALL(irqentry_exit_cond_resched, raw_irqentry_exit_cond_resched);
#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
DEFINE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
void dynamic_irqentry_exit_cond_resched(void)
{
	if (!static_branch_unlikely(&sk_dynamic_irqentry_exit_cond_resched))
		return;
	raw_irqentry_exit_cond_resched();
}
#endif
#endif
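
/*
 * With CONFIG_PREEMPT_DYNAMIC, irqentry_exit_cond_resched() resolves via
 * the static call or the static key wrapper above, so the preemption
 * model selected at boot (preempt=none/voluntary/full) decides whether
 * raw_irqentry_exit_cond_resched() actually runs.
 */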

noinstr void irqentry_exit(struct pt_regs *regs, irqentry_state_t state)
{
	lockdep_assert_irqs_disabled();

	/* Check whether this returns to user mode */
	if (user_mode(regs)) {
		irqentry_exit_to_user_mode(regs);
	} else if (!regs_irqs_disabled(regs)) {
		/*
		 * If RCU was not watching on entry this needs to be done
		 * carefully and needs the same ordering of lockdep/tracing
		 * and RCU as the return to user mode path.
		 */
		if (state.exit_rcu) {
			instrumentation_begin();
			/* Tell the tracer that IRET will enable interrupts */
			trace_hardirqs_on_prepare();
			lockdep_hardirqs_on_prepare();
			instrumentation_end();
			ct_irq_exit();
			lockdep_hardirqs_on(CALLER_ADDR0);
			return;
		}

		instrumentation_begin();
		if (IS_ENABLED(CONFIG_PREEMPTION))
			irqentry_exit_cond_resched();

		/* Covers both tracing and lockdep */
		trace_hardirqs_on();
		instrumentation_end();
	} else {
		/*
		 * IRQ flags state is correct already. Just tell RCU if it
		 * was not watching on entry.
		 */
		if (state.exit_rcu)
			ct_irq_exit();
	}
}

irqentry_state_t noinstr irqentry_nmi_enter(struct pt_regs *regs)
{
	irqentry_state_t irq_state;

	irq_state.lockdep = lockdep_hardirqs_enabled();

	__nmi_enter();
	lockdep_hardirqs_off(CALLER_ADDR0);
	lockdep_hardirq_enter();
	ct_nmi_enter();

	instrumentation_begin();
	kmsan_unpoison_entry_regs(regs);
	trace_hardirqs_off_finish();
	ftrace_nmi_enter();
	instrumentation_end();

	return irq_state;
}

void noinstr irqentry_nmi_exit(struct pt_regs *regs, irqentry_state_t irq_state)
{
	instrumentation_begin();
	ftrace_nmi_exit();
	if (irq_state.lockdep) {
		trace_hardirqs_on_prepare();
		lockdep_hardirqs_on_prepare();
	}
	instrumentation_end();

	ct_nmi_exit();
	lockdep_hardirq_exit();
	if (irq_state.lockdep)
		lockdep_hardirqs_on(CALLER_ADDR0);
	__nmi_exit();
}
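
/*
 * Illustration only: the expected pairing for an architecture NMI
 * handler. arch_handle_nmi() and handle_my_nmi() are hypothetical
 * stand-ins.
 */
#if 0
__visible noinstr void arch_handle_nmi(struct pt_regs *regs)
{
	irqentry_state_t irq_state = irqentry_nmi_enter(regs);

	instrumentation_begin();
	handle_my_nmi(regs);
	instrumentation_end();

	irqentry_nmi_exit(regs, irq_state);
}
#endif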