// SPDX-License-Identifier: GPL-2.0

#include <linux/context_tracking.h>
#include <linux/entry-common.h>
#include <linux/resume_user_mode.h>
#include <linux/highmem.h>
#include <linux/jump_label.h>
#include <linux/kmsan.h>
#include <linux/livepatch.h>
#include <linux/audit.h>
#include <linux/tick.h>

#include "common.h"

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

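/*
 * Emit the audit record for syscall entry when an audit context is
 * active. Note that audit_syscall_entry() consumes only the syscall
 * number and the first four arguments.
 */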
static inline void syscall_enter_audit(struct pt_regs *regs, long syscall)
{
        if (unlikely(audit_context())) {
                unsigned long args[6];

                syscall_get_arguments(current, regs, args);
                audit_syscall_entry(syscall, args[0], args[1], args[2], args[3]);
        }
}

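/*
 * Handle all pending syscall entry work, in this order: syscall user
 * dispatch, ptrace, seccomp, the entry tracepoint and audit. Returns
 * the (possibly modified) syscall number to execute, or -1L when the
 * syscall should be skipped.
 */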
long syscall_trace_enter(struct pt_regs *regs, long syscall,
                         unsigned long work)
{
        long ret = 0;

        /*
         * Handle Syscall User Dispatch. This must come first, since
         * the ABI here can be something that doesn't make sense for
         * other syscall_work features.
         */
        if (work & SYSCALL_WORK_SYSCALL_USER_DISPATCH) {
                if (syscall_user_dispatch(regs))
                        return -1L;
        }

        /* Handle ptrace */
        if (work & (SYSCALL_WORK_SYSCALL_TRACE | SYSCALL_WORK_SYSCALL_EMU)) {
                ret = ptrace_report_syscall_entry(regs);
                if (ret || (work & SYSCALL_WORK_SYSCALL_EMU))
                        return -1L;
        }

        /* Do seccomp after ptrace, to catch any tracer changes. */
        if (work & SYSCALL_WORK_SECCOMP) {
                ret = __secure_computing();
                if (ret == -1L)
                        return ret;
        }

        /* Either of the above might have changed the syscall number */
        syscall = syscall_get_nr(current, regs);

        if (unlikely(work & SYSCALL_WORK_SYSCALL_TRACEPOINT)) {
                trace_sys_enter(regs, syscall);
                /*
                 * Probes or BPF hooks in the tracepoint may have changed the
                 * system call number as well.
                 */
                syscall = syscall_get_nr(current, regs);
        }

        syscall_enter_audit(regs, syscall);

        return ret ? : syscall;
}

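/*
 * Establish state (lockdep, RCU, instrumentation) and enable interrupts
 * before the syscall entry work above can run.
 */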
noinstr void syscall_enter_from_user_mode_prepare(struct pt_regs *regs)
{
        enter_from_user_mode(regs);
        instrumentation_begin();
        local_irq_enable();
        instrumentation_end();
}

/* Workaround to allow gradual conversion of architecture code */
void __weak arch_do_signal_or_restart(struct pt_regs *regs) { }

/**
 * exit_to_user_mode_loop - do any pending work before leaving to user space
 * @regs: Pointer to pt_regs on entry stack
 * @ti_work: TIF work flags as read by the caller
 */
__always_inline unsigned long exit_to_user_mode_loop(struct pt_regs *regs,
                                                     unsigned long ti_work)
{
        /*
         * Before returning to user space ensure that all pending work
         * items have been completed.
         */
        while (ti_work & EXIT_TO_USER_MODE_WORK) {

                local_irq_enable_exit_to_user(ti_work);

                if (ti_work & (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY))
                        schedule();

                if (ti_work & _TIF_UPROBE)
                        uprobe_notify_resume(regs);

                if (ti_work & _TIF_PATCH_PENDING)
                        klp_update_patch_state(current);

                if (ti_work & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
                        arch_do_signal_or_restart(regs);

                if (ti_work & _TIF_NOTIFY_RESUME)
                        resume_user_mode_work(regs);

                /* Architecture specific TIF work */
                arch_exit_to_user_mode_work(regs, ti_work);

                /*
                 * Disable interrupts and reevaluate the work flags as they
                 * might have changed while interrupts and preemption were
                 * enabled above.
                 */
                local_irq_disable_exit_to_user();

                /* Check if any of the above work has queued a deferred wakeup */
                tick_nohz_user_enter_prepare();

                ti_work = read_thread_flags();
        }

        /* Return the latest work state for arch_exit_to_user_mode() */
        return ti_work;
}

/*
 * If SYSCALL_EMU is set, then the only reason to report is when
 * SINGLESTEP is set (i.e. PTRACE_SYSEMU_SINGLESTEP). The syscall
 * instruction has already been reported in syscall_enter_from_user_mode().
 */
static inline bool report_single_step(unsigned long work)
{
        if (work & SYSCALL_WORK_SYSCALL_EMU)
                return false;

        return work & SYSCALL_WORK_SYSCALL_EXIT_TRAP;
}

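/*
 * Handle the pending syscall exit work: audit, the exit tracepoint and
 * the ptrace exit report, with an early return for syscalls which were
 * rolled back by syscall user dispatch.
 */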
void syscall_exit_work(struct pt_regs *regs, unsigned long work)
{
        bool step;

        /*
         * If the syscall was rolled back due to syscall user dispatching,
         * then the tracers below are not invoked for the same reason as
         * the entry side was not invoked in syscall_trace_enter(): The ABI
         * of these syscalls is unknown.
         */
        if (work & SYSCALL_WORK_SYSCALL_USER_DISPATCH) {
                if (unlikely(current->syscall_dispatch.on_dispatch)) {
                        current->syscall_dispatch.on_dispatch = false;
                        return;
                }
        }

        audit_syscall_exit(regs);

        if (work & SYSCALL_WORK_SYSCALL_TRACEPOINT)
                trace_sys_exit(regs, syscall_get_return_value(current, regs));

        step = report_single_step(work);
        if (step || work & SYSCALL_WORK_SYSCALL_TRACE)
                ptrace_report_syscall_exit(regs, step);
}

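/*
 * Entry/exit pair for interrupts which hit user mode: establish state
 * on entry; on exit run the exit-to-user work loop and restore state.
 */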
noinstr void irqentry_enter_from_user_mode(struct pt_regs *regs)
{
        enter_from_user_mode(regs);
}

noinstr void irqentry_exit_to_user_mode(struct pt_regs *regs)
{
        instrumentation_begin();
        exit_to_user_mode_prepare(regs);
        instrumentation_end();
        exit_to_user_mode();
}

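/*
 * irqentry_enter() and irqentry_exit() form a pair: the state returned
 * by irqentry_enter() must be handed to the matching irqentry_exit().
 * A typical architecture handler looks roughly like this (a sketch
 * only, the handler name is hypothetical):
 *
 *      irqentry_state_t state = irqentry_enter(regs);
 *
 *      instrumentation_begin();
 *      handle_the_irq(regs);
 *      instrumentation_end();
 *
 *      irqentry_exit(regs, state);
 */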
noinstr irqentry_state_t irqentry_enter(struct pt_regs *regs)
{
        irqentry_state_t ret = {
                .exit_rcu = false,
        };

        if (user_mode(regs)) {
                irqentry_enter_from_user_mode(regs);
                return ret;
        }

        /*
         * If this entry hit the idle task invoke ct_irq_enter() whether
         * RCU is watching or not.
         *
         * Interrupts can nest when the first interrupt invokes softirq
         * processing on return which enables interrupts.
         *
         * Scheduler ticks in the idle task can mark quiescent state and
         * terminate a grace period, if and only if the timer interrupt is
         * not nested into another interrupt.
         *
         * Checking for rcu_is_watching() here would prevent the nesting
         * interrupt from invoking ct_irq_enter(). If that nested interrupt
         * is the tick then rcu_flavor_sched_clock_irq() would wrongly
         * assume that it is the first interrupt and eventually claim
         * quiescent state and end grace periods prematurely.
         *
         * Unconditionally invoke ct_irq_enter() so RCU state stays
         * consistent.
         *
         * TINY_RCU does not support EQS, so let the compiler eliminate
         * this part when enabled.
         */
        if (!IS_ENABLED(CONFIG_TINY_RCU) && is_idle_task(current)) {
                /*
                 * If RCU is not watching then the same careful
                 * sequence vs. lockdep and tracing is required
                 * as in irqentry_enter_from_user_mode().
                 */
                lockdep_hardirqs_off(CALLER_ADDR0);
                ct_irq_enter();
                instrumentation_begin();
                kmsan_unpoison_entry_regs(regs);
                trace_hardirqs_off_finish();
                instrumentation_end();

                ret.exit_rcu = true;
                return ret;
        }

        /*
         * If RCU is watching then RCU only wants to check whether it needs
         * to restart the tick in NOHZ mode. rcu_irq_enter_check_tick()
         * already contains a warning when RCU is not watching, so no point
         * in having another one here.
         */
        lockdep_hardirqs_off(CALLER_ADDR0);
        instrumentation_begin();
        kmsan_unpoison_entry_regs(regs);
        rcu_irq_enter_check_tick();
        trace_hardirqs_off_finish();
        instrumentation_end();

        return ret;
}

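/*
 * Reschedule on return from an interrupt if the interrupted kernel
 * context is preemptible and a reschedule is pending.
 */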
void raw_irqentry_exit_cond_resched(void)
{
        if (!preempt_count()) {
                /* Sanity check RCU and thread stack */
                rcu_irq_exit_check_preempt();
                if (IS_ENABLED(CONFIG_DEBUG_ENTRY))
                        WARN_ON_ONCE(!on_thread_stack());
                if (need_resched())
                        preempt_schedule_irq();
        }
}
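/*
 * With CONFIG_PREEMPT_DYNAMIC the preemption mode is selected at boot
 * time: via a static call where the architecture supports it, otherwise
 * via a static key which gates the call to the raw function.
 */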
#ifdef CONFIG_PREEMPT_DYNAMIC
#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
DEFINE_STATIC_CALL(irqentry_exit_cond_resched, raw_irqentry_exit_cond_resched);
#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
DEFINE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
void dynamic_irqentry_exit_cond_resched(void)
{
        if (!static_branch_unlikely(&sk_dynamic_irqentry_exit_cond_resched))
                return;
        raw_irqentry_exit_cond_resched();
}
#endif
#endif

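/*
 * Counterpart to irqentry_enter(): @state must be the value returned by
 * the matching irqentry_enter() invocation.
 */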
noinstr void irqentry_exit(struct pt_regs *regs, irqentry_state_t state)
{
        lockdep_assert_irqs_disabled();

        /* Check whether this returns to user mode */
        if (user_mode(regs)) {
                irqentry_exit_to_user_mode(regs);
        } else if (!regs_irqs_disabled(regs)) {
                /*
                 * If RCU was not watching on entry this needs to be done
                 * carefully and needs the same ordering of lockdep/tracing
                 * and RCU as the return to user mode path.
                 */
                if (state.exit_rcu) {
                        instrumentation_begin();
                        /* Tell the tracer that IRET will enable interrupts */
                        trace_hardirqs_on_prepare();
                        lockdep_hardirqs_on_prepare();
                        instrumentation_end();
                        ct_irq_exit();
                        lockdep_hardirqs_on(CALLER_ADDR0);
                        return;
                }

                instrumentation_begin();
                if (IS_ENABLED(CONFIG_PREEMPTION))
                        irqentry_exit_cond_resched();

                /* Covers both tracing and lockdep */
                trace_hardirqs_on();
                instrumentation_end();
        } else {
                /*
                 * IRQ flags state is correct already. Just tell RCU if it
                 * was not watching on entry.
                 */
                if (state.exit_rcu)
                        ct_irq_exit();
        }
}

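/*
 * Enter/exit pair for NMIs and NMI-like exceptions. As with regular
 * interrupts, the state returned by irqentry_nmi_enter() must be passed
 * to the matching irqentry_nmi_exit().
 */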
irqentry_state_t noinstr irqentry_nmi_enter(struct pt_regs *regs)
{
        irqentry_state_t irq_state;

        irq_state.lockdep = lockdep_hardirqs_enabled();

        __nmi_enter();
        lockdep_hardirqs_off(CALLER_ADDR0);
        lockdep_hardirq_enter();
        ct_nmi_enter();

        instrumentation_begin();
        kmsan_unpoison_entry_regs(regs);
        trace_hardirqs_off_finish();
        ftrace_nmi_enter();
        instrumentation_end();

        return irq_state;
}

void noinstr irqentry_nmi_exit(struct pt_regs *regs, irqentry_state_t irq_state)
{
        instrumentation_begin();
        ftrace_nmi_exit();
        if (irq_state.lockdep) {
                trace_hardirqs_on_prepare();
                lockdep_hardirqs_on_prepare();
        }
        instrumentation_end();

        ct_nmi_exit();
        lockdep_hardirq_exit();
        if (irq_state.lockdep)
                lockdep_hardirqs_on(CALLER_ADDR0);
        __nmi_exit();
}