// SPDX-License-Identifier: GPL-2.0-only
/*
 * Context tracking: Probe on high level context boundaries such as kernel
 * and userspace. This includes syscalls and exceptions entry/exit.
 *
 * This is used by RCU to remove its dependency on the timer tick while a CPU
 * runs in userspace.
 *
 * Started by Frederic Weisbecker:
 *
 * Copyright (C) 2012 Red Hat, Inc., Frederic Weisbecker <fweisbec@redhat.com>
 *
 * Many thanks to Gilad Ben-Yossef, Paul McKenney, Ingo Molnar, Andrew Morton,
 * Steven Rostedt, Peter Zijlstra for suggestions and improvements.
 *
 */

#include <linux/context_tracking.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/hardirq.h>
#include <linux/export.h>
#include <linux/kprobes.h>

#define CREATE_TRACE_POINTS
#include <trace/events/context_tracking.h>

DEFINE_STATIC_KEY_FALSE(context_tracking_enabled);
EXPORT_SYMBOL_GPL(context_tracking_enabled);

DEFINE_PER_CPU(struct context_tracking, context_tracking);
EXPORT_SYMBOL_GPL(context_tracking);

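/*
 * Per-CPU recursion guard: a context tracking update must not re-enter
 * itself (an exception or a tracing callback could fire in the middle of
 * a state update), so every update is bracketed by the two helpers below.
 */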
static bool context_tracking_recursion_enter(void)
{
	int recursion;

	recursion = __this_cpu_inc_return(context_tracking.recursion);
	if (recursion == 1)
		return true;

	WARN_ONCE((recursion < 1), "Invalid context tracking recursion value %d\n", recursion);
	__this_cpu_dec(context_tracking.recursion);

	return false;
}

static void context_tracking_recursion_exit(void)
{
	__this_cpu_dec(context_tracking.recursion);
}

/**
 * context_tracking_enter - Inform the context tracking that the CPU is going
 *                          to enter user or guest space mode.
 *
 * This function must be called right before we switch from the kernel
 * to user or guest space, when it's guaranteed the remaining kernel
 * instructions to execute won't use any RCU read side critical section
 * because this function sets RCU in extended quiescent state.
 */
void __context_tracking_enter(enum ctx_state state)
{
	/* Kernel threads aren't supposed to go to userspace */
	WARN_ON_ONCE(!current->mm);

	if (!context_tracking_recursion_enter())
		return;

	if (__this_cpu_read(context_tracking.state) != state) {
		if (__this_cpu_read(context_tracking.active)) {
			/*
			 * At this stage, only low level arch entry code remains and
			 * then we'll run in userspace. We can assume there won't be
			 * any RCU read-side critical section until the next call to
			 * user_exit() or rcu_irq_enter(). Let's remove RCU's dependency
			 * on the tick.
			 */
			if (state == CONTEXT_USER) {
				trace_user_enter(0);
				vtime_user_enter(current);
			}
			rcu_user_enter();
		}
		/*
		 * Even if context tracking is disabled on this CPU, because it's outside
		 * the full dynticks mask for example, we still have to keep track of the
		 * context transitions and states to prevent inconsistency on those of
		 * other CPUs.
		 * If a task triggers an exception in userspace, sleeps in the exception
		 * handler and then migrates to another CPU, that new CPU must know where
		 * the exception returns by the time we call exception_exit().
		 * This information can only be provided by the previous CPU when it called
		 * exception_enter().
		 * OTOH we can spare the calls to vtime and RCU when context_tracking.active
		 * is false because we know that CPU is not tickless.
		 */
		__this_cpu_write(context_tracking.state, state);
	}
	context_tracking_recursion_exit();
}
NOKPROBE_SYMBOL(__context_tracking_enter);
EXPORT_SYMBOL_GPL(__context_tracking_enter);
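/*
 * Illustrative only (the helpers live in <linux/context_tracking.h>): the
 * migration scenario described above is what the exception_enter() /
 * exception_exit() pair keeps consistent:
 *
 *	enum ctx_state prev_state = exception_enter();
 *	... handle the exception, possibly sleeping and migrating ...
 *	exception_exit(prev_state);
 */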

void context_tracking_enter(enum ctx_state state)
{
	unsigned long flags;

	/*
	 * Some contexts may involve an exception occurring in an irq,
	 * leading to that nesting:
	 * rcu_irq_enter() rcu_user_exit() rcu_user_exit() rcu_irq_exit()
	 * This would mess up the dyntick_nesting count though. And rcu_irq_*()
	 * helpers are enough to protect RCU uses inside the exception. So
	 * just return immediately if we detect we are in an IRQ.
	 */
	if (in_interrupt())
		return;

	local_irq_save(flags);
	__context_tracking_enter(state);
	local_irq_restore(flags);
}
NOKPROBE_SYMBOL(context_tracking_enter);
EXPORT_SYMBOL_GPL(context_tracking_enter);
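/*
 * Thin wrapper, presumably kept so that callers which can't easily pass a
 * ctx_state argument (e.g. architecture entry assembly) can still drive
 * context tracking.
 */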
void context_tracking_user_enter(void)
{
	user_enter();
}
NOKPROBE_SYMBOL(context_tracking_user_enter);

/**
 * context_tracking_exit - Inform the context tracking that the CPU is
 *                         exiting user or guest mode and entering the kernel.
 *
 * This function must be called after we entered the kernel from user or
 * guest space, before any use of RCU read side critical section. This
 * potentially includes any high level kernel code like syscalls, exceptions,
 * signal handling, etc...
 *
 * This call supports re-entrancy. This way it can be called from any exception
 * handler without needing to know if we came from userspace or not.
 */
void __context_tracking_exit(enum ctx_state state)
{
	if (!context_tracking_recursion_enter())
		return;

	if (__this_cpu_read(context_tracking.state) == state) {
		if (__this_cpu_read(context_tracking.active)) {
			/*
			 * We are going to run code that may use RCU. Inform
			 * RCU core about that (ie: we may need the tick again).
			 */
			rcu_user_exit();
			if (state == CONTEXT_USER) {
				vtime_user_exit(current);
				trace_user_exit(0);
			}
		}
		__this_cpu_write(context_tracking.state, CONTEXT_KERNEL);
	}
	context_tracking_recursion_exit();
}
NOKPROBE_SYMBOL(__context_tracking_exit);
EXPORT_SYMBOL_GPL(__context_tracking_exit);

void context_tracking_exit(enum ctx_state state)
{
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);
	__context_tracking_exit(state);
	local_irq_restore(flags);
}
NOKPROBE_SYMBOL(context_tracking_exit);
EXPORT_SYMBOL_GPL(context_tracking_exit);
void context_tracking_user_exit(void)
{
	user_exit();
}
NOKPROBE_SYMBOL(context_tracking_user_exit);
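/*
 * Illustrative only (not part of this file): an architecture's entry code
 * typically brackets a user/kernel round trip like this:
 *
 *	user_exit();		// on syscall/exception entry from userspace
 *	...			// kernel code runs, RCU is watching again
 *	user_enter();		// right before returning to userspace
 *
 * Note that the helpers above are blacklisted from kprobes
 * (NOKPROBE_SYMBOL) because a probe could otherwise run at a point where
 * RCU is not watching the CPU.
 */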
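/*
 * Enable context tracking on @cpu. Called at early boot, typically from
 * the NO_HZ_FULL setup code, while init is still the only task (see the
 * TIF_NOHZ propagation below).
 */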
void __init context_tracking_cpu_set(int cpu)
{
	static __initdata bool initialized = false;

	if (!per_cpu(context_tracking.active, cpu)) {
		per_cpu(context_tracking.active, cpu) = true;
		static_branch_inc(&context_tracking_enabled);
	}

	if (initialized)
		return;

	/*
	 * Set TIF_NOHZ to init/0 and let it propagate to all tasks through fork
	 * This assumes that init is the only task at this early boot stage.
	 */
	set_tsk_thread_flag(&init_task, TIF_NOHZ);
	WARN_ON_ONCE(!tasklist_empty());

	initialized = true;
}
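/*
 * CONFIG_CONTEXT_TRACKING_FORCE probes user/kernel boundaries on every CPU
 * by default, mainly to exercise this subsystem without needing a
 * nohz_full= boot parameter.
 */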
#ifdef CONFIG_CONTEXT_TRACKING_FORCE
void __init context_tracking_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		context_tracking_cpu_set(cpu);
}
#endif