/* SPDX-License-Identifier: GPL-2.0 */

#ifdef CONFIG_SCHEDSTATS

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
{
	if (rq) {
		rq->rq_sched_info.run_delay += delta;
		rq->rq_sched_info.pcount++;
	}
}

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_depart(struct rq *rq, unsigned long long delta)
{
	if (rq)
		rq->rq_cpu_time += delta;
}
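
/*
 * Expects runqueue lock to be held for atomicity of update.
 * Unlike rq_sched_info_arrive(), a dequeue only charges the time spent
 * waiting back to run_delay; pcount is untouched because the task never
 * got to run.
 */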
static inline void
rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
{
	if (rq)
		rq->rq_sched_info.run_delay += delta;
}

#define schedstat_enabled()		static_branch_unlikely(&sched_schedstats)
#define __schedstat_inc(var)		do { var++; } while (0)
#define schedstat_inc(var)		do { if (schedstat_enabled()) { var++; } } while (0)
#define __schedstat_add(var, amt)	do { var += (amt); } while (0)
#define schedstat_add(var, amt)		do { if (schedstat_enabled()) { var += (amt); } } while (0)
#define __schedstat_set(var, val)	do { var = (val); } while (0)
#define schedstat_set(var, val)		do { if (schedstat_enabled()) { var = (val); } } while (0)
#define schedstat_val(var)		(var)
#define schedstat_val_or_zero(var)	((schedstat_enabled()) ? (var) : 0)
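
/*
 * Usage sketch (illustrative): the schedstat_*() helpers bake in the
 * static-branch check, while the __schedstat_*() variants are for
 * callers that have already tested schedstat_enabled() once for a
 * batch of updates, e.g.:
 *
 *	if (schedstat_enabled()) {
 *		__schedstat_inc(rq->rq_sched_info.pcount);
 *		__schedstat_add(rq->rq_sched_info.run_delay, delta);
 *	}
 */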

#else /* !CONFIG_SCHEDSTATS: */
static inline void rq_sched_info_arrive  (struct rq *rq, unsigned long long delta) { }
static inline void rq_sched_info_dequeued(struct rq *rq, unsigned long long delta) { }
static inline void rq_sched_info_depart  (struct rq *rq, unsigned long long delta) { }
# define schedstat_enabled()		0
# define __schedstat_inc(var)		do { } while (0)
# define schedstat_inc(var)		do { } while (0)
# define __schedstat_add(var, amt)	do { } while (0)
# define schedstat_add(var, amt)	do { } while (0)
# define __schedstat_set(var, val)	do { } while (0)
# define schedstat_set(var, val)	do { } while (0)
# define schedstat_val(var)		0
# define schedstat_val_or_zero(var)	0
#endif /* CONFIG_SCHEDSTATS */
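
/*
 * Note: the per-rq counters above surface in /proc/schedstat, and the
 * per-task sched_info fields (kept under CONFIG_SCHED_INFO below) in
 * /proc/<pid>/schedstat. The static branch is toggled at runtime via
 * the kernel.sched_schedstats sysctl or the schedstats= boot option.
 */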

#ifdef CONFIG_PSI
/*
 * PSI tracks state that persists across sleeps, such as iowaits and
 * memory stalls. As a result, it has to distinguish between sleeps,
 * where a task's runnable state changes, and requeues, where a task
 * and its state are being moved between CPUs and runqueues.
 */
static inline void psi_enqueue(struct task_struct *p, bool wakeup)
{
	int clear = 0, set = TSK_RUNNING;

	if (static_branch_likely(&psi_disabled))
		return;

	if (!wakeup || p->sched_psi_wake_requeue) {
		/* Requeue or migration: sleep-persistent states move along */
		if (p->in_memstall)
			set |= TSK_MEMSTALL;
		if (p->sched_psi_wake_requeue)
			p->sched_psi_wake_requeue = 0;
	} else {
		/* Genuine wakeup from sleep: the iowait stall ends here */
		if (p->in_iowait)
			clear |= TSK_IOWAIT;
	}

	psi_task_change(p, clear, set);
}

static inline void psi_dequeue(struct task_struct *p, bool sleep)
{
	int clear = TSK_RUNNING;

	if (static_branch_likely(&psi_disabled))
		return;

	/*
	 * A voluntary sleep is a dequeue followed by a task switch. To
	 * avoid walking all ancestors twice, psi_task_switch() handles
	 * TSK_RUNNING and TSK_IOWAIT for us when it moves TSK_ONCPU.
	 * Do nothing here.
	 */
	if (sleep)
		return;

	if (p->in_memstall)
		clear |= TSK_MEMSTALL;

	psi_task_change(p, clear, 0);
}

static inline void psi_ttwu_dequeue(struct task_struct *p)
{
	if (static_branch_likely(&psi_disabled))
		return;
	/*
	 * Is the task being migrated during a wakeup? Make sure to
	 * deregister its sleep-persistent psi states from the old
	 * queue, and let psi_enqueue() know it has to requeue.
	 */
	if (unlikely(p->in_iowait || p->in_memstall)) {
		struct rq_flags rf;
		struct rq *rq;
		int clear = 0;

		if (p->in_iowait)
			clear |= TSK_IOWAIT;
		if (p->in_memstall)
			clear |= TSK_MEMSTALL;

		rq = __task_rq_lock(p, &rf);
		psi_task_change(p, clear, 0);
		p->sched_psi_wake_requeue = 1;
		__task_rq_unlock(rq, &rf);
	}
}
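
/*
 * Hands the context switch to psi_task_switch(), which moves TSK_ONCPU
 * from prev to next and, on a voluntary sleep, also clears prev's
 * TSK_RUNNING and TSK_IOWAIT (see psi_dequeue() above).
 */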
static inline void psi_sched_switch(struct task_struct *prev,
				    struct task_struct *next,
				    bool sleep)
{
	if (static_branch_likely(&psi_disabled))
		return;

	psi_task_switch(prev, next, sleep);
}

#else /* !CONFIG_PSI: */
static inline void psi_enqueue(struct task_struct *p, bool wakeup) {}
static inline void psi_dequeue(struct task_struct *p, bool sleep) {}
static inline void psi_ttwu_dequeue(struct task_struct *p) {}
static inline void psi_sched_switch(struct task_struct *prev,
				    struct task_struct *next,
				    bool sleep) {}
#endif /* CONFIG_PSI */
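
/*
 * Illustrative call sites (a sketch; the authoritative callers live in
 * kernel/sched/core.c): enqueue_task() invokes psi_enqueue(p, wakeup),
 * dequeue_task() invokes psi_dequeue(p, sleep), try_to_wake_up() calls
 * psi_ttwu_dequeue() when migrating a waking task to another CPU, and
 * __schedule() calls psi_sched_switch() once prev != next is known.
 */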

#ifdef CONFIG_SCHED_INFO
static inline void sched_info_reset_dequeued(struct task_struct *t)
{
	t->sched_info.last_queued = 0;
}

/*
 * We are interested in knowing how long it was from the *first* time a
 * task was queued to the time that it finally hit a CPU. We call this
 * routine from dequeue_task() to account for possible rq->clock skew
 * across CPUs: the delta taken on each CPU cancels out the skew.
 */
static inline void sched_info_dequeued(struct rq *rq, struct task_struct *t)
{
	unsigned long long now = rq_clock(rq), delta = 0;

	if (sched_info_on()) {
		if (t->sched_info.last_queued)
			delta = now - t->sched_info.last_queued;
	}
	sched_info_reset_dequeued(t);
	t->sched_info.run_delay += delta;

	rq_sched_info_dequeued(rq, delta);
}
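
/*
 * Illustration of the skew argument: last_queued is always stamped and
 * consumed against the clock of the rq the task currently sits on. A
 * task queued at rq_clock == 100 and dequeued on that same rq at
 * rq_clock == 160 contributes a delta of 60; when it migrates, the
 * interval is closed out here first and reopened by sched_info_queued()
 * on the new rq, so no delta ever spans two rq->clock domains.
 */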

/*
 * Called when a task finally hits the CPU. We can now calculate how
 * long it was waiting to run. We also note when it began so that we
 * can keep stats on how long its timeslice is.
 */
static void sched_info_arrive(struct rq *rq, struct task_struct *t)
{
	unsigned long long now = rq_clock(rq), delta = 0;

	if (t->sched_info.last_queued)
		delta = now - t->sched_info.last_queued;
	sched_info_reset_dequeued(t);
	t->sched_info.run_delay += delta;
	t->sched_info.last_arrival = now;
	t->sched_info.pcount++;

	rq_sched_info_arrive(rq, delta);
}

/*
 * This function is only called from enqueue_task(), and it only updates
 * the timestamp if it is not already set. It's assumed that
 * sched_info_dequeued() will clear that stamp when appropriate.
 */
static inline void sched_info_queued(struct rq *rq, struct task_struct *t)
{
	if (sched_info_on()) {
		if (!t->sched_info.last_queued)
			t->sched_info.last_queued = rq_clock(rq);
	}
}

/*
 * Called when a process ceases being the active-running process
 * involuntarily, typically because it expired its time slice (this may
 * also be called when switching to the idle task). Now we can calculate
 * how long we ran. Also, if the process is still in the TASK_RUNNING
 * state, call sched_info_queued() to mark that it has now again started
 * waiting on the runqueue.
 */
static inline void sched_info_depart(struct rq *rq, struct task_struct *t)
{
	unsigned long long delta = rq_clock(rq) - t->sched_info.last_arrival;

	rq_sched_info_depart(rq, delta);

	if (t->state == TASK_RUNNING)
		sched_info_queued(rq, t);
}

/*
 * Called when tasks are switched involuntarily, typically because they
 * expired their time slice. (This may also be called when switching to
 * or from the idle task.) We are only called when prev != next.
 */
static inline void
__sched_info_switch(struct rq *rq, struct task_struct *prev, struct task_struct *next)
{
	/*
	 * prev now departs the CPU. It's not interesting to record
	 * stats about how efficient we were at scheduling the idle
	 * process, however.
	 */
	if (prev != rq->idle)
		sched_info_depart(rq, prev);

	if (next != rq->idle)
		sched_info_arrive(rq, next);
}

static inline void
sched_info_switch(struct rq *rq, struct task_struct *prev, struct task_struct *next)
{
	if (sched_info_on())
		__sched_info_switch(rq, prev, next);
}
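
/*
 * Lifecycle sketch (illustrative) of the delay accounting above: a task
 * woken at rq_clock == t0 gets last_queued = t0 via sched_info_queued();
 * when it is picked at t1, sched_info_arrive() adds t1 - t0 to run_delay
 * and records last_arrival = t1; when it switches out at t2,
 * sched_info_depart() charges t2 - t1 of CPU time to the runqueue and,
 * if the task is still runnable, restamps last_queued = t2.
 */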

#else /* !CONFIG_SCHED_INFO: */
# define sched_info_queued(rq, t)	do { } while (0)
# define sched_info_reset_dequeued(t)	do { } while (0)
# define sched_info_dequeued(rq, t)	do { } while (0)
# define sched_info_depart(rq, t)	do { } while (0)
# define sched_info_arrive(rq, next)	do { } while (0)
# define sched_info_switch(rq, t, next)	do { } while (0)
#endif /* CONFIG_SCHED_INFO */