Commit | Line | Data |
---|---|---|
b2441318 | 1 | /* SPDX-License-Identifier: GPL-2.0 */ |
425e0968 IM |
2 | |
3 | #ifdef CONFIG_SCHEDSTATS | |
b5aadf7f | 4 | |
425e0968 IM |
5 | /* |
6 | * Expects runqueue lock to be held for atomicity of update | |
7 | */ | |
8 | static inline void | |
9 | rq_sched_info_arrive(struct rq *rq, unsigned long long delta) | |
10 | { | |
11 | if (rq) { | |
12 | rq->rq_sched_info.run_delay += delta; | |
2d72376b | 13 | rq->rq_sched_info.pcount++; |
425e0968 IM |
14 | } |
15 | } | |
16 | ||
17 | /* | |
18 | * Expects runqueue lock to be held for atomicity of update | |
19 | */ | |
20 | static inline void | |
21 | rq_sched_info_depart(struct rq *rq, unsigned long long delta) | |
22 | { | |
23 | if (rq) | |
9c2c4802 | 24 | rq->rq_cpu_time += delta; |
425e0968 | 25 | } |
46ac22ba AG |
26 | |
27 | static inline void | |
28 | rq_sched_info_dequeued(struct rq *rq, unsigned long long delta) | |
29 | { | |
30 | if (rq) | |
31 | rq->rq_sched_info.run_delay += delta; | |
32 | } | |
/*
 * schedstat accessors: the schedstat_*() forms are compiled out via a
 * static branch when schedstats are disabled at runtime; the
 * __schedstat_*() forms skip that check and are for callers that have
 * already tested schedstat_enabled() themselves.
 */
#define schedstat_enabled() static_branch_unlikely(&sched_schedstats)
#define __schedstat_inc(var) do { var++; } while (0)
#define schedstat_inc(var) do { if (schedstat_enabled()) { var++; } } while (0)
#define __schedstat_add(var, amt) do { var += (amt); } while (0)
#define schedstat_add(var, amt) do { if (schedstat_enabled()) { var += (amt); } } while (0)
#define __schedstat_set(var, val) do { var = (val); } while (0)
#define schedstat_set(var, val) do { if (schedstat_enabled()) { var = (val); } } while (0)
#define schedstat_val(var) (var)
#define schedstat_val_or_zero(var) ((schedstat_enabled()) ? (var) : 0)
42 | ||
#else /* !CONFIG_SCHEDSTATS: */
/*
 * Schedstats disabled at build time: all accounting hooks and stat
 * macros compile away to nothing so callers need no #ifdefs.
 */
static inline void rq_sched_info_arrive (struct rq *rq, unsigned long long delta) { }
static inline void rq_sched_info_dequeued(struct rq *rq, unsigned long long delta) { }
static inline void rq_sched_info_depart (struct rq *rq, unsigned long long delta) { }
# define schedstat_enabled() 0
# define __schedstat_inc(var) do { } while (0)
# define schedstat_inc(var) do { } while (0)
# define __schedstat_add(var, amt) do { } while (0)
# define schedstat_add(var, amt) do { } while (0)
# define __schedstat_set(var, val) do { } while (0)
# define schedstat_set(var, val) do { } while (0)
# define schedstat_val(var) 0
# define schedstat_val_or_zero(var) 0
#endif /* CONFIG_SCHEDSTATS */
425e0968 | 57 | |
eb414681 JW |
58 | #ifdef CONFIG_PSI |
59 | /* | |
60 | * PSI tracks state that persists across sleeps, such as iowaits and | |
61 | * memory stalls. As a result, it has to distinguish between sleeps, | |
62 | * where a task's runnable state changes, and requeues, where a task | |
63 | * and its state are being moved between CPUs and runqueues. | |
64 | */ | |
65 | static inline void psi_enqueue(struct task_struct *p, bool wakeup) | |
66 | { | |
67 | int clear = 0, set = TSK_RUNNING; | |
68 | ||
e0c27447 | 69 | if (static_branch_likely(&psi_disabled)) |
eb414681 JW |
70 | return; |
71 | ||
72 | if (!wakeup || p->sched_psi_wake_requeue) { | |
73 | if (p->flags & PF_MEMSTALL) | |
74 | set |= TSK_MEMSTALL; | |
75 | if (p->sched_psi_wake_requeue) | |
76 | p->sched_psi_wake_requeue = 0; | |
77 | } else { | |
78 | if (p->in_iowait) | |
79 | clear |= TSK_IOWAIT; | |
80 | } | |
81 | ||
82 | psi_task_change(p, clear, set); | |
83 | } | |
84 | ||
85 | static inline void psi_dequeue(struct task_struct *p, bool sleep) | |
86 | { | |
87 | int clear = TSK_RUNNING, set = 0; | |
88 | ||
e0c27447 | 89 | if (static_branch_likely(&psi_disabled)) |
eb414681 JW |
90 | return; |
91 | ||
92 | if (!sleep) { | |
93 | if (p->flags & PF_MEMSTALL) | |
94 | clear |= TSK_MEMSTALL; | |
95 | } else { | |
96 | if (p->in_iowait) | |
97 | set |= TSK_IOWAIT; | |
98 | } | |
99 | ||
100 | psi_task_change(p, clear, set); | |
101 | } | |
102 | ||
/*
 * Handle a wakeup that migrates the task to another CPU: its
 * sleep-persistent PSI states must be moved off the old runqueue.
 */
static inline void psi_ttwu_dequeue(struct task_struct *p)
{
	if (static_branch_likely(&psi_disabled))
		return;
	/*
	 * Is the task being migrated during a wakeup? Make sure to
	 * deregister its sleep-persistent psi states from the old
	 * queue, and let psi_enqueue() know it has to requeue.
	 */
	if (unlikely(p->in_iowait || (p->flags & PF_MEMSTALL))) {
		struct rq_flags rf;
		struct rq *rq;
		int clear = 0;

		if (p->in_iowait)
			clear |= TSK_IOWAIT;
		if (p->flags & PF_MEMSTALL)
			clear |= TSK_MEMSTALL;

		/* Take the old rq's lock so the state change and the
		 * requeue flag update are atomic w.r.t. that queue. */
		rq = __task_rq_lock(p, &rf);
		psi_task_change(p, clear, 0);
		p->sched_psi_wake_requeue = 1;
		__task_rq_unlock(rq, &rf);
	}
}
128 | ||
/* Per-tick hook: charge a tick of memstall time to the running task. */
static inline void psi_task_tick(struct rq *rq)
{
	if (static_branch_likely(&psi_disabled))
		return;

	if (unlikely(rq->curr->flags & PF_MEMSTALL))
		psi_memstall_tick(rq->curr, cpu_of(rq));
}
#else /* CONFIG_PSI */
/* PSI disabled at build time: all hooks compile away to nothing. */
static inline void psi_enqueue(struct task_struct *p, bool wakeup) {}
static inline void psi_dequeue(struct task_struct *p, bool sleep) {}
static inline void psi_ttwu_dequeue(struct task_struct *p) {}
static inline void psi_task_tick(struct rq *rq) {}
#endif /* CONFIG_PSI */
143 | ||
#ifdef CONFIG_SCHED_INFO
/* Forget the queued timestamp: the task is no longer waiting to run. */
static inline void sched_info_reset_dequeued(struct task_struct *t)
{
	t->sched_info.last_queued = 0;
}
149 | ||
425e0968 | 150 | /* |
d4a6f3c3 | 151 | * We are interested in knowing how long it was from the *first* time a |
97fb7a0a IM |
152 | * task was queued to the time that it finally hit a CPU, we call this routine |
153 | * from dequeue_task() to account for possible rq->clock skew across CPUs. The | |
154 | * delta taken on each CPU would annul the skew. | |
425e0968 | 155 | */ |
43148951 | 156 | static inline void sched_info_dequeued(struct rq *rq, struct task_struct *t) |
425e0968 | 157 | { |
43148951 | 158 | unsigned long long now = rq_clock(rq), delta = 0; |
46ac22ba AG |
159 | |
160 | if (unlikely(sched_info_on())) | |
161 | if (t->sched_info.last_queued) | |
162 | delta = now - t->sched_info.last_queued; | |
163 | sched_info_reset_dequeued(t); | |
164 | t->sched_info.run_delay += delta; | |
165 | ||
43148951 | 166 | rq_sched_info_dequeued(rq, delta); |
425e0968 IM |
167 | } |
168 | ||
169 | /* | |
97fb7a0a | 170 | * Called when a task finally hits the CPU. We can now calculate how |
425e0968 IM |
171 | * long it was waiting to run. We also note when it began so that we |
172 | * can keep stats on how long its timeslice is. | |
173 | */ | |
43148951 | 174 | static void sched_info_arrive(struct rq *rq, struct task_struct *t) |
425e0968 | 175 | { |
43148951 | 176 | unsigned long long now = rq_clock(rq), delta = 0; |
425e0968 IM |
177 | |
178 | if (t->sched_info.last_queued) | |
179 | delta = now - t->sched_info.last_queued; | |
46ac22ba | 180 | sched_info_reset_dequeued(t); |
425e0968 IM |
181 | t->sched_info.run_delay += delta; |
182 | t->sched_info.last_arrival = now; | |
2d72376b | 183 | t->sched_info.pcount++; |
425e0968 | 184 | |
43148951 | 185 | rq_sched_info_arrive(rq, delta); |
425e0968 IM |
186 | } |
187 | ||
188 | /* | |
425e0968 IM |
189 | * This function is only called from enqueue_task(), but also only updates |
190 | * the timestamp if it is already not set. It's assumed that | |
191 | * sched_info_dequeued() will clear that stamp when appropriate. | |
192 | */ | |
43148951 | 193 | static inline void sched_info_queued(struct rq *rq, struct task_struct *t) |
425e0968 | 194 | { |
97fb7a0a | 195 | if (unlikely(sched_info_on())) { |
425e0968 | 196 | if (!t->sched_info.last_queued) |
43148951 | 197 | t->sched_info.last_queued = rq_clock(rq); |
97fb7a0a | 198 | } |
425e0968 IM |
199 | } |
200 | ||
201 | /* | |
13b62e46 MT |
202 | * Called when a process ceases being the active-running process involuntarily |
203 | * due, typically, to expiring its time slice (this may also be called when | |
204 | * switching to the idle task). Now we can calculate how long we ran. | |
d4abc238 BR |
205 | * Also, if the process is still in the TASK_RUNNING state, call |
206 | * sched_info_queued() to mark that it has now again started waiting on | |
207 | * the runqueue. | |
425e0968 | 208 | */ |
43148951 | 209 | static inline void sched_info_depart(struct rq *rq, struct task_struct *t) |
425e0968 | 210 | { |
97fb7a0a | 211 | unsigned long long delta = rq_clock(rq) - t->sched_info.last_arrival; |
425e0968 | 212 | |
43148951 | 213 | rq_sched_info_depart(rq, delta); |
d4abc238 BR |
214 | |
215 | if (t->state == TASK_RUNNING) | |
43148951 | 216 | sched_info_queued(rq, t); |
425e0968 IM |
217 | } |
218 | ||
219 | /* | |
220 | * Called when tasks are switched involuntarily due, typically, to expiring | |
221 | * their time slice. (This may also be called when switching to or from | |
222 | * the idle task.) We are only called when prev != next. | |
223 | */ | |
224 | static inline void | |
97fb7a0a | 225 | __sched_info_switch(struct rq *rq, struct task_struct *prev, struct task_struct *next) |
425e0968 | 226 | { |
425e0968 | 227 | /* |
97fb7a0a | 228 | * prev now departs the CPU. It's not interesting to record |
425e0968 IM |
229 | * stats about how efficient we were at scheduling the idle |
230 | * process, however. | |
231 | */ | |
232 | if (prev != rq->idle) | |
43148951 | 233 | sched_info_depart(rq, prev); |
425e0968 IM |
234 | |
235 | if (next != rq->idle) | |
43148951 | 236 | sched_info_arrive(rq, next); |
425e0968 | 237 | } |
97fb7a0a | 238 | |
/* Entry point for context-switch accounting; cheap no-op when off. */
static inline void
sched_info_switch(struct rq *rq, struct task_struct *prev, struct task_struct *next)
{
	if (!unlikely(sched_info_on()))
		return;

	__sched_info_switch(rq, prev, next);
}
97fb7a0a IM |
245 | |
#else /* !CONFIG_SCHED_INFO: */
/* Scheduler-info accounting disabled: all hooks compile away. */
# define sched_info_queued(rq, t) do { } while (0)
# define sched_info_reset_dequeued(t) do { } while (0)
# define sched_info_dequeued(rq, t) do { } while (0)
# define sched_info_depart(rq, t) do { } while (0)
# define sched_info_arrive(rq, next) do { } while (0)
# define sched_info_switch(rq, t, next) do { } while (0)
#endif /* CONFIG_SCHED_INFO */