/* SPDX-License-Identifier: GPL-2.0 */

#ifdef CONFIG_SCHEDSTATS

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
{
	if (rq) {
		rq->rq_sched_info.run_delay += delta;
		rq->rq_sched_info.pcount++;
	}
}

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_depart(struct rq *rq, unsigned long long delta)
{
	if (rq)
		rq->rq_cpu_time += delta;
}

static inline void
rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
{
	if (rq)
		rq->rq_sched_info.run_delay += delta;
}
#define   schedstat_enabled()		static_branch_unlikely(&sched_schedstats)
#define __schedstat_inc(var)		do { var++; } while (0)
#define   schedstat_inc(var)		do { if (schedstat_enabled()) { var++; } } while (0)
#define __schedstat_add(var, amt)	do { var += (amt); } while (0)
#define   schedstat_add(var, amt)	do { if (schedstat_enabled()) { var += (amt); } } while (0)
#define __schedstat_set(var, val)	do { var = (val); } while (0)
#define   schedstat_set(var, val)	do { if (schedstat_enabled()) { var = (val); } } while (0)
#define   schedstat_val(var)		(var)
#define   schedstat_val_or_zero(var)	((schedstat_enabled()) ? (var) : 0)

#else /* !CONFIG_SCHEDSTATS: */
static inline void rq_sched_info_arrive  (struct rq *rq, unsigned long long delta) { }
static inline void rq_sched_info_dequeued(struct rq *rq, unsigned long long delta) { }
static inline void rq_sched_info_depart  (struct rq *rq, unsigned long long delta) { }
# define   schedstat_enabled()		0
# define __schedstat_inc(var)		do { } while (0)
# define   schedstat_inc(var)		do { } while (0)
# define __schedstat_add(var, amt)	do { } while (0)
# define   schedstat_add(var, amt)	do { } while (0)
# define __schedstat_set(var, val)	do { } while (0)
# define   schedstat_set(var, val)	do { } while (0)
# define   schedstat_val(var)		0
# define   schedstat_val_or_zero(var)	0
#endif /* CONFIG_SCHEDSTATS */
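
/*
 * Illustrative usage (not part of the original header): callers either use
 * the self-guarding schedstat_*() helpers directly, or check
 * schedstat_enabled() once and then batch unguarded __schedstat_*() updates,
 * roughly like:
 *
 *	schedstat_inc(rq->ttwu_count);
 *
 *	if (schedstat_enabled()) {
 *		__schedstat_set(se->statistics.wait_start, rq_clock(rq));
 *		__schedstat_inc(se->statistics.wait_count);
 *	}
 *
 * The field names above are only examples of the pattern; real call sites
 * live in the scheduler classes and in core.c.
 */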

#ifdef CONFIG_PSI
/*
 * PSI tracks state that persists across sleeps, such as iowaits and
 * memory stalls. As a result, it has to distinguish between sleeps,
 * where a task's runnable state changes, and requeues, where a task
 * and its state are being moved between CPUs and runqueues.
 */
static inline void psi_enqueue(struct task_struct *p, bool wakeup)
{
	int clear = 0, set = TSK_RUNNING;

	if (static_branch_likely(&psi_disabled))
		return;

	if (!wakeup || p->sched_psi_wake_requeue) {
		if (p->in_memstall)
			set |= TSK_MEMSTALL;
		if (p->sched_psi_wake_requeue)
			p->sched_psi_wake_requeue = 0;
	} else {
		if (p->in_iowait)
			clear |= TSK_IOWAIT;
	}

	psi_task_change(p, clear, set);
}

static inline void psi_dequeue(struct task_struct *p, bool sleep)
{
	int clear = TSK_RUNNING;

	if (static_branch_likely(&psi_disabled))
		return;

	/*
	 * A voluntary sleep is a dequeue followed by a task switch. To
	 * avoid walking all ancestors twice, psi_task_switch() handles
	 * TSK_RUNNING and TSK_IOWAIT for us when it moves TSK_ONCPU.
	 * Do nothing here.
	 */
	if (sleep)
		return;

	if (p->in_memstall)
		clear |= TSK_MEMSTALL;

	psi_task_change(p, clear, 0);
}
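
/*
 * Illustrative note on the voluntary-sleep case skipped above: the dequeue
 * is immediately followed by __schedule() calling psi_sched_switch(), and
 * psi_task_switch() then clears TSK_RUNNING (and sets TSK_IOWAIT for iowait
 * sleeps) together with moving TSK_ONCPU, so the cgroup ancestors are only
 * walked once.
 */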

static inline void psi_ttwu_dequeue(struct task_struct *p)
{
	if (static_branch_likely(&psi_disabled))
		return;
	/*
	 * Is the task being migrated during a wakeup? Make sure to
	 * deregister its sleep-persistent psi states from the old
	 * queue, and let psi_enqueue() know it has to requeue.
	 */
	if (unlikely(p->in_iowait || p->in_memstall)) {
		struct rq_flags rf;
		struct rq *rq;
		int clear = 0;

		if (p->in_iowait)
			clear |= TSK_IOWAIT;
		if (p->in_memstall)
			clear |= TSK_MEMSTALL;

		rq = __task_rq_lock(p, &rf);
		psi_task_change(p, clear, 0);
		p->sched_psi_wake_requeue = 1;
		__task_rq_unlock(rq, &rf);
	}
}
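
/*
 * Illustrative flow (not in the original source): when try_to_wake_up()
 * migrates a sleeping task that is in iowait or memstall, it calls
 * psi_ttwu_dequeue() before set_task_cpu(), clearing the persistent states
 * on the old runqueue and setting sched_psi_wake_requeue. The subsequent
 * psi_enqueue(p, true) on the new runqueue then takes the requeue path and
 * re-registers TSK_MEMSTALL there instead of treating it as a plain wakeup.
 */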

static inline void psi_sched_switch(struct task_struct *prev,
				    struct task_struct *next,
				    bool sleep)
{
	if (static_branch_likely(&psi_disabled))
		return;

	psi_task_switch(prev, next, sleep);
}

#else /* CONFIG_PSI */
static inline void psi_enqueue(struct task_struct *p, bool wakeup) {}
static inline void psi_dequeue(struct task_struct *p, bool sleep) {}
static inline void psi_ttwu_dequeue(struct task_struct *p) {}
static inline void psi_sched_switch(struct task_struct *prev,
				    struct task_struct *next,
				    bool sleep) {}
#endif /* CONFIG_PSI */

#ifdef CONFIG_SCHED_INFO
static inline void sched_info_reset_dequeued(struct task_struct *t)
{
	t->sched_info.last_queued = 0;
}

/*
 * We are interested in knowing how long it was from the *first* time a
 * task was queued to the time that it finally hit a CPU; we call this routine
 * from dequeue_task() to account for possible rq->clock skew across CPUs. The
 * delta taken on each CPU would annul the skew.
 */
static inline void sched_info_dequeued(struct rq *rq, struct task_struct *t)
{
	unsigned long long now = rq_clock(rq), delta = 0;

	if (sched_info_on()) {
		if (t->sched_info.last_queued)
			delta = now - t->sched_info.last_queued;
	}
	sched_info_reset_dequeued(t);
	t->sched_info.run_delay += delta;

	rq_sched_info_dequeued(rq, delta);
}
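
/*
 * Worked example (illustrative numbers): a task queued on CPU0 at
 * rq_clock() == 1000 and dequeued from CPU0 at rq_clock() == 1500 accounts
 * 500 ns of run_delay here. When it is queued again on CPU1, last_queued is
 * re-stamped from CPU1's clock, and the delta computed at arrival uses only
 * CPU1's clock as well. Because every delta pairs a last_queued and a now
 * read from the same runqueue clock, a constant offset between the CPUs'
 * clocks cancels out.
 */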

/*
 * Called when a task finally hits the CPU. We can now calculate how
 * long it was waiting to run. We also note when it began so that we
 * can keep stats on how long its timeslice is.
 */
static void sched_info_arrive(struct rq *rq, struct task_struct *t)
{
	unsigned long long now = rq_clock(rq), delta = 0;

	if (t->sched_info.last_queued)
		delta = now - t->sched_info.last_queued;
	sched_info_reset_dequeued(t);
	t->sched_info.run_delay += delta;
	t->sched_info.last_arrival = now;
	t->sched_info.pcount++;

	rq_sched_info_arrive(rq, delta);
}
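
/*
 * For reference: run_delay and pcount, together with the task's
 * sum_exec_runtime, are what /proc/<pid>/schedstat reports, so the
 * accounting above is directly user-visible.
 */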

/*
 * This function is only called from enqueue_task(), but also only updates
 * the timestamp if it is not already set. It's assumed that
 * sched_info_dequeued() will clear that stamp when appropriate.
 */
static inline void sched_info_queued(struct rq *rq, struct task_struct *t)
{
	if (sched_info_on()) {
		if (!t->sched_info.last_queued)
			t->sched_info.last_queued = rq_clock(rq);
	}
}

/*
 * Called when a process ceases being the actively running process
 * involuntarily due, typically, to expiring its time slice (this may also be
 * called when switching to the idle task). Now we can calculate how long we
 * ran. Also, if the process is still in the TASK_RUNNING state, call
 * sched_info_queued() to mark that it has now again started waiting on
 * the runqueue.
 */
static inline void sched_info_depart(struct rq *rq, struct task_struct *t)
{
	unsigned long long delta = rq_clock(rq) - t->sched_info.last_arrival;

	rq_sched_info_depart(rq, delta);

	if (t->state == TASK_RUNNING)
		sched_info_queued(rq, t);
}

/*
 * Called when tasks are switched involuntarily due, typically, to expiring
 * their time slice. (This may also be called when switching to or from
 * the idle task.) We are only called when prev != next.
 */
static inline void
__sched_info_switch(struct rq *rq, struct task_struct *prev, struct task_struct *next)
{
	/*
	 * prev now departs the CPU. It's not interesting to record
	 * stats about how efficient we were at scheduling the idle
	 * process, however.
	 */
	if (prev != rq->idle)
		sched_info_depart(rq, prev);

	if (next != rq->idle)
		sched_info_arrive(rq, next);
}

static inline void
sched_info_switch(struct rq *rq, struct task_struct *prev, struct task_struct *next)
{
	if (sched_info_on())
		__sched_info_switch(rq, prev, next);
}
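
/*
 * Lifecycle sketch (illustrative): the scheduler core drives these hooks
 * roughly as follows, which is where run_delay and pcount accumulate:
 *
 *	enqueue_task()        -> sched_info_queued()    stamp last_queued
 *	dequeue_task()        -> sched_info_dequeued()  migration/skew handling
 *	prepare_task_switch() -> sched_info_switch()
 *	    prev: sched_info_depart()   rq_cpu_time += now - last_arrival
 *	    next: sched_info_arrive()   run_delay   += now - last_queued
 */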

#else /* !CONFIG_SCHED_INFO: */
# define sched_info_queued(rq, t)	do { } while (0)
# define sched_info_reset_dequeued(t)	do { } while (0)
# define sched_info_dequeued(rq, t)	do { } while (0)
# define sched_info_depart(rq, t)	do { } while (0)
# define sched_info_arrive(rq, next)	do { } while (0)
# define sched_info_switch(rq, t, next)	do { } while (0)
#endif /* CONFIG_SCHED_INFO */