Commit | Line | Data |
---|---|---|
b2441318 | 1 | /* SPDX-License-Identifier: GPL-2.0 */ |
d0b6e04a LZ |
2 | #undef TRACE_SYSTEM |
3 | #define TRACE_SYSTEM sched | |
4 | ||
ea20d929 | 5 | #if !defined(_TRACE_SCHED_H) || defined(TRACE_HEADER_MULTI_READ) |
0a16b607 MD |
6 | #define _TRACE_SCHED_H |
7 | ||
f630c7c6 | 8 | #include <linux/kthread.h> |
6a3827d7 | 9 | #include <linux/sched/numa_balancing.h> |
0a16b607 | 10 | #include <linux/tracepoint.h> |
4ff16c25 | 11 | #include <linux/binfmts.h> |
0a16b607 | 12 | |
/*
 * Tracepoint for calling kthread_stop, performed to end a kthread:
 */
TRACE_EVENT(sched_kthread_stop,

	TP_PROTO(struct task_struct *t),

	TP_ARGS(t),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, t->comm, TASK_COMM_LEN);
		__entry->pid	= t->pid;
	),

	TP_printk("comm=%s pid=%d", __entry->comm, __entry->pid)
);

/*
 * Tracepoint for the return value of the kthread stopping:
 */
TRACE_EVENT(sched_kthread_stop_ret,

	TP_PROTO(int ret),

	TP_ARGS(ret),

	TP_STRUCT__entry(
		__field(	int,	ret	)
	),

	TP_fast_assign(
		__entry->ret	= ret;
	),

	TP_printk("ret=%d", __entry->ret)
);

/**
 * sched_kthread_work_queue_work - called when a work gets queued
 * @worker: pointer to the kthread_worker
 * @work: pointer to struct kthread_work
 *
 * This event occurs when a work is queued immediately or once a
 * delayed work is actually queued (ie: once the delay has been
 * reached).
 */
TRACE_EVENT(sched_kthread_work_queue_work,

	TP_PROTO(struct kthread_worker *worker,
		 struct kthread_work *work),

	TP_ARGS(worker, work),

	TP_STRUCT__entry(
		__field( void *,	work	)
		__field( void *,	function)
		__field( void *,	worker)
	),

	TP_fast_assign(
		__entry->work		= work;
		__entry->function	= work->func;
		__entry->worker		= worker;
	),

	TP_printk("work struct=%p function=%ps worker=%p",
		  __entry->work, __entry->function, __entry->worker)
);

/**
 * sched_kthread_work_execute_start - called immediately before the work callback
 * @work: pointer to struct kthread_work
 *
 * Allows to track kthread work execution.
 */
TRACE_EVENT(sched_kthread_work_execute_start,

	TP_PROTO(struct kthread_work *work),

	TP_ARGS(work),

	TP_STRUCT__entry(
		__field( void *,	work	)
		__field( void *,	function)
	),

	TP_fast_assign(
		__entry->work		= work;
		__entry->function	= work->func;
	),

	TP_printk("work struct %p: function %ps", __entry->work, __entry->function)
);

/**
 * sched_kthread_work_execute_end - called immediately after the work callback
 * @work: pointer to struct kthread_work
 * @function: pointer to worker function
 *
 * Allows to track kthread work execution.
 */
TRACE_EVENT(sched_kthread_work_execute_end,

	TP_PROTO(struct kthread_work *work, kthread_work_func_t function),

	TP_ARGS(work, function),

	TP_STRUCT__entry(
		__field( void *,	work	)
		__field( void *,	function)
	),

	TP_fast_assign(
		__entry->work		= work;
		__entry->function	= function;
	),

	TP_printk("work struct %p: function %ps", __entry->work, __entry->function)
);

/*
 * Tracepoint for waking up a task:
 */
DECLARE_EVENT_CLASS(sched_wakeup_template,

	TP_PROTO(struct task_struct *p),

	TP_ARGS(__perf_task(p)),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
		__field(	int,	prio			)
		__field(	int,	target_cpu		)
	),

	TP_fast_assign(
		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
		__entry->pid		= p->pid;
		__entry->prio		= p->prio; /* XXX SCHED_DEADLINE */
		__entry->target_cpu	= task_cpu(p);
	),

	TP_printk("comm=%s pid=%d prio=%d target_cpu=%03d",
		  __entry->comm, __entry->pid, __entry->prio,
		  __entry->target_cpu)
);

/*
 * Tracepoint called when waking a task; this tracepoint is guaranteed to be
 * called from the waking context.
 */
DEFINE_EVENT(sched_wakeup_template, sched_waking,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p));

/*
 * Tracepoint called when the task is actually woken; p->state == TASK_RUNNING.
 * It is not always called from the waking context.
 */
DEFINE_EVENT(sched_wakeup_template, sched_wakeup,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p));

/*
 * Tracepoint for waking up a new task:
 */
DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p));

#ifdef CREATE_TRACE_POINTS
/*
 * Helper used by the sched_switch tracepoint below to fold @prev_state
 * (and p->exit_state) into the single bitmask recorded in prev_state.
 * Only defined while the tracepoints themselves are being created.
 */
static inline long __trace_sched_switch_state(bool preempt,
					      unsigned int prev_state,
					      struct task_struct *p)
{
	unsigned int state;

#ifdef CONFIG_SCHED_DEBUG
	BUG_ON(p != current);
#endif /* CONFIG_SCHED_DEBUG */

	/*
	 * Preemption ignores task state, therefore preempted tasks are always
	 * RUNNING (we will not have dequeued if state != RUNNING).
	 */
	if (preempt)
		return TASK_REPORT_MAX;

	/*
	 * task_state_index() uses fls() and returns a value from 0-8 range.
	 * Decrement it by 1 (except TASK_RUNNING state i.e 0) before using
	 * it for left shift operation to get the correct task->state
	 * mapping.
	 */
	state = __task_state_index(prev_state, p->exit_state);

	return state ? (1 << (state - 1)) : state;
}
#endif /* CREATE_TRACE_POINTS */

/*
 * Tracepoint for task switches, performed by the scheduler:
 */
TRACE_EVENT(sched_switch,

	TP_PROTO(bool preempt,
		 struct task_struct *prev,
		 struct task_struct *next,
		 unsigned int prev_state),

	TP_ARGS(preempt, prev, next, prev_state),

	TP_STRUCT__entry(
		__array(	char,	prev_comm,	TASK_COMM_LEN	)
		__field(	pid_t,	prev_pid			)
		__field(	int,	prev_prio			)
		__field(	long,	prev_state			)
		__array(	char,	next_comm,	TASK_COMM_LEN	)
		__field(	pid_t,	next_pid			)
		__field(	int,	next_prio			)
	),

	TP_fast_assign(
		memcpy(__entry->prev_comm, prev->comm, TASK_COMM_LEN);
		__entry->prev_pid	= prev->pid;
		__entry->prev_prio	= prev->prio;
		__entry->prev_state	= __trace_sched_switch_state(preempt, prev_state, prev);
		memcpy(__entry->next_comm, next->comm, TASK_COMM_LEN);
		__entry->next_pid	= next->pid;
		__entry->next_prio	= next->prio;
		/* XXX SCHED_DEADLINE */
	),

	TP_printk("prev_comm=%s prev_pid=%d prev_prio=%d prev_state=%s%s ==> next_comm=%s next_pid=%d next_prio=%d",
		__entry->prev_comm, __entry->prev_pid, __entry->prev_prio,

		/* Decode the low bits as one-letter state flags, "R" if none set. */
		(__entry->prev_state & (TASK_REPORT_MAX - 1)) ?
		  __print_flags(__entry->prev_state & (TASK_REPORT_MAX - 1), "|",
				{ TASK_INTERRUPTIBLE, "S" },
				{ TASK_UNINTERRUPTIBLE, "D" },
				{ __TASK_STOPPED, "T" },
				{ __TASK_TRACED, "t" },
				{ EXIT_DEAD, "X" },
				{ EXIT_ZOMBIE, "Z" },
				{ TASK_PARKED, "P" },
				{ TASK_DEAD, "I" }) :
		  "R",

		/* TASK_REPORT_MAX marks a preempted (still runnable) task. */
		__entry->prev_state & TASK_REPORT_MAX ? "+" : "",
		__entry->next_comm, __entry->next_pid, __entry->next_prio)
);

/*
 * Tracepoint for a task being migrated:
 */
TRACE_EVENT(sched_migrate_task,

	TP_PROTO(struct task_struct *p, int dest_cpu),

	TP_ARGS(p, dest_cpu),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
		__field(	int,	prio			)
		__field(	int,	orig_cpu		)
		__field(	int,	dest_cpu		)
	),

	TP_fast_assign(
		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
		__entry->pid		= p->pid;
		__entry->prio		= p->prio; /* XXX SCHED_DEADLINE */
		__entry->orig_cpu	= task_cpu(p);
		__entry->dest_cpu	= dest_cpu;
	),

	TP_printk("comm=%s pid=%d prio=%d orig_cpu=%d dest_cpu=%d",
		  __entry->comm, __entry->pid, __entry->prio,
		  __entry->orig_cpu, __entry->dest_cpu)
);

/*
 * Common event class for tracepoints that record comm/pid/prio of one task.
 */
DECLARE_EVENT_CLASS(sched_process_template,

	TP_PROTO(struct task_struct *p),

	TP_ARGS(p),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
		__field(	int,	prio			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
		__entry->pid		= p->pid;
		__entry->prio		= p->prio; /* XXX SCHED_DEADLINE */
	),

	TP_printk("comm=%s pid=%d prio=%d",
		  __entry->comm, __entry->pid, __entry->prio)
);

/*
 * Tracepoint for freeing a task:
 */
DEFINE_EVENT(sched_process_template, sched_process_free,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p));

/*
 * Tracepoint for a task exiting:
 */
DEFINE_EVENT(sched_process_template, sched_process_exit,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p));

/*
 * Tracepoint for waiting on task to unschedule:
 */
DEFINE_EVENT(sched_process_template, sched_wait_task,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p));

/*
 * Tracepoint for a waiting task:
 */
TRACE_EVENT(sched_process_wait,

	TP_PROTO(struct pid *pid),

	TP_ARGS(pid),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
		__field(	int,	prio			)
	),

	TP_fast_assign(
		/* comm/prio are those of the waiter (current), pid is the waitee. */
		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
		__entry->pid		= pid_nr(pid);
		__entry->prio		= current->prio; /* XXX SCHED_DEADLINE */
	),

	TP_printk("comm=%s pid=%d prio=%d",
		  __entry->comm, __entry->pid, __entry->prio)
);

/*
 * Tracepoint for kernel_clone:
 */
TRACE_EVENT(sched_process_fork,

	TP_PROTO(struct task_struct *parent, struct task_struct *child),

	TP_ARGS(parent, child),

	TP_STRUCT__entry(
		__array(	char,	parent_comm,	TASK_COMM_LEN	)
		__field(	pid_t,	parent_pid			)
		__array(	char,	child_comm,	TASK_COMM_LEN	)
		__field(	pid_t,	child_pid			)
	),

	TP_fast_assign(
		memcpy(__entry->parent_comm, parent->comm, TASK_COMM_LEN);
		__entry->parent_pid	= parent->pid;
		memcpy(__entry->child_comm, child->comm, TASK_COMM_LEN);
		__entry->child_pid	= child->pid;
	),

	TP_printk("comm=%s pid=%d child_comm=%s child_pid=%d",
		__entry->parent_comm, __entry->parent_pid,
		__entry->child_comm, __entry->child_pid)
);

/*
 * Tracepoint for exec:
 */
TRACE_EVENT(sched_process_exec,

	TP_PROTO(struct task_struct *p, pid_t old_pid,
		 struct linux_binprm *bprm),

	TP_ARGS(p, old_pid, bprm),

	TP_STRUCT__entry(
		__string(	filename,	bprm->filename	)
		__field(	pid_t,		pid		)
		__field(	pid_t,		old_pid		)
	),

	TP_fast_assign(
		__assign_str(filename);
		__entry->pid		= p->pid;
		__entry->old_pid	= old_pid;
	),

	TP_printk("filename=%s pid=%d old_pid=%d", __get_str(filename),
		  __entry->pid, __entry->old_pid)
);

/**
 * sched_prepare_exec - called before setting up new exec
 * @task: pointer to the current task
 * @bprm: pointer to linux_binprm used for new exec
 *
 * Called before flushing the old exec, where @task is still unchanged, but at
 * the point of no return during switching to the new exec. At the point it is
 * called the exec will either succeed, or on failure terminate the task. Also
 * see the "sched_process_exec" tracepoint, which is called right after @task
 * has successfully switched to the new exec.
 */
TRACE_EVENT(sched_prepare_exec,

	TP_PROTO(struct task_struct *task, struct linux_binprm *bprm),

	TP_ARGS(task, bprm),

	TP_STRUCT__entry(
		__string(	interp,		bprm->interp	)
		__string(	filename,	bprm->filename	)
		__field(	pid_t,		pid		)
		__string(	comm,		task->comm	)
	),

	TP_fast_assign(
		__assign_str(interp);
		__assign_str(filename);
		__entry->pid = task->pid;
		__assign_str(comm);
	),

	TP_printk("interp=%s filename=%s pid=%d comm=%s",
		  __get_str(interp), __get_str(filename),
		  __entry->pid, __get_str(comm))
);

/*
 * The sched_stat_* tracepoints below compile to no-ops unless
 * CONFIG_SCHEDSTATS is enabled.
 */
#ifdef CONFIG_SCHEDSTATS
#define DEFINE_EVENT_SCHEDSTAT DEFINE_EVENT
#define DECLARE_EVENT_CLASS_SCHEDSTAT DECLARE_EVENT_CLASS
#else
#define DEFINE_EVENT_SCHEDSTAT DEFINE_EVENT_NOP
#define DECLARE_EVENT_CLASS_SCHEDSTAT DECLARE_EVENT_CLASS_NOP
#endif

/*
 * XXX the below sched_stat tracepoints only apply to SCHED_OTHER/BATCH/IDLE
 *     adding sched_stat support to SCHED_FIFO/RR would be welcome.
 */
DECLARE_EVENT_CLASS_SCHEDSTAT(sched_stat_template,

	TP_PROTO(struct task_struct *tsk, u64 delay),

	TP_ARGS(__perf_task(tsk), __perf_count(delay)),

	TP_STRUCT__entry(
		__array( char,	comm,	TASK_COMM_LEN	)
		__field( pid_t,	pid			)
		__field( u64,	delay			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
		__entry->pid	= tsk->pid;
		__entry->delay	= delay;
	),

	TP_printk("comm=%s pid=%d delay=%Lu [ns]",
			__entry->comm, __entry->pid,
			(unsigned long long)__entry->delay)
);

/*
 * Tracepoint for accounting wait time (time the task is runnable
 * but not actually running due to scheduler contention).
 */
DEFINE_EVENT_SCHEDSTAT(sched_stat_template, sched_stat_wait,
	     TP_PROTO(struct task_struct *tsk, u64 delay),
	     TP_ARGS(tsk, delay));

/*
 * Tracepoint for accounting sleep time (time the task is not runnable,
 * including iowait, see below).
 */
DEFINE_EVENT_SCHEDSTAT(sched_stat_template, sched_stat_sleep,
	     TP_PROTO(struct task_struct *tsk, u64 delay),
	     TP_ARGS(tsk, delay));

/*
 * Tracepoint for accounting iowait time (time the task is not runnable
 * due to waiting on IO to complete).
 */
DEFINE_EVENT_SCHEDSTAT(sched_stat_template, sched_stat_iowait,
	     TP_PROTO(struct task_struct *tsk, u64 delay),
	     TP_ARGS(tsk, delay));

/*
 * Tracepoint for accounting blocked time (time the task is in uninterruptible).
 */
DEFINE_EVENT_SCHEDSTAT(sched_stat_template, sched_stat_blocked,
	     TP_PROTO(struct task_struct *tsk, u64 delay),
	     TP_ARGS(tsk, delay));

/*
 * Tracepoint for accounting runtime (time the task is executing
 * on a CPU).
 */
DECLARE_EVENT_CLASS(sched_stat_runtime,

	TP_PROTO(struct task_struct *tsk, u64 runtime),

	TP_ARGS(tsk, __perf_count(runtime)),

	TP_STRUCT__entry(
		__array( char,	comm,	TASK_COMM_LEN	)
		__field( pid_t,	pid			)
		__field( u64,	runtime			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
		__entry->pid		= tsk->pid;
		__entry->runtime	= runtime;
	),

	TP_printk("comm=%s pid=%d runtime=%Lu [ns]",
			__entry->comm, __entry->pid,
			(unsigned long long)__entry->runtime)
);

DEFINE_EVENT(sched_stat_runtime, sched_stat_runtime,
	     TP_PROTO(struct task_struct *tsk, u64 runtime),
	     TP_ARGS(tsk, runtime));

/*
 * Tracepoint for showing priority inheritance modifying a tasks
 * priority.
 */
TRACE_EVENT(sched_pi_setprio,

	TP_PROTO(struct task_struct *tsk, struct task_struct *pi_task),

	TP_ARGS(tsk, pi_task),

	TP_STRUCT__entry(
		__array( char,	comm,	TASK_COMM_LEN	)
		__field( pid_t,	pid			)
		__field( int,	oldprio			)
		__field( int,	newprio			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
		__entry->pid		= tsk->pid;
		__entry->oldprio	= tsk->prio;
		/* Boosted prio comes from the donor (pi_task) when present. */
		__entry->newprio	= pi_task ?
				min(tsk->normal_prio, pi_task->prio) :
				tsk->normal_prio;
		/* XXX SCHED_DEADLINE bits missing */
	),

	TP_printk("comm=%s pid=%d oldprio=%d newprio=%d",
			__entry->comm, __entry->pid,
			__entry->oldprio, __entry->newprio)
);

#ifdef CONFIG_DETECT_HUNG_TASK
/*
 * Tracepoint emitted by the hung-task detector for a task deemed hung.
 */
TRACE_EVENT(sched_process_hang,
	TP_PROTO(struct task_struct *tsk),
	TP_ARGS(tsk),

	TP_STRUCT__entry(
		__array( char,	comm,	TASK_COMM_LEN	)
		__field( pid_t,	pid			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
		__entry->pid = tsk->pid;
	),

	TP_printk("comm=%s pid=%d", __entry->comm, __entry->pid)
);
#endif /* CONFIG_DETECT_HUNG_TASK */

/*
 * Tracks migration of tasks from one runqueue to another. Can be used to
 * detect if automatic NUMA balancing is bouncing between nodes.
 */
TRACE_EVENT(sched_move_numa,

	TP_PROTO(struct task_struct *tsk, int src_cpu, int dst_cpu),

	TP_ARGS(tsk, src_cpu, dst_cpu),

	TP_STRUCT__entry(
		__field( pid_t,	pid			)
		__field( pid_t,	tgid			)
		__field( pid_t,	ngid			)
		__field( int,	src_cpu			)
		__field( int,	src_nid			)
		__field( int,	dst_cpu			)
		__field( int,	dst_nid			)
	),

	TP_fast_assign(
		__entry->pid		= task_pid_nr(tsk);
		__entry->tgid		= task_tgid_nr(tsk);
		__entry->ngid		= task_numa_group_id(tsk);
		__entry->src_cpu	= src_cpu;
		__entry->src_nid	= cpu_to_node(src_cpu);
		__entry->dst_cpu	= dst_cpu;
		__entry->dst_nid	= cpu_to_node(dst_cpu);
	),

	TP_printk("pid=%d tgid=%d ngid=%d src_cpu=%d src_nid=%d dst_cpu=%d dst_nid=%d",
			__entry->pid, __entry->tgid, __entry->ngid,
			__entry->src_cpu, __entry->src_nid,
			__entry->dst_cpu, __entry->dst_nid)
);

/*
 * Event class for NUMA-balancing events that involve a source/destination
 * task pair; the destination task may be absent (dst_tsk == NULL) and the
 * destination CPU may be invalid (dst_cpu < 0), hence the guards below.
 */
DECLARE_EVENT_CLASS(sched_numa_pair_template,

	TP_PROTO(struct task_struct *src_tsk, int src_cpu,
		 struct task_struct *dst_tsk, int dst_cpu),

	TP_ARGS(src_tsk, src_cpu, dst_tsk, dst_cpu),

	TP_STRUCT__entry(
		__field( pid_t,	src_pid			)
		__field( pid_t,	src_tgid		)
		__field( pid_t,	src_ngid		)
		__field( int,	src_cpu			)
		__field( int,	src_nid			)
		__field( pid_t,	dst_pid			)
		__field( pid_t,	dst_tgid		)
		__field( pid_t,	dst_ngid		)
		__field( int,	dst_cpu			)
		__field( int,	dst_nid			)
	),

	TP_fast_assign(
		__entry->src_pid	= task_pid_nr(src_tsk);
		__entry->src_tgid	= task_tgid_nr(src_tsk);
		__entry->src_ngid	= task_numa_group_id(src_tsk);
		__entry->src_cpu	= src_cpu;
		__entry->src_nid	= cpu_to_node(src_cpu);
		__entry->dst_pid	= dst_tsk ? task_pid_nr(dst_tsk) : 0;
		__entry->dst_tgid	= dst_tsk ? task_tgid_nr(dst_tsk) : 0;
		__entry->dst_ngid	= dst_tsk ? task_numa_group_id(dst_tsk) : 0;
		__entry->dst_cpu	= dst_cpu;
		__entry->dst_nid	= dst_cpu >= 0 ? cpu_to_node(dst_cpu) : -1;
	),

	TP_printk("src_pid=%d src_tgid=%d src_ngid=%d src_cpu=%d src_nid=%d dst_pid=%d dst_tgid=%d dst_ngid=%d dst_cpu=%d dst_nid=%d",
			__entry->src_pid, __entry->src_tgid, __entry->src_ngid,
			__entry->src_cpu, __entry->src_nid,
			__entry->dst_pid, __entry->dst_tgid, __entry->dst_ngid,
			__entry->dst_cpu, __entry->dst_nid)
);

DEFINE_EVENT(sched_numa_pair_template, sched_stick_numa,

	TP_PROTO(struct task_struct *src_tsk, int src_cpu,
		 struct task_struct *dst_tsk, int dst_cpu),

	TP_ARGS(src_tsk, src_cpu, dst_tsk, dst_cpu)
);

DEFINE_EVENT(sched_numa_pair_template, sched_swap_numa,

	TP_PROTO(struct task_struct *src_tsk, int src_cpu,
		 struct task_struct *dst_tsk, int dst_cpu),

	TP_ARGS(src_tsk, src_cpu, dst_tsk, dst_cpu)
);

#ifdef CONFIG_NUMA_BALANCING
/*
 * Reasons for skipping a VMA during NUMA hinting-fault scanning; the
 * EM/EMe table is expanded twice below, once to export the enum values
 * and once as the symbolic-print mapping.
 */
#define NUMAB_SKIP_REASON					\
	EM( NUMAB_SKIP_UNSUITABLE,		"unsuitable" )		\
	EM( NUMAB_SKIP_SHARED_RO,		"shared_ro" )		\
	EM( NUMAB_SKIP_INACCESSIBLE,		"inaccessible" )	\
	EM( NUMAB_SKIP_SCAN_DELAY,		"scan_delay" )		\
	EM( NUMAB_SKIP_PID_INACTIVE,		"pid_inactive" )	\
	EM( NUMAB_SKIP_IGNORE_PID,		"ignore_pid_inactive" )	\
	EMe(NUMAB_SKIP_SEQ_COMPLETED,		"seq_completed" )

/* Redefine for export. */
#undef EM
#undef EMe
#define EM(a, b)	TRACE_DEFINE_ENUM(a);
#define EMe(a, b)	TRACE_DEFINE_ENUM(a);

NUMAB_SKIP_REASON

/* Redefine for symbolic printing. */
#undef EM
#undef EMe
#define EM(a, b)	{ a, b },
#define EMe(a, b)	{ a, b }

TRACE_EVENT(sched_skip_vma_numa,

	TP_PROTO(struct mm_struct *mm, struct vm_area_struct *vma,
		 enum numa_vmaskip_reason reason),

	TP_ARGS(mm, vma, reason),

	TP_STRUCT__entry(
		__field(unsigned long, numa_scan_offset)
		__field(unsigned long, vm_start)
		__field(unsigned long, vm_end)
		__field(enum numa_vmaskip_reason, reason)
	),

	TP_fast_assign(
		__entry->numa_scan_offset	= mm->numa_scan_offset;
		__entry->vm_start		= vma->vm_start;
		__entry->vm_end			= vma->vm_end;
		__entry->reason			= reason;
	),

	TP_printk("numa_scan_offset=%lX vm_start=%lX vm_end=%lX reason=%s",
		  __entry->numa_scan_offset,
		  __entry->vm_start,
		  __entry->vm_end,
		  __print_symbolic(__entry->reason, NUMAB_SKIP_REASON))
);
#endif /* CONFIG_NUMA_BALANCING */

/*
 * Tracepoint for waking a polling cpu without an IPI.
 */
TRACE_EVENT(sched_wake_idle_without_ipi,

	TP_PROTO(int cpu),

	TP_ARGS(cpu),

	TP_STRUCT__entry(
		__field(	int,	cpu	)
	),

	TP_fast_assign(
		__entry->cpu	= cpu;
	),

	TP_printk("cpu=%d", __entry->cpu)
);

/*
 * Following tracepoints are not exported in tracefs and provide hooking
 * mechanisms only for testing and debugging purposes.
 *
 * Postfixed with _tp to make them easily identifiable in the code.
 */
DECLARE_TRACE(pelt_cfs_tp,
	TP_PROTO(struct cfs_rq *cfs_rq),
	TP_ARGS(cfs_rq));

DECLARE_TRACE(pelt_rt_tp,
	TP_PROTO(struct rq *rq),
	TP_ARGS(rq));

DECLARE_TRACE(pelt_dl_tp,
	TP_PROTO(struct rq *rq),
	TP_ARGS(rq));

DECLARE_TRACE(pelt_hw_tp,
	TP_PROTO(struct rq *rq),
	TP_ARGS(rq));

DECLARE_TRACE(pelt_irq_tp,
	TP_PROTO(struct rq *rq),
	TP_ARGS(rq));

DECLARE_TRACE(pelt_se_tp,
	TP_PROTO(struct sched_entity *se),
	TP_ARGS(se));

DECLARE_TRACE(sched_cpu_capacity_tp,
	TP_PROTO(struct rq *rq),
	TP_ARGS(rq));

DECLARE_TRACE(sched_overutilized_tp,
	TP_PROTO(struct root_domain *rd, bool overutilized),
	TP_ARGS(rd, overutilized));

DECLARE_TRACE(sched_util_est_cfs_tp,
	TP_PROTO(struct cfs_rq *cfs_rq),
	TP_ARGS(cfs_rq));

DECLARE_TRACE(sched_util_est_se_tp,
	TP_PROTO(struct sched_entity *se),
	TP_ARGS(se));

DECLARE_TRACE(sched_update_nr_running_tp,
	TP_PROTO(struct rq *rq, int change),
	TP_ARGS(rq, change));

DECLARE_TRACE(sched_compute_energy_tp,
	TP_PROTO(struct task_struct *p, int dst_cpu, unsigned long energy,
		 unsigned long max_util, unsigned long busy_time),
	TP_ARGS(p, dst_cpu, energy, max_util, busy_time));

ea20d929 | 827 | #endif /* _TRACE_SCHED_H */ |
a8d154b0 SR |
828 | |
829 | /* This part must be outside protection */ | |
830 | #include <trace/define_trace.h> |