// SPDX-License-Identifier: GPL-2.0
#include "util/cpumap.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/evsel_fprintf.h"
#include "util/mutex.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/header.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/cloexec.h"
#include "util/thread_map.h"
#include "util/color.h"
#include "util/stat.h"
#include "util/string2.h"
#include "util/callchain.h"
#include "util/time-utils.h"
#include <subcmd/pager.h>
#include <subcmd/parse-options.h>
#include "util/trace-event.h"
#include "util/debug.h"
#include "util/event.h"
#include "util/util.h"
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/zalloc.h>
#include <sys/prctl.h>
#include <sys/resource.h>
#include <semaphore.h>
#include <api/fs/fs.h>
#include <perf/cpumap.h>
#include <linux/time64.h>
#include <linux/err.h>
#include <linux/ctype.h>
#define PR_SET_NAME 15 /* Set process name */
#define MAX_PID 1024000
static const char *cpu_list;
static DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
unsigned long nr_events;
unsigned long curr_event;
struct sched_atom **atoms;
enum sched_event_type {
enum sched_event_type type;
struct task_desc *wakee;
struct list_head list;
enum thread_state state;
struct list_head work_list;
struct thread *thread;
typedef int (*sort_fn_t)(struct work_atoms *, struct work_atoms *);
struct trace_sched_handler {
int (*switch_event)(struct perf_sched *sched, struct evsel *evsel,
struct perf_sample *sample, struct machine *machine);
int (*runtime_event)(struct perf_sched *sched, struct evsel *evsel,
struct perf_sample *sample, struct machine *machine);
int (*wakeup_event)(struct perf_sched *sched, struct evsel *evsel,
struct perf_sample *sample, struct machine *machine);
/* PERF_RECORD_FORK event, not sched_process_fork tracepoint */
int (*fork_event)(struct perf_sched *sched, union perf_event *event,
struct machine *machine);
int (*migrate_task_event)(struct perf_sched *sched,
struct perf_sample *sample,
struct machine *machine);
#define COLOR_PIDS PERF_COLOR_BLUE
#define COLOR_CPUS PERF_COLOR_BG_RED
struct perf_sched_map {
DECLARE_BITMAP(comp_cpus_mask, MAX_CPUS);
struct perf_cpu *comp_cpus;
struct perf_thread_map *color_pids;
const char *color_pids_str;
struct perf_cpu_map *color_cpus;
const char *color_cpus_str;
const char *task_name;
struct strlist *task_names;
struct perf_cpu_map *cpus;
const char *cpus_str;
struct perf_tool tool;
const char *sort_order;
unsigned long nr_tasks;
struct task_desc **pid_to_task;
struct task_desc **tasks;
const struct trace_sched_handler *tp_handler;
struct mutex start_work_mutex;
struct mutex work_done_wait_mutex;
* Track the current task - that way we can know whether there are any
* weird events, such as a task being switched away that is not current.
struct perf_cpu max_cpu;
struct thread **curr_thread;
struct thread **curr_out_thread;
char next_shortname1;
char next_shortname2;
unsigned int replay_repeat;
unsigned long nr_run_events;
unsigned long nr_sleep_events;
unsigned long nr_wakeup_events;
unsigned long nr_sleep_corrections;
unsigned long nr_run_events_optimized;
unsigned long targetless_wakeups;
unsigned long multitarget_wakeups;
unsigned long nr_runs;
unsigned long nr_timestamps;
unsigned long nr_unordered_timestamps;
unsigned long nr_context_switch_bugs;
unsigned long nr_events;
unsigned long nr_lost_chunks;
unsigned long nr_lost_events;
u64 run_measurement_overhead;
u64 sleep_measurement_overhead;
u64 runavg_cpu_usage;
u64 parent_cpu_usage;
u64 runavg_parent_cpu_usage;
u64 *cpu_last_switched;
struct rb_root_cached atom_root, sorted_atom_root, merged_atom_root;
struct list_head sort_list, cmp_pid;
struct perf_sched_map map;
/* options for timehist command */
unsigned int max_stack;
bool show_cpu_visual;
bool show_migrations;
const char *time_str;
struct perf_time_interval ptime;
struct perf_time_interval hist_time;
volatile bool thread_funcs_exit;
const char *prio_str;
DECLARE_BITMAP(prio_bitmap, MAX_PRIO);
/* per thread run time data */
struct thread_runtime {
u64 last_time; /* time of previous sched in/out event */
u64 dt_run; /* run time */
u64 dt_sleep; /* time spent off-cpu sleeping */
u64 dt_iowait; /* time spent off-cpu waiting on I/O */
u64 dt_preempt; /* time spent off-cpu while preempted */
u64 dt_delay; /* time between wakeup and sched-in */
u64 dt_pre_mig; /* time between wakeup and migration */
u64 ready_to_run; /* time of wakeup */
u64 migrated; /* time when a thread is migrated */
struct stats run_stats;
u64 total_sleep_time;
u64 total_iowait_time;
u64 total_preempt_time;
u64 total_delay_time;
u64 total_pre_mig_time;
/* per event run time data */
struct evsel_runtime {
u64 *last_time; /* time this event was last seen per cpu */
u32 ncpu; /* highest cpu slot allocated */
/* per cpu idle time data */
struct idle_thread_runtime {
struct thread_runtime tr;
struct thread *last_thread;
struct rb_root_cached sorted_root;
struct callchain_root callchain;
struct callchain_cursor cursor;
/* track idle times per cpu */
static struct thread **idle_threads;
static int idle_max_cpu;
static char idle_comm[] = "<idle>";
static u64 get_nsecs(void)
clock_gettime(CLOCK_MONOTONIC, &ts);
return ts.tv_sec * NSEC_PER_SEC + ts.tv_nsec;
static void burn_nsecs(struct perf_sched *sched, u64 nsecs)
u64 T0 = get_nsecs(), T1;
} while (T1 + sched->run_measurement_overhead < T0 + nsecs);
static void sleep_nsecs(u64 nsecs)
ts.tv_nsec = nsecs % NSEC_PER_SEC;
ts.tv_sec = nsecs / NSEC_PER_SEC;
nanosleep(&ts, NULL);
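/*
 * For example, sleep_nsecs(1234567890) splits the request into
 * ts.tv_sec = 1 and ts.tv_nsec = 234567890, i.e. a 1.23456789 s
 * nanosleep().
 */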
static void calibrate_run_measurement_overhead(struct perf_sched *sched)
u64 T0, T1, delta, min_delta = NSEC_PER_SEC;
for (i = 0; i < 10; i++) {
burn_nsecs(sched, 0);
min_delta = min(min_delta, delta);
sched->run_measurement_overhead = min_delta;
printf("run measurement overhead: %" PRIu64 " nsecs\n", min_delta);
static void calibrate_sleep_measurement_overhead(struct perf_sched *sched)
u64 T0, T1, delta, min_delta = NSEC_PER_SEC;
for (i = 0; i < 10; i++) {
min_delta = min(min_delta, delta);
sched->sleep_measurement_overhead = min_delta;
printf("sleep measurement overhead: %" PRIu64 " nsecs\n", min_delta);
static struct sched_atom *
get_new_event(struct task_desc *task, u64 timestamp)
struct sched_atom *event = zalloc(sizeof(*event));
unsigned long idx = task->nr_events;
event->timestamp = timestamp;
size = sizeof(struct sched_atom *) * task->nr_events;
task->atoms = realloc(task->atoms, size);
BUG_ON(!task->atoms);
task->atoms[idx] = event;
static struct sched_atom *last_event(struct task_desc *task)
if (!task->nr_events)
return task->atoms[task->nr_events - 1];
static void add_sched_event_run(struct perf_sched *sched, struct task_desc *task,
u64 timestamp, u64 duration)
struct sched_atom *event, *curr_event = last_event(task);
* optimize an existing RUN event by merging this one
if (curr_event && curr_event->type == SCHED_EVENT_RUN) {
sched->nr_run_events_optimized++;
curr_event->duration += duration;
event = get_new_event(task, timestamp);
event->type = SCHED_EVENT_RUN;
event->duration = duration;
sched->nr_run_events++;
static void add_sched_event_wakeup(struct perf_sched *sched, struct task_desc *task,
u64 timestamp, struct task_desc *wakee)
struct sched_atom *event, *wakee_event;
event = get_new_event(task, timestamp);
event->type = SCHED_EVENT_WAKEUP;
event->wakee = wakee;
wakee_event = last_event(wakee);
if (!wakee_event || wakee_event->type != SCHED_EVENT_SLEEP) {
sched->targetless_wakeups++;
if (wakee_event->wait_sem) {
sched->multitarget_wakeups++;
wakee_event->wait_sem = zalloc(sizeof(*wakee_event->wait_sem));
sem_init(wakee_event->wait_sem, 0, 0);
event->wait_sem = wakee_event->wait_sem;
sched->nr_wakeup_events++;
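/*
 * Note how the semaphore pairing above preserves dependencies: the
 * wakee's pending SLEEP atom gets a wait_sem initialized to 0, shared
 * with this WAKEUP atom, so during replay the sleeping thread blocks
 * in sem_wait() until its recorded waker calls sem_post().
 */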
static void add_sched_event_sleep(struct perf_sched *sched, struct task_desc *task,
struct sched_atom *event = get_new_event(task, timestamp);
event->type = SCHED_EVENT_SLEEP;
sched->nr_sleep_events++;
static struct task_desc *register_pid(struct perf_sched *sched,
unsigned long pid, const char *comm)
struct task_desc *task;
if (sched->pid_to_task == NULL) {
if (sysctl__read_int("kernel/pid_max", &pid_max) < 0)
BUG_ON((sched->pid_to_task = calloc(pid_max, sizeof(struct task_desc *))) == NULL);
if (pid >= (unsigned long)pid_max) {
BUG_ON((sched->pid_to_task = realloc(sched->pid_to_task, (pid + 1) *
sizeof(struct task_desc *))) == NULL);
while (pid >= (unsigned long)pid_max)
sched->pid_to_task[pid_max++] = NULL;
task = sched->pid_to_task[pid];
task = zalloc(sizeof(*task));
task->nr = sched->nr_tasks;
strcpy(task->comm, comm);
* every task starts in sleeping state - this gets ignored
* if there's no wakeup pointing to this sleep state:
add_sched_event_sleep(sched, task, 0);
sched->pid_to_task[pid] = task;
sched->tasks = realloc(sched->tasks, sched->nr_tasks * sizeof(struct task_desc *));
BUG_ON(!sched->tasks);
sched->tasks[task->nr] = task;
printf("registered task #%ld, PID %ld (%s)\n", sched->nr_tasks, pid, comm);
static void print_task_traces(struct perf_sched *sched)
struct task_desc *task;
for (i = 0; i < sched->nr_tasks; i++) {
task = sched->tasks[i];
printf("task %6ld (%20s:%10ld), nr_events: %ld\n",
task->nr, task->comm, task->pid, task->nr_events);
static void add_cross_task_wakeups(struct perf_sched *sched)
struct task_desc *task1, *task2;
for (i = 0; i < sched->nr_tasks; i++) {
task1 = sched->tasks[i];
if (j == sched->nr_tasks)
task2 = sched->tasks[j];
add_sched_event_wakeup(sched, task1, 0, task2);
static void perf_sched__process_event(struct perf_sched *sched,
struct sched_atom *atom)
switch (atom->type) {
case SCHED_EVENT_RUN:
burn_nsecs(sched, atom->duration);
case SCHED_EVENT_SLEEP:
ret = sem_wait(atom->wait_sem);
case SCHED_EVENT_WAKEUP:
ret = sem_post(atom->wait_sem);
static u64 get_cpu_usage_nsec_parent(void)
err = getrusage(RUSAGE_SELF, &ru);
sum = ru.ru_utime.tv_sec * NSEC_PER_SEC + ru.ru_utime.tv_usec * NSEC_PER_USEC;
sum += ru.ru_stime.tv_sec * NSEC_PER_SEC + ru.ru_stime.tv_usec * NSEC_PER_USEC;
static int self_open_counters(struct perf_sched *sched, unsigned long cur_task)
struct perf_event_attr attr;
char sbuf[STRERR_BUFSIZE], info[STRERR_BUFSIZE];
bool need_privilege = false;
memset(&attr, 0, sizeof(attr));
attr.type = PERF_TYPE_SOFTWARE;
attr.config = PERF_COUNT_SW_TASK_CLOCK;
fd = sys_perf_event_open(&attr, 0, -1, -1,
perf_event_open_cloexec_flag());
if (errno == EMFILE) {
BUG_ON(getrlimit(RLIMIT_NOFILE, &limit) == -1);
limit.rlim_cur += sched->nr_tasks - cur_task;
if (limit.rlim_cur > limit.rlim_max) {
limit.rlim_max = limit.rlim_cur;
need_privilege = true;
if (setrlimit(RLIMIT_NOFILE, &limit) == -1) {
if (need_privilege && errno == EPERM)
strcpy(info, "Need privilege\n");
strcpy(info, "Try the -f option\n");
pr_err("Error: sys_perf_event_open() syscall returned "
"with %d (%s)\n%s", fd,
str_error_r(errno, sbuf, sizeof(sbuf)), info);
static u64 get_cpu_usage_nsec_self(int fd)
ret = read(fd, &runtime, sizeof(runtime));
BUG_ON(ret != sizeof(runtime));
struct sched_thread_parms {
struct task_desc *task;
struct perf_sched *sched;
static void *thread_func(void *ctx)
struct sched_thread_parms *parms = ctx;
struct task_desc *this_task = parms->task;
struct perf_sched *sched = parms->sched;
u64 cpu_usage_0, cpu_usage_1;
unsigned long i, ret;
sprintf(comm2, ":%s", this_task->comm);
prctl(PR_SET_NAME, comm2);
while (!sched->thread_funcs_exit) {
ret = sem_post(&this_task->ready_for_work);
mutex_lock(&sched->start_work_mutex);
mutex_unlock(&sched->start_work_mutex);
cpu_usage_0 = get_cpu_usage_nsec_self(fd);
for (i = 0; i < this_task->nr_events; i++) {
this_task->curr_event = i;
perf_sched__process_event(sched, this_task->atoms[i]);
cpu_usage_1 = get_cpu_usage_nsec_self(fd);
this_task->cpu_usage = cpu_usage_1 - cpu_usage_0;
ret = sem_post(&this_task->work_done_sem);
mutex_lock(&sched->work_done_wait_mutex);
mutex_unlock(&sched->work_done_wait_mutex);
static void create_tasks(struct perf_sched *sched)
EXCLUSIVE_LOCK_FUNCTION(sched->start_work_mutex)
EXCLUSIVE_LOCK_FUNCTION(sched->work_done_wait_mutex)
struct task_desc *task;
err = pthread_attr_init(&attr);
err = pthread_attr_setstacksize(&attr,
(size_t) max(16 * 1024, (int)PTHREAD_STACK_MIN));
mutex_lock(&sched->start_work_mutex);
mutex_lock(&sched->work_done_wait_mutex);
for (i = 0; i < sched->nr_tasks; i++) {
struct sched_thread_parms *parms = malloc(sizeof(*parms));
BUG_ON(parms == NULL);
parms->task = task = sched->tasks[i];
parms->sched = sched;
parms->fd = self_open_counters(sched, i);
sem_init(&task->ready_for_work, 0, 0);
sem_init(&task->work_done_sem, 0, 0);
task->curr_event = 0;
err = pthread_create(&task->thread, &attr, thread_func, parms);
static void destroy_tasks(struct perf_sched *sched)
UNLOCK_FUNCTION(sched->start_work_mutex)
UNLOCK_FUNCTION(sched->work_done_wait_mutex)
struct task_desc *task;
mutex_unlock(&sched->start_work_mutex);
mutex_unlock(&sched->work_done_wait_mutex);
/* Get rid of threads so they won't be upset by mutex destruction */
for (i = 0; i < sched->nr_tasks; i++) {
task = sched->tasks[i];
err = pthread_join(task->thread, NULL);
sem_destroy(&task->ready_for_work);
sem_destroy(&task->work_done_sem);
static void wait_for_tasks(struct perf_sched *sched)
EXCLUSIVE_LOCKS_REQUIRED(sched->work_done_wait_mutex)
EXCLUSIVE_LOCKS_REQUIRED(sched->start_work_mutex)
u64 cpu_usage_0, cpu_usage_1;
struct task_desc *task;
unsigned long i, ret;
sched->start_time = get_nsecs();
sched->cpu_usage = 0;
mutex_unlock(&sched->work_done_wait_mutex);
for (i = 0; i < sched->nr_tasks; i++) {
task = sched->tasks[i];
ret = sem_wait(&task->ready_for_work);
sem_init(&task->ready_for_work, 0, 0);
mutex_lock(&sched->work_done_wait_mutex);
cpu_usage_0 = get_cpu_usage_nsec_parent();
mutex_unlock(&sched->start_work_mutex);
for (i = 0; i < sched->nr_tasks; i++) {
task = sched->tasks[i];
ret = sem_wait(&task->work_done_sem);
sem_init(&task->work_done_sem, 0, 0);
sched->cpu_usage += task->cpu_usage;
cpu_usage_1 = get_cpu_usage_nsec_parent();
if (!sched->runavg_cpu_usage)
sched->runavg_cpu_usage = sched->cpu_usage;
sched->runavg_cpu_usage = (sched->runavg_cpu_usage * (sched->replay_repeat - 1) + sched->cpu_usage) / sched->replay_repeat;
sched->parent_cpu_usage = cpu_usage_1 - cpu_usage_0;
if (!sched->runavg_parent_cpu_usage)
sched->runavg_parent_cpu_usage = sched->parent_cpu_usage;
sched->runavg_parent_cpu_usage = (sched->runavg_parent_cpu_usage * (sched->replay_repeat - 1) +
sched->parent_cpu_usage)/sched->replay_repeat;
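/*
 * Both running averages above use the same recurrence,
 * avg = (avg * (n - 1) + sample) / n with n = replay_repeat,
 * an exponential moving average that weights the newest run by 1/n:
 * e.g. with replay_repeat = 10, each iteration blends 90% of the old
 * average with 10% of the new measurement.
 */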
mutex_lock(&sched->start_work_mutex);
for (i = 0; i < sched->nr_tasks; i++) {
task = sched->tasks[i];
task->curr_event = 0;
static void run_one_test(struct perf_sched *sched)
EXCLUSIVE_LOCKS_REQUIRED(sched->work_done_wait_mutex)
EXCLUSIVE_LOCKS_REQUIRED(sched->start_work_mutex)
u64 T0, T1, delta, avg_delta, fluct;
wait_for_tasks(sched);
sched->sum_runtime += delta;
avg_delta = sched->sum_runtime / sched->nr_runs;
if (delta < avg_delta)
fluct = avg_delta - delta;
fluct = delta - avg_delta;
sched->sum_fluct += fluct;
sched->run_avg = delta;
sched->run_avg = (sched->run_avg * (sched->replay_repeat - 1) + delta) / sched->replay_repeat;
printf("#%-3ld: %0.3f, ", sched->nr_runs, (double)delta / NSEC_PER_MSEC);
printf("ravg: %0.2f, ", (double)sched->run_avg / NSEC_PER_MSEC);
printf("cpu: %0.2f / %0.2f",
(double)sched->cpu_usage / NSEC_PER_MSEC, (double)sched->runavg_cpu_usage / NSEC_PER_MSEC);
* rusage statistics are collected by the parent; these are less
* accurate than the sched->sum_exec_runtime based statistics:
printf(" [%0.2f / %0.2f]",
(double)sched->parent_cpu_usage / NSEC_PER_MSEC,
(double)sched->runavg_parent_cpu_usage / NSEC_PER_MSEC);
if (sched->nr_sleep_corrections)
printf(" (%ld sleep corrections)\n", sched->nr_sleep_corrections);
sched->nr_sleep_corrections = 0;
static void test_calibrations(struct perf_sched *sched)
burn_nsecs(sched, NSEC_PER_MSEC);
printf("the run test took %" PRIu64 " nsecs\n", T1 - T0);
sleep_nsecs(NSEC_PER_MSEC);
printf("the sleep test took %" PRIu64 " nsecs\n", T1 - T0);
replay_wakeup_event(struct perf_sched *sched,
struct evsel *evsel, struct perf_sample *sample,
struct machine *machine __maybe_unused)
const char *comm = evsel__strval(evsel, sample, "comm");
const u32 pid = evsel__intval(evsel, sample, "pid");
struct task_desc *waker, *wakee;
printf("sched_wakeup event %p\n", evsel);
printf(" ... pid %d woke up %s/%d\n", sample->tid, comm, pid);
waker = register_pid(sched, sample->tid, "<unknown>");
wakee = register_pid(sched, pid, comm);
add_sched_event_wakeup(sched, waker, sample->time, wakee);
static int replay_switch_event(struct perf_sched *sched,
struct perf_sample *sample,
struct machine *machine __maybe_unused)
const char *prev_comm = evsel__strval(evsel, sample, "prev_comm"),
*next_comm = evsel__strval(evsel, sample, "next_comm");
const u32 prev_pid = evsel__intval(evsel, sample, "prev_pid"),
next_pid = evsel__intval(evsel, sample, "next_pid");
struct task_desc *prev, __maybe_unused *next;
u64 timestamp0, timestamp = sample->time;
int cpu = sample->cpu;
printf("sched_switch event %p\n", evsel);
if (cpu >= MAX_CPUS || cpu < 0)
timestamp0 = sched->cpu_last_switched[cpu];
delta = timestamp - timestamp0;
pr_err("hm, delta: %" PRIu64 " < 0 ?\n", delta);
pr_debug(" ... switch from %s/%d to %s/%d [ran %" PRIu64 " nsecs]\n",
prev_comm, prev_pid, next_comm, next_pid, delta);
prev = register_pid(sched, prev_pid, prev_comm);
next = register_pid(sched, next_pid, next_comm);
sched->cpu_last_switched[cpu] = timestamp;
add_sched_event_run(sched, prev, timestamp, delta);
add_sched_event_sleep(sched, prev, timestamp);
static int replay_fork_event(struct perf_sched *sched,
union perf_event *event,
struct machine *machine)
struct thread *child, *parent;
child = machine__findnew_thread(machine, event->fork.pid,
parent = machine__findnew_thread(machine, event->fork.ppid,
if (child == NULL || parent == NULL) {
pr_debug("thread does not exist on fork event: child %p, parent %p\n",
printf("fork event\n");
printf("... parent: %s/%d\n", thread__comm_str(parent), thread__tid(parent));
printf("... child: %s/%d\n", thread__comm_str(child), thread__tid(child));
register_pid(sched, thread__tid(parent), thread__comm_str(parent));
register_pid(sched, thread__tid(child), thread__comm_str(child));
struct sort_dimension {
struct list_head list;
static inline void init_prio(struct thread_runtime *r)
* handle runtime stats saved per thread
static struct thread_runtime *thread__init_runtime(struct thread *thread)
struct thread_runtime *r;
r = zalloc(sizeof(struct thread_runtime));
init_stats(&r->run_stats);
thread__set_priv(thread, r);
static struct thread_runtime *thread__get_runtime(struct thread *thread)
struct thread_runtime *tr;
tr = thread__priv(thread);
tr = thread__init_runtime(thread);
pr_debug("Failed to malloc memory for runtime data.\n");
thread_lat_cmp(struct list_head *list, struct work_atoms *l, struct work_atoms *r)
struct sort_dimension *sort;
BUG_ON(list_empty(list));
list_for_each_entry(sort, list, list) {
ret = sort->cmp(l, r);
static struct work_atoms *
thread_atoms_search(struct rb_root_cached *root, struct thread *thread,
struct list_head *sort_list)
struct rb_node *node = root->rb_root.rb_node;
struct work_atoms key = { .thread = thread };
struct work_atoms *atoms;
atoms = container_of(node, struct work_atoms, node);
cmp = thread_lat_cmp(sort_list, &key, atoms);
node = node->rb_left;
node = node->rb_right;
BUG_ON(thread != atoms->thread);
__thread_latency_insert(struct rb_root_cached *root, struct work_atoms *data,
struct list_head *sort_list)
struct rb_node **new = &(root->rb_root.rb_node), *parent = NULL;
bool leftmost = true;
struct work_atoms *this;
this = container_of(*new, struct work_atoms, node);
cmp = thread_lat_cmp(sort_list, data, this);
new = &((*new)->rb_left);
new = &((*new)->rb_right);
rb_link_node(&data->node, parent, new);
rb_insert_color_cached(&data->node, root, leftmost);
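/*
 * The leftmost flag stays true only if the insertion never descended
 * to the right, i.e. the new node became the smallest element.
 * Passing it to rb_insert_color_cached() keeps rb_root_cached's
 * cached-leftmost pointer valid, so rb_first_cached() is O(1) when
 * the sorted tree is walked for output.
 */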
static int thread_atoms_insert(struct perf_sched *sched, struct thread *thread)
struct work_atoms *atoms = zalloc(sizeof(*atoms));
pr_err("No memory at %s\n", __func__);
atoms->thread = thread__get(thread);
INIT_LIST_HEAD(&atoms->work_list);
__thread_latency_insert(&sched->atom_root, atoms, &sched->cmp_pid);
add_sched_out_event(struct work_atoms *atoms,
struct work_atom *atom = zalloc(sizeof(*atom));
pr_err("No memory at %s", __func__);
atom->sched_out_time = timestamp;
if (run_state == 'R') {
atom->state = THREAD_WAIT_CPU;
atom->wake_up_time = atom->sched_out_time;
list_add_tail(&atom->list, &atoms->work_list);
add_runtime_event(struct work_atoms *atoms, u64 delta,
u64 timestamp __maybe_unused)
struct work_atom *atom;
BUG_ON(list_empty(&atoms->work_list));
atom = list_entry(atoms->work_list.prev, struct work_atom, list);
atom->runtime += delta;
atoms->total_runtime += delta;
add_sched_in_event(struct work_atoms *atoms, u64 timestamp)
struct work_atom *atom;
if (list_empty(&atoms->work_list))
atom = list_entry(atoms->work_list.prev, struct work_atom, list);
if (atom->state != THREAD_WAIT_CPU)
if (timestamp < atom->wake_up_time) {
atom->state = THREAD_IGNORE;
atom->state = THREAD_SCHED_IN;
atom->sched_in_time = timestamp;
delta = atom->sched_in_time - atom->wake_up_time;
atoms->total_lat += delta;
if (delta > atoms->max_lat) {
atoms->max_lat = delta;
atoms->max_lat_start = atom->wake_up_time;
atoms->max_lat_end = timestamp;
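/*
 * Worked example: a task woken at t = 100us and scheduled in at
 * t = 150us contributes a 50us delta to total_lat, and becomes the
 * new max_lat (with its start/end stamps) if 50us exceeds the
 * previous maximum.
 */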
static int latency_switch_event(struct perf_sched *sched,
struct evsel *evsel,
struct perf_sample *sample,
struct machine *machine)
const u32 prev_pid = evsel__intval(evsel, sample, "prev_pid"),
next_pid = evsel__intval(evsel, sample, "next_pid");
const char prev_state = evsel__taskstate(evsel, sample, "prev_state");
struct work_atoms *out_events, *in_events;
struct thread *sched_out, *sched_in;
u64 timestamp0, timestamp = sample->time;
int cpu = sample->cpu, err = -1;
BUG_ON(cpu >= MAX_CPUS || cpu < 0);
timestamp0 = sched->cpu_last_switched[cpu];
sched->cpu_last_switched[cpu] = timestamp;
delta = timestamp - timestamp0;
pr_err("hm, delta: %" PRIu64 " < 0 ?\n", delta);
sched_out = machine__findnew_thread(machine, -1, prev_pid);
sched_in = machine__findnew_thread(machine, -1, next_pid);
if (sched_out == NULL || sched_in == NULL)
out_events = thread_atoms_search(&sched->atom_root, sched_out, &sched->cmp_pid);
if (thread_atoms_insert(sched, sched_out))
out_events = thread_atoms_search(&sched->atom_root, sched_out, &sched->cmp_pid);
pr_err("out-event: Internal tree error");
if (add_sched_out_event(out_events, prev_state, timestamp))
in_events = thread_atoms_search(&sched->atom_root, sched_in, &sched->cmp_pid);
if (thread_atoms_insert(sched, sched_in))
in_events = thread_atoms_search(&sched->atom_root, sched_in, &sched->cmp_pid);
pr_err("in-event: Internal tree error");
* A task came in that we have not heard about yet;
* add an initial atom in runnable state:
if (add_sched_out_event(in_events, 'R', timestamp))
add_sched_in_event(in_events, timestamp);
thread__put(sched_out);
thread__put(sched_in);
static int latency_runtime_event(struct perf_sched *sched,
struct evsel *evsel,
struct perf_sample *sample,
struct machine *machine)
const u32 pid = evsel__intval(evsel, sample, "pid");
const u64 runtime = evsel__intval(evsel, sample, "runtime");
struct thread *thread = machine__findnew_thread(machine, -1, pid);
struct work_atoms *atoms = thread_atoms_search(&sched->atom_root, thread, &sched->cmp_pid);
u64 timestamp = sample->time;
int cpu = sample->cpu, err = -1;
BUG_ON(cpu >= MAX_CPUS || cpu < 0);
if (thread_atoms_insert(sched, thread))
atoms = thread_atoms_search(&sched->atom_root, thread, &sched->cmp_pid);
pr_err("in-event: Internal tree error");
if (add_sched_out_event(atoms, 'R', timestamp))
add_runtime_event(atoms, runtime, timestamp);
thread__put(thread);
static int latency_wakeup_event(struct perf_sched *sched,
struct evsel *evsel,
struct perf_sample *sample,
struct machine *machine)
const u32 pid = evsel__intval(evsel, sample, "pid");
struct work_atoms *atoms;
struct work_atom *atom;
struct thread *wakee;
u64 timestamp = sample->time;
wakee = machine__findnew_thread(machine, -1, pid);
atoms = thread_atoms_search(&sched->atom_root, wakee, &sched->cmp_pid);
if (thread_atoms_insert(sched, wakee))
atoms = thread_atoms_search(&sched->atom_root, wakee, &sched->cmp_pid);
pr_err("wakeup-event: Internal tree error");
if (add_sched_out_event(atoms, 'S', timestamp))
BUG_ON(list_empty(&atoms->work_list));
atom = list_entry(atoms->work_list.prev, struct work_atom, list);
* A wakeup event is not guaranteed to happen while the task is off
* the run queue; it may also fire while the task is still on the run
* queue and merely change ->state to TASK_RUNNING. In that case we
* should not set ->wake_up_time for a task that is already runnable.
*
* You WILL be missing events if you've recorded only one CPU, or are
* only looking at one, so don't skip in this case.
if (sched->profile_cpu == -1 && atom->state != THREAD_SLEEPING)
sched->nr_timestamps++;
if (atom->sched_out_time > timestamp) {
sched->nr_unordered_timestamps++;
atom->state = THREAD_WAIT_CPU;
atom->wake_up_time = timestamp;
static int latency_migrate_task_event(struct perf_sched *sched,
struct evsel *evsel,
struct perf_sample *sample,
struct machine *machine)
const u32 pid = evsel__intval(evsel, sample, "pid");
u64 timestamp = sample->time;
struct work_atoms *atoms;
struct work_atom *atom;
struct thread *migrant;
* Only need to worry about migration when profiling one CPU.
if (sched->profile_cpu == -1)
migrant = machine__findnew_thread(machine, -1, pid);
if (migrant == NULL)
atoms = thread_atoms_search(&sched->atom_root, migrant, &sched->cmp_pid);
if (thread_atoms_insert(sched, migrant))
register_pid(sched, thread__tid(migrant), thread__comm_str(migrant));
atoms = thread_atoms_search(&sched->atom_root, migrant, &sched->cmp_pid);
pr_err("migration-event: Internal tree error");
if (add_sched_out_event(atoms, 'R', timestamp))
BUG_ON(list_empty(&atoms->work_list));
atom = list_entry(atoms->work_list.prev, struct work_atom, list);
atom->sched_in_time = atom->sched_out_time = atom->wake_up_time = timestamp;
sched->nr_timestamps++;
if (atom->sched_out_time > timestamp)
sched->nr_unordered_timestamps++;
thread__put(migrant);
static void output_lat_thread(struct perf_sched *sched, struct work_atoms *work_list)
char max_lat_start[32], max_lat_end[32];
if (!work_list->nb_atoms)
* Ignore idle threads:
if (!strcmp(thread__comm_str(work_list->thread), "swapper"))
sched->all_runtime += work_list->total_runtime;
sched->all_count += work_list->nb_atoms;
if (work_list->num_merged > 1) {
ret = printf(" %s:(%d) ", thread__comm_str(work_list->thread),
work_list->num_merged);
ret = printf(" %s:%d ", thread__comm_str(work_list->thread),
thread__tid(work_list->thread));
for (i = 0; i < 24 - ret; i++)
avg = work_list->total_lat / work_list->nb_atoms;
timestamp__scnprintf_usec(work_list->max_lat_start, max_lat_start, sizeof(max_lat_start));
timestamp__scnprintf_usec(work_list->max_lat_end, max_lat_end, sizeof(max_lat_end));
printf("|%11.3f ms |%9" PRIu64 " | avg:%8.3f ms | max:%8.3f ms | max start: %12s s | max end: %12s s\n",
(double)work_list->total_runtime / NSEC_PER_MSEC,
work_list->nb_atoms, (double)avg / NSEC_PER_MSEC,
(double)work_list->max_lat / NSEC_PER_MSEC,
max_lat_start, max_lat_end);
static int pid_cmp(struct work_atoms *l, struct work_atoms *r)
if (RC_CHK_EQUAL(l->thread, r->thread))
l_tid = thread__tid(l->thread);
r_tid = thread__tid(r->thread);
return (int)(RC_CHK_ACCESS(l->thread) - RC_CHK_ACCESS(r->thread));
static int avg_cmp(struct work_atoms *l, struct work_atoms *r)
avgl = l->total_lat / l->nb_atoms;
avgr = r->total_lat / r->nb_atoms;
static int max_cmp(struct work_atoms *l, struct work_atoms *r)
if (l->max_lat < r->max_lat)
if (l->max_lat > r->max_lat)
static int switch_cmp(struct work_atoms *l, struct work_atoms *r)
if (l->nb_atoms < r->nb_atoms)
if (l->nb_atoms > r->nb_atoms)
static int runtime_cmp(struct work_atoms *l, struct work_atoms *r)
if (l->total_runtime < r->total_runtime)
if (l->total_runtime > r->total_runtime)
static int sort_dimension__add(const char *tok, struct list_head *list)
static struct sort_dimension avg_sort_dimension = {
static struct sort_dimension max_sort_dimension = {
static struct sort_dimension pid_sort_dimension = {
static struct sort_dimension runtime_sort_dimension = {
static struct sort_dimension switch_sort_dimension = {
struct sort_dimension *available_sorts[] = {
&pid_sort_dimension,
&avg_sort_dimension,
&max_sort_dimension,
&switch_sort_dimension,
&runtime_sort_dimension,
for (i = 0; i < ARRAY_SIZE(available_sorts); i++) {
if (!strcmp(available_sorts[i]->name, tok)) {
list_add_tail(&available_sorts[i]->list, list);
static void perf_sched__sort_lat(struct perf_sched *sched)
struct rb_node *node;
struct rb_root_cached *root = &sched->atom_root;
struct work_atoms *data;
node = rb_first_cached(root);
rb_erase_cached(node, root);
data = rb_entry(node, struct work_atoms, node);
__thread_latency_insert(&sched->sorted_atom_root, data, &sched->sort_list);
if (root == &sched->atom_root) {
root = &sched->merged_atom_root;
static int process_sched_wakeup_event(const struct perf_tool *tool,
struct evsel *evsel,
struct perf_sample *sample,
struct machine *machine)
struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
if (sched->tp_handler->wakeup_event)
return sched->tp_handler->wakeup_event(sched, evsel, sample, machine);
static int process_sched_wakeup_ignore(const struct perf_tool *tool __maybe_unused,
struct evsel *evsel __maybe_unused,
struct perf_sample *sample __maybe_unused,
struct machine *machine __maybe_unused)
static bool thread__has_color(struct thread *thread)
union map_priv priv = {
.ptr = thread__priv(thread),
static struct thread*
map__findnew_thread(struct perf_sched *sched, struct machine *machine, pid_t pid, pid_t tid)
struct thread *thread = machine__findnew_thread(machine, pid, tid);
union map_priv priv = {
if (!sched->map.color_pids || !thread || thread__priv(thread))
if (thread_map__has(sched->map.color_pids, tid))
thread__set_priv(thread, priv.ptr);
static bool sched_match_task(struct perf_sched *sched, const char *comm_str)
bool fuzzy_match = sched->map.fuzzy;
struct strlist *task_names = sched->map.task_names;
struct str_node *node;
strlist__for_each_entry(node, task_names) {
bool match_found = fuzzy_match ? !!strstr(comm_str, node->s) :
!strcmp(comm_str, node->s);
static void print_sched_map(struct perf_sched *sched, struct perf_cpu this_cpu, int cpus_nr,
const char *color, bool sched_out)
for (int i = 0; i < cpus_nr; i++) {
struct perf_cpu cpu = {
.cpu = sched->map.comp ? sched->map.comp_cpus[i].cpu : i,
struct thread *curr_thread = sched->curr_thread[cpu.cpu];
struct thread *curr_out_thread = sched->curr_out_thread[cpu.cpu];
struct thread_runtime *curr_tr;
const char *pid_color = color;
const char *cpu_color = color;
struct thread *thread_to_check = sched_out ? curr_out_thread : curr_thread;
if (thread_to_check && thread__has_color(thread_to_check))
pid_color = COLOR_PIDS;
if (sched->map.color_cpus && perf_cpu_map__has(sched->map.color_cpus, cpu))
cpu_color = COLOR_CPUS;
if (cpu.cpu == this_cpu.cpu)
color_fprintf(stdout, cpu.cpu != this_cpu.cpu ? color : cpu_color, "%c", symbol);
thread_to_check = sched_out ? sched->curr_out_thread[cpu.cpu] :
sched->curr_thread[cpu.cpu];
if (thread_to_check) {
curr_tr = thread__get_runtime(thread_to_check);
if (curr_tr == NULL)
if (cpu.cpu == this_cpu.cpu)
color_fprintf(stdout, color, "- ");
curr_tr = thread__get_runtime(sched->curr_thread[cpu.cpu]);
if (curr_tr != NULL)
color_fprintf(stdout, pid_color, "%2s ",
curr_tr->shortname);
color_fprintf(stdout, pid_color, "%2s ", curr_tr->shortname);
color_fprintf(stdout, color, " ");
static int map_switch_event(struct perf_sched *sched, struct evsel *evsel,
struct perf_sample *sample, struct machine *machine)
const u32 next_pid = evsel__intval(evsel, sample, "next_pid");
const u32 prev_pid = evsel__intval(evsel, sample, "prev_pid");
struct thread *sched_in, *sched_out;
struct thread_runtime *tr;
u64 timestamp0, timestamp = sample->time;
struct perf_cpu this_cpu = {
bool new_cpu = false;
const char *color = PERF_COLOR_NORMAL;
char stimestamp[32];
BUG_ON(this_cpu.cpu >= MAX_CPUS || this_cpu.cpu < 0);
if (this_cpu.cpu > sched->max_cpu.cpu)
sched->max_cpu = this_cpu;
if (sched->map.comp) {
cpus_nr = bitmap_weight(sched->map.comp_cpus_mask, MAX_CPUS);
if (!__test_and_set_bit(this_cpu.cpu, sched->map.comp_cpus_mask)) {
sched->map.comp_cpus[cpus_nr++] = this_cpu;
cpus_nr = sched->max_cpu.cpu;
timestamp0 = sched->cpu_last_switched[this_cpu.cpu];
sched->cpu_last_switched[this_cpu.cpu] = timestamp;
delta = timestamp - timestamp0;
pr_err("hm, delta: %" PRIu64 " < 0 ?\n", delta);
sched_in = map__findnew_thread(sched, machine, -1, next_pid);
sched_out = map__findnew_thread(sched, machine, -1, prev_pid);
if (sched_in == NULL || sched_out == NULL)
tr = thread__get_runtime(sched_in);
thread__put(sched_in);
sched->curr_thread[this_cpu.cpu] = thread__get(sched_in);
sched->curr_out_thread[this_cpu.cpu] = thread__get(sched_out);
str = thread__comm_str(sched_in);
if (!tr->shortname[0]) {
if (!strcmp(thread__comm_str(sched_in), "swapper")) {
* Don't allocate a letter-number for swapper:0
* as a shortname. Instead, we use '.' for it.
tr->shortname[0] = '.';
tr->shortname[1] = ' ';
} else if (!sched->map.task_name || sched_match_task(sched, str)) {
tr->shortname[0] = sched->next_shortname1;
tr->shortname[1] = sched->next_shortname2;
if (sched->next_shortname1 < 'Z') {
sched->next_shortname1++;
sched->next_shortname1 = 'A';
if (sched->next_shortname2 < '9')
sched->next_shortname2++;
sched->next_shortname2 = '0';
tr->shortname[0] = '-';
tr->shortname[1] = ' ';
if (sched->map.cpus && !perf_cpu_map__has(sched->map.cpus, this_cpu))
str = thread__comm_str(sched_in);
* Check which of sched_in and sched_out matches the passed --task-name
* arguments and call the corresponding print_sched_map.
if (sched->map.task_name && !sched_match_task(sched, str)) {
if (!sched_match_task(sched, thread__comm_str(sched_out)))
str = thread__comm_str(sched_out);
if (!(sched->map.task_name && !sched_match_task(sched, str)))
print_sched_map(sched, this_cpu, cpus_nr, color, false);
timestamp__scnprintf_usec(timestamp, stimestamp, sizeof(stimestamp));
color_fprintf(stdout, color, " %12s secs ", stimestamp);
if (new_shortname || tr->comm_changed || (verbose > 0 && thread__tid(sched_in))) {
const char *pid_color = color;
if (thread__has_color(sched_in))
pid_color = COLOR_PIDS;
color_fprintf(stdout, pid_color, "%s => %s:%d",
tr->shortname, thread__comm_str(sched_in), thread__tid(sched_in));
tr->comm_changed = false;
if (sched->map.comp && new_cpu)
color_fprintf(stdout, color, " (CPU %d)", this_cpu.cpu);
color_fprintf(stdout, color, "\n");
if (sched->map.task_name) {
tr = thread__get_runtime(sched->curr_out_thread[this_cpu.cpu]);
if (strcmp(tr->shortname, "") == 0)
color_fprintf(stdout, color, "\n");
print_sched_map(sched, this_cpu, cpus_nr, color, true);
timestamp__scnprintf_usec(timestamp, stimestamp, sizeof(stimestamp));
color_fprintf(stdout, color, " %12s secs ", stimestamp);
color_fprintf(stdout, color, "\n");
if (sched->map.task_name)
thread__put(sched_out);
thread__put(sched_in);
static int process_sched_switch_event(const struct perf_tool *tool,
struct evsel *evsel,
struct perf_sample *sample,
struct machine *machine)
struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
int this_cpu = sample->cpu, err = 0;
u32 prev_pid = evsel__intval(evsel, sample, "prev_pid"),
next_pid = evsel__intval(evsel, sample, "next_pid");
if (sched->curr_pid[this_cpu] != (u32)-1) {
* Are we trying to switch away a PID that is
if (sched->curr_pid[this_cpu] != prev_pid)
sched->nr_context_switch_bugs++;
if (sched->tp_handler->switch_event)
err = sched->tp_handler->switch_event(sched, evsel, sample, machine);
sched->curr_pid[this_cpu] = next_pid;
static int process_sched_runtime_event(const struct perf_tool *tool,
struct evsel *evsel,
struct perf_sample *sample,
struct machine *machine)
struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
if (sched->tp_handler->runtime_event)
return sched->tp_handler->runtime_event(sched, evsel, sample, machine);
static int perf_sched__process_fork_event(const struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct machine *machine)
struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
/* run the fork event through the perf machinery */
perf_event__process_fork(tool, event, sample, machine);
/* and then run additional processing needed for this command */
if (sched->tp_handler->fork_event)
return sched->tp_handler->fork_event(sched, event, machine);
static int process_sched_migrate_task_event(const struct perf_tool *tool,
struct evsel *evsel,
struct perf_sample *sample,
struct machine *machine)
struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
if (sched->tp_handler->migrate_task_event)
return sched->tp_handler->migrate_task_event(sched, evsel, sample, machine);
typedef int (*tracepoint_handler)(const struct perf_tool *tool,
struct evsel *evsel,
struct perf_sample *sample,
struct machine *machine);
static int perf_sched__process_tracepoint_sample(const struct perf_tool *tool __maybe_unused,
union perf_event *event __maybe_unused,
struct perf_sample *sample,
struct evsel *evsel,
struct machine *machine)
if (evsel->handler != NULL) {
tracepoint_handler f = evsel->handler;
err = f(tool, evsel, sample, machine);
static int perf_sched__process_comm(const struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct perf_sample *sample,
struct machine *machine)
struct thread *thread;
struct thread_runtime *tr;
err = perf_event__process_comm(tool, event, sample, machine);
thread = machine__find_thread(machine, sample->pid, sample->tid);
pr_err("Internal error: can't find thread\n");
tr = thread__get_runtime(thread);
thread__put(thread);
tr->comm_changed = true;
thread__put(thread);
static int perf_sched__read_events(struct perf_sched *sched)
struct evsel_str_handler handlers[] = {
{ "sched:sched_switch", process_sched_switch_event, },
{ "sched:sched_stat_runtime", process_sched_runtime_event, },
{ "sched:sched_wakeup", process_sched_wakeup_event, },
{ "sched:sched_waking", process_sched_wakeup_event, },
{ "sched:sched_wakeup_new", process_sched_wakeup_event, },
{ "sched:sched_migrate_task", process_sched_migrate_task_event, },
struct perf_session *session;
struct perf_data data = {
.mode = PERF_DATA_MODE_READ,
.force = sched->force,
session = perf_session__new(&data, &sched->tool);
if (IS_ERR(session)) {
pr_debug("Error creating perf session");
return PTR_ERR(session);
symbol__init(&session->header.env);
/* prefer sched_waking if it is captured */
if (evlist__find_tracepoint_by_name(session->evlist, "sched:sched_waking"))
handlers[2].handler = process_sched_wakeup_ignore;
if (perf_session__set_tracepoints_handlers(session, handlers))
if (perf_session__has_traces(session, "record -R")) {
int err = perf_session__process_events(session);
pr_err("Failed to process events, error %d", err);
sched->nr_events = session->evlist->stats.nr_events[0];
sched->nr_lost_events = session->evlist->stats.total_lost;
sched->nr_lost_chunks = session->evlist->stats.nr_events[PERF_RECORD_LOST];
perf_session__delete(session);
* scheduling times are printed as msec.usec
static inline void print_sched_time(unsigned long long nsecs, int width)
unsigned long msecs;
unsigned long usecs;
msecs = nsecs / NSEC_PER_MSEC;
nsecs -= msecs * NSEC_PER_MSEC;
usecs = nsecs / NSEC_PER_USEC;
printf("%*lu.%03lu ", width, msecs, usecs);
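/*
 * For example, print_sched_time(12345678, 6) splits 12345678 ns into
 * 12 msecs and 345 usecs and prints "    12.345 " (the width applies
 * to the msec part).
 */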
* returns runtime data for event, allocating memory for it the
* first time it is used.
static struct evsel_runtime *evsel__get_runtime(struct evsel *evsel)
struct evsel_runtime *r = evsel->priv;
r = zalloc(sizeof(struct evsel_runtime));
* save last time event was seen per cpu
static void evsel__save_time(struct evsel *evsel, u64 timestamp, u32 cpu)
struct evsel_runtime *r = evsel__get_runtime(evsel);
if ((cpu >= r->ncpu) || (r->last_time == NULL)) {
int i, n = __roundup_pow_of_two(cpu+1);
void *p = r->last_time;
p = realloc(r->last_time, n * sizeof(u64));
for (i = r->ncpu; i < n; ++i)
r->last_time[i] = (u64) 0;
r->last_time[cpu] = timestamp;
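/*
 * The per-cpu array grows to the next power of two above the cpu
 * number: first seeing cpu 5 allocates 8 slots, a later sample on
 * cpu 8 grows it to 16, and the newly added slots are zero-filled.
 */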
/* returns last time this event was seen on the given cpu */
static u64 evsel__get_time(struct evsel *evsel, u32 cpu)
struct evsel_runtime *r = evsel__get_runtime(evsel);
if ((r == NULL) || (r->last_time == NULL) || (cpu >= r->ncpu))
return r->last_time[cpu];
static int comm_width = 30;
static char *timehist_get_commstr(struct thread *thread)
static char str[32];
const char *comm = thread__comm_str(thread);
pid_t tid = thread__tid(thread);
pid_t pid = thread__pid(thread);
n = scnprintf(str, sizeof(str), "%s", comm);
else if (tid != pid)
n = scnprintf(str, sizeof(str), "%s[%d/%d]", comm, tid, pid);
n = scnprintf(str, sizeof(str), "%s[%d]", comm, tid);
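/*
 * Examples: a single-threaded "bash" with tid == pid == 1234 is
 * rendered as "bash[1234]", while a worker thread with tid 5678 in
 * that process is rendered as "bash[5678/1234]".
 */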
/* prio field format: xxx or xxx->yyy */
#define MAX_PRIO_STR_LEN 8
static char *timehist_get_priostr(struct evsel *evsel,
struct thread *thread,
struct perf_sample *sample)
static char prio_str[16];
int prev_prio = (int)evsel__intval(evsel, sample, "prev_prio");
struct thread_runtime *tr = thread__priv(thread);
if (tr->prio != prev_prio && tr->prio != -1)
scnprintf(prio_str, sizeof(prio_str), "%d->%d", tr->prio, prev_prio);
scnprintf(prio_str, sizeof(prio_str), "%d", prev_prio);
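/*
 * E.g. if the task's saved priority is 120 but the event reports
 * prev_prio 100, the field reads "120->100"; if the priority never
 * changed it reads simply "100".
 */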
static void timehist_header(struct perf_sched *sched)
u32 ncpus = sched->max_cpu.cpu + 1;
printf("%15s %6s ", "time", "cpu");
if (sched->show_cpu_visual) {
for (i = 0, j = 0; i < ncpus; ++i) {
printf(" %-*s", comm_width, "task name");
if (sched->show_prio)
printf(" %-*s", MAX_PRIO_STR_LEN, "prio");
printf(" %9s %9s %9s", "wait time", "sch delay", "run time");
if (sched->pre_migrations)
printf(" %9s", "pre-mig time");
if (sched->show_state)
printf(" %s", "state");
printf("%15s %-6s ", "", "");
if (sched->show_cpu_visual)
printf(" %*s ", ncpus, "");
printf(" %-*s", comm_width, "[tid/pid]");
if (sched->show_prio)
printf(" %-*s", MAX_PRIO_STR_LEN, "");
printf(" %9s %9s %9s", "(msec)", "(msec)", "(msec)");
if (sched->pre_migrations)
printf(" %9s", "(msec)");
printf("%.15s %.6s ", graph_dotted_line, graph_dotted_line);
if (sched->show_cpu_visual)
printf(" %.*s ", ncpus, graph_dotted_line);
printf(" %.*s", comm_width, graph_dotted_line);
if (sched->show_prio)
printf(" %.*s", MAX_PRIO_STR_LEN, graph_dotted_line);
printf(" %.9s %.9s %.9s", graph_dotted_line, graph_dotted_line, graph_dotted_line);
if (sched->pre_migrations)
printf(" %.9s", graph_dotted_line);
if (sched->show_state)
printf(" %.5s", graph_dotted_line);
static void timehist_print_sample(struct perf_sched *sched,
struct evsel *evsel,
struct perf_sample *sample,
struct addr_location *al,
struct thread *thread,
u64 t, const char state)
struct thread_runtime *tr = thread__priv(thread);
const char *next_comm = evsel__strval(evsel, sample, "next_comm");
const u32 next_pid = evsel__intval(evsel, sample, "next_pid");
u32 max_cpus = sched->max_cpu.cpu + 1;
if (cpu_list && !test_bit(sample->cpu, cpu_bitmap))
timestamp__scnprintf_usec(t, tstr, sizeof(tstr));
printf("%15s [%04d] ", tstr, sample->cpu);
if (sched->show_cpu_visual) {
for (i = 0; i < max_cpus; ++i) {
/* flag idle times with 'i'; others are sched events */
if (i == sample->cpu)
c = (thread__tid(thread) == 0) ? 'i' : 's';
printf(" %-*s ", comm_width, timehist_get_commstr(thread));
if (sched->show_prio)
printf(" %-*s ", MAX_PRIO_STR_LEN, timehist_get_priostr(evsel, thread, sample));
wait_time = tr->dt_sleep + tr->dt_iowait + tr->dt_preempt;
print_sched_time(wait_time, 6);
print_sched_time(tr->dt_delay, 6);
print_sched_time(tr->dt_run, 6);
if (sched->pre_migrations)
print_sched_time(tr->dt_pre_mig, 6);
if (sched->show_state)
printf(" %5c ", thread__tid(thread) == 0 ? 'I' : state);
if (sched->show_next) {
snprintf(nstr, sizeof(nstr), "next: %s[%d]", next_comm, next_pid);
printf(" %-*s", comm_width, nstr);
if (sched->show_wakeups && !sched->show_next)
printf(" %-*s", comm_width, "");
if (thread__tid(thread) == 0)
if (sched->show_callchain)
sample__fprintf_sym(sample, al, 0,
EVSEL__PRINT_SYM | EVSEL__PRINT_ONELINE |
EVSEL__PRINT_CALLCHAIN_ARROW |
EVSEL__PRINT_SKIP_IGNORED,
get_tls_callchain_cursor(), symbol_conf.bt_stop_list, stdout);
* Explanation of delta-time stats:
*
*            t = time of current schedule out event
*        tprev = time of previous sched out event
*                also time of schedule-in event for current task
*    last_time = time of last sched change event for current task
*                (i.e., time process was last scheduled out)
* ready_to_run = time of wakeup for current task
*     migrated = time of task migration to another CPU
*
* -----|-------------|-------------|-------------|-------------|-----
*    last          ready       migrated        tprev            t
*
*      |---------------- dt_wait ----------------|
*      |--------- dt_delay ---------|-- dt_run --|
*
*     dt_run = run time of current task
*    dt_wait = time between last schedule out event for task and tprev
*              represents time spent off the cpu
*   dt_delay = time between wakeup and schedule-in of task
* dt_pre_mig = time between wakeup and migration to another CPU
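*
* Worked example (times in ns): with last_time = 100, ready_to_run =
* 200, migrated = 250, tprev = 300 and t = 400, the code below yields
* dt_run = 100, dt_wait = 200 (classified by last_state as sleep,
* iowait or preempt), dt_delay = 100, and dt_pre_mig = 50 since the
* migration fell between wakeup and sched-in.
*/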
static void timehist_update_runtime_stats(struct thread_runtime *r,
r->dt_run = t - tprev;
if (r->ready_to_run) {
if (r->ready_to_run > tprev)
pr_debug("time travel: wakeup time for task > previous sched_switch event\n");
r->dt_delay = tprev - r->ready_to_run;
if ((r->migrated > r->ready_to_run) && (r->migrated < tprev))
r->dt_pre_mig = r->migrated - r->ready_to_run;
if (r->last_time > tprev)
pr_debug("time travel: last sched out time for task > previous sched_switch event\n");
else if (r->last_time) {
u64 dt_wait = tprev - r->last_time;
if (r->last_state == 'R')
r->dt_preempt = dt_wait;
else if (r->last_state == 'D')
r->dt_iowait = dt_wait;
r->dt_sleep = dt_wait;
update_stats(&r->run_stats, r->dt_run);
r->total_run_time += r->dt_run;
r->total_delay_time += r->dt_delay;
r->total_sleep_time += r->dt_sleep;
r->total_iowait_time += r->dt_iowait;
r->total_preempt_time += r->dt_preempt;
r->total_pre_mig_time += r->dt_pre_mig;
static bool is_idle_sample(struct perf_sample *sample,
struct evsel *evsel)
/* pid 0 == swapper == idle task */
if (evsel__name_is(evsel, "sched:sched_switch"))
return evsel__intval(evsel, sample, "prev_pid") == 0;
return sample->pid == 0;
static void save_task_callchain(struct perf_sched *sched,
struct perf_sample *sample,
struct evsel *evsel,
struct machine *machine)
struct callchain_cursor *cursor;
struct thread *thread;
/* want main thread for process - has maps */
thread = machine__findnew_thread(machine, sample->pid, sample->pid);
if (thread == NULL) {
pr_debug("Failed to get thread for pid %d.\n", sample->pid);
if (!sched->show_callchain || sample->callchain == NULL)
cursor = get_tls_callchain_cursor();
if (thread__resolve_callchain(thread, cursor, evsel, sample,
NULL, NULL, sched->max_stack + 2) != 0) {
pr_err("Failed to resolve callchain. Skipping\n");
callchain_cursor_commit(cursor);
struct callchain_cursor_node *node;
node = callchain_cursor_current(cursor);
if (!strcmp(sym->name, "schedule") ||
!strcmp(sym->name, "__schedule") ||
!strcmp(sym->name, "preempt_schedule"))
callchain_cursor_advance(cursor);
static int init_idle_thread(struct thread *thread)
struct idle_thread_runtime *itr;
thread__set_comm(thread, idle_comm, 0);
itr = zalloc(sizeof(*itr));
init_prio(&itr->tr);
init_stats(&itr->tr.run_stats);
callchain_init(&itr->callchain);
callchain_cursor_reset(&itr->cursor);
thread__set_priv(thread, itr);
* Track idle stats per cpu by maintaining a local thread
* struct for the idle task on each cpu.
static int init_idle_threads(int ncpu)
idle_threads = zalloc(ncpu * sizeof(struct thread *));
idle_max_cpu = ncpu;
/* allocate the actual thread struct if needed */
for (i = 0; i < ncpu; ++i) {
idle_threads[i] = thread__new(0, 0);
if (idle_threads[i] == NULL)
ret = init_idle_thread(idle_threads[i]);
static void free_idle_threads(void)
if (idle_threads == NULL)
for (i = 0; i < idle_max_cpu; ++i) {
if ((idle_threads[i]))
thread__delete(idle_threads[i]);
static struct thread *get_idle_thread(int cpu)
* expand/allocate array of pointers to local thread
if ((cpu >= idle_max_cpu) || (idle_threads == NULL)) {
int i, j = __roundup_pow_of_two(cpu+1);
p = realloc(idle_threads, j * sizeof(struct thread *));
idle_threads = (struct thread **) p;
for (i = idle_max_cpu; i < j; ++i)
idle_threads[i] = NULL;
/* allocate a new thread struct if needed */
if (idle_threads[cpu] == NULL) {
idle_threads[cpu] = thread__new(0, 0);
if (idle_threads[cpu]) {
if (init_idle_thread(idle_threads[cpu]) < 0)
return idle_threads[cpu];
static void save_idle_callchain(struct perf_sched *sched,
struct idle_thread_runtime *itr,
struct perf_sample *sample)
struct callchain_cursor *cursor;
if (!sched->show_callchain || sample->callchain == NULL)
cursor = get_tls_callchain_cursor();
callchain_cursor__copy(&itr->cursor, cursor);
static struct thread *timehist_get_thread(struct perf_sched *sched,
struct perf_sample *sample,
struct machine *machine,
struct evsel *evsel)
struct thread *thread;
if (is_idle_sample(sample, evsel)) {
thread = get_idle_thread(sample->cpu);
pr_err("Failed to get idle thread for cpu %d.\n", sample->cpu);
/* there were samples with tid 0 but non-zero pid */
thread = machine__findnew_thread(machine, sample->pid,
sample->tid ?: sample->pid);
if (thread == NULL) {
pr_debug("Failed to get thread for tid %d. skipping sample.\n",
save_task_callchain(sched, sample, evsel, machine);
if (sched->idle_hist) {
struct thread *idle;
struct idle_thread_runtime *itr;
idle = get_idle_thread(sample->cpu);
pr_err("Failed to get idle thread for cpu %d.\n", sample->cpu);
itr = thread__priv(idle);
itr->last_thread = thread;
/* copy task callchain when entering idle */
if (evsel__intval(evsel, sample, "next_pid") == 0)
save_idle_callchain(sched, itr, sample);
static bool timehist_skip_sample(struct perf_sched *sched,
struct thread *thread,
struct evsel *evsel,
struct perf_sample *sample)
struct thread_runtime *tr = NULL;
if (thread__is_filtered(thread)) {
sched->skipped_samples++;
if (sched->prio_str) {
* Because a task's priority can change while it runs, first use the
* priority saved at the previous sched_in event for the current task.
* If none was saved, fall back to the prev_prio of the current
* sched_out event.
2529 tr = thread__get_runtime(thread);
2530 if (tr && tr->prio != -1)
2532 else if (evsel__name_is(evsel, "sched:sched_switch"))
2533 prio = evsel__intval(evsel, sample, "prev_prio");
2535 if (prio != -1 && !test_bit(prio, sched->prio_bitmap)) {
2537 sched->skipped_samples++;
2541 if (sched->idle_hist) {
2542 if (!evsel__name_is(evsel, "sched:sched_switch"))
2544 else if (evsel__intval(evsel, sample, "prev_pid") != 0 &&
2545 evsel__intval(evsel, sample, "next_pid") != 0)
2552 static void timehist_print_wakeup_event(struct perf_sched *sched,
2553 struct evsel *evsel,
2554 struct perf_sample *sample,
2555 struct machine *machine,
2556 struct thread *awakened)
2558 struct thread *thread;
2561 thread = machine__findnew_thread(machine, sample->pid, sample->tid);
2565 /* show wakeup unless both awakee and awaker are filtered */
2566 if (timehist_skip_sample(sched, thread, evsel, sample) &&
2567 timehist_skip_sample(sched, awakened, evsel, sample)) {
2571 timestamp__scnprintf_usec(sample->time, tstr, sizeof(tstr));
2572 printf("%15s [%04d] ", tstr, sample->cpu);
2573 if (sched->show_cpu_visual)
2574 printf(" %*s ", sched->max_cpu.cpu + 1, "");
2576 printf(" %-*s ", comm_width, timehist_get_commstr(thread));
2579 printf(" %9s %9s %9s ", "", "", "");
2581 printf("awakened: %s", timehist_get_commstr(awakened));
2586 static int timehist_sched_wakeup_ignore(const struct perf_tool *tool __maybe_unused,
2587 union perf_event *event __maybe_unused,
2588 struct evsel *evsel __maybe_unused,
2589 struct perf_sample *sample __maybe_unused,
2590 struct machine *machine __maybe_unused)
2595 static int timehist_sched_wakeup_event(const struct perf_tool *tool,
2596 union perf_event *event __maybe_unused,
2597 struct evsel *evsel,
2598 struct perf_sample *sample,
2599 struct machine *machine)
2601 struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
2602 struct thread *thread;
2603 struct thread_runtime *tr = NULL;
2604 /* want pid of awakened task not pid in sample */
2605 const u32 pid = evsel__intval(evsel, sample, "pid");
2607 thread = machine__findnew_thread(machine, 0, pid);
2608 if (thread == NULL)
2609 return -1;
2611 tr = thread__get_runtime(thread);
2612 if (tr == NULL)
2613 return -1;
2615 if (tr->ready_to_run == 0)
2616 tr->ready_to_run = sample->time;
2618 /* show wakeups if requested */
2619 if (sched->show_wakeups &&
2620 !perf_time__skip_sample(&sched->ptime, sample->time))
2621 timehist_print_wakeup_event(sched, evsel, sample, machine, thread);
2626 static void timehist_print_migration_event(struct perf_sched *sched,
2627 struct evsel *evsel,
2628 struct perf_sample *sample,
2629 struct machine *machine,
2630 struct thread *migrated)
2632 struct thread *thread;
2637 if (sched->summary_only)
2640 max_cpus = sched->max_cpu.cpu + 1;
2641 ocpu = evsel__intval(evsel, sample, "orig_cpu");
2642 dcpu = evsel__intval(evsel, sample, "dest_cpu");
2644 thread = machine__findnew_thread(machine, sample->pid, sample->tid);
2645 if (thread == NULL)
2646 return;
2648 if (timehist_skip_sample(sched, thread, evsel, sample) &&
2649 timehist_skip_sample(sched, migrated, evsel, sample)) {
2650 return;
2651 }
2653 timestamp__scnprintf_usec(sample->time, tstr, sizeof(tstr));
2654 printf("%15s [%04d] ", tstr, sample->cpu);
2656 if (sched->show_cpu_visual) {
2661 for (i = 0; i < max_cpus; ++i) {
2662 c = (i == sample->cpu) ? 'm' : ' ';
2663 printf("%c", c);
2664 }
2666 }
2668 printf(" %-*s ", comm_width, timehist_get_commstr(thread));
2671 printf(" %9s %9s %9s ", "", "", "");
2673 printf("migrated: %s", timehist_get_commstr(migrated));
2674 printf(" cpu %d => %d", ocpu, dcpu);
2679 static int timehist_migrate_task_event(const struct perf_tool *tool,
2680 union perf_event *event __maybe_unused,
2681 struct evsel *evsel,
2682 struct perf_sample *sample,
2683 struct machine *machine)
2685 struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
2686 struct thread *thread;
2687 struct thread_runtime *tr = NULL;
2688 /* want pid of migrated task not pid in sample */
2689 const u32 pid = evsel__intval(evsel, sample, "pid");
2691 thread = machine__findnew_thread(machine, 0, pid);
2692 if (thread == NULL)
2693 return -1;
2695 tr = thread__get_runtime(thread);
2696 if (tr == NULL)
2697 return -1;
2699 tr->migrations++;
2700 tr->migrated = sample->time;
2702 /* show migrations if requested */
2703 if (sched->show_migrations) {
2704 timehist_print_migration_event(sched, evsel, sample,
2705 machine, thread);
2706 }
2708 return 0;
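/*
 * Record the priority a task is scheduled in with (the sched_switch
 * "next_prio" field) on its runtime struct, so later samples can be
 * filtered by --prio even when they carry no priority themselves.
 */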
2711 static void timehist_update_task_prio(struct evsel *evsel,
2712 struct perf_sample *sample,
2713 struct machine *machine)
2715 struct thread *thread;
2716 struct thread_runtime *tr = NULL;
2717 const u32 next_pid = evsel__intval(evsel, sample, "next_pid");
2718 const u32 next_prio = evsel__intval(evsel, sample, "next_prio");
2720 if (next_pid == 0)
2721 thread = get_idle_thread(sample->cpu);
2722 else
2723 thread = machine__findnew_thread(machine, -1, next_pid);
2725 if (thread == NULL)
2726 return;
2728 tr = thread__get_runtime(thread);
2729 if (tr == NULL)
2730 return;
2732 tr->prio = next_prio;
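/*
 * timehist_sched_change_event() below is the core sched_switch handler.
 * A rough sketch of the per-task timeline it accounts (field names from
 * struct thread_runtime):
 *
 *   wakeup            sched-in (tprev)        sched-out (t)
 *     |--- dt_delay ---|------- dt_run -------|
 *
 * tprev comes from evsel__get_time() for this CPU (the previous switch),
 * and both t and tprev are clamped to the --time window before
 * timehist_update_runtime_stats() folds t - tprev into the stats.
 */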
2735 static int timehist_sched_change_event(const struct perf_tool *tool,
2736 union perf_event *event,
2737 struct evsel *evsel,
2738 struct perf_sample *sample,
2739 struct machine *machine)
2741 struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
2742 struct perf_time_interval *ptime = &sched->ptime;
2743 struct addr_location al;
2744 struct thread *thread;
2745 struct thread_runtime *tr = NULL;
2746 u64 tprev, t = sample->time;
2747 int rc = 0;
2748 const char state = evsel__taskstate(evsel, sample, "prev_state");
2750 addr_location__init(&al);
2751 if (machine__resolve(machine, &al, sample) < 0) {
2752 pr_err("problem processing %d event. skipping it\n",
2753 event->header.type);
2754 rc = -1;
2755 goto out;
2756 }
2758 if (sched->show_prio || sched->prio_str)
2759 timehist_update_task_prio(evsel, sample, machine);
2761 thread = timehist_get_thread(sched, sample, machine, evsel);
2762 if (thread == NULL) {
2763 rc = -1;
2764 goto out;
2765 }
2767 if (timehist_skip_sample(sched, thread, evsel, sample))
2768 goto out;
2770 tr = thread__get_runtime(thread);
2771 if (tr == NULL) {
2772 rc = -1;
2773 goto out;
2774 }
2776 tprev = evsel__get_time(evsel, sample->cpu);
2778 /*
2779 * If a start time was given:
2780 * - if the sample time is before the window of interest, skip the sample
2781 * - if tprev is before the window of interest, reset it to the window start
2782 */
2783 if (ptime->start && ptime->start > t)
2784 goto out;
2786 if (tprev && ptime->start > tprev)
2787 tprev = ptime->start;
2789 /*
2790 * If an end time was given:
2791 * - if the previous sched event is beyond the window, we are done
2792 * - if the sample time is beyond the window of interest, reset it
2793 * to the window end, to close out stats for the time window
2794 * - if tprev is 0, i.e. the sched_in event for the current task was
2795 * not recorded, we cannot determine whether the sched_in falls
2796 * within the window of interest, so ignore the sample
2797 */
2798 if (ptime->end) {
2799 if (!tprev || tprev > ptime->end)
2800 goto out;
2802 if (t > ptime->end)
2803 t = ptime->end;
2804 }
2806 if (!sched->idle_hist || thread__tid(thread) == 0) {
2807 if (!cpu_list || test_bit(sample->cpu, cpu_bitmap))
2808 timehist_update_runtime_stats(tr, t, tprev);
2810 if (sched->idle_hist) {
2811 struct idle_thread_runtime *itr = (void *)tr;
2812 struct thread_runtime *last_tr;
2814 if (itr->last_thread == NULL)
2815 goto out;
2817 /* add current idle time as last thread's runtime */
2818 last_tr = thread__get_runtime(itr->last_thread);
2819 if (last_tr == NULL)
2820 goto out;
2822 timehist_update_runtime_stats(last_tr, t, tprev);
2823 /*
2824 * Remove the delta times of the last thread as they are not updated
2825 * here and would otherwise show invalid values on the next sample;
2826 * we only care about the total run time and the run stats.
2827 */
2828 last_tr->dt_run = 0;
2829 last_tr->dt_delay = 0;
2830 last_tr->dt_sleep = 0;
2831 last_tr->dt_iowait = 0;
2832 last_tr->dt_preempt = 0;
2835 callchain_append(&itr->callchain, &itr->cursor, t - tprev);
2837 itr->last_thread = NULL;
2840 if (!sched->summary_only)
2841 timehist_print_sample(sched, evsel, sample, &al, thread, t, state);
2844 out:
2845 if (sched->hist_time.start == 0 && t >= ptime->start)
2846 sched->hist_time.start = t;
2847 if (ptime->end == 0 || t <= ptime->end)
2848 sched->hist_time.end = t;
2850 if (tr) {
2851 /* the time of this sched_switch event becomes the last time the task was seen */
2852 tr->last_time = sample->time;
2854 /* last state is used to determine where to account wait time */
2855 tr->last_state = state;
2857 /* sched-out event for the task, so reset its ready-to-run and migrated times */
2858 if (state == 'R')
2859 tr->ready_to_run = t;
2860 else
2861 tr->ready_to_run = 0;
2862 tr->migrated = 0;
2863 }
2866 evsel__save_time(evsel, sample->time, sample->cpu);
2868 addr_location__exit(&al);
2869 return rc;
2872 static int timehist_sched_switch_event(const struct perf_tool *tool,
2873 union perf_event *event,
2874 struct evsel *evsel,
2875 struct perf_sample *sample,
2876 struct machine *machine)
2878 return timehist_sched_change_event(tool, event, evsel, sample, machine);
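/*
 * PERF_RECORD_LOST handler: reports ring-buffer loss inline in the
 * timehist stream, e.g. (illustrative):
 *
 *     2372.403487 lost 1274 events on cpu 3
 */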
2881 static int process_lost(const struct perf_tool *tool __maybe_unused,
2882 union perf_event *event,
2883 struct perf_sample *sample,
2884 struct machine *machine __maybe_unused)
2888 timestamp__scnprintf_usec(sample->time, tstr, sizeof(tstr));
2889 printf("%15s ", tstr);
2890 printf("lost %" PRI_lu64 " events on cpu %d\n", event->lost.lost, sample->cpu);
2896 static void print_thread_runtime(struct thread *t,
2897 struct thread_runtime *r)
2899 double mean = avg_stats(&r->run_stats);
2900 float stddev;
2902 printf("%*s %5d %9" PRIu64 " ",
2903 comm_width, timehist_get_commstr(t), thread__ppid(t),
2904 (u64) r->run_stats.n);
2906 print_sched_time(r->total_run_time, 8);
2907 stddev = rel_stddev_stats(stddev_stats(&r->run_stats), mean);
2908 print_sched_time(r->run_stats.min, 6);
2910 print_sched_time((u64) mean, 6);
2912 print_sched_time(r->run_stats.max, 6);
2914 printf("%5.2f", stddev);
2915 printf(" %5" PRIu64, r->migrations);
2919 static void print_thread_waittime(struct thread *t,
2920 struct thread_runtime *r)
2922 printf("%*s %5d %9" PRIu64 " ",
2923 comm_width, timehist_get_commstr(t), thread__ppid(t),
2924 (u64) r->run_stats.n);
2926 print_sched_time(r->total_run_time, 8);
2927 print_sched_time(r->total_sleep_time, 6);
2929 print_sched_time(r->total_iowait_time, 6);
2931 print_sched_time(r->total_preempt_time, 6);
2933 print_sched_time(r->total_delay_time, 6);
2937 struct total_run_stats {
2938 struct perf_sched *sched;
2944 static int show_thread_runtime(struct thread *t, void *priv)
2946 struct total_run_stats *stats = priv;
2947 struct thread_runtime *r;
2949 if (thread__is_filtered(t))
2950 return 0;
2952 r = thread__priv(t);
2953 if (r && r->run_stats.n) {
2954 stats->task_count++;
2955 stats->sched_count += r->run_stats.n;
2956 stats->total_run_time += r->total_run_time;
2958 if (stats->sched->show_state)
2959 print_thread_waittime(t, r);
2960 else
2961 print_thread_runtime(t, r);
2967 static size_t callchain__fprintf_folded(FILE *fp, struct callchain_node *node)
2969 const char *sep = " <- ";
2970 struct callchain_list *chain;
2971 char bf[1024];
2972 bool first;
2973 size_t ret = 0;
2975 if (node == NULL)
2976 return 0;
2978 ret = callchain__fprintf_folded(fp, node->parent);
2979 first = (ret == 0);
2981 list_for_each_entry(chain, &node->val, list) {
2982 if (chain->ip >= PERF_CONTEXT_MAX)
2983 continue;
2984 if (chain->ms.sym && chain->ms.sym->ignore)
2985 continue;
2986 ret += fprintf(fp, "%s%s", first ? "" : sep,
2987 callchain_list__sym_name(chain, bf, sizeof(bf),
2988 false));
2989 first = false;
2995 static size_t timehist_print_idlehist_callchain(struct rb_root_cached *root)
2997 size_t ret = 0;
2998 FILE *fp = stdout;
2999 struct callchain_node *chain;
3000 struct rb_node *rb_node = rb_first_cached(root);
3002 printf(" %16s %8s %s\n", "Idle time (msec)", "Count", "Callchains");
3003 printf(" %.16s %.8s %.50s\n", graph_dotted_line, graph_dotted_line,
3006 while (rb_node) {
3007 chain = rb_entry(rb_node, struct callchain_node, rb_node);
3008 rb_node = rb_next(rb_node);
3010 ret += fprintf(fp, " ");
3011 print_sched_time(chain->hit, 12);
3012 ret += 16; /* print_sched_time returns 2nd arg + 4 */
3013 ret += fprintf(fp, " %8d ", chain->count);
3014 ret += callchain__fprintf_folded(fp, chain);
3015 ret += fprintf(fp, "\n");
3016 }
3018 return ret;
3021 static void timehist_print_summary(struct perf_sched *sched,
3022 struct perf_session *session)
3024 struct machine *m = &session->machines.host;
3025 struct total_run_stats totals;
3026 unsigned int i;
3027 struct thread *t;
3028 struct thread_runtime *r;
3029 int task_count;
3030 u64 hist_time = sched->hist_time.end - sched->hist_time.start;
3032 memset(&totals, 0, sizeof(totals));
3033 totals.sched = sched;
3035 if (sched->idle_hist) {
3036 printf("\nIdle-time summary\n");
3037 printf("%*s parent sched-out ", comm_width, "comm");
3038 printf(" idle-time min-idle avg-idle max-idle stddev migrations\n");
3039 } else if (sched->show_state) {
3040 printf("\nWait-time summary\n");
3041 printf("%*s parent sched-in ", comm_width, "comm");
3042 printf(" run-time sleep iowait preempt delay\n");
3044 printf("\nRuntime summary\n");
3045 printf("%*s parent sched-in ", comm_width, "comm");
3046 printf(" run-time min-run avg-run max-run stddev migrations\n");
3048 printf("%*s (count) ", comm_width, "");
3049 printf(" (msec) (msec) (msec) (msec) %s\n",
3050 sched->show_state ? "(msec)" : "%");
3051 printf("%.117s\n", graph_dotted_line);
3053 machine__for_each_thread(m, show_thread_runtime, &totals);
3054 task_count = totals.task_count;
3056 printf("<no still running tasks>\n");
3058 /* CPU idle stats not tracked when samples were skipped */
3059 if (sched->skipped_samples && !sched->idle_hist)
3060 return;
3062 printf("\nIdle stats:\n");
3063 for (i = 0; i < idle_max_cpu; ++i) {
3064 if (cpu_list && !test_bit(i, cpu_bitmap))
3065 continue;
3067 t = idle_threads[i];
3068 if (!t)
3069 continue;
3071 r = thread__priv(t);
3072 if (r && r->run_stats.n) {
3073 totals.sched_count += r->run_stats.n;
3074 printf(" CPU %2d idle for ", i);
3075 print_sched_time(r->total_run_time, 6);
3076 printf(" msec (%6.2f%%)\n", 100.0 * r->total_run_time / hist_time);
3078 printf(" CPU %2d idle entire time window\n", i);
3081 if (sched->idle_hist && sched->show_callchain) {
3082 callchain_param.mode = CHAIN_FOLDED;
3083 callchain_param.value = CCVAL_PERIOD;
3085 callchain_register_param(&callchain_param);
3087 printf("\nIdle stats by callchain:\n");
3088 for (i = 0; i < idle_max_cpu; ++i) {
3089 struct idle_thread_runtime *itr;
3091 t = idle_threads[i];
3092 if (!t)
3093 continue;
3095 itr = thread__priv(t);
3096 if (itr == NULL)
3097 continue;
3099 callchain_param.sort(&itr->sorted_root.rb_root, &itr->callchain,
3100 0, &callchain_param);
3102 printf(" CPU %2d:", i);
3103 print_sched_time(itr->tr.total_run_time, 6);
3104 printf(" msec\n");
3105 timehist_print_idlehist_callchain(&itr->sorted_root);
3111 " Total number of unique tasks: %" PRIu64 "\n"
3112 "Total number of context switches: %" PRIu64 "\n",
3113 totals.task_count, totals.sched_count);
3115 printf(" Total run time (msec): ");
3116 print_sched_time(totals.total_run_time, 2);
3117 printf("\n");
3119 printf(" Total scheduling time (msec): ");
3120 print_sched_time(hist_time, 2);
3121 printf(" (x %d)\n", sched->max_cpu.cpu);
3124 typedef int (*sched_handler)(const struct perf_tool *tool,
3125 union perf_event *event,
3126 struct evsel *evsel,
3127 struct perf_sample *sample,
3128 struct machine *machine);
3130 static int perf_timehist__process_sample(const struct perf_tool *tool,
3131 union perf_event *event,
3132 struct perf_sample *sample,
3133 struct evsel *evsel,
3134 struct machine *machine)
3136 struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
3137 int err = 0;
3138 struct perf_cpu this_cpu = {
3139 .cpu = sample->cpu,
3140 };
3142 if (this_cpu.cpu > sched->max_cpu.cpu)
3143 sched->max_cpu = this_cpu;
3145 if (evsel->handler != NULL) {
3146 sched_handler f = evsel->handler;
3148 err = f(tool, event, evsel, sample, machine);
3149 }
3151 return err;
3154 static int timehist_check_attr(struct perf_sched *sched,
3155 struct evlist *evlist)
3157 struct evsel *evsel;
3158 struct evsel_runtime *er;
3160 list_for_each_entry(evsel, &evlist->core.entries, core.node) {
3161 er = evsel__get_runtime(evsel);
3163 pr_err("Failed to allocate memory for evsel runtime data\n");
3167 /* only need to save callchain related to sched_switch event */
3168 if (sched->show_callchain &&
3169 evsel__name_is(evsel, "sched:sched_switch") &&
3170 !evsel__has_callchain(evsel)) {
3171 pr_info("Samples of sched_switch event do not have callchains.\n");
3172 sched->show_callchain = 0;
3173 symbol_conf.use_callchain = 0;
3174 }
3175 }
3177 return 0;
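/*
 * timehist_parse_prio_str() below parses the --prio argument into
 * sched->prio_bitmap. The accepted form is a comma-separated list of
 * priorities and inclusive ranges below MAX_PRIO, e.g. (illustrative):
 *
 *   perf sched timehist --prio 9,120-139
 */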
3180 static int timehist_parse_prio_str(struct perf_sched *sched)
3182 char *p;
3183 unsigned long start_prio, end_prio;
3184 const char *str = sched->prio_str;
3186 if (!str)
3187 return 0;
3189 while (isdigit(*str)) {
3191 start_prio = strtoul(str, &p, 0);
3192 if (start_prio >= MAX_PRIO || (*p != '\0' && *p != ',' && *p != '-'))
3193 return -1;
3195 if (*p == '-') {
3196 str = ++p;
3198 end_prio = strtoul(str, &p, 0);
3200 if (end_prio >= MAX_PRIO || (*p != '\0' && *p != ','))
3201 return -1;
3203 if (end_prio < start_prio)
3204 return -1;
3205 } else {
3206 end_prio = start_prio;
3207 }
3209 for (; start_prio <= end_prio; start_prio++)
3210 __set_bit(start_prio, sched->prio_bitmap);
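/*
 * Typical use of the timehist subcommand below (illustrative): record the
 * sched tracepoints, then replay them through this analysis:
 *
 *   perf sched record -- sleep 1
 *   perf sched timehist --summary --prio 100-139
 *
 * "sched:sched_switch" must be present in the recording; sched_waking is
 * preferred over sched_wakeup when both were captured.
 */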
3221 static int perf_sched__timehist(struct perf_sched *sched)
3223 struct evsel_str_handler handlers[] = {
3224 { "sched:sched_switch", timehist_sched_switch_event, },
3225 { "sched:sched_wakeup", timehist_sched_wakeup_event, },
3226 { "sched:sched_waking", timehist_sched_wakeup_event, },
3227 { "sched:sched_wakeup_new", timehist_sched_wakeup_event, },
3229 const struct evsel_str_handler migrate_handlers[] = {
3230 { "sched:sched_migrate_task", timehist_migrate_task_event, },
3232 struct perf_data data = {
3233 .path = input_name,
3234 .mode = PERF_DATA_MODE_READ,
3235 .force = sched->force,
3238 struct perf_session *session;
3239 struct evlist *evlist;
3240 int err = -1;
3243 /* event handlers for timehist option */
3245 sched->tool.sample = perf_timehist__process_sample;
3246 sched->tool.mmap = perf_event__process_mmap;
3247 sched->tool.comm = perf_event__process_comm;
3248 sched->tool.exit = perf_event__process_exit;
3249 sched->tool.fork = perf_event__process_fork;
3250 sched->tool.lost = process_lost;
3251 sched->tool.attr = perf_event__process_attr;
3252 sched->tool.tracing_data = perf_event__process_tracing_data;
3253 sched->tool.build_id = perf_event__process_build_id;
3255 sched->tool.ordering_requires_timestamps = true;
3257 symbol_conf.use_callchain = sched->show_callchain;
3259 session = perf_session__new(&data, &sched->tool);
3260 if (IS_ERR(session))
3261 return PTR_ERR(session);
3263 if (cpu_list) {
3264 err = perf_session__cpu_bitmap(session, cpu_list, cpu_bitmap);
3265 if (err < 0)
3266 goto out;
3267 }
3269 evlist = session->evlist;
3271 symbol__init(&session->header.env);
3273 if (perf_time__parse_str(&sched->ptime, sched->time_str) != 0) {
3274 pr_err("Invalid time string\n");
3279 if (timehist_check_attr(sched, evlist) != 0)
3280 goto out;
3282 if (timehist_parse_prio_str(sched) != 0) {
3283 pr_err("Invalid prio string\n");
3289 /* prefer sched_waking if it is captured */
3290 if (evlist__find_tracepoint_by_name(session->evlist, "sched:sched_waking"))
3291 handlers[1].handler = timehist_sched_wakeup_ignore;
3293 /* setup per-evsel handlers */
3294 if (perf_session__set_tracepoints_handlers(session, handlers))
3295 goto out;
3297 /* sched_switch event at a minimum needs to exist */
3298 if (!evlist__find_tracepoint_by_name(session->evlist, "sched:sched_switch")) {
3299 pr_err("No sched_switch events found. Have you run 'perf sched record'?\n");
3303 if ((sched->show_migrations || sched->pre_migrations) &&
3304 perf_session__set_tracepoints_handlers(session, migrate_handlers))
3305 goto out;
3307 /* pre-allocate struct for per-CPU idle stats */
3308 sched->max_cpu.cpu = session->header.env.nr_cpus_online;
3309 if (sched->max_cpu.cpu == 0)
3310 sched->max_cpu.cpu = 4;
3311 if (init_idle_threads(sched->max_cpu.cpu))
3312 goto out;
3314 /* summary_only implies summary option, but don't overwrite summary if set */
3315 if (sched->summary_only)
3316 sched->summary = sched->summary_only;
3318 if (!sched->summary_only)
3319 timehist_header(sched);
3321 err = perf_session__process_events(session);
3323 pr_err("Failed to process events, error %d", err);
3327 sched->nr_events = evlist->stats.nr_events[0];
3328 sched->nr_lost_events = evlist->stats.total_lost;
3329 sched->nr_lost_chunks = evlist->stats.nr_events[PERF_RECORD_LOST];
3331 if (sched->summary)
3332 timehist_print_summary(sched, session);
3334 out:
3335 free_idle_threads();
3336 perf_session__delete(session);
3338 return err;
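/*
 * print_bad_events() below summarizes data-quality problems seen while
 * processing, e.g. (illustrative):
 *
 *   INFO: 0.030% lost events (15 out of 49823, in 2 chunks)
 *
 * "context switch bugs" are switch events whose prev task does not match
 * the task believed to be current, often a side effect of lost events.
 */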
3342 static void print_bad_events(struct perf_sched *sched)
3344 if (sched->nr_unordered_timestamps && sched->nr_timestamps) {
3345 printf(" INFO: %.3f%% unordered timestamps (%ld out of %ld)\n",
3346 (double)sched->nr_unordered_timestamps/(double)sched->nr_timestamps*100.0,
3347 sched->nr_unordered_timestamps, sched->nr_timestamps);
3349 if (sched->nr_lost_events && sched->nr_events) {
3350 printf(" INFO: %.3f%% lost events (%ld out of %ld, in %ld chunks)\n",
3351 (double)sched->nr_lost_events/(double)sched->nr_events * 100.0,
3352 sched->nr_lost_events, sched->nr_events, sched->nr_lost_chunks);
3354 if (sched->nr_context_switch_bugs && sched->nr_timestamps) {
3355 printf(" INFO: %.3f%% context switch bugs (%ld out of %ld)",
3356 (double)sched->nr_context_switch_bugs/(double)sched->nr_timestamps*100.0,
3357 sched->nr_context_switch_bugs, sched->nr_timestamps);
3358 if (sched->nr_lost_events)
3359 printf(" (due to lost events?)");
3364 static void __merge_work_atoms(struct rb_root_cached *root, struct work_atoms *data)
3366 struct rb_node **new = &(root->rb_root.rb_node), *parent = NULL;
3367 struct work_atoms *this;
3368 const char *comm = thread__comm_str(data->thread), *this_comm;
3369 bool leftmost = true;
3371 while (*new) {
3372 int cmp;
3374 this = container_of(*new, struct work_atoms, node);
3375 parent = *new;
3377 this_comm = thread__comm_str(this->thread);
3378 cmp = strcmp(comm, this_comm);
3379 if (cmp > 0) {
3380 new = &((*new)->rb_left);
3381 } else if (cmp < 0) {
3382 new = &((*new)->rb_right);
3383 leftmost = false;
3384 } else {
3386 this->total_runtime += data->total_runtime;
3387 this->nb_atoms += data->nb_atoms;
3388 this->total_lat += data->total_lat;
3389 list_splice(&data->work_list, &this->work_list);
3390 if (this->max_lat < data->max_lat) {
3391 this->max_lat = data->max_lat;
3392 this->max_lat_start = data->max_lat_start;
3393 this->max_lat_end = data->max_lat_end;
3394 }
3395 zfree(&data);
3396 return;
3397 }
3398 }
3401 rb_link_node(&data->node, parent, new);
3402 rb_insert_color_cached(&data->node, root, leftmost);
3405 static void perf_sched__merge_lat(struct perf_sched *sched)
3407 struct work_atoms *data;
3408 struct rb_node *node;
3410 if (sched->skip_merge)
3411 return;
3413 while ((node = rb_first_cached(&sched->atom_root))) {
3414 rb_erase_cached(node, &sched->atom_root);
3415 data = rb_entry(node, struct work_atoms, node);
3416 __merge_work_atoms(&sched->merged_atom_root, data);
3420 static int setup_cpus_switch_event(struct perf_sched *sched)
3424 sched->cpu_last_switched = calloc(MAX_CPUS, sizeof(*(sched->cpu_last_switched)));
3425 if (!sched->cpu_last_switched)
3426 return -1;
3428 sched->curr_pid = malloc(MAX_CPUS * sizeof(*(sched->curr_pid)));
3429 if (!sched->curr_pid) {
3430 zfree(&sched->cpu_last_switched);
3431 return -1;
3432 }
3434 for (i = 0; i < MAX_CPUS; i++)
3435 sched->curr_pid[i] = -1;
3437 return 0;
3440 static void free_cpus_switch_event(struct perf_sched *sched)
3442 zfree(&sched->curr_pid);
3443 zfree(&sched->cpu_last_switched);
3446 static int perf_sched__lat(struct perf_sched *sched)
3449 struct rb_node *next;
3450 int rc = -1;
3452 setup_pager();
3453 if (setup_cpus_switch_event(sched))
3454 return rc;
3456 if (perf_sched__read_events(sched))
3457 goto out_free_cpus_switch_event;
3459 perf_sched__merge_lat(sched);
3460 perf_sched__sort_lat(sched);
3462 printf("\n -------------------------------------------------------------------------------------------------------------------------------------------\n");
3463 printf(" Task | Runtime ms | Count | Avg delay ms | Max delay ms | Max delay start | Max delay end |\n");
3464 printf(" -------------------------------------------------------------------------------------------------------------------------------------------\n");
3466 next = rb_first_cached(&sched->sorted_atom_root);
3468 while (next) {
3469 struct work_atoms *work_list;
3471 work_list = rb_entry(next, struct work_atoms, node);
3472 output_lat_thread(sched, work_list);
3473 next = rb_next(next);
3474 thread__zput(work_list->thread);
3475 }
3477 printf(" -----------------------------------------------------------------------------------------------------------------\n");
3478 printf(" TOTAL: |%11.3f ms |%9" PRIu64 " |\n",
3479 (double)sched->all_runtime / NSEC_PER_MSEC, sched->all_count);
3481 printf(" ---------------------------------------------------\n");
3483 print_bad_events(sched);
3488 out_free_cpus_switch_event:
3489 free_cpus_switch_event(sched);
3493 static int setup_map_cpus(struct perf_sched *sched)
3495 sched->max_cpu.cpu = sysconf(_SC_NPROCESSORS_CONF);
3497 if (sched->map.comp) {
3498 sched->map.comp_cpus = zalloc(sched->max_cpu.cpu * sizeof(int));
3499 if (!sched->map.comp_cpus)
3500 return -1;
3503 if (sched->map.cpus_str) {
3504 sched->map.cpus = perf_cpu_map__new(sched->map.cpus_str);
3505 if (!sched->map.cpus) {
3506 pr_err("failed to get cpus map from %s\n", sched->map.cpus_str);
3507 zfree(&sched->map.comp_cpus);
3508 return -1;
3509 }
3510 }
3512 return 0;
3515 static int setup_color_pids(struct perf_sched *sched)
3517 struct perf_thread_map *map;
3519 if (!sched->map.color_pids_str)
3520 return 0;
3522 map = thread_map__new_by_tid_str(sched->map.color_pids_str);
3524 pr_err("failed to get thread map from %s\n", sched->map.color_pids_str);
3528 sched->map.color_pids = map;
3532 static int setup_color_cpus(struct perf_sched *sched)
3534 struct perf_cpu_map *map;
3536 if (!sched->map.color_cpus_str)
3537 return 0;
3539 map = perf_cpu_map__new(sched->map.color_cpus_str);
3541 pr_err("failed to get thread map from %s\n", sched->map.color_cpus_str);
3545 sched->map.color_cpus = map;
3549 static int perf_sched__map(struct perf_sched *sched)
3551 int rc = -1;
3553 sched->curr_thread = calloc(MAX_CPUS, sizeof(*(sched->curr_thread)));
3554 if (!sched->curr_thread)
3555 return rc;
3557 sched->curr_out_thread = calloc(MAX_CPUS, sizeof(*(sched->curr_out_thread)));
3558 if (!sched->curr_out_thread)
3559 return rc;
3561 if (setup_cpus_switch_event(sched))
3562 goto out_free_curr_thread;
3564 if (setup_map_cpus(sched))
3565 goto out_free_cpus_switch_event;
3567 if (setup_color_pids(sched))
3568 goto out_put_map_cpus;
3570 if (setup_color_cpus(sched))
3571 goto out_put_color_pids;
3574 if (perf_sched__read_events(sched))
3575 goto out_put_color_cpus;
3577 rc = 0;
3578 print_bad_events(sched);
3580 out_put_color_cpus:
3581 perf_cpu_map__put(sched->map.color_cpus);
3583 out_put_color_pids:
3584 perf_thread_map__put(sched->map.color_pids);
3586 out_put_map_cpus:
3587 zfree(&sched->map.comp_cpus);
3588 perf_cpu_map__put(sched->map.cpus);
3590 out_free_cpus_switch_event:
3591 free_cpus_switch_event(sched);
3593 out_free_curr_thread:
3594 zfree(&sched->curr_thread);
3596 return rc;
3598 static int perf_sched__replay(struct perf_sched *sched)
3600 unsigned long i;
3601 int ret;
3603 mutex_init(&sched->start_work_mutex);
3604 mutex_init(&sched->work_done_wait_mutex);
3606 ret = setup_cpus_switch_event(sched);
3607 if (ret)
3608 goto out_mutex_destroy;
3610 calibrate_run_measurement_overhead(sched);
3611 calibrate_sleep_measurement_overhead(sched);
3613 test_calibrations(sched);
3615 ret = perf_sched__read_events(sched);
3616 if (ret)
3617 goto out_free_cpus_switch_event;
3619 printf("nr_run_events: %ld\n", sched->nr_run_events);
3620 printf("nr_sleep_events: %ld\n", sched->nr_sleep_events);
3621 printf("nr_wakeup_events: %ld\n", sched->nr_wakeup_events);
3623 if (sched->targetless_wakeups)
3624 printf("target-less wakeups: %ld\n", sched->targetless_wakeups);
3625 if (sched->multitarget_wakeups)
3626 printf("multi-target wakeups: %ld\n", sched->multitarget_wakeups);
3627 if (sched->nr_run_events_optimized)
3628 printf("run atoms optimized: %ld\n",
3629 sched->nr_run_events_optimized);
3631 print_task_traces(sched);
3632 add_cross_task_wakeups(sched);
3634 sched->thread_funcs_exit = false;
3635 create_tasks(sched);
3636 printf("------------------------------------------------------------\n");
3637 if (sched->replay_repeat == 0)
3638 sched->replay_repeat = UINT_MAX;
3640 for (i = 0; i < sched->replay_repeat; i++)
3641 run_one_test(sched);
3643 sched->thread_funcs_exit = true;
3644 destroy_tasks(sched);
3646 out_free_cpus_switch_event:
3647 free_cpus_switch_event(sched);
3649 out_mutex_destroy:
3650 mutex_destroy(&sched->start_work_mutex);
3651 mutex_destroy(&sched->work_done_wait_mutex);
3653 return ret;
3655 static void setup_sorting(struct perf_sched *sched, const struct option *options,
3656 const char * const usage_msg[])
3658 char *tmp, *tok, *str = strdup(sched->sort_order);
3660 for (tok = strtok_r(str, ", ", &tmp);
3661 tok; tok = strtok_r(NULL, ", ", &tmp)) {
3662 if (sort_dimension__add(tok, &sched->sort_list) < 0) {
3663 usage_with_options_msg(usage_msg, options,
3664 "Unknown --sort key: `%s'", tok);
3670 sort_dimension__add("pid", &sched->cmp_pid);
3673 static bool schedstat_events_exposed(void)
3676 * Select "sched:sched_stat_wait" event to check
3677 * whether schedstat tracepoints are exposed.
3679 return IS_ERR(trace_event__tp_format("sched", "sched_stat_wait")) ?
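/*
 * __cmd_record() below builds the argv for 'perf record', roughly
 * (illustrative; the schedstat events are appended only when the kernel
 * exposes them):
 *
 *   perf record -a -R -m 1024 -c 1 -e sched:sched_switch ...
 *               -e sched:sched_waking|sched:sched_wakeup <user args>
 */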
3683 static int __cmd_record(int argc, const char **argv)
3685 unsigned int rec_argc, i, j;
3686 const char **rec_argv;
3687 const char **rec_argv_copy;
3688 const char * const record_args[] = {
3689 "record",
3690 "-a",
3691 "-R",
3692 "-m", "1024",
3693 "-c", "1",
3694 "-e", "sched:sched_switch",
3695 "-e", "sched:sched_stat_runtime",
3696 "-e", "sched:sched_process_fork",
3697 "-e", "sched:sched_wakeup_new",
3698 "-e", "sched:sched_migrate_task",
3701 /*
3702 * The trace_sched_stat_{wait,sleep,iowait} tracepoints are not
3703 * exposed to the user if CONFIG_SCHEDSTATS is not set. To keep
3704 * "perf sched record" from failing, decide at run time whether
3705 * to record the schedstat events based on their presence.
3706 */
3707 const char * const schedstat_args[] = {
3708 "-e", "sched:sched_stat_wait",
3709 "-e", "sched:sched_stat_sleep",
3710 "-e", "sched:sched_stat_iowait",
3712 unsigned int schedstat_argc = schedstat_events_exposed() ?
3713 ARRAY_SIZE(schedstat_args) : 0;
3715 struct tep_event *waking_event;
3716 int ret;
3719 * +2 for either "-e", "sched:sched_wakeup" or
3720 * "-e", "sched:sched_waking"
3722 rec_argc = ARRAY_SIZE(record_args) + 2 + schedstat_argc + argc - 1;
3723 rec_argv = calloc(rec_argc + 1, sizeof(char *));
3724 if (rec_argv == NULL)
3725 return -ENOMEM;
3726 rec_argv_copy = calloc(rec_argc + 1, sizeof(char *));
3727 if (rec_argv_copy == NULL) {
3728 free(rec_argv);
3729 return -ENOMEM;
3730 }
3732 for (i = 0; i < ARRAY_SIZE(record_args); i++)
3733 rec_argv[i] = strdup(record_args[i]);
3735 rec_argv[i++] = strdup("-e");
3736 waking_event = trace_event__tp_format("sched", "sched_waking");
3737 if (!IS_ERR(waking_event))
3738 rec_argv[i++] = strdup("sched:sched_waking");
3740 rec_argv[i++] = strdup("sched:sched_wakeup");
3742 for (j = 0; j < schedstat_argc; j++)
3743 rec_argv[i++] = strdup(schedstat_args[j]);
3745 for (j = 1; j < (unsigned int)argc; j++, i++)
3746 rec_argv[i] = strdup(argv[j]);
3748 BUG_ON(i != rec_argc);
3750 memcpy(rec_argv_copy, rec_argv, sizeof(char *) * rec_argc);
3751 ret = cmd_record(rec_argc, rec_argv_copy);
3753 for (i = 0; i < rec_argc; i++)
3754 free((void *)rec_argv[i]);
3755 free(rec_argv);
3756 free(rec_argv_copy);
3758 return ret;
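/*
 * cmd_sched() below dispatches to the subcommands handled further down,
 * e.g. (illustrative):
 *
 *   perf sched record -- sleep 1
 *   perf sched latency --sort max
 *   perf sched map --compact --color-cpus 0-3
 *   perf sched timehist -w --state
 *
 * Subcommand names may be abbreviated to a unique prefix of at least
 * three characters (see the strstarts() checks).
 */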
3761 int cmd_sched(int argc, const char **argv)
3763 static const char default_sort_order[] = "avg, max, switch, runtime";
3764 struct perf_sched sched = {
3765 .cmp_pid = LIST_HEAD_INIT(sched.cmp_pid),
3766 .sort_list = LIST_HEAD_INIT(sched.sort_list),
3767 .sort_order = default_sort_order,
3768 .replay_repeat = 10,
3769 .profile_cpu = -1,
3770 .next_shortname1 = 'A',
3771 .next_shortname2 = '0',
3773 .show_callchain = 1,
3776 const struct option sched_options[] = {
3777 OPT_STRING('i', "input", &input_name, "file",
3779 OPT_INCR('v', "verbose", &verbose,
3780 "be more verbose (show symbol address, etc)"),
3781 OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
3782 "dump raw trace in ASCII"),
3783 OPT_BOOLEAN('f', "force", &sched.force, "don't complain, do it"),
3786 const struct option latency_options[] = {
3787 OPT_STRING('s', "sort", &sched.sort_order, "key[,key2...]",
3788 "sort by key(s): runtime, switch, avg, max"),
3789 OPT_INTEGER('C', "CPU", &sched.profile_cpu,
3790 "CPU to profile on"),
3791 OPT_BOOLEAN('p', "pids", &sched.skip_merge,
3792 "latency stats per pid instead of per comm"),
3793 OPT_PARENT(sched_options)
3795 const struct option replay_options[] = {
3796 OPT_UINTEGER('r', "repeat", &sched.replay_repeat,
3797 "repeat the workload replay N times (0: infinite)"),
3798 OPT_PARENT(sched_options)
3800 const struct option map_options[] = {
3801 OPT_BOOLEAN(0, "compact", &sched.map.comp,
3802 "map output in compact mode"),
3803 OPT_STRING(0, "color-pids", &sched.map.color_pids_str, "pids",
3804 "highlight given pids in map"),
3805 OPT_STRING(0, "color-cpus", &sched.map.color_cpus_str, "cpus",
3806 "highlight given CPUs in map"),
3807 OPT_STRING(0, "cpus", &sched.map.cpus_str, "cpus",
3808 "display given CPUs in map"),
3809 OPT_STRING(0, "task-name", &sched.map.task_name, "task",
3810 "map output only for the given task name(s)."),
3811 OPT_BOOLEAN(0, "fuzzy-name", &sched.map.fuzzy,
3812 "given command name can be partially matched (fuzzy matching)"),
3813 OPT_PARENT(sched_options)
3815 const struct option timehist_options[] = {
3816 OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
3817 "file", "vmlinux pathname"),
3818 OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name,
3819 "file", "kallsyms pathname"),
3820 OPT_BOOLEAN('g', "call-graph", &sched.show_callchain,
3821 "Display call chains if present (default on)"),
3822 OPT_UINTEGER(0, "max-stack", &sched.max_stack,
3823 "Maximum number of functions to display backtrace."),
3824 OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory",
3825 "Look for files with symbols relative to this directory"),
3826 OPT_BOOLEAN('s', "summary", &sched.summary_only,
3827 "Show only syscall summary with statistics"),
3828 OPT_BOOLEAN('S', "with-summary", &sched.summary,
3829 "Show all syscalls and summary with statistics"),
3830 OPT_BOOLEAN('w', "wakeups", &sched.show_wakeups, "Show wakeup events"),
3831 OPT_BOOLEAN('n', "next", &sched.show_next, "Show next task"),
3832 OPT_BOOLEAN('M', "migrations", &sched.show_migrations, "Show migration events"),
3833 OPT_BOOLEAN('V', "cpu-visual", &sched.show_cpu_visual, "Add CPU visual"),
3834 OPT_BOOLEAN('I', "idle-hist", &sched.idle_hist, "Show idle events only"),
3835 OPT_STRING(0, "time", &sched.time_str, "str",
3836 "Time span for analysis (start,stop)"),
3837 OPT_BOOLEAN(0, "state", &sched.show_state, "Show task state when sched-out"),
3838 OPT_STRING('p', "pid", &symbol_conf.pid_list_str, "pid[,pid...]",
3839 "analyze events only for given process id(s)"),
3840 OPT_STRING('t', "tid", &symbol_conf.tid_list_str, "tid[,tid...]",
3841 "analyze events only for given thread id(s)"),
3842 OPT_STRING('C', "cpu", &cpu_list, "cpu", "list of cpus to profile"),
3843 OPT_BOOLEAN(0, "show-prio", &sched.show_prio, "Show task priority"),
3844 OPT_STRING(0, "prio", &sched.prio_str, "prio",
3845 "analyze events only for given task priority(ies)"),
3846 OPT_BOOLEAN('P', "pre-migrations", &sched.pre_migrations, "Show pre-migration wait time"),
3847 OPT_PARENT(sched_options)
3850 const char * const latency_usage[] = {
3851 "perf sched latency [<options>]",
3854 const char * const replay_usage[] = {
3855 "perf sched replay [<options>]",
3858 const char * const map_usage[] = {
3859 "perf sched map [<options>]",
3862 const char * const timehist_usage[] = {
3863 "perf sched timehist [<options>]",
3866 const char *const sched_subcommands[] = { "record", "latency", "map",
3869 const char *sched_usage[] = {
3873 struct trace_sched_handler lat_ops = {
3874 .wakeup_event = latency_wakeup_event,
3875 .switch_event = latency_switch_event,
3876 .runtime_event = latency_runtime_event,
3877 .migrate_task_event = latency_migrate_task_event,
3879 struct trace_sched_handler map_ops = {
3880 .switch_event = map_switch_event,
3882 struct trace_sched_handler replay_ops = {
3883 .wakeup_event = replay_wakeup_event,
3884 .switch_event = replay_switch_event,
3885 .fork_event = replay_fork_event,
3889 perf_tool__init(&sched.tool, /*ordered_events=*/true);
3890 sched.tool.sample = perf_sched__process_tracepoint_sample;
3891 sched.tool.comm = perf_sched__process_comm;
3892 sched.tool.namespaces = perf_event__process_namespaces;
3893 sched.tool.lost = perf_event__process_lost;
3894 sched.tool.fork = perf_sched__process_fork_event;
3896 argc = parse_options_subcommand(argc, argv, sched_options, sched_subcommands,
3897 sched_usage, PARSE_OPT_STOP_AT_NON_OPTION);
3898 if (!argc)
3899 usage_with_options(sched_usage, sched_options);
3902 /* Aliased to 'perf script' for now: */
3904 if (!strcmp(argv[0], "script")) {
3905 return cmd_script(argc, argv);
3906 } else if (strlen(argv[0]) > 2 && strstarts("record", argv[0])) {
3907 return __cmd_record(argc, argv);
3908 } else if (strlen(argv[0]) > 2 && strstarts("latency", argv[0])) {
3909 sched.tp_handler = &lat_ops;
3911 argc = parse_options(argc, argv, latency_options, latency_usage, 0);
3912 if (argc)
3913 usage_with_options(latency_usage, latency_options);
3915 setup_sorting(&sched, latency_options, latency_usage);
3916 return perf_sched__lat(&sched);
3917 } else if (!strcmp(argv[0], "map")) {
3919 argc = parse_options(argc, argv, map_options, map_usage, 0);
3920 if (argc)
3921 usage_with_options(map_usage, map_options);
3923 if (sched.map.task_name) {
3924 sched.map.task_names = strlist__new(sched.map.task_name, NULL);
3925 if (sched.map.task_names == NULL) {
3926 fprintf(stderr, "Failed to parse task names\n");
3931 sched.tp_handler = &map_ops;
3932 setup_sorting(&sched, latency_options, latency_usage);
3933 return perf_sched__map(&sched);
3934 } else if (strlen(argv[0]) > 2 && strstarts("replay", argv[0])) {
3935 sched.tp_handler = &replay_ops;
3937 argc = parse_options(argc, argv, replay_options, replay_usage, 0);
3938 if (argc)
3939 usage_with_options(replay_usage, replay_options);
3941 return perf_sched__replay(&sched);
3942 } else if (!strcmp(argv[0], "timehist")) {
3944 argc = parse_options(argc, argv, timehist_options,
3945 timehist_usage, 0);
3946 if (argc)
3947 usage_with_options(timehist_usage, timehist_options);
3949 if ((sched.show_wakeups || sched.show_next) &&
3950 sched.summary_only) {
3951 pr_err(" Error: -s and -[n|w] are mutually exclusive.\n");
3952 parse_options_usage(timehist_usage, timehist_options, "s", true);
3953 if (sched.show_wakeups)
3954 parse_options_usage(NULL, timehist_options, "w", true);
3955 if (sched.show_next)
3956 parse_options_usage(NULL, timehist_options, "n", true);
3959 ret = symbol__validate_sym_arguments();
3960 if (ret)
3961 return ret;
3963 return perf_sched__timehist(&sched);
3964 } else {
3965 usage_with_options(sched_usage, sched_options);
3966 }
3968 /* free usage string allocated by parse_options_subcommand */
3969 free((void *)sched_usage[0]);