// SPDX-License-Identifier: GPL-2.0
#include "util/cpumap.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/header.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/cloexec.h"
#include "util/thread_map.h"
#include "util/color.h"
#include "util/stat.h"
#include "util/string2.h"
#include "util/callchain.h"
#include "util/time-utils.h"

#include <subcmd/pager.h>
#include <subcmd/parse-options.h>
#include "util/trace-event.h"

#include "util/debug.h"
#include "util/event.h"

#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/zalloc.h>
#include <sys/prctl.h>
#include <sys/resource.h>

#include <semaphore.h>

#include <api/fs/fs.h>
#include <perf/cpumap.h>
#include <linux/time64.h>
#include <linux/err.h>

#include <linux/ctype.h>

#define PR_SET_NAME		15	/* Set process name */

#define MAX_PID			1024000
	unsigned long		nr_events;
	unsigned long		curr_event;
	struct sched_atom	**atoms;

enum sched_event_type {
	SCHED_EVENT_MIGRATION,

	enum sched_event_type	type;

	struct task_desc	*wakee;

#define TASK_STATE_TO_CHAR_STR "RSDTtZXxKWP"

/* task state bitmask, copied from include/linux/sched.h */
#define TASK_RUNNING		0
#define TASK_INTERRUPTIBLE	1
#define TASK_UNINTERRUPTIBLE	2
#define __TASK_STOPPED		4
#define __TASK_TRACED		8
/* in tsk->exit_state */
#define EXIT_ZOMBIE		32
#define EXIT_TRACE		(EXIT_ZOMBIE | EXIT_DEAD)
/* in tsk->state again */
#define TASK_WAKEKILL		128
#define TASK_WAKING		256
#define TASK_PARKED		512

	struct list_head	list;
	enum thread_state	state;

	struct list_head	work_list;
	struct thread		*thread;

typedef int (*sort_fn_t)(struct work_atoms *, struct work_atoms *);
struct trace_sched_handler {
	int (*switch_event)(struct perf_sched *sched, struct evsel *evsel,
			    struct perf_sample *sample, struct machine *machine);

	int (*runtime_event)(struct perf_sched *sched, struct evsel *evsel,
			     struct perf_sample *sample, struct machine *machine);

	int (*wakeup_event)(struct perf_sched *sched, struct evsel *evsel,
			    struct perf_sample *sample, struct machine *machine);

	/* PERF_RECORD_FORK event, not sched_process_fork tracepoint */
	int (*fork_event)(struct perf_sched *sched, union perf_event *event,
			  struct machine *machine);

	int (*migrate_task_event)(struct perf_sched *sched,
				  struct perf_sample *sample,
				  struct machine *machine);

#define COLOR_PIDS PERF_COLOR_BLUE
#define COLOR_CPUS PERF_COLOR_BG_RED

struct perf_sched_map {
	DECLARE_BITMAP(comp_cpus_mask, MAX_CPUS);
	struct perf_thread_map	*color_pids;
	const char		*color_pids_str;
	struct perf_cpu_map	*color_cpus;
	const char		*color_cpus_str;
	struct perf_cpu_map	*cpus;
	const char		*cpus_str;
	struct perf_tool tool;
	const char	 *sort_order;
	unsigned long	 nr_tasks;
	struct task_desc **pid_to_task;
	struct task_desc **tasks;
	const struct trace_sched_handler *tp_handler;
	pthread_mutex_t	 start_work_mutex;
	pthread_mutex_t	 work_done_wait_mutex;
	/*
	 * Track the current task - that way we can know whether there are any
	 * weird events, such as a task being switched away that is not current.
	 */
	u32		 curr_pid[MAX_CPUS];
	struct thread	 *curr_thread[MAX_CPUS];
	char		 next_shortname1;
	char		 next_shortname2;
	unsigned int	 replay_repeat;
	unsigned long	 nr_run_events;
	unsigned long	 nr_sleep_events;
	unsigned long	 nr_wakeup_events;
	unsigned long	 nr_sleep_corrections;
	unsigned long	 nr_run_events_optimized;
	unsigned long	 targetless_wakeups;
	unsigned long	 multitarget_wakeups;
	unsigned long	 nr_runs;
	unsigned long	 nr_timestamps;
	unsigned long	 nr_unordered_timestamps;
	unsigned long	 nr_context_switch_bugs;
	unsigned long	 nr_events;
	unsigned long	 nr_lost_chunks;
	unsigned long	 nr_lost_events;
	u64		 run_measurement_overhead;
	u64		 sleep_measurement_overhead;
	u64		 runavg_cpu_usage;
	u64		 parent_cpu_usage;
	u64		 runavg_parent_cpu_usage;
	u64		 cpu_last_switched[MAX_CPUS];
	struct rb_root_cached atom_root, sorted_atom_root, merged_atom_root;
	struct list_head sort_list, cmp_pid;
	struct perf_sched_map map;

	/* options for timehist command */
	unsigned int	max_stack;
	bool		show_cpu_visual;
	bool		show_migrations;
	const char	*time_str;
	struct perf_time_interval ptime;
	struct perf_time_interval hist_time;
/* per thread run time data */
struct thread_runtime {
	u64 last_time;      /* time of previous sched in/out event */
	u64 dt_run;         /* run time */
	u64 dt_sleep;       /* time between CPU access by sleep (off cpu) */
	u64 dt_iowait;      /* time between CPU access by iowait (off cpu) */
	u64 dt_preempt;     /* time between CPU access by preempt (off cpu) */
	u64 dt_delay;       /* time between wakeup and sched-in */
	u64 ready_to_run;   /* time of wakeup */

	struct stats run_stats;

	u64 total_sleep_time;
	u64 total_iowait_time;
	u64 total_preempt_time;
	u64 total_delay_time;

/* per event run time data */
struct evsel_runtime {
	u64 *last_time; /* time this event was last seen per cpu */
	u32 ncpu;       /* highest cpu slot allocated */

/* per cpu idle time data */
struct idle_thread_runtime {
	struct thread_runtime	tr;
	struct thread		*last_thread;
	struct rb_root_cached	sorted_root;
	struct callchain_root	callchain;
	struct callchain_cursor	cursor;

/* track idle times per cpu */
static struct thread **idle_threads;
static int idle_max_cpu;
static char idle_comm[] = "<idle>";
static u64 get_nsecs(void)

	clock_gettime(CLOCK_MONOTONIC, &ts);

	return ts.tv_sec * NSEC_PER_SEC + ts.tv_nsec;

static void burn_nsecs(struct perf_sched *sched, u64 nsecs)

	u64 T0 = get_nsecs(), T1;

	} while (T1 + sched->run_measurement_overhead < T0 + nsecs);
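/*
 * Note: burn_nsecs() busy-loops on get_nsecs() until roughly 'nsecs' have
 * elapsed, pre-subtracting the calibrated cost of one timestamp read
 * (run_measurement_overhead, measured below) so that replayed run
 * intervals are not systematically inflated.
 */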
static void sleep_nsecs(u64 nsecs)

	ts.tv_nsec = nsecs % NSEC_PER_SEC;
	ts.tv_sec = nsecs / NSEC_PER_SEC;

	nanosleep(&ts, NULL);
static void calibrate_run_measurement_overhead(struct perf_sched *sched)

	u64 T0, T1, delta, min_delta = NSEC_PER_SEC;

	for (i = 0; i < 10; i++) {
		burn_nsecs(sched, 0);
		min_delta = min(min_delta, delta);

	sched->run_measurement_overhead = min_delta;

	printf("run measurement overhead: %" PRIu64 " nsecs\n", min_delta);

static void calibrate_sleep_measurement_overhead(struct perf_sched *sched)

	u64 T0, T1, delta, min_delta = NSEC_PER_SEC;

	for (i = 0; i < 10; i++) {
		min_delta = min(min_delta, delta);

	sched->sleep_measurement_overhead = min_delta;

	printf("sleep measurement overhead: %" PRIu64 " nsecs\n", min_delta);

static struct sched_atom *
get_new_event(struct task_desc *task, u64 timestamp)

	struct sched_atom *event = zalloc(sizeof(*event));
	unsigned long idx = task->nr_events;

	event->timestamp = timestamp;

	size = sizeof(struct sched_atom *) * task->nr_events;
	task->atoms = realloc(task->atoms, size);
	BUG_ON(!task->atoms);

	task->atoms[idx] = event;

static struct sched_atom *last_event(struct task_desc *task)

	if (!task->nr_events)

	return task->atoms[task->nr_events - 1];

static void add_sched_event_run(struct perf_sched *sched, struct task_desc *task,
				u64 timestamp, u64 duration)

	struct sched_atom *event, *curr_event = last_event(task);

	/*
	 * optimize an existing RUN event by merging this one
	 */
	if (curr_event && curr_event->type == SCHED_EVENT_RUN) {
		sched->nr_run_events_optimized++;
		curr_event->duration += duration;

	event = get_new_event(task, timestamp);

	event->type = SCHED_EVENT_RUN;
	event->duration = duration;

	sched->nr_run_events++;
static void add_sched_event_wakeup(struct perf_sched *sched, struct task_desc *task,
				   u64 timestamp, struct task_desc *wakee)

	struct sched_atom *event, *wakee_event;

	event = get_new_event(task, timestamp);
	event->type = SCHED_EVENT_WAKEUP;
	event->wakee = wakee;

	wakee_event = last_event(wakee);
	if (!wakee_event || wakee_event->type != SCHED_EVENT_SLEEP) {
		sched->targetless_wakeups++;

	if (wakee_event->wait_sem) {
		sched->multitarget_wakeups++;

	wakee_event->wait_sem = zalloc(sizeof(*wakee_event->wait_sem));
	sem_init(wakee_event->wait_sem, 0, 0);
	wakee_event->specific_wait = 1;
	event->wait_sem = wakee_event->wait_sem;

	sched->nr_wakeup_events++;
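/*
 * Replay-time pairing: the wakee's preceding SCHED_EVENT_SLEEP atom gets a
 * semaphore (wait_sem) that the wakee thread will sem_wait() on, and the
 * waker's SCHED_EVENT_WAKEUP atom keeps a pointer to the same semaphore so
 * it can sem_post() it, reproducing the recorded wakeup ordering (see
 * perf_sched__process_event() below).
 */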
static void add_sched_event_sleep(struct perf_sched *sched, struct task_desc *task,
				  u64 timestamp, u64 task_state __maybe_unused)

	struct sched_atom *event = get_new_event(task, timestamp);

	event->type = SCHED_EVENT_SLEEP;

	sched->nr_sleep_events++;

static struct task_desc *register_pid(struct perf_sched *sched,
				      unsigned long pid, const char *comm)

	struct task_desc *task;

	if (sched->pid_to_task == NULL) {
		if (sysctl__read_int("kernel/pid_max", &pid_max) < 0)
		BUG_ON((sched->pid_to_task = calloc(pid_max, sizeof(struct task_desc *))) == NULL);

	if (pid >= (unsigned long)pid_max) {
		BUG_ON((sched->pid_to_task = realloc(sched->pid_to_task, (pid + 1) *
			sizeof(struct task_desc *))) == NULL);
		while (pid >= (unsigned long)pid_max)
			sched->pid_to_task[pid_max++] = NULL;

	task = sched->pid_to_task[pid];

	task = zalloc(sizeof(*task));

	task->nr = sched->nr_tasks;
	strcpy(task->comm, comm);

	/*
	 * every task starts in sleeping state - this gets ignored
	 * if there's no wakeup pointing to this sleep state:
	 */
	add_sched_event_sleep(sched, task, 0, 0);

	sched->pid_to_task[pid] = task;

	sched->tasks = realloc(sched->tasks, sched->nr_tasks * sizeof(struct task_desc *));
	BUG_ON(!sched->tasks);
	sched->tasks[task->nr] = task;

	printf("registered task #%ld, PID %ld (%s)\n", sched->nr_tasks, pid, comm);
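/*
 * Note: pid_to_task is sized from kernel.pid_max on first use and grown on
 * demand when a recorded pid exceeds it (e.g. if pid_max was raised between
 * recording and replay); newly added slots stay NULL until a task registers.
 */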
static void print_task_traces(struct perf_sched *sched)

	struct task_desc *task;

	for (i = 0; i < sched->nr_tasks; i++) {
		task = sched->tasks[i];
		printf("task %6ld (%20s:%10ld), nr_events: %ld\n",
		       task->nr, task->comm, task->pid, task->nr_events);

static void add_cross_task_wakeups(struct perf_sched *sched)

	struct task_desc *task1, *task2;

	for (i = 0; i < sched->nr_tasks; i++) {
		task1 = sched->tasks[i];
		if (j == sched->nr_tasks)
		task2 = sched->tasks[j];
		add_sched_event_wakeup(sched, task1, 0, task2);

static void perf_sched__process_event(struct perf_sched *sched,
				      struct sched_atom *atom)

	switch (atom->type) {
	case SCHED_EVENT_RUN:
		burn_nsecs(sched, atom->duration);
	case SCHED_EVENT_SLEEP:
		ret = sem_wait(atom->wait_sem);
	case SCHED_EVENT_WAKEUP:
		ret = sem_post(atom->wait_sem);
	case SCHED_EVENT_MIGRATION:

static u64 get_cpu_usage_nsec_parent(void)

	err = getrusage(RUSAGE_SELF, &ru);

	sum  = ru.ru_utime.tv_sec * NSEC_PER_SEC + ru.ru_utime.tv_usec * NSEC_PER_USEC;
	sum += ru.ru_stime.tv_sec * NSEC_PER_SEC + ru.ru_stime.tv_usec * NSEC_PER_USEC;

static int self_open_counters(struct perf_sched *sched, unsigned long cur_task)

	struct perf_event_attr attr;
	char sbuf[STRERR_BUFSIZE], info[STRERR_BUFSIZE];
	bool need_privilege = false;

	memset(&attr, 0, sizeof(attr));

	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_TASK_CLOCK;

	fd = sys_perf_event_open(&attr, 0, -1, -1,
				 perf_event_open_cloexec_flag());

		if (errno == EMFILE) {
			BUG_ON(getrlimit(RLIMIT_NOFILE, &limit) == -1);
			limit.rlim_cur += sched->nr_tasks - cur_task;
			if (limit.rlim_cur > limit.rlim_max) {
				limit.rlim_max = limit.rlim_cur;
				need_privilege = true;
			if (setrlimit(RLIMIT_NOFILE, &limit) == -1) {
				if (need_privilege && errno == EPERM)
					strcpy(info, "Need privilege\n");
			strcpy(info, "Have a try with -f option\n");

		pr_err("Error: sys_perf_event_open() syscall returned "
		       "with %d (%s)\n%s", fd,
		       str_error_r(errno, sbuf, sizeof(sbuf)), info);

static u64 get_cpu_usage_nsec_self(int fd)

	ret = read(fd, &runtime, sizeof(runtime));
	BUG_ON(ret != sizeof(runtime));
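/*
 * Each replay thread opens its own PERF_COUNT_SW_TASK_CLOCK counter via
 * self_open_counters() above; a read() of the fd returns the thread's
 * accumulated CPU time in nanoseconds, which is more precise than the
 * parent's rusage-based numbers (see run_one_test()).
 */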
struct sched_thread_parms {
	struct task_desc  *task;
	struct perf_sched *sched;

static void *thread_func(void *ctx)

	struct sched_thread_parms *parms = ctx;
	struct task_desc *this_task = parms->task;
	struct perf_sched *sched = parms->sched;
	u64 cpu_usage_0, cpu_usage_1;
	unsigned long i, ret;

	sprintf(comm2, ":%s", this_task->comm);
	prctl(PR_SET_NAME, comm2);

	ret = sem_post(&this_task->ready_for_work);

	ret = pthread_mutex_lock(&sched->start_work_mutex);

	ret = pthread_mutex_unlock(&sched->start_work_mutex);

	cpu_usage_0 = get_cpu_usage_nsec_self(fd);

	for (i = 0; i < this_task->nr_events; i++) {
		this_task->curr_event = i;
		perf_sched__process_event(sched, this_task->atoms[i]);

	cpu_usage_1 = get_cpu_usage_nsec_self(fd);
	this_task->cpu_usage = cpu_usage_1 - cpu_usage_0;
	ret = sem_post(&this_task->work_done_sem);

	ret = pthread_mutex_lock(&sched->work_done_wait_mutex);

	ret = pthread_mutex_unlock(&sched->work_done_wait_mutex);

static void create_tasks(struct perf_sched *sched)

	struct task_desc *task;

	err = pthread_attr_init(&attr);

	err = pthread_attr_setstacksize(&attr,
			(size_t) max(16 * 1024, PTHREAD_STACK_MIN));

	err = pthread_mutex_lock(&sched->start_work_mutex);

	err = pthread_mutex_lock(&sched->work_done_wait_mutex);

	for (i = 0; i < sched->nr_tasks; i++) {
		struct sched_thread_parms *parms = malloc(sizeof(*parms));
		BUG_ON(parms == NULL);
		parms->task = task = sched->tasks[i];
		parms->sched = sched;
		parms->fd = self_open_counters(sched, i);
		sem_init(&task->sleep_sem, 0, 0);
		sem_init(&task->ready_for_work, 0, 0);
		sem_init(&task->work_done_sem, 0, 0);
		task->curr_event = 0;
		err = pthread_create(&task->thread, &attr, thread_func, parms);

static void wait_for_tasks(struct perf_sched *sched)

	u64 cpu_usage_0, cpu_usage_1;
	struct task_desc *task;
	unsigned long i, ret;

	sched->start_time = get_nsecs();
	sched->cpu_usage = 0;
	pthread_mutex_unlock(&sched->work_done_wait_mutex);

	for (i = 0; i < sched->nr_tasks; i++) {
		task = sched->tasks[i];
		ret = sem_wait(&task->ready_for_work);
		sem_init(&task->ready_for_work, 0, 0);

	ret = pthread_mutex_lock(&sched->work_done_wait_mutex);

	cpu_usage_0 = get_cpu_usage_nsec_parent();

	pthread_mutex_unlock(&sched->start_work_mutex);

	for (i = 0; i < sched->nr_tasks; i++) {
		task = sched->tasks[i];
		ret = sem_wait(&task->work_done_sem);
		sem_init(&task->work_done_sem, 0, 0);
		sched->cpu_usage += task->cpu_usage;

	cpu_usage_1 = get_cpu_usage_nsec_parent();
	if (!sched->runavg_cpu_usage)
		sched->runavg_cpu_usage = sched->cpu_usage;
	sched->runavg_cpu_usage = (sched->runavg_cpu_usage * (sched->replay_repeat - 1) + sched->cpu_usage) / sched->replay_repeat;

	sched->parent_cpu_usage = cpu_usage_1 - cpu_usage_0;
	if (!sched->runavg_parent_cpu_usage)
		sched->runavg_parent_cpu_usage = sched->parent_cpu_usage;
	sched->runavg_parent_cpu_usage = (sched->runavg_parent_cpu_usage * (sched->replay_repeat - 1) +
					  sched->parent_cpu_usage) / sched->replay_repeat;

	ret = pthread_mutex_lock(&sched->start_work_mutex);

	for (i = 0; i < sched->nr_tasks; i++) {
		task = sched->tasks[i];
		sem_init(&task->sleep_sem, 0, 0);
		task->curr_event = 0;
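/*
 * The two mutexes above form a simple barrier between the parent and the
 * worker threads. Roughly, per measurement pass:
 *
 *   parent (wait_for_tasks)              worker (thread_func)
 *   holds start_work_mutex               sem_post(ready_for_work)
 *   waits for all ready_for_work         blocks locking start_work_mutex
 *   unlocks start_work_mutex      --->   replays its events
 *   waits for all work_done_sem   <---   sem_post(work_done_sem)
 *   re-locks start_work_mutex            blocks on work_done_wait_mutex
 */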
static void run_one_test(struct perf_sched *sched)

	u64 T0, T1, delta, avg_delta, fluct;

	wait_for_tasks(sched);

	sched->sum_runtime += delta;

	avg_delta = sched->sum_runtime / sched->nr_runs;
	if (delta < avg_delta)
		fluct = avg_delta - delta;
		fluct = delta - avg_delta;
	sched->sum_fluct += fluct;

		sched->run_avg = delta;
	sched->run_avg = (sched->run_avg * (sched->replay_repeat - 1) + delta) / sched->replay_repeat;

	printf("#%-3ld: %0.3f, ", sched->nr_runs, (double)delta / NSEC_PER_MSEC);

	printf("ravg: %0.2f, ", (double)sched->run_avg / NSEC_PER_MSEC);

	printf("cpu: %0.2f / %0.2f",
	       (double)sched->cpu_usage / NSEC_PER_MSEC, (double)sched->runavg_cpu_usage / NSEC_PER_MSEC);

	/*
	 * rusage statistics done by the parent, these are less
	 * accurate than the sched->sum_exec_runtime based statistics:
	 */
	printf(" [%0.2f / %0.2f]",
	       (double)sched->parent_cpu_usage / NSEC_PER_MSEC,
	       (double)sched->runavg_parent_cpu_usage / NSEC_PER_MSEC);

	if (sched->nr_sleep_corrections)
		printf(" (%ld sleep corrections)\n", sched->nr_sleep_corrections);
	sched->nr_sleep_corrections = 0;
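/*
 * run_avg above is a running mean over replay_repeat passes:
 * run_avg' = (run_avg * (replay_repeat - 1) + delta) / replay_repeat.
 * E.g. with replay_repeat = 10 a single outlier pass only moves the
 * reported "ravg" by a tenth of its deviation.
 */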
static void test_calibrations(struct perf_sched *sched)

	burn_nsecs(sched, NSEC_PER_MSEC);

	printf("the run test took %" PRIu64 " nsecs\n", T1 - T0);

	sleep_nsecs(NSEC_PER_MSEC);

	printf("the sleep test took %" PRIu64 " nsecs\n", T1 - T0);

replay_wakeup_event(struct perf_sched *sched,
		    struct evsel *evsel, struct perf_sample *sample,
		    struct machine *machine __maybe_unused)

	const char *comm = perf_evsel__strval(evsel, sample, "comm");
	const u32 pid	 = perf_evsel__intval(evsel, sample, "pid");
	struct task_desc *waker, *wakee;

		printf("sched_wakeup event %p\n", evsel);

	printf(" ... pid %d woke up %s/%d\n", sample->tid, comm, pid);

	waker = register_pid(sched, sample->tid, "<unknown>");
	wakee = register_pid(sched, pid, comm);

	add_sched_event_wakeup(sched, waker, sample->time, wakee);

static int replay_switch_event(struct perf_sched *sched,
			       struct perf_sample *sample,
			       struct machine *machine __maybe_unused)

	const char *prev_comm = perf_evsel__strval(evsel, sample, "prev_comm"),
		   *next_comm = perf_evsel__strval(evsel, sample, "next_comm");
	const u32 prev_pid = perf_evsel__intval(evsel, sample, "prev_pid"),
		  next_pid = perf_evsel__intval(evsel, sample, "next_pid");
	const u64 prev_state = perf_evsel__intval(evsel, sample, "prev_state");
	struct task_desc *prev, __maybe_unused *next;
	u64 timestamp0, timestamp = sample->time;
	int cpu = sample->cpu;

		printf("sched_switch event %p\n", evsel);

	if (cpu >= MAX_CPUS || cpu < 0)

	timestamp0 = sched->cpu_last_switched[cpu];
		delta = timestamp - timestamp0;

		pr_err("hm, delta: %" PRIu64 " < 0 ?\n", delta);

	pr_debug(" ... switch from %s/%d to %s/%d [ran %" PRIu64 " nsecs]\n",
		 prev_comm, prev_pid, next_comm, next_pid, delta);

	prev = register_pid(sched, prev_pid, prev_comm);
	next = register_pid(sched, next_pid, next_comm);

	sched->cpu_last_switched[cpu] = timestamp;

	add_sched_event_run(sched, prev, timestamp, delta);
	add_sched_event_sleep(sched, prev, timestamp, prev_state);

static int replay_fork_event(struct perf_sched *sched,
			     union perf_event *event,
			     struct machine *machine)

	struct thread *child, *parent;

	child = machine__findnew_thread(machine, event->fork.pid,
	parent = machine__findnew_thread(machine, event->fork.ppid,

	if (child == NULL || parent == NULL) {
		pr_debug("thread does not exist on fork event: child %p, parent %p\n",

		printf("fork event\n");
		printf("... parent: %s/%d\n", thread__comm_str(parent), parent->tid);
		printf("... child: %s/%d\n", thread__comm_str(child), child->tid);

	register_pid(sched, parent->tid, thread__comm_str(parent));
	register_pid(sched, child->tid, thread__comm_str(child));
struct sort_dimension {
	struct list_head	list;

/*
 * handle runtime stats saved per thread
 */
static struct thread_runtime *thread__init_runtime(struct thread *thread)

	struct thread_runtime *r;

	r = zalloc(sizeof(struct thread_runtime));

	init_stats(&r->run_stats);
	thread__set_priv(thread, r);

static struct thread_runtime *thread__get_runtime(struct thread *thread)

	struct thread_runtime *tr;

	tr = thread__priv(thread);
		tr = thread__init_runtime(thread);
			pr_debug("Failed to malloc memory for runtime data.\n");

thread_lat_cmp(struct list_head *list, struct work_atoms *l, struct work_atoms *r)

	struct sort_dimension *sort;

	BUG_ON(list_empty(list));

	list_for_each_entry(sort, list, list) {
		ret = sort->cmp(l, r);

static struct work_atoms *
thread_atoms_search(struct rb_root_cached *root, struct thread *thread,
		    struct list_head *sort_list)

	struct rb_node *node = root->rb_root.rb_node;
	struct work_atoms key = { .thread = thread };

		struct work_atoms *atoms;

		atoms = container_of(node, struct work_atoms, node);

		cmp = thread_lat_cmp(sort_list, &key, atoms);
			node = node->rb_left;
			node = node->rb_right;
			BUG_ON(thread != atoms->thread);

__thread_latency_insert(struct rb_root_cached *root, struct work_atoms *data,
			struct list_head *sort_list)

	struct rb_node **new = &(root->rb_root.rb_node), *parent = NULL;
	bool leftmost = true;

		struct work_atoms *this;

		this = container_of(*new, struct work_atoms, node);

		cmp = thread_lat_cmp(sort_list, data, this);
			new = &((*new)->rb_left);
			new = &((*new)->rb_right);

	rb_link_node(&data->node, parent, new);
	rb_insert_color_cached(&data->node, root, leftmost);
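/*
 * Note: rb_root_cached keeps a pointer to the leftmost (smallest) node;
 * 'leftmost' above records whether this insertion ever descended right,
 * which lets rb_insert_color_cached() refresh that cached pointer in O(1).
 */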
static int thread_atoms_insert(struct perf_sched *sched, struct thread *thread)

	struct work_atoms *atoms = zalloc(sizeof(*atoms));

		pr_err("No memory at %s\n", __func__);

	atoms->thread = thread__get(thread);
	INIT_LIST_HEAD(&atoms->work_list);
	__thread_latency_insert(&sched->atom_root, atoms, &sched->cmp_pid);

static char sched_out_state(u64 prev_state)

	const char *str = TASK_STATE_TO_CHAR_STR;

	return str[prev_state];
add_sched_out_event(struct work_atoms *atoms,

	struct work_atom *atom = zalloc(sizeof(*atom));

		pr_err("No memory at %s", __func__);

	atom->sched_out_time = timestamp;

	if (run_state == 'R') {
		atom->state = THREAD_WAIT_CPU;
		atom->wake_up_time = atom->sched_out_time;

	list_add_tail(&atom->list, &atoms->work_list);
add_runtime_event(struct work_atoms *atoms, u64 delta,
		  u64 timestamp __maybe_unused)

	struct work_atom *atom;

	BUG_ON(list_empty(&atoms->work_list));

	atom = list_entry(atoms->work_list.prev, struct work_atom, list);

	atom->runtime += delta;
	atoms->total_runtime += delta;

add_sched_in_event(struct work_atoms *atoms, u64 timestamp)

	struct work_atom *atom;

	if (list_empty(&atoms->work_list))

	atom = list_entry(atoms->work_list.prev, struct work_atom, list);

	if (atom->state != THREAD_WAIT_CPU)

	if (timestamp < atom->wake_up_time) {
		atom->state = THREAD_IGNORE;

	atom->state = THREAD_SCHED_IN;
	atom->sched_in_time = timestamp;

	delta = atom->sched_in_time - atom->wake_up_time;
	atoms->total_lat += delta;
	if (delta > atoms->max_lat) {
		atoms->max_lat = delta;
		atoms->max_lat_at = timestamp;
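/*
 * Example: a task woken at t = 100us (wake_up_time) and scheduled in at
 * t = 130us contributes delta = 30us to total_lat; if that is the largest
 * delta so far it becomes max_lat, with max_lat_at = 130us. 'perf sched
 * latency' later reports avg = total_lat / nb_atoms per thread.
 */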
static int latency_switch_event(struct perf_sched *sched,
				struct evsel *evsel,
				struct perf_sample *sample,
				struct machine *machine)

	const u32 prev_pid = perf_evsel__intval(evsel, sample, "prev_pid"),
		  next_pid = perf_evsel__intval(evsel, sample, "next_pid");
	const u64 prev_state = perf_evsel__intval(evsel, sample, "prev_state");
	struct work_atoms *out_events, *in_events;
	struct thread *sched_out, *sched_in;
	u64 timestamp0, timestamp = sample->time;
	int cpu = sample->cpu, err = -1;

	BUG_ON(cpu >= MAX_CPUS || cpu < 0);

	timestamp0 = sched->cpu_last_switched[cpu];
	sched->cpu_last_switched[cpu] = timestamp;
		delta = timestamp - timestamp0;

		pr_err("hm, delta: %" PRIu64 " < 0 ?\n", delta);

	sched_out = machine__findnew_thread(machine, -1, prev_pid);
	sched_in = machine__findnew_thread(machine, -1, next_pid);
	if (sched_out == NULL || sched_in == NULL)

	out_events = thread_atoms_search(&sched->atom_root, sched_out, &sched->cmp_pid);
		if (thread_atoms_insert(sched, sched_out))
		out_events = thread_atoms_search(&sched->atom_root, sched_out, &sched->cmp_pid);
			pr_err("out-event: Internal tree error");

	if (add_sched_out_event(out_events, sched_out_state(prev_state), timestamp))

	in_events = thread_atoms_search(&sched->atom_root, sched_in, &sched->cmp_pid);
		if (thread_atoms_insert(sched, sched_in))
		in_events = thread_atoms_search(&sched->atom_root, sched_in, &sched->cmp_pid);
			pr_err("in-event: Internal tree error");
		/*
		 * Task came in that we have not heard about yet,
		 * add an initial atom in runnable state:
		 */
		if (add_sched_out_event(in_events, 'R', timestamp))

	add_sched_in_event(in_events, timestamp);

	thread__put(sched_out);
	thread__put(sched_in);

static int latency_runtime_event(struct perf_sched *sched,
				 struct evsel *evsel,
				 struct perf_sample *sample,
				 struct machine *machine)

	const u32 pid = perf_evsel__intval(evsel, sample, "pid");
	const u64 runtime = perf_evsel__intval(evsel, sample, "runtime");
	struct thread *thread = machine__findnew_thread(machine, -1, pid);
	struct work_atoms *atoms = thread_atoms_search(&sched->atom_root, thread, &sched->cmp_pid);
	u64 timestamp = sample->time;
	int cpu = sample->cpu, err = -1;

	BUG_ON(cpu >= MAX_CPUS || cpu < 0);

		if (thread_atoms_insert(sched, thread))
		atoms = thread_atoms_search(&sched->atom_root, thread, &sched->cmp_pid);
			pr_err("in-event: Internal tree error");
		if (add_sched_out_event(atoms, 'R', timestamp))

	add_runtime_event(atoms, runtime, timestamp);

	thread__put(thread);

static int latency_wakeup_event(struct perf_sched *sched,
				struct evsel *evsel,
				struct perf_sample *sample,
				struct machine *machine)

	const u32 pid = perf_evsel__intval(evsel, sample, "pid");
	struct work_atoms *atoms;
	struct work_atom *atom;
	struct thread *wakee;
	u64 timestamp = sample->time;

	wakee = machine__findnew_thread(machine, -1, pid);

	atoms = thread_atoms_search(&sched->atom_root, wakee, &sched->cmp_pid);
		if (thread_atoms_insert(sched, wakee))
		atoms = thread_atoms_search(&sched->atom_root, wakee, &sched->cmp_pid);
			pr_err("wakeup-event: Internal tree error");
		if (add_sched_out_event(atoms, 'S', timestamp))

	BUG_ON(list_empty(&atoms->work_list));

	atom = list_entry(atoms->work_list.prev, struct work_atom, list);
	/*
	 * A wakeup event is not guaranteed to arrive while the task is off
	 * the run queue: it can also fire for a task that is already on the
	 * run queue, merely changing ->state to TASK_RUNNING. In that case
	 * do not set ->wake_up_time for the already-runnable task.
	 *
	 * You WILL be missing events if you've recorded only
	 * one CPU, or are only looking at one, so don't
	 * skip in this case.
	 */
	if (sched->profile_cpu == -1 && atom->state != THREAD_SLEEPING)

	sched->nr_timestamps++;
	if (atom->sched_out_time > timestamp) {
		sched->nr_unordered_timestamps++;

	atom->state = THREAD_WAIT_CPU;
	atom->wake_up_time = timestamp;

static int latency_migrate_task_event(struct perf_sched *sched,
				      struct evsel *evsel,
				      struct perf_sample *sample,
				      struct machine *machine)

	const u32 pid = perf_evsel__intval(evsel, sample, "pid");
	u64 timestamp = sample->time;
	struct work_atoms *atoms;
	struct work_atom *atom;
	struct thread *migrant;

	/*
	 * Only need to worry about migration when profiling one CPU.
	 */
	if (sched->profile_cpu == -1)

	migrant = machine__findnew_thread(machine, -1, pid);
	if (migrant == NULL)
	atoms = thread_atoms_search(&sched->atom_root, migrant, &sched->cmp_pid);
		if (thread_atoms_insert(sched, migrant))
		register_pid(sched, migrant->tid, thread__comm_str(migrant));
		atoms = thread_atoms_search(&sched->atom_root, migrant, &sched->cmp_pid);
			pr_err("migration-event: Internal tree error");
		if (add_sched_out_event(atoms, 'R', timestamp))

	BUG_ON(list_empty(&atoms->work_list));

	atom = list_entry(atoms->work_list.prev, struct work_atom, list);
	atom->sched_in_time = atom->sched_out_time = atom->wake_up_time = timestamp;

	sched->nr_timestamps++;

	if (atom->sched_out_time > timestamp)
		sched->nr_unordered_timestamps++;

	thread__put(migrant);
static void output_lat_thread(struct perf_sched *sched, struct work_atoms *work_list)

	char max_lat_at[32];

	if (!work_list->nb_atoms)

	/*
	 * Ignore idle threads:
	 */
	if (!strcmp(thread__comm_str(work_list->thread), "swapper"))

	sched->all_runtime += work_list->total_runtime;
	sched->all_count += work_list->nb_atoms;

	if (work_list->num_merged > 1)
		ret = printf(" %s:(%d) ", thread__comm_str(work_list->thread), work_list->num_merged);
		ret = printf(" %s:%d ", thread__comm_str(work_list->thread), work_list->thread->tid);

	for (i = 0; i < 24 - ret; i++)

	avg = work_list->total_lat / work_list->nb_atoms;
	timestamp__scnprintf_usec(work_list->max_lat_at, max_lat_at, sizeof(max_lat_at));

	printf("|%11.3f ms |%9" PRIu64 " | avg:%9.3f ms | max:%9.3f ms | max at: %13s s\n",
	       (double)work_list->total_runtime / NSEC_PER_MSEC,
	       work_list->nb_atoms, (double)avg / NSEC_PER_MSEC,
	       (double)work_list->max_lat / NSEC_PER_MSEC,

static int pid_cmp(struct work_atoms *l, struct work_atoms *r)

	if (l->thread == r->thread)
	if (l->thread->tid < r->thread->tid)
	if (l->thread->tid > r->thread->tid)
	return (int)(l->thread - r->thread);

static int avg_cmp(struct work_atoms *l, struct work_atoms *r)

	avgl = l->total_lat / l->nb_atoms;
	avgr = r->total_lat / r->nb_atoms;

static int max_cmp(struct work_atoms *l, struct work_atoms *r)

	if (l->max_lat < r->max_lat)
	if (l->max_lat > r->max_lat)

static int switch_cmp(struct work_atoms *l, struct work_atoms *r)

	if (l->nb_atoms < r->nb_atoms)
	if (l->nb_atoms > r->nb_atoms)

static int runtime_cmp(struct work_atoms *l, struct work_atoms *r)

	if (l->total_runtime < r->total_runtime)
	if (l->total_runtime > r->total_runtime)

static int sort_dimension__add(const char *tok, struct list_head *list)

	static struct sort_dimension avg_sort_dimension = {
	static struct sort_dimension max_sort_dimension = {
	static struct sort_dimension pid_sort_dimension = {
	static struct sort_dimension runtime_sort_dimension = {
	static struct sort_dimension switch_sort_dimension = {
	struct sort_dimension *available_sorts[] = {
		&pid_sort_dimension,
		&avg_sort_dimension,
		&max_sort_dimension,
		&switch_sort_dimension,
		&runtime_sort_dimension,

	for (i = 0; i < ARRAY_SIZE(available_sorts); i++) {
		if (!strcmp(available_sorts[i]->name, tok)) {
			list_add_tail(&available_sorts[i]->list, list);
static void perf_sched__sort_lat(struct perf_sched *sched)

	struct rb_node *node;
	struct rb_root_cached *root = &sched->atom_root;

		struct work_atoms *data;
		node = rb_first_cached(root);

		rb_erase_cached(node, root);
		data = rb_entry(node, struct work_atoms, node);
		__thread_latency_insert(&sched->sorted_atom_root, data, &sched->sort_list);

	if (root == &sched->atom_root) {
		root = &sched->merged_atom_root;

static int process_sched_wakeup_event(struct perf_tool *tool,
				      struct evsel *evsel,
				      struct perf_sample *sample,
				      struct machine *machine)

	struct perf_sched *sched = container_of(tool, struct perf_sched, tool);

	if (sched->tp_handler->wakeup_event)
		return sched->tp_handler->wakeup_event(sched, evsel, sample, machine);

static bool thread__has_color(struct thread *thread)

	union map_priv priv = {
		.ptr = thread__priv(thread),

static struct thread*
map__findnew_thread(struct perf_sched *sched, struct machine *machine, pid_t pid, pid_t tid)

	struct thread *thread = machine__findnew_thread(machine, pid, tid);
	union map_priv priv = {

	if (!sched->map.color_pids || !thread || thread__priv(thread))

	if (thread_map__has(sched->map.color_pids, tid))

	thread__set_priv(thread, priv.ptr);
static int map_switch_event(struct perf_sched *sched, struct evsel *evsel,
			    struct perf_sample *sample, struct machine *machine)

	const u32 next_pid = perf_evsel__intval(evsel, sample, "next_pid");
	struct thread *sched_in;
	struct thread_runtime *tr;
	u64 timestamp0, timestamp = sample->time;
	int i, this_cpu = sample->cpu;
	bool new_cpu = false;
	const char *color = PERF_COLOR_NORMAL;
	char stimestamp[32];

	BUG_ON(this_cpu >= MAX_CPUS || this_cpu < 0);

	if (this_cpu > sched->max_cpu)
		sched->max_cpu = this_cpu;

	if (sched->map.comp) {
		cpus_nr = bitmap_weight(sched->map.comp_cpus_mask, MAX_CPUS);
		if (!test_and_set_bit(this_cpu, sched->map.comp_cpus_mask)) {
			sched->map.comp_cpus[cpus_nr++] = this_cpu;
		cpus_nr = sched->max_cpu;

	timestamp0 = sched->cpu_last_switched[this_cpu];
	sched->cpu_last_switched[this_cpu] = timestamp;
		delta = timestamp - timestamp0;

		pr_err("hm, delta: %" PRIu64 " < 0 ?\n", delta);

	sched_in = map__findnew_thread(sched, machine, -1, next_pid);
	if (sched_in == NULL)

	tr = thread__get_runtime(sched_in);
		thread__put(sched_in);

	sched->curr_thread[this_cpu] = thread__get(sched_in);

	if (!tr->shortname[0]) {
		if (!strcmp(thread__comm_str(sched_in), "swapper")) {
			/*
			 * Don't allocate a letter-number for swapper:0
			 * as a shortname. Instead, we use '.' for it.
			 */
			tr->shortname[0] = '.';
			tr->shortname[1] = ' ';
			tr->shortname[0] = sched->next_shortname1;
			tr->shortname[1] = sched->next_shortname2;

			if (sched->next_shortname1 < 'Z') {
				sched->next_shortname1++;
				sched->next_shortname1 = 'A';
				if (sched->next_shortname2 < '9')
					sched->next_shortname2++;
					sched->next_shortname2 = '0';

	for (i = 0; i < cpus_nr; i++) {
		int cpu = sched->map.comp ? sched->map.comp_cpus[i] : i;
		struct thread *curr_thread = sched->curr_thread[cpu];
		struct thread_runtime *curr_tr;
		const char *pid_color = color;
		const char *cpu_color = color;

		if (curr_thread && thread__has_color(curr_thread))
			pid_color = COLOR_PIDS;

		if (sched->map.cpus && !cpu_map__has(sched->map.cpus, cpu))

		if (sched->map.color_cpus && cpu_map__has(sched->map.color_cpus, cpu))
			cpu_color = COLOR_CPUS;

		if (cpu != this_cpu)
			color_fprintf(stdout, color, " ");
			color_fprintf(stdout, cpu_color, "*");

		if (sched->curr_thread[cpu]) {
			curr_tr = thread__get_runtime(sched->curr_thread[cpu]);
			if (curr_tr == NULL) {
				thread__put(sched_in);
			color_fprintf(stdout, pid_color, "%2s ", curr_tr->shortname);
			color_fprintf(stdout, color, " ");

	if (sched->map.cpus && !cpu_map__has(sched->map.cpus, this_cpu))

	timestamp__scnprintf_usec(timestamp, stimestamp, sizeof(stimestamp));
	color_fprintf(stdout, color, " %12s secs ", stimestamp);
	if (new_shortname || tr->comm_changed || (verbose > 0 && sched_in->tid)) {
		const char *pid_color = color;

		if (thread__has_color(sched_in))
			pid_color = COLOR_PIDS;

		color_fprintf(stdout, pid_color, "%s => %s:%d",
			      tr->shortname, thread__comm_str(sched_in), sched_in->tid);
		tr->comm_changed = false;

	if (sched->map.comp && new_cpu)
		color_fprintf(stdout, color, " (CPU %d)", this_cpu);

	color_fprintf(stdout, color, "\n");

	thread__put(sched_in);
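/*
 * The net effect ('perf sched map'): one column per CPU, a two-character
 * shortname per task ('A0'..'Z9', '.' for idle), '*' marking the CPU where
 * the switch happened; output is roughly like:
 *
 *   *A0                 452.123456 secs A0 => perf:4670
 *    A0 *B0             452.123789 secs B0 => migration/1:17
 */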
static int process_sched_switch_event(struct perf_tool *tool,
				      struct evsel *evsel,
				      struct perf_sample *sample,
				      struct machine *machine)

	struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
	int this_cpu = sample->cpu, err = 0;
	u32 prev_pid = perf_evsel__intval(evsel, sample, "prev_pid"),
	    next_pid = perf_evsel__intval(evsel, sample, "next_pid");

	if (sched->curr_pid[this_cpu] != (u32)-1) {
		/*
		 * Are we trying to switch away a PID that is
		 */
		if (sched->curr_pid[this_cpu] != prev_pid)
			sched->nr_context_switch_bugs++;

	if (sched->tp_handler->switch_event)
		err = sched->tp_handler->switch_event(sched, evsel, sample, machine);

	sched->curr_pid[this_cpu] = next_pid;

static int process_sched_runtime_event(struct perf_tool *tool,
				       struct evsel *evsel,
				       struct perf_sample *sample,
				       struct machine *machine)

	struct perf_sched *sched = container_of(tool, struct perf_sched, tool);

	if (sched->tp_handler->runtime_event)
		return sched->tp_handler->runtime_event(sched, evsel, sample, machine);
static int perf_sched__process_fork_event(struct perf_tool *tool,
					  union perf_event *event,
					  struct perf_sample *sample,
					  struct machine *machine)

	struct perf_sched *sched = container_of(tool, struct perf_sched, tool);

	/* run the fork event through the perf machinery */
	perf_event__process_fork(tool, event, sample, machine);

	/* and then run additional processing needed for this command */
	if (sched->tp_handler->fork_event)
		return sched->tp_handler->fork_event(sched, event, machine);
static int process_sched_migrate_task_event(struct perf_tool *tool,
					    struct evsel *evsel,
					    struct perf_sample *sample,
					    struct machine *machine)

	struct perf_sched *sched = container_of(tool, struct perf_sched, tool);

	if (sched->tp_handler->migrate_task_event)
		return sched->tp_handler->migrate_task_event(sched, evsel, sample, machine);

typedef int (*tracepoint_handler)(struct perf_tool *tool,
				  struct evsel *evsel,
				  struct perf_sample *sample,
				  struct machine *machine);

static int perf_sched__process_tracepoint_sample(struct perf_tool *tool __maybe_unused,
						 union perf_event *event __maybe_unused,
						 struct perf_sample *sample,
						 struct evsel *evsel,
						 struct machine *machine)

	if (evsel->handler != NULL) {
		tracepoint_handler f = evsel->handler;
		err = f(tool, evsel, sample, machine);
static int perf_sched__process_comm(struct perf_tool *tool __maybe_unused,
				    union perf_event *event,
				    struct perf_sample *sample,
				    struct machine *machine)

	struct thread *thread;
	struct thread_runtime *tr;

	err = perf_event__process_comm(tool, event, sample, machine);

	thread = machine__find_thread(machine, sample->pid, sample->tid);
		pr_err("Internal error: can't find thread\n");

	tr = thread__get_runtime(thread);
		thread__put(thread);

	tr->comm_changed = true;
	thread__put(thread);

static int perf_sched__read_events(struct perf_sched *sched)

	const struct evsel_str_handler handlers[] = {
		{ "sched:sched_switch",	      process_sched_switch_event, },
		{ "sched:sched_stat_runtime", process_sched_runtime_event, },
		{ "sched:sched_wakeup",	      process_sched_wakeup_event, },
		{ "sched:sched_wakeup_new",   process_sched_wakeup_event, },
		{ "sched:sched_migrate_task", process_sched_migrate_task_event, },
	struct perf_session *session;
	struct perf_data data = {
		.mode	= PERF_DATA_MODE_READ,
		.force	= sched->force,

	session = perf_session__new(&data, false, &sched->tool);
	if (IS_ERR(session)) {
		pr_debug("Error creating perf session");
		return PTR_ERR(session);

	symbol__init(&session->header.env);

	if (perf_session__set_tracepoints_handlers(session, handlers))

	if (perf_session__has_traces(session, "record -R")) {
		int err = perf_session__process_events(session);
			pr_err("Failed to process events, error %d", err);

		sched->nr_events      = session->evlist->stats.nr_events[0];
		sched->nr_lost_events = session->evlist->stats.total_lost;
		sched->nr_lost_chunks = session->evlist->stats.nr_events[PERF_RECORD_LOST];

	perf_session__delete(session);
/*
 * scheduling times are printed as msec.usec
 */
static inline void print_sched_time(unsigned long long nsecs, int width)

	unsigned long msecs;
	unsigned long usecs;

	msecs  = nsecs / NSEC_PER_MSEC;
	nsecs -= msecs * NSEC_PER_MSEC;
	usecs  = nsecs / NSEC_PER_USEC;
	printf("%*lu.%03lu ", width, msecs, usecs);
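/*
 * Example: print_sched_time(12345678, 6) splits 12345678 nsecs into
 * 12 msecs + 345 usecs and prints "    12.345 " (the msec part is
 * right-aligned in a width-6 field).
 */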
/*
 * returns runtime data for event, allocating memory for it the
 * first time it is used.
 */
static struct evsel_runtime *perf_evsel__get_runtime(struct evsel *evsel)

	struct evsel_runtime *r = evsel->priv;

		r = zalloc(sizeof(struct evsel_runtime));

/*
 * save last time event was seen per cpu
 */
static void perf_evsel__save_time(struct evsel *evsel,
				  u64 timestamp, u32 cpu)

	struct evsel_runtime *r = perf_evsel__get_runtime(evsel);

	if ((cpu >= r->ncpu) || (r->last_time == NULL)) {
		int i, n = __roundup_pow_of_two(cpu+1);
		void *p = r->last_time;

		p = realloc(r->last_time, n * sizeof(u64));

		for (i = r->ncpu; i < n; ++i)
			r->last_time[i] = (u64) 0;

	r->last_time[cpu] = timestamp;
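/*
 * Example: the first sample on cpu 5 grows last_time to
 * __roundup_pow_of_two(6) = 8 slots, zeroing slots ncpu..7, so further
 * samples on cpus 0-7 need no reallocation.
 */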
/* returns last time this event was seen on the given cpu */
static u64 perf_evsel__get_time(struct evsel *evsel, u32 cpu)

	struct evsel_runtime *r = perf_evsel__get_runtime(evsel);

	if ((r == NULL) || (r->last_time == NULL) || (cpu >= r->ncpu))

	return r->last_time[cpu];

static int comm_width = 30;

static char *timehist_get_commstr(struct thread *thread)

	static char str[32];
	const char *comm = thread__comm_str(thread);
	pid_t tid = thread->tid;
	pid_t pid = thread->pid_;

	n = scnprintf(str, sizeof(str), "%s", comm);

	else if (tid != pid)
		n = scnprintf(str, sizeof(str), "%s[%d/%d]", comm, tid, pid);

		n = scnprintf(str, sizeof(str), "%s[%d]", comm, tid);
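/*
 * Examples of the resulting column: "foo[1234]" for a single-threaded
 * task, "foo[1235/1234]" for a worker thread (tid/pid); these land under
 * the "task name [tid/pid]" header printed by timehist_header() below.
 */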
static void timehist_header(struct perf_sched *sched)

	u32 ncpus = sched->max_cpu + 1;

	printf("%15s %6s ", "time", "cpu");

	if (sched->show_cpu_visual) {
		for (i = 0, j = 0; i < ncpus; ++i) {

	printf(" %-*s %9s %9s %9s", comm_width,
	       "task name", "wait time", "sch delay", "run time");

	if (sched->show_state)
		printf(" %s", "state");

	printf("%15s %-6s ", "", "");

	if (sched->show_cpu_visual)
		printf(" %*s ", ncpus, "");

	printf(" %-*s %9s %9s %9s", comm_width,
	       "[tid/pid]", "(msec)", "(msec)", "(msec)");

	if (sched->show_state)

	printf("%.15s %.6s ", graph_dotted_line, graph_dotted_line);

	if (sched->show_cpu_visual)
		printf(" %.*s ", ncpus, graph_dotted_line);

	printf(" %.*s %.9s %.9s %.9s", comm_width,
	       graph_dotted_line, graph_dotted_line, graph_dotted_line,

	if (sched->show_state)
		printf(" %.5s", graph_dotted_line);

static char task_state_char(struct thread *thread, int state)

	static const char state_to_char[] = TASK_STATE_TO_CHAR_STR;
	unsigned bit = state ? ffs(state) : 0;

	if (thread->tid == 0)

	return bit < sizeof(state_to_char) - 1 ? state_to_char[bit] : '?';
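/*
 * Example: prev_state = 2 (TASK_UNINTERRUPTIBLE) gives ffs(2) = 2, which
 * indexes "RSDTtZXxKWP" to 'D'; state 0 (TASK_RUNNING) maps to index 0,
 * i.e. 'R'. The idle task (tid 0) is special-cased above.
 */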
static void timehist_print_sample(struct perf_sched *sched,
				  struct evsel *evsel,
				  struct perf_sample *sample,
				  struct addr_location *al,
				  struct thread *thread,

	struct thread_runtime *tr = thread__priv(thread);
	const char *next_comm = perf_evsel__strval(evsel, sample, "next_comm");
	const u32 next_pid = perf_evsel__intval(evsel, sample, "next_pid");
	u32 max_cpus = sched->max_cpu + 1;

	timestamp__scnprintf_usec(t, tstr, sizeof(tstr));
	printf("%15s [%04d] ", tstr, sample->cpu);

	if (sched->show_cpu_visual) {
		for (i = 0; i < max_cpus; ++i) {
			/* flag idle times with 'i'; others are sched events */
			if (i == sample->cpu)
				c = (thread->tid == 0) ? 'i' : 's';

	printf(" %-*s ", comm_width, timehist_get_commstr(thread));

	wait_time = tr->dt_sleep + tr->dt_iowait + tr->dt_preempt;
	print_sched_time(wait_time, 6);

	print_sched_time(tr->dt_delay, 6);
	print_sched_time(tr->dt_run, 6);

	if (sched->show_state)
		printf(" %5c ", task_state_char(thread, state));

	if (sched->show_next) {
		snprintf(nstr, sizeof(nstr), "next: %s[%d]", next_comm, next_pid);
		printf(" %-*s", comm_width, nstr);

	if (sched->show_wakeups && !sched->show_next)
		printf(" %-*s", comm_width, "");

	if (thread->tid == 0)

	if (sched->show_callchain)

	sample__fprintf_sym(sample, al, 0,
			    EVSEL__PRINT_SYM | EVSEL__PRINT_ONELINE |
			    EVSEL__PRINT_CALLCHAIN_ARROW |
			    EVSEL__PRINT_SKIP_IGNORED,
			    &callchain_cursor, stdout);
/*
 * Explanation of delta-time stats:
 *
 *            t = time of current schedule out event
 *        tprev = time of previous sched out event
 *                also time of schedule-in event for current task
 *    last_time = time of last sched change event for current task
 *                (i.e., time process was last scheduled out)
 * ready_to_run = time of wakeup for current task
 *
 * -----|------------|------------|------------|------
 *    last         ready        tprev          t
 *    time         to run
 *
 *      |-------- dt_wait --------|
 *      |- dt_delay -|-- dt_run --|
 *
 *   dt_run = run time of current task
 *  dt_wait = time between last schedule out event for task and tprev
 *            represents time spent off the cpu
 * dt_delay = time between wakeup and schedule-in of task
 */
static void timehist_update_runtime_stats(struct thread_runtime *r,

	r->dt_run = t - tprev;
	if (r->ready_to_run) {
		if (r->ready_to_run > tprev)
			pr_debug("time travel: wakeup time for task > previous sched_switch event\n");
			r->dt_delay = tprev - r->ready_to_run;

	if (r->last_time > tprev)
		pr_debug("time travel: last sched out time for task > previous sched_switch event\n");
	else if (r->last_time) {
		u64 dt_wait = tprev - r->last_time;

		if (r->last_state == TASK_RUNNING)
			r->dt_preempt = dt_wait;
		else if (r->last_state == TASK_UNINTERRUPTIBLE)
			r->dt_iowait = dt_wait;
			r->dt_sleep = dt_wait;

	update_stats(&r->run_stats, r->dt_run);

	r->total_run_time     += r->dt_run;
	r->total_delay_time   += r->dt_delay;
	r->total_sleep_time   += r->dt_sleep;
	r->total_iowait_time  += r->dt_iowait;
	r->total_preempt_time += r->dt_preempt;
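/*
 * Worked example (see the diagram above): last_time = 10ms,
 * ready_to_run = 14ms, tprev = 16ms, t = 19ms gives dt_run = 3ms,
 * dt_delay = 2ms and dt_wait = 6ms; the wait is charged to dt_preempt,
 * dt_iowait or dt_sleep according to last_state (TASK_RUNNING,
 * TASK_UNINTERRUPTIBLE, or anything else, respectively).
 */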
static bool is_idle_sample(struct perf_sample *sample,
			   struct evsel *evsel)

	/* pid 0 == swapper == idle task */
	if (strcmp(perf_evsel__name(evsel), "sched:sched_switch") == 0)
		return perf_evsel__intval(evsel, sample, "prev_pid") == 0;

	return sample->pid == 0;

static void save_task_callchain(struct perf_sched *sched,
				struct perf_sample *sample,
				struct evsel *evsel,
				struct machine *machine)

	struct callchain_cursor *cursor = &callchain_cursor;
	struct thread *thread;

	/* want main thread for process - has maps */
	thread = machine__findnew_thread(machine, sample->pid, sample->pid);
	if (thread == NULL) {
		pr_debug("Failed to get thread for pid %d.\n", sample->pid);

	if (!sched->show_callchain || sample->callchain == NULL)

	if (thread__resolve_callchain(thread, cursor, evsel, sample,
				      NULL, NULL, sched->max_stack + 2) != 0) {
			pr_err("Failed to resolve callchain. Skipping\n");

	callchain_cursor_commit(cursor);

		struct callchain_cursor_node *node;

		node = callchain_cursor_current(cursor);

			if (!strcmp(sym->name, "schedule") ||
			    !strcmp(sym->name, "__schedule") ||
			    !strcmp(sym->name, "preempt_schedule"))

		callchain_cursor_advance(cursor);

static int init_idle_thread(struct thread *thread)

	struct idle_thread_runtime *itr;

	thread__set_comm(thread, idle_comm, 0);

	itr = zalloc(sizeof(*itr));

	init_stats(&itr->tr.run_stats);
	callchain_init(&itr->callchain);
	callchain_cursor_reset(&itr->cursor);
	thread__set_priv(thread, itr);
/*
 * Track idle stats per cpu by maintaining a local thread
 * struct for the idle task on each cpu.
 */
static int init_idle_threads(int ncpu)

	idle_threads = zalloc(ncpu * sizeof(struct thread *));

	idle_max_cpu = ncpu;

	/* allocate the actual thread struct if needed */
	for (i = 0; i < ncpu; ++i) {
		idle_threads[i] = thread__new(0, 0);
		if (idle_threads[i] == NULL)

		ret = init_idle_thread(idle_threads[i]);

static void free_idle_threads(void)

	if (idle_threads == NULL)

	for (i = 0; i < idle_max_cpu; ++i) {
		if ((idle_threads[i]))
			thread__delete(idle_threads[i]);

static struct thread *get_idle_thread(int cpu)

	/*
	 * expand/allocate array of pointers to local thread
	 */
	if ((cpu >= idle_max_cpu) || (idle_threads == NULL)) {
		int i, j = __roundup_pow_of_two(cpu+1);

		p = realloc(idle_threads, j * sizeof(struct thread *));

		idle_threads = (struct thread **) p;
		for (i = idle_max_cpu; i < j; ++i)
			idle_threads[i] = NULL;

	/* allocate a new thread struct if needed */
	if (idle_threads[cpu] == NULL) {
		idle_threads[cpu] = thread__new(0, 0);

		if (idle_threads[cpu]) {
			if (init_idle_thread(idle_threads[cpu]) < 0)

	return idle_threads[cpu];
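/*
 * Like perf_evsel__save_time(), idle_threads grows in power-of-two steps:
 * e.g. the first event on cpu 9 reallocates it to
 * __roundup_pow_of_two(10) = 16 entries, NULL-filling the new slots, and
 * the per-cpu idle pseudo-thread is then created lazily.
 */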
static void save_idle_callchain(struct perf_sched *sched,
				struct idle_thread_runtime *itr,
				struct perf_sample *sample)

	if (!sched->show_callchain || sample->callchain == NULL)

	callchain_cursor__copy(&itr->cursor, &callchain_cursor);

static struct thread *timehist_get_thread(struct perf_sched *sched,
					  struct perf_sample *sample,
					  struct machine *machine,
					  struct evsel *evsel)

	struct thread *thread;

	if (is_idle_sample(sample, evsel)) {
		thread = get_idle_thread(sample->cpu);
			pr_err("Failed to get idle thread for cpu %d.\n", sample->cpu);

		/* there were samples with tid 0 but non-zero pid */
		thread = machine__findnew_thread(machine, sample->pid,
						 sample->tid ?: sample->pid);
		if (thread == NULL) {
			pr_debug("Failed to get thread for tid %d. skipping sample.\n",

		save_task_callchain(sched, sample, evsel, machine);
		if (sched->idle_hist) {
			struct thread *idle;
			struct idle_thread_runtime *itr;

			idle = get_idle_thread(sample->cpu);
				pr_err("Failed to get idle thread for cpu %d.\n", sample->cpu);

			itr = thread__priv(idle);

			itr->last_thread = thread;

			/* copy task callchain when entering idle */
			if (perf_evsel__intval(evsel, sample, "next_pid") == 0)
				save_idle_callchain(sched, itr, sample);
static bool timehist_skip_sample(struct perf_sched *sched,
				 struct thread *thread,
				 struct evsel *evsel,
				 struct perf_sample *sample)

	if (thread__is_filtered(thread)) {
		sched->skipped_samples++;

	if (sched->idle_hist) {
		if (strcmp(perf_evsel__name(evsel), "sched:sched_switch"))
		else if (perf_evsel__intval(evsel, sample, "prev_pid") != 0 &&
			 perf_evsel__intval(evsel, sample, "next_pid") != 0)

static void timehist_print_wakeup_event(struct perf_sched *sched,
					struct evsel *evsel,
					struct perf_sample *sample,
					struct machine *machine,
					struct thread *awakened)

	struct thread *thread;

	thread = machine__findnew_thread(machine, sample->pid, sample->tid);

	/* show wakeup unless both awakee and awaker are filtered */
	if (timehist_skip_sample(sched, thread, evsel, sample) &&
	    timehist_skip_sample(sched, awakened, evsel, sample)) {

	timestamp__scnprintf_usec(sample->time, tstr, sizeof(tstr));
	printf("%15s [%04d] ", tstr, sample->cpu);
	if (sched->show_cpu_visual)
		printf(" %*s ", sched->max_cpu + 1, "");

	printf(" %-*s ", comm_width, timehist_get_commstr(thread));

	printf(" %9s %9s %9s ", "", "", "");

	printf("awakened: %s", timehist_get_commstr(awakened));

static int timehist_sched_wakeup_event(struct perf_tool *tool,
				       union perf_event *event __maybe_unused,
				       struct evsel *evsel,
				       struct perf_sample *sample,
				       struct machine *machine)

	struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
	struct thread *thread;
	struct thread_runtime *tr = NULL;
	/* want pid of awakened task not pid in sample */
	const u32 pid = perf_evsel__intval(evsel, sample, "pid");

	thread = machine__findnew_thread(machine, 0, pid);

	tr = thread__get_runtime(thread);

	if (tr->ready_to_run == 0)
		tr->ready_to_run = sample->time;

	/* show wakeups if requested */
	if (sched->show_wakeups &&
	    !perf_time__skip_sample(&sched->ptime, sample->time))
		timehist_print_wakeup_event(sched, evsel, sample, machine, thread);
static void timehist_print_migration_event(struct perf_sched *sched,
					   struct evsel *evsel,
					   struct perf_sample *sample,
					   struct machine *machine,
					   struct thread *migrated)

	struct thread *thread;
	u32 max_cpus = sched->max_cpu + 1;

	if (sched->summary_only)

	ocpu = perf_evsel__intval(evsel, sample, "orig_cpu");
	dcpu = perf_evsel__intval(evsel, sample, "dest_cpu");

	thread = machine__findnew_thread(machine, sample->pid, sample->tid);

	if (timehist_skip_sample(sched, thread, evsel, sample) &&
	    timehist_skip_sample(sched, migrated, evsel, sample)) {

	timestamp__scnprintf_usec(sample->time, tstr, sizeof(tstr));
	printf("%15s [%04d] ", tstr, sample->cpu);

	if (sched->show_cpu_visual) {
		for (i = 0; i < max_cpus; ++i) {
			c = (i == sample->cpu) ? 'm' : ' ';

	printf(" %-*s ", comm_width, timehist_get_commstr(thread));

	printf(" %9s %9s %9s ", "", "", "");

	printf("migrated: %s", timehist_get_commstr(migrated));
	printf(" cpu %d => %d", ocpu, dcpu);
static int timehist_migrate_task_event(struct perf_tool *tool,
				       union perf_event *event __maybe_unused,
				       struct evsel *evsel,
				       struct perf_sample *sample,
				       struct machine *machine)

	struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
	struct thread *thread;
	struct thread_runtime *tr = NULL;
	/* want pid of migrated task not pid in sample */
	const u32 pid = perf_evsel__intval(evsel, sample, "pid");

	thread = machine__findnew_thread(machine, 0, pid);

	tr = thread__get_runtime(thread);

	/* show migrations if requested */
	timehist_print_migration_event(sched, evsel, sample, machine, thread);

static int timehist_sched_change_event(struct perf_tool *tool,
				       union perf_event *event,
				       struct evsel *evsel,
				       struct perf_sample *sample,
				       struct machine *machine)

	struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
	struct perf_time_interval *ptime = &sched->ptime;
	struct addr_location al;
	struct thread *thread;
	struct thread_runtime *tr = NULL;
	u64 tprev, t = sample->time;
	int state = perf_evsel__intval(evsel, sample, "prev_state");

	if (machine__resolve(machine, &al, sample) < 0) {
		pr_err("problem processing %d event. skipping it\n",
		       event->header.type);

	thread = timehist_get_thread(sched, sample, machine, evsel);
	if (thread == NULL) {

	if (timehist_skip_sample(sched, thread, evsel, sample))

	tr = thread__get_runtime(thread);

	tprev = perf_evsel__get_time(evsel, sample->cpu);

	/*
	 * If start time given:
	 * - sample time is under window user cares about - skip sample
	 * - tprev is under window user cares about - reset to start of window
	 */
	if (ptime->start && ptime->start > t)

	if (tprev && ptime->start > tprev)
		tprev = ptime->start;

	/*
	 * If end time given:
	 * - previous sched event is out of window - we are done
	 * - sample time is beyond window user cares about - reset it
	 *   to close out stats for time window interest
	 */
		if (tprev > ptime->end)

	if (!sched->idle_hist || thread->tid == 0) {
		timehist_update_runtime_stats(tr, t, tprev);

		if (sched->idle_hist) {
			struct idle_thread_runtime *itr = (void *)tr;
			struct thread_runtime *last_tr;

			BUG_ON(thread->tid != 0);

			if (itr->last_thread == NULL)

			/* add current idle time as last thread's runtime */
			last_tr = thread__get_runtime(itr->last_thread);
			if (last_tr == NULL)

			timehist_update_runtime_stats(last_tr, t, tprev);
			/*
			 * remove delta time of last thread as it's not updated
			 * and otherwise it will show an invalid value next
			 * time. We only care about total run time and run stats.
			 */
			last_tr->dt_run = 0;
			last_tr->dt_delay = 0;
			last_tr->dt_sleep = 0;
			last_tr->dt_iowait = 0;
			last_tr->dt_preempt = 0;

			callchain_append(&itr->callchain, &itr->cursor, t - tprev);

			itr->last_thread = NULL;

	if (!sched->summary_only)
		timehist_print_sample(sched, evsel, sample, &al, thread, t, state);

	if (sched->hist_time.start == 0 && t >= ptime->start)
		sched->hist_time.start = t;
	if (ptime->end == 0 || t <= ptime->end)
		sched->hist_time.end = t;

	/* time of this sched_switch event becomes last time task seen */
	tr->last_time = sample->time;

	/* last state is used to determine where to account wait time */
	tr->last_state = state;

	/* sched out event for task so reset ready to run time */
	tr->ready_to_run = 0;

	perf_evsel__save_time(evsel, sample->time, sample->cpu);
static int timehist_sched_switch_event(struct perf_tool *tool,
                                       union perf_event *event,
                                       struct evsel *evsel,
                                       struct perf_sample *sample,
                                       struct machine *machine)
{
        return timehist_sched_change_event(tool, event, evsel, sample, machine);
}

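/*
 * PERF_RECORD_LOST handler: report ring-buffer drops as they appear in
 * the stream, since lost events make the accounting above unreliable
 * (see also print_bad_events() below).
 */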
static int process_lost(struct perf_tool *tool __maybe_unused,
                        union perf_event *event,
                        struct perf_sample *sample,
                        struct machine *machine __maybe_unused)
{
        char tstr[64];

        timestamp__scnprintf_usec(sample->time, tstr, sizeof(tstr));
        printf("%15s ", tstr);
        printf("lost %" PRI_lu64 " events on cpu %d\n", event->lost.lost, sample->cpu);

        return 0;
}

static void print_thread_runtime(struct thread *t,
                                 struct thread_runtime *r)
{
        double mean = avg_stats(&r->run_stats);
        float stddev;

        printf("%*s   %5d  %9" PRIu64 " ",
               comm_width, timehist_get_commstr(t), t->ppid,
               (u64) r->run_stats.n);

        print_sched_time(r->total_run_time, 8);
        stddev = rel_stddev_stats(stddev_stats(&r->run_stats), mean);
        print_sched_time(r->run_stats.min, 6);
        printf(" ");
        print_sched_time((u64) mean, 6);
        printf(" ");
        print_sched_time(r->run_stats.max, 6);
        printf(" ");
        printf("%5.2f", stddev);
        printf(" %5" PRIu64, r->migrations);
        printf("\n");
}

static void print_thread_waittime(struct thread *t,
                                  struct thread_runtime *r)
{
        printf("%*s   %5d  %9" PRIu64 " ",
               comm_width, timehist_get_commstr(t), t->ppid,
               (u64) r->run_stats.n);

        print_sched_time(r->total_run_time, 8);
        print_sched_time(r->total_sleep_time, 6);
        printf(" ");
        print_sched_time(r->total_iowait_time, 6);
        printf(" ");
        print_sched_time(r->total_preempt_time, 6);
        printf(" ");
        print_sched_time(r->total_delay_time, 6);
        printf("\n");
}

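/*
 * Totals accumulated across machine__for_each_thread() while printing
 * the per-task summary lines.
 */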
struct total_run_stats {
        struct perf_sched *sched;
        u64 task_count;
        u64 sched_count;
        u64 total_run_time;
};

static int __show_thread_runtime(struct thread *t, void *priv)
{
        struct total_run_stats *stats = priv;
        struct thread_runtime *r;

        if (thread__is_filtered(t))
                return 0;

        r = thread__priv(t);
        if (r && r->run_stats.n) {
                stats->task_count++;
                stats->sched_count += r->run_stats.n;
                stats->total_run_time += r->total_run_time;

                if (stats->sched->show_state)
                        print_thread_waittime(t, r);
                else
                        print_thread_runtime(t, r);
        }

        return 0;
}

static int show_thread_runtime(struct thread *t, void *priv)
{
        if (t->dead)
                return 0;

        return __show_thread_runtime(t, priv);
}

static int show_deadthread_runtime(struct thread *t, void *priv)
{
        if (!t->dead)
                return 0;

        return __show_thread_runtime(t, priv);
}

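/*
 * Print one callchain per line in folded form, callee first, e.g.
 * (illustrative) "schedule <- worker_thread <- kthread".
 */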
static size_t callchain__fprintf_folded(FILE *fp, struct callchain_node *node)
{
        const char *sep = " <- ";
        struct callchain_list *chain;
        size_t ret = 0;
        char bf[1024];
        bool first;

        if (node == NULL)
                return 0;

        ret = callchain__fprintf_folded(fp, node->parent);
        first = (ret == 0);

        list_for_each_entry(chain, &node->val, list) {
                if (chain->ip >= PERF_CONTEXT_MAX)
                        continue;
                if (chain->ms.sym && chain->ms.sym->ignore)
                        continue;
                ret += fprintf(fp, "%s%s", first ? "" : sep,
                               callchain_list__sym_name(chain, bf, sizeof(bf),
                                                        false));
                first = false;
        }

        return ret;
}

static size_t timehist_print_idlehist_callchain(struct rb_root_cached *root)
{
        size_t ret = 0;
        FILE *fp = stdout;
        struct callchain_node *chain;
        struct rb_node *rb_node = rb_first_cached(root);

        printf("  %16s  %8s  %s\n", "Idle time (msec)", "Count", "Callchains");
        printf("  %.16s  %.8s  %.50s\n", graph_dotted_line, graph_dotted_line,
               graph_dotted_line);

        while (rb_node) {
                chain = rb_entry(rb_node, struct callchain_node, rb_node);
                rb_node = rb_next(rb_node);

                ret += fprintf(fp, "  ");
                print_sched_time(chain->hit, 12);
                ret += 16; /* print_sched_time returns 2nd arg + 4 */
                ret += fprintf(fp, " %8d  ", chain->count);
                ret += callchain__fprintf_folded(fp, chain);
                ret += fprintf(fp, "\n");
        }

        return ret;
}

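/*
 * Print the end-of-run summary: per-task run/wait times, per-CPU idle
 * time, optional idle callchains, and overall totals.
 */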
static void timehist_print_summary(struct perf_sched *sched,
                                   struct perf_session *session)
{
        struct machine *m = &session->machines.host;
        struct total_run_stats totals;
        u64 task_count;
        struct thread *t;
        struct thread_runtime *r;
        int i;
        u64 hist_time = sched->hist_time.end - sched->hist_time.start;

        memset(&totals, 0, sizeof(totals));
        totals.sched = sched;

        if (sched->idle_hist) {
                printf("\nIdle-time summary\n");
                printf("%*s  parent  sched-out  ", comm_width, "comm");
                printf("  idle-time   min-idle    avg-idle    max-idle  stddev  migrations\n");
        } else if (sched->show_state) {
                printf("\nWait-time summary\n");
                printf("%*s  parent   sched-in  ", comm_width, "comm");
                printf("  run-time      sleep      iowait     preempt       delay\n");
        } else {
                printf("\nRuntime summary\n");
                printf("%*s  parent   sched-in  ", comm_width, "comm");
                printf("   run-time    min-run     avg-run     max-run  stddev  migrations\n");
        }
        printf("%*s            (count)  ", comm_width, "");
        printf("     (msec)     (msec)      (msec)      (msec)       %s\n",
               sched->show_state ? "(msec)" : "%");
        printf("%.117s\n", graph_dotted_line);

        machine__for_each_thread(m, show_thread_runtime, &totals);
        task_count = totals.task_count;
        if (!task_count)
                printf("<no still running tasks>\n");

        printf("\nTerminated tasks:\n");
        machine__for_each_thread(m, show_deadthread_runtime, &totals);
        if (task_count == totals.task_count)
                printf("<no terminated tasks>\n");

        /* CPU idle stats not tracked when samples were skipped */
        if (sched->skipped_samples && !sched->idle_hist)
                return;

        printf("\nIdle stats:\n");
        for (i = 0; i < idle_max_cpu; ++i) {
                t = idle_threads[i];
                if (!t)
                        continue;

                r = thread__priv(t);
                if (r && r->run_stats.n) {
                        totals.sched_count += r->run_stats.n;
                        printf("    CPU %2d idle for ", i);
                        print_sched_time(r->total_run_time, 6);
                        printf(" msec  (%6.2f%%)\n", 100.0 * r->total_run_time / hist_time);
                } else
                        printf("    CPU %2d idle entire time window\n", i);
        }

        if (sched->idle_hist && sched->show_callchain) {
                callchain_param.mode  = CHAIN_FOLDED;
                callchain_param.value = CCVAL_PERIOD;

                callchain_register_param(&callchain_param);

                printf("\nIdle stats by callchain:\n");
                for (i = 0; i < idle_max_cpu; ++i) {
                        struct idle_thread_runtime *itr;

                        t = idle_threads[i];
                        if (!t)
                                continue;

                        itr = thread__priv(t);
                        if (itr == NULL)
                                continue;

                        callchain_param.sort(&itr->sorted_root.rb_root, &itr->callchain,
                                             0, &callchain_param);

                        printf("  CPU %2d:", i);
                        print_sched_time(itr->tr.total_run_time, 6);
                        printf(" msec\n");
                        timehist_print_idlehist_callchain(&itr->sorted_root);
                        printf("\n");
                }
        }

        printf("\n"
               "    Total number of unique tasks: %" PRIu64 "\n"
               "Total number of context switches: %" PRIu64 "\n",
               totals.task_count, totals.sched_count);

        printf("           Total run time (msec): ");
        print_sched_time(totals.total_run_time, 2);
        printf("\n");

        printf("    Total scheduling time (msec): ");
        print_sched_time(hist_time, 2);
        printf(" (x %d)\n", sched->max_cpu);
}

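/*
 * Per-tracepoint handlers are stashed in evsel->handler and dispatched
 * from perf_timehist__process_sample() below; they all share this
 * signature.
 */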
typedef int (*sched_handler)(struct perf_tool *tool,
                             union perf_event *event,
                             struct evsel *evsel,
                             struct perf_sample *sample,
                             struct machine *machine);

static int perf_timehist__process_sample(struct perf_tool *tool,
                                         union perf_event *event,
                                         struct perf_sample *sample,
                                         struct evsel *evsel,
                                         struct machine *machine)
{
        struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
        int err = 0;
        int this_cpu = sample->cpu;

        if (this_cpu > sched->max_cpu)
                sched->max_cpu = this_cpu;

        if (evsel->handler != NULL) {
                sched_handler f = evsel->handler;

                err = f(tool, event, evsel, sample, machine);
        }

        return err;
}

static int timehist_check_attr(struct perf_sched *sched,
                               struct evlist *evlist)
{
        struct evsel *evsel;
        struct evsel_runtime *er;

        list_for_each_entry(evsel, &evlist->core.entries, core.node) {
                er = perf_evsel__get_runtime(evsel);
                if (er == NULL) {
                        pr_err("Failed to allocate memory for evsel runtime data\n");
                        return -1;
                }

                if (sched->show_callchain && !evsel__has_callchain(evsel)) {
                        pr_info("Samples do not have callchains.\n");
                        sched->show_callchain = 0;
                        symbol_conf.use_callchain = 0;
                }
        }

        return 0;
}

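/*
 * Entry point for 'perf sched timehist'. Typical use (illustrative):
 *
 *   perf sched record -- sleep 1
 *   perf sched timehist -s
 *
 * the second command printing only the summary for the recorded window.
 */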
static int perf_sched__timehist(struct perf_sched *sched)
{
        const struct evsel_str_handler handlers[] = {
                { "sched:sched_switch",       timehist_sched_switch_event, },
                { "sched:sched_wakeup",       timehist_sched_wakeup_event, },
                { "sched:sched_wakeup_new",   timehist_sched_wakeup_event, },
        };
        const struct evsel_str_handler migrate_handlers[] = {
                { "sched:sched_migrate_task", timehist_migrate_task_event, },
        };
        struct perf_data data = {
                .path  = input_name,
                .mode  = PERF_DATA_MODE_READ,
                .force = sched->force,
        };

        struct perf_session *session;
        struct evlist *evlist;
        int err = -1;

        /*
         * event handlers for timehist option
         */
        sched->tool.sample       = perf_timehist__process_sample;
        sched->tool.mmap         = perf_event__process_mmap;
        sched->tool.comm         = perf_event__process_comm;
        sched->tool.exit         = perf_event__process_exit;
        sched->tool.fork         = perf_event__process_fork;
        sched->tool.lost         = process_lost;
        sched->tool.attr         = perf_event__process_attr;
        sched->tool.tracing_data = perf_event__process_tracing_data;
        sched->tool.build_id     = perf_event__process_build_id;

        sched->tool.ordered_events = true;
        sched->tool.ordering_requires_timestamps = true;

        symbol_conf.use_callchain = sched->show_callchain;

        session = perf_session__new(&data, false, &sched->tool);
        if (IS_ERR(session))
                return PTR_ERR(session);

        evlist = session->evlist;

        symbol__init(&session->header.env);

        if (perf_time__parse_str(&sched->ptime, sched->time_str) != 0) {
                pr_err("Invalid time string\n");
                return -EINVAL;
        }

        if (timehist_check_attr(sched, evlist) != 0)
                goto out;

        setup_pager();

        /* setup per-evsel handlers */
        if (perf_session__set_tracepoints_handlers(session, handlers))
                goto out;

        /* sched_switch event at a minimum needs to exist */
        if (!perf_evlist__find_tracepoint_by_name(session->evlist,
                                                  "sched:sched_switch")) {
                pr_err("No sched_switch events found. Have you run 'perf sched record'?\n");
                goto out;
        }

        if (sched->show_migrations &&
            perf_session__set_tracepoints_handlers(session, migrate_handlers))
                goto out;

        /* pre-allocate struct for per-CPU idle stats */
        sched->max_cpu = session->header.env.nr_cpus_online;
        if (sched->max_cpu == 0)
                sched->max_cpu = 4;
        if (init_idle_threads(sched->max_cpu))
                goto out;

        /* summary_only implies summary option, but don't overwrite summary if set */
        if (sched->summary_only)
                sched->summary = sched->summary_only;

        if (!sched->summary_only)
                timehist_header(sched);

        err = perf_session__process_events(session);
        if (err) {
                pr_err("Failed to process events, error %d", err);
                goto out;
        }

        sched->nr_events      = evlist->stats.nr_events[0];
        sched->nr_lost_events = evlist->stats.total_lost;
        sched->nr_lost_chunks = evlist->stats.nr_events[PERF_RECORD_LOST];

        if (sched->summary)
                timehist_print_summary(sched, session);

out:
        free_idle_threads();
        perf_session__delete(session);

        return err;
}

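/*
 * Sanity diagnostics shared by the latency and map paths: unordered
 * timestamps, lost events and suspect context switches are reported as
 * percentages of the processed stream.
 */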
static void print_bad_events(struct perf_sched *sched)
{
        if (sched->nr_unordered_timestamps && sched->nr_timestamps) {
                printf("  INFO: %.3f%% unordered timestamps (%ld out of %ld)\n",
                        (double)sched->nr_unordered_timestamps/(double)sched->nr_timestamps*100.0,
                        sched->nr_unordered_timestamps, sched->nr_timestamps);
        }
        if (sched->nr_lost_events && sched->nr_events) {
                printf("  INFO: %.3f%% lost events (%ld out of %ld, in %ld chunks)\n",
                        (double)sched->nr_lost_events/(double)sched->nr_events * 100.0,
                        sched->nr_lost_events, sched->nr_events, sched->nr_lost_chunks);
        }
        if (sched->nr_context_switch_bugs && sched->nr_timestamps) {
                printf("  INFO: %.3f%% context switch bugs (%ld out of %ld)",
                        (double)sched->nr_context_switch_bugs/(double)sched->nr_timestamps*100.0,
                        sched->nr_context_switch_bugs, sched->nr_timestamps);
                if (sched->nr_lost_events)
                        printf(" (due to lost events?)");
                printf("\n");
        }
}

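/*
 * Fold 'data' into the comm-keyed rbtree: atoms whose threads share a
 * comm are merged into a single row (e.g. all "gcc" threads), unless
 * -p/--pids asked for per-pid latency stats.
 */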
static void __merge_work_atoms(struct rb_root_cached *root, struct work_atoms *data)
{
        struct rb_node **new = &(root->rb_root.rb_node), *parent = NULL;
        struct work_atoms *this;
        const char *comm = thread__comm_str(data->thread), *this_comm;
        bool leftmost = true;

        while (*new) {
                int cmp;

                this = container_of(*new, struct work_atoms, node);
                parent = *new;

                this_comm = thread__comm_str(this->thread);
                cmp = strcmp(comm, this_comm);
                if (cmp > 0) {
                        new = &((*new)->rb_left);
                } else if (cmp < 0) {
                        new = &((*new)->rb_right);
                        leftmost = false;
                } else {
                        this->num_merged++;
                        this->total_runtime += data->total_runtime;
                        this->nb_atoms += data->nb_atoms;
                        this->total_lat += data->total_lat;
                        list_splice(&data->work_list, &this->work_list);
                        if (this->max_lat < data->max_lat) {
                                this->max_lat = data->max_lat;
                                this->max_lat_at = data->max_lat_at;
                        }
                        zfree(&data);
                        return;
                }
        }

        data->num_merged++;
        rb_link_node(&data->node, parent, new);
        rb_insert_color_cached(&data->node, root, leftmost);
}

static void perf_sched__merge_lat(struct perf_sched *sched)
{
        struct work_atoms *data;
        struct rb_node *node;

        if (sched->skip_merge)
                return;

        while ((node = rb_first_cached(&sched->atom_root))) {
                rb_erase_cached(node, &sched->atom_root);
                data = rb_entry(node, struct work_atoms, node);
                __merge_work_atoms(&sched->merged_atom_root, data);
        }
}

static int perf_sched__lat(struct perf_sched *sched)
{
        struct rb_node *next;

        setup_pager();

        if (perf_sched__read_events(sched))
                return -1;

        perf_sched__merge_lat(sched);
        perf_sched__sort_lat(sched);

        printf("\n -----------------------------------------------------------------------------------------------------------------\n");
        printf("  Task                  |   Runtime ms  | Switches | Average delay ms | Maximum delay ms | Maximum delay at       |\n");
        printf(" -----------------------------------------------------------------------------------------------------------------\n");

        next = rb_first_cached(&sched->sorted_atom_root);

        while (next) {
                struct work_atoms *work_list;

                work_list = rb_entry(next, struct work_atoms, node);
                output_lat_thread(sched, work_list);
                next = rb_next(next);
                thread__zput(work_list->thread);
        }

        printf(" -----------------------------------------------------------------------------------------------------------------\n");
        printf("  TOTAL:                |%11.3f ms |%9" PRIu64 " |\n",
               (double)sched->all_runtime / NSEC_PER_MSEC, sched->all_count);

        printf(" ---------------------------------------------------\n");

        print_bad_events(sched);
        printf("\n");

        return 0;
}

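/*
 * Typical use (illustrative):
 *
 *   perf sched latency --sort max
 *
 * sorts the table above by maximum delay; valid keys are runtime,
 * switch, avg and max (see latency_options below).
 */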
static int setup_map_cpus(struct perf_sched *sched)
{
        struct perf_cpu_map *map;

        sched->max_cpu = sysconf(_SC_NPROCESSORS_CONF);

        if (sched->map.comp) {
                sched->map.comp_cpus = zalloc(sched->max_cpu * sizeof(int));
                if (!sched->map.comp_cpus)
                        return -1;
        }

        if (!sched->map.cpus_str)
                return 0;

        map = perf_cpu_map__new(sched->map.cpus_str);
        if (!map) {
                pr_err("failed to get cpus map from %s\n", sched->map.cpus_str);
                return -1;
        }

        sched->map.cpus = map;
        return 0;
}

static int setup_color_pids(struct perf_sched *sched)
{
        struct perf_thread_map *map;

        if (!sched->map.color_pids_str)
                return 0;

        map = thread_map__new_by_tid_str(sched->map.color_pids_str);
        if (!map) {
                pr_err("failed to get thread map from %s\n", sched->map.color_pids_str);
                return -1;
        }

        sched->map.color_pids = map;
        return 0;
}

static int setup_color_cpus(struct perf_sched *sched)
{
        struct perf_cpu_map *map;

        if (!sched->map.color_cpus_str)
                return 0;

        map = perf_cpu_map__new(sched->map.color_cpus_str);
        if (!map) {
                pr_err("failed to get cpus map from %s\n", sched->map.color_cpus_str);
                return -1;
        }

        sched->map.color_cpus = map;
        return 0;
}

static int perf_sched__map(struct perf_sched *sched)
{
        if (setup_map_cpus(sched))
                return -1;

        if (setup_color_pids(sched))
                return -1;

        if (setup_color_cpus(sched))
                return -1;

        setup_pager();
        if (perf_sched__read_events(sched))
                return -1;
        print_bad_events(sched);
        return 0;
}

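/*
 * Typical use (illustrative):
 *
 *   perf sched map --compact --color-pids 1234 --cpus 0-3
 *
 * restricts the map to CPUs 0-3 and highlights pid 1234.
 */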
static int perf_sched__replay(struct perf_sched *sched)
{
        unsigned long i;

        calibrate_run_measurement_overhead(sched);
        calibrate_sleep_measurement_overhead(sched);

        test_calibrations(sched);

        if (perf_sched__read_events(sched))
                return -1;

        printf("nr_run_events:        %ld\n", sched->nr_run_events);
        printf("nr_sleep_events:      %ld\n", sched->nr_sleep_events);
        printf("nr_wakeup_events:     %ld\n", sched->nr_wakeup_events);

        if (sched->targetless_wakeups)
                printf("target-less wakeups:  %ld\n", sched->targetless_wakeups);
        if (sched->multitarget_wakeups)
                printf("multi-target wakeups: %ld\n", sched->multitarget_wakeups);
        if (sched->nr_run_events_optimized)
                printf("run atoms optimized:  %ld\n",
                       sched->nr_run_events_optimized);

        print_task_traces(sched);
        add_cross_task_wakeups(sched);

        create_tasks(sched);
        printf("------------------------------------------------------------\n");
        for (i = 0; i < sched->replay_repeat; i++)
                run_one_test(sched);

        return 0;
}

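/*
 * Typical use (illustrative):
 *
 *   perf sched replay -r 5
 *
 * calibrates measurement overhead, recreates the recorded tasks and
 * re-runs the workload five times.
 */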
static void setup_sorting(struct perf_sched *sched, const struct option *options,
                          const char * const usage_msg[])
{
        char *tmp, *tok, *str = strdup(sched->sort_order);

        for (tok = strtok_r(str, ", ", &tmp);
             tok; tok = strtok_r(NULL, ", ", &tmp)) {
                if (sort_dimension__add(tok, &sched->sort_list) < 0) {
                        usage_with_options_msg(usage_msg, options,
                                               "Unknown --sort key: `%s'", tok);
                }
        }

        free(str);

        sort_dimension__add("pid", &sched->cmp_pid);
}

static int __cmd_record(int argc, const char **argv)
{
        unsigned int rec_argc, i, j;
        const char **rec_argv;
        const char * const record_args[] = {
                "record",
                "-a",
                "-R",
                "-m", "1024",
                "-c", "1",
                "-e", "sched:sched_switch",
                "-e", "sched:sched_stat_wait",
                "-e", "sched:sched_stat_sleep",
                "-e", "sched:sched_stat_iowait",
                "-e", "sched:sched_stat_runtime",
                "-e", "sched:sched_process_fork",
                "-e", "sched:sched_wakeup",
                "-e", "sched:sched_wakeup_new",
                "-e", "sched:sched_migrate_task",
        };

        rec_argc = ARRAY_SIZE(record_args) + argc - 1;
        rec_argv = calloc(rec_argc + 1, sizeof(char *));

        if (rec_argv == NULL)
                return -ENOMEM;

        for (i = 0; i < ARRAY_SIZE(record_args); i++)
                rec_argv[i] = strdup(record_args[i]);

        for (j = 1; j < (unsigned int)argc; j++, i++)
                rec_argv[i] = argv[j];

        BUG_ON(i != rec_argc);

        return cmd_record(i, rec_argv);
}

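/*
 * 'perf sched record <args>' thus expands to 'perf record' with the
 * scheduler tracepoints above plus any user-supplied arguments, e.g.
 * (illustrative) 'perf sched record -- sleep 1'.
 */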
int cmd_sched(int argc, const char **argv)
{
        static const char default_sort_order[] = "avg, max, switch, runtime";
        struct perf_sched sched = {
                .tool = {
                        .sample          = perf_sched__process_tracepoint_sample,
                        .comm            = perf_sched__process_comm,
                        .namespaces      = perf_event__process_namespaces,
                        .lost            = perf_event__process_lost,
                        .fork            = perf_sched__process_fork_event,
                        .ordered_events  = true,
                },
                .cmp_pid              = LIST_HEAD_INIT(sched.cmp_pid),
                .sort_list            = LIST_HEAD_INIT(sched.sort_list),
                .start_work_mutex     = PTHREAD_MUTEX_INITIALIZER,
                .work_done_wait_mutex = PTHREAD_MUTEX_INITIALIZER,
                .sort_order           = default_sort_order,
                .replay_repeat        = 10,
                .profile_cpu          = -1,
                .next_shortname1      = 'A',
                .next_shortname2      = '0',
                .skip_merge           = 0,
                .show_callchain       = 1,
                .max_stack            = 5,
        };
        const struct option sched_options[] = {
                OPT_STRING('i', "input", &input_name, "file",
                           "input file name"),
                OPT_INCR('v', "verbose", &verbose,
                         "be more verbose (show symbol address, etc)"),
                OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
                            "dump raw trace in ASCII"),
                OPT_BOOLEAN('f', "force", &sched.force, "don't complain, do it"),
                OPT_END()
        };
        const struct option latency_options[] = {
                OPT_STRING('s', "sort", &sched.sort_order, "key[,key2...]",
                           "sort by key(s): runtime, switch, avg, max"),
                OPT_INTEGER('C', "CPU", &sched.profile_cpu,
                            "CPU to profile on"),
                OPT_BOOLEAN('p', "pids", &sched.skip_merge,
                            "latency stats per pid instead of per comm"),
                OPT_PARENT(sched_options)
        };
        const struct option replay_options[] = {
                OPT_UINTEGER('r', "repeat", &sched.replay_repeat,
                             "repeat the workload replay N times (-1: infinite)"),
                OPT_PARENT(sched_options)
        };
        const struct option map_options[] = {
                OPT_BOOLEAN(0, "compact", &sched.map.comp,
                            "map output in compact mode"),
                OPT_STRING(0, "color-pids", &sched.map.color_pids_str, "pids",
                           "highlight given pids in map"),
                OPT_STRING(0, "color-cpus", &sched.map.color_cpus_str, "cpus",
                           "highlight given CPUs in map"),
                OPT_STRING(0, "cpus", &sched.map.cpus_str, "cpus",
                           "display given CPUs in map"),
                OPT_PARENT(sched_options)
        };
        const struct option timehist_options[] = {
                OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
                           "file", "vmlinux pathname"),
                OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name,
                           "file", "kallsyms pathname"),
                OPT_BOOLEAN('g', "call-graph", &sched.show_callchain,
                            "Display call chains if present (default on)"),
                OPT_UINTEGER(0, "max-stack", &sched.max_stack,
                             "Maximum number of functions to display in a backtrace"),
                OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory",
                           "Look for files with symbols relative to this directory"),
                OPT_BOOLEAN('s', "summary", &sched.summary_only,
                            "Show only a summary of scheduling events, with statistics"),
                OPT_BOOLEAN('S', "with-summary", &sched.summary,
                            "Show all scheduling events and a summary, with statistics"),
                OPT_BOOLEAN('w', "wakeups", &sched.show_wakeups, "Show wakeup events"),
                OPT_BOOLEAN('n', "next", &sched.show_next, "Show next task"),
                OPT_BOOLEAN('M', "migrations", &sched.show_migrations, "Show migration events"),
                OPT_BOOLEAN('V', "cpu-visual", &sched.show_cpu_visual, "Add CPU visual"),
                OPT_BOOLEAN('I', "idle-hist", &sched.idle_hist, "Show idle events only"),
                OPT_STRING(0, "time", &sched.time_str, "str",
                           "Time span for analysis (start,stop)"),
                OPT_BOOLEAN(0, "state", &sched.show_state, "Show task state when sched-out"),
                OPT_STRING('p', "pid", &symbol_conf.pid_list_str, "pid[,pid...]",
                           "analyze events only for given process id(s)"),
                OPT_STRING('t', "tid", &symbol_conf.tid_list_str, "tid[,tid...]",
                           "analyze events only for given thread id(s)"),
                OPT_PARENT(sched_options)
        };
        const char * const latency_usage[] = {
                "perf sched latency [<options>]",
                NULL
        };
        const char * const replay_usage[] = {
                "perf sched replay [<options>]",
                NULL
        };
        const char * const map_usage[] = {
                "perf sched map [<options>]",
                NULL
        };
        const char * const timehist_usage[] = {
                "perf sched timehist [<options>]",
                NULL
        };
        const char *const sched_subcommands[] = { "record", "latency", "map",
                                                  "replay", "script",
                                                  "timehist", NULL };
        const char *sched_usage[] = {
                NULL,
                NULL
        };
        struct trace_sched_handler lat_ops = {
                .wakeup_event       = latency_wakeup_event,
                .switch_event       = latency_switch_event,
                .runtime_event      = latency_runtime_event,
                .migrate_task_event = latency_migrate_task_event,
        };
        struct trace_sched_handler map_ops = {
                .switch_event       = map_switch_event,
        };
        struct trace_sched_handler replay_ops = {
                .wakeup_event       = replay_wakeup_event,
                .switch_event       = replay_switch_event,
                .fork_event         = replay_fork_event,
        };
        unsigned int i;
        for (i = 0; i < ARRAY_SIZE(sched.curr_pid); i++)
                sched.curr_pid[i] = -1;

        argc = parse_options_subcommand(argc, argv, sched_options, sched_subcommands,
                                        sched_usage, PARSE_OPT_STOP_AT_NON_OPTION);
        if (!argc)
                usage_with_options(sched_usage, sched_options);

        /*
         * Aliased to 'perf script' for now:
         */
        if (!strcmp(argv[0], "script"))
                return cmd_script(argc, argv);

        if (!strncmp(argv[0], "rec", 3)) {
                return __cmd_record(argc, argv);
        } else if (!strncmp(argv[0], "lat", 3)) {
                sched.tp_handler = &lat_ops;
                if (argc > 1) {
                        argc = parse_options(argc, argv, latency_options, latency_usage, 0);
                        if (argc)
                                usage_with_options(latency_usage, latency_options);
                }
                setup_sorting(&sched, latency_options, latency_usage);
                return perf_sched__lat(&sched);
        } else if (!strcmp(argv[0], "map")) {
                if (argc) {
                        argc = parse_options(argc, argv, map_options, map_usage, 0);
                        if (argc)
                                usage_with_options(map_usage, map_options);
                }
                sched.tp_handler = &map_ops;
                setup_sorting(&sched, latency_options, latency_usage);
                return perf_sched__map(&sched);
        } else if (!strncmp(argv[0], "rep", 3)) {
                sched.tp_handler = &replay_ops;
                if (argc) {
                        argc = parse_options(argc, argv, replay_options, replay_usage, 0);
                        if (argc)
                                usage_with_options(replay_usage, replay_options);
                }
                return perf_sched__replay(&sched);
        } else if (!strcmp(argv[0], "timehist")) {
                if (argc) {
                        argc = parse_options(argc, argv, timehist_options,
                                             timehist_usage, 0);
                        if (argc)
                                usage_with_options(timehist_usage, timehist_options);
                }

                if ((sched.show_wakeups || sched.show_next) &&
                    sched.summary_only) {
                        pr_err(" Error: -s and -[n|w] are mutually exclusive.\n");
                        parse_options_usage(timehist_usage, timehist_options, "s", true);
                        if (sched.show_wakeups)
                                parse_options_usage(NULL, timehist_options, "w", true);
                        if (sched.show_next)
                                parse_options_usage(NULL, timehist_options, "n", true);
                        return -EINVAL;
                }

                return perf_sched__timehist(&sched);
        } else {
                usage_with_options(sched_usage, sched_options);
        }

        return 0;
}