5 #include "util/evlist.h"
6 #include "util/cache.h"
7 #include "util/evsel.h"
8 #include "util/symbol.h"
9 #include "util/thread.h"
10 #include "util/header.h"
11 #include "util/session.h"
12 #include "util/tool.h"
13 #include "util/cloexec.h"
14 #include "util/thread_map.h"
15 #include "util/color.h"
16 #include "util/stat.h"
17 #include "util/callchain.h"
19 #include <subcmd/parse-options.h>
20 #include "util/trace-event.h"
22 #include "util/debug.h"
24 #include <linux/log2.h>
25 #include <sys/prctl.h>
26 #include <sys/resource.h>
28 #include <semaphore.h>
31 #include <api/fs/fs.h>
32 #include <linux/time64.h>
34 #define PR_SET_NAME 15 /* Set process name */
38 #define MAX_PID 1024000
47 unsigned long nr_events;
48 unsigned long curr_event;
49 struct sched_atom **atoms;
60 enum sched_event_type {
64 SCHED_EVENT_MIGRATION,
68 enum sched_event_type type;
74 struct task_desc *wakee;
77 #define TASK_STATE_TO_CHAR_STR "RSDTtZXxKWP"
87 struct list_head list;
88 enum thread_state state;
96 struct list_head work_list;
97 struct thread *thread;
107 typedef int (*sort_fn_t)(struct work_atoms *, struct work_atoms *);
111 struct trace_sched_handler {
112 int (*switch_event)(struct perf_sched *sched, struct perf_evsel *evsel,
113 struct perf_sample *sample, struct machine *machine);
115 int (*runtime_event)(struct perf_sched *sched, struct perf_evsel *evsel,
116 struct perf_sample *sample, struct machine *machine);
118 int (*wakeup_event)(struct perf_sched *sched, struct perf_evsel *evsel,
119 struct perf_sample *sample, struct machine *machine);
121 /* PERF_RECORD_FORK event, not sched_process_fork tracepoint */
122 int (*fork_event)(struct perf_sched *sched, union perf_event *event,
123 struct machine *machine);
125 int (*migrate_task_event)(struct perf_sched *sched,
126 struct perf_evsel *evsel,
127 struct perf_sample *sample,
128 struct machine *machine);
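/*
 * A minimal sketch (the actual per-mode tables live outside this
 * excerpt) of how a subcommand wires up this vtable; the dispatchers
 * below (process_sched_*_event) call through sched->tp_handler, so
 * each mode only implements the callbacks it needs:
 *
 *	static struct trace_sched_handler replay_ops = {
 *		.wakeup_event	= replay_wakeup_event,
 *		.switch_event	= replay_switch_event,
 *		.fork_event	= replay_fork_event,
 *	};
 *
 *	sched->tp_handler = &replay_ops;
 */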
131 #define COLOR_PIDS PERF_COLOR_BLUE
132 #define COLOR_CPUS PERF_COLOR_BG_RED
134 struct perf_sched_map {
135 DECLARE_BITMAP(comp_cpus_mask, MAX_CPUS);
138 struct thread_map *color_pids;
139 const char *color_pids_str;
140 struct cpu_map *color_cpus;
141 const char *color_cpus_str;
142 struct cpu_map *cpus;
143 const char *cpus_str;
147 struct perf_tool tool;
148 const char *sort_order;
149 unsigned long nr_tasks;
150 struct task_desc **pid_to_task;
151 struct task_desc **tasks;
152 const struct trace_sched_handler *tp_handler;
153 pthread_mutex_t start_work_mutex;
154 pthread_mutex_t work_done_wait_mutex;
157 * Track the current task - that way we can know whether there are any
158 * weird events, such as a task being switched away that is not current.
161 u32 curr_pid[MAX_CPUS];
162 struct thread *curr_thread[MAX_CPUS];
163 char next_shortname1;
164 char next_shortname2;
165 unsigned int replay_repeat;
166 unsigned long nr_run_events;
167 unsigned long nr_sleep_events;
168 unsigned long nr_wakeup_events;
169 unsigned long nr_sleep_corrections;
170 unsigned long nr_run_events_optimized;
171 unsigned long targetless_wakeups;
172 unsigned long multitarget_wakeups;
173 unsigned long nr_runs;
174 unsigned long nr_timestamps;
175 unsigned long nr_unordered_timestamps;
176 unsigned long nr_context_switch_bugs;
177 unsigned long nr_events;
178 unsigned long nr_lost_chunks;
179 unsigned long nr_lost_events;
180 u64 run_measurement_overhead;
181 u64 sleep_measurement_overhead;
184 u64 runavg_cpu_usage;
185 u64 parent_cpu_usage;
186 u64 runavg_parent_cpu_usage;
192 u64 cpu_last_switched[MAX_CPUS];
193 struct rb_root atom_root, sorted_atom_root, merged_atom_root;
194 struct list_head sort_list, cmp_pid;
197 struct perf_sched_map map;
199 /* options for timehist command */
203 unsigned int max_stack;
204 bool show_cpu_visual;
209 /* per thread run time data */
210 struct thread_runtime {
211 u64 last_time; /* time of previous sched in/out event */
212 u64 dt_run; /* run time */
213 u64 dt_wait; /* time between CPU access (off cpu) */
214 u64 dt_delay; /* time between wakeup and sched-in */
215 u64 ready_to_run; /* time of wakeup */
217 struct stats run_stats;
221 /* per event run time data */
222 struct evsel_runtime {
223 u64 *last_time; /* time this event was last seen per cpu */
224 u32 ncpu; /* highest cpu slot allocated */
227 /* track idle times per cpu */
228 static struct thread **idle_threads;
229 static int idle_max_cpu;
230 static char idle_comm[] = "<idle>";
232 static u64 get_nsecs(void)
236 clock_gettime(CLOCK_MONOTONIC, &ts);
238 return ts.tv_sec * NSEC_PER_SEC + ts.tv_nsec;
241 static void burn_nsecs(struct perf_sched *sched, u64 nsecs)
243 u64 T0 = get_nsecs(), T1;
247 } while (T1 + sched->run_measurement_overhead < T0 + nsecs);
250 static void sleep_nsecs(u64 nsecs)
254 ts.tv_nsec = nsecs % NSEC_PER_SEC;
255 ts.tv_sec = nsecs / NSEC_PER_SEC;
257 nanosleep(&ts, NULL);
260 static void calibrate_run_measurement_overhead(struct perf_sched *sched)
262 u64 T0, T1, delta, min_delta = NSEC_PER_SEC;
265 for (i = 0; i < 10; i++) {
267 burn_nsecs(sched, 0);
270 min_delta = min(min_delta, delta);
272 sched->run_measurement_overhead = min_delta;
274 printf("run measurement overhead: %" PRIu64 " nsecs\n", min_delta);
277 static void calibrate_sleep_measurement_overhead(struct perf_sched *sched)
279 u64 T0, T1, delta, min_delta = NSEC_PER_SEC;
282 for (i = 0; i < 10; i++) {
287 min_delta = min(min_delta, delta);
290 sched->sleep_measurement_overhead = min_delta;
292 printf("sleep measurement overhead: %" PRIu64 " nsecs\n", min_delta);
295 static struct sched_atom *
296 get_new_event(struct task_desc *task, u64 timestamp)
298 struct sched_atom *event = zalloc(sizeof(*event));
299 unsigned long idx = task->nr_events;
302 event->timestamp = timestamp;
306 size = sizeof(struct sched_atom *) * task->nr_events;
307 task->atoms = realloc(task->atoms, size);
308 BUG_ON(!task->atoms);
310 task->atoms[idx] = event;
315 static struct sched_atom *last_event(struct task_desc *task)
317 if (!task->nr_events)
320 return task->atoms[task->nr_events - 1];
323 static void add_sched_event_run(struct perf_sched *sched, struct task_desc *task,
324 u64 timestamp, u64 duration)
326 struct sched_atom *event, *curr_event = last_event(task);
329 * optimize an existing RUN event by merging this one
332 if (curr_event && curr_event->type == SCHED_EVENT_RUN) {
333 sched->nr_run_events_optimized++;
334 curr_event->duration += duration;
338 event = get_new_event(task, timestamp);
340 event->type = SCHED_EVENT_RUN;
341 event->duration = duration;
343 sched->nr_run_events++;
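/*
 * Example: two back-to-back sched_switch deltas of 2 ms and 3 ms for
 * the same task collapse into a single 5 ms RUN atom above, counted in
 * nr_run_events_optimized rather than as a second run event.
 */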
346 static void add_sched_event_wakeup(struct perf_sched *sched, struct task_desc *task,
347 u64 timestamp, struct task_desc *wakee)
349 struct sched_atom *event, *wakee_event;
351 event = get_new_event(task, timestamp);
352 event->type = SCHED_EVENT_WAKEUP;
353 event->wakee = wakee;
355 wakee_event = last_event(wakee);
356 if (!wakee_event || wakee_event->type != SCHED_EVENT_SLEEP) {
357 sched->targetless_wakeups++;
360 if (wakee_event->wait_sem) {
361 sched->multitarget_wakeups++;
365 wakee_event->wait_sem = zalloc(sizeof(*wakee_event->wait_sem));
366 sem_init(wakee_event->wait_sem, 0, 0);
367 wakee_event->specific_wait = 1;
368 event->wait_sem = wakee_event->wait_sem;
370 sched->nr_wakeup_events++;
373 static void add_sched_event_sleep(struct perf_sched *sched, struct task_desc *task,
374 u64 timestamp, u64 task_state __maybe_unused)
376 struct sched_atom *event = get_new_event(task, timestamp);
378 event->type = SCHED_EVENT_SLEEP;
380 sched->nr_sleep_events++;
383 static struct task_desc *register_pid(struct perf_sched *sched,
384 unsigned long pid, const char *comm)
386 struct task_desc *task;
389 if (sched->pid_to_task == NULL) {
390 if (sysctl__read_int("kernel/pid_max", &pid_max) < 0)
392 BUG_ON((sched->pid_to_task = calloc(pid_max, sizeof(struct task_desc *))) == NULL);
394 if (pid >= (unsigned long)pid_max) {
395 BUG_ON((sched->pid_to_task = realloc(sched->pid_to_task, (pid + 1) *
396 sizeof(struct task_desc *))) == NULL);
397 while (pid >= (unsigned long)pid_max)
398 sched->pid_to_task[pid_max++] = NULL;
401 task = sched->pid_to_task[pid];
406 task = zalloc(sizeof(*task));
408 task->nr = sched->nr_tasks;
409 strcpy(task->comm, comm);
411 * every task starts in sleeping state - this gets ignored
412 * if there's no wakeup pointing to this sleep state:
414 add_sched_event_sleep(sched, task, 0, 0);
416 sched->pid_to_task[pid] = task;
418 sched->tasks = realloc(sched->tasks, sched->nr_tasks * sizeof(struct task_desc *));
419 BUG_ON(!sched->tasks);
420 sched->tasks[task->nr] = task;
423 printf("registered task #%ld, PID %ld (%s)\n", sched->nr_tasks, pid, comm);
429 static void print_task_traces(struct perf_sched *sched)
431 struct task_desc *task;
434 for (i = 0; i < sched->nr_tasks; i++) {
435 task = sched->tasks[i];
436 printf("task %6ld (%20s:%10ld), nr_events: %ld\n",
437 task->nr, task->comm, task->pid, task->nr_events);
441 static void add_cross_task_wakeups(struct perf_sched *sched)
443 struct task_desc *task1, *task2;
446 for (i = 0; i < sched->nr_tasks; i++) {
447 task1 = sched->tasks[i];
449 if (j == sched->nr_tasks)
451 task2 = sched->tasks[j];
452 add_sched_event_wakeup(sched, task1, 0, task2);
456 static void perf_sched__process_event(struct perf_sched *sched,
457 struct sched_atom *atom)
461 switch (atom->type) {
462 case SCHED_EVENT_RUN:
463 burn_nsecs(sched, atom->duration);
465 case SCHED_EVENT_SLEEP:
467 ret = sem_wait(atom->wait_sem);
470 case SCHED_EVENT_WAKEUP:
472 ret = sem_post(atom->wait_sem);
475 case SCHED_EVENT_MIGRATION:
482 static u64 get_cpu_usage_nsec_parent(void)
488 err = getrusage(RUSAGE_SELF, &ru);
491 sum = ru.ru_utime.tv_sec * NSEC_PER_SEC + ru.ru_utime.tv_usec * NSEC_PER_USEC;
492 sum += ru.ru_stime.tv_sec * NSEC_PER_SEC + ru.ru_stime.tv_usec * NSEC_PER_USEC;
497 static int self_open_counters(struct perf_sched *sched, unsigned long cur_task)
499 struct perf_event_attr attr;
500 char sbuf[STRERR_BUFSIZE], info[STRERR_BUFSIZE];
503 bool need_privilege = false;
505 memset(&attr, 0, sizeof(attr));
507 attr.type = PERF_TYPE_SOFTWARE;
508 attr.config = PERF_COUNT_SW_TASK_CLOCK;
511 fd = sys_perf_event_open(&attr, 0, -1, -1,
512 perf_event_open_cloexec_flag());
515 if (errno == EMFILE) {
517 BUG_ON(getrlimit(RLIMIT_NOFILE, &limit) == -1);
518 limit.rlim_cur += sched->nr_tasks - cur_task;
519 if (limit.rlim_cur > limit.rlim_max) {
520 limit.rlim_max = limit.rlim_cur;
521 need_privilege = true;
523 if (setrlimit(RLIMIT_NOFILE, &limit) == -1) {
524 if (need_privilege && errno == EPERM)
525 strcpy(info, "Need privilege\n");
529 strcpy(info, "Have a try with -f option\n");
531 pr_err("Error: sys_perf_event_open() syscall returned "
532 "with %d (%s)\n%s", fd,
533 str_error_r(errno, sbuf, sizeof(sbuf)), info);
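/*
 * Each simulated task keeps one task-clock counter fd open for its
 * whole lifetime, so replaying N tasks needs roughly N spare
 * descriptors. On EMFILE the code above raises RLIMIT_NOFILE by the
 * number of tasks still to be created, and only needs privilege when
 * the hard limit (rlim_max) must be raised as well.
 */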
539 static u64 get_cpu_usage_nsec_self(int fd)
544 ret = read(fd, &runtime, sizeof(runtime));
545 BUG_ON(ret != sizeof(runtime));
550 struct sched_thread_parms {
551 struct task_desc *task;
552 struct perf_sched *sched;
556 static void *thread_func(void *ctx)
558 struct sched_thread_parms *parms = ctx;
559 struct task_desc *this_task = parms->task;
560 struct perf_sched *sched = parms->sched;
561 u64 cpu_usage_0, cpu_usage_1;
562 unsigned long i, ret;
568 sprintf(comm2, ":%s", this_task->comm);
569 prctl(PR_SET_NAME, comm2);
573 ret = sem_post(&this_task->ready_for_work);
575 ret = pthread_mutex_lock(&sched->start_work_mutex);
577 ret = pthread_mutex_unlock(&sched->start_work_mutex);
580 cpu_usage_0 = get_cpu_usage_nsec_self(fd);
582 for (i = 0; i < this_task->nr_events; i++) {
583 this_task->curr_event = i;
584 perf_sched__process_event(sched, this_task->atoms[i]);
587 cpu_usage_1 = get_cpu_usage_nsec_self(fd);
588 this_task->cpu_usage = cpu_usage_1 - cpu_usage_0;
589 ret = sem_post(&this_task->work_done_sem);
592 ret = pthread_mutex_lock(&sched->work_done_wait_mutex);
594 ret = pthread_mutex_unlock(&sched->work_done_wait_mutex);
600 static void create_tasks(struct perf_sched *sched)
602 struct task_desc *task;
607 err = pthread_attr_init(&attr);
609 err = pthread_attr_setstacksize(&attr,
610 (size_t) max(16 * 1024, PTHREAD_STACK_MIN));
612 err = pthread_mutex_lock(&sched->start_work_mutex);
614 err = pthread_mutex_lock(&sched->work_done_wait_mutex);
616 for (i = 0; i < sched->nr_tasks; i++) {
617 struct sched_thread_parms *parms = malloc(sizeof(*parms));
618 BUG_ON(parms == NULL);
619 parms->task = task = sched->tasks[i];
620 parms->sched = sched;
621 parms->fd = self_open_counters(sched, i);
622 sem_init(&task->sleep_sem, 0, 0);
623 sem_init(&task->ready_for_work, 0, 0);
624 sem_init(&task->work_done_sem, 0, 0);
625 task->curr_event = 0;
626 err = pthread_create(&task->thread, &attr, thread_func, parms);
631 static void wait_for_tasks(struct perf_sched *sched)
633 u64 cpu_usage_0, cpu_usage_1;
634 struct task_desc *task;
635 unsigned long i, ret;
637 sched->start_time = get_nsecs();
638 sched->cpu_usage = 0;
639 pthread_mutex_unlock(&sched->work_done_wait_mutex);
641 for (i = 0; i < sched->nr_tasks; i++) {
642 task = sched->tasks[i];
643 ret = sem_wait(&task->ready_for_work);
645 sem_init(&task->ready_for_work, 0, 0);
647 ret = pthread_mutex_lock(&sched->work_done_wait_mutex);
650 cpu_usage_0 = get_cpu_usage_nsec_parent();
652 pthread_mutex_unlock(&sched->start_work_mutex);
654 for (i = 0; i < sched->nr_tasks; i++) {
655 task = sched->tasks[i];
656 ret = sem_wait(&task->work_done_sem);
658 sem_init(&task->work_done_sem, 0, 0);
659 sched->cpu_usage += task->cpu_usage;
663 cpu_usage_1 = get_cpu_usage_nsec_parent();
664 if (!sched->runavg_cpu_usage)
665 sched->runavg_cpu_usage = sched->cpu_usage;
666 sched->runavg_cpu_usage = (sched->runavg_cpu_usage * (sched->replay_repeat - 1) + sched->cpu_usage) / sched->replay_repeat;
668 sched->parent_cpu_usage = cpu_usage_1 - cpu_usage_0;
669 if (!sched->runavg_parent_cpu_usage)
670 sched->runavg_parent_cpu_usage = sched->parent_cpu_usage;
671 sched->runavg_parent_cpu_usage = (sched->runavg_parent_cpu_usage * (sched->replay_repeat - 1) +
672 sched->parent_cpu_usage)/sched->replay_repeat;
674 ret = pthread_mutex_lock(&sched->start_work_mutex);
677 for (i = 0; i < sched->nr_tasks; i++) {
678 task = sched->tasks[i];
679 sem_init(&task->sleep_sem, 0, 0);
680 task->curr_event = 0;
684 static void run_one_test(struct perf_sched *sched)
686 u64 T0, T1, delta, avg_delta, fluct;
689 wait_for_tasks(sched);
693 sched->sum_runtime += delta;
696 avg_delta = sched->sum_runtime / sched->nr_runs;
697 if (delta < avg_delta)
698 fluct = avg_delta - delta;
700 fluct = delta - avg_delta;
701 sched->sum_fluct += fluct;
703 sched->run_avg = delta;
704 sched->run_avg = (sched->run_avg * (sched->replay_repeat - 1) + delta) / sched->replay_repeat;
706 printf("#%-3ld: %0.3f, ", sched->nr_runs, (double)delta / NSEC_PER_MSEC);
708 printf("ravg: %0.2f, ", (double)sched->run_avg / NSEC_PER_MSEC);
710 printf("cpu: %0.2f / %0.2f",
711 (double)sched->cpu_usage / NSEC_PER_MSEC, (double)sched->runavg_cpu_usage / NSEC_PER_MSEC);
715 * rusage statistics done by the parent; these are less
716 * accurate than the sched->sum_exec_runtime based statistics:
718 printf(" [%0.2f / %0.2f]",
719 (double)sched->parent_cpu_usage / NSEC_PER_MSEC,
720 (double)sched->runavg_parent_cpu_usage / NSEC_PER_MSEC);
725 if (sched->nr_sleep_corrections)
726 printf(" (%ld sleep corrections)\n", sched->nr_sleep_corrections);
727 sched->nr_sleep_corrections = 0;
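/*
 * With --repeat R the averages above are moving averages: each new run
 * contributes weight 1/R and the previous average keeps (R-1)/R, e.g.
 * R = 10 with runs of 100 ms then 110 ms gives
 * run_avg = (100 * 9 + 110) / 10 = 101 ms after the second run.
 */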
730 static void test_calibrations(struct perf_sched *sched)
735 burn_nsecs(sched, NSEC_PER_MSEC);
738 printf("the run test took %" PRIu64 " nsecs\n", T1 - T0);
741 sleep_nsecs(NSEC_PER_MSEC);
744 printf("the sleep test took %" PRIu64 " nsecs\n", T1 - T0);
748 replay_wakeup_event(struct perf_sched *sched,
749 struct perf_evsel *evsel, struct perf_sample *sample,
750 struct machine *machine __maybe_unused)
752 const char *comm = perf_evsel__strval(evsel, sample, "comm");
753 const u32 pid = perf_evsel__intval(evsel, sample, "pid");
754 struct task_desc *waker, *wakee;
757 printf("sched_wakeup event %p\n", evsel);
759 printf(" ... pid %d woke up %s/%d\n", sample->tid, comm, pid);
762 waker = register_pid(sched, sample->tid, "<unknown>");
763 wakee = register_pid(sched, pid, comm);
765 add_sched_event_wakeup(sched, waker, sample->time, wakee);
769 static int replay_switch_event(struct perf_sched *sched,
770 struct perf_evsel *evsel,
771 struct perf_sample *sample,
772 struct machine *machine __maybe_unused)
774 const char *prev_comm = perf_evsel__strval(evsel, sample, "prev_comm"),
775 *next_comm = perf_evsel__strval(evsel, sample, "next_comm");
776 const u32 prev_pid = perf_evsel__intval(evsel, sample, "prev_pid"),
777 next_pid = perf_evsel__intval(evsel, sample, "next_pid");
778 const u64 prev_state = perf_evsel__intval(evsel, sample, "prev_state");
779 struct task_desc *prev, __maybe_unused *next;
780 u64 timestamp0, timestamp = sample->time;
781 int cpu = sample->cpu;
785 printf("sched_switch event %p\n", evsel);
787 if (cpu >= MAX_CPUS || cpu < 0)
790 timestamp0 = sched->cpu_last_switched[cpu];
792 delta = timestamp - timestamp0;
797 pr_err("hm, delta: %" PRIu64 " < 0 ?\n", delta);
801 pr_debug(" ... switch from %s/%d to %s/%d [ran %" PRIu64 " nsecs]\n",
802 prev_comm, prev_pid, next_comm, next_pid, delta);
804 prev = register_pid(sched, prev_pid, prev_comm);
805 next = register_pid(sched, next_pid, next_comm);
807 sched->cpu_last_switched[cpu] = timestamp;
809 add_sched_event_run(sched, prev, timestamp, delta);
810 add_sched_event_sleep(sched, prev, timestamp, prev_state);
815 static int replay_fork_event(struct perf_sched *sched,
816 union perf_event *event,
817 struct machine *machine)
819 struct thread *child, *parent;
821 child = machine__findnew_thread(machine, event->fork.pid,
823 parent = machine__findnew_thread(machine, event->fork.ppid,
826 if (child == NULL || parent == NULL) {
827 pr_debug("thread does not exist on fork event: child %p, parent %p\n",
833 printf("fork event\n");
834 printf("... parent: %s/%d\n", thread__comm_str(parent), parent->tid);
835 printf("... child: %s/%d\n", thread__comm_str(child), child->tid);
838 register_pid(sched, parent->tid, thread__comm_str(parent));
839 register_pid(sched, child->tid, thread__comm_str(child));
846 struct sort_dimension {
849 struct list_head list;
853 thread_lat_cmp(struct list_head *list, struct work_atoms *l, struct work_atoms *r)
855 struct sort_dimension *sort;
858 BUG_ON(list_empty(list));
860 list_for_each_entry(sort, list, list) {
861 ret = sort->cmp(l, r);
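/*
 * The first dimension in `list` whose cmp() distinguishes l from r
 * decides the order; later dimensions only break ties. A sort order
 * such as "max,runtime" therefore ranks threads by maximum latency,
 * falling back to total runtime for equal maxima.
 */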
869 static struct work_atoms *
870 thread_atoms_search(struct rb_root *root, struct thread *thread,
871 struct list_head *sort_list)
873 struct rb_node *node = root->rb_node;
874 struct work_atoms key = { .thread = thread };
877 struct work_atoms *atoms;
880 atoms = container_of(node, struct work_atoms, node);
882 cmp = thread_lat_cmp(sort_list, &key, atoms);
884 node = node->rb_left;
886 node = node->rb_right;
888 BUG_ON(thread != atoms->thread);
896 __thread_latency_insert(struct rb_root *root, struct work_atoms *data,
897 struct list_head *sort_list)
899 struct rb_node **new = &(root->rb_node), *parent = NULL;
902 struct work_atoms *this;
905 this = container_of(*new, struct work_atoms, node);
908 cmp = thread_lat_cmp(sort_list, data, this);
911 new = &((*new)->rb_left);
913 new = &((*new)->rb_right);
916 rb_link_node(&data->node, parent, new);
917 rb_insert_color(&data->node, root);
920 static int thread_atoms_insert(struct perf_sched *sched, struct thread *thread)
922 struct work_atoms *atoms = zalloc(sizeof(*atoms));
924 pr_err("No memory at %s\n", __func__);
928 atoms->thread = thread__get(thread);
929 INIT_LIST_HEAD(&atoms->work_list);
930 __thread_latency_insert(&sched->atom_root, atoms, &sched->cmp_pid);
934 static char sched_out_state(u64 prev_state)
936 const char *str = TASK_STATE_TO_CHAR_STR;
938 return str[prev_state];
942 add_sched_out_event(struct work_atoms *atoms,
946 struct work_atom *atom = zalloc(sizeof(*atom));
948 pr_err("Non memory at %s", __func__);
952 atom->sched_out_time = timestamp;
954 if (run_state == 'R') {
955 atom->state = THREAD_WAIT_CPU;
956 atom->wake_up_time = atom->sched_out_time;
959 list_add_tail(&atom->list, &atoms->work_list);
964 add_runtime_event(struct work_atoms *atoms, u64 delta,
965 u64 timestamp __maybe_unused)
967 struct work_atom *atom;
969 BUG_ON(list_empty(&atoms->work_list));
971 atom = list_entry(atoms->work_list.prev, struct work_atom, list);
973 atom->runtime += delta;
974 atoms->total_runtime += delta;
978 add_sched_in_event(struct work_atoms *atoms, u64 timestamp)
980 struct work_atom *atom;
983 if (list_empty(&atoms->work_list))
986 atom = list_entry(atoms->work_list.prev, struct work_atom, list);
988 if (atom->state != THREAD_WAIT_CPU)
991 if (timestamp < atom->wake_up_time) {
992 atom->state = THREAD_IGNORE;
996 atom->state = THREAD_SCHED_IN;
997 atom->sched_in_time = timestamp;
999 delta = atom->sched_in_time - atom->wake_up_time;
1000 atoms->total_lat += delta;
1001 if (delta > atoms->max_lat) {
1002 atoms->max_lat = delta;
1003 atoms->max_lat_at = timestamp;
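/*
 * Example: a task woken at t = 100 us but not scheduled in until
 * t = 130 us contributes delta = 30 us to total_lat; if that beats the
 * previous max_lat, max_lat_at records the sched-in timestamp at which
 * the worst wakeup latency completed.
 */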
1008 static int latency_switch_event(struct perf_sched *sched,
1009 struct perf_evsel *evsel,
1010 struct perf_sample *sample,
1011 struct machine *machine)
1013 const u32 prev_pid = perf_evsel__intval(evsel, sample, "prev_pid"),
1014 next_pid = perf_evsel__intval(evsel, sample, "next_pid");
1015 const u64 prev_state = perf_evsel__intval(evsel, sample, "prev_state");
1016 struct work_atoms *out_events, *in_events;
1017 struct thread *sched_out, *sched_in;
1018 u64 timestamp0, timestamp = sample->time;
1019 int cpu = sample->cpu, err = -1;
1022 BUG_ON(cpu >= MAX_CPUS || cpu < 0);
1024 timestamp0 = sched->cpu_last_switched[cpu];
1025 sched->cpu_last_switched[cpu] = timestamp;
1027 delta = timestamp - timestamp0;
1032 pr_err("hm, delta: %" PRIu64 " < 0 ?\n", delta);
1036 sched_out = machine__findnew_thread(machine, -1, prev_pid);
1037 sched_in = machine__findnew_thread(machine, -1, next_pid);
1038 if (sched_out == NULL || sched_in == NULL)
1041 out_events = thread_atoms_search(&sched->atom_root, sched_out, &sched->cmp_pid);
1043 if (thread_atoms_insert(sched, sched_out))
1045 out_events = thread_atoms_search(&sched->atom_root, sched_out, &sched->cmp_pid);
1047 pr_err("out-event: Internal tree error");
1051 if (add_sched_out_event(out_events, sched_out_state(prev_state), timestamp))
1054 in_events = thread_atoms_search(&sched->atom_root, sched_in, &sched->cmp_pid);
1056 if (thread_atoms_insert(sched, sched_in))
1058 in_events = thread_atoms_search(&sched->atom_root, sched_in, &sched->cmp_pid);
1060 pr_err("in-event: Internal tree error");
1064 * Task came in that we have not heard about yet,
1065 * add in an initial atom in runnable state:
1067 if (add_sched_out_event(in_events, 'R', timestamp))
1070 add_sched_in_event(in_events, timestamp);
1073 thread__put(sched_out);
1074 thread__put(sched_in);
1078 static int latency_runtime_event(struct perf_sched *sched,
1079 struct perf_evsel *evsel,
1080 struct perf_sample *sample,
1081 struct machine *machine)
1083 const u32 pid = perf_evsel__intval(evsel, sample, "pid");
1084 const u64 runtime = perf_evsel__intval(evsel, sample, "runtime");
1085 struct thread *thread = machine__findnew_thread(machine, -1, pid);
1086 struct work_atoms *atoms = thread_atoms_search(&sched->atom_root, thread, &sched->cmp_pid);
1087 u64 timestamp = sample->time;
1088 int cpu = sample->cpu, err = -1;
1093 BUG_ON(cpu >= MAX_CPUS || cpu < 0);
1095 if (thread_atoms_insert(sched, thread))
1097 atoms = thread_atoms_search(&sched->atom_root, thread, &sched->cmp_pid);
1099 pr_err("in-event: Internal tree error");
1102 if (add_sched_out_event(atoms, 'R', timestamp))
1106 add_runtime_event(atoms, runtime, timestamp);
1109 thread__put(thread);
1113 static int latency_wakeup_event(struct perf_sched *sched,
1114 struct perf_evsel *evsel,
1115 struct perf_sample *sample,
1116 struct machine *machine)
1118 const u32 pid = perf_evsel__intval(evsel, sample, "pid");
1119 struct work_atoms *atoms;
1120 struct work_atom *atom;
1121 struct thread *wakee;
1122 u64 timestamp = sample->time;
1125 wakee = machine__findnew_thread(machine, -1, pid);
1128 atoms = thread_atoms_search(&sched->atom_root, wakee, &sched->cmp_pid);
1130 if (thread_atoms_insert(sched, wakee))
1132 atoms = thread_atoms_search(&sched->atom_root, wakee, &sched->cmp_pid);
1134 pr_err("wakeup-event: Internal tree error");
1137 if (add_sched_out_event(atoms, 'S', timestamp))
1141 BUG_ON(list_empty(&atoms->work_list));
1143 atom = list_entry(atoms->work_list.prev, struct work_atom, list);
1146 * There is no guarantee that the wakeup event happens while the
1147 * task is off the run queue; it may also fire while the task is
1148 * on the run queue, merely changing ->state to TASK_RUNNING,
1149 * so we should not set ->wake_up_time when waking up a
1150 * task that is already on the run queue.
1152 * You WILL be missing events if you've recorded only
1153 * one CPU, or are looking at only one, so don't
1154 * skip in this case.
1156 if (sched->profile_cpu == -1 && atom->state != THREAD_SLEEPING)
1159 sched->nr_timestamps++;
1160 if (atom->sched_out_time > timestamp) {
1161 sched->nr_unordered_timestamps++;
1165 atom->state = THREAD_WAIT_CPU;
1166 atom->wake_up_time = timestamp;
1174 static int latency_migrate_task_event(struct perf_sched *sched,
1175 struct perf_evsel *evsel,
1176 struct perf_sample *sample,
1177 struct machine *machine)
1179 const u32 pid = perf_evsel__intval(evsel, sample, "pid");
1180 u64 timestamp = sample->time;
1181 struct work_atoms *atoms;
1182 struct work_atom *atom;
1183 struct thread *migrant;
1187 * Only need to worry about migration when profiling one CPU.
1189 if (sched->profile_cpu == -1)
1192 migrant = machine__findnew_thread(machine, -1, pid);
1193 if (migrant == NULL)
1195 atoms = thread_atoms_search(&sched->atom_root, migrant, &sched->cmp_pid);
1197 if (thread_atoms_insert(sched, migrant))
1199 register_pid(sched, migrant->tid, thread__comm_str(migrant));
1200 atoms = thread_atoms_search(&sched->atom_root, migrant, &sched->cmp_pid);
1202 pr_err("migration-event: Internal tree error");
1205 if (add_sched_out_event(atoms, 'R', timestamp))
1209 BUG_ON(list_empty(&atoms->work_list));
1211 atom = list_entry(atoms->work_list.prev, struct work_atom, list);
1212 atom->sched_in_time = atom->sched_out_time = atom->wake_up_time = timestamp;
1214 sched->nr_timestamps++;
1216 if (atom->sched_out_time > timestamp)
1217 sched->nr_unordered_timestamps++;
1220 thread__put(migrant);
1224 static void output_lat_thread(struct perf_sched *sched, struct work_atoms *work_list)
1229 char max_lat_at[32];
1231 if (!work_list->nb_atoms)
1234 * Ignore idle threads:
1236 if (!strcmp(thread__comm_str(work_list->thread), "swapper"))
1239 sched->all_runtime += work_list->total_runtime;
1240 sched->all_count += work_list->nb_atoms;
1242 if (work_list->num_merged > 1)
1243 ret = printf(" %s:(%d) ", thread__comm_str(work_list->thread), work_list->num_merged);
1245 ret = printf(" %s:%d ", thread__comm_str(work_list->thread), work_list->thread->tid);
1247 for (i = 0; i < 24 - ret; i++)
1250 avg = work_list->total_lat / work_list->nb_atoms;
1251 timestamp__scnprintf_usec(work_list->max_lat_at, max_lat_at, sizeof(max_lat_at));
1253 printf("|%11.3f ms |%9" PRIu64 " | avg:%9.3f ms | max:%9.3f ms | max at: %13s s\n",
1254 (double)work_list->total_runtime / NSEC_PER_MSEC,
1255 work_list->nb_atoms, (double)avg / NSEC_PER_MSEC,
1256 (double)work_list->max_lat / NSEC_PER_MSEC,
1260 static int pid_cmp(struct work_atoms *l, struct work_atoms *r)
1262 if (l->thread == r->thread)
1264 if (l->thread->tid < r->thread->tid)
1266 if (l->thread->tid > r->thread->tid)
1268 return (int)(l->thread - r->thread);
1271 static int avg_cmp(struct work_atoms *l, struct work_atoms *r)
1281 avgl = l->total_lat / l->nb_atoms;
1282 avgr = r->total_lat / r->nb_atoms;
1292 static int max_cmp(struct work_atoms *l, struct work_atoms *r)
1294 if (l->max_lat < r->max_lat)
1296 if (l->max_lat > r->max_lat)
1302 static int switch_cmp(struct work_atoms *l, struct work_atoms *r)
1304 if (l->nb_atoms < r->nb_atoms)
1306 if (l->nb_atoms > r->nb_atoms)
1312 static int runtime_cmp(struct work_atoms *l, struct work_atoms *r)
1314 if (l->total_runtime < r->total_runtime)
1316 if (l->total_runtime > r->total_runtime)
1322 static int sort_dimension__add(const char *tok, struct list_head *list)
1325 static struct sort_dimension avg_sort_dimension = {
1329 static struct sort_dimension max_sort_dimension = {
1333 static struct sort_dimension pid_sort_dimension = {
1337 static struct sort_dimension runtime_sort_dimension = {
1341 static struct sort_dimension switch_sort_dimension = {
1345 struct sort_dimension *available_sorts[] = {
1346 &pid_sort_dimension,
1347 &avg_sort_dimension,
1348 &max_sort_dimension,
1349 &switch_sort_dimension,
1350 &runtime_sort_dimension,
1353 for (i = 0; i < ARRAY_SIZE(available_sorts); i++) {
1354 if (!strcmp(available_sorts[i]->name, tok)) {
1355 list_add_tail(&available_sorts[i]->list, list);
1364 static void perf_sched__sort_lat(struct perf_sched *sched)
1366 struct rb_node *node;
1367 struct rb_root *root = &sched->atom_root;
1370 struct work_atoms *data;
1371 node = rb_first(root);
1375 rb_erase(node, root);
1376 data = rb_entry(node, struct work_atoms, node);
1377 __thread_latency_insert(&sched->sorted_atom_root, data, &sched->sort_list);
1379 if (root == &sched->atom_root) {
1380 root = &sched->merged_atom_root;
1385 static int process_sched_wakeup_event(struct perf_tool *tool,
1386 struct perf_evsel *evsel,
1387 struct perf_sample *sample,
1388 struct machine *machine)
1390 struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
1392 if (sched->tp_handler->wakeup_event)
1393 return sched->tp_handler->wakeup_event(sched, evsel, sample, machine);
1403 static bool thread__has_color(struct thread *thread)
1405 union map_priv priv = {
1406 .ptr = thread__priv(thread),
1412 static struct thread*
1413 map__findnew_thread(struct perf_sched *sched, struct machine *machine, pid_t pid, pid_t tid)
1415 struct thread *thread = machine__findnew_thread(machine, pid, tid);
1416 union map_priv priv = {
1420 if (!sched->map.color_pids || !thread || thread__priv(thread))
1423 if (thread_map__has(sched->map.color_pids, tid))
1426 thread__set_priv(thread, priv.ptr);
1430 static int map_switch_event(struct perf_sched *sched, struct perf_evsel *evsel,
1431 struct perf_sample *sample, struct machine *machine)
1433 const u32 next_pid = perf_evsel__intval(evsel, sample, "next_pid");
1434 struct thread *sched_in;
1436 u64 timestamp0, timestamp = sample->time;
1438 int i, this_cpu = sample->cpu;
1440 bool new_cpu = false;
1441 const char *color = PERF_COLOR_NORMAL;
1442 char stimestamp[32];
1444 BUG_ON(this_cpu >= MAX_CPUS || this_cpu < 0);
1446 if (this_cpu > sched->max_cpu)
1447 sched->max_cpu = this_cpu;
1449 if (sched->map.comp) {
1450 cpus_nr = bitmap_weight(sched->map.comp_cpus_mask, MAX_CPUS);
1451 if (!test_and_set_bit(this_cpu, sched->map.comp_cpus_mask)) {
1452 sched->map.comp_cpus[cpus_nr++] = this_cpu;
1456 cpus_nr = sched->max_cpu;
1458 timestamp0 = sched->cpu_last_switched[this_cpu];
1459 sched->cpu_last_switched[this_cpu] = timestamp;
1461 delta = timestamp - timestamp0;
1466 pr_err("hm, delta: %" PRIu64 " < 0 ?\n", delta);
1470 sched_in = map__findnew_thread(sched, machine, -1, next_pid);
1471 if (sched_in == NULL)
1474 sched->curr_thread[this_cpu] = thread__get(sched_in);
1479 if (!sched_in->shortname[0]) {
1480 if (!strcmp(thread__comm_str(sched_in), "swapper")) {
1482 * Don't allocate a letter-number for swapper:0
1483 * as a shortname. Instead, we use '.' for it.
1485 sched_in->shortname[0] = '.';
1486 sched_in->shortname[1] = ' ';
1488 sched_in->shortname[0] = sched->next_shortname1;
1489 sched_in->shortname[1] = sched->next_shortname2;
1491 if (sched->next_shortname1 < 'Z') {
1492 sched->next_shortname1++;
1494 sched->next_shortname1 = 'A';
1495 if (sched->next_shortname2 < '9')
1496 sched->next_shortname2++;
1498 sched->next_shortname2 = '0';
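/*
 * Assuming the counters start at 'A'/'0' (initialized outside this
 * excerpt), shortnames cycle A0, B0, ..., Z0, A1, ..., Z9: 260
 * distinct two-character labels before they repeat, while swapper
 * keeps the fixed ". " label so idle CPUs stand out in the map.
 */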
1504 for (i = 0; i < cpus_nr; i++) {
1505 int cpu = sched->map.comp ? sched->map.comp_cpus[i] : i;
1506 struct thread *curr_thread = sched->curr_thread[cpu];
1507 const char *pid_color = color;
1508 const char *cpu_color = color;
1510 if (curr_thread && thread__has_color(curr_thread))
1511 pid_color = COLOR_PIDS;
1513 if (sched->map.cpus && !cpu_map__has(sched->map.cpus, cpu))
1516 if (sched->map.color_cpus && cpu_map__has(sched->map.color_cpus, cpu))
1517 cpu_color = COLOR_CPUS;
1519 if (cpu != this_cpu)
1520 color_fprintf(stdout, color, " ");
1522 color_fprintf(stdout, cpu_color, "*");
1524 if (sched->curr_thread[cpu])
1525 color_fprintf(stdout, pid_color, "%2s ", sched->curr_thread[cpu]->shortname);
1527 color_fprintf(stdout, color, " ");
1530 if (sched->map.cpus && !cpu_map__has(sched->map.cpus, this_cpu))
1533 timestamp__scnprintf_usec(timestamp, stimestamp, sizeof(stimestamp));
1534 color_fprintf(stdout, color, " %12s secs ", stimestamp);
1535 if (new_shortname || (verbose && sched_in->tid)) {
1536 const char *pid_color = color;
1538 if (thread__has_color(sched_in))
1539 pid_color = COLOR_PIDS;
1541 color_fprintf(stdout, pid_color, "%s => %s:%d",
1542 sched_in->shortname, thread__comm_str(sched_in), sched_in->tid);
1545 if (sched->map.comp && new_cpu)
1546 color_fprintf(stdout, color, " (CPU %d)", this_cpu);
1549 color_fprintf(stdout, color, "\n");
1551 thread__put(sched_in);
1556 static int process_sched_switch_event(struct perf_tool *tool,
1557 struct perf_evsel *evsel,
1558 struct perf_sample *sample,
1559 struct machine *machine)
1561 struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
1562 int this_cpu = sample->cpu, err = 0;
1563 u32 prev_pid = perf_evsel__intval(evsel, sample, "prev_pid"),
1564 next_pid = perf_evsel__intval(evsel, sample, "next_pid");
1566 if (sched->curr_pid[this_cpu] != (u32)-1) {
1568 * Are we trying to switch away a PID that is not current?
1571 if (sched->curr_pid[this_cpu] != prev_pid)
1572 sched->nr_context_switch_bugs++;
1575 if (sched->tp_handler->switch_event)
1576 err = sched->tp_handler->switch_event(sched, evsel, sample, machine);
1578 sched->curr_pid[this_cpu] = next_pid;
1582 static int process_sched_runtime_event(struct perf_tool *tool,
1583 struct perf_evsel *evsel,
1584 struct perf_sample *sample,
1585 struct machine *machine)
1587 struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
1589 if (sched->tp_handler->runtime_event)
1590 return sched->tp_handler->runtime_event(sched, evsel, sample, machine);
1595 static int perf_sched__process_fork_event(struct perf_tool *tool,
1596 union perf_event *event,
1597 struct perf_sample *sample,
1598 struct machine *machine)
1600 struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
1602 /* run the fork event through the perf machinery */
1603 perf_event__process_fork(tool, event, sample, machine);
1605 /* and then run additional processing needed for this command */
1606 if (sched->tp_handler->fork_event)
1607 return sched->tp_handler->fork_event(sched, event, machine);
1612 static int process_sched_migrate_task_event(struct perf_tool *tool,
1613 struct perf_evsel *evsel,
1614 struct perf_sample *sample,
1615 struct machine *machine)
1617 struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
1619 if (sched->tp_handler->migrate_task_event)
1620 return sched->tp_handler->migrate_task_event(sched, evsel, sample, machine);
1625 typedef int (*tracepoint_handler)(struct perf_tool *tool,
1626 struct perf_evsel *evsel,
1627 struct perf_sample *sample,
1628 struct machine *machine);
1630 static int perf_sched__process_tracepoint_sample(struct perf_tool *tool __maybe_unused,
1631 union perf_event *event __maybe_unused,
1632 struct perf_sample *sample,
1633 struct perf_evsel *evsel,
1634 struct machine *machine)
1638 if (evsel->handler != NULL) {
1639 tracepoint_handler f = evsel->handler;
1640 err = f(tool, evsel, sample, machine);
1646 static int perf_sched__read_events(struct perf_sched *sched)
1648 const struct perf_evsel_str_handler handlers[] = {
1649 { "sched:sched_switch", process_sched_switch_event, },
1650 { "sched:sched_stat_runtime", process_sched_runtime_event, },
1651 { "sched:sched_wakeup", process_sched_wakeup_event, },
1652 { "sched:sched_wakeup_new", process_sched_wakeup_event, },
1653 { "sched:sched_migrate_task", process_sched_migrate_task_event, },
1655 struct perf_session *session;
1656 struct perf_data_file file = {
1658 .mode = PERF_DATA_MODE_READ,
1659 .force = sched->force,
1663 session = perf_session__new(&file, false, &sched->tool);
1664 if (session == NULL) {
1665 pr_debug("No Memory for session\n");
1669 symbol__init(&session->header.env);
1671 if (perf_session__set_tracepoints_handlers(session, handlers))
1674 if (perf_session__has_traces(session, "record -R")) {
1675 int err = perf_session__process_events(session);
1677 pr_err("Failed to process events, error %d", err);
1681 sched->nr_events = session->evlist->stats.nr_events[0];
1682 sched->nr_lost_events = session->evlist->stats.total_lost;
1683 sched->nr_lost_chunks = session->evlist->stats.nr_events[PERF_RECORD_LOST];
1688 perf_session__delete(session);
1693 * scheduling times are printed as msec.usec
1695 static inline void print_sched_time(unsigned long long nsecs, int width)
1697 unsigned long msecs;
1698 unsigned long usecs;
1700 msecs = nsecs / NSEC_PER_MSEC;
1701 nsecs -= msecs * NSEC_PER_MSEC;
1702 usecs = nsecs / NSEC_PER_USEC;
1703 printf("%*lu.%03lu ", width, msecs, usecs);
1707 * returns runtime data for event, allocating memory for it the
1708 * first time it is used.
1710 static struct evsel_runtime *perf_evsel__get_runtime(struct perf_evsel *evsel)
1712 struct evsel_runtime *r = evsel->priv;
1715 r = zalloc(sizeof(struct evsel_runtime));
1723 * save last time event was seen per cpu
1725 static void perf_evsel__save_time(struct perf_evsel *evsel,
1726 u64 timestamp, u32 cpu)
1728 struct evsel_runtime *r = perf_evsel__get_runtime(evsel);
1733 if ((cpu >= r->ncpu) || (r->last_time == NULL)) {
1734 int i, n = __roundup_pow_of_two(cpu+1);
1735 void *p = r->last_time;
1737 p = realloc(r->last_time, n * sizeof(u64));
1742 for (i = r->ncpu; i < n; ++i)
1743 r->last_time[i] = (u64) 0;
1748 r->last_time[cpu] = timestamp;
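/*
 * Example: the first sample seen on cpu 5 rounds up to n = 8 slots, so
 * last_time[] is reallocated once to cover cpus 0-7 with the new tail
 * zeroed, instead of reallocating for every newly-seen cpu.
 */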
1751 /* returns last time this event was seen on the given cpu */
1752 static u64 perf_evsel__get_time(struct perf_evsel *evsel, u32 cpu)
1754 struct evsel_runtime *r = perf_evsel__get_runtime(evsel);
1756 if ((r == NULL) || (r->last_time == NULL) || (cpu >= r->ncpu))
1759 return r->last_time[cpu];
1762 static int comm_width = 20;
1764 static char *timehist_get_commstr(struct thread *thread)
1766 static char str[32];
1767 const char *comm = thread__comm_str(thread);
1768 pid_t tid = thread->tid;
1769 pid_t pid = thread->pid_;
1773 n = scnprintf(str, sizeof(str), "%s", comm);
1775 else if (tid != pid)
1776 n = scnprintf(str, sizeof(str), "%s[%d/%d]", comm, tid, pid);
1779 n = scnprintf(str, sizeof(str), "%s[%d]", comm, tid);
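/*
 * Examples: a main thread prints as "bash[1234]", a secondary thread
 * as "bash[1235/1234]" (tid/pid), and a tid of 0 falls back to the
 * bare comm, e.g. "<idle>".
 */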
1787 static void timehist_header(struct perf_sched *sched)
1789 u32 ncpus = sched->max_cpu + 1;
1792 printf("%15s %6s ", "time", "cpu");
1794 if (sched->show_cpu_visual) {
1796 for (i = 0, j = 0; i < ncpus; ++i) {
1804 printf(" %-20s %9s %9s %9s",
1805 "task name", "wait time", "sch delay", "run time");
1812 printf("%15s %-6s ", "", "");
1814 if (sched->show_cpu_visual)
1815 printf(" %*s ", ncpus, "");
1817 printf(" %-20s %9s %9s %9s\n", "[tid/pid]", "(msec)", "(msec)", "(msec)");
1822 printf("%.15s %.6s ", graph_dotted_line, graph_dotted_line);
1824 if (sched->show_cpu_visual)
1825 printf(" %.*s ", ncpus, graph_dotted_line);
1827 printf(" %.20s %.9s %.9s %.9s",
1828 graph_dotted_line, graph_dotted_line, graph_dotted_line,
1834 static void timehist_print_sample(struct perf_sched *sched,
1835 struct perf_sample *sample,
1836 struct addr_location *al,
1837 struct thread *thread)
1839 struct thread_runtime *tr = thread__priv(thread);
1840 u32 max_cpus = sched->max_cpu + 1;
1843 timestamp__scnprintf_usec(sample->time, tstr, sizeof(tstr));
1844 printf("%15s [%04d] ", tstr, sample->cpu);
1846 if (sched->show_cpu_visual) {
1851 for (i = 0; i < max_cpus; ++i) {
1852 /* flag idle times with 'i'; others are sched events */
1853 if (i == sample->cpu)
1854 c = (thread->tid == 0) ? 'i' : 's';
1862 printf(" %-*s ", comm_width, timehist_get_commstr(thread));
1864 print_sched_time(tr->dt_wait, 6);
1865 print_sched_time(tr->dt_delay, 6);
1866 print_sched_time(tr->dt_run, 6);
1868 if (sched->show_wakeups)
1869 printf(" %-*s", comm_width, "");
1871 if (thread->tid == 0)
1874 if (sched->show_callchain)
1877 sample__fprintf_sym(sample, al, 0,
1878 EVSEL__PRINT_SYM | EVSEL__PRINT_ONELINE |
1879 EVSEL__PRINT_CALLCHAIN_ARROW |
1880 EVSEL__PRINT_SKIP_IGNORED,
1881 &callchain_cursor, stdout);
1888 * Explanation of delta-time stats:
1890 * t = time of current schedule out event
1891 * tprev = time of previous sched out event
1892 * also time of schedule-in event for current task
1893 * last_time = time of last sched change event for current task
1894 * (i.e., time the process was last scheduled out)
1895 * ready_to_run = time of wakeup for current task
1897 * -----|------------|------------|------------|------
1898 *    last time    ready to run  tprev          t
1901 *             |-------- dt_wait --------|
1902 *                     |- dt_delay -|-- dt_run --|
1904 * dt_run = run time of current task
1905 * dt_wait = time between last schedule out event for task and tprev
1906 * represents time spent off the cpu
1907 * dt_delay = time between wakeup and schedule-in of task
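 *
 * Worked example: last_time = 100 us, ready_to_run = 250 us,
 * tprev = 300 us, t = 800 us gives dt_wait = 300 - 100 = 200 us off
 * cpu, dt_delay = 300 - 250 = 50 us of runqueue wait, and
 * dt_run = 800 - 300 = 500 us on cpu.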
1910 static void timehist_update_runtime_stats(struct thread_runtime *r,
1917 r->dt_run = t - tprev;
1918 if (r->ready_to_run) {
1919 if (r->ready_to_run > tprev)
1920 pr_debug("time travel: wakeup time for task > previous sched_switch event\n");
1922 r->dt_delay = tprev - r->ready_to_run;
1925 if (r->last_time > tprev)
1926 pr_debug("time travel: last sched out time for task > previous sched_switch event\n");
1927 else if (r->last_time)
1928 r->dt_wait = tprev - r->last_time;
1931 update_stats(&r->run_stats, r->dt_run);
1932 r->total_run_time += r->dt_run;
1935 static bool is_idle_sample(struct perf_sched *sched,
1936 struct perf_sample *sample,
1937 struct perf_evsel *evsel,
1938 struct machine *machine)
1940 struct thread *thread;
1941 struct callchain_cursor *cursor = &callchain_cursor;
1943 /* pid 0 == swapper == idle task */
1944 if (sample->pid == 0)
1947 if (strcmp(perf_evsel__name(evsel), "sched:sched_switch") == 0) {
1948 if (perf_evsel__intval(evsel, sample, "prev_pid") == 0)
1952 /* want main thread for process - has maps */
1953 thread = machine__findnew_thread(machine, sample->pid, sample->pid);
1954 if (thread == NULL) {
1955 pr_debug("Failed to get thread for pid %d.\n", sample->pid);
1959 if (!symbol_conf.use_callchain || sample->callchain == NULL)
1962 if (thread__resolve_callchain(thread, cursor, evsel, sample,
1963 NULL, NULL, sched->max_stack) != 0) {
1965 error("Failed to resolve callchain. Skipping\n");
1969 callchain_cursor_commit(cursor);
1974 * Track idle stats per cpu by maintaining a local thread
1975 * struct for the idle task on each cpu.
1977 static int init_idle_threads(int ncpu)
1981 idle_threads = zalloc(ncpu * sizeof(struct thread *));
1985 idle_max_cpu = ncpu - 1;
1987 /* allocate the actual thread struct if needed */
1988 for (i = 0; i < ncpu; ++i) {
1989 idle_threads[i] = thread__new(0, 0);
1990 if (idle_threads[i] == NULL)
1993 thread__set_comm(idle_threads[i], idle_comm, 0);
1999 static void free_idle_threads(void)
2003 if (idle_threads == NULL)
2006 for (i = 0; i <= idle_max_cpu; ++i) {
2007 if ((idle_threads[i]))
2008 thread__delete(idle_threads[i]);
2014 static struct thread *get_idle_thread(int cpu)
2017 * expand/allocate array of pointers to local thread structs if needed
2020 if ((cpu >= idle_max_cpu) || (idle_threads == NULL)) {
2021 int i, j = __roundup_pow_of_two(cpu+1);
2024 p = realloc(idle_threads, j * sizeof(struct thread *));
2028 idle_threads = (struct thread **) p;
2029 i = idle_max_cpu ? idle_max_cpu + 1 : 0;
2031 idle_threads[i] = NULL;
2036 /* allocate a new thread struct if needed */
2037 if (idle_threads[cpu] == NULL) {
2038 idle_threads[cpu] = thread__new(0, 0);
2040 if (idle_threads[cpu]) {
2041 idle_threads[cpu]->tid = 0;
2042 thread__set_comm(idle_threads[cpu], idle_comm, 0);
2046 return idle_threads[cpu];
2050 * handle runtime stats saved per thread
2052 static struct thread_runtime *thread__init_runtime(struct thread *thread)
2054 struct thread_runtime *r;
2056 r = zalloc(sizeof(struct thread_runtime));
2060 init_stats(&r->run_stats);
2061 thread__set_priv(thread, r);
2066 static struct thread_runtime *thread__get_runtime(struct thread *thread)
2068 struct thread_runtime *tr;
2070 tr = thread__priv(thread);
2072 tr = thread__init_runtime(thread);
2074 pr_debug("Failed to malloc memory for runtime data.\n");
2080 static struct thread *timehist_get_thread(struct perf_sched *sched,
2081 struct perf_sample *sample,
2082 struct machine *machine,
2083 struct perf_evsel *evsel)
2085 struct thread *thread;
2087 if (is_idle_sample(sched, sample, evsel, machine)) {
2088 thread = get_idle_thread(sample->cpu);
2090 pr_err("Failed to get idle thread for cpu %d.\n", sample->cpu);
2093 thread = machine__findnew_thread(machine, sample->pid, sample->tid);
2094 if (thread == NULL) {
2095 pr_debug("Failed to get thread for tid %d. skipping sample.\n",
2103 static bool timehist_skip_sample(struct perf_sched *sched,
2104 struct thread *thread)
2108 if (thread__is_filtered(thread)) {
2110 sched->skipped_samples++;
2116 static void timehist_print_wakeup_event(struct perf_sched *sched,
2117 struct perf_sample *sample,
2118 struct machine *machine,
2119 struct thread *awakened)
2121 struct thread *thread;
2124 thread = machine__findnew_thread(machine, sample->pid, sample->tid);
2128 /* show wakeup unless both the wakee and the waker are filtered */
2129 if (timehist_skip_sample(sched, thread) &&
2130 timehist_skip_sample(sched, awakened)) {
2134 timestamp__scnprintf_usec(sample->time, tstr, sizeof(tstr));
2135 printf("%15s [%04d] ", tstr, sample->cpu);
2136 if (sched->show_cpu_visual)
2137 printf(" %*s ", sched->max_cpu + 1, "");
2139 printf(" %-*s ", comm_width, timehist_get_commstr(thread));
2142 printf(" %9s %9s %9s ", "", "", "");
2144 printf("awakened: %s", timehist_get_commstr(awakened));
2149 static int timehist_sched_wakeup_event(struct perf_tool *tool,
2150 union perf_event *event __maybe_unused,
2151 struct perf_evsel *evsel,
2152 struct perf_sample *sample,
2153 struct machine *machine)
2155 struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
2156 struct thread *thread;
2157 struct thread_runtime *tr = NULL;
2158 /* want pid of awakened task not pid in sample */
2159 const u32 pid = perf_evsel__intval(evsel, sample, "pid");
2161 thread = machine__findnew_thread(machine, 0, pid);
2165 tr = thread__get_runtime(thread);
2169 if (tr->ready_to_run == 0)
2170 tr->ready_to_run = sample->time;
2172 /* show wakeups if requested */
2173 if (sched->show_wakeups)
2174 timehist_print_wakeup_event(sched, sample, machine, thread);
2179 static int timehist_sched_change_event(struct perf_tool *tool,
2180 union perf_event *event,
2181 struct perf_evsel *evsel,
2182 struct perf_sample *sample,
2183 struct machine *machine)
2185 struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
2186 struct addr_location al;
2187 struct thread *thread;
2188 struct thread_runtime *tr = NULL;
2192 if (machine__resolve(machine, &al, sample) < 0) {
2193 pr_err("problem processing %d event. skipping it\n",
2194 event->header.type);
2199 thread = timehist_get_thread(sched, sample, machine, evsel);
2200 if (thread == NULL) {
2205 if (timehist_skip_sample(sched, thread))
2208 tr = thread__get_runtime(thread);
2214 tprev = perf_evsel__get_time(evsel, sample->cpu);
2216 timehist_update_runtime_stats(tr, sample->time, tprev);
2217 if (!sched->summary_only)
2218 timehist_print_sample(sched, sample, &al, thread);
2222 /* time of this sched_switch event becomes last time task seen */
2223 tr->last_time = sample->time;
2225 /* sched out event for task so reset ready to run time */
2226 tr->ready_to_run = 0;
2229 perf_evsel__save_time(evsel, sample->time, sample->cpu);
2234 static int timehist_sched_switch_event(struct perf_tool *tool,
2235 union perf_event *event,
2236 struct perf_evsel *evsel,
2237 struct perf_sample *sample,
2238 struct machine *machine __maybe_unused)
2240 return timehist_sched_change_event(tool, event, evsel, sample, machine);
2243 static int process_lost(struct perf_tool *tool __maybe_unused,
2244 union perf_event *event,
2245 struct perf_sample *sample,
2246 struct machine *machine __maybe_unused)
2250 timestamp__scnprintf_usec(sample->time, tstr, sizeof(tstr));
2251 printf("%15s ", tstr);
2252 printf("lost %" PRIu64 " events on cpu %d\n", event->lost.lost, sample->cpu);
2258 static void print_thread_runtime(struct thread *t,
2259 struct thread_runtime *r)
2261 double mean = avg_stats(&r->run_stats);
2264 printf("%*s %5d %9" PRIu64 " ",
2265 comm_width, timehist_get_commstr(t), t->ppid,
2266 (u64) r->run_stats.n);
2268 print_sched_time(r->total_run_time, 8);
2269 stddev = rel_stddev_stats(stddev_stats(&r->run_stats), mean);
2270 print_sched_time(r->run_stats.min, 6);
2272 print_sched_time((u64) mean, 6);
2274 print_sched_time(r->run_stats.max, 6);
2276 printf("%5.2f", stddev);
2280 struct total_run_stats {
2286 static int __show_thread_runtime(struct thread *t, void *priv)
2288 struct total_run_stats *stats = priv;
2289 struct thread_runtime *r;
2291 if (thread__is_filtered(t))
2294 r = thread__priv(t);
2295 if (r && r->run_stats.n) {
2296 stats->task_count++;
2297 stats->sched_count += r->run_stats.n;
2298 stats->total_run_time += r->total_run_time;
2299 print_thread_runtime(t, r);
2305 static int show_thread_runtime(struct thread *t, void *priv)
2310 return __show_thread_runtime(t, priv);
2313 static int show_deadthread_runtime(struct thread *t, void *priv)
2318 return __show_thread_runtime(t, priv);
2321 static void timehist_print_summary(struct perf_sched *sched,
2322 struct perf_session *session)
2324 struct machine *m = &session->machines.host;
2325 struct total_run_stats totals;
2328 struct thread_runtime *r;
2331 memset(&totals, 0, sizeof(totals));
2333 if (comm_width < 30)
2336 printf("\nRuntime summary\n");
2337 printf("%*s parent sched-in ", comm_width, "comm");
2338 printf(" run-time min-run avg-run max-run stddev\n");
2339 printf("%*s (count) ", comm_width, "");
2340 printf(" (msec) (msec) (msec) (msec) %%\n");
2341 printf("%.105s\n", graph_dotted_line);
2343 machine__for_each_thread(m, show_thread_runtime, &totals);
2344 task_count = totals.task_count;
2346 printf("<no still running tasks>\n");
2348 printf("\nTerminated tasks:\n");
2349 machine__for_each_thread(m, show_deadthread_runtime, &totals);
2350 if (task_count == totals.task_count)
2351 printf("<no terminated tasks>\n");
2353 /* CPU idle stats not tracked when samples were skipped */
2354 if (sched->skipped_samples)
2357 printf("\nIdle stats:\n");
2358 for (i = 0; i <= idle_max_cpu; ++i) {
2359 t = idle_threads[i];
2363 r = thread__priv(t);
2364 if (r && r->run_stats.n) {
2365 totals.sched_count += r->run_stats.n;
2366 printf(" CPU %2d idle for ", i);
2367 print_sched_time(r->total_run_time, 6);
2370 printf(" CPU %2d idle entire time window\n", i);
2374 " Total number of unique tasks: %" PRIu64 "\n"
2375 "Total number of context switches: %" PRIu64 "\n"
2376 " Total run time (msec): ",
2377 totals.task_count, totals.sched_count);
2379 print_sched_time(totals.total_run_time, 2);
2383 typedef int (*sched_handler)(struct perf_tool *tool,
2384 union perf_event *event,
2385 struct perf_evsel *evsel,
2386 struct perf_sample *sample,
2387 struct machine *machine);
2389 static int perf_timehist__process_sample(struct perf_tool *tool,
2390 union perf_event *event,
2391 struct perf_sample *sample,
2392 struct perf_evsel *evsel,
2393 struct machine *machine)
2395 struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
2397 int this_cpu = sample->cpu;
2399 if (this_cpu > sched->max_cpu)
2400 sched->max_cpu = this_cpu;
2402 if (evsel->handler != NULL) {
2403 sched_handler f = evsel->handler;
2405 err = f(tool, event, evsel, sample, machine);
2411 static int timehist_check_attr(struct perf_sched *sched,
2412 struct perf_evlist *evlist)
2414 struct perf_evsel *evsel;
2415 struct evsel_runtime *er;
2417 list_for_each_entry(evsel, &evlist->entries, node) {
2418 er = perf_evsel__get_runtime(evsel);
2420 pr_err("Failed to allocate memory for evsel runtime data\n");
2424 if (sched->show_callchain &&
2425 !(evsel->attr.sample_type & PERF_SAMPLE_CALLCHAIN)) {
2426 pr_info("Samples do not have callchains.\n");
2427 sched->show_callchain = 0;
2428 symbol_conf.use_callchain = 0;
2435 static int perf_sched__timehist(struct perf_sched *sched)
2437 const struct perf_evsel_str_handler handlers[] = {
2438 { "sched:sched_switch", timehist_sched_switch_event, },
2439 { "sched:sched_wakeup", timehist_sched_wakeup_event, },
2440 { "sched:sched_wakeup_new", timehist_sched_wakeup_event, },
2442 struct perf_data_file file = {
2444 .mode = PERF_DATA_MODE_READ,
2447 struct perf_session *session;
2448 struct perf_evlist *evlist;
2452 * event handlers for timehist option
2454 sched->tool.sample = perf_timehist__process_sample;
2455 sched->tool.mmap = perf_event__process_mmap;
2456 sched->tool.comm = perf_event__process_comm;
2457 sched->tool.exit = perf_event__process_exit;
2458 sched->tool.fork = perf_event__process_fork;
2459 sched->tool.lost = process_lost;
2460 sched->tool.attr = perf_event__process_attr;
2461 sched->tool.tracing_data = perf_event__process_tracing_data;
2462 sched->tool.build_id = perf_event__process_build_id;
2464 sched->tool.ordered_events = true;
2465 sched->tool.ordering_requires_timestamps = true;
2467 symbol_conf.use_callchain = sched->show_callchain;
2469 session = perf_session__new(&file, false, &sched->tool);
2470 if (session == NULL)
2473 evlist = session->evlist;
2475 symbol__init(&session->header.env);
2477 if (timehist_check_attr(sched, evlist) != 0)
2482 /* setup per-evsel handlers */
2483 if (perf_session__set_tracepoints_handlers(session, handlers))
2486 if (!perf_session__has_traces(session, "record -R"))
2489 /* pre-allocate struct for per-CPU idle stats */
2490 sched->max_cpu = session->header.env.nr_cpus_online;
2491 if (sched->max_cpu == 0)
2493 if (init_idle_threads(sched->max_cpu))
2496 /* summary_only implies summary option, but don't overwrite summary if set */
2497 if (sched->summary_only)
2498 sched->summary = sched->summary_only;
2500 if (!sched->summary_only)
2501 timehist_header(sched);
2503 err = perf_session__process_events(session);
2505 pr_err("Failed to process events, error %d", err);
2509 sched->nr_events = evlist->stats.nr_events[0];
2510 sched->nr_lost_events = evlist->stats.total_lost;
2511 sched->nr_lost_chunks = evlist->stats.nr_events[PERF_RECORD_LOST];
2514 timehist_print_summary(sched, session);
2517 free_idle_threads();
2518 perf_session__delete(session);
2524 static void print_bad_events(struct perf_sched *sched)
2526 if (sched->nr_unordered_timestamps && sched->nr_timestamps) {
2527 printf(" INFO: %.3f%% unordered timestamps (%ld out of %ld)\n",
2528 (double)sched->nr_unordered_timestamps/(double)sched->nr_timestamps*100.0,
2529 sched->nr_unordered_timestamps, sched->nr_timestamps);
2531 if (sched->nr_lost_events && sched->nr_events) {
2532 printf(" INFO: %.3f%% lost events (%ld out of %ld, in %ld chunks)\n",
2533 (double)sched->nr_lost_events/(double)sched->nr_events * 100.0,
2534 sched->nr_lost_events, sched->nr_events, sched->nr_lost_chunks);
2536 if (sched->nr_context_switch_bugs && sched->nr_timestamps) {
2537 printf(" INFO: %.3f%% context switch bugs (%ld out of %ld)",
2538 (double)sched->nr_context_switch_bugs/(double)sched->nr_timestamps*100.0,
2539 sched->nr_context_switch_bugs, sched->nr_timestamps);
2540 if (sched->nr_lost_events)
2541 printf(" (due to lost events?)");
2546 static void __merge_work_atoms(struct rb_root *root, struct work_atoms *data)
2548 struct rb_node **new = &(root->rb_node), *parent = NULL;
2549 struct work_atoms *this;
2550 const char *comm = thread__comm_str(data->thread), *this_comm;
2555 this = container_of(*new, struct work_atoms, node);
2558 this_comm = thread__comm_str(this->thread);
2559 cmp = strcmp(comm, this_comm);
2561 new = &((*new)->rb_left);
2562 } else if (cmp < 0) {
2563 new = &((*new)->rb_right);
2566 this->total_runtime += data->total_runtime;
2567 this->nb_atoms += data->nb_atoms;
2568 this->total_lat += data->total_lat;
2569 list_splice(&data->work_list, &this->work_list);
2570 if (this->max_lat < data->max_lat) {
2571 this->max_lat = data->max_lat;
2572 this->max_lat_at = data->max_lat_at;
2580 rb_link_node(&data->node, parent, new);
2581 rb_insert_color(&data->node, root);
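/*
 * Threads are merged purely by comm string here, so e.g. every "gcc"
 * thread in the trace lands in one work_atoms entry, whose num_merged
 * count feeds the "comm:(N)" form printed by output_lat_thread();
 * setting skip_merge (the latency -p/--pids option) bypasses this pass
 * to keep per-thread rows.
 */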
static void perf_sched__merge_lat(struct perf_sched *sched)
{
	struct work_atoms *data;
	struct rb_node *node;

	if (sched->skip_merge)
		return;

	while ((node = rb_first(&sched->atom_root))) {
		rb_erase(node, &sched->atom_root);
		data = rb_entry(node, struct work_atoms, node);
		__merge_work_atoms(&sched->merged_atom_root, data);
	}
}
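/*
 * 'perf sched latency': read the trace, merge and sort the work atoms,
 * then print one row per task followed by a TOTAL line and any
 * trace-quality warnings.
 */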
static int perf_sched__lat(struct perf_sched *sched)
{
	struct rb_node *next;

	if (perf_sched__read_events(sched))
		return -1;

	perf_sched__merge_lat(sched);
	perf_sched__sort_lat(sched);

	printf("\n -----------------------------------------------------------------------------------------------------------------\n");
	printf("  Task                  |   Runtime ms  | Switches | Average delay ms | Maximum delay ms | Maximum delay at       |\n");
	printf(" -----------------------------------------------------------------------------------------------------------------\n");

	next = rb_first(&sched->sorted_atom_root);

	while (next) {
		struct work_atoms *work_list;

		work_list = rb_entry(next, struct work_atoms, node);
		output_lat_thread(sched, work_list);
		next = rb_next(next);
		thread__zput(work_list->thread);
	}

	printf(" -----------------------------------------------------------------------------------------------------------------\n");
	printf("  TOTAL:                |%11.3f ms |%9" PRIu64 " |\n",
		(double)sched->all_runtime / NSEC_PER_MSEC, sched->all_count);

	printf(" ---------------------------------------------------\n");

	print_bad_events(sched);
	printf("\n");

	return 0;
}
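/*
 * Prepare CPU bookkeeping for 'perf sched map': size the per-CPU arrays
 * from the number of configured processors, allocate the compact-mode
 * index array if --compact was given, and parse the optional --cpus
 * display filter.
 */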
static int setup_map_cpus(struct perf_sched *sched)
{
	struct cpu_map *map;

	sched->max_cpu = sysconf(_SC_NPROCESSORS_CONF);

	if (sched->map.comp) {
		sched->map.comp_cpus = zalloc(sched->max_cpu * sizeof(int));
		if (!sched->map.comp_cpus)
			return -1;
	}

	if (!sched->map.cpus_str)
		return 0;

	map = cpu_map__new(sched->map.cpus_str);
	if (!map) {
		pr_err("failed to get cpus map from %s\n", sched->map.cpus_str);
		return -1;
	}

	sched->map.cpus = map;
	return 0;
}
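/* Parse the --color-pids list into a thread map used to highlight those tasks. */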
static int setup_color_pids(struct perf_sched *sched)
{
	struct thread_map *map;

	if (!sched->map.color_pids_str)
		return 0;

	map = thread_map__new_by_tid_str(sched->map.color_pids_str);
	if (!map) {
		pr_err("failed to get thread map from %s\n", sched->map.color_pids_str);
		return -1;
	}

	sched->map.color_pids = map;
	return 0;
}
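/* Parse the --color-cpus list into a cpu map used to highlight those CPUs. */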
static int setup_color_cpus(struct perf_sched *sched)
{
	struct cpu_map *map;

	if (!sched->map.color_cpus_str)
		return 0;

	map = cpu_map__new(sched->map.color_cpus_str);
	if (!map) {
		pr_err("failed to get cpus map from %s\n", sched->map.color_cpus_str);
		return -1;
	}

	sched->map.color_cpus = map;
	return 0;
}
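/*
 * 'perf sched map': set up the CPU and highlight maps, then replay the
 * trace through map_switch_event to draw the per-CPU scheduling map.
 */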
static int perf_sched__map(struct perf_sched *sched)
{
	if (setup_map_cpus(sched))
		return -1;

	if (setup_color_pids(sched))
		return -1;

	if (setup_color_cpus(sched))
		return -1;

	if (perf_sched__read_events(sched))
		return -1;
	print_bad_events(sched);
	return 0;
}
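/*
 * 'perf sched replay': calibrate the run/sleep measurement overhead,
 * rebuild the recorded tasks as real threads and re-execute the
 * recorded scheduling pattern --repeat times.
 */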
static int perf_sched__replay(struct perf_sched *sched)
{
	unsigned long i;

	calibrate_run_measurement_overhead(sched);
	calibrate_sleep_measurement_overhead(sched);

	test_calibrations(sched);

	if (perf_sched__read_events(sched))
		return -1;

	printf("nr_run_events:        %ld\n", sched->nr_run_events);
	printf("nr_sleep_events:      %ld\n", sched->nr_sleep_events);
	printf("nr_wakeup_events:     %ld\n", sched->nr_wakeup_events);

	if (sched->targetless_wakeups)
		printf("target-less wakeups:  %ld\n", sched->targetless_wakeups);
	if (sched->multitarget_wakeups)
		printf("multi-target wakeups: %ld\n", sched->multitarget_wakeups);
	if (sched->nr_run_events_optimized)
		printf("run atoms optimized: %ld\n",
			sched->nr_run_events_optimized);

	print_task_traces(sched);
	add_cross_task_wakeups(sched);

	create_tasks(sched);
	printf("------------------------------------------------------------\n");
	for (i = 0; i < sched->replay_repeat; i++)
		run_one_test(sched);

	return 0;
}
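/*
 * Split the --sort argument on commas or spaces and register each key
 * as a sort dimension; "pid" is always added to the comparison list so
 * atoms can be looked up per task.
 */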
static void setup_sorting(struct perf_sched *sched, const struct option *options,
			  const char * const usage_msg[])
{
	char *tmp, *tok, *str = strdup(sched->sort_order);

	for (tok = strtok_r(str, ", ", &tmp);
			tok; tok = strtok_r(NULL, ", ", &tmp)) {
		if (sort_dimension__add(tok, &sched->sort_list) < 0) {
			usage_with_options_msg(usage_msg, options,
					"Unknown --sort key: `%s'", tok);
		}
	}

	free(str);

	sort_dimension__add("pid", &sched->cmp_pid);
}
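/*
 * 'perf sched record': forward to 'perf record' with the sched
 * tracepoints appended, followed by any extra user arguments, e.g.
 * (illustrative):
 *
 *	perf sched record -- sleep 1
 */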
static int __cmd_record(int argc, const char **argv)
{
	unsigned int rec_argc, i, j;
	const char **rec_argv;
	const char * const record_args[] = {
		"record",
		"-a",
		"-R",
		"-m", "1024",
		"-c", "1",
		"-e", "sched:sched_switch",
		"-e", "sched:sched_stat_wait",
		"-e", "sched:sched_stat_sleep",
		"-e", "sched:sched_stat_iowait",
		"-e", "sched:sched_stat_runtime",
		"-e", "sched:sched_process_fork",
		"-e", "sched:sched_wakeup",
		"-e", "sched:sched_wakeup_new",
		"-e", "sched:sched_migrate_task",
	};

	rec_argc = ARRAY_SIZE(record_args) + argc - 1;
	rec_argv = calloc(rec_argc + 1, sizeof(char *));

	if (rec_argv == NULL)
		return -ENOMEM;

	for (i = 0; i < ARRAY_SIZE(record_args); i++)
		rec_argv[i] = strdup(record_args[i]);

	for (j = 1; j < (unsigned int)argc; j++, i++)
		rec_argv[i] = argv[j];

	BUG_ON(i != rec_argc);

	return cmd_record(i, rec_argv, NULL);
}
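/*
 * Entry point for 'perf sched': parse the shared options, then dispatch
 * to the record/latency/map/replay/timehist subcommands ('script' is an
 * alias for 'perf script'). Illustrative invocations:
 *
 *	perf sched record -- sleep 1
 *	perf sched latency --sort max
 *	perf sched map --compact
 *	perf sched timehist -w
 */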
int cmd_sched(int argc, const char **argv, const char *prefix __maybe_unused)
{
	const char default_sort_order[] = "avg, max, switch, runtime";
	struct perf_sched sched = {
		.tool = {
			.sample		 = perf_sched__process_tracepoint_sample,
			.comm		 = perf_event__process_comm,
			.lost		 = perf_event__process_lost,
			.fork		 = perf_sched__process_fork_event,
			.ordered_events	 = true,
		},
		.cmp_pid	      = LIST_HEAD_INIT(sched.cmp_pid),
		.sort_list	      = LIST_HEAD_INIT(sched.sort_list),
		.start_work_mutex     = PTHREAD_MUTEX_INITIALIZER,
		.work_done_wait_mutex = PTHREAD_MUTEX_INITIALIZER,
		.sort_order	      = default_sort_order,
		.replay_repeat	      = 10,
		.profile_cpu	      = -1,
		.next_shortname1      = 'A',
		.next_shortname2      = '0',
		.show_callchain	      = 1,
		.max_stack	      = 5,
	};
	const struct option sched_options[] = {
	OPT_STRING('i', "input", &input_name, "file",
		    "input file name"),
	OPT_INCR('v', "verbose", &verbose,
		    "be more verbose (show symbol address, etc)"),
	OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
		    "dump raw trace in ASCII"),
	OPT_END()
	};
	const struct option latency_options[] = {
	OPT_STRING('s', "sort", &sched.sort_order, "key[,key2...]",
		   "sort by key(s): runtime, switch, avg, max"),
	OPT_INTEGER('C', "CPU", &sched.profile_cpu,
		    "CPU to profile on"),
	OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
		    "dump raw trace in ASCII"),
	OPT_BOOLEAN('p', "pids", &sched.skip_merge,
		    "latency stats per pid instead of per comm"),
	OPT_PARENT(sched_options)
	};
	const struct option replay_options[] = {
	OPT_UINTEGER('r', "repeat", &sched.replay_repeat,
		     "repeat the workload replay N times (-1: infinite)"),
	OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
		    "dump raw trace in ASCII"),
	OPT_BOOLEAN('f', "force", &sched.force, "don't complain, do it"),
	OPT_PARENT(sched_options)
	};
	const struct option map_options[] = {
	OPT_BOOLEAN(0, "compact", &sched.map.comp,
		    "map output in compact mode"),
	OPT_STRING(0, "color-pids", &sched.map.color_pids_str, "pids",
		   "highlight given pids in map"),
	OPT_STRING(0, "color-cpus", &sched.map.color_cpus_str, "cpus",
		   "highlight given CPUs in map"),
	OPT_STRING(0, "cpus", &sched.map.cpus_str, "cpus",
		   "display given CPUs in map"),
	OPT_PARENT(sched_options)
	};
	const struct option timehist_options[] = {
	OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
		   "file", "vmlinux pathname"),
	OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name,
		   "file", "kallsyms pathname"),
	OPT_BOOLEAN('g', "call-graph", &sched.show_callchain,
		    "Display call chains if present (default on)"),
	OPT_UINTEGER(0, "max-stack", &sched.max_stack,
		     "Maximum number of functions to display in backtrace."),
	OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory",
		   "Look for files with symbols relative to this directory"),
	OPT_BOOLEAN('s', "summary", &sched.summary_only,
		    "Show only a summary of scheduling events with statistics"),
	OPT_BOOLEAN('S', "with-summary", &sched.summary,
		    "Show all scheduling events and a summary with statistics"),
	OPT_BOOLEAN('w', "wakeups", &sched.show_wakeups, "Show wakeup events"),
	OPT_BOOLEAN('V', "cpu-visual", &sched.show_cpu_visual, "Add CPU visual"),
	OPT_PARENT(sched_options)
	};
	const char * const latency_usage[] = {
		"perf sched latency [<options>]",
		NULL
	};
	const char * const replay_usage[] = {
		"perf sched replay [<options>]",
		NULL
	};
	const char * const map_usage[] = {
		"perf sched map [<options>]",
		NULL
	};
	const char * const timehist_usage[] = {
		"perf sched timehist [<options>]",
		NULL
	};
	const char *const sched_subcommands[] = { "record", "latency", "map",
						  "replay", "script",
						  "timehist", NULL };
	const char *sched_usage[] = { NULL, NULL };
	struct trace_sched_handler lat_ops = {
		.wakeup_event	    = latency_wakeup_event,
		.switch_event	    = latency_switch_event,
		.runtime_event	    = latency_runtime_event,
		.migrate_task_event = latency_migrate_task_event,
	};
	struct trace_sched_handler map_ops = {
		.switch_event	    = map_switch_event,
	};
	struct trace_sched_handler replay_ops = {
		.wakeup_event	    = replay_wakeup_event,
		.switch_event	    = replay_switch_event,
		.fork_event	    = replay_fork_event,
	};
	unsigned int i;
	for (i = 0; i < ARRAY_SIZE(sched.curr_pid); i++)
		sched.curr_pid[i] = -1;

	argc = parse_options_subcommand(argc, argv, sched_options, sched_subcommands,
					sched_usage, PARSE_OPT_STOP_AT_NON_OPTION);
	if (!argc)
		usage_with_options(sched_usage, sched_options);
	/*
	 * Aliased to 'perf script' for now:
	 */
	if (!strcmp(argv[0], "script"))
		return cmd_script(argc, argv, prefix);
	if (!strncmp(argv[0], "rec", 3)) {
		return __cmd_record(argc, argv);
	} else if (!strncmp(argv[0], "lat", 3)) {
		sched.tp_handler = &lat_ops;
		if (argc > 1) {
			argc = parse_options(argc, argv, latency_options, latency_usage, 0);
			if (argc)
				usage_with_options(latency_usage, latency_options);
		}
		setup_sorting(&sched, latency_options, latency_usage);
		return perf_sched__lat(&sched);
	} else if (!strcmp(argv[0], "map")) {
		if (argc > 1) {
			argc = parse_options(argc, argv, map_options, map_usage, 0);
			if (argc)
				usage_with_options(map_usage, map_options);
		}
		sched.tp_handler = &map_ops;
		setup_sorting(&sched, latency_options, latency_usage);
		return perf_sched__map(&sched);
	} else if (!strncmp(argv[0], "rep", 3)) {
		sched.tp_handler = &replay_ops;
		if (argc) {
			argc = parse_options(argc, argv, replay_options, replay_usage, 0);
			if (argc)
				usage_with_options(replay_usage, replay_options);
		}
		return perf_sched__replay(&sched);
	} else if (!strcmp(argv[0], "timehist")) {
		if (argc) {
			argc = parse_options(argc, argv, timehist_options,
					     timehist_usage, 0);
			if (argc)
				usage_with_options(timehist_usage, timehist_options);
		}

		if (sched.show_wakeups && sched.summary_only) {
			pr_err(" Error: -s and -w are mutually exclusive.\n");
			parse_options_usage(timehist_usage, timehist_options, "s", true);
			parse_options_usage(NULL, timehist_options, "w", true);
			return -EINVAL;
		}

		return perf_sched__timehist(&sched);
	} else {
		usage_with_options(sched_usage, sched_options);
	}

	return 0;
}