#include "util/cache.h"
#include "util/evsel.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/header.h"
#include "util/session.h"
#include "util/tool.h"

#include "util/parse-options.h"
#include "util/trace-event.h"

#include "util/debug.h"

#include <sys/prctl.h>

#include <semaphore.h>

static char const *input_name = "perf.data";

static char default_sort_order[] = "avg, max, switch, runtime";
static const char *sort_order = default_sort_order;

static int profile_cpu = -1;

#define PR_SET_NAME 15 /* Set process name */

static u64 run_measurement_overhead;
static u64 sleep_measurement_overhead;

static unsigned long nr_tasks;

	unsigned long nr_events;
	unsigned long curr_event;
	struct sched_atom **atoms;

enum sched_event_type {

	SCHED_EVENT_MIGRATION,

	enum sched_event_type type;

	struct task_desc *wakee;

static struct task_desc *pid_to_task[MAX_PID];

static struct task_desc **tasks;

static pthread_mutex_t start_work_mutex = PTHREAD_MUTEX_INITIALIZER;
static u64 start_time;

static pthread_mutex_t work_done_wait_mutex = PTHREAD_MUTEX_INITIALIZER;

static unsigned long nr_run_events;
static unsigned long nr_sleep_events;
static unsigned long nr_wakeup_events;

static unsigned long nr_sleep_corrections;
static unsigned long nr_run_events_optimized;

static unsigned long targetless_wakeups;
static unsigned long multitarget_wakeups;

static u64 cpu_usage;
static u64 runavg_cpu_usage;
static u64 parent_cpu_usage;
static u64 runavg_parent_cpu_usage;

static unsigned long nr_runs;
static u64 sum_runtime;
static u64 sum_fluct;

static unsigned int replay_repeat = 10;
static unsigned long nr_timestamps;
static unsigned long nr_unordered_timestamps;
static unsigned long nr_state_machine_bugs;
static unsigned long nr_context_switch_bugs;
static unsigned long nr_events;
static unsigned long nr_lost_chunks;
static unsigned long nr_lost_events;

#define TASK_STATE_TO_CHAR_STR "RSDTtZX"

	struct list_head list;
	enum thread_state state;

	struct list_head work_list;
	struct thread *thread;

typedef int (*sort_fn_t)(struct work_atoms *, struct work_atoms *);

static struct rb_root atom_root, sorted_atom_root;

static u64 all_runtime;
static u64 all_count;
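
/*
 * Time helpers: the replay engine works in raw nanoseconds, read from
 * CLOCK_MONOTONIC and flattened into a single u64.
 */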
static u64 get_nsecs(void)

	clock_gettime(CLOCK_MONOTONIC, &ts);

	return ts.tv_sec * 1000000000ULL + ts.tv_nsec;

static void burn_nsecs(u64 nsecs)

	u64 T0 = get_nsecs(), T1;

	} while (T1 + run_measurement_overhead < T0 + nsecs);

static void sleep_nsecs(u64 nsecs)

	ts.tv_nsec = nsecs % 1000000000ULL;
	ts.tv_sec = nsecs / 1000000000ULL;

	nanosleep(&ts, NULL);
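
/*
 * Calibration: take the minimum of ten trials as the intrinsic cost of
 * the run/sleep primitives, so replayed durations can be corrected for
 * measurement overhead.
 */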
static void calibrate_run_measurement_overhead(void)

	u64 T0, T1, delta, min_delta = 1000000000ULL;

	for (i = 0; i < 10; i++) {

		min_delta = min(min_delta, delta);

	run_measurement_overhead = min_delta;

	printf("run measurement overhead: %" PRIu64 " nsecs\n", min_delta);

static void calibrate_sleep_measurement_overhead(void)

	u64 T0, T1, delta, min_delta = 1000000000ULL;

	for (i = 0; i < 10; i++) {

		min_delta = min(min_delta, delta);

	sleep_measurement_overhead = min_delta;

	printf("sleep measurement overhead: %" PRIu64 " nsecs\n", min_delta);
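
/*
 * Atom allocation: get_new_event() zallocs one sched_atom and grows the
 * task's atoms[] array by one slot to hold it.
 */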
static struct sched_atom *
get_new_event(struct task_desc *task, u64 timestamp)

	struct sched_atom *event = zalloc(sizeof(*event));
	unsigned long idx = task->nr_events;

	event->timestamp = timestamp;

	size = sizeof(struct sched_atom *) * task->nr_events;
	task->atoms = realloc(task->atoms, size);
	BUG_ON(!task->atoms);

	task->atoms[idx] = event;

static struct sched_atom *last_event(struct task_desc *task)

	if (!task->nr_events)

	return task->atoms[task->nr_events - 1];

add_sched_event_run(struct task_desc *task, u64 timestamp, u64 duration)

	struct sched_atom *event, *curr_event = last_event(task);

	 * optimize an existing RUN event by merging this one

	if (curr_event && curr_event->type == SCHED_EVENT_RUN) {
		nr_run_events_optimized++;
		curr_event->duration += duration;

	event = get_new_event(task, timestamp);

	event->type = SCHED_EVENT_RUN;
	event->duration = duration;
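
/*
 * Wakeups are modelled with semaphores: the wakee's preceding SLEEP
 * atom gets a wait_sem which the waker's WAKEUP atom will sem_post().
 */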
add_sched_event_wakeup(struct task_desc *task, u64 timestamp,
		       struct task_desc *wakee)

	struct sched_atom *event, *wakee_event;

	event = get_new_event(task, timestamp);
	event->type = SCHED_EVENT_WAKEUP;
	event->wakee = wakee;

	wakee_event = last_event(wakee);
	if (!wakee_event || wakee_event->type != SCHED_EVENT_SLEEP) {
		targetless_wakeups++;

	if (wakee_event->wait_sem) {
		multitarget_wakeups++;

	wakee_event->wait_sem = zalloc(sizeof(*wakee_event->wait_sem));
	sem_init(wakee_event->wait_sem, 0, 0);
	wakee_event->specific_wait = 1;
	event->wait_sem = wakee_event->wait_sem;

add_sched_event_sleep(struct task_desc *task, u64 timestamp,
		      u64 task_state __used)

	struct sched_atom *event = get_new_event(task, timestamp);

	event->type = SCHED_EVENT_SLEEP;
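
/*
 * PID registry: look up (or create) the task_desc for a traced PID and
 * track it in both pid_to_task[] and the flat tasks[] array.
 */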
static struct task_desc *register_pid(unsigned long pid, const char *comm)

	struct task_desc *task;

	BUG_ON(pid >= MAX_PID);

	task = pid_to_task[pid];

	task = zalloc(sizeof(*task));

	strcpy(task->comm, comm);

	 * every task starts in sleeping state - this gets ignored
	 * if there's no wakeup pointing to this sleep state:

	add_sched_event_sleep(task, 0, 0);

	pid_to_task[pid] = task;

	tasks = realloc(tasks, nr_tasks * sizeof(struct task_desc *));

	tasks[task->nr] = task;

	printf("registered task #%ld, PID %ld (%s)\n", nr_tasks, pid, comm);

static void print_task_traces(void)

	struct task_desc *task;

	for (i = 0; i < nr_tasks; i++) {

		printf("task %6ld (%20s:%10ld), nr_events: %ld\n",
		       task->nr, task->comm, task->pid, task->nr_events);

static void add_cross_task_wakeups(void)

	struct task_desc *task1, *task2;

	for (i = 0; i < nr_tasks; i++) {

		add_sched_event_wakeup(task1, 0, task2);

process_sched_event(struct task_desc *this_task __used, struct sched_atom *atom)

	switch (atom->type) {
	case SCHED_EVENT_RUN:
		burn_nsecs(atom->duration);

	case SCHED_EVENT_SLEEP:

		ret = sem_wait(atom->wait_sem);

	case SCHED_EVENT_WAKEUP:

		ret = sem_post(atom->wait_sem);

	case SCHED_EVENT_MIGRATION:

static u64 get_cpu_usage_nsec_parent(void)

	err = getrusage(RUSAGE_SELF, &ru);

	sum = ru.ru_utime.tv_sec*1e9 + ru.ru_utime.tv_usec*1e3;
	sum += ru.ru_stime.tv_sec*1e9 + ru.ru_stime.tv_usec*1e3;
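
/*
 * Each replay thread opens its own software task-clock counter, so it
 * can read back precisely how much CPU time it consumed.
 */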
static int self_open_counters(void)

	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));

	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_TASK_CLOCK;

	fd = sys_perf_event_open(&attr, 0, -1, -1, 0);

		die("Error: sys_perf_event_open() syscall returned "
		    "with %d (%s)\n", fd, strerror(errno));

static u64 get_cpu_usage_nsec_self(int fd)

	ret = read(fd, &runtime, sizeof(runtime));
	BUG_ON(ret != sizeof(runtime));
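
/*
 * Replay worker: signal readiness, wait for the parent to release
 * start_work_mutex, replay this task's atoms while measuring own CPU
 * usage, then report completion through work_done_sem.
 */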
static void *thread_func(void *ctx)

	struct task_desc *this_task = ctx;
	u64 cpu_usage_0, cpu_usage_1;
	unsigned long i, ret;

	sprintf(comm2, ":%s", this_task->comm);
	prctl(PR_SET_NAME, comm2);
	fd = self_open_counters();

	ret = sem_post(&this_task->ready_for_work);

	ret = pthread_mutex_lock(&start_work_mutex);

	ret = pthread_mutex_unlock(&start_work_mutex);

	cpu_usage_0 = get_cpu_usage_nsec_self(fd);

	for (i = 0; i < this_task->nr_events; i++) {
		this_task->curr_event = i;
		process_sched_event(this_task, this_task->atoms[i]);

	cpu_usage_1 = get_cpu_usage_nsec_self(fd);
	this_task->cpu_usage = cpu_usage_1 - cpu_usage_0;
	ret = sem_post(&this_task->work_done_sem);

	ret = pthread_mutex_lock(&work_done_wait_mutex);

	ret = pthread_mutex_unlock(&work_done_wait_mutex);

static void create_tasks(void)

	struct task_desc *task;

	err = pthread_attr_init(&attr);

	err = pthread_attr_setstacksize(&attr,
			(size_t) max(16 * 1024, PTHREAD_STACK_MIN));

	err = pthread_mutex_lock(&start_work_mutex);

	err = pthread_mutex_lock(&work_done_wait_mutex);

	for (i = 0; i < nr_tasks; i++) {

		sem_init(&task->sleep_sem, 0, 0);
		sem_init(&task->ready_for_work, 0, 0);
		sem_init(&task->work_done_sem, 0, 0);
		task->curr_event = 0;
		err = pthread_create(&task->thread, &attr, thread_func, task);

static void wait_for_tasks(void)

	u64 cpu_usage_0, cpu_usage_1;
	struct task_desc *task;
	unsigned long i, ret;

	start_time = get_nsecs();

	pthread_mutex_unlock(&work_done_wait_mutex);

	for (i = 0; i < nr_tasks; i++) {

		ret = sem_wait(&task->ready_for_work);

		sem_init(&task->ready_for_work, 0, 0);

	ret = pthread_mutex_lock(&work_done_wait_mutex);

	cpu_usage_0 = get_cpu_usage_nsec_parent();

	pthread_mutex_unlock(&start_work_mutex);

	for (i = 0; i < nr_tasks; i++) {

		ret = sem_wait(&task->work_done_sem);

		sem_init(&task->work_done_sem, 0, 0);
		cpu_usage += task->cpu_usage;

	cpu_usage_1 = get_cpu_usage_nsec_parent();
	if (!runavg_cpu_usage)
		runavg_cpu_usage = cpu_usage;
	runavg_cpu_usage = (runavg_cpu_usage*9 + cpu_usage)/10;

	parent_cpu_usage = cpu_usage_1 - cpu_usage_0;
	if (!runavg_parent_cpu_usage)
		runavg_parent_cpu_usage = parent_cpu_usage;
	runavg_parent_cpu_usage = (runavg_parent_cpu_usage*9 +
				   parent_cpu_usage)/10;

	ret = pthread_mutex_lock(&start_work_mutex);

	for (i = 0; i < nr_tasks; i++) {

		sem_init(&task->sleep_sem, 0, 0);
		task->curr_event = 0;

static void run_one_test(void)

	u64 T0, T1, delta, avg_delta, fluct;

	sum_runtime += delta;

	avg_delta = sum_runtime / nr_runs;
	if (delta < avg_delta)
		fluct = avg_delta - delta;
	else
		fluct = delta - avg_delta;

	run_avg = (run_avg*9 + delta)/10;

	printf("#%-3ld: %0.3f, ",
		nr_runs, (double)delta/1000000.0);

	printf("ravg: %0.2f, ",
		(double)run_avg/1e6);

	printf("cpu: %0.2f / %0.2f",
		(double)cpu_usage/1e6, (double)runavg_cpu_usage/1e6);

	 * rusage statistics are done by the parent; these are less
	 * accurate than the sum_exec_runtime based statistics:

	printf(" [%0.2f / %0.2f]",
		(double)parent_cpu_usage/1e6,
		(double)runavg_parent_cpu_usage/1e6);

	if (nr_sleep_corrections)
		printf(" (%ld sleep corrections)\n", nr_sleep_corrections);
	nr_sleep_corrections = 0;

static void test_calibrations(void)

	printf("the run test took %" PRIu64 " nsecs\n", T1 - T0);

	printf("the sleep test took %" PRIu64 " nsecs\n", T1 - T0);
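
/*
 * Field extraction helpers: pull a named payload field out of a raw
 * tracepoint record, e.g. FILL_FIELD(wakeup_event, pid, event, data)
 * assigns the "pid" field into wakeup_event.pid with the proper cast.
 */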
#define FILL_FIELD(ptr, field, event, data) \
	ptr.field = (typeof(ptr.field)) raw_field_value(event, #field, data)

#define FILL_ARRAY(ptr, array, event, data) \

	void *__array = raw_field_ptr(event, #array, data); \
	memcpy(ptr.array, __array, sizeof(ptr.array)); \

#define FILL_COMMON_FIELDS(ptr, event, data) \

	FILL_FIELD(ptr, common_type, event, data); \
	FILL_FIELD(ptr, common_flags, event, data); \
	FILL_FIELD(ptr, common_preempt_count, event, data); \
	FILL_FIELD(ptr, common_pid, event, data); \
	FILL_FIELD(ptr, common_tgid, event, data); \

struct trace_switch_event {

	u8 common_preempt_count;

struct trace_runtime_event {

	u8 common_preempt_count;

struct trace_wakeup_event {

	u8 common_preempt_count;

struct trace_fork_event {

	u8 common_preempt_count;

	char parent_comm[16];

struct trace_migrate_task_event {

	u8 common_preempt_count;

struct trace_sched_handler {
	void (*switch_event)(struct trace_switch_event *,

			     struct thread *thread);

	void (*runtime_event)(struct trace_runtime_event *,

			      struct thread *thread);

	void (*wakeup_event)(struct trace_wakeup_event *,

			     struct thread *thread);

	void (*fork_event)(struct trace_fork_event *,

			   struct thread *thread);

	void (*migrate_task_event)(struct trace_migrate_task_event *,
				   struct machine *machine,

				   struct thread *thread);

replay_wakeup_event(struct trace_wakeup_event *wakeup_event,
		    struct machine *machine __used,

		    u64 timestamp __used,
		    struct thread *thread __used)

	struct task_desc *waker, *wakee;

	printf("sched_wakeup event %p\n", event);

	printf(" ... pid %d woke up %s/%d\n",
	       wakeup_event->common_pid,

	waker = register_pid(wakeup_event->common_pid, "<unknown>");
	wakee = register_pid(wakeup_event->pid, wakeup_event->comm);

	add_sched_event_wakeup(waker, timestamp, wakee);

static u64 cpu_last_switched[MAX_CPUS];

replay_switch_event(struct trace_switch_event *switch_event,
		    struct machine *machine __used,

		    struct thread *thread __used)

	struct task_desc *prev, __used *next;

	printf("sched_switch event %p\n", event);

	if (cpu >= MAX_CPUS || cpu < 0)

	timestamp0 = cpu_last_switched[cpu];

	delta = timestamp - timestamp0;

		die("hm, delta: %" PRIu64 " < 0 ?\n", delta);

	printf(" ... switch from %s/%d to %s/%d [ran %" PRIu64 " nsecs]\n",
	       switch_event->prev_comm, switch_event->prev_pid,
	       switch_event->next_comm, switch_event->next_pid,

	prev = register_pid(switch_event->prev_pid, switch_event->prev_comm);
	next = register_pid(switch_event->next_pid, switch_event->next_comm);

	cpu_last_switched[cpu] = timestamp;

	add_sched_event_run(prev, timestamp, delta);
	add_sched_event_sleep(prev, timestamp, switch_event->prev_state);

replay_fork_event(struct trace_fork_event *fork_event,

		  u64 timestamp __used,
		  struct thread *thread __used)

	printf("sched_fork event %p\n", event);
	printf("... parent: %s/%d\n", fork_event->parent_comm, fork_event->parent_pid);
	printf("... child: %s/%d\n", fork_event->child_comm, fork_event->child_pid);

	register_pid(fork_event->parent_pid, fork_event->parent_comm);
	register_pid(fork_event->child_pid, fork_event->child_comm);

static struct trace_sched_handler replay_ops = {
	.wakeup_event = replay_wakeup_event,
	.switch_event = replay_switch_event,
	.fork_event = replay_fork_event,

struct sort_dimension {

	struct list_head list;

static LIST_HEAD(cmp_pid);

thread_lat_cmp(struct list_head *list, struct work_atoms *l, struct work_atoms *r)

	struct sort_dimension *sort;

	BUG_ON(list_empty(list));

	list_for_each_entry(sort, list, list) {
		ret = sort->cmp(l, r);

static struct work_atoms *
thread_atoms_search(struct rb_root *root, struct thread *thread,
		    struct list_head *sort_list)

	struct rb_node *node = root->rb_node;
	struct work_atoms key = { .thread = thread };

		struct work_atoms *atoms;

		atoms = container_of(node, struct work_atoms, node);

		cmp = thread_lat_cmp(sort_list, &key, atoms);

			node = node->rb_left;

			node = node->rb_right;

			BUG_ON(thread != atoms->thread);
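
/*
 * Insert a work_atoms node into the latency rb-tree, ordered by the
 * active list of sort keys (cmp_pid during collection, sort_list when
 * producing sorted output).
 */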
__thread_latency_insert(struct rb_root *root, struct work_atoms *data,
			struct list_head *sort_list)

	struct rb_node **new = &(root->rb_node), *parent = NULL;

		struct work_atoms *this;

		this = container_of(*new, struct work_atoms, node);

		cmp = thread_lat_cmp(sort_list, data, this);

			new = &((*new)->rb_left);

			new = &((*new)->rb_right);

	rb_link_node(&data->node, parent, new);
	rb_insert_color(&data->node, root);

static void thread_atoms_insert(struct thread *thread)

	struct work_atoms *atoms = zalloc(sizeof(*atoms));

	atoms->thread = thread;
	INIT_LIST_HEAD(&atoms->work_list);
	__thread_latency_insert(&atom_root, atoms, &cmp_pid);

latency_fork_event(struct trace_fork_event *fork_event __used,
		   struct event *event __used,

		   u64 timestamp __used,
		   struct thread *thread __used)

	/* should insert the newcomer */

static char sched_out_state(struct trace_switch_event *switch_event)

	const char *str = TASK_STATE_TO_CHAR_STR;

	return str[switch_event->prev_state];

add_sched_out_event(struct work_atoms *atoms,

	struct work_atom *atom = zalloc(sizeof(*atom));

	atom->sched_out_time = timestamp;

	if (run_state == 'R') {
		atom->state = THREAD_WAIT_CPU;
		atom->wake_up_time = atom->sched_out_time;

	list_add_tail(&atom->list, &atoms->work_list);

add_runtime_event(struct work_atoms *atoms, u64 delta, u64 timestamp __used)

	struct work_atom *atom;

	BUG_ON(list_empty(&atoms->work_list));

	atom = list_entry(atoms->work_list.prev, struct work_atom, list);

	atom->runtime += delta;
	atoms->total_runtime += delta;
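
/*
 * Scheduling-in completes a wait atom: the distance between
 * wake_up_time and sched_in_time is the latency that gets summed into
 * total_lat and tracked as max_lat.
 */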
add_sched_in_event(struct work_atoms *atoms, u64 timestamp)

	struct work_atom *atom;

	if (list_empty(&atoms->work_list))

	atom = list_entry(atoms->work_list.prev, struct work_atom, list);

	if (atom->state != THREAD_WAIT_CPU)

	if (timestamp < atom->wake_up_time) {
		atom->state = THREAD_IGNORE;

	atom->state = THREAD_SCHED_IN;
	atom->sched_in_time = timestamp;

	delta = atom->sched_in_time - atom->wake_up_time;
	atoms->total_lat += delta;
	if (delta > atoms->max_lat) {
		atoms->max_lat = delta;
		atoms->max_lat_at = timestamp;

latency_switch_event(struct trace_switch_event *switch_event,
		     struct machine *machine,
		     struct event *event __used,

		     struct thread *thread __used)

	struct work_atoms *out_events, *in_events;
	struct thread *sched_out, *sched_in;

	BUG_ON(cpu >= MAX_CPUS || cpu < 0);

	timestamp0 = cpu_last_switched[cpu];
	cpu_last_switched[cpu] = timestamp;

	delta = timestamp - timestamp0;

		die("hm, delta: %" PRIu64 " < 0 ?\n", delta);

	sched_out = machine__findnew_thread(machine, switch_event->prev_pid);
	sched_in = machine__findnew_thread(machine, switch_event->next_pid);

	out_events = thread_atoms_search(&atom_root, sched_out, &cmp_pid);

		thread_atoms_insert(sched_out);
		out_events = thread_atoms_search(&atom_root, sched_out, &cmp_pid);

			die("out-event: Internal tree error");

	add_sched_out_event(out_events, sched_out_state(switch_event), timestamp);

	in_events = thread_atoms_search(&atom_root, sched_in, &cmp_pid);

		thread_atoms_insert(sched_in);
		in_events = thread_atoms_search(&atom_root, sched_in, &cmp_pid);

			die("in-event: Internal tree error");

		 * Task came in that we have not heard about yet,
		 * add in an initial atom in runnable state:

		add_sched_out_event(in_events, 'R', timestamp);

	add_sched_in_event(in_events, timestamp);

latency_runtime_event(struct trace_runtime_event *runtime_event,
		      struct machine *machine,
		      struct event *event __used,

		      struct thread *this_thread __used)

	struct thread *thread = machine__findnew_thread(machine, runtime_event->pid);
	struct work_atoms *atoms = thread_atoms_search(&atom_root, thread, &cmp_pid);

	BUG_ON(cpu >= MAX_CPUS || cpu < 0);

		thread_atoms_insert(thread);
		atoms = thread_atoms_search(&atom_root, thread, &cmp_pid);

			die("in-event: Internal tree error");
		add_sched_out_event(atoms, 'R', timestamp);

	add_runtime_event(atoms, runtime_event->runtime, timestamp);

latency_wakeup_event(struct trace_wakeup_event *wakeup_event,
		     struct machine *machine,
		     struct event *__event __used,

		     struct thread *thread __used)

	struct work_atoms *atoms;
	struct work_atom *atom;
	struct thread *wakee;

	/* Note for later, it may be interesting to observe the failing cases */
	if (!wakeup_event->success)

	wakee = machine__findnew_thread(machine, wakeup_event->pid);
	atoms = thread_atoms_search(&atom_root, wakee, &cmp_pid);

		thread_atoms_insert(wakee);
		atoms = thread_atoms_search(&atom_root, wakee, &cmp_pid);

			die("wakeup-event: Internal tree error");
		add_sched_out_event(atoms, 'S', timestamp);

	BUG_ON(list_empty(&atoms->work_list));

	atom = list_entry(atoms->work_list.prev, struct work_atom, list);

	 * You WILL be missing events if you've recorded only
	 * one CPU, or are only looking at one, so don't
	 * make useless noise.

	if (profile_cpu == -1 && atom->state != THREAD_SLEEPING)
		nr_state_machine_bugs++;

	if (atom->sched_out_time > timestamp) {
		nr_unordered_timestamps++;

	atom->state = THREAD_WAIT_CPU;
	atom->wake_up_time = timestamp;

latency_migrate_task_event(struct trace_migrate_task_event *migrate_task_event,
			   struct machine *machine,
			   struct event *__event __used,

			   struct thread *thread __used)

	struct work_atoms *atoms;
	struct work_atom *atom;
	struct thread *migrant;

	 * Only need to worry about migration when profiling one CPU.

	if (profile_cpu == -1)

	migrant = machine__findnew_thread(machine, migrate_task_event->pid);
	atoms = thread_atoms_search(&atom_root, migrant, &cmp_pid);

		thread_atoms_insert(migrant);
		register_pid(migrant->pid, migrant->comm);
		atoms = thread_atoms_search(&atom_root, migrant, &cmp_pid);

			die("migration-event: Internal tree error");
		add_sched_out_event(atoms, 'R', timestamp);

	BUG_ON(list_empty(&atoms->work_list));

	atom = list_entry(atoms->work_list.prev, struct work_atom, list);
	atom->sched_in_time = atom->sched_out_time = atom->wake_up_time = timestamp;

	if (atom->sched_out_time > timestamp)
		nr_unordered_timestamps++;

static struct trace_sched_handler lat_ops = {
	.wakeup_event = latency_wakeup_event,
	.switch_event = latency_switch_event,
	.runtime_event = latency_runtime_event,
	.fork_event = latency_fork_event,
	.migrate_task_event = latency_migrate_task_event,

static void output_lat_thread(struct work_atoms *work_list)

	if (!work_list->nb_atoms)

	 * Ignore idle threads:

	if (!strcmp(work_list->thread->comm, "swapper"))

	all_runtime += work_list->total_runtime;
	all_count += work_list->nb_atoms;

	ret = printf(" %s:%d ", work_list->thread->comm, work_list->thread->pid);

	for (i = 0; i < 24 - ret; i++)

	avg = work_list->total_lat / work_list->nb_atoms;

	printf("|%11.3f ms |%9" PRIu64 " | avg:%9.3f ms | max:%9.3f ms | max at: %9.6f s\n",
	       (double)work_list->total_runtime / 1e6,
	       work_list->nb_atoms, (double)avg / 1e6,
	       (double)work_list->max_lat / 1e6,
	       (double)work_list->max_lat_at / 1e9);

static int pid_cmp(struct work_atoms *l, struct work_atoms *r)

	if (l->thread->pid < r->thread->pid)

	if (l->thread->pid > r->thread->pid)

static struct sort_dimension pid_sort_dimension = {

static int avg_cmp(struct work_atoms *l, struct work_atoms *r)

	avgl = l->total_lat / l->nb_atoms;
	avgr = r->total_lat / r->nb_atoms;

static struct sort_dimension avg_sort_dimension = {

static int max_cmp(struct work_atoms *l, struct work_atoms *r)

	if (l->max_lat < r->max_lat)

	if (l->max_lat > r->max_lat)

static struct sort_dimension max_sort_dimension = {

static int switch_cmp(struct work_atoms *l, struct work_atoms *r)

	if (l->nb_atoms < r->nb_atoms)

	if (l->nb_atoms > r->nb_atoms)

static struct sort_dimension switch_sort_dimension = {

static int runtime_cmp(struct work_atoms *l, struct work_atoms *r)

	if (l->total_runtime < r->total_runtime)

	if (l->total_runtime > r->total_runtime)

static struct sort_dimension runtime_sort_dimension = {

static struct sort_dimension *available_sorts[] = {
	&pid_sort_dimension,
	&avg_sort_dimension,
	&max_sort_dimension,
	&switch_sort_dimension,
	&runtime_sort_dimension,

#define NB_AVAILABLE_SORTS (int)(sizeof(available_sorts) / sizeof(struct sort_dimension *))

static LIST_HEAD(sort_list);

static int sort_dimension__add(const char *tok, struct list_head *list)

	for (i = 0; i < NB_AVAILABLE_SORTS; i++) {
		if (!strcmp(available_sorts[i]->name, tok)) {
			list_add_tail(&available_sorts[i]->list, list);

static void setup_sorting(void);

static void sort_lat(void)

	struct rb_node *node;

		struct work_atoms *data;
		node = rb_first(&atom_root);

		rb_erase(node, &atom_root);
		data = rb_entry(node, struct work_atoms, node);
		__thread_latency_insert(&sorted_atom_root, data, &sort_list);

static struct trace_sched_handler *trace_handler;

process_sched_wakeup_event(void *data, struct machine *machine,
			   struct event *event,

			   u64 timestamp __used,
			   struct thread *thread __used)

	struct trace_wakeup_event wakeup_event;

	FILL_COMMON_FIELDS(wakeup_event, event, data);

	FILL_ARRAY(wakeup_event, comm, event, data);
	FILL_FIELD(wakeup_event, pid, event, data);
	FILL_FIELD(wakeup_event, prio, event, data);
	FILL_FIELD(wakeup_event, success, event, data);
	FILL_FIELD(wakeup_event, cpu, event, data);

	if (trace_handler->wakeup_event)
		trace_handler->wakeup_event(&wakeup_event, machine, event,
					    cpu, timestamp, thread);

 * Track the current task - that way we can know whether there are any
 * weird events, such as a task being switched away that is not current.

static u32 curr_pid[MAX_CPUS] = { [0 ... MAX_CPUS-1] = -1 };

static struct thread *curr_thread[MAX_CPUS];

static char next_shortname1 = 'A';
static char next_shortname2 = '0';
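
/*
 * 'perf sched map' output: each new thread is assigned a two-character
 * shortname from next_shortname1/next_shortname2, and every switch
 * prints one row showing which thread runs on each CPU.
 */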
map_switch_event(struct trace_switch_event *switch_event,
		 struct machine *machine,
		 struct event *event __used,

		 struct thread *thread __used)

	struct thread *sched_out __used, *sched_in;

	BUG_ON(this_cpu >= MAX_CPUS || this_cpu < 0);

	if (this_cpu > max_cpu)

	timestamp0 = cpu_last_switched[this_cpu];
	cpu_last_switched[this_cpu] = timestamp;

	delta = timestamp - timestamp0;

		die("hm, delta: %" PRIu64 " < 0 ?\n", delta);

	sched_out = machine__findnew_thread(machine, switch_event->prev_pid);
	sched_in = machine__findnew_thread(machine, switch_event->next_pid);

	curr_thread[this_cpu] = sched_in;

	if (!sched_in->shortname[0]) {
		sched_in->shortname[0] = next_shortname1;
		sched_in->shortname[1] = next_shortname2;

		if (next_shortname1 < 'Z') {

			next_shortname1 = 'A';
			if (next_shortname2 < '9') {

				next_shortname2 = '0';

	for (cpu = 0; cpu <= max_cpu; cpu++) {
		if (cpu != this_cpu)

		if (curr_thread[cpu]) {
			if (curr_thread[cpu]->pid)
				printf("%2s ", curr_thread[cpu]->shortname);

	printf(" %12.6f secs ", (double)timestamp/1e9);
	if (new_shortname) {
		printf("%s => %s:%d\n",
			sched_in->shortname, sched_in->comm, sched_in->pid);

process_sched_switch_event(void *data, struct machine *machine,
			   struct event *event,

			   u64 timestamp __used,
			   struct thread *thread __used)

	struct trace_switch_event switch_event;

	FILL_COMMON_FIELDS(switch_event, event, data);

	FILL_ARRAY(switch_event, prev_comm, event, data);
	FILL_FIELD(switch_event, prev_pid, event, data);
	FILL_FIELD(switch_event, prev_prio, event, data);
	FILL_FIELD(switch_event, prev_state, event, data);
	FILL_ARRAY(switch_event, next_comm, event, data);
	FILL_FIELD(switch_event, next_pid, event, data);
	FILL_FIELD(switch_event, next_prio, event, data);

	if (curr_pid[this_cpu] != (u32)-1) {

		 * Are we trying to switch away a PID that is

		if (curr_pid[this_cpu] != switch_event.prev_pid)
			nr_context_switch_bugs++;

	if (trace_handler->switch_event)
		trace_handler->switch_event(&switch_event, machine, event,
					    this_cpu, timestamp, thread);

	curr_pid[this_cpu] = switch_event.next_pid;

process_sched_runtime_event(void *data, struct machine *machine,
			    struct event *event,

			    u64 timestamp __used,
			    struct thread *thread __used)

	struct trace_runtime_event runtime_event;

	FILL_ARRAY(runtime_event, comm, event, data);
	FILL_FIELD(runtime_event, pid, event, data);
	FILL_FIELD(runtime_event, runtime, event, data);
	FILL_FIELD(runtime_event, vruntime, event, data);

	if (trace_handler->runtime_event)
		trace_handler->runtime_event(&runtime_event, machine, event, cpu, timestamp, thread);

process_sched_fork_event(void *data,
			 struct event *event,

			 u64 timestamp __used,
			 struct thread *thread __used)

	struct trace_fork_event fork_event;

	FILL_COMMON_FIELDS(fork_event, event, data);

	FILL_ARRAY(fork_event, parent_comm, event, data);
	FILL_FIELD(fork_event, parent_pid, event, data);
	FILL_ARRAY(fork_event, child_comm, event, data);
	FILL_FIELD(fork_event, child_pid, event, data);

	if (trace_handler->fork_event)
		trace_handler->fork_event(&fork_event, event,
					  cpu, timestamp, thread);

process_sched_exit_event(struct event *event,

			 u64 timestamp __used,
			 struct thread *thread __used)

	printf("sched_exit event %p\n", event);

process_sched_migrate_task_event(void *data, struct machine *machine,
				 struct event *event,

				 u64 timestamp __used,
				 struct thread *thread __used)

	struct trace_migrate_task_event migrate_task_event;

	FILL_COMMON_FIELDS(migrate_task_event, event, data);

	FILL_ARRAY(migrate_task_event, comm, event, data);
	FILL_FIELD(migrate_task_event, pid, event, data);
	FILL_FIELD(migrate_task_event, prio, event, data);
	FILL_FIELD(migrate_task_event, cpu, event, data);

	if (trace_handler->migrate_task_event)
		trace_handler->migrate_task_event(&migrate_task_event, machine,
						  event, cpu, timestamp, thread);
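
/*
 * Demultiplex a raw tracepoint sample by event name and hand it to the
 * matching process_sched_*_event() wrapper above.
 */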
static void process_raw_event(union perf_event *raw_event __used,
			      struct machine *machine, void *data, int cpu,
			      u64 timestamp, struct thread *thread)

	struct event *event;

	type = trace_parse_common_type(data);
	event = trace_find_event(type);

	if (!strcmp(event->name, "sched_switch"))
		process_sched_switch_event(data, machine, event, cpu, timestamp, thread);
	if (!strcmp(event->name, "sched_stat_runtime"))
		process_sched_runtime_event(data, machine, event, cpu, timestamp, thread);
	if (!strcmp(event->name, "sched_wakeup"))
		process_sched_wakeup_event(data, machine, event, cpu, timestamp, thread);
	if (!strcmp(event->name, "sched_wakeup_new"))
		process_sched_wakeup_event(data, machine, event, cpu, timestamp, thread);
	if (!strcmp(event->name, "sched_process_fork"))
		process_sched_fork_event(data, event, cpu, timestamp, thread);
	if (!strcmp(event->name, "sched_process_exit"))
		process_sched_exit_event(event, cpu, timestamp, thread);
	if (!strcmp(event->name, "sched_migrate_task"))
		process_sched_migrate_task_event(data, machine, event, cpu, timestamp, thread);

static int process_sample_event(struct perf_tool *tool __used,
				union perf_event *event,
				struct perf_sample *sample,
				struct perf_evsel *evsel,
				struct machine *machine)

	struct thread *thread;

	if (!(evsel->attr.sample_type & PERF_SAMPLE_RAW))

	thread = machine__findnew_thread(machine, sample->pid);
	if (thread == NULL) {
		pr_debug("problem processing %d event, skipping it.\n",
			 event->header.type);

	dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);

	if (profile_cpu != -1 && profile_cpu != (int)sample->cpu)

	process_raw_event(event, machine, sample->raw_data, sample->cpu,
			  sample->time, thread);

static struct perf_tool perf_sched = {
	.sample = process_sample_event,
	.comm = perf_event__process_comm,
	.lost = perf_event__process_lost,
	.fork = perf_event__process_task,
	.ordered_samples = true,
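
/*
 * Open the input file and push its events through the perf_sched tool;
 * the session is either deleted here or handed back to the caller,
 * which reads its stats before deleting it.
 */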
static void read_events(bool destroy, struct perf_session **psession)

	struct perf_session *session = perf_session__new(input_name, O_RDONLY,
							 0, false, &perf_sched);
	if (session == NULL)

	if (perf_session__has_traces(session, "record -R")) {
		err = perf_session__process_events(session, &perf_sched);

			die("Failed to process events, error %d", err);

		nr_events = session->hists.stats.nr_events[0];
		nr_lost_events = session->hists.stats.total_lost;
		nr_lost_chunks = session->hists.stats.nr_events[PERF_RECORD_LOST];

		perf_session__delete(session);

		*psession = session;

static void print_bad_events(void)

	if (nr_unordered_timestamps && nr_timestamps) {
		printf(" INFO: %.3f%% unordered timestamps (%ld out of %ld)\n",
			(double)nr_unordered_timestamps/(double)nr_timestamps*100.0,
			nr_unordered_timestamps, nr_timestamps);

	if (nr_lost_events && nr_events) {
		printf(" INFO: %.3f%% lost events (%ld out of %ld, in %ld chunks)\n",
			(double)nr_lost_events/(double)nr_events*100.0,
			nr_lost_events, nr_events, nr_lost_chunks);

	if (nr_state_machine_bugs && nr_timestamps) {
		printf(" INFO: %.3f%% state machine bugs (%ld out of %ld)",
			(double)nr_state_machine_bugs/(double)nr_timestamps*100.0,
			nr_state_machine_bugs, nr_timestamps);

			printf(" (due to lost events?)");

	if (nr_context_switch_bugs && nr_timestamps) {
		printf(" INFO: %.3f%% context switch bugs (%ld out of %ld)",
			(double)nr_context_switch_bugs/(double)nr_timestamps*100.0,
			nr_context_switch_bugs, nr_timestamps);

			printf(" (due to lost events?)");

static void __cmd_lat(void)

	struct rb_node *next;
	struct perf_session *session;

	read_events(false, &session);

	printf("\n ---------------------------------------------------------------------------------------------------------------\n");
	printf("  Task                  |   Runtime ms  | Switches | Average delay ms | Maximum delay ms | Maximum delay at     |\n");
	printf(" ---------------------------------------------------------------------------------------------------------------\n");

	next = rb_first(&sorted_atom_root);

		struct work_atoms *work_list;

		work_list = rb_entry(next, struct work_atoms, node);
		output_lat_thread(work_list);
		next = rb_next(next);

	printf(" -----------------------------------------------------------------------------------------\n");
	printf("  TOTAL:                |%11.3f ms |%9" PRIu64 " |\n",
		(double)all_runtime/1e6, all_count);

	printf(" ---------------------------------------------------\n");

	perf_session__delete(session);

static struct trace_sched_handler map_ops = {
	.wakeup_event = NULL,
	.switch_event = map_switch_event,
	.runtime_event = NULL,

static void __cmd_map(void)

	max_cpu = sysconf(_SC_NPROCESSORS_CONF);

	read_events(true, NULL);

static void __cmd_replay(void)

	calibrate_run_measurement_overhead();
	calibrate_sleep_measurement_overhead();

	test_calibrations();

	read_events(true, NULL);

	printf("nr_run_events:        %ld\n", nr_run_events);
	printf("nr_sleep_events:      %ld\n", nr_sleep_events);
	printf("nr_wakeup_events:     %ld\n", nr_wakeup_events);

	if (targetless_wakeups)
		printf("target-less wakeups:  %ld\n", targetless_wakeups);
	if (multitarget_wakeups)
		printf("multi-target wakeups: %ld\n", multitarget_wakeups);
	if (nr_run_events_optimized)
		printf("run atoms optimized:  %ld\n",
			nr_run_events_optimized);

	print_task_traces();
	add_cross_task_wakeups();

	printf("------------------------------------------------------------\n");
	for (i = 0; i < replay_repeat; i++)

static const char * const sched_usage[] = {
	"perf sched [<options>] {record|latency|map|replay|script}",

static const struct option sched_options[] = {
	OPT_STRING('i', "input", &input_name, "file",

	OPT_INCR('v', "verbose", &verbose,
		 "be more verbose (show symbol address, etc)"),
	OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
		    "dump raw trace in ASCII"),

static const char * const latency_usage[] = {
	"perf sched latency [<options>]",

static const struct option latency_options[] = {
	OPT_STRING('s', "sort", &sort_order, "key[,key2...]",
		   "sort by key(s): runtime, switch, avg, max"),
	OPT_INCR('v', "verbose", &verbose,
		 "be more verbose (show symbol address, etc)"),
	OPT_INTEGER('C', "CPU", &profile_cpu,
		    "CPU to profile on"),
	OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
		    "dump raw trace in ASCII"),

static const char * const replay_usage[] = {
	"perf sched replay [<options>]",

static const struct option replay_options[] = {
	OPT_UINTEGER('r', "repeat", &replay_repeat,
		     "repeat the workload replay N times (-1: infinite)"),
	OPT_INCR('v', "verbose", &verbose,
		 "be more verbose (show symbol address, etc)"),
	OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
		    "dump raw trace in ASCII"),

static void setup_sorting(void)

	char *tmp, *tok, *str = strdup(sort_order);

	for (tok = strtok_r(str, ", ", &tmp);
	     tok; tok = strtok_r(NULL, ", ", &tmp)) {
		if (sort_dimension__add(tok, &sort_list) < 0) {
			error("Unknown --sort key: `%s'", tok);
			usage_with_options(latency_usage, latency_options);

	sort_dimension__add("pid", &cmp_pid);
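
/*
 * 'perf sched record' is a thin wrapper: these fixed arguments are
 * prepended to the user's and forwarded to the generic record command.
 */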
static const char *record_args[] = {

	"-e", "sched:sched_switch",
	"-e", "sched:sched_stat_wait",
	"-e", "sched:sched_stat_sleep",
	"-e", "sched:sched_stat_iowait",
	"-e", "sched:sched_stat_runtime",
	"-e", "sched:sched_process_exit",
	"-e", "sched:sched_process_fork",
	"-e", "sched:sched_wakeup",
	"-e", "sched:sched_migrate_task",

static int __cmd_record(int argc, const char **argv)

	unsigned int rec_argc, i, j;
	const char **rec_argv;

	rec_argc = ARRAY_SIZE(record_args) + argc - 1;
	rec_argv = calloc(rec_argc + 1, sizeof(char *));

	if (rec_argv == NULL)

	for (i = 0; i < ARRAY_SIZE(record_args); i++)
		rec_argv[i] = strdup(record_args[i]);

	for (j = 1; j < (unsigned int)argc; j++, i++)
		rec_argv[i] = argv[j];

	BUG_ON(i != rec_argc);

	return cmd_record(i, rec_argv, NULL);

int cmd_sched(int argc, const char **argv, const char *prefix __used)

	argc = parse_options(argc, argv, sched_options, sched_usage,
			     PARSE_OPT_STOP_AT_NON_OPTION);

		usage_with_options(sched_usage, sched_options);

	 * Aliased to 'perf script' for now:

	if (!strcmp(argv[0], "script"))
		return cmd_script(argc, argv, prefix);

	if (!strncmp(argv[0], "rec", 3)) {
		return __cmd_record(argc, argv);
	} else if (!strncmp(argv[0], "lat", 3)) {
		trace_handler = &lat_ops;

		argc = parse_options(argc, argv, latency_options, latency_usage, 0);

			usage_with_options(latency_usage, latency_options);

	} else if (!strcmp(argv[0], "map")) {
		trace_handler = &map_ops;

	} else if (!strncmp(argv[0], "rep", 3)) {
		trace_handler = &replay_ops;

		argc = parse_options(argc, argv, replay_options, replay_usage, 0);

			usage_with_options(replay_usage, replay_options);

		usage_with_options(sched_usage, sched_options);