2 * block queue tracing parse application
4 * Copyright (C) 2005 Jens Axboe <axboe@suse.de>
5 * Copyright (C) 2006 Jens Axboe <axboe@kernel.dk>
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 #include <sys/types.h>
39 static char blkparse_version[] = "1.0.0";
42 unsigned long start, end;
43 struct skip_info *prev, *next;
51 unsigned long long events;
52 unsigned long long first_reported_time;
53 unsigned long long last_reported_time;
54 unsigned long long last_read_time;
55 struct io_stats io_stats;
57 unsigned long long seq_skips;
58 unsigned int max_depth[2];
59 unsigned int cur_depth[2];
61 struct rb_root rb_track;
66 unsigned long *cpu_map;
67 unsigned int cpu_map_max;
69 struct per_cpu_info *cpus;
73 * some duplicated effort here, we can unify this hash and the ppi hash later
75 struct process_pid_map {
78 struct process_pid_map *hash_next, *list_next;
81 #define PPM_HASH_SHIFT (8)
82 #define PPM_HASH_SIZE (1 << PPM_HASH_SHIFT)
83 #define PPM_HASH_MASK (PPM_HASH_SIZE - 1)
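/*
 * pid -> command name cache: a 256-bucket hash (PPM_HASH_SHIFT bits) keyed
 * by a jhash of the pid. It is filled from process name notify events in
 * handle_notify()/add_ppm_hash() and looked up via find_ppm().
 */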
84 static struct process_pid_map *ppm_hash_table[PPM_HASH_SIZE];
86 struct per_process_info {
87 struct process_pid_map *ppm;
88 struct io_stats io_stats;
89 struct per_process_info *hash_next, *list_next;
95 unsigned long long longest_allocation_wait[2];
96 unsigned long long longest_dispatch_wait[2];
97 unsigned long long longest_completion_wait[2];
100 #define PPI_HASH_SHIFT (8)
101 #define PPI_HASH_SIZE (1 << PPI_HASH_SHIFT)
102 #define PPI_HASH_MASK (PPI_HASH_SIZE - 1)
103 static struct per_process_info *ppi_hash_table[PPI_HASH_SIZE];
104 static struct per_process_info *ppi_list;
105 static int ppi_list_entries;
107 #define S_OPTS "a:A:b:D:d:f:F:hi:o:Oqstw:vV"
108 static struct option l_opts[] = {
111 .has_arg = required_argument,
117 .has_arg = required_argument,
123 .has_arg = required_argument,
128 .name = "input-directory",
129 .has_arg = required_argument,
134 .name = "dump-binary",
135 .has_arg = required_argument,
141 .has_arg = required_argument,
146 .name = "format-spec",
147 .has_arg = required_argument,
152 .name = "hash-by-name",
153 .has_arg = no_argument,
159 .has_arg = required_argument,
165 .has_arg = required_argument,
170 .name = "no-text-output",
171 .has_arg = no_argument,
177 .has_arg = no_argument,
182 .name = "per-program-stats",
183 .has_arg = no_argument,
189 .has_arg = no_argument,
195 .has_arg = required_argument,
201 .has_arg = no_argument,
207 .has_arg = no_argument,
217 * for sorting the displayed output
220 struct blk_io_trace *bit;
221 struct rb_node rb_node;
223 unsigned long read_sequence;
226 static struct rb_root rb_sort_root;
227 static unsigned long rb_sort_entries;
229 static struct trace *trace_list;
234 static struct blk_io_trace *bit_alloc_list;
235 static struct trace *t_alloc_list;
238 * for tracking individual ios
241 struct rb_node rb_node;
243 struct process_pid_map *ppm;
245 unsigned long long allocation_time;
246 unsigned long long queue_time;
247 unsigned long long dispatch_time;
248 unsigned long long completion_time;
252 static struct per_dev_info *devices;
253 static char *get_dev_name(struct per_dev_info *, char *, int);
254 static int trace_rb_insert_last(struct per_dev_info *, struct trace *);
257 static char *output_name;
258 static char *input_dir;
260 static unsigned long long genesis_time;
261 static unsigned long long last_allowed_time;
262 static unsigned long long stopwatch_start; /* start from zero by default */
263 static unsigned long long stopwatch_end = -1ULL; /* "infinity" */
264 static unsigned long read_sequence;
266 static int per_process_stats;
267 static int per_device_and_cpu_stats = 1;
268 static int track_ios;
269 static int ppi_hash_by_pid = 1;
271 static unsigned int act_mask = -1U;
272 static int stats_printed;
273 int data_is_native = -1;
275 static FILE *dump_fp;
276 static char *dump_binary;
278 static unsigned int t_alloc_cache;
279 static unsigned int bit_alloc_cache;
281 #define RB_BATCH_DEFAULT (512)
282 static unsigned int rb_batch = RB_BATCH_DEFAULT;
285 static char *pipename;
287 static int text_output = 1;
289 #define is_done() (*(volatile int *)(&done))
290 static volatile int done;
292 struct timespec abs_start_time;
293 static unsigned long long start_timestamp;
295 static int have_drv_data = 0;
297 #define JHASH_RANDOM (0x3af5f2ee)
299 #define CPUS_PER_LONG (8 * sizeof(unsigned long))
300 #define CPU_IDX(cpu) ((cpu) / CPUS_PER_LONG)
301 #define CPU_BIT(cpu) ((cpu) & (CPUS_PER_LONG - 1))
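/*
 * Per-device CPU bitmap: one bit per CPU, CPUS_PER_LONG bits packed into
 * each unsigned long. CPU_IDX() selects the word and CPU_BIT() the bit
 * within it (e.g. with 64-bit longs, cpu 70 maps to word 1, bit 6);
 * cpu_mark_online()/cpu_mark_offline()/cpu_is_online() operate on
 * pdi->cpu_map using these.
 */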
303 static void output_binary(void *buf, int len)
306 size_t n = fwrite(buf, len, 1, dump_fp);
315 static void resize_cpu_info(struct per_dev_info *pdi, int cpu)
317 struct per_cpu_info *cpus = pdi->cpus;
318 int ncpus = pdi->ncpus;
319 int new_count = cpu + 1;
323 size = new_count * sizeof(struct per_cpu_info);
324 cpus = realloc(cpus, size);
327 fprintf(stderr, "Out of memory, CPU info for device %s (%d)\n",
328 get_dev_name(pdi, name, sizeof(name)), size);
332 new_start = (char *)cpus + (ncpus * sizeof(struct per_cpu_info));
333 new_space = (new_count - ncpus) * sizeof(struct per_cpu_info);
334 memset(new_start, 0, new_space);
336 pdi->ncpus = new_count;
339 for (new_count = 0; new_count < pdi->ncpus; new_count++) {
340 struct per_cpu_info *pci = &pdi->cpus[new_count];
344 memset(&pci->rb_last, 0, sizeof(pci->rb_last));
345 pci->rb_last_entries = 0;
346 pci->last_sequence = -1;
351 static struct per_cpu_info *get_cpu_info(struct per_dev_info *pdi, int cpu)
353 struct per_cpu_info *pci;
355 if (cpu >= pdi->ncpus)
356 resize_cpu_info(pdi, cpu);
358 pci = &pdi->cpus[cpu];
364 static int resize_devices(char *name)
366 int size = (ndevices + 1) * sizeof(struct per_dev_info);
368 devices = realloc(devices, size);
370 fprintf(stderr, "Out of memory, device %s (%d)\n", name, size);
373 memset(&devices[ndevices], 0, sizeof(struct per_dev_info));
374 devices[ndevices].name = name;
379 static struct per_dev_info *get_dev_info(dev_t dev)
381 struct per_dev_info *pdi;
384 for (i = 0; i < ndevices; i++) {
386 devices[i].dev = dev;
387 if (devices[i].dev == dev)
391 if (resize_devices(NULL))
394 pdi = &devices[ndevices - 1];
396 pdi->first_reported_time = 0;
397 pdi->last_read_time = 0;
402 static void insert_skip(struct per_cpu_info *pci, unsigned long start,
405 struct skip_info *sip;
407 for (sip = pci->skips_tail; sip != NULL; sip = sip->prev) {
408 if (end == (sip->start - 1)) {
411 } else if (start == (sip->end + 1)) {
417 sip = malloc(sizeof(struct skip_info));
420 sip->prev = sip->next = NULL;
421 if (pci->skips_tail == NULL)
422 pci->skips_head = pci->skips_tail = sip;
424 sip->prev = pci->skips_tail;
425 pci->skips_tail->next = sip;
426 pci->skips_tail = sip;
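/*
 * Skips are per-cpu ranges of sequence numbers we have not seen yet, kept
 * on a doubly linked list (skips_head/skips_tail). insert_skip() extends an
 * adjacent range when possible, otherwise appends a new entry, and
 * check_current_skips() retires ranges as the missing traces show up.
 */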
430 static void remove_sip(struct per_cpu_info *pci, struct skip_info *sip)
432 if (sip->prev == NULL) {
433 if (sip->next == NULL)
434 pci->skips_head = pci->skips_tail = NULL;
436 pci->skips_head = sip->next;
437 sip->next->prev = NULL;
439 } else if (sip->next == NULL) {
440 pci->skips_tail = sip->prev;
441 sip->prev->next = NULL;
443 sip->prev->next = sip->next;
444 sip->next->prev = sip->prev;
447 sip->prev = sip->next = NULL;
451 #define IN_SKIP(sip,seq) (((sip)->start <= (seq)) && ((seq) <= (sip)->end))
452 static int check_current_skips(struct per_cpu_info *pci, unsigned long seq)
454 struct skip_info *sip;
456 for (sip = pci->skips_tail; sip != NULL; sip = sip->prev) {
457 if (IN_SKIP(sip, seq)) {
458 if (sip->start == seq) {
460 remove_sip(pci, sip);
463 } else if (sip->end == seq)
467 insert_skip(pci, seq + 1, sip->end);
476 static void collect_pdi_skips(struct per_dev_info *pdi)
478 struct skip_info *sip;
484 for (cpu = 0; cpu < pdi->ncpus; cpu++) {
485 struct per_cpu_info *pci = &pdi->cpus[cpu];
487 for (sip = pci->skips_head; sip != NULL; sip = sip->next) {
489 pdi->seq_skips += (sip->end - sip->start + 1);
491 fprintf(stderr,"(%d,%d): skipping %lu -> %lu\n",
492 MAJOR(pdi->dev), MINOR(pdi->dev),
493 sip->start, sip->end);
498 static void cpu_mark_online(struct per_dev_info *pdi, unsigned int cpu)
500 if (cpu >= pdi->cpu_map_max || !pdi->cpu_map) {
501 int new_max = (cpu + CPUS_PER_LONG) & ~(CPUS_PER_LONG - 1);
502 unsigned long *map = malloc(new_max / sizeof(long));
504 memset(map, 0, new_max / sizeof(long));
507 memcpy(map, pdi->cpu_map, pdi->cpu_map_max / sizeof(long));
512 pdi->cpu_map_max = new_max;
515 pdi->cpu_map[CPU_IDX(cpu)] |= (1UL << CPU_BIT(cpu));
518 static inline void cpu_mark_offline(struct per_dev_info *pdi, int cpu)
520 pdi->cpu_map[CPU_IDX(cpu)] &= ~(1UL << CPU_BIT(cpu));
523 static inline int cpu_is_online(struct per_dev_info *pdi, int cpu)
525 return (pdi->cpu_map[CPU_IDX(cpu)] & (1UL << CPU_BIT(cpu))) != 0;
528 static inline int ppm_hash_pid(pid_t pid)
530 return jhash_1word(pid, JHASH_RANDOM) & PPM_HASH_MASK;
533 static struct process_pid_map *find_ppm(pid_t pid)
535 const int hash_idx = ppm_hash_pid(pid);
536 struct process_pid_map *ppm;
538 ppm = ppm_hash_table[hash_idx];
543 ppm = ppm->hash_next;
549 static struct process_pid_map *add_ppm_hash(pid_t pid, const char *name)
551 const int hash_idx = ppm_hash_pid(pid);
552 struct process_pid_map *ppm;
556 ppm = malloc(sizeof(*ppm));
557 memset(ppm, 0, sizeof(*ppm));
559 snprintf(ppm->comm, sizeof(ppm->comm), "%s", name);
560 ppm->hash_next = ppm_hash_table[hash_idx];
561 ppm_hash_table[hash_idx] = ppm;
567 static void handle_notify(struct blk_io_trace *bit)
569 void *payload = (caddr_t) bit + sizeof(*bit);
572 switch (bit->action) {
574 add_ppm_hash(bit->pid, payload);
577 case BLK_TN_TIMESTAMP:
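/*
 * Payload is two 32-bit words: the wall-clock seconds and nanoseconds at
 * which tracing started. Together with the trace time in bit->time this
 * anchors the relative timestamps to absolute time (see also
 * find_genesis()).
 */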
578 if (bit->pdu_len != sizeof(two32))
580 memcpy(two32, payload, sizeof(two32));
581 if (!data_is_native) {
582 two32[0] = be32_to_cpu(two32[0]);
583 two32[1] = be32_to_cpu(two32[1]);
585 start_timestamp = bit->time;
586 abs_start_time.tv_sec = two32[0];
587 abs_start_time.tv_nsec = two32[1];
588 if (abs_start_time.tv_nsec < 0) {
589 abs_start_time.tv_sec--;
590 abs_start_time.tv_nsec += 1000000000;
596 if (bit->pdu_len > 0) {
597 char msg[bit->pdu_len+1];
599 memcpy(msg, (char *)payload, bit->pdu_len);
600 msg[bit->pdu_len] = '\0';
603 "%3d,%-3d %2d %8s %5d.%09lu %5u %2s %3s %s\n",
604 MAJOR(bit->device), MINOR(bit->device),
605 bit->cpu, "0", (int) SECONDS(bit->time),
606 (unsigned long) NANO_SECONDS(bit->time),
612 /* Ignore unknown notify events */
617 char *find_process_name(pid_t pid)
619 struct process_pid_map *ppm = find_ppm(pid);
627 static inline int ppi_hash_pid(pid_t pid)
629 return jhash_1word(pid, JHASH_RANDOM) & PPI_HASH_MASK;
632 static inline int ppi_hash_name(const char *name)
634 return jhash(name, 16, JHASH_RANDOM) & PPI_HASH_MASK;
637 static inline int ppi_hash(struct per_process_info *ppi)
639 struct process_pid_map *ppm = ppi->ppm;
642 return ppi_hash_pid(ppm->pid);
644 return ppi_hash_name(ppm->comm);
647 static inline void add_ppi_to_hash(struct per_process_info *ppi)
649 const int hash_idx = ppi_hash(ppi);
651 ppi->hash_next = ppi_hash_table[hash_idx];
652 ppi_hash_table[hash_idx] = ppi;
655 static inline void add_ppi_to_list(struct per_process_info *ppi)
657 ppi->list_next = ppi_list;
662 static struct per_process_info *find_ppi_by_name(char *name)
664 const int hash_idx = ppi_hash_name(name);
665 struct per_process_info *ppi;
667 ppi = ppi_hash_table[hash_idx];
669 struct process_pid_map *ppm = ppi->ppm;
671 if (!strcmp(ppm->comm, name))
674 ppi = ppi->hash_next;
680 static struct per_process_info *find_ppi_by_pid(pid_t pid)
682 const int hash_idx = ppi_hash_pid(pid);
683 struct per_process_info *ppi;
685 ppi = ppi_hash_table[hash_idx];
687 struct process_pid_map *ppm = ppi->ppm;
692 ppi = ppi->hash_next;
698 static struct per_process_info *find_ppi(pid_t pid)
700 struct per_process_info *ppi;
704 return find_ppi_by_pid(pid);
706 name = find_process_name(pid);
710 ppi = find_ppi_by_name(name);
711 if (ppi && ppi->ppm->pid != pid)
712 ppi->more_than_one = 1;
718 * struct trace and blktrace allocation cache, we do potentially
719 * millions of mallocs for these structures while only using at most
720 * a few thousand at a time
722 static inline void t_free(struct trace *t)
724 if (t_alloc_cache < 1024) {
725 t->next = t_alloc_list;
732 static inline struct trace *t_alloc(void)
734 struct trace *t = t_alloc_list;
737 t_alloc_list = t->next;
742 return malloc(sizeof(*t));
745 static inline void bit_free(struct blk_io_trace *bit)
747 if (bit_alloc_cache < 1024 && !bit->pdu_len) {
749 * abuse a 64-bit field for a next pointer for the free item
751 bit->time = (__u64) (unsigned long) bit_alloc_list;
752 bit_alloc_list = (struct blk_io_trace *) bit;
758 static inline struct blk_io_trace *bit_alloc(void)
760 struct blk_io_trace *bit = bit_alloc_list;
763 bit_alloc_list = (struct blk_io_trace *) (unsigned long) \
769 return malloc(sizeof(*bit));
772 static inline void __put_trace_last(struct per_dev_info *pdi, struct trace *t)
774 struct per_cpu_info *pci = get_cpu_info(pdi, t->bit->cpu);
776 rb_erase(&t->rb_node, &pci->rb_last);
777 pci->rb_last_entries--;
783 static void put_trace(struct per_dev_info *pdi, struct trace *t)
785 rb_erase(&t->rb_node, &rb_sort_root);
788 trace_rb_insert_last(pdi, t);
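/*
 * Sort-tree ordering: traces are keyed by time first, then device, then
 * sequence number, so rb_first() always yields the oldest pending event.
 */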
791 static inline int trace_rb_insert(struct trace *t, struct rb_root *root)
793 struct rb_node **p = &root->rb_node;
794 struct rb_node *parent = NULL;
800 __t = rb_entry(parent, struct trace, rb_node);
802 if (t->bit->time < __t->bit->time)
804 else if (t->bit->time > __t->bit->time)
806 else if (t->bit->device < __t->bit->device)
808 else if (t->bit->device > __t->bit->device)
810 else if (t->bit->sequence < __t->bit->sequence)
812 else /* >= sequence */
816 rb_link_node(&t->rb_node, parent, p);
817 rb_insert_color(&t->rb_node, root);
821 static inline int trace_rb_insert_sort(struct trace *t)
823 if (!trace_rb_insert(t, &rb_sort_root)) {
831 static int trace_rb_insert_last(struct per_dev_info *pdi, struct trace *t)
833 struct per_cpu_info *pci = get_cpu_info(pdi, t->bit->cpu);
835 if (trace_rb_insert(t, &pci->rb_last))
838 pci->rb_last_entries++;
840 if (pci->rb_last_entries > rb_batch * pdi->nfiles) {
841 struct rb_node *n = rb_first(&pci->rb_last);
843 t = rb_entry(n, struct trace, rb_node);
844 __put_trace_last(pdi, t);
850 static struct trace *trace_rb_find(dev_t device, unsigned long sequence,
851 struct rb_root *root, int order)
853 struct rb_node *n = root->rb_node;
854 struct rb_node *prev = NULL;
858 __t = rb_entry(n, struct trace, rb_node);
861 if (device < __t->bit->device)
863 else if (device > __t->bit->device)
865 else if (sequence < __t->bit->sequence)
867 else if (sequence > __t->bit->sequence)
874 * hack - the list may not be sequence ordered because some
875 * events don't have sequence and time matched. so we end up
876 * being a little off in the rb lookup here, because we don't
877 * know the time we are looking for. compensate by browsing
878 * a little ahead from the last entry to find the match
883 while (((n = rb_next(prev)) != NULL) && max--) {
884 __t = rb_entry(n, struct trace, rb_node);
886 if (__t->bit->device == device &&
887 __t->bit->sequence == sequence)
897 static inline struct trace *trace_rb_find_last(struct per_dev_info *pdi,
898 struct per_cpu_info *pci,
901 return trace_rb_find(pdi->dev, seq, &pci->rb_last, 0);
904 static inline int track_rb_insert(struct per_dev_info *pdi,struct io_track *iot)
906 struct rb_node **p = &pdi->rb_track.rb_node;
907 struct rb_node *parent = NULL;
908 struct io_track *__iot;
912 __iot = rb_entry(parent, struct io_track, rb_node);
914 if (iot->sector < __iot->sector)
916 else if (iot->sector > __iot->sector)
920 "sector alias (%Lu) on device %d,%d!\n",
921 (unsigned long long) iot->sector,
922 MAJOR(pdi->dev), MINOR(pdi->dev));
927 rb_link_node(&iot->rb_node, parent, p);
928 rb_insert_color(&iot->rb_node, &pdi->rb_track);
932 static struct io_track *__find_track(struct per_dev_info *pdi, __u64 sector)
934 struct rb_node *n = pdi->rb_track.rb_node;
935 struct io_track *__iot;
938 __iot = rb_entry(n, struct io_track, rb_node);
940 if (sector < __iot->sector)
942 else if (sector > __iot->sector)
951 static struct io_track *find_track(struct per_dev_info *pdi, pid_t pid,
954 struct io_track *iot;
956 iot = __find_track(pdi, sector);
958 iot = malloc(sizeof(*iot));
959 iot->ppm = find_ppm(pid);
961 iot->ppm = add_ppm_hash(pid, "unknown");
962 iot->sector = sector;
963 track_rb_insert(pdi, iot);
969 static void log_track_frontmerge(struct per_dev_info *pdi,
970 struct blk_io_trace *t)
972 struct io_track *iot;
977 iot = __find_track(pdi, t->sector + t_sec(t));
980 fprintf(stderr, "merge not found for (%d,%d): %llu\n",
981 MAJOR(pdi->dev), MINOR(pdi->dev),
982 (unsigned long long) t->sector + t_sec(t));
986 rb_erase(&iot->rb_node, &pdi->rb_track);
987 iot->sector -= t_sec(t);
988 track_rb_insert(pdi, iot);
991 static void log_track_getrq(struct per_dev_info *pdi, struct blk_io_trace *t)
993 struct io_track *iot;
998 iot = find_track(pdi, t->pid, t->sector);
999 iot->allocation_time = t->time;
1002 static inline int is_remapper(struct per_dev_info *pdi)
1004 int major = MAJOR(pdi->dev);
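/* major 253 is the dynamic major commonly used by device-mapper, 9 is MD */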
1006 return (major == 253 || major == 9);
1010 * for md/dm setups, the interesting cycle is Q -> C. So track queueing
1011 * time here, as dispatch time
1013 static void log_track_queue(struct per_dev_info *pdi, struct blk_io_trace *t)
1015 struct io_track *iot;
1019 if (!is_remapper(pdi))
1022 iot = find_track(pdi, t->pid, t->sector);
1023 iot->dispatch_time = t->time;
1027 * return time between rq allocation and insertion
1029 static unsigned long long log_track_insert(struct per_dev_info *pdi,
1030 struct blk_io_trace *t)
1032 unsigned long long elapsed;
1033 struct io_track *iot;
1038 iot = find_track(pdi, t->pid, t->sector);
1039 iot->queue_time = t->time;
1041 if (!iot->allocation_time)
1044 elapsed = iot->queue_time - iot->allocation_time;
1046 if (per_process_stats) {
1047 struct per_process_info *ppi = find_ppi(iot->ppm->pid);
1048 int w = (t->action & BLK_TC_ACT(BLK_TC_WRITE)) != 0;
1050 if (ppi && elapsed > ppi->longest_allocation_wait[w])
1051 ppi->longest_allocation_wait[w] = elapsed;
1058 * return time between queue and issue
1060 static unsigned long long log_track_issue(struct per_dev_info *pdi,
1061 struct blk_io_trace *t)
1063 unsigned long long elapsed;
1064 struct io_track *iot;
1068 if ((t->action & BLK_TC_ACT(BLK_TC_FS)) == 0)
1071 iot = __find_track(pdi, t->sector);
1074 fprintf(stderr, "issue not found for (%d,%d): %llu\n",
1075 MAJOR(pdi->dev), MINOR(pdi->dev),
1076 (unsigned long long) t->sector);
1080 iot->dispatch_time = t->time;
1081 elapsed = iot->dispatch_time - iot->queue_time;
1083 if (per_process_stats) {
1084 struct per_process_info *ppi = find_ppi(iot->ppm->pid);
1085 int w = (t->action & BLK_TC_ACT(BLK_TC_WRITE)) != 0;
1087 if (ppi && elapsed > ppi->longest_dispatch_wait[w])
1088 ppi->longest_dispatch_wait[w] = elapsed;
1095 * return time between dispatch and complete
1097 static unsigned long long log_track_complete(struct per_dev_info *pdi,
1098 struct blk_io_trace *t)
1100 unsigned long long elapsed;
1101 struct io_track *iot;
1106 iot = __find_track(pdi, t->sector);
1109 fprintf(stderr,"complete not found for (%d,%d): %llu\n",
1110 MAJOR(pdi->dev), MINOR(pdi->dev),
1111 (unsigned long long) t->sector);
1115 iot->completion_time = t->time;
1116 elapsed = iot->completion_time - iot->dispatch_time;
1118 if (per_process_stats) {
1119 struct per_process_info *ppi = find_ppi(iot->ppm->pid);
1120 int w = (t->action & BLK_TC_ACT(BLK_TC_WRITE)) != 0;
1122 if (ppi && elapsed > ppi->longest_completion_wait[w])
1123 ppi->longest_completion_wait[w] = elapsed;
1127 * kill the trace, we don't need it after completion
1129 rb_erase(&iot->rb_node, &pdi->rb_track);
1136 static struct io_stats *find_process_io_stats(pid_t pid)
1138 struct per_process_info *ppi = find_ppi(pid);
1141 ppi = malloc(sizeof(*ppi));
1142 memset(ppi, 0, sizeof(*ppi));
1143 ppi->ppm = find_ppm(pid);
1145 ppi->ppm = add_ppm_hash(pid, "unknown");
1146 add_ppi_to_hash(ppi);
1147 add_ppi_to_list(ppi);
1150 return &ppi->io_stats;
1153 static char *get_dev_name(struct per_dev_info *pdi, char *buffer, int size)
1156 snprintf(buffer, size, "%s", pdi->name);
1158 snprintf(buffer, size, "%d,%d",MAJOR(pdi->dev),MINOR(pdi->dev));
1162 static void check_time(struct per_dev_info *pdi, struct blk_io_trace *bit)
1164 unsigned long long this = bit->time;
1165 unsigned long long last = pdi->last_reported_time;
1167 pdi->backwards = (this < last) ? 'B' : ' ';
1168 pdi->last_reported_time = this;
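/*
 * The account_*() helpers below all follow the same pattern: update the
 * per-cpu io_stats unconditionally, and additionally the owning process'
 * stats when per-process accounting (-s) is enabled.
 */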
1171 static inline void __account_m(struct io_stats *ios, struct blk_io_trace *t,
1176 ios->mwrite_kb += t_kb(t);
1179 ios->mread_kb += t_kb(t);
1183 static inline void account_m(struct blk_io_trace *t, struct per_cpu_info *pci,
1186 __account_m(&pci->io_stats, t, rw);
1188 if (per_process_stats) {
1189 struct io_stats *ios = find_process_io_stats(t->pid);
1191 __account_m(ios, t, rw);
1195 static inline void __account_pc_queue(struct io_stats *ios,
1196 struct blk_io_trace *t, int rw)
1200 ios->qwrite_kb_pc += t_kb(t);
1203 ios->qread_kb += t_kb(t);
1207 static inline void account_pc_queue(struct blk_io_trace *t,
1208 struct per_cpu_info *pci, int rw)
1210 __account_pc_queue(&pci->io_stats, t, rw);
1212 if (per_process_stats) {
1213 struct io_stats *ios = find_process_io_stats(t->pid);
1215 __account_pc_queue(ios, t, rw);
1219 static inline void __account_pc_issue(struct io_stats *ios, int rw,
1224 ios->iwrite_kb_pc += bytes >> 10;
1227 ios->iread_kb_pc += bytes >> 10;
1231 static inline void account_pc_issue(struct blk_io_trace *t,
1232 struct per_cpu_info *pci, int rw)
1234 __account_pc_issue(&pci->io_stats, rw, t->bytes);
1236 if (per_process_stats) {
1237 struct io_stats *ios = find_process_io_stats(t->pid);
1239 __account_pc_issue(ios, rw, t->bytes);
1243 static inline void __account_pc_requeue(struct io_stats *ios,
1244 struct blk_io_trace *t, int rw)
1248 ios->iwrite_kb_pc -= t_kb(t);
1251 ios->iread_kb_pc -= t_kb(t);
1255 static inline void account_pc_requeue(struct blk_io_trace *t,
1256 struct per_cpu_info *pci, int rw)
1258 __account_pc_requeue(&pci->io_stats, t, rw);
1260 if (per_process_stats) {
1261 struct io_stats *ios = find_process_io_stats(t->pid);
1263 __account_pc_requeue(ios, t, rw);
1267 static inline void __account_pc_c(struct io_stats *ios, int rw)
1275 static inline void account_pc_c(struct blk_io_trace *t,
1276 struct per_cpu_info *pci, int rw)
1278 __account_pc_c(&pci->io_stats, rw);
1280 if (per_process_stats) {
1281 struct io_stats *ios = find_process_io_stats(t->pid);
1283 __account_pc_c(ios, rw);
1287 static inline void __account_queue(struct io_stats *ios, struct blk_io_trace *t,
1292 ios->qwrite_kb += t_kb(t);
1295 ios->qread_kb += t_kb(t);
1299 static inline void account_queue(struct blk_io_trace *t,
1300 struct per_cpu_info *pci, int rw)
1302 __account_queue(&pci->io_stats, t, rw);
1304 if (per_process_stats) {
1305 struct io_stats *ios = find_process_io_stats(t->pid);
1307 __account_queue(ios, t, rw);
1311 static inline void __account_c(struct io_stats *ios, int rw, int bytes)
1315 ios->cwrite_kb += bytes >> 10;
1318 ios->cread_kb += bytes >> 10;
1322 static inline void account_c(struct blk_io_trace *t, struct per_cpu_info *pci,
1325 __account_c(&pci->io_stats, rw, bytes);
1327 if (per_process_stats) {
1328 struct io_stats *ios = find_process_io_stats(t->pid);
1330 __account_c(ios, rw, bytes);
1334 static inline void __account_issue(struct io_stats *ios, int rw,
1339 ios->iwrite_kb += bytes >> 10;
1342 ios->iread_kb += bytes >> 10;
1346 static inline void account_issue(struct blk_io_trace *t,
1347 struct per_cpu_info *pci, int rw)
1349 __account_issue(&pci->io_stats, rw, t->bytes);
1351 if (per_process_stats) {
1352 struct io_stats *ios = find_process_io_stats(t->pid);
1354 __account_issue(ios, rw, t->bytes);
1358 static inline void __account_unplug(struct io_stats *ios, int timer)
1361 ios->timer_unplugs++;
1366 static inline void account_unplug(struct blk_io_trace *t,
1367 struct per_cpu_info *pci, int timer)
1369 __account_unplug(&pci->io_stats, timer);
1371 if (per_process_stats) {
1372 struct io_stats *ios = find_process_io_stats(t->pid);
1374 __account_unplug(ios, timer);
1378 static inline void __account_requeue(struct io_stats *ios,
1379 struct blk_io_trace *t, int rw)
1383 ios->iwrite_kb -= t_kb(t);
1386 ios->iread_kb -= t_kb(t);
1390 static inline void account_requeue(struct blk_io_trace *t,
1391 struct per_cpu_info *pci, int rw)
1393 __account_requeue(&pci->io_stats, t, rw);
1395 if (per_process_stats) {
1396 struct io_stats *ios = find_process_io_stats(t->pid);
1398 __account_requeue(ios, t, rw);
1402 static void log_complete(struct per_dev_info *pdi, struct per_cpu_info *pci,
1403 struct blk_io_trace *t, char *act)
1405 process_fmt(act, pci, t, log_track_complete(pdi, t), 0, NULL);
1408 static void log_insert(struct per_dev_info *pdi, struct per_cpu_info *pci,
1409 struct blk_io_trace *t, char *act)
1411 process_fmt(act, pci, t, log_track_insert(pdi, t), 0, NULL);
1414 static void log_queue(struct per_cpu_info *pci, struct blk_io_trace *t,
1417 process_fmt(act, pci, t, -1, 0, NULL);
1420 static void log_issue(struct per_dev_info *pdi, struct per_cpu_info *pci,
1421 struct blk_io_trace *t, char *act)
1423 process_fmt(act, pci, t, log_track_issue(pdi, t), 0, NULL);
1426 static void log_merge(struct per_dev_info *pdi, struct per_cpu_info *pci,
1427 struct blk_io_trace *t, char *act)
1430 log_track_frontmerge(pdi, t);
1432 process_fmt(act, pci, t, -1ULL, 0, NULL);
1435 static void log_action(struct per_cpu_info *pci, struct blk_io_trace *t,
1438 process_fmt(act, pci, t, -1ULL, 0, NULL);
1441 static void log_generic(struct per_cpu_info *pci, struct blk_io_trace *t,
1444 process_fmt(act, pci, t, -1ULL, 0, NULL);
1447 static void log_unplug(struct per_cpu_info *pci, struct blk_io_trace *t,
1450 process_fmt(act, pci, t, -1ULL, 0, NULL);
1453 static void log_split(struct per_cpu_info *pci, struct blk_io_trace *t,
1456 process_fmt(act, pci, t, -1ULL, 0, NULL);
1459 static void log_pc(struct per_cpu_info *pci, struct blk_io_trace *t, char *act)
1461 unsigned char *buf = (unsigned char *) t + sizeof(*t);
1463 process_fmt(act, pci, t, -1ULL, t->pdu_len, buf);
1466 static void dump_trace_pc(struct blk_io_trace *t, struct per_dev_info *pdi,
1467 struct per_cpu_info *pci)
1469 int w = (t->action & BLK_TC_ACT(BLK_TC_WRITE)) != 0;
1470 int act = t->action & 0xffff;
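/*
 * The low 16 bits of t->action hold the action code (__BLK_TA_*); the
 * upper bits carry the BLK_TC_* category mask, shifted by BLK_TC_SHIFT.
 */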
1473 case __BLK_TA_QUEUE:
1474 log_generic(pci, t, "Q");
1475 account_pc_queue(t, pci, w);
1477 case __BLK_TA_GETRQ:
1478 log_generic(pci, t, "G");
1480 case __BLK_TA_SLEEPRQ:
1481 log_generic(pci, t, "S");
1483 case __BLK_TA_REQUEUE:
1485 * can happen if we miss traces, don't let it go below zero
1488 if (pdi->cur_depth[w])
1489 pdi->cur_depth[w]--;
1490 account_pc_requeue(t, pci, w);
1491 log_generic(pci, t, "R");
1493 case __BLK_TA_ISSUE:
1494 account_pc_issue(t, pci, w);
1495 pdi->cur_depth[w]++;
1496 if (pdi->cur_depth[w] > pdi->max_depth[w])
1497 pdi->max_depth[w] = pdi->cur_depth[w];
1498 log_pc(pci, t, "D");
1500 case __BLK_TA_COMPLETE:
1501 if (pdi->cur_depth[w])
1502 pdi->cur_depth[w]--;
1503 log_pc(pci, t, "C");
1504 account_pc_c(t, pci, w);
1506 case __BLK_TA_INSERT:
1507 log_pc(pci, t, "I");
1510 fprintf(stderr, "Bad pc action %x\n", act);
1515 static void dump_trace_fs(struct blk_io_trace *t, struct per_dev_info *pdi,
1516 struct per_cpu_info *pci)
1518 int w = (t->action & BLK_TC_ACT(BLK_TC_WRITE)) != 0;
1519 int act = t->action & 0xffff;
1522 case __BLK_TA_QUEUE:
1523 log_track_queue(pdi, t);
1524 account_queue(t, pci, w);
1525 log_queue(pci, t, "Q");
1527 case __BLK_TA_INSERT:
1528 log_insert(pdi, pci, t, "I");
1530 case __BLK_TA_BACKMERGE:
1531 account_m(t, pci, w);
1532 log_merge(pdi, pci, t, "M");
1534 case __BLK_TA_FRONTMERGE:
1535 account_m(t, pci, w);
1536 log_merge(pdi, pci, t, "F");
1538 case __BLK_TA_GETRQ:
1539 log_track_getrq(pdi, t);
1540 log_generic(pci, t, "G");
1542 case __BLK_TA_SLEEPRQ:
1543 log_generic(pci, t, "S");
1545 case __BLK_TA_REQUEUE:
1547 * can happen if we miss traces, don't let it go below zero
1550 if (pdi->cur_depth[w])
1551 pdi->cur_depth[w]--;
1552 account_requeue(t, pci, w);
1553 log_queue(pci, t, "R");
1555 case __BLK_TA_ISSUE:
1556 account_issue(t, pci, w);
1557 pdi->cur_depth[w]++;
1558 if (pdi->cur_depth[w] > pdi->max_depth[w])
1559 pdi->max_depth[w] = pdi->cur_depth[w];
1560 log_issue(pdi, pci, t, "D");
1562 case __BLK_TA_COMPLETE:
1563 if (pdi->cur_depth[w])
1564 pdi->cur_depth[w]--;
1565 account_c(t, pci, w, t->bytes);
1566 log_complete(pdi, pci, t, "C");
1569 log_action(pci, t, "P");
1571 case __BLK_TA_UNPLUG_IO:
1572 account_unplug(t, pci, 0);
1573 log_unplug(pci, t, "U");
1575 case __BLK_TA_UNPLUG_TIMER:
1576 account_unplug(t, pci, 1);
1577 log_unplug(pci, t, "UT");
1579 case __BLK_TA_SPLIT:
1580 log_split(pci, t, "X");
1582 case __BLK_TA_BOUNCE:
1583 log_generic(pci, t, "B");
1585 case __BLK_TA_REMAP:
1586 log_generic(pci, t, "A");
1588 case __BLK_TA_DRV_DATA:
1590 /* dump to binary file only */
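/*
 * Driver data payloads are only preserved via -d/--dump-binary; when no
 * dump file was given, have_drv_data triggers a notice at exit that such
 * traces were discarded.
 */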
1593 fprintf(stderr, "Bad fs action %x\n", t->action);
1598 static void dump_trace(struct blk_io_trace *t, struct per_cpu_info *pci,
1599 struct per_dev_info *pdi)
1602 if (t->action == BLK_TN_MESSAGE)
1604 else if (t->action & BLK_TC_ACT(BLK_TC_PC))
1605 dump_trace_pc(t, pdi, pci);
1607 dump_trace_fs(t, pdi, pci);
1611 pdi->first_reported_time = t->time;
1615 output_binary(t, sizeof(*t) + t->pdu_len);
1619 * print in a proper way, not too small and not too big. if more than
1620 * 1,000,000K, turn into M and so on
1622 static char *size_cnv(char *dst, unsigned long long num, int in_kb)
1624 char suff[] = { '\0', 'K', 'M', 'G', 'T', 'P' };
1630 while (num > 1000 * 1000ULL && (i < sizeof(suff) - 1)) {
1635 sprintf(dst, "%'8Lu%c", num, suff[i]);
1639 static void dump_io_stats(struct per_dev_info *pdi, struct io_stats *ios,
1642 static char x[256], y[256];
1644 fprintf(ofp, "%s\n", msg);
1646 fprintf(ofp, " Reads Queued: %s, %siB\t", size_cnv(x, ios->qreads, 0), size_cnv(y, ios->qread_kb, 1));
1647 fprintf(ofp, " Writes Queued: %s, %siB\n", size_cnv(x, ios->qwrites, 0), size_cnv(y, ios->qwrite_kb, 1));
1648 fprintf(ofp, " Read Dispatches: %s, %siB\t", size_cnv(x, ios->ireads, 0), size_cnv(y, ios->iread_kb, 1));
1649 fprintf(ofp, " Write Dispatches: %s, %siB\n", size_cnv(x, ios->iwrites, 0), size_cnv(y, ios->iwrite_kb, 1));
1650 fprintf(ofp, " Reads Requeued: %s\t\t", size_cnv(x, ios->rrqueue, 0));
1651 fprintf(ofp, " Writes Requeued: %s\n", size_cnv(x, ios->wrqueue, 0));
1652 fprintf(ofp, " Reads Completed: %s, %siB\t", size_cnv(x, ios->creads, 0), size_cnv(y, ios->cread_kb, 1));
1653 fprintf(ofp, " Writes Completed: %s, %siB\n", size_cnv(x, ios->cwrites, 0), size_cnv(y, ios->cwrite_kb, 1));
1654 fprintf(ofp, " Read Merges: %s, %siB\t", size_cnv(x, ios->mreads, 0), size_cnv(y, ios->mread_kb, 1));
1655 fprintf(ofp, " Write Merges: %s, %siB\n", size_cnv(x, ios->mwrites, 0), size_cnv(y, ios->mwrite_kb, 1));
1657 fprintf(ofp, " Read depth: %'8u%8c\t", pdi->max_depth[0], ' ');
1658 fprintf(ofp, " Write depth: %'8u\n", pdi->max_depth[1]);
1660 if (ios->qreads_pc || ios->qwrites_pc || ios->ireads_pc || ios->iwrites_pc ||
1661 ios->rrqueue_pc || ios->wrqueue_pc || ios->creads_pc || ios->cwrites_pc) {
1662 fprintf(ofp, " PC Reads Queued: %s, %siB\t", size_cnv(x, ios->qreads_pc, 0), size_cnv(y, ios->qread_kb_pc, 1));
1663 fprintf(ofp, " PC Writes Queued: %s, %siB\n", size_cnv(x, ios->qwrites_pc, 0), size_cnv(y, ios->qwrite_kb_pc, 1));
1664 fprintf(ofp, " PC Read Disp.: %s, %siB\t", size_cnv(x, ios->ireads_pc, 0), size_cnv(y, ios->iread_kb_pc, 1));
1665 fprintf(ofp, " PC Write Disp.: %s, %siB\n", size_cnv(x, ios->iwrites_pc, 0), size_cnv(y, ios->iwrite_kb_pc, 1));
1666 fprintf(ofp, " PC Reads Req.: %s\t\t", size_cnv(x, ios->rrqueue_pc, 0));
1667 fprintf(ofp, " PC Writes Req.: %s\n", size_cnv(x, ios->wrqueue_pc, 0));
1668 fprintf(ofp, " PC Reads Compl.: %s\t\t", size_cnv(x, ios->creads_pc, 0));
1669 fprintf(ofp, " PC Writes Compl.: %s\n", size_cnv(x, ios->cwrites, 0));
1671 fprintf(ofp, " IO unplugs: %'8lu%8c\t", ios->io_unplugs, ' ');
1672 fprintf(ofp, " Timer unplugs: %'8lu\n", ios->timer_unplugs);
1675 static void dump_wait_stats(struct per_process_info *ppi)
1677 unsigned long rawait = ppi->longest_allocation_wait[0] / 1000;
1678 unsigned long rdwait = ppi->longest_dispatch_wait[0] / 1000;
1679 unsigned long rcwait = ppi->longest_completion_wait[0] / 1000;
1680 unsigned long wawait = ppi->longest_allocation_wait[1] / 1000;
1681 unsigned long wdwait = ppi->longest_dispatch_wait[1] / 1000;
1682 unsigned long wcwait = ppi->longest_completion_wait[1] / 1000;
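/*
 * trace timestamps are in nanoseconds, so dividing by 1000 reports the
 * longest waits in microseconds
 */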
1684 fprintf(ofp, " Allocation wait: %'8lu%8c\t", rawait, ' ');
1685 fprintf(ofp, " Allocation wait: %'8lu\n", wawait);
1686 fprintf(ofp, " Dispatch wait: %'8lu%8c\t", rdwait, ' ');
1687 fprintf(ofp, " Dispatch wait: %'8lu\n", wdwait);
1688 fprintf(ofp, " Completion wait: %'8lu%8c\t", rcwait, ' ');
1689 fprintf(ofp, " Completion wait: %'8lu\n", wcwait);
1692 static int ppi_name_compare(const void *p1, const void *p2)
1694 struct per_process_info *ppi1 = *((struct per_process_info **) p1);
1695 struct per_process_info *ppi2 = *((struct per_process_info **) p2);
1698 res = strverscmp(ppi1->ppm->comm, ppi2->ppm->comm);
1700 res = (ppi1->ppm->pid > ppi2->ppm->pid) - (ppi1->ppm->pid < ppi2->ppm->pid);
1705 static void sort_process_list(void)
1707 struct per_process_info **ppis;
1708 struct per_process_info *ppi;
1711 ppis = malloc(ppi_list_entries * sizeof(struct per_process_info *));
1716 ppi = ppi->list_next;
1719 qsort(ppis, ppi_list_entries, sizeof(ppi), ppi_name_compare);
1721 i = ppi_list_entries - 1;
1726 ppi->list_next = ppi_list;
1734 static void show_process_stats(void)
1736 struct per_process_info *ppi;
1738 sort_process_list();
1742 struct process_pid_map *ppm = ppi->ppm;
1745 if (ppi->more_than_one)
1746 sprintf(name, "%s (%u, ...)", ppm->comm, ppm->pid);
1748 sprintf(name, "%s (%u)", ppm->comm, ppm->pid);
1750 dump_io_stats(NULL, &ppi->io_stats, name);
1751 dump_wait_stats(ppi);
1752 ppi = ppi->list_next;
1758 static void show_device_and_cpu_stats(void)
1760 struct per_dev_info *pdi;
1761 struct per_cpu_info *pci;
1762 struct io_stats total, *ios;
1763 unsigned long long rrate, wrate, msec;
1764 int i, j, pci_events;
1765 char line[3 + 8/*cpu*/ + 2 + 32/*dev*/ + 3];
1769 for (pdi = devices, i = 0; i < ndevices; i++, pdi++) {
1771 memset(&total, 0, sizeof(total));
1777 for (pci = pdi->cpus, j = 0; j < pdi->ncpus; j++, pci++) {
1781 ios = &pci->io_stats;
1782 total.qreads += ios->qreads;
1783 total.qwrites += ios->qwrites;
1784 total.creads += ios->creads;
1785 total.cwrites += ios->cwrites;
1786 total.mreads += ios->mreads;
1787 total.mwrites += ios->mwrites;
1788 total.ireads += ios->ireads;
1789 total.iwrites += ios->iwrites;
1790 total.rrqueue += ios->rrqueue;
1791 total.wrqueue += ios->wrqueue;
1792 total.qread_kb += ios->qread_kb;
1793 total.qwrite_kb += ios->qwrite_kb;
1794 total.cread_kb += ios->cread_kb;
1795 total.cwrite_kb += ios->cwrite_kb;
1796 total.iread_kb += ios->iread_kb;
1797 total.iwrite_kb += ios->iwrite_kb;
1798 total.mread_kb += ios->mread_kb;
1799 total.mwrite_kb += ios->mwrite_kb;
1801 total.qreads_pc += ios->qreads_pc;
1802 total.qwrites_pc += ios->qwrites_pc;
1803 total.creads_pc += ios->creads_pc;
1804 total.cwrites_pc += ios->cwrites_pc;
1805 total.ireads_pc += ios->ireads_pc;
1806 total.iwrites_pc += ios->iwrites_pc;
1807 total.rrqueue_pc += ios->rrqueue_pc;
1808 total.wrqueue_pc += ios->wrqueue_pc;
1809 total.qread_kb_pc += ios->qread_kb_pc;
1810 total.qwrite_kb_pc += ios->qwrite_kb_pc;
1811 total.iread_kb_pc += ios->iread_kb_pc;
1812 total.iwrite_kb_pc += ios->iwrite_kb_pc;
1814 total.timer_unplugs += ios->timer_unplugs;
1815 total.io_unplugs += ios->io_unplugs;
1817 snprintf(line, sizeof(line) - 1, "CPU%d (%s):",
1818 j, get_dev_name(pdi, name, sizeof(name)));
1819 dump_io_stats(pdi, ios, line);
1823 if (pci_events > 1) {
1825 snprintf(line, sizeof(line) - 1, "Total (%s):",
1826 get_dev_name(pdi, name, sizeof(name)));
1827 dump_io_stats(NULL, &total, line);
1831 msec = (pdi->last_reported_time - pdi->first_reported_time) / 1000000;
1833 rrate = 1000 * total.cread_kb / msec;
1834 wrate = 1000 * total.cwrite_kb / msec;
1837 fprintf(ofp, "\nThroughput (R/W): %'LuKiB/s / %'LuKiB/s\n",
1839 fprintf(ofp, "Events (%s): %'Lu entries\n",
1840 get_dev_name(pdi, line, sizeof(line)), pdi->events);
1842 collect_pdi_skips(pdi);
1843 if (!pdi->skips && !pdi->events)
1846 ratio = 100.0 * ((double)pdi->seq_skips /
1847 (double)(pdi->events + pdi->seq_skips));
1848 fprintf(ofp, "Skips: %'lu forward (%'llu - %5.1lf%%)\n",
1849 pdi->skips, pdi->seq_skips, ratio);
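/*
 * genesis_time is the smallest timestamp seen on the pending trace list;
 * it is subtracted from every event (see sort_entries() and handle()) so
 * reported times start at zero, and abs_start_time is shifted by the same
 * delta to stay aligned with wall-clock time.
 */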
1853 static void find_genesis(void)
1855 struct trace *t = trace_list;
1857 genesis_time = -1ULL;
1859 if (t->bit->time < genesis_time)
1860 genesis_time = t->bit->time;
1865 /* The time stamp record will usually be the first
1866 * record in the trace, but not always.
1869 && start_timestamp != genesis_time) {
1870 long delta = genesis_time - start_timestamp;
1872 abs_start_time.tv_sec += SECONDS(delta);
1873 abs_start_time.tv_nsec += NANO_SECONDS(delta);
1874 if (abs_start_time.tv_nsec < 0) {
1875 abs_start_time.tv_nsec += 1000000000;
1876 abs_start_time.tv_sec -= 1;
1878 if (abs_start_time.tv_nsec > 1000000000) {
1879 abs_start_time.tv_nsec -= 1000000000;
1880 abs_start_time.tv_sec += 1;
1885 static inline int check_stopwatch(struct blk_io_trace *bit)
1887 if (bit->time < stopwatch_end &&
1888 bit->time >= stopwatch_start)
1895 * return youngest entry read
1897 static int sort_entries(unsigned long long *youngest)
1899 struct per_dev_info *pdi = NULL;
1900 struct per_cpu_info *pci = NULL;
1907 while ((t = trace_list) != NULL) {
1908 struct blk_io_trace *bit = t->bit;
1910 trace_list = t->next;
1912 bit->time -= genesis_time;
1914 if (bit->time < *youngest || !*youngest)
1915 *youngest = bit->time;
1917 if (!pdi || pdi->dev != bit->device) {
1918 pdi = get_dev_info(bit->device);
1922 if (!pci || pci->cpu != bit->cpu)
1923 pci = get_cpu_info(pdi, bit->cpu);
1925 if (bit->sequence < pci->smallest_seq_read)
1926 pci->smallest_seq_read = bit->sequence;
1928 if (check_stopwatch(bit)) {
1934 if (trace_rb_insert_sort(t))
1942 * to continue, we must have traces from all online cpus in the tree
1944 static int check_cpu_map(struct per_dev_info *pdi)
1946 unsigned long *cpu_map;
1953 * create a map of the cpus we have traces for
1955 cpu_map = malloc(pdi->cpu_map_max / sizeof(long));
memset(cpu_map, 0, pdi->cpu_map_max / sizeof(long));
1956 n = rb_first(&rb_sort_root);
1958 __t = rb_entry(n, struct trace, rb_node);
1959 cpu = __t->bit->cpu;
1961 cpu_map[CPU_IDX(cpu)] |= (1UL << CPU_BIT(cpu));
1966 * we can't continue if pdi->cpu_map has entries set that we don't
1967 * have in the sort rbtree. the opposite is not a problem, though
1970 for (i = 0; i < pdi->cpu_map_max / CPUS_PER_LONG; i++) {
1971 if (pdi->cpu_map[i] & ~(cpu_map[i])) {
1981 static int check_sequence(struct per_dev_info *pdi, struct trace *t, int force)
1983 struct blk_io_trace *bit = t->bit;
1984 unsigned long expected_sequence;
1985 struct per_cpu_info *pci;
1988 pci = get_cpu_info(pdi, bit->cpu);
1989 expected_sequence = pci->last_sequence + 1;
1991 if (!expected_sequence) {
1993 * 1 should be the first entry, just allow it
1995 if (bit->sequence == 1)
1997 if (bit->sequence == pci->smallest_seq_read)
2000 return check_cpu_map(pdi);
2003 if (bit->sequence == expected_sequence)
2007 * we may not have seen that sequence yet. if we are not doing
2008 * the final run, break and wait for more entries.
2010 if (expected_sequence < pci->smallest_seq_read) {
2011 __t = trace_rb_find_last(pdi, pci, expected_sequence);
2015 __put_trace_last(pdi, __t);
2017 } else if (!force) {
2021 if (check_current_skips(pci, bit->sequence))
2024 if (expected_sequence < bit->sequence)
2025 insert_skip(pci, expected_sequence, bit->sequence - 1);
2030 static void show_entries_rb(int force)
2032 struct per_dev_info *pdi = NULL;
2033 struct per_cpu_info *pci = NULL;
2034 struct blk_io_trace *bit;
2038 while ((n = rb_first(&rb_sort_root)) != NULL) {
2039 if (is_done() && !force && !pipeline)
2042 t = rb_entry(n, struct trace, rb_node);
2045 if (read_sequence - t->read_sequence < 1 && !force)
2048 if (!pdi || pdi->dev != bit->device) {
2049 pdi = get_dev_info(bit->device);
2054 fprintf(stderr, "Unknown device ID? (%d,%d)\n",
2055 MAJOR(bit->device), MINOR(bit->device));
2059 if (check_sequence(pdi, t, force))
2062 if (!force && bit->time > last_allowed_time)
2065 check_time(pdi, bit);
2067 if (!pci || pci->cpu != bit->cpu)
2068 pci = get_cpu_info(pdi, bit->cpu);
2070 pci->last_sequence = bit->sequence;
2074 if (bit->action & (act_mask << BLK_TC_SHIFT))
2075 dump_trace(bit, pci, pdi);
2081 static int read_data(int fd, void *buffer, int bytes, int block, int *fdblock)
2083 int ret, bytes_left, fl;
2086 if (block != *fdblock) {
2087 fl = fcntl(fd, F_GETFL);
2091 fcntl(fd, F_SETFL, fl | O_NONBLOCK);
2094 fcntl(fd, F_SETFL, fl & ~O_NONBLOCK);
2100 while (bytes_left > 0) {
2101 ret = read(fd, p, bytes_left);
2105 if (errno != EAGAIN) {
2111 * never do partial reads. we can return if we
2112 * didn't read anything and we should not block,
2113 * otherwise wait for data
2115 if ((bytes_left == bytes) && !block)
2129 static inline __u16 get_pdulen(struct blk_io_trace *bit)
2132 return bit->pdu_len;
2134 return __bswap_16(bit->pdu_len);
2137 static inline __u32 get_magic(struct blk_io_trace *bit)
2142 return __bswap_32(bit->magic);
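/*
 * data_is_native is decided from the first trace's magic (see
 * check_data_endianness()); when the trace was recorded with the opposite
 * byte order, on-disk fields are converted via the byte-swap helpers above.
 */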
2145 static int read_events(int fd, int always_block, int *fdblock)
2147 struct per_dev_info *pdi = NULL;
2148 unsigned int events = 0;
2150 while (!is_done() && events < rb_batch) {
2151 struct blk_io_trace *bit;
2153 int pdu_len, should_block, ret;
2158 should_block = !events || always_block;
2160 ret = read_data(fd, bit, sizeof(*bit), should_block, fdblock);
2163 if (!events && ret < 0)
2169 * look at first trace to check whether we need to convert
2170 * data in the future
2172 if (data_is_native == -1 && check_data_endianness(bit->magic))
2175 magic = get_magic(bit);
2176 if ((magic & 0xffffff00) != BLK_IO_TRACE_MAGIC) {
2177 fprintf(stderr, "Bad magic %x\n", magic);
2181 pdu_len = get_pdulen(bit);
2183 void *ptr = realloc(bit, sizeof(*bit) + pdu_len);
2185 if (read_data(fd, ptr + sizeof(*bit), pdu_len, 1, fdblock)) {
2195 if (verify_trace(bit)) {
2201 * not a real trace, so grab and handle it here
2203 if (bit->action & BLK_TC_ACT(BLK_TC_NOTIFY) && bit->action != BLK_TN_MESSAGE) {
2205 output_binary(bit, sizeof(*bit) + bit->pdu_len);
2210 memset(t, 0, sizeof(*t));
2212 t->read_sequence = read_sequence;
2214 t->next = trace_list;
2217 if (!pdi || pdi->dev != bit->device)
2218 pdi = get_dev_info(bit->device);
2220 if (bit->time > pdi->last_read_time)
2221 pdi->last_read_time = bit->time;
2230 * Managing input streams
2234 struct ms_stream *next;
2235 struct trace *first, *last;
2236 struct per_dev_info *pdi;
2240 #define MS_HASH(d, c) ((MAJOR(d) & 0xff) ^ (MINOR(d) & 0xff) ^ ((c) & 0xff))
2242 struct ms_stream *ms_head;
2243 struct ms_stream *ms_hash[256];
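/*
 * One ms_stream per (device, cpu) input file. Streams are kept on a list
 * ordered by the timestamp of each stream's head trace, so handle() always
 * consumes the globally oldest event; ms_prime() refills a stream from its
 * file in rb_batch sized chunks.
 */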
2245 static void ms_sort(struct ms_stream *msp);
2246 static int ms_prime(struct ms_stream *msp);
2248 static inline struct trace *ms_peek(struct ms_stream *msp)
2250 return (msp == NULL) ? NULL : msp->first;
2253 static inline __u64 ms_peek_time(struct ms_stream *msp)
2255 return ms_peek(msp)->bit->time;
2258 static inline void ms_resort(struct ms_stream *msp)
2260 if (msp->next && ms_peek_time(msp) > ms_peek_time(msp->next)) {
2261 ms_head = msp->next;
2267 static inline void ms_deq(struct ms_stream *msp)
2269 msp->first = msp->first->next;
2272 if (!ms_prime(msp)) {
2273 ms_head = msp->next;
2282 static void ms_sort(struct ms_stream *msp)
2284 __u64 msp_t = ms_peek_time(msp);
2285 struct ms_stream *this_msp = ms_head;
2287 if (this_msp == NULL)
2289 else if (msp_t < ms_peek_time(this_msp)) {
2290 msp->next = this_msp;
2294 while (this_msp->next && ms_peek_time(this_msp->next) < msp_t)
2295 this_msp = this_msp->next;
2297 msp->next = this_msp->next;
2298 this_msp->next = msp;
2302 static int ms_prime(struct ms_stream *msp)
2307 struct per_dev_info *pdi = msp->pdi;
2308 struct per_cpu_info *pci = get_cpu_info(pdi, msp->cpu);
2309 struct blk_io_trace *bit = NULL;
2310 int ret, pdu_len, ndone = 0;
2312 for (i = 0; !is_done() && pci->fd >= 0 && i < rb_batch; i++) {
2314 ret = read_data(pci->fd, bit, sizeof(*bit), 1, &pci->fdblock);
2318 if (data_is_native == -1 && check_data_endianness(bit->magic))
2321 magic = get_magic(bit);
2322 if ((magic & 0xffffff00) != BLK_IO_TRACE_MAGIC) {
2323 fprintf(stderr, "Bad magic %x\n", magic);
2328 pdu_len = get_pdulen(bit);
2330 void *ptr = realloc(bit, sizeof(*bit) + pdu_len);
2331 ret = read_data(pci->fd, ptr + sizeof(*bit), pdu_len,
2343 if (verify_trace(bit))
2346 if (bit->action & BLK_TC_ACT(BLK_TC_NOTIFY) && bit->action != BLK_TN_MESSAGE) {
2348 output_binary(bit, sizeof(*bit) + bit->pdu_len);
2355 if (bit->time > pdi->last_read_time)
2356 pdi->last_read_time = bit->time;
2359 memset(t, 0, sizeof(*t));
2362 if (msp->first == NULL)
2363 msp->first = msp->last = t;
2365 msp->last->next = t;
2375 if (bit) bit_free(bit);
2377 cpu_mark_offline(pdi, pci->cpu);
2384 static struct ms_stream *ms_alloc(struct per_dev_info *pdi, int cpu)
2386 struct ms_stream *msp = malloc(sizeof(*msp));
2389 msp->first = msp->last = NULL;
2399 static int setup_file(struct per_dev_info *pdi, int cpu)
2404 struct per_cpu_info *pci = get_cpu_info(pdi, cpu);
2409 p = strdup(pdi->name);
2411 if (strcmp(dname, ".")) {
2413 p = strdup(pdi->name);
2414 strcpy(pdi->name, basename(p));
2419 len = sprintf(pci->fname, "%s/", input_dir);
2421 snprintf(pci->fname + len, sizeof(pci->fname)-1-len,
2422 "%s.blktrace.%d", pdi->name, pci->cpu);
2423 if (stat(pci->fname, &st) < 0)
2428 pci->fd = open(pci->fname, O_RDONLY);
2434 printf("Input file %s added\n", pci->fname);
2435 cpu_mark_online(pdi, pci->cpu);
2438 ms_alloc(pdi, pci->cpu);
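/*
 * Input files are named <device>.blktrace.<cpu>; do_file() keeps calling
 * setup_file() with increasing cpu numbers until the stat() above fails,
 * so every per-cpu file found gets its own stream.
 */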
2443 static int handle(struct ms_stream *msp)
2446 struct per_dev_info *pdi;
2447 struct per_cpu_info *pci;
2448 struct blk_io_trace *bit;
2454 pci = get_cpu_info(pdi, msp->cpu);
2456 bit->time -= genesis_time;
2458 if (t->bit->time > stopwatch_end)
2461 pdi->last_reported_time = bit->time;
2462 if ((bit->action & (act_mask << BLK_TC_SHIFT)) &&
2463 t->bit->time >= stopwatch_start)
2464 dump_trace(bit, pci, pdi);
2469 trace_rb_insert_last(pdi, t);
2479 * Check if we need to sanitize the name. We allow 'foo', or if foo.blktrace.X
2480 * is given, then strip back down to 'foo' to avoid missing files.
2482 static int name_fixup(char *name)
2489 b = strstr(name, ".blktrace.");
2496 static int do_file(void)
2499 struct per_dev_info *pdi;
2502 * first prepare all files for reading
2504 for (i = 0; i < ndevices; i++) {
2506 ret = name_fixup(pdi->name);
2510 for (cpu = 0; setup_file(pdi, cpu); cpu++)
2515 * Get the initial time stamp
2518 genesis_time = ms_peek_time(ms_head);
2521 * Keep processing traces while any are left
2523 while (!is_done() && ms_head && handle(ms_head))
2529 static void do_pipe(int fd)
2531 unsigned long long youngest;
2532 int events, fdblock;
2534 last_allowed_time = -1ULL;
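/*
 * Pipe input cannot be merge-sorted per file up front: events are read in
 * batches, time-sorted into rb_sort_root, and show_entries_rb() only emits
 * entries no newer than last_allowed_time, so slightly out-of-order
 * arrivals across CPUs can still be interleaved correctly.
 */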
2536 while ((events = read_events(fd, 0, &fdblock)) > 0) {
2540 smallest_seq_read = -1U;
2543 if (sort_entries(&youngest))
2546 if (youngest > stopwatch_end)
2552 if (rb_sort_entries)
2556 static int do_fifo(void)
2560 if (!strcmp(pipename, "-"))
2561 fd = dup(STDIN_FILENO);
2563 fd = open(pipename, O_RDONLY);
2566 perror("dup stdin");
2575 static void show_stats(void)
2584 if (per_process_stats)
2585 show_process_stats();
2587 if (per_device_and_cpu_stats)
2588 show_device_and_cpu_stats();
2593 static void handle_sigint(__attribute__((__unused__)) int sig)
2599 * Extract start and duration times from a string, allowing
2600 * us to specify a time interval of interest within a trace.
2601 * Format: "duration" (start is zero) or "start:duration".
2603 static int find_stopwatch_interval(char *string)
2608 value = strtod(string, &sp);
2610 fprintf(stderr,"Invalid stopwatch timer: %s\n", string);
2614 stopwatch_start = DOUBLE_TO_NANO_ULL(value);
2616 value = strtod(string, &sp);
2617 if (sp == string || *sp != '\0') {
2618 fprintf(stderr,"Invalid stopwatch duration time: %s\n",
2622 } else if (*sp != '\0') {
2623 fprintf(stderr,"Invalid stopwatch start timer: %s\n", string);
2626 stopwatch_end = DOUBLE_TO_NANO_ULL(value);
2627 if (stopwatch_end <= stopwatch_start) {
2628 fprintf(stderr, "Invalid stopwatch interval: %Lu -> %Lu\n",
2629 stopwatch_start, stopwatch_end);
2636 static int is_pipe(const char *str)
2640 if (!strcmp(str, "-"))
2642 if (!stat(str, &st) && S_ISFIFO(st.st_mode))
2648 #define S_OPTS "a:A:b:D:d:f:F:hi:o:Oqstw:vV"
2649 static char usage_str[] = "\n\n" \
2650 "-i <file> | --input=<file>\n" \
2651 "[ -a <action field> | --act-mask=<action field> ]\n" \
2652 "[ -A <action mask> | --set-mask=<action mask> ]\n" \
2653 "[ -b <traces> | --batch=<traces> ]\n" \
2654 "[ -d <file> | --dump-binary=<file> ]\n" \
2655 "[ -D <dir> | --input-directory=<dir> ]\n" \
2656 "[ -f <format> | --format=<format> ]\n" \
2657 "[ -F <spec> | --format-spec=<spec> ]\n" \
2658 "[ -h | --hash-by-name ]\n" \
2659 "[ -o <file> | --output=<file> ]\n" \
2660 "[ -O | --no-text-output ]\n" \
2661 "[ -q | --quiet ]\n" \
2662 "[ -s | --per-program-stats ]\n" \
2663 "[ -t | --track-ios ]\n" \
2664 "[ -w <time> | --stopwatch=<time> ]\n" \
2665 "[ -v | --verbose ]\n" \
2666 "[ -V | --version ]\n\n" \
2667 "\t-b stdin read batching\n" \
2668 "\t-d Output file. If specified, binary data is written to file\n" \
2669 "\t-D Directory to prepend to input file names\n" \
2670 "\t-f Output format. Customize the output format. The format field\n" \
2671 "\t identifies can be found in the documentation\n" \
2672 "\t-F Format specification. Can be found in the documentation\n" \
2673 "\t-h Hash processes by name, not pid\n" \
2674 "\t-i Input file containing trace data, or '-' for stdin\n" \
2675 "\t-o Output file. If not given, output is stdout\n" \
2676 "\t-O Do NOT output text data\n" \
2677 "\t-q Quiet. Don't display any stats at the end of the trace\n" \
2678 "\t-s Show per-program io statistics\n" \
2679 "\t-t Track individual ios. Will tell you the time a request took\n" \
2680 "\t to get queued, to get dispatched, and to get completed\n" \
2681 "\t-w Only parse data between the given time interval in seconds.\n" \
2682 "\t If 'start' isn't given, blkparse defaults the start time to 0\n" \
2683 "\t-v More verbose for marginal errors\n" \
2684 "\t-V Print program version info\n\n";
2686 static void usage(char *prog)
2688 fprintf(stderr, "Usage: %s %s %s", prog, blkparse_version, usage_str);
2691 int main(int argc, char *argv[])
2693 int i, c, ret, mode;
2694 int act_mask_tmp = 0;
2695 char *ofp_buffer = NULL;
2696 char *bin_ofp_buffer = NULL;
2698 while ((c = getopt_long(argc, argv, S_OPTS, l_opts, NULL)) != -1) {
2701 i = find_mask_map(optarg);
2703 fprintf(stderr,"Invalid action mask %s\n",
2711 if ((sscanf(optarg, "%x", &i) != 1) ||
2712 !valid_act_opt(i)) {
2714 "Invalid set action mask %s/0x%x\n",
2721 if (is_pipe(optarg) && !pipeline) {
2723 pipename = strdup(optarg);
2724 } else if (resize_devices(optarg) != 0)
2731 output_name = optarg;
2737 rb_batch = atoi(optarg);
2739 rb_batch = RB_BATCH_DEFAULT;
2742 per_process_stats = 1;
2748 per_device_and_cpu_stats = 0;
2751 if (find_stopwatch_interval(optarg) != 0)
2755 set_all_format_specs(optarg);
2758 if (add_format_spec(optarg) != 0)
2762 ppi_hash_by_pid = 0;
2768 printf("%s version %s\n", argv[0], blkparse_version);
2771 dump_binary = optarg;
2779 while (optind < argc) {
2780 if (is_pipe(argv[optind]) && !pipeline) {
2782 pipename = strdup(argv[optind]);
2783 } else if (resize_devices(argv[optind]) != 0)
2788 if (!pipeline && !ndevices) {
2793 if (act_mask_tmp != 0)
2794 act_mask = act_mask_tmp;
2796 memset(&rb_sort_root, 0, sizeof(rb_sort_root));
2798 signal(SIGINT, handle_sigint);
2799 signal(SIGHUP, handle_sigint);
2800 signal(SIGTERM, handle_sigint);
2802 setlocale(LC_NUMERIC, "en_US");
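/*
 * The en_US numeric locale enables the %' printf flag used throughout the
 * stats output (e.g. "%'8Lu"), giving grouped thousands separators.
 */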
2806 ofp = fdopen(STDOUT_FILENO, "w");
2811 snprintf(ofname, sizeof(ofname) - 1, "%s", output_name);
2812 ofp = fopen(ofname, "w");
2821 ofp_buffer = malloc(4096);
2822 if (setvbuf(ofp, ofp_buffer, mode, 4096)) {
2829 dump_fp = fopen(dump_binary, "w");
2831 perror(dump_binary);
2835 bin_ofp_buffer = malloc(128 * 1024);
2836 if (setvbuf(dump_fp, bin_ofp_buffer, _IOFBF, 128 * 1024)) {
2837 perror("setvbuf binary");
2850 if (have_drv_data && !dump_binary)
2851 printf("\ndiscarded traces containing low-level device driver "
2852 "specific data (only available in binary output)\n");
2858 if (bin_ofp_buffer) {
2860 free(bin_ofp_buffer);