/*
 * block queue tracing parse application
 *
 * Copyright (C) 2005 Jens Axboe <axboe@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <stdio.h>
#include <fcntl.h>
#include <stdlib.h>
#include <string.h>
#include <getopt.h>
#include <errno.h>
#include <signal.h>
#include <locale.h>
#include <libgen.h>

#include "blktrace.h"
#include "rbtree.h"
#include "jhash.h"

static char blkparse_version[] = "0.99";
struct per_dev_info {
	dev_t dev;
	char *name;

	int backwards;
	unsigned long long events;
	unsigned long long first_reported_time;
	unsigned long long last_reported_time;
	unsigned long long last_read_time;
	struct io_stats io_stats;
	unsigned long last_sequence;
	unsigned long skips, nskips;
	unsigned long long seq_skips, seq_nskips;

	struct rb_root rb_last;
	unsigned long rb_last_entries;

	struct rb_root rb_track;

	int nfiles;
	int ncpus;

	unsigned long *cpu_map;
	unsigned int cpu_map_max;

	struct per_cpu_info *cpus;
};
struct per_process_info {
	char name[16];
	__u32 pid;
	struct io_stats io_stats;
	struct per_process_info *hash_next, *list_next;
	int more_than_one;

	/*
	 * individual io stats
	 */
	unsigned long long longest_allocation_wait[2];
	unsigned long long longest_dispatch_wait[2];
	unsigned long long longest_completion_wait[2];
};
#define PPI_HASH_SHIFT	(8)
#define PPI_HASH_SIZE	(1 << PPI_HASH_SHIFT)
#define PPI_HASH_MASK	(PPI_HASH_SIZE - 1)
static struct per_process_info *ppi_hash_table[PPI_HASH_SIZE];
static struct per_process_info *ppi_list;
static int ppi_list_entries;
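
/*
 * note that processes are kept in two structures at once: the hash
 * table above gives fast lookup while parsing, and the flat ppi_list
 * (with its running entry count) lets the final stats pass sort and
 * walk every process
 */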
#define S_OPTS	"a:A:i:o:b:stqw:f:F:vVhD:"
static struct option l_opts[] = {
	{ .name = "act-mask",		.has_arg = required_argument,	.flag = NULL,	.val = 'a' },
	{ .name = "set-mask",		.has_arg = required_argument,	.flag = NULL,	.val = 'A' },
	{ .name = "input",		.has_arg = required_argument,	.flag = NULL,	.val = 'i' },
	{ .name = "output",		.has_arg = required_argument,	.flag = NULL,	.val = 'o' },
	{ .name = "batch",		.has_arg = required_argument,	.flag = NULL,	.val = 'b' },
	{ .name = "per-program-stats",	.has_arg = no_argument,		.flag = NULL,	.val = 's' },
	{ .name = "track-ios",		.has_arg = no_argument,		.flag = NULL,	.val = 't' },
	{ .name = "quiet",		.has_arg = no_argument,		.flag = NULL,	.val = 'q' },
	{ .name = "stopwatch",		.has_arg = required_argument,	.flag = NULL,	.val = 'w' },
	{ .name = "format",		.has_arg = required_argument,	.flag = NULL,	.val = 'f' },
	{ .name = "format-spec",	.has_arg = required_argument,	.flag = NULL,	.val = 'F' },
	{ .name = "hash-by-name",	.has_arg = no_argument,		.flag = NULL,	.val = 'n' },
	{ .name = "verbose",		.has_arg = no_argument,		.flag = NULL,	.val = 'v' },
	{ .name = "version",		.has_arg = no_argument,		.flag = NULL,	.val = 'V' },
	{ .name = "input-directory",	.has_arg = required_argument,	.flag = NULL,	.val = 'D' },
	{ .name = NULL }
};
/*
 * for sorting the displayed output
 */
struct trace {
	struct blk_io_trace *bit;
	struct rb_node rb_node;
	struct trace *next;
};

static struct rb_root rb_sort_root;
static unsigned long rb_sort_entries;

static struct trace *trace_list;

/*
 * allocation cache for trace and blk_io_trace structs
 */
static struct blk_io_trace *bit_alloc_list;
static struct trace *t_alloc_list;
/*
 * for tracking individual ios
 */
struct io_track {
	struct rb_node rb_node;

	__u64 sector;
	__u32 pid;
	char comm[16];
	unsigned long long allocation_time;
	unsigned long long queue_time;
	unsigned long long dispatch_time;
	unsigned long long completion_time;
};

static int ndevices;
static struct per_dev_info *devices;
static char *get_dev_name(struct per_dev_info *, char *, int);
FILE *ofp = NULL;
static char *output_name;
static char *input_dir;

static unsigned long long genesis_time;
static unsigned long long last_allowed_time;
static unsigned int smallest_seq_read;
static unsigned long long stopwatch_start;	/* start from zero by default */
static unsigned long long stopwatch_end = -1ULL;	/* "infinity" */

static int per_process_stats;
static int per_device_and_cpu_stats = 1;
static int track_ios;
static int ppi_hash_by_pid = 1;
static int verbose;
static unsigned int act_mask = -1U;
static int stats_printed;

static unsigned int t_alloc_cache;
static unsigned int bit_alloc_cache;

#define RB_BATCH_DEFAULT	(512)
static unsigned int rb_batch = RB_BATCH_DEFAULT;

static int pipeline;
#define is_done()	(*(volatile int *)(&done))
static volatile int done;

#define JHASH_RANDOM	(0x3af5f2ee)

#define CPUS_PER_LONG	(8 * sizeof(unsigned long))
#define CPU_IDX(cpu)	((cpu) / CPUS_PER_LONG)
#define CPU_BIT(cpu)	((cpu) & (CPUS_PER_LONG - 1))
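
/*
 * worked example, assuming 64-bit longs (CPUS_PER_LONG == 64): cpu 70
 * maps to word CPU_IDX(70) == 1 at bit CPU_BIT(70) == 6, so marking
 * it online does cpu_map[1] |= 1UL << 6
 */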
static void cpu_mark_online(struct per_dev_info *pdi, unsigned int cpu)
{
	if (cpu >= pdi->cpu_map_max || !pdi->cpu_map) {
		int new_max = (cpu + CPUS_PER_LONG) & ~(CPUS_PER_LONG - 1);
		unsigned long *map = malloc(new_max / sizeof(long));

		memset(map, 0, new_max / sizeof(long));

		if (pdi->cpu_map) {
			memcpy(map, pdi->cpu_map, pdi->cpu_map_max / sizeof(long));
			free(pdi->cpu_map);
		}

		pdi->cpu_map = map;
		pdi->cpu_map_max = new_max;
	}

	pdi->cpu_map[CPU_IDX(cpu)] |= (1UL << CPU_BIT(cpu));
}

static inline void cpu_mark_offline(struct per_dev_info *pdi, int cpu)
{
	pdi->cpu_map[CPU_IDX(cpu)] &= ~(1UL << CPU_BIT(cpu));
}

static inline int cpu_is_online(struct per_dev_info *pdi, int cpu)
{
	return (pdi->cpu_map[CPU_IDX(cpu)] & (1UL << CPU_BIT(cpu))) != 0;
}
static inline int ppi_hash_pid(__u32 pid)
{
	return jhash_1word(pid, JHASH_RANDOM) & PPI_HASH_MASK;
}

static inline int ppi_hash_name(const char *name)
{
	return jhash(name, 16, JHASH_RANDOM) & PPI_HASH_MASK;
}

static inline int ppi_hash(struct per_process_info *ppi)
{
	if (ppi_hash_by_pid)
		return ppi_hash_pid(ppi->pid);

	return ppi_hash_name(ppi->name);
}

static inline void add_process_to_hash(struct per_process_info *ppi)
{
	const int hash_idx = ppi_hash(ppi);

	ppi->hash_next = ppi_hash_table[hash_idx];
	ppi_hash_table[hash_idx] = ppi;
}

static inline void add_process_to_list(struct per_process_info *ppi)
{
	ppi->list_next = ppi_list;
	ppi_list = ppi;
	ppi_list_entries++;
}
static struct per_process_info *find_process_by_name(char *name)
{
	const int hash_idx = ppi_hash_name(name);
	struct per_process_info *ppi;

	for (ppi = ppi_hash_table[hash_idx]; ppi; ppi = ppi->hash_next)
		if (!strcmp(ppi->name, name))
			return ppi;

	return NULL;
}

static struct per_process_info *find_process_by_pid(__u32 pid)
{
	const int hash_idx = ppi_hash_pid(pid);
	struct per_process_info *ppi;

	for (ppi = ppi_hash_table[hash_idx]; ppi; ppi = ppi->hash_next)
		if (ppi->pid == pid)
			return ppi;

	return NULL;
}

static struct per_process_info *find_process(__u32 pid, char *name)
{
	struct per_process_info *ppi;

	if (ppi_hash_by_pid)
		return find_process_by_pid(pid);

	ppi = find_process_by_name(name);
	if (ppi && ppi->pid != pid)
		ppi->more_than_one = 1;

	return ppi;
}
static inline int trace_rb_insert(struct trace *t, struct rb_root *root,
				  int check_time)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct trace *__t;

	while (*p) {
		parent = *p;
		__t = rb_entry(parent, struct trace, rb_node);

		if (check_time) {
			if (t->bit->time < __t->bit->time) {
				p = &(*p)->rb_left;
				continue;
			} else if (t->bit->time > __t->bit->time) {
				p = &(*p)->rb_right;
				continue;
			}
		}
		if (t->bit->device < __t->bit->device)
			p = &(*p)->rb_left;
		else if (t->bit->device > __t->bit->device)
			p = &(*p)->rb_right;
		else if (t->bit->sequence < __t->bit->sequence)
			p = &(*p)->rb_left;
		else	/* >= sequence */
			p = &(*p)->rb_right;
	}

	rb_link_node(&t->rb_node, parent, p);
	rb_insert_color(&t->rb_node, root);
	return 0;
}
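
/*
 * the sort key is (time, device, sequence) when check_time is set.
 * equal keys fall to the right, so the insert always succeeds and
 * the wrappers below treat the 0 return as "inserted"
 */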
static inline int trace_rb_insert_sort(struct trace *t)
{
	if (!trace_rb_insert(t, &rb_sort_root, 1)) {
		rb_sort_entries++;
		return 0;
	}

	return 1;
}

static inline int trace_rb_insert_last(struct per_dev_info *pdi, struct trace *t)
{
	if (!trace_rb_insert(t, &pdi->rb_last, 1)) {
		pdi->rb_last_entries++;
		return 0;
	}

	return 1;
}
static struct trace *trace_rb_find(dev_t device, unsigned long sequence,
				   struct rb_root *root, int order)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct trace *__t;

	while (n) {
		__t = rb_entry(n, struct trace, rb_node);
		prev = n;

		if (device < __t->bit->device)
			n = n->rb_left;
		else if (device > __t->bit->device)
			n = n->rb_right;
		else if (sequence < __t->bit->sequence)
			n = n->rb_left;
		else if (sequence > __t->bit->sequence)
			n = n->rb_right;
		else
			return __t;
	}

	if (order && prev) {
		int max = rb_batch;

		/*
		 * hack - the list may not be sequence ordered because some
		 * events don't have sequence and time matched. so we end up
		 * being a little off in the rb lookup here, because we don't
		 * know the time we are looking for. compensate by browsing
		 * a little ahead from the last entry to find the match
		 */
		while (((n = rb_next(prev)) != NULL) && max--) {
			__t = rb_entry(n, struct trace, rb_node);

			if (__t->bit->device == device &&
			    __t->bit->sequence == sequence)
				return __t;

			prev = n;
		}
	}

	return NULL;
}

static inline struct trace *trace_rb_find_sort(dev_t dev, unsigned long seq)
{
	return trace_rb_find(dev, seq, &rb_sort_root, 1);
}

static inline struct trace *trace_rb_find_last(struct per_dev_info *pdi,
					       unsigned long seq)
{
	return trace_rb_find(pdi->dev, seq, &pdi->rb_last, 0);
}
static inline int track_rb_insert(struct per_dev_info *pdi, struct io_track *iot)
{
	struct rb_node **p = &pdi->rb_track.rb_node;
	struct rb_node *parent = NULL;
	struct io_track *__iot;

	while (*p) {
		parent = *p;
		__iot = rb_entry(parent, struct io_track, rb_node);

		if (iot->sector < __iot->sector)
			p = &(*p)->rb_left;
		else if (iot->sector > __iot->sector)
			p = &(*p)->rb_right;
		else {
			fprintf(stderr,
				"sector alias (%Lu) on device %d,%d!\n",
				(unsigned long long) iot->sector,
				MAJOR(pdi->dev), MINOR(pdi->dev));
			return 1;
		}
	}

	rb_link_node(&iot->rb_node, parent, p);
	rb_insert_color(&iot->rb_node, &pdi->rb_track);
	return 0;
}

static struct io_track *__find_track(struct per_dev_info *pdi, __u64 sector)
{
	struct rb_node *n = pdi->rb_track.rb_node;
	struct io_track *__iot;

	while (n) {
		__iot = rb_entry(n, struct io_track, rb_node);

		if (sector < __iot->sector)
			n = n->rb_left;
		else if (sector > __iot->sector)
			n = n->rb_right;
		else
			return __iot;
	}

	return NULL;
}
static struct io_track *find_track(struct per_dev_info *pdi, __u32 pid,
				   char *comm, __u64 sector)
{
	struct io_track *iot;

	iot = __find_track(pdi, sector);
	if (!iot) {
		iot = malloc(sizeof(*iot));
		iot->pid = pid;
		memcpy(iot->comm, comm, sizeof(iot->comm));
		iot->sector = sector;
		track_rb_insert(pdi, iot);
	}

	return iot;
}
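
/*
 * unlike __find_track(), find_track() allocates and inserts a fresh
 * node on a lookup miss, so callers such as log_track_getrq() below
 * can stamp timestamps without checking for NULL
 */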
static void log_track_frontmerge(struct per_dev_info *pdi,
				 struct blk_io_trace *t)
{
	struct io_track *iot;

	if (!track_ios)
		return;

	iot = __find_track(pdi, t->sector + t_sec(t));
	if (!iot) {
		if (verbose)
			fprintf(stderr, "merge not found for (%d,%d): %llu\n",
				MAJOR(pdi->dev), MINOR(pdi->dev),
				(unsigned long long) t->sector + t_sec(t));
		return;
	}

	rb_erase(&iot->rb_node, &pdi->rb_track);
	iot->sector -= t_sec(t);
	track_rb_insert(pdi, iot);
}

static void log_track_getrq(struct per_dev_info *pdi, struct blk_io_trace *t)
{
	struct io_track *iot;

	if (!track_ios)
		return;

	iot = find_track(pdi, t->pid, t->comm, t->sector);
	iot->allocation_time = t->time;
}
/*
 * return time between rq allocation and insertion
 */
static unsigned long long log_track_insert(struct per_dev_info *pdi,
					   struct blk_io_trace *t)
{
	unsigned long long elapsed;
	struct io_track *iot;

	if (!track_ios)
		return -1;

	iot = find_track(pdi, t->pid, t->comm, t->sector);
	iot->queue_time = t->time;

	if (!iot->allocation_time)
		return -1;

	elapsed = iot->queue_time - iot->allocation_time;

	if (per_process_stats) {
		struct per_process_info *ppi = find_process(iot->pid, iot->comm);
		int w = (t->action & BLK_TC_ACT(BLK_TC_WRITE)) != 0;

		if (ppi && elapsed > ppi->longest_allocation_wait[w])
			ppi->longest_allocation_wait[w] = elapsed;
	}

	return elapsed;
}

/*
 * return time between queue and issue
 */
static unsigned long long log_track_issue(struct per_dev_info *pdi,
					  struct blk_io_trace *t)
{
	unsigned long long elapsed;
	struct io_track *iot;

	if (!track_ios)
		return -1;
	if ((t->action & BLK_TC_ACT(BLK_TC_FS)) == 0)
		return -1;

	iot = __find_track(pdi, t->sector);
	if (!iot) {
		if (verbose)
			fprintf(stderr, "issue not found for (%d,%d): %llu\n",
				MAJOR(pdi->dev), MINOR(pdi->dev),
				(unsigned long long) t->sector);
		return -1;
	}

	iot->dispatch_time = t->time;
	elapsed = iot->dispatch_time - iot->queue_time;

	if (per_process_stats) {
		struct per_process_info *ppi = find_process(iot->pid, iot->comm);
		int w = (t->action & BLK_TC_ACT(BLK_TC_WRITE)) != 0;

		if (ppi && elapsed > ppi->longest_dispatch_wait[w])
			ppi->longest_dispatch_wait[w] = elapsed;
	}

	return elapsed;
}
/*
 * return time between dispatch and complete
 */
static unsigned long long log_track_complete(struct per_dev_info *pdi,
					     struct blk_io_trace *t)
{
	unsigned long long elapsed;
	struct io_track *iot;

	if (!track_ios)
		return -1;
	if ((t->action & BLK_TC_ACT(BLK_TC_FS)) == 0)
		return -1;

	iot = __find_track(pdi, t->sector);
	if (!iot) {
		if (verbose)
			fprintf(stderr, "complete not found for (%d,%d): %llu\n",
				MAJOR(pdi->dev), MINOR(pdi->dev),
				(unsigned long long) t->sector);
		return -1;
	}

	iot->completion_time = t->time;
	elapsed = iot->completion_time - iot->dispatch_time;

	if (per_process_stats) {
		struct per_process_info *ppi = find_process(iot->pid, iot->comm);
		int w = (t->action & BLK_TC_ACT(BLK_TC_WRITE)) != 0;

		if (ppi && elapsed > ppi->longest_completion_wait[w])
			ppi->longest_completion_wait[w] = elapsed;
	}

	/*
	 * kill the trace, we don't need it after completion
	 */
	rb_erase(&iot->rb_node, &pdi->rb_track);
	free(iot);

	return elapsed;
}
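
/*
 * lifecycle of an io_track node: created on getrq or insert, stamped
 * at allocation -> queue -> dispatch -> completion, then erased and
 * freed above once the request completes
 */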
static struct io_stats *find_process_io_stats(__u32 pid, char *name)
{
	struct per_process_info *ppi = find_process(pid, name);

	if (!ppi) {
		ppi = malloc(sizeof(*ppi));
		memset(ppi, 0, sizeof(*ppi));
		memcpy(ppi->name, name, 16);
		ppi->pid = pid;
		add_process_to_hash(ppi);
		add_process_to_list(ppi);
	}

	return &ppi->io_stats;
}

static void resize_cpu_info(struct per_dev_info *pdi, int cpu)
{
	struct per_cpu_info *cpus = pdi->cpus;
	int ncpus = pdi->ncpus;
	int new_count = cpu + 1;
	int new_space, size;
	char *new_start;

	size = new_count * sizeof(struct per_cpu_info);
	cpus = realloc(cpus, size);
	if (!cpus) {
		char name[20];

		fprintf(stderr, "Out of memory, CPU info for device %s (%d)\n",
			get_dev_name(pdi, name, sizeof(name)), size);
		exit(1);
	}

	new_start = (char *)cpus + (ncpus * sizeof(struct per_cpu_info));
	new_space = (new_count - ncpus) * sizeof(struct per_cpu_info);
	memset(new_start, 0, new_space);

	pdi->ncpus = new_count;
	pdi->cpus = cpus;
}

static struct per_cpu_info *get_cpu_info(struct per_dev_info *pdi, int cpu)
{
	struct per_cpu_info *pci;

	if (cpu >= pdi->ncpus)
		resize_cpu_info(pdi, cpu);

	pci = &pdi->cpus[cpu];
	pci->cpu = cpu;
	return pci;
}
static int resize_devices(char *name)
{
	int size = (ndevices + 1) * sizeof(struct per_dev_info);

	devices = realloc(devices, size);
	if (!devices) {
		fprintf(stderr, "Out of memory, device %s (%d)\n", name, size);
		return 1;
	}
	memset(&devices[ndevices], 0, sizeof(struct per_dev_info));
	devices[ndevices].name = name;
	ndevices++;
	return 0;
}

static struct per_dev_info *get_dev_info(dev_t dev)
{
	struct per_dev_info *pdi;
	int i;

	for (i = 0; i < ndevices; i++) {
		if (!devices[i].dev)
			devices[i].dev = dev;
		if (devices[i].dev == dev)
			return &devices[i];
	}

	if (resize_devices(NULL))
		return NULL;

	pdi = &devices[ndevices - 1];
	pdi->dev = dev;
	pdi->first_reported_time = 0;
	pdi->last_sequence = -1;
	pdi->last_read_time = 0;
	memset(&pdi->rb_last, 0, sizeof(pdi->rb_last));
	pdi->rb_last_entries = 0;
	return pdi;
}

static char *get_dev_name(struct per_dev_info *pdi, char *buffer, int size)
{
	if (pdi->name)
		snprintf(buffer, size, "%s", pdi->name);
	else
		snprintf(buffer, size, "%d,%d", MAJOR(pdi->dev), MINOR(pdi->dev));

	return buffer;
}
static void check_time(struct per_dev_info *pdi, struct blk_io_trace *bit)
{
	unsigned long long this = bit->time;
	unsigned long long last = pdi->last_reported_time;

	pdi->backwards = (this < last) ? 'B' : ' ';
	pdi->last_reported_time = this;
}
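
/*
 * the 'B' stored in pdi->backwards flags a trace whose timestamp went
 * backwards relative to the last one reported; presumably the output
 * formatter prints it so such events stand out in the log
 */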
static inline void __account_m(struct io_stats *ios, struct blk_io_trace *t,
			       int rw)
{
	if (rw) {
		ios->mwrites++;
		ios->qwrite_kb += t_kb(t);
	} else {
		ios->mreads++;
		ios->qread_kb += t_kb(t);
	}
}

static inline void account_m(struct blk_io_trace *t, struct per_cpu_info *pci,
			     int rw)
{
	__account_m(&pci->io_stats, t, rw);

	if (per_process_stats) {
		struct io_stats *ios = find_process_io_stats(t->pid, t->comm);

		__account_m(ios, t, rw);
	}
}

static inline void __account_queue(struct io_stats *ios, struct blk_io_trace *t,
				   int rw)
{
	if (rw) {
		ios->qwrites++;
		ios->qwrite_kb += t_kb(t);
	} else {
		ios->qreads++;
		ios->qread_kb += t_kb(t);
	}
}

static inline void account_queue(struct blk_io_trace *t,
				 struct per_cpu_info *pci, int rw)
{
	__account_queue(&pci->io_stats, t, rw);

	if (per_process_stats) {
		struct io_stats *ios = find_process_io_stats(t->pid, t->comm);

		__account_queue(ios, t, rw);
	}
}
static inline void __account_c(struct io_stats *ios, int rw, int bytes)
{
	if (rw) {
		ios->cwrites++;
		ios->cwrite_kb += bytes >> 10;
	} else {
		ios->creads++;
		ios->cread_kb += bytes >> 10;
	}
}

static inline void account_c(struct blk_io_trace *t, struct per_cpu_info *pci,
			     int rw, int bytes)
{
	__account_c(&pci->io_stats, rw, bytes);

	if (per_process_stats) {
		struct io_stats *ios = find_process_io_stats(t->pid, t->comm);

		__account_c(ios, rw, bytes);
	}
}

static inline void __account_issue(struct io_stats *ios, int rw,
				   unsigned int bytes)
{
	if (rw) {
		ios->iwrites++;
		ios->iwrite_kb += bytes >> 10;
	} else {
		ios->ireads++;
		ios->iread_kb += bytes >> 10;
	}
}

static inline void account_issue(struct blk_io_trace *t,
				 struct per_cpu_info *pci, int rw)
{
	__account_issue(&pci->io_stats, rw, t->bytes);

	if (per_process_stats) {
		struct io_stats *ios = find_process_io_stats(t->pid, t->comm);

		__account_issue(ios, rw, t->bytes);
	}
}
static inline void __account_unplug(struct io_stats *ios, int timer)
{
	if (timer)
		ios->timer_unplugs++;
	else
		ios->io_unplugs++;
}

static inline void account_unplug(struct blk_io_trace *t,
				  struct per_cpu_info *pci, int timer)
{
	__account_unplug(&pci->io_stats, timer);

	if (per_process_stats) {
		struct io_stats *ios = find_process_io_stats(t->pid, t->comm);

		__account_unplug(ios, timer);
	}
}

static inline void __account_requeue(struct io_stats *ios,
				     struct blk_io_trace *t, int rw)
{
	if (rw) {
		ios->wrqueue++;
		ios->iwrite_kb -= t_kb(t);
	} else {
		ios->rrqueue++;
		ios->iread_kb -= t_kb(t);
	}
}

static inline void account_requeue(struct blk_io_trace *t,
				   struct per_cpu_info *pci, int rw)
{
	__account_requeue(&pci->io_stats, t, rw);

	if (per_process_stats) {
		struct io_stats *ios = find_process_io_stats(t->pid, t->comm);

		__account_requeue(ios, t, rw);
	}
}
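
/*
 * a requeue undoes the earlier issue accounting: the KB counters are
 * decremented so the dispatch totals only reflect io that actually
 * stayed dispatched
 */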
static void log_complete(struct per_dev_info *pdi, struct per_cpu_info *pci,
			 struct blk_io_trace *t, char *act)
{
	process_fmt(act, pci, t, log_track_complete(pdi, t), 0, NULL);
}

static void log_insert(struct per_dev_info *pdi, struct per_cpu_info *pci,
		       struct blk_io_trace *t, char *act)
{
	process_fmt(act, pci, t, log_track_insert(pdi, t), 0, NULL);
}

static void log_queue(struct per_cpu_info *pci, struct blk_io_trace *t,
		      char *act)
{
	process_fmt(act, pci, t, -1, 0, NULL);
}

static void log_issue(struct per_dev_info *pdi, struct per_cpu_info *pci,
		      struct blk_io_trace *t, char *act)
{
	process_fmt(act, pci, t, log_track_issue(pdi, t), 0, NULL);
}

static void log_merge(struct per_dev_info *pdi, struct per_cpu_info *pci,
		      struct blk_io_trace *t, char *act)
{
	if (act[0] == 'F')
		log_track_frontmerge(pdi, t);

	process_fmt(act, pci, t, -1ULL, 0, NULL);
}

static void log_action(struct per_cpu_info *pci, struct blk_io_trace *t,
		       char *act)
{
	process_fmt(act, pci, t, -1ULL, 0, NULL);
}

static void log_generic(struct per_cpu_info *pci, struct blk_io_trace *t,
			char *act)
{
	process_fmt(act, pci, t, -1ULL, 0, NULL);
}

static void log_unplug(struct per_cpu_info *pci, struct blk_io_trace *t,
		       char *act)
{
	process_fmt(act, pci, t, -1ULL, 0, NULL);
}

static void log_split(struct per_cpu_info *pci, struct blk_io_trace *t,
		      char *act)
{
	process_fmt(act, pci, t, -1ULL, 0, NULL);
}

static void log_pc(struct per_cpu_info *pci, struct blk_io_trace *t, char *act)
{
	unsigned char *buf = (unsigned char *) t + sizeof(*t);

	process_fmt(act, pci, t, -1ULL, t->pdu_len, buf);
}
static void dump_trace_pc(struct blk_io_trace *t, struct per_cpu_info *pci)
{
	int act = t->action & 0xffff;

	switch (act) {
	case __BLK_TA_QUEUE:
		log_generic(pci, t, "Q");
		break;
	case __BLK_TA_GETRQ:
		log_generic(pci, t, "G");
		break;
	case __BLK_TA_SLEEPRQ:
		log_generic(pci, t, "S");
		break;
	case __BLK_TA_REQUEUE:
		log_generic(pci, t, "R");
		break;
	case __BLK_TA_ISSUE:
		log_pc(pci, t, "D");
		break;
	case __BLK_TA_COMPLETE:
		log_pc(pci, t, "C");
		break;
	case __BLK_TA_INSERT:
		log_pc(pci, t, "I");
		break;
	default:
		fprintf(stderr, "Bad pc action %x\n", act);
		break;
	}
}
static void dump_trace_fs(struct blk_io_trace *t, struct per_dev_info *pdi,
			  struct per_cpu_info *pci)
{
	int w = t->action & BLK_TC_ACT(BLK_TC_WRITE);
	int act = t->action & 0xffff;

	switch (act) {
	case __BLK_TA_QUEUE:
		account_queue(t, pci, w);
		log_queue(pci, t, "Q");
		break;
	case __BLK_TA_INSERT:
		log_insert(pdi, pci, t, "I");
		break;
	case __BLK_TA_BACKMERGE:
		account_m(t, pci, w);
		log_merge(pdi, pci, t, "M");
		break;
	case __BLK_TA_FRONTMERGE:
		account_m(t, pci, w);
		log_merge(pdi, pci, t, "F");
		break;
	case __BLK_TA_GETRQ:
		log_track_getrq(pdi, t);
		log_generic(pci, t, "G");
		break;
	case __BLK_TA_SLEEPRQ:
		log_generic(pci, t, "S");
		break;
	case __BLK_TA_REQUEUE:
		account_requeue(t, pci, w);
		log_queue(pci, t, "R");
		break;
	case __BLK_TA_ISSUE:
		account_issue(t, pci, w);
		log_issue(pdi, pci, t, "D");
		break;
	case __BLK_TA_COMPLETE:
		account_c(t, pci, w, t->bytes);
		log_complete(pdi, pci, t, "C");
		break;
	case __BLK_TA_PLUG:
		log_action(pci, t, "P");
		break;
	case __BLK_TA_UNPLUG_IO:
		account_unplug(t, pci, 0);
		log_unplug(pci, t, "U");
		break;
	case __BLK_TA_UNPLUG_TIMER:
		account_unplug(t, pci, 1);
		log_unplug(pci, t, "UT");
		break;
	case __BLK_TA_SPLIT:
		log_split(pci, t, "X");
		break;
	case __BLK_TA_BOUNCE:
		log_generic(pci, t, "B");
		break;
	case __BLK_TA_REMAP:
		log_generic(pci, t, "A");
		break;
	default:
		fprintf(stderr, "Bad fs action %x\n", t->action);
		break;
	}
}
static void dump_trace(struct blk_io_trace *t, struct per_cpu_info *pci,
		       struct per_dev_info *pdi)
{
	if (t->action & BLK_TC_ACT(BLK_TC_PC))
		dump_trace_pc(t, pci);
	else
		dump_trace_fs(t, pdi, pci);

	if (!pdi->events)
		pdi->first_reported_time = t->time;

	pdi->events++;
}
/*
 * print in a proper way, not too small and not too big. if more than
 * 1,000,000, turn into M and so on
 */
static char *size_cnv(char *dst, unsigned long long num, int in_kb)
{
	char suff[] = { '\0', 'K', 'M', 'G', 'P' };
	unsigned int i = 0;

	if (in_kb)
		i++;

	while (num > 1000 * 1000ULL && (i < sizeof(suff) - 1)) {
		i++;
		num /= 1000;
	}

	sprintf(dst, "%'8Lu%c", num, suff[i]);
	return dst;
}
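
/*
 * example: size_cnv(buf, 2345678, 1) starts at 'K' (in_kb), divides
 * once to 2345 and moves to 'M', giving "   2,345M" via the
 * locale-grouped %'8Lu format
 */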
static void dump_io_stats(struct io_stats *ios, char *msg)
{
	static char x[256], y[256];

	fprintf(ofp, "%s\n", msg);

	fprintf(ofp, " Reads Queued:    %s, %siB\t", size_cnv(x, ios->qreads, 0), size_cnv(y, ios->qread_kb, 1));
	fprintf(ofp, " Writes Queued:    %s, %siB\n", size_cnv(x, ios->qwrites, 0), size_cnv(y, ios->qwrite_kb, 1));

	fprintf(ofp, " Read Dispatches: %s, %siB\t", size_cnv(x, ios->ireads, 0), size_cnv(y, ios->iread_kb, 1));
	fprintf(ofp, " Write Dispatches: %s, %siB\n", size_cnv(x, ios->iwrites, 0), size_cnv(y, ios->iwrite_kb, 1));
	fprintf(ofp, " Reads Requeued:  %s\t\t", size_cnv(x, ios->rrqueue, 0));
	fprintf(ofp, " Writes Requeued:  %s\n", size_cnv(x, ios->wrqueue, 0));
	fprintf(ofp, " Reads Completed: %s, %siB\t", size_cnv(x, ios->creads, 0), size_cnv(y, ios->cread_kb, 1));
	fprintf(ofp, " Writes Completed: %s, %siB\n", size_cnv(x, ios->cwrites, 0), size_cnv(y, ios->cwrite_kb, 1));
	fprintf(ofp, " Read Merges:     %'8lu%8c\t", ios->mreads, ' ');
	fprintf(ofp, " Write Merges:     %'8lu\n", ios->mwrites);
	fprintf(ofp, " IO unplugs:      %'8lu%8c\t", ios->io_unplugs, ' ');
	fprintf(ofp, " Timer unplugs:    %'8lu\n", ios->timer_unplugs);
}

static void dump_wait_stats(struct per_process_info *ppi)
{
	unsigned long rawait = ppi->longest_allocation_wait[0] / 1000;
	unsigned long rdwait = ppi->longest_dispatch_wait[0] / 1000;
	unsigned long rcwait = ppi->longest_completion_wait[0] / 1000;
	unsigned long wawait = ppi->longest_allocation_wait[1] / 1000;
	unsigned long wdwait = ppi->longest_dispatch_wait[1] / 1000;
	unsigned long wcwait = ppi->longest_completion_wait[1] / 1000;

	fprintf(ofp, " Allocation wait: %'8lu%8c\t", rawait, ' ');
	fprintf(ofp, " Allocation wait:  %'8lu\n", wawait);
	fprintf(ofp, " Dispatch wait:   %'8lu%8c\t", rdwait, ' ');
	fprintf(ofp, " Dispatch wait:    %'8lu\n", wdwait);
	fprintf(ofp, " Completion wait: %'8lu%8c\t", rcwait, ' ');
	fprintf(ofp, " Completion wait:  %'8lu\n", wcwait);
}
static int ppi_name_compare(const void *p1, const void *p2)
{
	struct per_process_info *ppi1 = *((struct per_process_info **) p1);
	struct per_process_info *ppi2 = *((struct per_process_info **) p2);
	int res;

	res = strverscmp(ppi1->name, ppi2->name);
	if (!res)
		res = ppi1->pid > ppi2->pid;

	return res;
}

static void sort_process_list(void)
{
	struct per_process_info **ppis;
	struct per_process_info *ppi;
	int i = 0;

	ppis = malloc(ppi_list_entries * sizeof(struct per_process_info *));

	for (ppi = ppi_list; ppi; ppi = ppi->list_next)
		ppis[i++] = ppi;

	qsort(ppis, ppi_list_entries, sizeof(ppi), ppi_name_compare);

	/*
	 * rebuild the flat list in sorted order
	 */
	ppi_list = NULL;
	for (i = ppi_list_entries - 1; i >= 0; i--) {
		ppi = ppis[i];
		ppi->list_next = ppi_list;
		ppi_list = ppi;
	}

	free(ppis);
}

static void show_process_stats(void)
{
	struct per_process_info *ppi;

	sort_process_list();

	ppi = ppi_list;
	while (ppi) {
		char name[64];

		if (ppi->more_than_one)
			sprintf(name, "%s (%u, ...)", ppi->name, ppi->pid);
		else
			sprintf(name, "%s (%u)", ppi->name, ppi->pid);

		dump_io_stats(&ppi->io_stats, name);
		dump_wait_stats(ppi);
		ppi = ppi->list_next;
	}
}
static void show_device_and_cpu_stats(void)
{
	struct per_dev_info *pdi;
	struct per_cpu_info *pci;
	struct io_stats total, *ios;
	unsigned long long rrate, wrate, msec;
	int i, j, pci_events;
	char line[3 + 8/*cpu*/ + 2 + 32/*dev*/ + 3];
	char name[32];

	for (pdi = devices, i = 0; i < ndevices; i++, pdi++) {
		if (i > 0)
			fprintf(ofp, "\n");

		memset(&total, 0, sizeof(total));
		pci_events = 0;

		for (pci = pdi->cpus, j = 0; j < pdi->ncpus; j++, pci++) {
			if (!pci->nelems)
				continue;

			pci_events++;

			ios = &pci->io_stats;
			total.qreads += ios->qreads;
			total.qwrites += ios->qwrites;
			total.creads += ios->creads;
			total.cwrites += ios->cwrites;
			total.mreads += ios->mreads;
			total.mwrites += ios->mwrites;
			total.ireads += ios->ireads;
			total.iwrites += ios->iwrites;
			total.rrqueue += ios->rrqueue;
			total.wrqueue += ios->wrqueue;
			total.qread_kb += ios->qread_kb;
			total.qwrite_kb += ios->qwrite_kb;
			total.cread_kb += ios->cread_kb;
			total.cwrite_kb += ios->cwrite_kb;
			total.iread_kb += ios->iread_kb;
			total.iwrite_kb += ios->iwrite_kb;
			total.timer_unplugs += ios->timer_unplugs;
			total.io_unplugs += ios->io_unplugs;

			snprintf(line, sizeof(line) - 1, "CPU%d (%s):",
				 j, get_dev_name(pdi, name, sizeof(name)));
			dump_io_stats(ios, line);
		}

		if (pci_events > 1) {
			fprintf(ofp, "\n");
			snprintf(line, sizeof(line) - 1, "Total (%s):",
				 get_dev_name(pdi, name, sizeof(name)));
			dump_io_stats(&total, line);
		}

		wrate = rrate = 0;
		msec = (pdi->last_reported_time - pdi->first_reported_time) / 1000000;
		if (msec) {
			rrate = 1000 * total.cread_kb / msec;
			wrate = 1000 * total.cwrite_kb / msec;
		}

		fprintf(ofp, "\nThroughput (R/W): %'LuKiB/s / %'LuKiB/s\n",
			rrate, wrate);
		fprintf(ofp, "Events (%s): %'Lu entries\n",
			get_dev_name(pdi, line, sizeof(line)), pdi->events);
		fprintf(ofp, "Skips: %'lu forward (%'llu - %5.1lf%%) %'lu backward (%'llu - %5.1lf%%)\n",
			pdi->skips, pdi->seq_skips,
			100.0 * ((double)pdi->seq_skips /
				(double)(pdi->events + pdi->seq_skips)),
			pdi->nskips, pdi->seq_nskips,
			100.0 * ((double)pdi->seq_nskips /
				(double)(pdi->events + pdi->seq_nskips)));
	}
}
/*
 * struct trace and blktrace allocation cache, we do potentially
 * millions of mallocs for these structures while only using at most
 * a few thousand at the time
 */
static inline void t_free(struct trace *t)
{
	if (t_alloc_cache < 1024) {
		t->next = t_alloc_list;
		t_alloc_list = t;
		t_alloc_cache++;
	} else
		free(t);
}

static inline struct trace *t_alloc(void)
{
	struct trace *t = t_alloc_list;

	if (t) {
		t_alloc_list = t->next;
		t_alloc_cache--;
		return t;
	}

	return malloc(sizeof(*t));
}

static inline void bit_free(struct blk_io_trace *bit)
{
	if (bit_alloc_cache < 1024 && !bit->pdu_len) {
		/*
		 * abuse a 64-bit field for a next pointer for the free item
		 */
		bit->time = (__u64) (unsigned long) bit_alloc_list;
		bit_alloc_list = (struct blk_io_trace *) bit;
		bit_alloc_cache++;
	} else
		free(bit);
}

static inline struct blk_io_trace *bit_alloc(void)
{
	struct blk_io_trace *bit = bit_alloc_list;

	if (bit) {
		bit_alloc_list = (struct blk_io_trace *) (unsigned long) \
				 bit->time;
		bit_alloc_cache--;
		return bit;
	}

	return malloc(sizeof(*bit));
}
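
/*
 * traces with a payload (pdu_len != 0) are never cached above, since
 * they were realloc'ed to a larger, variable size; only fixed-size
 * structs are safe to recycle through the free list
 */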
static void find_genesis(void)
{
	struct trace *t = trace_list;

	genesis_time = -1ULL;
	while (t != NULL) {
		if (t->bit->time < genesis_time)
			genesis_time = t->bit->time;

		t = t->next;
	}
}

static inline int check_stopwatch(struct blk_io_trace *bit)
{
	if (bit->time < stopwatch_end &&
	    bit->time >= stopwatch_start)
		return 0;

	return 1;
}
/*
 * return youngest entry read
 */
static int sort_entries(unsigned long long *youngest)
{
	struct trace *t;

	if (!genesis_time)
		find_genesis();

	*youngest = 0;
	while ((t = trace_list) != NULL) {
		struct blk_io_trace *bit = t->bit;

		trace_list = t->next;

		bit->time -= genesis_time;

		if (bit->time < *youngest || !*youngest)
			*youngest = bit->time;

		if (bit->sequence < smallest_seq_read)
			smallest_seq_read = bit->sequence;

		if (check_stopwatch(bit)) {
			bit_free(bit);
			t_free(t);
			continue;
		}

		if (trace_rb_insert_sort(t))
			return -1;
	}

	return 0;
}
static inline void __put_trace_last(struct per_dev_info *pdi, struct trace *t)
{
	rb_erase(&t->rb_node, &pdi->rb_last);
	pdi->rb_last_entries--;

	bit_free(t->bit);
	t_free(t);
}

static void put_trace(struct per_dev_info *pdi, struct trace *t)
{
	rb_erase(&t->rb_node, &rb_sort_root);
	rb_sort_entries--;

	trace_rb_insert_last(pdi, t);

	if (pdi->rb_last_entries > rb_batch * pdi->nfiles) {
		struct rb_node *n = rb_first(&pdi->rb_last);

		t = rb_entry(n, struct trace, rb_node);
		__put_trace_last(pdi, t);
	}
}
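
/*
 * rb_last keeps traces around for a while after display so that
 * check_sequence() can find an expected sequence number that was in
 * fact already shown; the rb_batch * nfiles cap above bounds that
 * backlog by evicting the oldest entry
 */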
/*
 * to continue, we must have traces from all online cpus in the tree
 */
static int check_cpu_map(struct per_dev_info *pdi)
{
	unsigned long *cpu_map;
	struct rb_node *n;
	struct trace *__t;
	unsigned int i;
	int ret, cpu;

	/*
	 * create a map of the cpus we have traces for; zero it first,
	 * since bits are OR'ed in below
	 */
	cpu_map = malloc(pdi->cpu_map_max / sizeof(long));
	memset(cpu_map, 0, pdi->cpu_map_max / sizeof(long));
	n = rb_first(&rb_sort_root);
	while (n) {
		__t = rb_entry(n, struct trace, rb_node);
		cpu = __t->bit->cpu;

		cpu_map[CPU_IDX(cpu)] |= (1UL << CPU_BIT(cpu));
		n = rb_next(n);
	}

	/*
	 * we can't continue if pdi->cpu_map has entries set that we don't
	 * have in the sort rbtree. the opposite is not a problem, though
	 */
	ret = 0;
	for (i = 0; i < pdi->cpu_map_max / CPUS_PER_LONG; i++) {
		if (pdi->cpu_map[i] & ~(cpu_map[i])) {
			ret = 1;
			break;
		}
	}

	free(cpu_map);
	return ret;
}
static int check_sequence(struct per_dev_info *pdi, struct trace *t, int force)
{
	unsigned long expected_sequence = pdi->last_sequence + 1;
	struct blk_io_trace *bit = t->bit;
	struct trace *__t;

	if (!expected_sequence) {
		/*
		 * 1 should be the first entry, just allow it
		 */
		if (bit->sequence == 1)
			return 0;

		return check_cpu_map(pdi);
	}

	if (bit->sequence == expected_sequence)
		return 0;

	/*
	 * we may not have seen that sequence yet. if we are not doing
	 * the final run, break and wait for more entries.
	 */
	if (expected_sequence < smallest_seq_read) {
		__t = trace_rb_find_last(pdi, expected_sequence);
		if (!__t)
			goto skip;

		__put_trace_last(pdi, __t);
		return 0;
	} else if (!force) {
		return 1;
	} else {
skip:
		if (verbose)
			fprintf(stderr, "(%d,%d): skipping %lu -> %u\n",
				MAJOR(pdi->dev), MINOR(pdi->dev),
				pdi->last_sequence, bit->sequence);

		if (bit->sequence > pdi->last_sequence) {
			pdi->skips++;
			pdi->seq_skips += (bit->sequence - pdi->last_sequence);
		} else {
			pdi->nskips++;
			pdi->seq_nskips += (pdi->last_sequence - bit->sequence);
		}
	}

	return 0;
}
static void show_entries_rb(int force)
{
	struct per_dev_info *pdi = NULL;
	struct per_cpu_info *pci = NULL;
	struct blk_io_trace *bit;
	struct rb_node *n;
	struct trace *t;

	while ((n = rb_first(&rb_sort_root)) != NULL) {
		if (is_done() && !force && !pipeline)
			break;

		t = rb_entry(n, struct trace, rb_node);
		bit = t->bit;

		if (!pdi || pdi->dev != bit->device)
			pdi = get_dev_info(bit->device);

		if (!pdi) {
			fprintf(stderr, "Unknown device ID? (%d,%d)\n",
				MAJOR(bit->device), MINOR(bit->device));
			break;
		}

		if (check_sequence(pdi, t, force))
			break;

		if (!force && bit->time > last_allowed_time)
			break;

		pdi->last_sequence = bit->sequence;

		check_time(pdi, bit);

		if (!pci || pci->cpu != bit->cpu)
			pci = get_cpu_info(pdi, bit->cpu);

		pci->nelems++;

		if (bit->action & (act_mask << BLK_TC_SHIFT))
			dump_trace(bit, pci, pdi);

		put_trace(pdi, t);
	}
}
static int read_data(int fd, void *buffer, int bytes, int block)
{
	int ret, bytes_left, fl;
	void *p;

	fl = fcntl(fd, F_GETFL);

	if (!block)
		fcntl(fd, F_SETFL, fl | O_NONBLOCK);
	else
		fcntl(fd, F_SETFL, fl & ~O_NONBLOCK);

	bytes_left = bytes;
	p = buffer;
	while (bytes_left > 0) {
		ret = read(fd, p, bytes_left);
		if (!ret)
			return 1;
		else if (ret < 0) {
			if (errno != EAGAIN)
				perror("read");

			return -1;
		} else {
			p += ret;
			bytes_left -= ret;
		}
	}

	return 0;
}
static int read_events(int fd, int always_block)
{
	struct per_dev_info *pdi = NULL;
	unsigned int events = 0;

	while (!is_done() && events < rb_batch) {
		struct blk_io_trace *bit;
		struct trace *t;
		int pdu_len;
		__u32 magic;

		bit = bit_alloc();

		if (read_data(fd, bit, sizeof(*bit), !events || always_block)) {
			bit_free(bit);
			break;
		}

		magic = be32_to_cpu(bit->magic);
		if ((magic & 0xffffff00) != BLK_IO_TRACE_MAGIC) {
			fprintf(stderr, "Bad magic %x\n", magic);
			break;
		}

		pdu_len = be16_to_cpu(bit->pdu_len);
		if (pdu_len) {
			void *ptr = realloc(bit, sizeof(*bit) + pdu_len);

			if (read_data(fd, ptr + sizeof(*bit), pdu_len, 1)) {
				bit_free(ptr);
				break;
			}

			bit = ptr;
		}

		trace_to_cpu(bit);

		if (verify_trace(bit)) {
			bit_free(bit);
			continue;
		}

		t = t_alloc();
		memset(t, 0, sizeof(*t));
		t->bit = bit;

		t->next = trace_list;
		trace_list = t;

		if (!pdi || pdi->dev != bit->device)
			pdi = get_dev_info(bit->device);

		if (bit->time > pdi->last_read_time)
			pdi->last_read_time = bit->time;

		events++;
	}

	return events;
}
static int do_file(void)
{
	struct per_cpu_info *pci;
	struct per_dev_info *pdi;
	int i, j, events, events_added;

	/*
	 * first prepare all files for reading
	 */
	for (i = 0; i < ndevices; i++) {
		pdi = &devices[i];
		pdi->nfiles = 0;
		pdi->last_sequence = -1;

		for (j = 0;; j++) {
			struct stat st;
			int len = 0;
			char *p, *dname;

			pci = get_cpu_info(pdi, j);
			pci->cpu = j;
			pci->fd = -1;

			p = strdup(pdi->name);
			dname = dirname(p);
			if (strcmp(dname, ".")) {
				input_dir = dname;
				p = strdup(pdi->name);
				strcpy(pdi->name, basename(p));
			}
			free(p);

			if (input_dir)
				len = sprintf(pci->fname, "%s/", input_dir);

			snprintf(pci->fname + len, sizeof(pci->fname)-1-len,
				 "%s.blktrace.%d", pdi->name, pci->cpu);
			if (stat(pci->fname, &st) < 0)
				break;
			if (st.st_size) {
				pci->fd = open(pci->fname, O_RDONLY);
				if (pci->fd < 0) {
					perror(pci->fname);
					continue;
				}
			}

			printf("Input file %s added\n", pci->fname);
			pdi->nfiles++;
			cpu_mark_online(pdi, pci->cpu);
		}
	}

	/*
	 * now loop over the files reading in the data
	 */
	do {
		unsigned long long youngest;

		events_added = 0;
		last_allowed_time = -1ULL;
		smallest_seq_read = -1U;

		for (i = 0; i < ndevices; i++) {
			pdi = &devices[i];

			for (j = 0; j < pdi->nfiles; j++) {
				pci = get_cpu_info(pdi, j);

				if (pci->fd == -1)
					continue;

				events = read_events(pci->fd, 1);
				if (!events) {
					cpu_mark_offline(pdi, pci->cpu);
					close(pci->fd);
					pci->fd = -1;
					continue;
				}

				if (pdi->last_read_time < last_allowed_time)
					last_allowed_time = pdi->last_read_time;

				events_added += events;
			}
		}

		if (sort_entries(&youngest))
			break;

		if (youngest > stopwatch_end)
			break;

		show_entries_rb(0);

	} while (events_added);

	if (rb_sort_entries)
		show_entries_rb(1);

	return 0;
}
static int do_stdin(void)
{
	unsigned long long youngest;
	int fd, events;

	last_allowed_time = -1ULL;
	fd = dup(STDIN_FILENO);
	if (fd == -1) {
		perror("dup stdin");
		return -1;
	}

	while ((events = read_events(fd, 0)) != 0) {
		smallest_seq_read = -1U;

		if (sort_entries(&youngest))
			break;

		if (youngest > stopwatch_end)
			break;

		show_entries_rb(0);
	}

	if (rb_sort_entries)
		show_entries_rb(1);

	close(fd);
	return 0;
}
static void show_stats(void)
{
	if (!ofp)
		return;
	if (stats_printed)
		return;

	stats_printed = 1;

	if (per_process_stats)
		show_process_stats();

	if (per_device_and_cpu_stats)
		show_device_and_cpu_stats();

	fflush(ofp);
}

static void handle_sigint(__attribute__((__unused__)) int sig)
{
	done = 1;
}
/*
 * Extract start and duration times from a string, allowing
 * us to specify a time interval of interest within a trace.
 * Format: "duration" (start is zero) or "start:duration".
 */
static int find_stopwatch_interval(char *string)
{
	double value;
	char *sp;

	value = strtod(string, &sp);
	if (sp == string) {
		fprintf(stderr, "Invalid stopwatch timer: %s\n", string);
		return 1;
	}
	if (*sp == ':') {
		stopwatch_start = DOUBLE_TO_NANO_ULL(value);
		string = sp + 1;
		value = strtod(string, &sp);
		if (sp == string || *sp != '\0') {
			fprintf(stderr, "Invalid stopwatch duration time: %s\n",
				string);
			return 1;
		}
	} else if (*sp != '\0') {
		fprintf(stderr, "Invalid stopwatch start timer: %s\n", string);
		return 1;
	}
	stopwatch_end = DOUBLE_TO_NANO_ULL(value);
	if (stopwatch_end <= stopwatch_start) {
		fprintf(stderr, "Invalid stopwatch interval: %Lu -> %Lu\n",
			stopwatch_start, stopwatch_end);
		return 1;
	}

	return 0;
}
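
/*
 * example: "-w 2.5:10" yields stopwatch_start == 2.5e9 and
 * stopwatch_end == 1e10, assuming DOUBLE_TO_NANO_ULL() converts
 * seconds to nanoseconds as its name suggests
 */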
static char usage_str[] = \
	"[ -i <input name> ] [ -o <output name> ] [ -s ] [ -t ] [ -q ]\n" \
	"[ -w start:stop ] [ -f output format ] [ -F format spec ] [ -v ]\n\n" \
	"\t-i Input file containing trace data, or '-' for stdin\n" \
	"\t-D Directory to prepend to input file names\n" \
	"\t-o Output file. If not given, output is stdout\n" \
	"\t-b stdin read batching\n" \
	"\t-s Show per-program io statistics\n" \
	"\t-n Hash processes by name, not pid\n" \
	"\t-t Track individual ios. Will tell you the time a request took\n" \
	"\t   to get queued, to get dispatched, and to get completed\n" \
	"\t-q Quiet. Don't display any stats at the end of the trace\n" \
	"\t-w Only parse data between the given time interval in seconds.\n" \
	"\t   If 'start' isn't given, blkparse defaults the start time to 0\n" \
	"\t-f Output format. Customize the output format. The format field\n" \
	"\t   identifiers can be found in the documentation\n" \
	"\t-F Format specification. Can be found in the documentation\n" \
	"\t-v More verbose for marginal errors\n" \
	"\t-V Print program version info\n\n";
static void usage(char *prog)
{
	fprintf(stderr, "Usage: %s %s %s", prog, blkparse_version, usage_str);
}
int main(int argc, char *argv[])
{
	char *ofp_buffer;
	int i, c, ret, mode;
	int act_mask_tmp = 0;

	while ((c = getopt_long(argc, argv, S_OPTS, l_opts, NULL)) != -1) {
		switch (c) {
		case 'a':
			i = find_mask_map(optarg);
			if (i < 0) {
				fprintf(stderr, "Invalid action mask %s\n",
					optarg);
				return 1;
			}
			act_mask_tmp |= i;
			break;
		case 'A':
			if ((sscanf(optarg, "%x", &i) != 1) ||
			    !valid_act_opt(i)) {
				fprintf(stderr,
					"Invalid set action mask %s/0x%x\n",
					optarg, i);
				return 1;
			}
			act_mask_tmp = i;
			break;
		case 'i':
			if (!strcmp(optarg, "-") && !pipeline)
				pipeline = 1;
			else if (resize_devices(optarg) != 0)
				return 1;
			break;
		case 'D':
			input_dir = optarg;
			break;
		case 'o':
			output_name = optarg;
			break;
		case 'b':
			rb_batch = atoi(optarg);
			if (rb_batch <= 0)
				rb_batch = RB_BATCH_DEFAULT;
			break;
		case 's':
			per_process_stats = 1;
			break;
		case 't':
			track_ios = 1;
			break;
		case 'q':
			per_device_and_cpu_stats = 0;
			break;
		case 'w':
			if (find_stopwatch_interval(optarg) != 0)
				return 1;
			break;
		case 'f':
			set_all_format_specs(optarg);
			break;
		case 'F':
			if (add_format_spec(optarg) != 0)
				return 1;
			break;
		case 'n':
			ppi_hash_by_pid = 0;
			break;
		case 'v':
			verbose++;
			break;
		case 'V':
			printf("%s version %s\n", argv[0], blkparse_version);
			return 0;
		default:
			usage(argv[0]);
			return 1;
		}
	}

	while (optind < argc) {
		if (!strcmp(argv[optind], "-") && !pipeline)
			pipeline = 1;
		else if (resize_devices(argv[optind]) != 0)
			return 1;
		optind++;
	}

	if (!pipeline && !ndevices) {
		usage(argv[0]);
		return 1;
	}

	if (act_mask_tmp != 0)
		act_mask = act_mask_tmp;

	memset(&rb_sort_root, 0, sizeof(rb_sort_root));

	signal(SIGINT, handle_sigint);
	signal(SIGHUP, handle_sigint);
	signal(SIGTERM, handle_sigint);

	setlocale(LC_NUMERIC, "en_US");

	if (!output_name) {
		ofp = fdopen(STDOUT_FILENO, "w");
		mode = _IOLBF;
	} else {
		char ofname[128];

		snprintf(ofname, sizeof(ofname) - 1, "%s", output_name);
		ofp = fopen(ofname, "w");
		mode = _IOFBF;
	}

	if (!ofp) {
		perror("Open output file");
		return 1;
	}

	ofp_buffer = malloc(4096);
	if (setvbuf(ofp, ofp_buffer, mode, 4096)) {
		perror("setvbuf");
		return 1;
	}

	if (pipeline)
		ret = do_stdin();
	else
		ret = do_file();

	show_stats();
	return ret;
}