/*
 * block queue tracing parse application
 *
 * Copyright (C) 2005 Jens Axboe <axboe@suse.de>
 * Copyright (C) 2006 Jens Axboe <axboe@kernel.dk>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <stdio.h>
#include <fcntl.h>
#include <stdlib.h>
#include <string.h>
#include <getopt.h>
#include <errno.h>
#include <signal.h>
#include <locale.h>
#include <libgen.h>

#include "blktrace.h"
#include "rbtree.h"
#include "jhash.h"

static char blkparse_version[] = "0.99.1";
struct skip_info {
	unsigned long start, end;
	struct skip_info *prev, *next;
};
struct per_dev_info {
	dev_t dev;
	char *name;

	int backwards;
	unsigned long long events;
	unsigned long long first_reported_time;
	unsigned long long last_reported_time;
	unsigned long long last_read_time;
	struct io_stats io_stats;
	unsigned long skips;
	unsigned long long seq_skips;
	unsigned int max_depth[2];
	unsigned int cur_depth[2];

	struct rb_root rb_track;

	int nfiles;
	int ncpus;

	unsigned long *cpu_map;
	unsigned int cpu_map_max;

	struct per_cpu_info *cpus;
};
/*
 * some duplicated effort here, we can unify this hash and the ppi hash later
 */
struct process_pid_map {
	pid_t pid;
	char comm[16];
	struct process_pid_map *hash_next, *list_next;
};

#define PPM_HASH_SHIFT	(8)
#define PPM_HASH_SIZE	(1 << PPM_HASH_SHIFT)
#define PPM_HASH_MASK	(PPM_HASH_SIZE - 1)
static struct process_pid_map *ppm_hash_table[PPM_HASH_SIZE];
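
/*
 * A minimal sketch of the lookup both process hash tables use
 * (jhash_1word() comes from jhash.h, JHASH_RANDOM is defined further
 * down; this mirrors what find_ppm() below does):
 *
 *	int idx = jhash_1word(pid, JHASH_RANDOM) & PPM_HASH_MASK;
 *	struct process_pid_map *ppm;
 *
 *	for (ppm = ppm_hash_table[idx]; ppm; ppm = ppm->hash_next)
 *		if (ppm->pid == pid)
 *			break;
 */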
struct per_process_info {
	struct process_pid_map *ppm;
	struct io_stats io_stats;
	struct per_process_info *hash_next, *list_next;
	int more_than_one;

	/*
	 * individual io stats
	 */
	unsigned long long longest_allocation_wait[2];
	unsigned long long longest_dispatch_wait[2];
	unsigned long long longest_completion_wait[2];
};
#define PPI_HASH_SHIFT	(8)
#define PPI_HASH_SIZE	(1 << PPI_HASH_SHIFT)
#define PPI_HASH_MASK	(PPI_HASH_SIZE - 1)
static struct per_process_info *ppi_hash_table[PPI_HASH_SIZE];
static struct per_process_info *ppi_list;
static int ppi_list_entries;
#define S_OPTS	"a:A:i:o:b:stqw:f:F:vVhD:d:"
static struct option l_opts[] = {
	{ .name = "act-mask",		.has_arg = required_argument,	.flag = NULL, .val = 'a' },
	{ .name = "set-mask",		.has_arg = required_argument,	.flag = NULL, .val = 'A' },
	{ .name = "input",		.has_arg = required_argument,	.flag = NULL, .val = 'i' },
	{ .name = "output",		.has_arg = required_argument,	.flag = NULL, .val = 'o' },
	{ .name = "batch",		.has_arg = required_argument,	.flag = NULL, .val = 'b' },
	{ .name = "per-program-stats",	.has_arg = no_argument,		.flag = NULL, .val = 's' },
	{ .name = "track-ios",		.has_arg = no_argument,		.flag = NULL, .val = 't' },
	{ .name = "quiet",		.has_arg = no_argument,		.flag = NULL, .val = 'q' },
	{ .name = "stopwatch",		.has_arg = required_argument,	.flag = NULL, .val = 'w' },
	{ .name = "format",		.has_arg = required_argument,	.flag = NULL, .val = 'f' },
	{ .name = "format-spec",	.has_arg = required_argument,	.flag = NULL, .val = 'F' },
	{ .name = "hash-by-name",	.has_arg = no_argument,		.flag = NULL, .val = 'h' },
	{ .name = "verbose",		.has_arg = no_argument,		.flag = NULL, .val = 'v' },
	{ .name = "version",		.has_arg = no_argument,		.flag = NULL, .val = 'V' },
	{ .name = "input-directory",	.has_arg = required_argument,	.flag = NULL, .val = 'D' },
	{ .name = "dump-binary",	.has_arg = required_argument,	.flag = NULL, .val = 'd' },
	{ .name = NULL }
};
/*
 * for sorting the displayed output
 */
struct trace {
	struct blk_io_trace *bit;
	struct rb_node rb_node;
	struct trace *next;
	unsigned long read_sequence;
};

static struct rb_root rb_sort_root;
static unsigned long rb_sort_entries;

static struct trace *trace_list;

static struct blk_io_trace *bit_alloc_list;
static struct trace *t_alloc_list;
/*
 * for tracking individual ios
 */
struct io_track {
	struct rb_node rb_node;
	struct process_pid_map *ppm;
	__u64 sector;

	unsigned long long allocation_time;
	unsigned long long queue_time;
	unsigned long long dispatch_time;
	unsigned long long completion_time;
};
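
/*
 * The four timestamps above follow one request through its life cycle:
 *
 *	G (getrq)    sets allocation_time
 *	I (insert)   sets queue_time
 *	D (issue)    sets dispatch_time
 *	C (complete) sets completion_time
 *
 * so, for instance, dispatch_time - queue_time is how long a request
 * sat in the queue before the driver took it (see log_track_issue()).
 */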
static int ndevices;
static struct per_dev_info *devices;
static char *get_dev_name(struct per_dev_info *, char *, int);
static int trace_rb_insert_last(struct per_dev_info *, struct trace *);

FILE *ofp = NULL;
static char *output_name;
static char *input_dir;

static unsigned long long genesis_time;
static unsigned long long last_allowed_time;
static unsigned long long stopwatch_start;	/* start from zero by default */
static unsigned long long stopwatch_end = -1ULL;	/* "infinity" */
static unsigned long read_sequence;

static int pipeline;
static int per_process_stats;
static int per_device_and_cpu_stats = 1;
static int track_ios;
static int ppi_hash_by_pid = 1;
static int verbose;
static unsigned int act_mask = -1U;
static int stats_printed;
int data_is_native = -1;

static char *dump_binary;
static int dump_fd;

static unsigned int t_alloc_cache;
static unsigned int bit_alloc_cache;
#define RB_BATCH_DEFAULT	(512)
static unsigned int rb_batch = RB_BATCH_DEFAULT;

#define is_done()	(*(volatile int *)(&done))
static volatile int done;
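
/*
 * done is flipped by the SIGINT/SIGHUP/SIGTERM handler (handle_sigint()
 * near the bottom of the file); the volatile cast in is_done() forces a
 * fresh load on every poll, as in the main read loop:
 *
 *	while (!is_done() && events < rb_batch) {
 *		...
 *	}
 */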
#define JHASH_RANDOM	(0x3af5f2ee)

#define CPUS_PER_LONG	(8 * sizeof(unsigned long))
#define CPU_IDX(cpu)	((cpu) / CPUS_PER_LONG)
#define CPU_BIT(cpu)	((cpu) & (CPUS_PER_LONG - 1))
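
/*
 * Worked example: with 64-bit longs, CPUS_PER_LONG is 64, so cpu 70
 * lives in cpu_map[CPU_IDX(70)] == cpu_map[1] at bit CPU_BIT(70) == 6.
 */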
static void output_binary(void *buf, int len)
{
	if (dump_binary) {
		int n = write(dump_fd, buf, len);

		if (n != len)
			perror(dump_binary);
	}
}
static void resize_cpu_info(struct per_dev_info *pdi, int cpu)
{
	struct per_cpu_info *cpus = pdi->cpus;
	int ncpus = pdi->ncpus;
	int new_count = cpu + 1;
	int new_space, size;
	char *new_start;

	size = new_count * sizeof(struct per_cpu_info);
	cpus = realloc(cpus, size);
	if (!cpus) {
		char name[32];

		fprintf(stderr, "Out of memory, CPU info for device %s (%d)\n",
			get_dev_name(pdi, name, sizeof(name)), size);
		exit(1);
	}

	new_start = (char *)cpus + (ncpus * sizeof(struct per_cpu_info));
	new_space = (new_count - ncpus) * sizeof(struct per_cpu_info);
	memset(new_start, 0, new_space);

	pdi->ncpus = new_count;
	pdi->cpus = cpus;

	for (new_count = 0; new_count < pdi->ncpus; new_count++) {
		struct per_cpu_info *pci = &pdi->cpus[new_count];

		if (!pci->fd) {
			pci->fd = -1;
			memset(&pci->rb_last, 0, sizeof(pci->rb_last));
			pci->rb_last_entries = 0;
			pci->last_sequence = -1;
		}
	}
}
static struct per_cpu_info *get_cpu_info(struct per_dev_info *pdi, int cpu)
{
	struct per_cpu_info *pci;

	if (cpu >= pdi->ncpus)
		resize_cpu_info(pdi, cpu);

	pci = &pdi->cpus[cpu];
	pci->cpu = cpu;
	return pci;
}
static int resize_devices(char *name)
{
	int size = (ndevices + 1) * sizeof(struct per_dev_info);

	devices = realloc(devices, size);
	if (!devices) {
		fprintf(stderr, "Out of memory, device %s (%d)\n", name, size);
		return 1;
	}
	memset(&devices[ndevices], 0, sizeof(struct per_dev_info));
	devices[ndevices].name = name;
	ndevices++;
	return 0;
}
static struct per_dev_info *get_dev_info(dev_t dev)
{
	struct per_dev_info *pdi;
	int i;

	for (i = 0; i < ndevices; i++) {
		if (!devices[i].dev)
			devices[i].dev = dev;
		if (devices[i].dev == dev)
			return &devices[i];
	}

	if (resize_devices(NULL))
		return NULL;

	pdi = &devices[ndevices - 1];
	pdi->dev = dev;
	pdi->first_reported_time = 0;
	pdi->last_read_time = 0;

	return pdi;
}
static void insert_skip(struct per_cpu_info *pci, unsigned long start,
			unsigned long end)
{
	struct skip_info *sip;

	for (sip = pci->skips_tail; sip != NULL; sip = sip->prev) {
		if (end == (sip->start - 1)) {
			sip->start = start;
			return;
		} else if (start == (sip->end + 1)) {
			sip->end = end;
			return;
		}
	}

	sip = malloc(sizeof(struct skip_info));
	sip->start = start;
	sip->end = end;
	sip->prev = sip->next = NULL;
	if (pci->skips_tail == NULL)
		pci->skips_head = pci->skips_tail = sip;
	else {
		sip->prev = pci->skips_tail;
		pci->skips_tail->next = sip;
		pci->skips_tail = sip;
	}
}
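
/*
 * Example of the coalescing above: with an existing skip of 5..9,
 * insert_skip(pci, 10, 12) stretches that entry to 5..12 rather than
 * queueing a second one; only non-adjacent ranges allocate a new node.
 */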
static void remove_sip(struct per_cpu_info *pci, struct skip_info *sip)
{
	if (sip->prev == NULL) {
		if (sip->next == NULL)
			pci->skips_head = pci->skips_tail = NULL;
		else {
			pci->skips_head = sip->next;
			sip->next->prev = NULL;
		}
	} else if (sip->next == NULL) {
		pci->skips_tail = sip->prev;
		sip->prev->next = NULL;
	} else {
		sip->prev->next = sip->next;
		sip->next->prev = sip->prev;
	}

	sip->prev = sip->next = NULL;
	free(sip);
}
#define IN_SKIP(sip, seq) (((sip)->start <= (seq)) && ((seq) <= (sip)->end))

static int check_current_skips(struct per_cpu_info *pci, unsigned long seq)
{
	struct skip_info *sip;

	for (sip = pci->skips_tail; sip != NULL; sip = sip->prev) {
		if (IN_SKIP(sip, seq)) {
			if (sip->start == seq) {
				if (sip->end == seq)
					remove_sip(pci, sip);
				else
					sip->start += 1;
			} else if (sip->end == seq)
				sip->end -= 1;
			else {
				insert_skip(pci, seq + 1, sip->end);
				sip->end = seq - 1;
			}
			return 1;
		}
	}

	return 0;
}
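
/*
 * Example: with a recorded skip of 5..9, an arriving sequence 7 splits
 * the range into 5..6 and 8..9, sequence 5 just bumps the start to 6,
 * and a skip covering exactly one sequence is removed outright.
 */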
static void collect_pdi_skips(struct per_dev_info *pdi)
{
	struct skip_info *sip;
	int cpu;

	pdi->skips = 0;
	pdi->seq_skips = 0;

	for (cpu = 0; cpu < pdi->ncpus; cpu++) {
		struct per_cpu_info *pci = &pdi->cpus[cpu];

		for (sip = pci->skips_head; sip != NULL; sip = sip->next) {
			pdi->skips++;
			pdi->seq_skips += (sip->end - sip->start + 1);
			if (verbose)
				fprintf(stderr, "(%d,%d): skipping %lu -> %lu\n",
					MAJOR(pdi->dev), MINOR(pdi->dev),
					sip->start, sip->end);
		}
	}
}
static void cpu_mark_online(struct per_dev_info *pdi, unsigned int cpu)
{
	if (cpu >= pdi->cpu_map_max || !pdi->cpu_map) {
		int new_max = (cpu + CPUS_PER_LONG) & ~(CPUS_PER_LONG - 1);
		unsigned long *map = malloc(new_max / sizeof(long));

		memset(map, 0, new_max / sizeof(long));

		if (pdi->cpu_map) {
			memcpy(map, pdi->cpu_map, pdi->cpu_map_max / sizeof(long));
			free(pdi->cpu_map);
		}

		pdi->cpu_map = map;
		pdi->cpu_map_max = new_max;
	}

	pdi->cpu_map[CPU_IDX(cpu)] |= (1UL << CPU_BIT(cpu));
}
static inline void cpu_mark_offline(struct per_dev_info *pdi, int cpu)
{
	pdi->cpu_map[CPU_IDX(cpu)] &= ~(1UL << CPU_BIT(cpu));
}

static inline int cpu_is_online(struct per_dev_info *pdi, int cpu)
{
	return (pdi->cpu_map[CPU_IDX(cpu)] & (1UL << CPU_BIT(cpu))) != 0;
}

static inline int ppm_hash_pid(pid_t pid)
{
	return jhash_1word(pid, JHASH_RANDOM) & PPM_HASH_MASK;
}

static struct process_pid_map *find_ppm(pid_t pid)
{
	const int hash_idx = ppm_hash_pid(pid);
	struct process_pid_map *ppm;

	ppm = ppm_hash_table[hash_idx];
	while (ppm) {
		if (ppm->pid == pid)
			return ppm;

		ppm = ppm->hash_next;
	}

	return NULL;
}
static void add_ppm_hash(pid_t pid, const char *name)
{
	const int hash_idx = ppm_hash_pid(pid);
	struct process_pid_map *ppm;

	ppm = find_ppm(pid);
	if (!ppm) {
		ppm = malloc(sizeof(*ppm));
		memset(ppm, 0, sizeof(*ppm));
		ppm->pid = pid;
		strcpy(ppm->comm, name);
		ppm->hash_next = ppm_hash_table[hash_idx];
		ppm_hash_table[hash_idx] = ppm;
	}
}
char *find_process_name(pid_t pid)
{
	struct process_pid_map *ppm = find_ppm(pid);

	if (ppm)
		return ppm->comm;

	return NULL;
}
static inline int ppi_hash_pid(pid_t pid)
{
	return jhash_1word(pid, JHASH_RANDOM) & PPI_HASH_MASK;
}

static inline int ppi_hash_name(const char *name)
{
	return jhash(name, 16, JHASH_RANDOM) & PPI_HASH_MASK;
}

static inline int ppi_hash(struct per_process_info *ppi)
{
	struct process_pid_map *ppm = ppi->ppm;

	if (ppi_hash_by_pid)
		return ppi_hash_pid(ppm->pid);

	return ppi_hash_name(ppm->comm);
}

static inline void add_ppi_to_hash(struct per_process_info *ppi)
{
	const int hash_idx = ppi_hash(ppi);

	ppi->hash_next = ppi_hash_table[hash_idx];
	ppi_hash_table[hash_idx] = ppi;
}
static inline void add_ppi_to_list(struct per_process_info *ppi)
{
	ppi->list_next = ppi_list;
	ppi_list = ppi;
	ppi_list_entries++;
}
static struct per_process_info *find_ppi_by_name(char *name)
{
	const int hash_idx = ppi_hash_name(name);
	struct per_process_info *ppi;

	ppi = ppi_hash_table[hash_idx];
	while (ppi) {
		struct process_pid_map *ppm = ppi->ppm;

		if (!strcmp(ppm->comm, name))
			return ppi;

		ppi = ppi->hash_next;
	}

	return NULL;
}
static struct per_process_info *find_ppi_by_pid(pid_t pid)
{
	const int hash_idx = ppi_hash_pid(pid);
	struct per_process_info *ppi;

	ppi = ppi_hash_table[hash_idx];
	while (ppi) {
		struct process_pid_map *ppm = ppi->ppm;

		if (ppm->pid == pid)
			return ppi;

		ppi = ppi->hash_next;
	}

	return NULL;
}
static struct per_process_info *find_ppi(pid_t pid)
{
	struct per_process_info *ppi;
	char *name;

	if (ppi_hash_by_pid)
		return find_ppi_by_pid(pid);

	name = find_process_name(pid);
	if (!name)
		return NULL;

	ppi = find_ppi_by_name(name);
	if (ppi && ppi->ppm->pid != pid)
		ppi->more_than_one = 1;

	return ppi;
}
/*
 * struct trace and blktrace allocation cache, we do potentially
 * millions of mallocs for these structures while only using at most
 * a few thousand at a time
 */
static inline void t_free(struct trace *t)
{
	if (t_alloc_cache < 1024) {
		t->next = t_alloc_list;
		t_alloc_list = t;
		t_alloc_cache++;
	} else
		free(t);
}

static inline struct trace *t_alloc(void)
{
	struct trace *t = t_alloc_list;

	if (t) {
		t_alloc_list = t->next;
		t_alloc_cache--;
		return t;
	}

	return malloc(sizeof(*t));
}
static inline void bit_free(struct blk_io_trace *bit)
{
	if (bit_alloc_cache < 1024 && !bit->pdu_len) {
		/*
		 * abuse a 64-bit field for a next pointer for the free item
		 */
		bit->time = (__u64) (unsigned long) bit_alloc_list;
		bit_alloc_list = (struct blk_io_trace *) bit;
		bit_alloc_cache++;
	} else
		free(bit);
}

static inline struct blk_io_trace *bit_alloc(void)
{
	struct blk_io_trace *bit = bit_alloc_list;

	if (bit) {
		bit_alloc_list = (struct blk_io_trace *) (unsigned long)
					bit->time;
		bit_alloc_cache--;
		return bit;
	}

	return malloc(sizeof(*bit));
}
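
/*
 * Putting the two caches together, steady-state churn costs no
 * malloc/free at all -- a sketch (error handling omitted):
 *
 *	struct blk_io_trace *bit = bit_alloc();	// pops the free list
 *	struct trace *t = t_alloc();
 *	t->bit = bit;
 *	...
 *	bit_free(bit);	// bit->time now doubles as the free-list link
 *	t_free(t);
 */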
static inline void __put_trace_last(struct per_dev_info *pdi, struct trace *t)
{
	struct per_cpu_info *pci = get_cpu_info(pdi, t->bit->cpu);

	rb_erase(&t->rb_node, &pci->rb_last);
	pci->rb_last_entries--;

	bit_free(t->bit);
	t_free(t);
}

static void put_trace(struct per_dev_info *pdi, struct trace *t)
{
	rb_erase(&t->rb_node, &rb_sort_root);
	rb_sort_entries--;

	trace_rb_insert_last(pdi, t);
}
static inline int trace_rb_insert(struct trace *t, struct rb_root *root)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct trace *__t;

	while (*p) {
		parent = *p;

		__t = rb_entry(parent, struct trace, rb_node);

		if (t->bit->time < __t->bit->time)
			p = &(*p)->rb_left;
		else if (t->bit->time > __t->bit->time)
			p = &(*p)->rb_right;
		else if (t->bit->device < __t->bit->device)
			p = &(*p)->rb_left;
		else if (t->bit->device > __t->bit->device)
			p = &(*p)->rb_right;
		else if (t->bit->sequence < __t->bit->sequence)
			p = &(*p)->rb_left;
		else	/* >= sequence */
			p = &(*p)->rb_right;
	}

	rb_link_node(&t->rb_node, parent, p);
	rb_insert_color(&t->rb_node, root);
	return 0;
}
static inline int trace_rb_insert_sort(struct trace *t)
{
	if (!trace_rb_insert(t, &rb_sort_root)) {
		rb_sort_entries++;
		return 0;
	}

	return 1;
}

static int trace_rb_insert_last(struct per_dev_info *pdi, struct trace *t)
{
	struct per_cpu_info *pci = get_cpu_info(pdi, t->bit->cpu);

	if (trace_rb_insert(t, &pci->rb_last))
		return 1;

	pci->rb_last_entries++;

	if (pci->rb_last_entries > rb_batch * pdi->nfiles) {
		struct rb_node *n = rb_first(&pci->rb_last);

		t = rb_entry(n, struct trace, rb_node);
		__put_trace_last(pdi, t);
	}

	return 0;
}
static struct trace *trace_rb_find(dev_t device, unsigned long sequence,
				   struct rb_root *root, int order)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct trace *__t;

	while (n) {
		__t = rb_entry(n, struct trace, rb_node);
		prev = n;

		if (device < __t->bit->device)
			n = n->rb_left;
		else if (device > __t->bit->device)
			n = n->rb_right;
		else if (sequence < __t->bit->sequence)
			n = n->rb_left;
		else if (sequence > __t->bit->sequence)
			n = n->rb_right;
		else
			return __t;
	}

	/*
	 * hack - the list may not be sequence ordered because some
	 * events don't have sequence and time matched. so we end up
	 * being a little off in the rb lookup here, because we don't
	 * know the time we are looking for. compensate by browsing
	 * a little ahead from the last entry to find the match
	 */
	if (order && prev) {
		int max = order;

		while (((n = rb_next(prev)) != NULL) && max--) {
			__t = rb_entry(n, struct trace, rb_node);

			if (__t->bit->device == device &&
			    __t->bit->sequence == sequence)
				return __t;

			prev = n;
		}
	}

	return NULL;
}

static inline struct trace *trace_rb_find_last(struct per_dev_info *pdi,
					       struct per_cpu_info *pci,
					       unsigned long seq)
{
	return trace_rb_find(pdi->dev, seq, &pci->rb_last, 0);
}
static inline int track_rb_insert(struct per_dev_info *pdi, struct io_track *iot)
{
	struct rb_node **p = &pdi->rb_track.rb_node;
	struct rb_node *parent = NULL;
	struct io_track *__iot;

	while (*p) {
		parent = *p;
		__iot = rb_entry(parent, struct io_track, rb_node);

		if (iot->sector < __iot->sector)
			p = &(*p)->rb_left;
		else if (iot->sector > __iot->sector)
			p = &(*p)->rb_right;
		else {
			fprintf(stderr,
				"sector alias (%Lu) on device %d,%d!\n",
				(unsigned long long) iot->sector,
				MAJOR(pdi->dev), MINOR(pdi->dev));
			return 1;
		}
	}

	rb_link_node(&iot->rb_node, parent, p);
	rb_insert_color(&iot->rb_node, &pdi->rb_track);
	return 0;
}
static struct io_track *__find_track(struct per_dev_info *pdi, __u64 sector)
{
	struct rb_node *n = pdi->rb_track.rb_node;
	struct io_track *__iot;

	while (n) {
		__iot = rb_entry(n, struct io_track, rb_node);

		if (sector < __iot->sector)
			n = n->rb_left;
		else if (sector > __iot->sector)
			n = n->rb_right;
		else
			return __iot;
	}

	return NULL;
}
static struct io_track *find_track(struct per_dev_info *pdi, pid_t pid,
				   __u64 sector)
{
	struct io_track *iot;

	iot = __find_track(pdi, sector);
	if (!iot) {
		iot = malloc(sizeof(*iot));
		memset(iot, 0, sizeof(*iot));	/* per-stage timestamps must start at 0 */
		iot->ppm = find_ppm(pid);
		iot->sector = sector;
		track_rb_insert(pdi, iot);
	}

	return iot;
}
static void log_track_frontmerge(struct per_dev_info *pdi,
				 struct blk_io_trace *t)
{
	struct io_track *iot;

	if (!track_ios)
		return;

	iot = __find_track(pdi, t->sector + t_sec(t));
	if (!iot) {
		if (verbose)
			fprintf(stderr, "merge not found for (%d,%d): %llu\n",
				MAJOR(pdi->dev), MINOR(pdi->dev),
				(unsigned long long) t->sector + t_sec(t));
		return;
	}

	rb_erase(&iot->rb_node, &pdi->rb_track);
	iot->sector -= t_sec(t);
	track_rb_insert(pdi, iot);
}
static void log_track_getrq(struct per_dev_info *pdi, struct blk_io_trace *t)
{
	struct io_track *iot;

	if (!track_ios)
		return;

	iot = find_track(pdi, t->pid, t->sector);
	iot->allocation_time = t->time;
}
static inline int is_remapper(struct per_dev_info *pdi)
{
	int major = MAJOR(pdi->dev);

	return (major == 253 || major == 9);	/* device-mapper or md */
}

/*
 * for md/dm setups, the interesting cycle is Q -> C. So track queueing
 * time here, as dispatch time
 */
static void log_track_queue(struct per_dev_info *pdi, struct blk_io_trace *t)
{
	struct io_track *iot;

	if (!track_ios)
		return;
	if (!is_remapper(pdi))
		return;

	iot = find_track(pdi, t->pid, t->sector);
	iot->dispatch_time = t->time;
}
/*
 * return time between rq allocation and insertion
 */
static unsigned long long log_track_insert(struct per_dev_info *pdi,
					   struct blk_io_trace *t)
{
	unsigned long long elapsed;
	struct io_track *iot;

	if (!track_ios)
		return -1;

	iot = find_track(pdi, t->pid, t->sector);
	iot->queue_time = t->time;

	if (!iot->allocation_time)
		return -1;

	elapsed = iot->queue_time - iot->allocation_time;

	if (per_process_stats) {
		struct per_process_info *ppi = find_ppi(iot->ppm->pid);
		int w = (t->action & BLK_TC_ACT(BLK_TC_WRITE)) != 0;

		if (ppi && elapsed > ppi->longest_allocation_wait[w])
			ppi->longest_allocation_wait[w] = elapsed;
	}

	return elapsed;
}
/*
 * return time between queue and issue
 */
static unsigned long long log_track_issue(struct per_dev_info *pdi,
					  struct blk_io_trace *t)
{
	unsigned long long elapsed;
	struct io_track *iot;

	if (!track_ios)
		return -1;
	if ((t->action & BLK_TC_ACT(BLK_TC_FS)) == 0)
		return -1;

	iot = __find_track(pdi, t->sector);
	if (!iot) {
		if (verbose)
			fprintf(stderr, "issue not found for (%d,%d): %llu\n",
				MAJOR(pdi->dev), MINOR(pdi->dev),
				(unsigned long long) t->sector);
		return -1;
	}

	iot->dispatch_time = t->time;
	elapsed = iot->dispatch_time - iot->queue_time;

	if (per_process_stats) {
		struct per_process_info *ppi = find_ppi(iot->ppm->pid);
		int w = (t->action & BLK_TC_ACT(BLK_TC_WRITE)) != 0;

		if (ppi && elapsed > ppi->longest_dispatch_wait[w])
			ppi->longest_dispatch_wait[w] = elapsed;
	}

	return elapsed;
}
/*
 * return time between dispatch and complete
 */
static unsigned long long log_track_complete(struct per_dev_info *pdi,
					     struct blk_io_trace *t)
{
	unsigned long long elapsed;
	struct io_track *iot;

	if (!track_ios)
		return -1;

	iot = __find_track(pdi, t->sector);
	if (!iot) {
		if (verbose)
			fprintf(stderr, "complete not found for (%d,%d): %llu\n",
				MAJOR(pdi->dev), MINOR(pdi->dev),
				(unsigned long long) t->sector);
		return -1;
	}

	iot->completion_time = t->time;
	elapsed = iot->completion_time - iot->dispatch_time;

	if (per_process_stats) {
		struct per_process_info *ppi = find_ppi(iot->ppm->pid);
		int w = (t->action & BLK_TC_ACT(BLK_TC_WRITE)) != 0;

		if (ppi && elapsed > ppi->longest_completion_wait[w])
			ppi->longest_completion_wait[w] = elapsed;
	}

	/*
	 * kill the trace, we don't need it after completion
	 */
	rb_erase(&iot->rb_node, &pdi->rb_track);
	free(iot);

	return elapsed;
}
static struct io_stats *find_process_io_stats(pid_t pid)
{
	struct per_process_info *ppi = find_ppi(pid);

	if (!ppi) {
		ppi = malloc(sizeof(*ppi));
		memset(ppi, 0, sizeof(*ppi));
		ppi->ppm = find_ppm(pid);
		add_ppi_to_hash(ppi);
		add_ppi_to_list(ppi);
	}

	return &ppi->io_stats;
}
static char *get_dev_name(struct per_dev_info *pdi, char *buffer, int size)
{
	if (pdi->name)
		snprintf(buffer, size, "%s", pdi->name);
	else
		snprintf(buffer, size, "%d,%d", MAJOR(pdi->dev), MINOR(pdi->dev));

	return buffer;
}
static void check_time(struct per_dev_info *pdi, struct blk_io_trace *bit)
{
	unsigned long long this = bit->time;
	unsigned long long last = pdi->last_reported_time;

	pdi->backwards = (this < last) ? 'B' : ' ';
	pdi->last_reported_time = this;
}
static inline void __account_m(struct io_stats *ios, struct blk_io_trace *t,
			       int rw)
{
	if (rw) {
		ios->mwrites++;
		ios->qwrite_kb += t_kb(t);
	} else {
		ios->mreads++;
		ios->qread_kb += t_kb(t);
	}
}

static inline void account_m(struct blk_io_trace *t, struct per_cpu_info *pci,
			     int rw)
{
	__account_m(&pci->io_stats, t, rw);

	if (per_process_stats) {
		struct io_stats *ios = find_process_io_stats(t->pid);

		__account_m(ios, t, rw);
	}
}
static inline void __account_queue(struct io_stats *ios, struct blk_io_trace *t,
				   int rw)
{
	if (rw) {
		ios->qwrites++;
		ios->qwrite_kb += t_kb(t);
	} else {
		ios->qreads++;
		ios->qread_kb += t_kb(t);
	}
}

static inline void account_queue(struct blk_io_trace *t,
				 struct per_cpu_info *pci, int rw)
{
	__account_queue(&pci->io_stats, t, rw);

	if (per_process_stats) {
		struct io_stats *ios = find_process_io_stats(t->pid);

		__account_queue(ios, t, rw);
	}
}
static inline void __account_c(struct io_stats *ios, int rw, int bytes)
{
	if (rw) {
		ios->cwrites++;
		ios->cwrite_kb += bytes >> 10;
	} else {
		ios->creads++;
		ios->cread_kb += bytes >> 10;
	}
}

static inline void account_c(struct blk_io_trace *t, struct per_cpu_info *pci,
			     int rw, int bytes)
{
	__account_c(&pci->io_stats, rw, bytes);

	if (per_process_stats) {
		struct io_stats *ios = find_process_io_stats(t->pid);

		__account_c(ios, rw, bytes);
	}
}
static inline void __account_issue(struct io_stats *ios, int rw,
				   unsigned int bytes)
{
	if (rw) {
		ios->iwrites++;
		ios->iwrite_kb += bytes >> 10;
	} else {
		ios->ireads++;
		ios->iread_kb += bytes >> 10;
	}
}

static inline void account_issue(struct blk_io_trace *t,
				 struct per_cpu_info *pci, int rw)
{
	__account_issue(&pci->io_stats, rw, t->bytes);

	if (per_process_stats) {
		struct io_stats *ios = find_process_io_stats(t->pid);

		__account_issue(ios, rw, t->bytes);
	}
}
static inline void __account_unplug(struct io_stats *ios, int timer)
{
	if (timer)
		ios->timer_unplugs++;
	else
		ios->io_unplugs++;
}

static inline void account_unplug(struct blk_io_trace *t,
				  struct per_cpu_info *pci, int timer)
{
	__account_unplug(&pci->io_stats, timer);

	if (per_process_stats) {
		struct io_stats *ios = find_process_io_stats(t->pid);

		__account_unplug(ios, timer);
	}
}
static inline void __account_requeue(struct io_stats *ios,
				     struct blk_io_trace *t, int rw)
{
	if (rw) {
		ios->wrqueue++;
		ios->iwrite_kb -= t_kb(t);
	} else {
		ios->rrqueue++;
		ios->iread_kb -= t_kb(t);
	}
}

static inline void account_requeue(struct blk_io_trace *t,
				   struct per_cpu_info *pci, int rw)
{
	__account_requeue(&pci->io_stats, t, rw);

	if (per_process_stats) {
		struct io_stats *ios = find_process_io_stats(t->pid);

		__account_requeue(ios, t, rw);
	}
}
static void log_complete(struct per_dev_info *pdi, struct per_cpu_info *pci,
			 struct blk_io_trace *t, char *act)
{
	process_fmt(act, pci, t, log_track_complete(pdi, t), 0, NULL);
}

static void log_insert(struct per_dev_info *pdi, struct per_cpu_info *pci,
		       struct blk_io_trace *t, char *act)
{
	process_fmt(act, pci, t, log_track_insert(pdi, t), 0, NULL);
}

static void log_queue(struct per_cpu_info *pci, struct blk_io_trace *t,
		      char *act)
{
	process_fmt(act, pci, t, -1, 0, NULL);
}

static void log_issue(struct per_dev_info *pdi, struct per_cpu_info *pci,
		      struct blk_io_trace *t, char *act)
{
	process_fmt(act, pci, t, log_track_issue(pdi, t), 0, NULL);
}

static void log_merge(struct per_dev_info *pdi, struct per_cpu_info *pci,
		      struct blk_io_trace *t, char *act)
{
	if (act[0] == 'F')
		log_track_frontmerge(pdi, t);

	process_fmt(act, pci, t, -1ULL, 0, NULL);
}

static void log_action(struct per_cpu_info *pci, struct blk_io_trace *t,
		       char *act)
{
	process_fmt(act, pci, t, -1ULL, 0, NULL);
}

static void log_generic(struct per_cpu_info *pci, struct blk_io_trace *t,
			char *act)
{
	process_fmt(act, pci, t, -1ULL, 0, NULL);
}

static void log_unplug(struct per_cpu_info *pci, struct blk_io_trace *t,
		       char *act)
{
	process_fmt(act, pci, t, -1ULL, 0, NULL);
}

static void log_split(struct per_cpu_info *pci, struct blk_io_trace *t,
		      char *act)
{
	process_fmt(act, pci, t, -1ULL, 0, NULL);
}

static void log_pc(struct per_cpu_info *pci, struct blk_io_trace *t, char *act)
{
	unsigned char *buf = (unsigned char *) t + sizeof(*t);

	process_fmt(act, pci, t, -1ULL, t->pdu_len, buf);
}
static void dump_trace_pc(struct blk_io_trace *t, struct per_cpu_info *pci)
{
	int act = t->action & 0xffff;

	switch (act) {
	case __BLK_TA_QUEUE:
		log_generic(pci, t, "Q");
		break;
	case __BLK_TA_GETRQ:
		log_generic(pci, t, "G");
		break;
	case __BLK_TA_SLEEPRQ:
		log_generic(pci, t, "S");
		break;
	case __BLK_TA_REQUEUE:
		log_generic(pci, t, "R");
		break;
	case __BLK_TA_ISSUE:
		log_pc(pci, t, "D");
		break;
	case __BLK_TA_COMPLETE:
		log_pc(pci, t, "C");
		break;
	case __BLK_TA_INSERT:
		log_pc(pci, t, "I");
		break;
	default:
		fprintf(stderr, "Bad pc action %x\n", act);
		break;
	}
}
static void dump_trace_fs(struct blk_io_trace *t, struct per_dev_info *pdi,
			  struct per_cpu_info *pci)
{
	int w = (t->action & BLK_TC_ACT(BLK_TC_WRITE)) != 0;
	int act = t->action & 0xffff;

	switch (act) {
	case __BLK_TA_QUEUE:
		log_track_queue(pdi, t);
		account_queue(t, pci, w);
		log_queue(pci, t, "Q");
		break;
	case __BLK_TA_INSERT:
		log_insert(pdi, pci, t, "I");
		break;
	case __BLK_TA_BACKMERGE:
		account_m(t, pci, w);
		log_merge(pdi, pci, t, "M");
		break;
	case __BLK_TA_FRONTMERGE:
		account_m(t, pci, w);
		log_merge(pdi, pci, t, "F");
		break;
	case __BLK_TA_GETRQ:
		log_track_getrq(pdi, t);
		log_generic(pci, t, "G");
		break;
	case __BLK_TA_SLEEPRQ:
		log_generic(pci, t, "S");
		break;
	case __BLK_TA_REQUEUE:
		/*
		 * can happen if we miss traces, don't let it go
		 * below zero
		 */
		if (pdi->cur_depth[w])
			pdi->cur_depth[w]--;
		account_requeue(t, pci, w);
		log_queue(pci, t, "R");
		break;
	case __BLK_TA_ISSUE:
		account_issue(t, pci, w);
		pdi->cur_depth[w]++;
		if (pdi->cur_depth[w] > pdi->max_depth[w])
			pdi->max_depth[w] = pdi->cur_depth[w];
		log_issue(pdi, pci, t, "D");
		break;
	case __BLK_TA_COMPLETE:
		if (pdi->cur_depth[w])
			pdi->cur_depth[w]--;
		account_c(t, pci, w, t->bytes);
		log_complete(pdi, pci, t, "C");
		break;
	case __BLK_TA_PLUG:
		log_action(pci, t, "P");
		break;
	case __BLK_TA_UNPLUG_IO:
		account_unplug(t, pci, 0);
		log_unplug(pci, t, "U");
		break;
	case __BLK_TA_UNPLUG_TIMER:
		account_unplug(t, pci, 1);
		log_unplug(pci, t, "UT");
		break;
	case __BLK_TA_SPLIT:
		log_split(pci, t, "X");
		break;
	case __BLK_TA_BOUNCE:
		log_generic(pci, t, "B");
		break;
	case __BLK_TA_REMAP:
		log_generic(pci, t, "A");
		break;
	default:
		fprintf(stderr, "Bad fs action %x\n", t->action);
		break;
	}
}

static void dump_trace(struct blk_io_trace *t, struct per_cpu_info *pci,
		       struct per_dev_info *pdi)
{
	if (t->action & BLK_TC_ACT(BLK_TC_PC))
		dump_trace_pc(t, pci);
	else
		dump_trace_fs(t, pdi, pci);

	if (!pdi->events)
		pdi->first_reported_time = t->time;

	pdi->events++;

	output_binary(t, sizeof(*t) + t->pdu_len);
}
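
/*
 * For reference, the single-character actions emitted above map to
 * trace events as follows:
 *
 *	Q queue		G getrq		S sleeprq	I insert
 *	M backmerge	F frontmerge	D issue		C complete
 *	R requeue	P plug		U/UT unplug	X split
 *	B bounce	A remap
 */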
/*
 * print in a proper way, not too small and not too big. if more than
 * 1,000,000K, turn into M and so on
 */
static char *size_cnv(char *dst, unsigned long long num, int in_kb)
{
	char suff[] = { '\0', 'K', 'M', 'G', 'P' };
	unsigned int i = 0;

	if (in_kb)
		i++;

	while (num > 1000 * 1000ULL && (i < sizeof(suff) - 1)) {
		i++;
		num /= 1000;
	}

	sprintf(dst, "%'8Lu%c", num, suff[i]);
	return dst;
}
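
/*
 * Worked example: size_cnv(buf, 2500000, 1) starts at 'K' because of
 * in_kb, scales once to 2500 and yields "   2,500M" -- the %' flag
 * picks up the thousands separator from the locale set in main().
 */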
static void dump_io_stats(struct per_dev_info *pdi, struct io_stats *ios,
			  char *msg)
{
	static char x[256], y[256];

	fprintf(ofp, "%s\n", msg);

	fprintf(ofp, " Reads Queued: %s, %siB\t", size_cnv(x, ios->qreads, 0), size_cnv(y, ios->qread_kb, 1));
	fprintf(ofp, " Writes Queued: %s, %siB\n", size_cnv(x, ios->qwrites, 0), size_cnv(y, ios->qwrite_kb, 1));
	fprintf(ofp, " Read Dispatches: %s, %siB\t", size_cnv(x, ios->ireads, 0), size_cnv(y, ios->iread_kb, 1));
	fprintf(ofp, " Write Dispatches: %s, %siB\n", size_cnv(x, ios->iwrites, 0), size_cnv(y, ios->iwrite_kb, 1));
	fprintf(ofp, " Reads Requeued: %s\t\t", size_cnv(x, ios->rrqueue, 0));
	fprintf(ofp, " Writes Requeued: %s\n", size_cnv(x, ios->wrqueue, 0));
	fprintf(ofp, " Reads Completed: %s, %siB\t", size_cnv(x, ios->creads, 0), size_cnv(y, ios->cread_kb, 1));
	fprintf(ofp, " Writes Completed: %s, %siB\n", size_cnv(x, ios->cwrites, 0), size_cnv(y, ios->cwrite_kb, 1));
	fprintf(ofp, " Read Merges: %'8lu%8c\t", ios->mreads, ' ');
	fprintf(ofp, " Write Merges: %'8lu\n", ios->mwrites);
	if (pdi) {
		fprintf(ofp, " Read depth: %'8u%8c\t", pdi->max_depth[0], ' ');
		fprintf(ofp, " Write depth: %'8u\n", pdi->max_depth[1]);
	}
	fprintf(ofp, " IO unplugs: %'8lu%8c\t", ios->io_unplugs, ' ');
	fprintf(ofp, " Timer unplugs: %'8lu\n", ios->timer_unplugs);
}
static void dump_wait_stats(struct per_process_info *ppi)
{
	unsigned long rawait = ppi->longest_allocation_wait[0] / 1000;	/* nsec -> usec */
	unsigned long rdwait = ppi->longest_dispatch_wait[0] / 1000;
	unsigned long rcwait = ppi->longest_completion_wait[0] / 1000;
	unsigned long wawait = ppi->longest_allocation_wait[1] / 1000;
	unsigned long wdwait = ppi->longest_dispatch_wait[1] / 1000;
	unsigned long wcwait = ppi->longest_completion_wait[1] / 1000;

	fprintf(ofp, " Allocation wait: %'8lu%8c\t", rawait, ' ');
	fprintf(ofp, " Allocation wait: %'8lu\n", wawait);
	fprintf(ofp, " Dispatch wait: %'8lu%8c\t", rdwait, ' ');
	fprintf(ofp, " Dispatch wait: %'8lu\n", wdwait);
	fprintf(ofp, " Completion wait: %'8lu%8c\t", rcwait, ' ');
	fprintf(ofp, " Completion wait: %'8lu\n", wcwait);
}
static int ppi_name_compare(const void *p1, const void *p2)
{
	struct per_process_info *ppi1 = *((struct per_process_info **) p1);
	struct per_process_info *ppi2 = *((struct per_process_info **) p2);
	int res;

	res = strverscmp(ppi1->ppm->comm, ppi2->ppm->comm);
	if (!res)
		res = ppi1->ppm->pid > ppi2->ppm->pid;

	return res;
}
static void sort_process_list(void)
{
	struct per_process_info **ppis;
	struct per_process_info *ppi;
	int i = 0;

	ppis = malloc(ppi_list_entries * sizeof(struct per_process_info *));

	ppi = ppi_list;
	while (ppi) {
		ppis[i++] = ppi;
		ppi = ppi->list_next;
	}

	qsort(ppis, ppi_list_entries, sizeof(ppi), ppi_name_compare);

	i = ppi_list_entries - 1;
	ppi_list = NULL;
	while (i >= 0) {
		ppi = ppis[i];
		ppi->list_next = ppi_list;
		ppi_list = ppi;
		i--;
	}

	free(ppis);
}
static void show_process_stats(void)
{
	struct per_process_info *ppi;

	sort_process_list();

	ppi = ppi_list;
	while (ppi) {
		struct process_pid_map *ppm = ppi->ppm;
		char name[64];

		if (ppi->more_than_one)
			sprintf(name, "%s (%u, ...)", ppm->comm, ppm->pid);
		else
			sprintf(name, "%s (%u)", ppm->comm, ppm->pid);

		dump_io_stats(NULL, &ppi->io_stats, name);
		dump_wait_stats(ppi);
		ppi = ppi->list_next;
	}
}
static void show_device_and_cpu_stats(void)
{
	struct per_dev_info *pdi;
	struct per_cpu_info *pci;
	struct io_stats total, *ios;
	unsigned long long rrate, wrate, msec;
	int i, j, pci_events;
	char line[3 + 8/*cpu*/ + 2 + 32/*dev*/ + 3];
	char name[32];

	for (pdi = devices, i = 0; i < ndevices; i++, pdi++) {

		memset(&total, 0, sizeof(total));
		pci_events = 0;

		if (i > 0)
			fprintf(ofp, "\n");

		for (pci = pdi->cpus, j = 0; j < pdi->ncpus; j++, pci++) {
			if (!pci->nelems)
				continue;

			ios = &pci->io_stats;
			total.qreads += ios->qreads;
			total.qwrites += ios->qwrites;
			total.creads += ios->creads;
			total.cwrites += ios->cwrites;
			total.mreads += ios->mreads;
			total.mwrites += ios->mwrites;
			total.ireads += ios->ireads;
			total.iwrites += ios->iwrites;
			total.rrqueue += ios->rrqueue;
			total.wrqueue += ios->wrqueue;
			total.qread_kb += ios->qread_kb;
			total.qwrite_kb += ios->qwrite_kb;
			total.cread_kb += ios->cread_kb;
			total.cwrite_kb += ios->cwrite_kb;
			total.iread_kb += ios->iread_kb;
			total.iwrite_kb += ios->iwrite_kb;
			total.timer_unplugs += ios->timer_unplugs;
			total.io_unplugs += ios->io_unplugs;

			snprintf(line, sizeof(line) - 1, "CPU%d (%s):",
				 j, get_dev_name(pdi, name, sizeof(name)));
			dump_io_stats(pdi, ios, line);
			pci_events++;
		}

		if (pci_events > 1) {
			fprintf(ofp, "\n");
			snprintf(line, sizeof(line) - 1, "Total (%s):",
				 get_dev_name(pdi, name, sizeof(name)));
			dump_io_stats(NULL, &total, line);
		}

		wrate = rrate = 0;
		msec = (pdi->last_reported_time - pdi->first_reported_time) / 1000000;
		if (msec) {
			rrate = 1000 * total.cread_kb / msec;
			wrate = 1000 * total.cwrite_kb / msec;
		}

		fprintf(ofp, "\nThroughput (R/W): %'LuKiB/s / %'LuKiB/s\n",
			rrate, wrate);
		fprintf(ofp, "Events (%s): %'Lu entries\n",
			get_dev_name(pdi, line, sizeof(line)), pdi->events);

		collect_pdi_skips(pdi);
		fprintf(ofp, "Skips: %'lu forward (%'llu - %5.1lf%%)\n",
			pdi->skips, pdi->seq_skips,
			100.0 * ((double)pdi->seq_skips /
				(double)(pdi->events + pdi->seq_skips)));
	}
}
static void find_genesis(void)
{
	struct trace *t = trace_list;

	genesis_time = -1ULL;
	while (t != NULL) {
		if (t->bit->time < genesis_time)
			genesis_time = t->bit->time;

		t = t->next;
	}
}

static inline int check_stopwatch(struct blk_io_trace *bit)
{
	if (bit->time < stopwatch_end &&
	    bit->time >= stopwatch_start)
		return 0;

	return 1;
}
/*
 * return youngest entry read
 */
static int sort_entries(unsigned long long *youngest)
{
	struct per_dev_info *pdi = NULL;
	struct per_cpu_info *pci = NULL;
	struct trace *t;

	if (!genesis_time)
		find_genesis();

	*youngest = 0;
	while ((t = trace_list) != NULL) {
		struct blk_io_trace *bit = t->bit;

		trace_list = t->next;

		bit->time -= genesis_time;

		if (bit->time < *youngest || !*youngest)
			*youngest = bit->time;

		if (!pdi || pdi->dev != bit->device) {
			pdi = get_dev_info(bit->device);
			pci = NULL;
		}

		if (!pci || pci->cpu != bit->cpu)
			pci = get_cpu_info(pdi, bit->cpu);

		if (bit->sequence < pci->smallest_seq_read)
			pci->smallest_seq_read = bit->sequence;

		if (check_stopwatch(bit)) {
			bit_free(bit);
			t_free(t);
			continue;
		}

		if (trace_rb_insert_sort(t))
			return -1;
	}

	return 0;
}
/*
 * to continue, we must have traces from all online cpus in the tree
 */
static int check_cpu_map(struct per_dev_info *pdi)
{
	unsigned long *cpu_map;
	struct rb_node *n;
	struct trace *__t;
	unsigned int i;
	int ret, cpu;

	/*
	 * create a map of the cpus we have traces for
	 */
	cpu_map = malloc(pdi->cpu_map_max / sizeof(long));
	memset(cpu_map, 0, pdi->cpu_map_max / sizeof(long));
	n = rb_first(&rb_sort_root);
	while (n) {
		__t = rb_entry(n, struct trace, rb_node);
		cpu = __t->bit->cpu;

		cpu_map[CPU_IDX(cpu)] |= (1UL << CPU_BIT(cpu));
		n = rb_next(n);
	}

	/*
	 * we can't continue if pdi->cpu_map has entries set that we don't
	 * have in the sort rbtree. the opposite is not a problem, though
	 */
	ret = 0;
	for (i = 0; i < pdi->cpu_map_max / CPUS_PER_LONG; i++) {
		if (pdi->cpu_map[i] & ~(cpu_map[i])) {
			ret = 1;
			break;
		}
	}

	free(cpu_map);
	return ret;
}
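
/*
 * Example: if cpus 0, 1 and 3 are online (pdi->cpu_map = 0b1011) but
 * the sort tree only holds traces from cpus 0 and 1 (cpu_map = 0b0011),
 * then 0b1011 & ~0b0011 = 0b1000 is non-zero and we must keep waiting
 * for cpu 3's stream before emitting anything.
 */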
static int check_sequence(struct per_dev_info *pdi, struct trace *t, int force)
{
	struct blk_io_trace *bit = t->bit;
	unsigned long expected_sequence;
	struct per_cpu_info *pci;
	struct trace *__t;

	pci = get_cpu_info(pdi, bit->cpu);
	expected_sequence = pci->last_sequence + 1;

	if (!expected_sequence) {
		/*
		 * 1 should be the first entry, just allow it
		 */
		if (bit->sequence == 1)
			return 0;
		if (bit->sequence == pci->smallest_seq_read)
			return 0;

		return check_cpu_map(pdi);
	}

	if (bit->sequence == expected_sequence)
		return 0;

	/*
	 * we may not have seen that sequence yet. if we are not doing
	 * the final run, break and wait for more entries.
	 */
	if (expected_sequence < pci->smallest_seq_read) {
		__t = trace_rb_find_last(pdi, pci, expected_sequence);
		if (!__t)
			goto skip;

		__put_trace_last(pdi, __t);
		return 0;
	} else if (!force) {
		return 1;
	} else {
skip:
		if (check_current_skips(pci, bit->sequence))
			return 0;

		if (expected_sequence < bit->sequence)
			insert_skip(pci, expected_sequence, bit->sequence - 1);
		return 0;
	}
}
static void show_entries_rb(int force)
{
	struct per_dev_info *pdi = NULL;
	struct per_cpu_info *pci = NULL;
	struct blk_io_trace *bit;
	struct rb_node *n;
	struct trace *t;

	while ((n = rb_first(&rb_sort_root)) != NULL) {
		if (is_done() && !force && !pipeline)
			break;

		t = rb_entry(n, struct trace, rb_node);
		bit = t->bit;

		if (read_sequence - t->read_sequence < 1 && !force)
			break;

		if (!pdi || pdi->dev != bit->device) {
			pdi = get_dev_info(bit->device);
			pci = NULL;
		}

		if (!pdi) {
			fprintf(stderr, "Unknown device ID? (%d,%d)\n",
				MAJOR(bit->device), MINOR(bit->device));
			break;
		}

		if (check_sequence(pdi, t, force))
			break;

		if (!force && bit->time > last_allowed_time)
			break;

		check_time(pdi, bit);

		if (!pci || pci->cpu != bit->cpu)
			pci = get_cpu_info(pdi, bit->cpu);

		pci->last_sequence = bit->sequence;

		pci->nelems++;

		if (bit->action & (act_mask << BLK_TC_SHIFT))
			dump_trace(bit, pci, pdi);

		put_trace(pdi, t);
	}
}
static int read_data(int fd, void *buffer, int bytes, int block, int *fdblock)
{
	int ret, bytes_left, fl;
	void *p;

	if (block != *fdblock) {
		fl = fcntl(fd, F_GETFL);

		if (!block) {
			*fdblock = 0;
			fcntl(fd, F_SETFL, fl | O_NONBLOCK);
		} else {
			*fdblock = 1;
			fcntl(fd, F_SETFL, fl & ~O_NONBLOCK);
		}
	}

	bytes_left = bytes;
	p = buffer;
	while (bytes_left > 0) {
		ret = read(fd, p, bytes_left);
		if (!ret)
			return 1;
		else if (ret < 0) {
			if (errno != EAGAIN) {
				perror("read");
				return -1;
			}

			/*
			 * never do partial reads. we can return if we
			 * didn't read anything and we should not block,
			 * otherwise wait for data
			 */
			if ((bytes_left == bytes) && !block)
				return 1;

			usleep(10);
		} else {
			p += ret;
			bytes_left -= ret;
		}
	}

	return 0;
}
static inline __u16 get_pdulen(struct blk_io_trace *bit)
{
	if (data_is_native)
		return bit->pdu_len;

	return __bswap_16(bit->pdu_len);
}

static inline __u32 get_magic(struct blk_io_trace *bit)
{
	if (data_is_native)
		return bit->magic;

	return __bswap_32(bit->magic);
}
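
/*
 * Roughly how data_is_native gets settled on the very first trace -- a
 * sketch of what check_data_endianness() is assumed to boil down to,
 * given the magic check below:
 *
 *	if ((magic & 0xffffff00) == BLK_IO_TRACE_MAGIC)
 *		data_is_native = 1;
 *	else if ((__bswap_32(magic) & 0xffffff00) == BLK_IO_TRACE_MAGIC)
 *		data_is_native = 0;
 *	else
 *		return -1;	// not a blktrace stream at all
 */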
static int read_events(int fd, int always_block, int *fdblock)
{
	struct per_dev_info *pdi = NULL;
	unsigned int events = 0;

	while (!is_done() && events < rb_batch) {
		struct blk_io_trace *bit;
		struct trace *t;
		int pdu_len, should_block, ret;
		__u32 magic;

		bit = bit_alloc();

		should_block = !events || always_block;

		ret = read_data(fd, bit, sizeof(*bit), should_block, fdblock);
		if (ret) {
			bit_free(bit);
			if (!events && ret < 0)
				events = ret;
			break;
		}

		/*
		 * look at first trace to check whether we need to convert
		 * data in the future
		 */
		if (data_is_native == -1 && check_data_endianness(bit->magic))
			break;

		magic = get_magic(bit);
		if ((magic & 0xffffff00) != BLK_IO_TRACE_MAGIC) {
			fprintf(stderr, "Bad magic %x\n", magic);
			break;
		}

		pdu_len = get_pdulen(bit);
		if (pdu_len) {
			void *ptr = realloc(bit, sizeof(*bit) + pdu_len);

			if (read_data(fd, ptr + sizeof(*bit), pdu_len, 1, fdblock)) {
				bit_free(ptr);
				break;
			}

			bit = ptr;
		}

		trace_to_cpu(bit);

		if (verify_trace(bit)) {
			bit_free(bit);
			continue;
		}

		/*
		 * not a real trace, so grab and handle it here
		 */
		if (bit->action & BLK_TC_ACT(BLK_TC_NOTIFY)) {
			add_ppm_hash(bit->pid, (char *) bit + sizeof(*bit));
			output_binary(bit, sizeof(*bit) + bit->pdu_len);
			bit_free(bit);
			continue;
		}

		t = t_alloc();
		memset(t, 0, sizeof(*t));
		t->bit = bit;
		t->read_sequence = read_sequence;

		t->next = trace_list;
		trace_list = t;

		if (!pdi || pdi->dev != bit->device)
			pdi = get_dev_info(bit->device);

		if (bit->time > pdi->last_read_time)
			pdi->last_read_time = bit->time;

		events++;
	}

	return events;
}
static int do_file(void)
{
	struct per_cpu_info *pci;
	struct per_dev_info *pdi;
	int i, j, events, events_added;

	/*
	 * first prepare all files for reading
	 */
	for (i = 0; i < ndevices; i++) {
		pdi = &devices[i];
		pdi->nfiles = 0;

		for (j = 0;; j++) {
			struct stat st;
			int len = 0;
			char *p, *dname;

			pci = get_cpu_info(pdi, j);
			pci->cpu = j;
			pci->fd = -1;
			pci->fdblock = -1;

			p = strdup(pdi->name);
			dname = dirname(p);
			if (strcmp(dname, ".")) {
				input_dir = dname;
				p = strdup(pdi->name);
				strcpy(pdi->name, basename(p));
			}

			if (input_dir)
				len = sprintf(pci->fname, "%s/", input_dir);

			snprintf(pci->fname + len, sizeof(pci->fname)-1-len,
				 "%s.blktrace.%d", pdi->name, pci->cpu);
			if (stat(pci->fname, &st) < 0)
				break;

			pci->fd = open(pci->fname, O_RDONLY);
			if (pci->fd < 0) {
				perror(pci->fname);
				continue;
			}

			printf("Input file %s added\n", pci->fname);
			pdi->nfiles++;
			cpu_mark_online(pdi, pci->cpu);
		}
	}

	/*
	 * now loop over the files reading in the data
	 */
	do {
		unsigned long long youngest;

		events_added = 0;
		last_allowed_time = -1ULL;
		read_sequence++;

		for (i = 0; i < ndevices; i++) {
			pdi = &devices[i];
			pdi->last_read_time = -1ULL;

			for (j = 0; j < pdi->nfiles; j++) {

				pci = get_cpu_info(pdi, j);

				if (pci->fd == -1)
					continue;

				pci->smallest_seq_read = -1;

				events = read_events(pci->fd, 1, &pci->fdblock);
				if (events <= 0) {
					cpu_mark_offline(pdi, pci->cpu);
					close(pci->fd);
					pci->fd = -1;
					continue;
				}

				if (pdi->last_read_time < last_allowed_time)
					last_allowed_time = pdi->last_read_time;

				events_added += events;
			}
		}

		if (sort_entries(&youngest))
			break;

		if (youngest > stopwatch_end)
			break;

		show_entries_rb(0);

	} while (events_added);

	if (rb_sort_entries)
		show_entries_rb(1);

	return 0;
}
static int do_stdin(void)
{
	unsigned long long youngest;
	int fd, events, fdblock;

	last_allowed_time = -1ULL;
	fd = dup(STDIN_FILENO);
	if (fd == -1) {
		perror("dup stdin");
		return -1;
	}

	fdblock = -1;
	while ((events = read_events(fd, 0, &fdblock)) > 0) {
		read_sequence++;

#if 0
		smallest_seq_read = -1U;
#endif

		if (sort_entries(&youngest))
			break;

		if (youngest > stopwatch_end)
			break;

		show_entries_rb(0);
	}

	if (rb_sort_entries)
		show_entries_rb(1);

	close(fd);
	return 0;
}
static void show_stats(void)
{
	if (!ofp)
		return;
	if (stats_printed)
		return;
	stats_printed = 1;

	if (per_process_stats)
		show_process_stats();

	if (per_device_and_cpu_stats)
		show_device_and_cpu_stats();

	fflush(ofp);
}
static void handle_sigint(__attribute__((__unused__)) int sig)
{
	done = 1;
}
/*
 * Extract start and end times from a string, allowing
 * us to specify a time interval of interest within a trace.
 * Format: "end" (start is zero) or "start:end", both in seconds.
 */
static int find_stopwatch_interval(char *string)
{
	double value;
	char *sp;

	value = strtod(string, &sp);
	if (sp == string) {
		fprintf(stderr, "Invalid stopwatch timer: %s\n", string);
		return 1;
	}
	if (*sp == ':') {
		stopwatch_start = DOUBLE_TO_NANO_ULL(value);
		string = sp + 1;
		value = strtod(string, &sp);
		if (sp == string || *sp != '\0') {
			fprintf(stderr, "Invalid stopwatch end time: %s\n",
				string);
			return 1;
		}
	} else if (*sp != '\0') {
		fprintf(stderr, "Invalid stopwatch start timer: %s\n", string);
		return 1;
	}

	stopwatch_end = DOUBLE_TO_NANO_ULL(value);
	if (stopwatch_end <= stopwatch_start) {
		fprintf(stderr, "Invalid stopwatch interval: %Lu -> %Lu\n",
			stopwatch_start, stopwatch_end);
		return 1;
	}

	return 0;
}
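
/*
 * Examples: "-w 5" shows only the first five seconds of the trace,
 * while "-w 2.5:10" shows the window from 2.5s up to (but excluding)
 * 10s -- check_stopwatch() above discards everything outside it.
 */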
static char usage_str[] = \
	"[ -i <input name> ] [ -o <output name> ] [ -s ] [ -t ] [ -q ]\n" \
	"[ -w start:stop ] [ -f output format ] [ -F format spec ] [ -v ]\n\n" \
	"\t-i Input file containing trace data, or '-' for stdin\n" \
	"\t-D Directory to prepend to input file names\n" \
	"\t-o Output file. If not given, output is stdout\n" \
	"\t-d Output file. If specified, binary data is written to file\n" \
	"\t-b stdin read batching\n" \
	"\t-s Show per-program io statistics\n" \
	"\t-h Hash processes by name, not pid\n" \
	"\t-t Track individual ios. Will tell you the time a request took\n" \
	"\t to get queued, to get dispatched, and to get completed\n" \
	"\t-q Quiet. Don't display any stats at the end of the trace\n" \
	"\t-w Only parse data between the given time interval in seconds.\n" \
	"\t If 'start' isn't given, blkparse defaults the start time to 0\n" \
	"\t-f Output format. Customize the output format. The format field\n" \
	"\t identifiers can be found in the documentation\n" \
	"\t-F Format specification. Can be found in the documentation\n" \
	"\t-v More verbose for marginal errors\n" \
	"\t-V Print program version info\n\n";
static void usage(char *prog)
{
	fprintf(stderr, "Usage: %s %s %s", prog, blkparse_version, usage_str);
}
int main(int argc, char *argv[])
{
	char *ofp_buffer = NULL;
	int i, c, ret, mode;
	int act_mask_tmp = 0;

	while ((c = getopt_long(argc, argv, S_OPTS, l_opts, NULL)) != -1) {
		switch (c) {
		case 'a':
			i = find_mask_map(optarg);
			if (i < 0) {
				fprintf(stderr, "Invalid action mask %s\n",
					optarg);
				return 1;
			}
			act_mask_tmp |= i;
			break;

		case 'A':
			if ((sscanf(optarg, "%x", &i) != 1) ||
							!valid_act_opt(i)) {
				fprintf(stderr,
					"Invalid set action mask %s/0x%x\n",
					optarg, i);
				return 1;
			}
			act_mask_tmp = i;
			break;

		case 'i':
			if (!strcmp(optarg, "-") && !pipeline)
				pipeline = 1;
			else if (resize_devices(optarg) != 0)
				return 1;
			break;

		case 'D':
			input_dir = optarg;
			break;

		case 'o':
			output_name = optarg;
			break;

		case 'b':
			rb_batch = atoi(optarg);
			if (rb_batch <= 0)
				rb_batch = RB_BATCH_DEFAULT;
			break;

		case 's':
			per_process_stats = 1;
			break;

		case 't':
			track_ios = 1;
			break;

		case 'q':
			per_device_and_cpu_stats = 0;
			break;

		case 'w':
			if (find_stopwatch_interval(optarg) != 0)
				return 1;
			break;

		case 'f':
			set_all_format_specs(optarg);
			break;

		case 'F':
			if (add_format_spec(optarg) != 0)
				return 1;
			break;

		case 'h':
			ppi_hash_by_pid = 0;
			break;

		case 'v':
			verbose++;
			break;

		case 'V':
			printf("%s version %s\n", argv[0], blkparse_version);
			return 0;

		case 'd':
			dump_binary = optarg;
			break;

		default:
			usage(argv[0]);
			return 1;
		}
	}

	while (optind < argc) {
		if (!strcmp(argv[optind], "-") && !pipeline)
			pipeline = 1;
		else if (resize_devices(argv[optind]) != 0)
			return 1;
		optind++;
	}

	if (!pipeline && !ndevices) {
		usage(argv[0]);
		return 1;
	}

	if (act_mask_tmp != 0)
		act_mask = act_mask_tmp;

	memset(&rb_sort_root, 0, sizeof(rb_sort_root));

	signal(SIGINT, handle_sigint);
	signal(SIGHUP, handle_sigint);
	signal(SIGTERM, handle_sigint);

	setlocale(LC_NUMERIC, "en_US");

	if (!output_name) {
		ofp = fdopen(STDOUT_FILENO, "w");
		mode = _IOLBF;
	} else {
		char ofname[128];

		snprintf(ofname, sizeof(ofname) - 1, "%s", output_name);
		ofp = fopen(ofname, "w");
		mode = _IOFBF;
	}

	if (!ofp) {
		perror("fopen");
		return 1;
	}

	ofp_buffer = malloc(4096);
	if (setvbuf(ofp, ofp_buffer, mode, 4096)) {
		perror("setvbuf");
		return 1;
	}

	if (dump_binary) {
		dump_fd = creat(dump_binary, 0666);
		if (dump_fd < 0) {
			perror(dump_binary);