[PATCH] blktrace: note that the -b option is in KiB
[blktrace.git] / blkparse.c
... / ...
CommitLineData
1/*
2 * block queue tracing parse application
3 *
4 * Copyright (C) 2005 Jens Axboe <axboe@suse.de>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 *
20 */
21#include <sys/types.h>
22#include <sys/stat.h>
23#include <unistd.h>
24#include <stdio.h>
25#include <fcntl.h>
26#include <stdlib.h>
27#include <string.h>
28#include <getopt.h>
29#include <errno.h>
30#include <signal.h>
31#include <locale.h>
32#include <limits.h>
33
34#include "blktrace.h"
35#include "rbtree.h"
36#include "jhash.h"
37
/* version string reported by the -v/--version switch */
static char blkparse_version[] = "0.90";
39
/*
 * Per-device state: aggregated statistics plus the rb trees used for
 * sorting and (-t) tracking traces belonging to this device.
 */
struct per_dev_info {
	dev_t dev;		/* device number; 0 until the first event claims it */
	char *name;		/* name given on the command line, if any */

	int backwards;		/* 'B' if the last event went backwards in time */
	unsigned long long events;	/* total events displayed for this device */
	unsigned long long last_reported_time;
	unsigned long long last_read_time;
	struct io_stats io_stats;
	unsigned long last_sequence;	/* last sequence number displayed */
	unsigned long skips;		/* sequence gaps we gave up on */

	/* recently displayed traces, kept around to resolve sequence gaps */
	struct rb_root rb_last;
	unsigned long rb_last_entries;

	/* in-flight ios keyed by start sector (-t tracking) */
	struct rb_root rb_track;

	int nfiles;		/* number of per-cpu trace files */
	int ncpus;		/* allocated size of the cpus array below */
	struct per_cpu_info *cpus;
};
61
/*
 * Per-process accounting, hashed by pid or (with -n) by name.
 */
struct per_process_info {
	char name[16];		/* process comm; may not be NUL terminated */
	__u32 pid;
	struct io_stats io_stats;
	struct per_process_info *hash_next, *list_next;
	int more_than_one;	/* several pids share this name (name hashing) */

	/*
	 * individual io stats
	 */
	unsigned long long longest_allocation_wait[2];	/* [0]=read, [1]=write */
	unsigned long long longest_dispatch_wait[2];
	unsigned long long longest_completion_wait[2];
};
76
/* process hash: 256 buckets of singly linked per_process_info chains */
#define PPI_HASH_SHIFT (8)
#define PPI_HASH_SIZE (1 << PPI_HASH_SHIFT)
#define PPI_HASH_MASK (PPI_HASH_SIZE - 1)
static struct per_process_info *ppi_hash_table[PPI_HASH_SIZE];
static struct per_process_info *ppi_list;	/* flat list of all processes seen */
static int ppi_list_entries;
83
#define S_OPTS "i:o:b:stqw:f:F:vnm"
/*
 * Long option table for getopt_long(). The array MUST be terminated
 * by an all-zero sentinel entry, otherwise getopt_long() walks past
 * the end of the array while scanning for a match.
 *
 * NOTE(review): several .name values contain spaces ("per program
 * stats", "track ios", "hash by name") and so cannot actually be typed
 * as long options; kept as-is to avoid changing the interface.
 */
static struct option l_opts[] = {
	{
		.name = "input",
		.has_arg = required_argument,
		.flag = NULL,
		.val = 'i'
	},
	{
		.name = "output",
		.has_arg = required_argument,
		.flag = NULL,
		.val = 'o'
	},
	{
		.name = "batch",
		.has_arg = required_argument,
		.flag = NULL,
		.val = 'b'
	},
	{
		.name = "per program stats",
		.has_arg = no_argument,
		.flag = NULL,
		.val = 's'
	},
	{
		.name = "track ios",
		.has_arg = no_argument,
		.flag = NULL,
		.val = 't'
	},
	{
		.name = "quiet",
		.has_arg = no_argument,
		.flag = NULL,
		.val = 'q'
	},
	{
		.name = "stopwatch",
		.has_arg = required_argument,
		.flag = NULL,
		.val = 'w'
	},
	{
		.name = "format",
		.has_arg = required_argument,
		.flag = NULL,
		.val = 'f'
	},
	{
		.name = "format-spec",
		.has_arg = required_argument,
		.flag = NULL,
		.val = 'F'
	},
	{
		.name = "hash by name",
		.has_arg = no_argument,
		.flag = NULL,
		.val = 'n'
	},
	{
		.name = "missing",
		.has_arg = no_argument,
		.flag = NULL,
		.val = 'm'
	},
	{
		.name = "version",
		.has_arg = no_argument,
		.flag = NULL,
		.val = 'v'
	},
	{
		.name = NULL,
		.has_arg = 0,
		.flag = NULL,
		.val = 0
	}
};
159
160/*
161 * for sorting the displayed output
162 */
163struct trace {
164 struct blk_io_trace *bit;
165 struct rb_node rb_node;
166 struct trace *next;
167};
168
/* traces sorted by (time, device, sequence), pending display */
static struct rb_root rb_sort_root;
static unsigned long rb_sort_entries;

/* raw traces read but not yet sorted into rb_sort_root */
static struct trace *trace_list;

/*
 * allocation cache
 */
static struct blk_io_trace *bit_alloc_list;
static struct trace *t_alloc_list;
179
180/*
181 * for tracking individual ios
182 */
struct io_track {
	struct rb_node rb_node;	/* linkage in pdi->rb_track, keyed by sector */

	__u64 sector;		/* start sector of the tracked request */
	__u32 pid;
	char comm[16];		/* submitting process name */
	/* timestamps of each lifecycle stage; 0 means "not seen yet" */
	unsigned long long allocation_time;
	unsigned long long queue_time;
	unsigned long long dispatch_time;
	unsigned long long completion_time;
};
194
static int ndevices;
static struct per_dev_info *devices;
static char *get_dev_name(struct per_dev_info *, char *, int);

FILE *ofp = NULL;		/* output stream; stdout unless -o is given */
static char *output_name;

static unsigned long long genesis_time;		/* earliest trace time seen */
static unsigned long long last_allowed_time;
static unsigned int smallest_seq_read;
static unsigned long long stopwatch_start; /* start from zero by default */
static unsigned long long stopwatch_end = ULONG_LONG_MAX; /* "infinity" */

/* command line switches */
static int per_process_stats;		/* -s */
static int track_ios;			/* -t */
static int ppi_hash_by_pid = 1;		/* cleared by -n (hash by name) */
static int print_missing;		/* -m */

static unsigned int t_alloc_cache;	/* entries on the struct trace free list */
static unsigned int bit_alloc_cache;	/* entries on the blk_io_trace free list */

#define RB_BATCH_DEFAULT (512)
static unsigned int rb_batch = RB_BATCH_DEFAULT;

static int pipeline;			/* reading from stdin */

/* set asynchronously (signal handler); force a volatile read */
#define is_done() (*(volatile int *)(&done))
static volatile int done;

#define JHASH_RANDOM (0x3af5f2ee)
225
226static inline int ppi_hash_pid(__u32 pid)
227{
228 return jhash_1word(pid, JHASH_RANDOM) & PPI_HASH_MASK;
229}
230
231static inline int ppi_hash_name(const char *name)
232{
233 return jhash(name, 16, JHASH_RANDOM) & PPI_HASH_MASK;
234}
235
236static inline int ppi_hash(struct per_process_info *ppi)
237{
238 if (ppi_hash_by_pid)
239 return ppi_hash_pid(ppi->pid);
240
241 return ppi_hash_name(ppi->name);
242}
243
244static inline void add_process_to_hash(struct per_process_info *ppi)
245{
246 const int hash_idx = ppi_hash(ppi);
247
248 ppi->hash_next = ppi_hash_table[hash_idx];
249 ppi_hash_table[hash_idx] = ppi;
250}
251
252static inline void add_process_to_list(struct per_process_info *ppi)
253{
254 ppi->list_next = ppi_list;
255 ppi_list = ppi;
256 ppi_list_entries++;
257}
258
259static struct per_process_info *find_process_by_name(char *name)
260{
261 const int hash_idx = ppi_hash_name(name);
262 struct per_process_info *ppi;
263
264 ppi = ppi_hash_table[hash_idx];
265 while (ppi) {
266 if (!strcmp(ppi->name, name))
267 return ppi;
268
269 ppi = ppi->hash_next;
270 }
271
272 return NULL;
273}
274
275static struct per_process_info *find_process_by_pid(__u32 pid)
276{
277 const int hash_idx = ppi_hash_pid(pid);
278 struct per_process_info *ppi;
279
280 ppi = ppi_hash_table[hash_idx];
281 while (ppi) {
282 if (ppi->pid == pid)
283 return ppi;
284
285 ppi = ppi->hash_next;
286 }
287
288 return NULL;
289}
290
291static struct per_process_info *find_process(__u32 pid, char *name)
292{
293 struct per_process_info *ppi;
294
295 if (ppi_hash_by_pid)
296 return find_process_by_pid(pid);
297
298 ppi = find_process_by_name(name);
299 if (ppi && ppi->pid != pid)
300 ppi->more_than_one = 1;
301
302 return ppi;
303}
304
/*
 * Insert a trace into the given rb tree. Primary key is time (when
 * check_time is set); ties fall through to device, then sequence.
 * Entries with fully equal keys go right, so duplicates are allowed.
 * Always succeeds and returns 0.
 */
static inline int trace_rb_insert(struct trace *t, struct rb_root *root,
				  int check_time)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct trace *__t;

	while (*p) {
		parent = *p;

		__t = rb_entry(parent, struct trace, rb_node);

		if (check_time) {
			if (t->bit->time < __t->bit->time) {
				p = &(*p)->rb_left;
				continue;
			} else if (t->bit->time > __t->bit->time) {
				p = &(*p)->rb_right;
				continue;
			}
		}
		/* equal time (or not keyed on time): device, then sequence */
		if (t->bit->device < __t->bit->device)
			p = &(*p)->rb_left;
		else if (t->bit->device > __t->bit->device)
			p = &(*p)->rb_right;
		else if (t->bit->sequence < __t->bit->sequence)
			p = &(*p)->rb_left;
		else /* >= sequence */
			p = &(*p)->rb_right;
	}

	rb_link_node(&t->rb_node, parent, p);
	rb_insert_color(&t->rb_node, root);
	return 0;
}
340
341static inline int trace_rb_insert_sort(struct trace *t)
342{
343 if (!trace_rb_insert(t, &rb_sort_root, 1)) {
344 rb_sort_entries++;
345 return 0;
346 }
347
348 return 1;
349}
350
351static inline int trace_rb_insert_last(struct per_dev_info *pdi,struct trace *t)
352{
353 if (!trace_rb_insert(t, &pdi->rb_last, 1)) {
354 pdi->rb_last_entries++;
355 return 0;
356 }
357
358 return 1;
359}
360
/*
 * Look a trace up by (device, sequence) in the given tree. With order
 * set, also browse a few entries past the nearest miss — see the hack
 * comment below.
 */
static struct trace *trace_rb_find(dev_t device, unsigned long sequence,
				   struct rb_root *root, int order)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct trace *__t;

	while (n) {
		__t = rb_entry(n, struct trace, rb_node);
		prev = n;	/* remember where the descent ended */

		if (device < __t->bit->device)
			n = n->rb_left;
		else if (device > __t->bit->device)
			n = n->rb_right;
		else if (sequence < __t->bit->sequence)
			n = n->rb_left;
		else if (sequence > __t->bit->sequence)
			n = n->rb_right;
		else
			return __t;
	}

	/*
	 * hack - the list may not be sequence ordered because some
	 * events don't have sequence and time matched. so we end up
	 * being a little off in the rb lookup here, because we don't
	 * know the time we are looking for. compensate by browsing
	 * a little ahead from the last entry to find the match
	 */
	if (order && prev) {
		int max = 5;	/* inspect at most 5 successors */

		while (((n = rb_next(prev)) != NULL) && max--) {
			__t = rb_entry(n, struct trace, rb_node);

			if (__t->bit->device == device &&
			    __t->bit->sequence == sequence)
				return __t;

			prev = n;
		}
	}

	return NULL;
}
407
/* find a trace in the global sort tree, browsing ahead on a near miss */
static inline struct trace *trace_rb_find_sort(dev_t dev, unsigned long seq)
{
	return trace_rb_find(dev, seq, &rb_sort_root, 1);
}

/* exact-match lookup in the per-device last-displayed tree */
static inline struct trace *trace_rb_find_last(struct per_dev_info *pdi,
					       unsigned long seq)
{
	return trace_rb_find(pdi->dev, seq, &pdi->rb_last, 0);
}
418
/*
 * Insert an io into the per-device tracking tree, keyed by start
 * sector. Returns 0 on success. An io already tracked at the same
 * sector is reported as an alias and rejected (returns 1, entry is
 * NOT inserted).
 */
static inline int track_rb_insert(struct per_dev_info *pdi,struct io_track *iot)
{
	struct rb_node **p = &pdi->rb_track.rb_node;
	struct rb_node *parent = NULL;
	struct io_track *__iot;

	while (*p) {
		parent = *p;
		__iot = rb_entry(parent, struct io_track, rb_node);

		if (iot->sector < __iot->sector)
			p = &(*p)->rb_left;
		else if (iot->sector > __iot->sector)
			p = &(*p)->rb_right;
		else {
			fprintf(stderr,
				"sector alias (%Lu) on device %d,%d!\n",
				(unsigned long long) iot->sector,
				MAJOR(pdi->dev), MINOR(pdi->dev));
			return 1;
		}
	}

	rb_link_node(&iot->rb_node, parent, p);
	rb_insert_color(&iot->rb_node, &pdi->rb_track);
	return 0;
}
446
447static struct io_track *__find_track(struct per_dev_info *pdi, __u64 sector)
448{
449 struct rb_node *n = pdi->rb_track.rb_node;
450 struct io_track *__iot;
451
452 while (n) {
453 __iot = rb_entry(n, struct io_track, rb_node);
454
455 if (sector < __iot->sector)
456 n = n->rb_left;
457 else if (sector > __iot->sector)
458 n = n->rb_right;
459 else
460 return __iot;
461 }
462
463 return NULL;
464}
465
466static struct io_track *find_track(struct per_dev_info *pdi, __u32 pid,
467 char *comm, __u64 sector)
468{
469 struct io_track *iot;
470
471 iot = __find_track(pdi, sector);
472 if (!iot) {
473 iot = malloc(sizeof(*iot));
474 iot->pid = pid;
475 memcpy(iot->comm, comm, sizeof(iot->comm));
476 iot->sector = sector;
477 track_rb_insert(pdi, iot);
478 }
479
480 return iot;
481}
482
/*
 * A front merge extends an existing io downwards: the tracked entry
 * currently starts right where this request ends. Re-key it to the
 * new, lower start sector (erase, adjust, re-insert).
 */
static void log_track_frontmerge(struct per_dev_info *pdi,
				 struct blk_io_trace *t)
{
	struct io_track *iot;

	if (!track_ios)
		return;

	/* the existing io begins at this request's end sector */
	iot = __find_track(pdi, t->sector + (t->bytes >> 9));
	if (!iot) {
		fprintf(stderr, "merge not found for (%d,%d): %llu\n",
			MAJOR(pdi->dev), MINOR(pdi->dev),
			(unsigned long long) t->sector + (t->bytes >> 9));
		return;
	}

	rb_erase(&iot->rb_node, &pdi->rb_track);
	iot->sector -= t->bytes >> 9;
	track_rb_insert(pdi, iot);
}
503
/*
 * Record the request allocation (getrq) time for this io. No-op
 * unless -t tracking is enabled.
 */
static void log_track_getrq(struct per_dev_info *pdi, struct blk_io_trace *t)
{
	struct io_track *iot;

	if (!track_ios)
		return;

	iot = find_track(pdi, t->pid, t->comm, t->sector);
	iot->allocation_time = t->time;
}
514
515/*
516 * return time between rq allocation and insertion
517 */
518static unsigned long long log_track_insert(struct per_dev_info *pdi,
519 struct blk_io_trace *t)
520{
521 unsigned long long elapsed;
522 struct io_track *iot;
523
524 if (!track_ios)
525 return -1;
526
527 iot = find_track(pdi, t->pid, t->comm, t->sector);
528 iot->queue_time = t->time;
529
530 if (!iot->allocation_time)
531 return -1;
532
533 elapsed = iot->queue_time - iot->allocation_time;
534
535 if (per_process_stats) {
536 struct per_process_info *ppi = find_process(iot->pid,iot->comm);
537 int w = (t->action & BLK_TC_ACT(BLK_TC_WRITE)) != 0;
538
539 if (ppi && elapsed > ppi->longest_allocation_wait[w])
540 ppi->longest_allocation_wait[w] = elapsed;
541 }
542
543 return elapsed;
544}
545
546/*
547 * return time between queue and issue
548 */
549static unsigned long long log_track_issue(struct per_dev_info *pdi,
550 struct blk_io_trace *t)
551{
552 unsigned long long elapsed;
553 struct io_track *iot;
554
555 if (!track_ios)
556 return -1;
557 if ((t->action & BLK_TC_ACT(BLK_TC_FS)) == 0)
558 return -1;
559
560 iot = __find_track(pdi, t->sector);
561 if (!iot) {
562 fprintf(stderr, "issue not found for (%d,%d): %llu\n",
563 MAJOR(pdi->dev), MINOR(pdi->dev),
564 (unsigned long long) t->sector);
565 return -1;
566 }
567
568 iot->dispatch_time = t->time;
569 elapsed = iot->dispatch_time - iot->queue_time;
570
571 if (per_process_stats) {
572 struct per_process_info *ppi = find_process(iot->pid,iot->comm);
573 int w = (t->action & BLK_TC_ACT(BLK_TC_WRITE)) != 0;
574
575 if (ppi && elapsed > ppi->longest_dispatch_wait[w])
576 ppi->longest_dispatch_wait[w] = elapsed;
577 }
578
579 return elapsed;
580}
581
582/*
583 * return time between dispatch and complete
584 */
585static unsigned long long log_track_complete(struct per_dev_info *pdi,
586 struct blk_io_trace *t)
587{
588 unsigned long long elapsed;
589 struct io_track *iot;
590
591 if (!track_ios)
592 return -1;
593 if ((t->action & BLK_TC_ACT(BLK_TC_FS)) == 0)
594 return -1;
595
596 iot = __find_track(pdi, t->sector);
597 if (!iot) {
598 fprintf(stderr, "complete not found for (%d,%d): %llu\n",
599 MAJOR(pdi->dev), MINOR(pdi->dev),
600 (unsigned long long) t->sector);
601 return -1;
602 }
603
604 iot->completion_time = t->time;
605 elapsed = iot->completion_time - iot->dispatch_time;
606
607 if (per_process_stats) {
608 struct per_process_info *ppi = find_process(iot->pid,iot->comm);
609 int w = (t->action & BLK_TC_ACT(BLK_TC_WRITE)) != 0;
610
611 if (ppi && elapsed > ppi->longest_completion_wait[w])
612 ppi->longest_completion_wait[w] = elapsed;
613 }
614
615 /*
616 * kill the trace, we don't need it after completion
617 */
618 rb_erase(&iot->rb_node, &pdi->rb_track);
619 free(iot);
620
621 return elapsed;
622}
623
624
625static struct io_stats *find_process_io_stats(__u32 pid, char *name)
626{
627 struct per_process_info *ppi = find_process(pid, name);
628
629 if (!ppi) {
630 ppi = malloc(sizeof(*ppi));
631 memset(ppi, 0, sizeof(*ppi));
632 memcpy(ppi->name, name, 16);
633 ppi->pid = pid;
634 add_process_to_hash(ppi);
635 add_process_to_list(ppi);
636 }
637
638 return &ppi->io_stats;
639}
640
/*
 * Grow pdi->cpus so it holds at least cpu + 1 entries, zero filling
 * only the newly added slots. Exits the program on allocation failure.
 */
static void resize_cpu_info(struct per_dev_info *pdi, int cpu)
{
	struct per_cpu_info *cpus = pdi->cpus;
	int ncpus = pdi->ncpus;
	int new_count = cpu + 1;
	int new_space, size;
	char *new_start;

	size = new_count * sizeof(struct per_cpu_info);
	cpus = realloc(cpus, size);
	if (!cpus) {
		char name[20];
		fprintf(stderr, "Out of memory, CPU info for device %s (%d)\n",
			get_dev_name(pdi, name, sizeof(name)), size);
		exit(1);
	}

	/* zero the region beyond the previously initialized entries */
	new_start = (char *)cpus + (ncpus * sizeof(struct per_cpu_info));
	new_space = (new_count - ncpus) * sizeof(struct per_cpu_info);
	memset(new_start, 0, new_space);

	pdi->ncpus = new_count;
	pdi->cpus = cpus;
}
665
666static struct per_cpu_info *get_cpu_info(struct per_dev_info *pdi, int cpu)
667{
668 struct per_cpu_info *pci;
669
670 if (cpu >= pdi->ncpus)
671 resize_cpu_info(pdi, cpu);
672
673 pci = &pdi->cpus[cpu];
674 pci->cpu = cpu;
675 return pci;
676}
677
678
679static int resize_devices(char *name)
680{
681 int size = (ndevices + 1) * sizeof(struct per_dev_info);
682
683 devices = realloc(devices, size);
684 if (!devices) {
685 fprintf(stderr, "Out of memory, device %s (%d)\n", name, size);
686 return 1;
687 }
688 memset(&devices[ndevices], 0, sizeof(struct per_dev_info));
689 devices[ndevices].name = name;
690 ndevices++;
691 return 0;
692}
693
/*
 * Return the per-device info for dev, creating a new slot if this is
 * the first time the device is seen. Devices registered by name have
 * dev == 0 until the first event claims them. Returns NULL only when
 * growing the device table fails.
 */
static struct per_dev_info *get_dev_info(dev_t dev)
{
	struct per_dev_info *pdi;
	int i;

	for (i = 0; i < ndevices; i++) {
		if (!devices[i].dev)
			devices[i].dev = dev;
		if (devices[i].dev == dev)
			return &devices[i];
	}

	if (resize_devices(NULL))
		return NULL;

	pdi = &devices[ndevices - 1];
	pdi->dev = dev;
	pdi->last_sequence = -1;	/* so sequence + 1 == 0 marks "first entry" */
	pdi->last_read_time = 0;
	memset(&pdi->rb_last, 0, sizeof(pdi->rb_last));
	pdi->rb_last_entries = 0;
	return pdi;
}
717
718static char *get_dev_name(struct per_dev_info *pdi, char *buffer, int size)
719{
720 if (pdi->name)
721 snprintf(buffer, size, "%s", pdi->name);
722 else
723 snprintf(buffer, size, "%d,%d",MAJOR(pdi->dev),MINOR(pdi->dev));
724 return buffer;
725}
726
727static void check_time(struct per_dev_info *pdi, struct blk_io_trace *bit)
728{
729 unsigned long long this = bit->time;
730 unsigned long long last = pdi->last_reported_time;
731
732 pdi->backwards = (this < last) ? 'B' : ' ';
733 pdi->last_reported_time = this;
734}
735
736static inline void __account_m(struct io_stats *ios, struct blk_io_trace *t,
737 int rw)
738{
739 if (rw) {
740 ios->mwrites++;
741 ios->qwrite_kb += t->bytes >> 10;
742 } else {
743 ios->mreads++;
744 ios->qread_kb += t->bytes >> 10;
745 }
746}
747
748static inline void account_m(struct blk_io_trace *t, struct per_cpu_info *pci,
749 int rw)
750{
751 __account_m(&pci->io_stats, t, rw);
752
753 if (per_process_stats) {
754 struct io_stats *ios = find_process_io_stats(t->pid, t->comm);
755
756 __account_m(ios, t, rw);
757 }
758}
759
760static inline void __account_queue(struct io_stats *ios, struct blk_io_trace *t,
761 int rw)
762{
763 if (rw) {
764 ios->qwrites++;
765 ios->qwrite_kb += t->bytes >> 10;
766 } else {
767 ios->qreads++;
768 ios->qread_kb += t->bytes >> 10;
769 }
770}
771
772static inline void account_queue(struct blk_io_trace *t,
773 struct per_cpu_info *pci, int rw)
774{
775 __account_queue(&pci->io_stats, t, rw);
776
777 if (per_process_stats) {
778 struct io_stats *ios = find_process_io_stats(t->pid, t->comm);
779
780 __account_queue(ios, t, rw);
781 }
782}
783
784static inline void __account_c(struct io_stats *ios, int rw, unsigned int bytes)
785{
786 if (rw) {
787 ios->cwrites++;
788 ios->cwrite_kb += bytes >> 10;
789 } else {
790 ios->creads++;
791 ios->cread_kb += bytes >> 10;
792 }
793}
794
795static inline void account_c(struct blk_io_trace *t, struct per_cpu_info *pci,
796 int rw, int bytes)
797{
798 __account_c(&pci->io_stats, rw, bytes);
799
800 if (per_process_stats) {
801 struct io_stats *ios = find_process_io_stats(t->pid, t->comm);
802
803 __account_c(ios, rw, bytes);
804 }
805}
806
807static inline void __account_issue(struct io_stats *ios, int rw,
808 unsigned int bytes)
809{
810 if (rw) {
811 ios->iwrites++;
812 ios->iwrite_kb += bytes >> 10;
813 } else {
814 ios->ireads++;
815 ios->iread_kb += bytes >> 10;
816 }
817}
818
819static inline void account_issue(struct blk_io_trace *t,
820 struct per_cpu_info *pci, int rw)
821{
822 __account_issue(&pci->io_stats, rw, t->bytes);
823
824 if (per_process_stats) {
825 struct io_stats *ios = find_process_io_stats(t->pid, t->comm);
826
827 __account_issue(ios, rw, t->bytes);
828 }
829}
830
831static inline void __account_unplug(struct io_stats *ios, int timer)
832{
833 if (timer)
834 ios->timer_unplugs++;
835 else
836 ios->io_unplugs++;
837}
838
839static inline void account_unplug(struct blk_io_trace *t,
840 struct per_cpu_info *pci, int timer)
841{
842 __account_unplug(&pci->io_stats, timer);
843
844 if (per_process_stats) {
845 struct io_stats *ios = find_process_io_stats(t->pid, t->comm);
846
847 __account_unplug(ios, timer);
848 }
849}
850
/*
 * Thin formatting wrappers, one per event type. Each hands the event
 * to process_fmt() with an elapsed time where io tracking can compute
 * one (insert, issue, complete) and -1 where it can't.
 */
static void log_complete(struct per_dev_info *pdi, struct per_cpu_info *pci,
			 struct blk_io_trace *t, char *act)
{
	process_fmt(act, pci, t, log_track_complete(pdi, t), 0, NULL);
}

static void log_insert(struct per_dev_info *pdi, struct per_cpu_info *pci,
		       struct blk_io_trace *t, char *act)
{
	process_fmt(act, pci, t, log_track_insert(pdi, t), 0, NULL);
}

static void log_queue(struct per_cpu_info *pci, struct blk_io_trace *t,
		      char *act)
{
	process_fmt(act, pci, t, -1, 0, NULL);
}

static void log_issue(struct per_dev_info *pdi, struct per_cpu_info *pci,
		      struct blk_io_trace *t, char *act)
{
	process_fmt(act, pci, t, log_track_issue(pdi, t), 0, NULL);
}

static void log_merge(struct per_dev_info *pdi, struct per_cpu_info *pci,
		      struct blk_io_trace *t, char *act)
{
	/* a front merge re-keys the tracked io to its new start sector */
	if (act[0] == 'F')
		log_track_frontmerge(pdi, t);

	process_fmt(act, pci, t, -1ULL, 0, NULL);
}

static void log_action(struct per_cpu_info *pci, struct blk_io_trace *t,
		       char *act)
{
	process_fmt(act, pci, t, -1ULL, 0, NULL);
}

static void log_generic(struct per_cpu_info *pci, struct blk_io_trace *t,
			char *act)
{
	process_fmt(act, pci, t, -1ULL, 0, NULL);
}

static void log_unplug(struct per_cpu_info *pci, struct blk_io_trace *t,
		       char *act)
{
	process_fmt(act, pci, t, -1ULL, 0, NULL);
}

static void log_split(struct per_cpu_info *pci, struct blk_io_trace *t,
		      char *act)
{
	process_fmt(act, pci, t, -1ULL, 0, NULL);
}

/* pc events carry a pdu payload immediately after the trace header */
static void log_pc(struct per_cpu_info *pci, struct blk_io_trace *t, char *act)
{
	unsigned char *buf = (unsigned char *) t + sizeof(*t);

	process_fmt(act, pci, t, -1ULL, t->pdu_len, buf);
}
914
/*
 * Display a pc (passthrough/SCSI command) trace. Issue, complete and
 * insert also dump the command pdu; other events print generically.
 */
static void dump_trace_pc(struct blk_io_trace *t, struct per_cpu_info *pci)
{
	int act = t->action & 0xffff;

	switch (act) {
	case __BLK_TA_QUEUE:
		log_generic(pci, t, "Q");
		break;
	case __BLK_TA_GETRQ:
		log_generic(pci, t, "G");
		break;
	case __BLK_TA_SLEEPRQ:
		log_generic(pci, t, "S");
		break;
	case __BLK_TA_REQUEUE:
		log_generic(pci, t, "R");
		break;
	case __BLK_TA_ISSUE:
		log_pc(pci, t, "D");
		break;
	case __BLK_TA_COMPLETE:
		log_pc(pci, t, "C");
		break;
	case __BLK_TA_INSERT:
		log_pc(pci, t, "I");
		break;
	default:
		fprintf(stderr, "Bad pc action %x\n", act);
		break;
	}
}
946
/*
 * Display an fs trace: account it against the cpu/process statistics
 * (where the event type is accounted) and print it with its one or
 * two letter action code.
 */
static void dump_trace_fs(struct blk_io_trace *t, struct per_dev_info *pdi,
			  struct per_cpu_info *pci)
{
	int w = t->action & BLK_TC_ACT(BLK_TC_WRITE);
	int act = t->action & 0xffff;

	switch (act) {
	case __BLK_TA_QUEUE:
		account_queue(t, pci, w);
		log_queue(pci, t, "Q");
		break;
	case __BLK_TA_INSERT:
		log_insert(pdi, pci, t, "I");
		break;
	case __BLK_TA_BACKMERGE:
		account_m(t, pci, w);
		log_merge(pdi, pci, t, "M");
		break;
	case __BLK_TA_FRONTMERGE:
		account_m(t, pci, w);
		log_merge(pdi, pci, t, "F");
		break;
	case __BLK_TA_GETRQ:
		log_track_getrq(pdi, t);
		log_generic(pci, t, "G");
		break;
	case __BLK_TA_SLEEPRQ:
		log_generic(pci, t, "S");
		break;
	case __BLK_TA_REQUEUE:
		/* negative bytes back the request out of the completed totals */
		account_c(t, pci, w, -t->bytes);
		log_queue(pci, t, "R");
		break;
	case __BLK_TA_ISSUE:
		account_issue(t, pci, w);
		log_issue(pdi, pci, t, "D");
		break;
	case __BLK_TA_COMPLETE:
		account_c(t, pci, w, t->bytes);
		log_complete(pdi, pci, t, "C");
		break;
	case __BLK_TA_PLUG:
		log_action(pci, t, "P");
		break;
	case __BLK_TA_UNPLUG_IO:
		account_unplug(t, pci, 0);
		log_unplug(pci, t, "U");
		break;
	case __BLK_TA_UNPLUG_TIMER:
		account_unplug(t, pci, 1);
		log_unplug(pci, t, "UT");
		break;
	case __BLK_TA_SPLIT:
		log_split(pci, t, "X");
		break;
	case __BLK_TA_BOUNCE:
		log_generic(pci, t, "B");
		break;
	default:
		fprintf(stderr, "Bad fs action %x\n", t->action);
		break;
	}
}
1010
1011static void dump_trace(struct blk_io_trace *t, struct per_cpu_info *pci,
1012 struct per_dev_info *pdi)
1013{
1014 if (t->action & BLK_TC_ACT(BLK_TC_PC))
1015 dump_trace_pc(t, pci);
1016 else
1017 dump_trace_fs(t, pdi, pci);
1018
1019 pdi->events++;
1020}
1021
/*
 * Print one two-column block of io statistics (reads left, writes
 * right) headed by msg. The %' flags use the thousands separator from
 * the locale configured at startup.
 */
static void dump_io_stats(struct io_stats *ios, char *msg)
{
	fprintf(ofp, "%s\n", msg);

	fprintf(ofp, " Reads Queued:    %'8lu, %'8LuKiB\t", ios->qreads, ios->qread_kb);
	fprintf(ofp, " Writes Queued:    %'8lu, %'8LuKiB\n", ios->qwrites,ios->qwrite_kb);

	fprintf(ofp, " Read Dispatches: %'8lu, %'8LuKiB\t", ios->ireads, ios->iread_kb);
	fprintf(ofp, " Write Dispatches: %'8lu, %'8LuKiB\n", ios->iwrites,ios->iwrite_kb);
	fprintf(ofp, " Reads Completed: %'8lu, %'8LuKiB\t", ios->creads, ios->cread_kb);
	fprintf(ofp, " Writes Completed: %'8lu, %'8LuKiB\n", ios->cwrites,ios->cwrite_kb);
	fprintf(ofp, " Read Merges:     %'8lu%8c\t", ios->mreads, ' ');
	fprintf(ofp, " Write Merges:     %'8lu\n", ios->mwrites);
	fprintf(ofp, " IO unplugs:      %'8lu%8c\t", ios->io_unplugs, ' ');
	fprintf(ofp, " Timer unplugs:    %'8lu\n", ios->timer_unplugs);
}
1038
/*
 * Print a process' worst-case waits, reads in the left column and
 * writes in the right. Values are divided by 1000 — presumably a
 * ns-to-usec conversion; confirm against the trace timestamp unit.
 */
static void dump_wait_stats(struct per_process_info *ppi)
{
	unsigned long rawait = ppi->longest_allocation_wait[0] / 1000;
	unsigned long rdwait = ppi->longest_dispatch_wait[0] / 1000;
	unsigned long rcwait = ppi->longest_completion_wait[0] / 1000;
	unsigned long wawait = ppi->longest_allocation_wait[1] / 1000;
	unsigned long wdwait = ppi->longest_dispatch_wait[1] / 1000;
	unsigned long wcwait = ppi->longest_completion_wait[1] / 1000;

	fprintf(ofp, " Allocation wait: %'8lu%8c\t", rawait, ' ');
	fprintf(ofp, " Allocation wait:  %'8lu\n", wawait);
	fprintf(ofp, " Dispatch wait:   %'8lu%8c\t", rdwait, ' ');
	fprintf(ofp, " Dispatch wait:    %'8lu\n", wdwait);
	fprintf(ofp, " Completion wait: %'8lu%8c\t", rcwait, ' ');
	fprintf(ofp, " Completion wait:  %'8lu\n", wcwait);
}
1055
1056static int ppi_name_compare(const void *p1, const void *p2)
1057{
1058 struct per_process_info *ppi1 = *((struct per_process_info **) p1);
1059 struct per_process_info *ppi2 = *((struct per_process_info **) p2);
1060 int res;
1061
1062 res = strverscmp(ppi1->name, ppi2->name);
1063 if (!res)
1064 res = ppi1->pid > ppi2->pid;
1065
1066 return res;
1067}
1068
1069static void sort_process_list(void)
1070{
1071 struct per_process_info **ppis;
1072 struct per_process_info *ppi;
1073 int i = 0;
1074
1075 ppis = malloc(ppi_list_entries * sizeof(struct per_process_info *));
1076
1077 ppi = ppi_list;
1078 while (ppi) {
1079 ppis[i++] = ppi;
1080 ppi = ppi->list_next;
1081 }
1082
1083 qsort(ppis, ppi_list_entries, sizeof(ppi), ppi_name_compare);
1084
1085 i = ppi_list_entries - 1;
1086 ppi_list = NULL;
1087 while (i >= 0) {
1088 ppi = ppis[i];
1089
1090 ppi->list_next = ppi_list;
1091 ppi_list = ppi;
1092 i--;
1093 }
1094
1095 free(ppis);
1096}
1097
1098static void show_process_stats(void)
1099{
1100 struct per_process_info *ppi;
1101
1102 sort_process_list();
1103
1104 ppi = ppi_list;
1105 while (ppi) {
1106 char name[64];
1107
1108 if (ppi->more_than_one)
1109 sprintf(name, "%s (%u, ...)", ppi->name, ppi->pid);
1110 else
1111 sprintf(name, "%s (%u)", ppi->name, ppi->pid);
1112
1113 dump_io_stats(&ppi->io_stats, name);
1114 dump_wait_stats(ppi);
1115 ppi = ppi->list_next;
1116 }
1117
1118 fprintf(ofp, "\n");
1119}
1120
/*
 * Print per-cpu statistics for every device, followed by a per-device
 * total when more than one cpu contributed, and the event/skip counts.
 */
static void show_device_and_cpu_stats(void)
{
	struct per_dev_info *pdi;
	struct per_cpu_info *pci;
	struct io_stats total, *ios;
	int i, j, pci_events;
	char line[3 + 8/*cpu*/ + 2 + 32/*dev*/ + 3];
	char name[32];

	for (pdi = devices, i = 0; i < ndevices; i++, pdi++) {

		memset(&total, 0, sizeof(total));
		pci_events = 0;

		if (i > 0)
			fprintf(ofp, "\n");

		for (pci = pdi->cpus, j = 0; j < pdi->ncpus; j++, pci++) {
			/* skip cpus that contributed no events */
			if (!pci->nelems)
				continue;

			/* accumulate into the device-wide total */
			ios = &pci->io_stats;
			total.qreads += ios->qreads;
			total.qwrites += ios->qwrites;
			total.creads += ios->creads;
			total.cwrites += ios->cwrites;
			total.mreads += ios->mreads;
			total.mwrites += ios->mwrites;
			total.ireads += ios->ireads;
			total.iwrites += ios->iwrites;
			total.qread_kb += ios->qread_kb;
			total.qwrite_kb += ios->qwrite_kb;
			total.cread_kb += ios->cread_kb;
			total.cwrite_kb += ios->cwrite_kb;
			total.iread_kb += ios->iread_kb;
			total.iwrite_kb += ios->iwrite_kb;
			total.timer_unplugs += ios->timer_unplugs;
			total.io_unplugs += ios->io_unplugs;

			snprintf(line, sizeof(line) - 1, "CPU%d (%s):",
				 j, get_dev_name(pdi, name, sizeof(name)));
			dump_io_stats(ios, line);
			pci_events++;
		}

		/* only print a total when it differs from the single cpu */
		if (pci_events > 1) {
			fprintf(ofp, "\n");
			snprintf(line, sizeof(line) - 1, "Total (%s):",
				 get_dev_name(pdi, name, sizeof(name)));
			dump_io_stats(&total, line);
		}

		fprintf(ofp, "\nEvents (%s): %'Lu entries, %'lu skips\n",
			get_dev_name(pdi, line, sizeof(line)), pdi->events,
			pdi->skips);
	}
}
1178
1179/*
1180 * struct trace and blktrace allocation cache, we do potentially
1181 * millions of mallocs for these structures while only using at most
1182 * a few thousand at the time
1183 */
1184static inline void t_free(struct trace *t)
1185{
1186 if (t_alloc_cache < 1024) {
1187 t->next = t_alloc_list;
1188 t_alloc_list = t;
1189 t_alloc_cache++;
1190 } else
1191 free(t);
1192}
1193
1194static inline struct trace *t_alloc(void)
1195{
1196 struct trace *t = t_alloc_list;
1197
1198 if (t) {
1199 t_alloc_list = t->next;
1200 t_alloc_cache--;
1201 return t;
1202 }
1203
1204 return malloc(sizeof(*t));
1205}
1206
/*
 * Release a blk_io_trace, stashing it on the free list unless the
 * cache is full (1024 entries). Free-list linkage reuses the time
 * field — see below.
 */
static inline void bit_free(struct blk_io_trace *bit)
{
	if (bit_alloc_cache < 1024) {
		/*
		 * abuse a 64-bit field for a next pointer for the free item
		 * NOTE(review): the (unsigned long) round trip truncates on
		 * targets where pointers are wider than long — confirm no
		 * such target is supported
		 */
		bit->time = (__u64) (unsigned long) bit_alloc_list;
		bit_alloc_list = (struct blk_io_trace *) bit;
		bit_alloc_cache++;
	} else
		free(bit);
}

/*
 * Grab a blk_io_trace from the free list (undoing the pointer-in-time
 * trick above), falling back to malloc when the cache is empty.
 */
static inline struct blk_io_trace *bit_alloc(void)
{
	struct blk_io_trace *bit = bit_alloc_list;

	if (bit) {
		bit_alloc_list = (struct blk_io_trace *) (unsigned long) \
				 bit->time;
		bit_alloc_cache--;
		return bit;
	}

	return malloc(sizeof(*bit));
}
1233
1234static void find_genesis(void)
1235{
1236 struct trace *t = trace_list;
1237
1238 genesis_time = -1ULL;
1239 while (t != NULL) {
1240 if (t->bit->time < genesis_time)
1241 genesis_time = t->bit->time;
1242
1243 t = t->next;
1244 }
1245}
1246
1247static inline int check_stopwatch(struct blk_io_trace *bit)
1248{
1249 if (bit->time < stopwatch_end &&
1250 bit->time >= stopwatch_start)
1251 return 0;
1252
1253 return 1;
1254}
1255
1256/*
1257 * return youngest entry read
1258 */
1259static int sort_entries(unsigned long long *youngest)
1260{
1261 struct trace *t;
1262
1263 if (!genesis_time)
1264 find_genesis();
1265
1266 *youngest = 0;
1267 while ((t = trace_list) != NULL) {
1268 struct blk_io_trace *bit = t->bit;
1269
1270 trace_list = t->next;
1271
1272 bit->time -= genesis_time;
1273
1274 if (bit->time < *youngest || !*youngest)
1275 *youngest = bit->time;
1276
1277 if (check_stopwatch(bit)) {
1278 bit_free(bit);
1279 t_free(t);
1280 continue;
1281 }
1282
1283 if (trace_rb_insert_sort(t))
1284 return -1;
1285
1286 if (bit->sequence < smallest_seq_read)
1287 smallest_seq_read = bit->sequence;
1288 }
1289
1290 return 0;
1291}
1292
/*
 * Remove a trace from the per-device "last seen" tree and release
 * both the wrapper and its event payload back to the caches.
 */
static inline void __put_trace_last(struct per_dev_info *pdi, struct trace *t)
{
	rb_erase(&t->rb_node, &pdi->rb_last);
	pdi->rb_last_entries--;

	bit_free(t->bit);
	t_free(t);
}
1301
/*
 * Retire a displayed trace: move it from the global sort tree into
 * the per-device "last seen" tree, and trim that tree back to at
 * most rb_batch entries per input file.
 */
static void put_trace(struct per_dev_info *pdi, struct trace *t)
{
	rb_erase(&t->rb_node, &rb_sort_root);
	rb_sort_entries--;

	trace_rb_insert_last(pdi, t);

	/* evict the oldest retained entry once the cap is exceeded */
	if (pdi->rb_last_entries > rb_batch * pdi->nfiles) {
		struct rb_node *n = rb_first(&pdi->rb_last);

		t = rb_entry(n, struct trace, rb_node);
		__put_trace_last(pdi, t);
	}
}
1316
/*
 * Check that this event continues the per-device sequence.
 *
 * NOTE(review): every path currently returns 0, so the caller's
 * break-on-nonzero branch in show_entries_rb() is never taken --
 * verify whether the "wait for more entries" case was meant to
 * return 1 here.
 */
static int check_sequence(struct per_dev_info *pdi, struct blk_io_trace *bit)
{
	unsigned long expected_sequence = pdi->last_sequence + 1;
	struct trace *t;

	/*
	 * first entry, always ok
	 */
	if (!expected_sequence)
		return 0;

	if (bit->sequence == expected_sequence)
		return 0;

	/*
	 * we may not have seen that sequence yet. if we are not doing
	 * the final run, break and wait for more entries.
	 */
	if (expected_sequence < smallest_seq_read) {
		/* may already have been shown: look in the "last" tree */
		t = trace_rb_find_last(pdi, expected_sequence);
		if (!t)
			goto skip;

		__put_trace_last(pdi, t);
		return 0;
	} else {
skip:
		if (print_missing) {
			fprintf(stderr, "(%d,%d): skipping %lu -> %u\n",
				MAJOR(pdi->dev), MINOR(pdi->dev),
				pdi->last_sequence, bit->sequence);
		}
		/* count the gap and display the event anyway */
		pdi->skips++;
		return 0;
	}
}
1353
/*
 * Emit events from the sort tree in time order.  With force == 0,
 * stop at the first sequence gap or at events newer than
 * last_allowed_time; with force != 0, flush everything remaining.
 */
static void show_entries_rb(int force)
{
	struct per_dev_info *pdi = NULL;
	struct per_cpu_info *pci = NULL;
	struct blk_io_trace *bit;
	struct rb_node *n;
	struct trace *t;

	while ((n = rb_first(&rb_sort_root)) != NULL) {
		if (is_done() && !force && !pipeline)
			break;

		t = rb_entry(n, struct trace, rb_node);
		bit = t->bit;

		/* cache the device lookup across consecutive events */
		if (!pdi || pdi->dev != bit->device)
			pdi = get_dev_info(bit->device);

		if (!pdi) {
			fprintf(stderr, "Unknown device ID? (%d,%d)\n",
				MAJOR(bit->device), MINOR(bit->device));
			break;
		}

		if (!force) {
			/* hold back out-of-sequence or too-new events */
			if (check_sequence(pdi, bit))
				break;

			if (bit->time > last_allowed_time)
				break;
		}

		pdi->last_sequence = bit->sequence;

		check_time(pdi, bit);

		/* cache the cpu lookup as well */
		if (!pci || pci->cpu != bit->cpu)
			pci = get_cpu_info(pdi, bit->cpu);

		dump_trace(bit, pci, pdi);

		put_trace(pdi, t);
	}
}
1398
/*
 * Read exactly 'bytes' bytes from fd into buffer, switching the fd
 * into (non-)blocking mode first according to 'block'.
 *
 * Returns 0 on a complete read, 1 on EOF before any byte of the
 * current chunk, -1 on a read error (EAGAIN included, silently).
 */
static int read_data(int fd, void *buffer, int bytes, int block)
{
	int ret, bytes_left, fl;
	char *p;	/* char *: ISO C forbids arithmetic on void * */

	fl = fcntl(fd, F_GETFL);
	/* only touch the flags if we could actually read them */
	if (fl != -1) {
		if (!block)
			fcntl(fd, F_SETFL, fl | O_NONBLOCK);
		else
			fcntl(fd, F_SETFL, fl & ~O_NONBLOCK);
	}

	bytes_left = bytes;
	p = buffer;
	while (bytes_left > 0) {
		ret = read(fd, p, bytes_left);
		if (!ret)
			return 1;
		else if (ret < 0) {
			if (errno != EAGAIN)
				perror("read");

			return -1;
		} else {
			p += ret;
			bytes_left -= ret;
		}
	}

	return 0;
}
1430
1431static int read_events(int fd, int always_block)
1432{
1433 struct per_dev_info *pdi = NULL;
1434 unsigned int events = 0;
1435
1436 while (!is_done() && events < rb_batch) {
1437 struct blk_io_trace *bit;
1438 struct trace *t;
1439 int pdu_len;
1440 __u32 magic;
1441
1442 bit = bit_alloc();
1443
1444 if (read_data(fd, bit, sizeof(*bit), !events || always_block))
1445 break;
1446
1447 magic = be32_to_cpu(bit->magic);
1448 if ((magic & 0xffffff00) != BLK_IO_TRACE_MAGIC) {
1449 fprintf(stderr, "Bad magic %x\n", magic);
1450 break;
1451 }
1452
1453 pdu_len = be16_to_cpu(bit->pdu_len);
1454 if (pdu_len) {
1455 void *ptr = realloc(bit, sizeof(*bit) + pdu_len);
1456
1457 if (read_data(fd, ptr + sizeof(*bit), pdu_len, 1))
1458 break;
1459
1460 bit = ptr;
1461 }
1462
1463 trace_to_cpu(bit);
1464
1465 if (verify_trace(bit)) {
1466 bit_free(bit);
1467 continue;
1468 }
1469
1470 t = t_alloc();
1471 memset(t, 0, sizeof(*t));
1472 t->bit = bit;
1473
1474 t->next = trace_list;
1475 trace_list = t;
1476
1477 if (!pdi || pdi->dev != bit->device)
1478 pdi = get_dev_info(bit->device);
1479
1480 if (bit->time > pdi->last_read_time)
1481 pdi->last_read_time = bit->time;
1482
1483 events++;
1484 }
1485
1486 return events;
1487}
1488
/*
 * Parse pre-recorded per-cpu trace files named "<dev>.blktrace.<cpu>".
 * Opens every per-cpu file of every device, then repeatedly reads,
 * sorts and displays events until all inputs are drained.
 * Always returns 0.
 */
static int do_file(void)
{
	struct per_cpu_info *pci;
	struct per_dev_info *pdi;
	int i, j, events, events_added;

	/*
	 * first prepare all files for reading
	 */
	for (i = 0; i < ndevices; i++) {
		pdi = &devices[i];
		pdi->nfiles = 0;
		pdi->last_sequence = -1;

		for (j = 0;; j++) {
			struct stat st;

			pci = get_cpu_info(pdi, j);
			pci->cpu = j;
			pci->fd = -1;

			/* stop at the first missing cpu file */
			snprintf(pci->fname, sizeof(pci->fname)-1,
				 "%s.blktrace.%d", pdi->name, pci->cpu);
			if (stat(pci->fname, &st) < 0)
				break;
			/* empty files are counted but never opened */
			if (st.st_size) {
				pci->fd = open(pci->fname, O_RDONLY);
				if (pci->fd < 0) {
					/*
					 * NOTE(review): nfiles is not bumped
					 * on open failure, but the read loop
					 * below iterates j < nfiles, so a
					 * later cpu file at an index >=
					 * nfiles is never read -- verify
					 */
					perror(pci->fname);
					continue;
				}
			}

			printf("Input file %s added\n", pci->fname);
			pdi->nfiles++;
		}
	}

	/*
	 * now loop over the files reading in the data
	 */
	do {
		unsigned long long youngest;

		events_added = 0;
		last_allowed_time = -1ULL;
		smallest_seq_read = -1U;

		for (i = 0; i < ndevices; i++) {
			pdi = &devices[i];

			for (j = 0; j < pdi->nfiles; j++) {

				pci = get_cpu_info(pdi, j);

				if (pci->fd == -1)
					continue;

				events = read_events(pci->fd, 1);
				if (!events) {
					/* this cpu file is exhausted */
					close(pci->fd);
					pci->fd = -1;
					continue;
				}

				/* only show events every input has read past */
				if (pdi->last_read_time < last_allowed_time)
					last_allowed_time = pdi->last_read_time;

				events_added += events;
			}
		}

		if (sort_entries(&youngest))
			break;

		if (youngest > stopwatch_end)
			break;

		show_entries_rb(0);

	} while (events_added);

	/* final pass: flush whatever is still queued in the sort tree */
	if (rb_sort_entries)
		show_entries_rb(1);

	return 0;
}
1576
1577static int do_stdin(void)
1578{
1579 unsigned long long youngest;
1580 int fd, events, loops;
1581
1582 last_allowed_time = -1ULL;
1583 fd = dup(STDIN_FILENO);
1584 if (fd == -1) {
1585 perror("dup stdin");
1586 return -1;
1587 }
1588
1589 loops = 0;
1590 while ((events = read_events(fd, 0)) != 0) {
1591
1592 smallest_seq_read = -1U;
1593
1594 if (sort_entries(&youngest))
1595 break;
1596
1597 if (youngest > stopwatch_end)
1598 break;
1599
1600 if (loops++ & 1)
1601 show_entries_rb(0);
1602 }
1603
1604 if (rb_sort_entries)
1605 show_entries_rb(1);
1606
1607 close(fd);
1608 return 0;
1609}
1610
/* push buffered output to the -o file (or stdout) */
static void flush_output(void)
{
	fflush(ofp);
}
1615
/*
 * SIGINT/SIGHUP/SIGTERM handler: request shutdown and flush output.
 * NOTE(review): fflush() is not async-signal-safe; a signal landing
 * mid-printf can corrupt stdio state -- consider flushing only from
 * the main loop.
 */
static void handle_sigint(__attribute__((__unused__)) int sig)
{
	done = 1;
	flush_output();
}
1621
1622/*
1623 * Extract start and duration times from a string, allowing
1624 * us to specify a time interval of interest within a trace.
1625 * Format: "duration" (start is zero) or "start:duration".
1626 */
1627static int find_stopwatch_interval(char *string)
1628{
1629 double value;
1630 char *sp;
1631
1632 value = strtod(string, &sp);
1633 if (sp == string) {
1634 fprintf(stderr,"Invalid stopwatch timer: %s\n", string);
1635 return 1;
1636 }
1637 if (*sp == ':') {
1638 stopwatch_start = DOUBLE_TO_NANO_ULL(value);
1639 string = sp + 1;
1640 value = strtod(string, &sp);
1641 if (sp == string || *sp != '\0') {
1642 fprintf(stderr,"Invalid stopwatch duration time: %s\n",
1643 string);
1644 return 1;
1645 }
1646 } else if (*sp != '\0') {
1647 fprintf(stderr,"Invalid stopwatch start timer: %s\n", string);
1648 return 1;
1649 }
1650 stopwatch_end = DOUBLE_TO_NANO_ULL(value);
1651 if (stopwatch_end <= stopwatch_start) {
1652 fprintf(stderr, "Invalid stopwatch interval: %Lu -> %Lu\n",
1653 stopwatch_start, stopwatch_end);
1654 return 1;
1655 }
1656
1657 return 0;
1658}
1659
/* help text printed by usage(); synopsis line lists every option */
static char usage_str[] = \
	"[ -i <input name> ] [ -o <output name> ] [ -b <batch> ] [ -s ] [ -t ]\n" \
	"[ -q ] [ -n ] [ -m ] [ -w start:stop ] [ -f output format ]\n" \
	"[ -F format spec ] [ -v ]\n\n" \
	"\t-i Input file containing trace data, or '-' for stdin\n" \
	"\t-o Output file. If not given, output is stdout\n" \
	"\t-b stdin read batching\n" \
	"\t-s Show per-program io statistics\n" \
	"\t-n Hash processes by name, not pid\n" \
	"\t-t Track individual ios. Will tell you the time a request took\n" \
	"\t to get queued, to get dispatched, and to get completed\n" \
	"\t-q Quiet. Don't display any stats at the end of the trace\n" \
	"\t-w Only parse data between the given time interval in seconds.\n" \
	"\t If 'start' isn't given, blkparse defaults the start time to 0\n" \
	"\t-f Output format. Customize the output format. The format field\n" \
	"\t identifiers can be found in the documentation\n" \
	"\t-F Format specification. Can be found in the documentation\n" \
	"\t-m Print missing entries\n" \
	"\t-v Print program version info\n\n";
1678
/* print the usage synopsis (with version) to stderr */
static void usage(char *prog)
{
	fprintf(stderr, "Usage: %s %s %s", prog, blkparse_version, usage_str);
}
1683
/*
 * Entry point: parse options, install signal handlers, set up the
 * output stream, then parse either stdin ('-') or the named
 * per-device trace files and print the requested statistics.
 */
int main(int argc, char *argv[])
{
	char *ofp_buffer;
	int c, ret, mode;
	int per_device_and_cpu_stats = 1;

	while ((c = getopt_long(argc, argv, S_OPTS, l_opts, NULL)) != -1) {
		switch (c) {
		case 'i':
			/* '-' selects the stdin pipeline mode */
			if (!strcmp(optarg, "-") && !pipeline)
				pipeline = 1;
			else if (resize_devices(optarg) != 0)
				return 1;
			break;
		case 'o':
			output_name = optarg;
			break;
		case 'b':
			/* events per read batch; fall back on bad input */
			rb_batch = atoi(optarg);
			if (rb_batch <= 0)
				rb_batch = RB_BATCH_DEFAULT;
			break;
		case 's':
			per_process_stats = 1;
			break;
		case 't':
			track_ios = 1;
			break;
		case 'q':
			per_device_and_cpu_stats = 0;
			break;
		case 'w':
			if (find_stopwatch_interval(optarg) != 0)
				return 1;
			break;
		case 'f':
			set_all_format_specs(optarg);
			break;
		case 'F':
			if (add_format_spec(optarg) != 0)
				return 1;
			break;
		case 'n':
			ppi_hash_by_pid = 0;
			break;
		case 'm':
			print_missing = 1;
			break;
		case 'v':
			printf("%s version %s\n", argv[0], blkparse_version);
			return 0;
		default:
			usage(argv[0]);
			return 1;
		}
	}

	/* remaining non-option arguments are device names (or '-') too */
	while (optind < argc) {
		if (!strcmp(argv[optind], "-") && !pipeline)
			pipeline = 1;
		else if (resize_devices(argv[optind]) != 0)
			return 1;
		optind++;
	}

	if (!pipeline && !ndevices) {
		usage(argv[0]);
		return 1;
	}

	memset(&rb_sort_root, 0, sizeof(rb_sort_root));

	/* flush output cleanly on interrupt/hangup/terminate */
	signal(SIGINT, handle_sigint);
	signal(SIGHUP, handle_sigint);
	signal(SIGTERM, handle_sigint);

	/* thousands separators in the printed statistics */
	setlocale(LC_NUMERIC, "en_US");

	if (!output_name) {
		/* stdout: line buffered so piped output appears promptly */
		ofp = fdopen(STDOUT_FILENO, "w");
		mode = _IOLBF;
	} else {
		char ofname[128];

		snprintf(ofname, sizeof(ofname) - 1, "%s", output_name);
		ofp = fopen(ofname, "w");
		mode = _IOFBF;
	}

	/* NOTE(review): perror("fopen") also covers the fdopen() path */
	if (!ofp) {
		perror("fopen");
		return 1;
	}

	/*
	 * NOTE(review): ofp_buffer is unchecked; if malloc fails,
	 * setvbuf() receives a NULL buffer (which stdio treats as
	 * "allocate internally") -- confirm that is acceptable
	 */
	ofp_buffer = malloc(4096);
	if (setvbuf(ofp, ofp_buffer, mode, 4096)) {
		perror("setvbuf");
		return 1;
	}

	if (pipeline)
		ret = do_stdin();
	else
		ret = do_file();

	if (per_process_stats)
		show_process_stats();

	if (per_device_and_cpu_stats)
		show_device_and_cpu_stats();

	flush_output();
	return ret;
}