[PATCH] verify_blkparse: improve output
[blktrace.git] / blkparse.c
/*
 * block queue tracing parse application
 *
 * Copyright (C) 2005 Jens Axboe <axboe@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <stdio.h>
#include <fcntl.h>
#include <stdlib.h>
#include <string.h>
#include <getopt.h>
#include <errno.h>
#include <signal.h>
#include <locale.h>
#include <libgen.h>

#include "blktrace.h"
#include "rbtree.h"
#include "jhash.h"

static char blkparse_version[] = "0.99";

struct skip_info {
	unsigned long start, end;
	struct skip_info *prev, *next;
};

struct per_dev_info {
	dev_t dev;
	char *name;

	int backwards;
	unsigned long long events;
	unsigned long long first_reported_time;
	unsigned long long last_reported_time;
	unsigned long long last_read_time;
	struct io_stats io_stats;
	unsigned long skips, nskips;
	unsigned long long seq_skips, seq_nskips;
	unsigned int max_depth[2];
	unsigned int cur_depth[2];

	struct rb_root rb_track;

	int nfiles;
	int ncpus;

	unsigned long *cpu_map;
	unsigned int cpu_map_max;

	struct per_cpu_info *cpus;
	struct skip_info *skips_head;
	struct skip_info *skips_tail;
};

struct per_process_info {
	char name[16];
	__u32 pid;
	struct io_stats io_stats;
	struct per_process_info *hash_next, *list_next;
	int more_than_one;

	/*
	 * individual io stats
	 */
	unsigned long long longest_allocation_wait[2];
	unsigned long long longest_dispatch_wait[2];
	unsigned long long longest_completion_wait[2];
};

#define PPI_HASH_SHIFT	(8)
#define PPI_HASH_SIZE	(1 << PPI_HASH_SHIFT)
#define PPI_HASH_MASK	(PPI_HASH_SIZE - 1)
static struct per_process_info *ppi_hash_table[PPI_HASH_SIZE];
static struct per_process_info *ppi_list;
static int ppi_list_entries;

#define S_OPTS	"a:A:i:o:b:stqw:f:F:vVhD:"
static struct option l_opts[] = {
	{
		.name = "act-mask",
		.has_arg = required_argument,
		.flag = NULL,
		.val = 'a'
	},
	{
		.name = "set-mask",
		.has_arg = required_argument,
		.flag = NULL,
		.val = 'A'
	},
	{
		.name = "input",
		.has_arg = required_argument,
		.flag = NULL,
		.val = 'i'
	},
	{
		.name = "output",
		.has_arg = required_argument,
		.flag = NULL,
		.val = 'o'
	},
	{
		.name = "batch",
		.has_arg = required_argument,
		.flag = NULL,
		.val = 'b'
	},
	{
		.name = "per-program-stats",
		.has_arg = no_argument,
		.flag = NULL,
		.val = 's'
	},
	{
		.name = "track-ios",
		.has_arg = no_argument,
		.flag = NULL,
		.val = 't'
	},
	{
		.name = "quiet",
		.has_arg = no_argument,
		.flag = NULL,
		.val = 'q'
	},
	{
		.name = "stopwatch",
		.has_arg = required_argument,
		.flag = NULL,
		.val = 'w'
	},
	{
		.name = "format",
		.has_arg = required_argument,
		.flag = NULL,
		.val = 'f'
	},
	{
		.name = "format-spec",
		.has_arg = required_argument,
		.flag = NULL,
		.val = 'F'
	},
	{
		.name = "hash-by-name",
		.has_arg = no_argument,
		.flag = NULL,
		.val = 'h'
	},
	{
		.name = "verbose",
		.has_arg = no_argument,
		.flag = NULL,
		.val = 'v'
	},
	{
		.name = "version",
		.has_arg = no_argument,
		.flag = NULL,
		.val = 'V'
	},
	{
		.name = "input-directory",
		.has_arg = required_argument,
		.flag = NULL,
		.val = 'D'
	},
	{
		.name = NULL,
	}
};

/*
 * for sorting the displayed output
 */
struct trace {
	struct blk_io_trace *bit;
	struct rb_node rb_node;
	struct trace *next;
};

static struct rb_root rb_sort_root;
static unsigned long rb_sort_entries;

static struct trace *trace_list;

/*
 * allocation cache
 */
static struct blk_io_trace *bit_alloc_list;
static struct trace *t_alloc_list;

/*
 * for tracking individual ios
 */
struct io_track {
	struct rb_node rb_node;

	__u64 sector;
	__u32 pid;
	char comm[16];
	unsigned long long allocation_time;
	unsigned long long queue_time;
	unsigned long long dispatch_time;
	unsigned long long completion_time;
};

static int ndevices;
static struct per_dev_info *devices;
static char *get_dev_name(struct per_dev_info *, char *, int);
static int trace_rb_insert_last(struct per_dev_info *, struct trace *);

FILE *ofp = NULL;
static char *output_name;
static char *input_dir;

static unsigned long long genesis_time;
static unsigned long long last_allowed_time;
static unsigned long long stopwatch_start;	/* start from zero by default */
static unsigned long long stopwatch_end = -1ULL;	/* "infinity" */

static int per_process_stats;
static int per_device_and_cpu_stats = 1;
static int track_ios;
static int ppi_hash_by_pid = 1;
static int verbose;
static unsigned int act_mask = -1U;
static int stats_printed;

static unsigned int t_alloc_cache;
static unsigned int bit_alloc_cache;

#define RB_BATCH_DEFAULT	(512)
static unsigned int rb_batch = RB_BATCH_DEFAULT;

static int pipeline;

#define is_done()	(*(volatile int *)(&done))
static volatile int done;

#define JHASH_RANDOM	(0x3af5f2ee)

#define CPUS_PER_LONG	(8 * sizeof(unsigned long))
#define CPU_IDX(cpu)	((cpu) / CPUS_PER_LONG)
#define CPU_BIT(cpu)	((cpu) & (CPUS_PER_LONG - 1))
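
/*
 * Worked example: on a 64-bit host CPUS_PER_LONG is 64, so cpu 67 maps
 * to CPU_IDX(67) == 1 and CPU_BIT(67) == 3, i.e. bit 3 of the second
 * word. This is also why the map allocations below always round up to
 * a multiple of CPUS_PER_LONG.
 */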

static void resize_cpu_info(struct per_dev_info *pdi, int cpu)
{
	struct per_cpu_info *cpus = pdi->cpus;
	int ncpus = pdi->ncpus;
	int new_count = cpu + 1;
	int new_space, size;
	char *new_start;

	size = new_count * sizeof(struct per_cpu_info);
	cpus = realloc(cpus, size);
	if (!cpus) {
		char name[20];
		fprintf(stderr, "Out of memory, CPU info for device %s (%d)\n",
			get_dev_name(pdi, name, sizeof(name)), size);
		exit(1);
	}

	new_start = (char *)cpus + (ncpus * sizeof(struct per_cpu_info));
	new_space = (new_count - ncpus) * sizeof(struct per_cpu_info);
	memset(new_start, 0, new_space);

	pdi->ncpus = new_count;
	pdi->cpus = cpus;

	for (new_count = 0; new_count < pdi->ncpus; new_count++) {
		struct per_cpu_info *pci = &pdi->cpus[new_count];

		if (!pci->fd) {
			pci->fd = -1;
			memset(&pci->rb_last, 0, sizeof(pci->rb_last));
			pci->rb_last_entries = 0;
			pci->last_sequence = -1;
		}
	}
}

static struct per_cpu_info *get_cpu_info(struct per_dev_info *pdi, int cpu)
{
	struct per_cpu_info *pci;

	if (cpu >= pdi->ncpus)
		resize_cpu_info(pdi, cpu);

	pci = &pdi->cpus[cpu];
	pci->cpu = cpu;
	return pci;
}


static int resize_devices(char *name)
{
	int size = (ndevices + 1) * sizeof(struct per_dev_info);

	devices = realloc(devices, size);
	if (!devices) {
		fprintf(stderr, "Out of memory, device %s (%d)\n", name, size);
		return 1;
	}
	memset(&devices[ndevices], 0, sizeof(struct per_dev_info));
	devices[ndevices].name = name;
	ndevices++;
	return 0;
}

static struct per_dev_info *get_dev_info(dev_t dev)
{
	struct per_dev_info *pdi;
	int i;

	for (i = 0; i < ndevices; i++) {
		if (!devices[i].dev)
			devices[i].dev = dev;
		if (devices[i].dev == dev)
			return &devices[i];
	}

	if (resize_devices(NULL))
		return NULL;

	pdi = &devices[ndevices - 1];
	pdi->dev = dev;
	pdi->first_reported_time = 0;
	pdi->last_read_time = 0;
	pdi->skips_head = pdi->skips_tail = NULL;

	return pdi;
}

static void insert_skip(struct per_dev_info *pdi, unsigned long start,
			unsigned long end)
{
	struct skip_info *sip;

	for (sip = pdi->skips_tail; sip != NULL; sip = sip->prev) {
		if (end == (sip->start - 1)) {
			sip->start = start;
			return;
		} else if (start == (sip->end + 1)) {
			sip->end = end;
			return;
		}
	}

	sip = malloc(sizeof(struct skip_info));
	sip->start = start;
	sip->end = end;
	sip->prev = sip->next = NULL;
	if (pdi->skips_tail == NULL)
		pdi->skips_head = pdi->skips_tail = sip;
	else {
		sip->prev = pdi->skips_tail;
		pdi->skips_tail->next = sip;
		pdi->skips_tail = sip;
	}
}
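
/*
 * Sketch of the coalescing above: with [5,9] already queued,
 * insert_skip(pdi, 10, 12) extends it to [5,12] through the
 * (start == end + 1) case, and insert_skip(pdi, 2, 4) extends it to
 * [2,9]; only a non-adjacent range allocates a new skip_info.
 */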

static void remove_sip(struct per_dev_info *pdi, struct skip_info *sip)
{
	if (sip->prev == NULL) {
		if (sip->next == NULL)
			pdi->skips_head = pdi->skips_tail = NULL;
		else {
			pdi->skips_head = sip->next;
			sip->next->prev = NULL;
		}
	} else if (sip->next == NULL) {
		pdi->skips_tail = sip->prev;
		sip->prev->next = NULL;
	} else {
		sip->prev->next = sip->next;
		sip->next->prev = sip->prev;
	}

	sip->prev = sip->next = NULL;
	free(sip);
}

#define IN_SKIP(sip,seq) (((sip)->start <= (seq)) && ((seq) <= (sip)->end))
static int check_current_skips(struct per_dev_info *pdi, unsigned long seq)
{
	struct skip_info *sip;

	for (sip = pdi->skips_tail; sip != NULL; sip = sip->prev) {
		if (IN_SKIP(sip, seq)) {
			if (sip->start == seq) {
				if (sip->end == seq)
					remove_sip(pdi, sip);
				else
					sip->start += 1;
			} else if (sip->end == seq)
				sip->end -= 1;
			else {
				unsigned long end = sip->end;

				/*
				 * splitting the skip: save the old end
				 * before truncating it, or we would
				 * insert an inverted (empty) range
				 */
				sip->end = seq - 1;
				insert_skip(pdi, seq + 1, end);
			}
			return 1;
		}
	}
	return 0;
}
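
/*
 * Example: with a pending skip of [5,9], seeing sequence 5 shrinks it
 * to [6,9], sequence 9 shrinks it to [5,8], and sequence 7 splits it
 * into [5,6] and [8,9] via the else branch above.
 */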

static void collect_pdi_skips(struct per_dev_info *pdi)
{
	struct skip_info *sip;

	pdi->skips = 0;
	pdi->seq_skips = 0;
	for (sip = pdi->skips_head; sip != NULL; sip = sip->next) {
		pdi->skips += 1;
		pdi->seq_skips += (sip->end - sip->start + 1);
		if (verbose)
			fprintf(stderr, "(%d,%d): skipping %lu -> %lu\n",
				MAJOR(pdi->dev), MINOR(pdi->dev),
				sip->start, sip->end);
	}
}

static void cpu_mark_online(struct per_dev_info *pdi, unsigned int cpu)
{
	if (cpu >= pdi->cpu_map_max || !pdi->cpu_map) {
		int new_max = (cpu + CPUS_PER_LONG) & ~(CPUS_PER_LONG - 1);
		unsigned long *map = malloc(new_max / sizeof(long));

		memset(map, 0, new_max / sizeof(long));

		if (pdi->cpu_map) {
			memcpy(map, pdi->cpu_map, pdi->cpu_map_max / sizeof(long));
			free(pdi->cpu_map);
		}

		pdi->cpu_map = map;
		pdi->cpu_map_max = new_max;
	}

	pdi->cpu_map[CPU_IDX(cpu)] |= (1UL << CPU_BIT(cpu));
}

static inline void cpu_mark_offline(struct per_dev_info *pdi, int cpu)
{
	pdi->cpu_map[CPU_IDX(cpu)] &= ~(1UL << CPU_BIT(cpu));
}

static inline int cpu_is_online(struct per_dev_info *pdi, int cpu)
{
	return (pdi->cpu_map[CPU_IDX(cpu)] & (1UL << CPU_BIT(cpu))) != 0;
}

static inline int ppi_hash_pid(__u32 pid)
{
	return jhash_1word(pid, JHASH_RANDOM) & PPI_HASH_MASK;
}

static inline int ppi_hash_name(const char *name)
{
	return jhash(name, 16, JHASH_RANDOM) & PPI_HASH_MASK;
}

static inline int ppi_hash(struct per_process_info *ppi)
{
	if (ppi_hash_by_pid)
		return ppi_hash_pid(ppi->pid);

	return ppi_hash_name(ppi->name);
}
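
/*
 * Both hash helpers land in one of the PPI_HASH_SIZE (256) buckets;
 * find_process() below picks pid hashing by default and name hashing
 * when -h is given, so per-process stats can be merged across pids
 * sharing a comm.
 */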

static inline void add_process_to_hash(struct per_process_info *ppi)
{
	const int hash_idx = ppi_hash(ppi);

	ppi->hash_next = ppi_hash_table[hash_idx];
	ppi_hash_table[hash_idx] = ppi;
}

static inline void add_process_to_list(struct per_process_info *ppi)
{
	ppi->list_next = ppi_list;
	ppi_list = ppi;
	ppi_list_entries++;
}

static struct per_process_info *find_process_by_name(char *name)
{
	const int hash_idx = ppi_hash_name(name);
	struct per_process_info *ppi;

	ppi = ppi_hash_table[hash_idx];
	while (ppi) {
		if (!strcmp(ppi->name, name))
			return ppi;

		ppi = ppi->hash_next;
	}

	return NULL;
}

static struct per_process_info *find_process_by_pid(__u32 pid)
{
	const int hash_idx = ppi_hash_pid(pid);
	struct per_process_info *ppi;

	ppi = ppi_hash_table[hash_idx];
	while (ppi) {
		if (ppi->pid == pid)
			return ppi;

		ppi = ppi->hash_next;
	}

	return NULL;
}

static struct per_process_info *find_process(__u32 pid, char *name)
{
	struct per_process_info *ppi;

	if (ppi_hash_by_pid)
		return find_process_by_pid(pid);

	ppi = find_process_by_name(name);
	if (ppi && ppi->pid != pid)
		ppi->more_than_one = 1;

	return ppi;
}

/*
 * struct trace and blktrace allocation cache, we do potentially
 * millions of mallocs for these structures while only using at most
 * a few thousand at a time
 */
static inline void t_free(struct trace *t)
{
	if (t_alloc_cache < 1024) {
		t->next = t_alloc_list;
		t_alloc_list = t;
		t_alloc_cache++;
	} else
		free(t);
}

static inline struct trace *t_alloc(void)
{
	struct trace *t = t_alloc_list;

	if (t) {
		t_alloc_list = t->next;
		t_alloc_cache--;
		return t;
	}

	return malloc(sizeof(*t));
}

static inline void bit_free(struct blk_io_trace *bit)
{
	if (bit_alloc_cache < 1024 && !bit->pdu_len) {
		/*
		 * abuse a 64-bit field for a next pointer for the free item
		 */
		bit->time = (__u64) (unsigned long) bit_alloc_list;
		bit_alloc_list = (struct blk_io_trace *) bit;
		bit_alloc_cache++;
	} else
		free(bit);
}

static inline struct blk_io_trace *bit_alloc(void)
{
	struct blk_io_trace *bit = bit_alloc_list;

	if (bit) {
		bit_alloc_list = (struct blk_io_trace *) (unsigned long) \
				 bit->time;
		bit_alloc_cache--;
		return bit;
	}

	return malloc(sizeof(*bit));
}
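
/*
 * Typical pairing, as used in read_events() below:
 *
 *	bit = bit_alloc();
 *	if (read_data(fd, bit, sizeof(*bit), should_block, fdblock))
 *		bit_free(bit);
 *
 * Freed entries are chained through the (then unused) time field.
 * A bit that grew a pdu buffer via realloc is never cached, since its
 * allocation size would no longer match on reuse.
 */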

static inline void __put_trace_last(struct per_dev_info *pdi, struct trace *t)
{
	struct per_cpu_info *pci = get_cpu_info(pdi, t->bit->cpu);

	rb_erase(&t->rb_node, &pci->rb_last);
	pci->rb_last_entries--;

	bit_free(t->bit);
	t_free(t);
}

static void put_trace(struct per_dev_info *pdi, struct trace *t)
{
	rb_erase(&t->rb_node, &rb_sort_root);
	rb_sort_entries--;

	trace_rb_insert_last(pdi, t);
}

static inline int trace_rb_insert(struct trace *t, struct rb_root *root)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct trace *__t;

	while (*p) {
		parent = *p;

		__t = rb_entry(parent, struct trace, rb_node);

		if (t->bit->time < __t->bit->time)
			p = &(*p)->rb_left;
		else if (t->bit->time > __t->bit->time)
			p = &(*p)->rb_right;
		else if (t->bit->device < __t->bit->device)
			p = &(*p)->rb_left;
		else if (t->bit->device > __t->bit->device)
			p = &(*p)->rb_right;
		else if (t->bit->sequence < __t->bit->sequence)
			p = &(*p)->rb_left;
		else	/* >= sequence */
			p = &(*p)->rb_right;
	}

	rb_link_node(&t->rb_node, parent, p);
	rb_insert_color(&t->rb_node, root);
	return 0;
}
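
/*
 * The sort key is (time, device, sequence), in that order, so walking
 * rb_sort_root from rb_first() yields one stream that is time ordered
 * across all devices and CPUs; equal keys fall to the right and keep
 * their insertion order.
 */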

static inline int trace_rb_insert_sort(struct trace *t)
{
	if (!trace_rb_insert(t, &rb_sort_root)) {
		rb_sort_entries++;
		return 0;
	}

	return 1;
}

static int trace_rb_insert_last(struct per_dev_info *pdi, struct trace *t)
{
	struct per_cpu_info *pci = get_cpu_info(pdi, t->bit->cpu);

	if (trace_rb_insert(t, &pci->rb_last))
		return 1;

	pci->rb_last_entries++;

	if (pci->rb_last_entries > rb_batch * pdi->nfiles) {
		struct rb_node *n = rb_first(&pci->rb_last);

		t = rb_entry(n, struct trace, rb_node);
		__put_trace_last(pdi, t);
	}

	return 0;
}

static struct trace *trace_rb_find(dev_t device, unsigned long sequence,
				   struct rb_root *root, int order)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct trace *__t;

	while (n) {
		__t = rb_entry(n, struct trace, rb_node);
		prev = n;

		if (device < __t->bit->device)
			n = n->rb_left;
		else if (device > __t->bit->device)
			n = n->rb_right;
		else if (sequence < __t->bit->sequence)
			n = n->rb_left;
		else if (sequence > __t->bit->sequence)
			n = n->rb_right;
		else
			return __t;
	}

	/*
	 * hack - the list may not be sequence ordered because some
	 * events don't have sequence and time matched. so we end up
	 * being a little off in the rb lookup here, because we don't
	 * know the time we are looking for. compensate by browsing
	 * a little ahead from the last entry to find the match
	 */
	if (order && prev) {
		int max = 5;

		while (((n = rb_next(prev)) != NULL) && max--) {
			__t = rb_entry(n, struct trace, rb_node);

			if (__t->bit->device == device &&
			    __t->bit->sequence == sequence)
				return __t;

			prev = n;
		}
	}

	return NULL;
}

static inline struct trace *trace_rb_find_last(struct per_dev_info *pdi,
					       struct per_cpu_info *pci,
					       unsigned long seq)
{
	return trace_rb_find(pdi->dev, seq, &pci->rb_last, 0);
}

static inline int track_rb_insert(struct per_dev_info *pdi, struct io_track *iot)
{
	struct rb_node **p = &pdi->rb_track.rb_node;
	struct rb_node *parent = NULL;
	struct io_track *__iot;

	while (*p) {
		parent = *p;
		__iot = rb_entry(parent, struct io_track, rb_node);

		if (iot->sector < __iot->sector)
			p = &(*p)->rb_left;
		else if (iot->sector > __iot->sector)
			p = &(*p)->rb_right;
		else {
			fprintf(stderr,
				"sector alias (%Lu) on device %d,%d!\n",
				(unsigned long long) iot->sector,
				MAJOR(pdi->dev), MINOR(pdi->dev));
			return 1;
		}
	}

	rb_link_node(&iot->rb_node, parent, p);
	rb_insert_color(&iot->rb_node, &pdi->rb_track);
	return 0;
}

static struct io_track *__find_track(struct per_dev_info *pdi, __u64 sector)
{
	struct rb_node *n = pdi->rb_track.rb_node;
	struct io_track *__iot;

	while (n) {
		__iot = rb_entry(n, struct io_track, rb_node);

		if (sector < __iot->sector)
			n = n->rb_left;
		else if (sector > __iot->sector)
			n = n->rb_right;
		else
			return __iot;
	}

	return NULL;
}

static struct io_track *find_track(struct per_dev_info *pdi, __u32 pid,
				   char *comm, __u64 sector)
{
	struct io_track *iot;

	iot = __find_track(pdi, sector);
	if (!iot) {
		iot = malloc(sizeof(*iot));
		/*
		 * zero the track, so a missing getrq leaves
		 * allocation_time at 0, which the insert path
		 * below relies on
		 */
		memset(iot, 0, sizeof(*iot));
		iot->pid = pid;
		memcpy(iot->comm, comm, sizeof(iot->comm));
		iot->sector = sector;
		track_rb_insert(pdi, iot);
	}

	return iot;
}

static void log_track_frontmerge(struct per_dev_info *pdi,
				 struct blk_io_trace *t)
{
	struct io_track *iot;

	if (!track_ios)
		return;

	iot = __find_track(pdi, t->sector + t_sec(t));
	if (!iot) {
		if (verbose)
			fprintf(stderr, "merge not found for (%d,%d): %llu\n",
				MAJOR(pdi->dev), MINOR(pdi->dev),
				(unsigned long long) t->sector + t_sec(t));
		return;
	}

	rb_erase(&iot->rb_node, &pdi->rb_track);
	iot->sector -= t_sec(t);
	track_rb_insert(pdi, iot);
}

static void log_track_getrq(struct per_dev_info *pdi, struct blk_io_trace *t)
{
	struct io_track *iot;

	if (!track_ios)
		return;

	iot = find_track(pdi, t->pid, t->comm, t->sector);
	iot->allocation_time = t->time;
}

/*
 * return time between rq allocation and insertion
 */
static unsigned long long log_track_insert(struct per_dev_info *pdi,
					   struct blk_io_trace *t)
{
	unsigned long long elapsed;
	struct io_track *iot;

	if (!track_ios)
		return -1;

	iot = find_track(pdi, t->pid, t->comm, t->sector);
	iot->queue_time = t->time;

	if (!iot->allocation_time)
		return -1;

	elapsed = iot->queue_time - iot->allocation_time;

	if (per_process_stats) {
		struct per_process_info *ppi = find_process(iot->pid, iot->comm);
		int w = (t->action & BLK_TC_ACT(BLK_TC_WRITE)) != 0;

		if (ppi && elapsed > ppi->longest_allocation_wait[w])
			ppi->longest_allocation_wait[w] = elapsed;
	}

	return elapsed;
}

/*
 * return time between queue and issue
 */
static unsigned long long log_track_issue(struct per_dev_info *pdi,
					  struct blk_io_trace *t)
{
	unsigned long long elapsed;
	struct io_track *iot;

	if (!track_ios)
		return -1;
	if ((t->action & BLK_TC_ACT(BLK_TC_FS)) == 0)
		return -1;

	iot = __find_track(pdi, t->sector);
	if (!iot) {
		if (verbose)
			fprintf(stderr, "issue not found for (%d,%d): %llu\n",
				MAJOR(pdi->dev), MINOR(pdi->dev),
				(unsigned long long) t->sector);
		return -1;
	}

	iot->dispatch_time = t->time;
	elapsed = iot->dispatch_time - iot->queue_time;

	if (per_process_stats) {
		struct per_process_info *ppi = find_process(iot->pid, iot->comm);
		int w = (t->action & BLK_TC_ACT(BLK_TC_WRITE)) != 0;

		if (ppi && elapsed > ppi->longest_dispatch_wait[w])
			ppi->longest_dispatch_wait[w] = elapsed;
	}

	return elapsed;
}

/*
 * return time between dispatch and complete
 */
static unsigned long long log_track_complete(struct per_dev_info *pdi,
					     struct blk_io_trace *t)
{
	unsigned long long elapsed;
	struct io_track *iot;

	if (!track_ios)
		return -1;
	if ((t->action & BLK_TC_ACT(BLK_TC_FS)) == 0)
		return -1;

	iot = __find_track(pdi, t->sector);
	if (!iot) {
		if (verbose)
			fprintf(stderr, "complete not found for (%d,%d): %llu\n",
				MAJOR(pdi->dev), MINOR(pdi->dev),
				(unsigned long long) t->sector);
		return -1;
	}

	iot->completion_time = t->time;
	elapsed = iot->completion_time - iot->dispatch_time;

	if (per_process_stats) {
		struct per_process_info *ppi = find_process(iot->pid, iot->comm);
		int w = (t->action & BLK_TC_ACT(BLK_TC_WRITE)) != 0;

		if (ppi && elapsed > ppi->longest_completion_wait[w])
			ppi->longest_completion_wait[w] = elapsed;
	}

	/*
	 * kill the trace, we don't need it after completion
	 */
	rb_erase(&iot->rb_node, &pdi->rb_track);
	free(iot);

	return elapsed;
}
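
/*
 * Summary of the io_track lifecycle, keyed by sector:
 *
 *	G (getrq)    sets allocation_time
 *	I (insert)   sets queue_time, reports queue - allocation
 *	D (issue)    sets dispatch_time, reports dispatch - queue
 *	C (complete) sets completion_time, reports completion - dispatch,
 *	             then frees the entry
 */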


static struct io_stats *find_process_io_stats(__u32 pid, char *name)
{
	struct per_process_info *ppi = find_process(pid, name);

	if (!ppi) {
		ppi = malloc(sizeof(*ppi));
		memset(ppi, 0, sizeof(*ppi));
		memcpy(ppi->name, name, 16);
		ppi->pid = pid;
		add_process_to_hash(ppi);
		add_process_to_list(ppi);
	}

	return &ppi->io_stats;
}

static char *get_dev_name(struct per_dev_info *pdi, char *buffer, int size)
{
	if (pdi->name)
		snprintf(buffer, size, "%s", pdi->name);
	else
		snprintf(buffer, size, "%d,%d", MAJOR(pdi->dev), MINOR(pdi->dev));
	return buffer;
}

static void check_time(struct per_dev_info *pdi, struct blk_io_trace *bit)
{
	unsigned long long this = bit->time;
	unsigned long long last = pdi->last_reported_time;

	pdi->backwards = (this < last) ? 'B' : ' ';
	pdi->last_reported_time = this;
}

static inline void __account_m(struct io_stats *ios, struct blk_io_trace *t,
			       int rw)
{
	if (rw) {
		ios->mwrites++;
		ios->qwrite_kb += t_kb(t);
	} else {
		ios->mreads++;
		ios->qread_kb += t_kb(t);
	}
}

static inline void account_m(struct blk_io_trace *t, struct per_cpu_info *pci,
			     int rw)
{
	__account_m(&pci->io_stats, t, rw);

	if (per_process_stats) {
		struct io_stats *ios = find_process_io_stats(t->pid, t->comm);

		__account_m(ios, t, rw);
	}
}

static inline void __account_queue(struct io_stats *ios, struct blk_io_trace *t,
				   int rw)
{
	if (rw) {
		ios->qwrites++;
		ios->qwrite_kb += t_kb(t);
	} else {
		ios->qreads++;
		ios->qread_kb += t_kb(t);
	}
}

static inline void account_queue(struct blk_io_trace *t,
				 struct per_cpu_info *pci, int rw)
{
	__account_queue(&pci->io_stats, t, rw);

	if (per_process_stats) {
		struct io_stats *ios = find_process_io_stats(t->pid, t->comm);

		__account_queue(ios, t, rw);
	}
}

static inline void __account_c(struct io_stats *ios, int rw, int bytes)
{
	if (rw) {
		ios->cwrites++;
		ios->cwrite_kb += bytes >> 10;
	} else {
		ios->creads++;
		ios->cread_kb += bytes >> 10;
	}
}

static inline void account_c(struct blk_io_trace *t, struct per_cpu_info *pci,
			     int rw, int bytes)
{
	__account_c(&pci->io_stats, rw, bytes);

	if (per_process_stats) {
		struct io_stats *ios = find_process_io_stats(t->pid, t->comm);

		__account_c(ios, rw, bytes);
	}
}

static inline void __account_issue(struct io_stats *ios, int rw,
				   unsigned int bytes)
{
	if (rw) {
		ios->iwrites++;
		ios->iwrite_kb += bytes >> 10;
	} else {
		ios->ireads++;
		ios->iread_kb += bytes >> 10;
	}
}

static inline void account_issue(struct blk_io_trace *t,
				 struct per_cpu_info *pci, int rw)
{
	__account_issue(&pci->io_stats, rw, t->bytes);

	if (per_process_stats) {
		struct io_stats *ios = find_process_io_stats(t->pid, t->comm);

		__account_issue(ios, rw, t->bytes);
	}
}

static inline void __account_unplug(struct io_stats *ios, int timer)
{
	if (timer)
		ios->timer_unplugs++;
	else
		ios->io_unplugs++;
}

static inline void account_unplug(struct blk_io_trace *t,
				  struct per_cpu_info *pci, int timer)
{
	__account_unplug(&pci->io_stats, timer);

	if (per_process_stats) {
		struct io_stats *ios = find_process_io_stats(t->pid, t->comm);

		__account_unplug(ios, timer);
	}
}

static inline void __account_requeue(struct io_stats *ios,
				     struct blk_io_trace *t, int rw)
{
	if (rw) {
		ios->wrqueue++;
		ios->iwrite_kb -= t_kb(t);
	} else {
		ios->rrqueue++;
		ios->iread_kb -= t_kb(t);
	}
}

static inline void account_requeue(struct blk_io_trace *t,
				   struct per_cpu_info *pci, int rw)
{
	__account_requeue(&pci->io_stats, t, rw);

	if (per_process_stats) {
		struct io_stats *ios = find_process_io_stats(t->pid, t->comm);

		__account_requeue(ios, t, rw);
	}
}

static void log_complete(struct per_dev_info *pdi, struct per_cpu_info *pci,
			 struct blk_io_trace *t, char *act)
{
	process_fmt(act, pci, t, log_track_complete(pdi, t), 0, NULL);
}

static void log_insert(struct per_dev_info *pdi, struct per_cpu_info *pci,
		       struct blk_io_trace *t, char *act)
{
	process_fmt(act, pci, t, log_track_insert(pdi, t), 0, NULL);
}

static void log_queue(struct per_cpu_info *pci, struct blk_io_trace *t,
		      char *act)
{
	process_fmt(act, pci, t, -1, 0, NULL);
}

static void log_issue(struct per_dev_info *pdi, struct per_cpu_info *pci,
		      struct blk_io_trace *t, char *act)
{
	process_fmt(act, pci, t, log_track_issue(pdi, t), 0, NULL);
}

static void log_merge(struct per_dev_info *pdi, struct per_cpu_info *pci,
		      struct blk_io_trace *t, char *act)
{
	if (act[0] == 'F')
		log_track_frontmerge(pdi, t);

	process_fmt(act, pci, t, -1ULL, 0, NULL);
}

static void log_action(struct per_cpu_info *pci, struct blk_io_trace *t,
		       char *act)
{
	process_fmt(act, pci, t, -1ULL, 0, NULL);
}

static void log_generic(struct per_cpu_info *pci, struct blk_io_trace *t,
			char *act)
{
	process_fmt(act, pci, t, -1ULL, 0, NULL);
}

static void log_unplug(struct per_cpu_info *pci, struct blk_io_trace *t,
		       char *act)
{
	process_fmt(act, pci, t, -1ULL, 0, NULL);
}

static void log_split(struct per_cpu_info *pci, struct blk_io_trace *t,
		      char *act)
{
	process_fmt(act, pci, t, -1ULL, 0, NULL);
}

static void log_pc(struct per_cpu_info *pci, struct blk_io_trace *t, char *act)
{
	unsigned char *buf = (unsigned char *) t + sizeof(*t);

	process_fmt(act, pci, t, -1ULL, t->pdu_len, buf);
}

static void dump_trace_pc(struct blk_io_trace *t, struct per_cpu_info *pci)
{
	int act = t->action & 0xffff;

	switch (act) {
	case __BLK_TA_QUEUE:
		log_generic(pci, t, "Q");
		break;
	case __BLK_TA_GETRQ:
		log_generic(pci, t, "G");
		break;
	case __BLK_TA_SLEEPRQ:
		log_generic(pci, t, "S");
		break;
	case __BLK_TA_REQUEUE:
		log_generic(pci, t, "R");
		break;
	case __BLK_TA_ISSUE:
		log_pc(pci, t, "D");
		break;
	case __BLK_TA_COMPLETE:
		log_pc(pci, t, "C");
		break;
	case __BLK_TA_INSERT:
		log_pc(pci, t, "I");
		break;
	default:
		fprintf(stderr, "Bad pc action %x\n", act);
		break;
	}
}

static void dump_trace_fs(struct blk_io_trace *t, struct per_dev_info *pdi,
			  struct per_cpu_info *pci)
{
	int w = (t->action & BLK_TC_ACT(BLK_TC_WRITE)) != 0;
	int act = t->action & 0xffff;

	switch (act) {
	case __BLK_TA_QUEUE:
		account_queue(t, pci, w);
		log_queue(pci, t, "Q");
		break;
	case __BLK_TA_INSERT:
		log_insert(pdi, pci, t, "I");
		break;
	case __BLK_TA_BACKMERGE:
		account_m(t, pci, w);
		log_merge(pdi, pci, t, "M");
		break;
	case __BLK_TA_FRONTMERGE:
		account_m(t, pci, w);
		log_merge(pdi, pci, t, "F");
		break;
	case __BLK_TA_GETRQ:
		log_track_getrq(pdi, t);
		log_generic(pci, t, "G");
		break;
	case __BLK_TA_SLEEPRQ:
		log_generic(pci, t, "S");
		break;
	case __BLK_TA_REQUEUE:
		pdi->cur_depth[w]--;
		account_requeue(t, pci, w);
		log_queue(pci, t, "R");
		break;
	case __BLK_TA_ISSUE:
		account_issue(t, pci, w);
		pdi->cur_depth[w]++;
		if (pdi->cur_depth[w] > pdi->max_depth[w])
			pdi->max_depth[w] = pdi->cur_depth[w];
		log_issue(pdi, pci, t, "D");
		break;
	case __BLK_TA_COMPLETE:
		pdi->cur_depth[w]--;
		account_c(t, pci, w, t->bytes);
		log_complete(pdi, pci, t, "C");
		break;
	case __BLK_TA_PLUG:
		log_action(pci, t, "P");
		break;
	case __BLK_TA_UNPLUG_IO:
		account_unplug(t, pci, 0);
		log_unplug(pci, t, "U");
		break;
	case __BLK_TA_UNPLUG_TIMER:
		account_unplug(t, pci, 1);
		log_unplug(pci, t, "UT");
		break;
	case __BLK_TA_SPLIT:
		log_split(pci, t, "X");
		break;
	case __BLK_TA_BOUNCE:
		log_generic(pci, t, "B");
		break;
	case __BLK_TA_REMAP:
		log_generic(pci, t, "A");
		break;
	default:
		fprintf(stderr, "Bad fs action %x\n", t->action);
		break;
	}
}

static void dump_trace(struct blk_io_trace *t, struct per_cpu_info *pci,
		       struct per_dev_info *pdi)
{
	if (t->action & BLK_TC_ACT(BLK_TC_PC))
		dump_trace_pc(t, pci);
	else
		dump_trace_fs(t, pdi, pci);

	if (!pdi->events)
		pdi->first_reported_time = t->time;

	pdi->events++;
}

/*
 * print in a proper way, not too small and not too big. if more than
 * 1,000,000K, turn into M and so on
 */
static char *size_cnv(char *dst, unsigned long long num, int in_kb)
{
	char suff[] = { '\0', 'K', 'M', 'G', 'T', 'P' };
	unsigned int i = 0;

	if (in_kb)
		i++;

	while (num > 1000 * 1000ULL && (i < sizeof(suff) - 1)) {
		i++;
		num /= 1000;
	}

	sprintf(dst, "%'8Lu%c", num, suff[i]);
	return dst;
}
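
/*
 * For example, size_cnv(dst, 2048000, 1) starts at 'K' because of
 * in_kb, divides once, and renders "2,048M" under the en_US locale
 * set up in main().
 */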

static void dump_io_stats(struct per_dev_info *pdi, struct io_stats *ios,
			  char *msg)
{
	static char x[256], y[256];

	fprintf(ofp, "%s\n", msg);

	fprintf(ofp, " Reads Queued: %s, %siB\t", size_cnv(x, ios->qreads, 0), size_cnv(y, ios->qread_kb, 1));
	fprintf(ofp, " Writes Queued: %s, %siB\n", size_cnv(x, ios->qwrites, 0), size_cnv(y, ios->qwrite_kb, 1));

	fprintf(ofp, " Read Dispatches: %s, %siB\t", size_cnv(x, ios->ireads, 0), size_cnv(y, ios->iread_kb, 1));
	fprintf(ofp, " Write Dispatches: %s, %siB\n", size_cnv(x, ios->iwrites, 0), size_cnv(y, ios->iwrite_kb, 1));
	fprintf(ofp, " Reads Requeued: %s\t\t", size_cnv(x, ios->rrqueue, 0));
	fprintf(ofp, " Writes Requeued: %s\n", size_cnv(x, ios->wrqueue, 0));
	fprintf(ofp, " Reads Completed: %s, %siB\t", size_cnv(x, ios->creads, 0), size_cnv(y, ios->cread_kb, 1));
	fprintf(ofp, " Writes Completed: %s, %siB\n", size_cnv(x, ios->cwrites, 0), size_cnv(y, ios->cwrite_kb, 1));
	fprintf(ofp, " Read Merges: %'8lu%8c\t", ios->mreads, ' ');
	fprintf(ofp, " Write Merges: %'8lu\n", ios->mwrites);
	if (pdi) {
		fprintf(ofp, " Read depth: %'8u%8c\t", pdi->max_depth[0], ' ');
		fprintf(ofp, " Write depth: %'8u\n", pdi->max_depth[1]);
	}
	fprintf(ofp, " IO unplugs: %'8lu%8c\t", ios->io_unplugs, ' ');
	fprintf(ofp, " Timer unplugs: %'8lu\n", ios->timer_unplugs);
}

static void dump_wait_stats(struct per_process_info *ppi)
{
	unsigned long rawait = ppi->longest_allocation_wait[0] / 1000;
	unsigned long rdwait = ppi->longest_dispatch_wait[0] / 1000;
	unsigned long rcwait = ppi->longest_completion_wait[0] / 1000;
	unsigned long wawait = ppi->longest_allocation_wait[1] / 1000;
	unsigned long wdwait = ppi->longest_dispatch_wait[1] / 1000;
	unsigned long wcwait = ppi->longest_completion_wait[1] / 1000;

	fprintf(ofp, " Allocation wait: %'8lu%8c\t", rawait, ' ');
	fprintf(ofp, " Allocation wait: %'8lu\n", wawait);
	fprintf(ofp, " Dispatch wait: %'8lu%8c\t", rdwait, ' ');
	fprintf(ofp, " Dispatch wait: %'8lu\n", wdwait);
	fprintf(ofp, " Completion wait: %'8lu%8c\t", rcwait, ' ');
	fprintf(ofp, " Completion wait: %'8lu\n", wcwait);
}

static int ppi_name_compare(const void *p1, const void *p2)
{
	struct per_process_info *ppi1 = *((struct per_process_info **) p1);
	struct per_process_info *ppi2 = *((struct per_process_info **) p2);
	int res;

	res = strverscmp(ppi1->name, ppi2->name);
	if (!res)
		res = ppi1->pid > ppi2->pid;

	return res;
}

static void sort_process_list(void)
{
	struct per_process_info **ppis;
	struct per_process_info *ppi;
	int i = 0;

	ppis = malloc(ppi_list_entries * sizeof(struct per_process_info *));

	ppi = ppi_list;
	while (ppi) {
		ppis[i++] = ppi;
		ppi = ppi->list_next;
	}

	qsort(ppis, ppi_list_entries, sizeof(ppi), ppi_name_compare);

	i = ppi_list_entries - 1;
	ppi_list = NULL;
	while (i >= 0) {
		ppi = ppis[i];

		ppi->list_next = ppi_list;
		ppi_list = ppi;
		i--;
	}

	free(ppis);
}

static void show_process_stats(void)
{
	struct per_process_info *ppi;

	sort_process_list();

	ppi = ppi_list;
	while (ppi) {
		char name[64];

		if (ppi->more_than_one)
			sprintf(name, "%s (%u, ...)", ppi->name, ppi->pid);
		else
			sprintf(name, "%s (%u)", ppi->name, ppi->pid);

		dump_io_stats(NULL, &ppi->io_stats, name);
		dump_wait_stats(ppi);
		ppi = ppi->list_next;
	}

	fprintf(ofp, "\n");
}

static void show_device_and_cpu_stats(void)
{
	struct per_dev_info *pdi;
	struct per_cpu_info *pci;
	struct io_stats total, *ios;
	unsigned long long rrate, wrate, msec;
	int i, j, pci_events;
	char line[3 + 8/*cpu*/ + 2 + 32/*dev*/ + 3];
	char name[32];

	for (pdi = devices, i = 0; i < ndevices; i++, pdi++) {

		memset(&total, 0, sizeof(total));
		pci_events = 0;

		if (i > 0)
			fprintf(ofp, "\n");

		for (pci = pdi->cpus, j = 0; j < pdi->ncpus; j++, pci++) {
			if (!pci->nelems)
				continue;

			ios = &pci->io_stats;
			total.qreads += ios->qreads;
			total.qwrites += ios->qwrites;
			total.creads += ios->creads;
			total.cwrites += ios->cwrites;
			total.mreads += ios->mreads;
			total.mwrites += ios->mwrites;
			total.ireads += ios->ireads;
			total.iwrites += ios->iwrites;
			total.rrqueue += ios->rrqueue;
			total.wrqueue += ios->wrqueue;
			total.qread_kb += ios->qread_kb;
			total.qwrite_kb += ios->qwrite_kb;
			total.cread_kb += ios->cread_kb;
			total.cwrite_kb += ios->cwrite_kb;
			total.iread_kb += ios->iread_kb;
			total.iwrite_kb += ios->iwrite_kb;
			total.timer_unplugs += ios->timer_unplugs;
			total.io_unplugs += ios->io_unplugs;

			snprintf(line, sizeof(line) - 1, "CPU%d (%s):",
				 j, get_dev_name(pdi, name, sizeof(name)));
			dump_io_stats(pdi, ios, line);
			pci_events++;
		}

		if (pci_events > 1) {
			fprintf(ofp, "\n");
			snprintf(line, sizeof(line) - 1, "Total (%s):",
				 get_dev_name(pdi, name, sizeof(name)));
			dump_io_stats(NULL, &total, line);
		}

		wrate = rrate = 0;
		msec = (pdi->last_reported_time - pdi->first_reported_time) / 1000000;
		if (msec) {
			rrate = 1000 * total.cread_kb / msec;
			wrate = 1000 * total.cwrite_kb / msec;
		}

		fprintf(ofp, "\nThroughput (R/W): %'LuKiB/s / %'LuKiB/s\n",
			rrate, wrate);
		fprintf(ofp, "Events (%s): %'Lu entries\n",
			get_dev_name(pdi, line, sizeof(line)), pdi->events);

		collect_pdi_skips(pdi);
		fprintf(ofp, "Skips: %'lu forward (%'llu - %5.1lf%%)\n",
			pdi->skips, pdi->seq_skips,
			100.0 * ((double)pdi->seq_skips /
				(double)(pdi->events + pdi->seq_skips)));
	}
}
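
/*
 * Throughput above is derived from completed I/O only. A worked
 * example with made-up numbers: 1,024,000KiB completed over a reported
 * span of 2,000 msec gives 1000 * 1024000 / 2000 = 512,000KiB/s.
 */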

static void find_genesis(void)
{
	struct trace *t = trace_list;

	genesis_time = -1ULL;
	while (t != NULL) {
		if (t->bit->time < genesis_time)
			genesis_time = t->bit->time;

		t = t->next;
	}
}

static inline int check_stopwatch(struct blk_io_trace *bit)
{
	if (bit->time < stopwatch_end &&
	    bit->time >= stopwatch_start)
		return 0;

	return 1;
}

/*
 * return youngest entry read
 */
static int sort_entries(unsigned long long *youngest)
{
	struct per_dev_info *pdi = NULL;
	struct per_cpu_info *pci = NULL;
	struct trace *t;

	if (!genesis_time)
		find_genesis();

	*youngest = 0;
	while ((t = trace_list) != NULL) {
		struct blk_io_trace *bit = t->bit;

		trace_list = t->next;

		bit->time -= genesis_time;

		if (bit->time < *youngest || !*youngest)
			*youngest = bit->time;

		if (!pdi || pdi->dev != bit->device) {
			pdi = get_dev_info(bit->device);
			pci = NULL;
		}

		if (!pci || pci->cpu != bit->cpu)
			pci = get_cpu_info(pdi, bit->cpu);

		if (bit->sequence < pci->smallest_seq_read)
			pci->smallest_seq_read = bit->sequence;

		if (check_stopwatch(bit)) {
			bit_free(bit);
			t_free(t);
			continue;
		}

		if (trace_rb_insert_sort(t))
			return -1;
	}

	return 0;
}
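
/*
 * All timestamps are rebased against the oldest trace in the first
 * batch (genesis_time), so a first event stamped, say, 1123456789 nsec
 * comes out as time 0 both in the output and in the stopwatch check
 * above.
 */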

/*
 * to continue, we must have traces from all online cpus in the tree
 */
static int check_cpu_map(struct per_dev_info *pdi)
{
	unsigned long *cpu_map;
	struct rb_node *n;
	struct trace *__t;
	unsigned int i;
	int ret, cpu;

	/*
	 * create a map of the cpus we have traces for
	 */
	cpu_map = malloc(pdi->cpu_map_max / sizeof(long));
	/*
	 * zero it first, we only OR bits in below
	 */
	memset(cpu_map, 0, pdi->cpu_map_max / sizeof(long));
	n = rb_first(&rb_sort_root);
	while (n) {
		__t = rb_entry(n, struct trace, rb_node);
		cpu = __t->bit->cpu;

		cpu_map[CPU_IDX(cpu)] |= (1UL << CPU_BIT(cpu));
		n = rb_next(n);
	}

	/*
	 * we can't continue if pdi->cpu_map has entries set that we don't
	 * have in the sort rbtree. the opposite is not a problem, though
	 */
	ret = 0;
	for (i = 0; i < pdi->cpu_map_max / CPUS_PER_LONG; i++) {
		if (pdi->cpu_map[i] & ~(cpu_map[i])) {
			ret = 1;
			break;
		}
	}

	free(cpu_map);
	return ret;
}

static int check_sequence(struct per_dev_info *pdi, struct trace *t, int force)
{
	struct blk_io_trace *bit = t->bit;
	unsigned long expected_sequence;
	struct per_cpu_info *pci;
	struct trace *__t;

	pci = get_cpu_info(pdi, bit->cpu);
	expected_sequence = pci->last_sequence + 1;

	if (!expected_sequence) {
		/*
		 * 1 should be the first entry, just allow it
		 */
		if (bit->sequence == 1)
			return 0;
		if (bit->sequence == pci->smallest_seq_read)
			return 0;

		return check_cpu_map(pdi);
	}

	if (bit->sequence == expected_sequence)
		return 0;

	/*
	 * we may not have seen that sequence yet. if we are not doing
	 * the final run, break and wait for more entries.
	 */
	if (expected_sequence < pci->smallest_seq_read) {
		__t = trace_rb_find_last(pdi, pci, expected_sequence);
		if (!__t)
			goto skip;

		__put_trace_last(pdi, __t);
		return 0;
	} else if (!force) {
		return 1;
	} else {
skip:
		if (check_current_skips(pdi, bit->sequence))
			return 0;

		if (expected_sequence < bit->sequence)
			insert_skip(pdi, expected_sequence, bit->sequence - 1);
		return 0;
	}
}

static void show_entries_rb(int force)
{
	struct per_dev_info *pdi = NULL;
	struct per_cpu_info *pci = NULL;
	struct blk_io_trace *bit;
	struct rb_node *n;
	struct trace *t;

	while ((n = rb_first(&rb_sort_root)) != NULL) {
		if (is_done() && !force && !pipeline)
			break;

		t = rb_entry(n, struct trace, rb_node);
		bit = t->bit;

		if (!pdi || pdi->dev != bit->device) {
			pdi = get_dev_info(bit->device);
			pci = NULL;
		}

		if (!pdi) {
			fprintf(stderr, "Unknown device ID? (%d,%d)\n",
				MAJOR(bit->device), MINOR(bit->device));
			break;
		}

		if (check_sequence(pdi, t, force))
			break;

		if (!force && bit->time > last_allowed_time)
			break;

		check_time(pdi, bit);

		if (!pci || pci->cpu != bit->cpu)
			pci = get_cpu_info(pdi, bit->cpu);

		pci->last_sequence = bit->sequence;

		pci->nelems++;

		if (bit->action & (act_mask << BLK_TC_SHIFT))
			dump_trace(bit, pci, pdi);

		put_trace(pdi, t);
	}
}

static int read_data(int fd, void *buffer, int bytes, int block, int *fdblock)
{
	int ret, bytes_left, fl;
	void *p;

	if (block != *fdblock) {
		fl = fcntl(fd, F_GETFL);

		if (!block) {
			*fdblock = 0;
			fcntl(fd, F_SETFL, fl | O_NONBLOCK);
		} else {
			*fdblock = 1;
			fcntl(fd, F_SETFL, fl & ~O_NONBLOCK);
		}
	}

	bytes_left = bytes;
	p = buffer;
	while (bytes_left > 0) {
		ret = read(fd, p, bytes_left);
		if (!ret)
			return 1;
		else if (ret < 0) {
			if (errno != EAGAIN) {
				perror("read");
				return -1;
			}

			/*
			 * never do partial reads. we can return if we
			 * didn't read anything and we should not block,
			 * otherwise wait for data
			 */
			if ((bytes_left == bytes) && !block)
				return 1;

			usleep(10);
			continue;
		} else {
			p += ret;
			bytes_left -= ret;
		}
	}

	return 0;
}
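
/*
 * Return convention, in short: 0 means the full buffer was filled,
 * 1 means EOF (or no data yet on a non-blocking fd before anything was
 * consumed), -1 means a hard read error. On the non-blocking path the
 * first EAGAIN with nothing consumed returns 1; once a partial record
 * has been read we poll with usleep() until the rest arrives, since
 * callers never want partial traces.
 */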

static int read_events(int fd, int always_block, int *fdblock)
{
	struct per_dev_info *pdi = NULL;
	unsigned int events = 0;

	while (!is_done() && events < rb_batch) {
		struct blk_io_trace *bit;
		struct trace *t;
		int pdu_len, should_block, ret;
		__u32 magic;

		bit = bit_alloc();

		should_block = !events || always_block;

		ret = read_data(fd, bit, sizeof(*bit), should_block, fdblock);
		if (ret) {
			bit_free(bit);
			if (!events && ret < 0)
				events = ret;
			break;
		}

		magic = be32_to_cpu(bit->magic);
		if ((magic & 0xffffff00) != BLK_IO_TRACE_MAGIC) {
			fprintf(stderr, "Bad magic %x\n", magic);
			break;
		}

		pdu_len = be16_to_cpu(bit->pdu_len);
		if (pdu_len) {
			void *ptr = realloc(bit, sizeof(*bit) + pdu_len);

			if (read_data(fd, ptr + sizeof(*bit), pdu_len, 1, fdblock)) {
				bit_free(ptr);
				break;
			}

			bit = ptr;
		}

		trace_to_cpu(bit);

		if (verify_trace(bit)) {
			bit_free(bit);
			continue;
		}

		t = t_alloc();
		memset(t, 0, sizeof(*t));
		t->bit = bit;

		t->next = trace_list;
		trace_list = t;

		if (!pdi || pdi->dev != bit->device)
			pdi = get_dev_info(bit->device);

		if (bit->time > pdi->last_read_time)
			pdi->last_read_time = bit->time;

		events++;
	}

	return events;
}

static int do_file(void)
{
	struct per_cpu_info *pci;
	struct per_dev_info *pdi;
	int i, j, events, events_added;

	/*
	 * first prepare all files for reading
	 */
	for (i = 0; i < ndevices; i++) {
		pdi = &devices[i];
		pdi->nfiles = 0;

		for (j = 0;; j++) {
			struct stat st;
			int len = 0;
			char *p, *dname;

			pci = get_cpu_info(pdi, j);
			pci->cpu = j;
			pci->fd = -1;
			pci->fdblock = -1;

			p = strdup(pdi->name);
			dname = dirname(p);
			if (strcmp(dname, ".")) {
				input_dir = dname;
				p = strdup(pdi->name);
				strcpy(pdi->name, basename(p));
			}
			free(p);

			if (input_dir)
				len = sprintf(pci->fname, "%s/", input_dir);

			snprintf(pci->fname + len, sizeof(pci->fname)-1-len,
				 "%s.blktrace.%d", pdi->name, pci->cpu);
			if (stat(pci->fname, &st) < 0)
				break;
			if (st.st_size) {
				pci->fd = open(pci->fname, O_RDONLY);
				if (pci->fd < 0) {
					perror(pci->fname);
					continue;
				}
			}

			printf("Input file %s added\n", pci->fname);
			pdi->nfiles++;
			cpu_mark_online(pdi, pci->cpu);
		}
	}

	/*
	 * now loop over the files reading in the data
	 */
	do {
		unsigned long long youngest;

		events_added = 0;
		last_allowed_time = -1ULL;

		for (i = 0; i < ndevices; i++) {
			pdi = &devices[i];

			for (j = 0; j < pdi->nfiles; j++) {

				pci = get_cpu_info(pdi, j);

				if (pci->fd == -1)
					continue;

				pci->smallest_seq_read = -1;

				events = read_events(pci->fd, 1, &pci->fdblock);
				if (events <= 0) {
					cpu_mark_offline(pdi, pci->cpu);
					close(pci->fd);
					pci->fd = -1;
					continue;
				}

				if (pdi->last_read_time < last_allowed_time)
					last_allowed_time = pdi->last_read_time;

				events_added += events;
			}
		}

		if (sort_entries(&youngest))
			break;

		if (youngest > stopwatch_end)
			break;

		show_entries_rb(0);

	} while (events_added);

	if (rb_sort_entries)
		show_entries_rb(1);

	return 0;
}

static int do_stdin(void)
{
	unsigned long long youngest;
	int fd, events, fdblock;

	last_allowed_time = -1ULL;
	fd = dup(STDIN_FILENO);
	if (fd == -1) {
		perror("dup stdin");
		return -1;
	}

	fdblock = -1;
	while ((events = read_events(fd, 0, &fdblock)) > 0) {

#if 0
		smallest_seq_read = -1U;
#endif

		if (sort_entries(&youngest))
			break;

		if (youngest > stopwatch_end)
			break;

		show_entries_rb(0);
	}

	if (rb_sort_entries)
		show_entries_rb(1);

	close(fd);
	return 0;
}

static void show_stats(void)
{
	if (!ofp)
		return;
	if (stats_printed)
		return;

	stats_printed = 1;

	if (per_process_stats)
		show_process_stats();

	if (per_device_and_cpu_stats)
		show_device_and_cpu_stats();

	fflush(ofp);
}

static void handle_sigint(__attribute__((__unused__)) int sig)
{
	done = 1;
}

/*
 * Extract start and end times from a string, allowing us to specify
 * a time interval of interest within a trace.
 * Format: "end" (start is zero) or "start:end".
 */
static int find_stopwatch_interval(char *string)
{
	double value;
	char *sp;

	value = strtod(string, &sp);
	if (sp == string) {
		fprintf(stderr, "Invalid stopwatch timer: %s\n", string);
		return 1;
	}
	if (*sp == ':') {
		stopwatch_start = DOUBLE_TO_NANO_ULL(value);
		string = sp + 1;
		value = strtod(string, &sp);
		if (sp == string || *sp != '\0') {
			fprintf(stderr, "Invalid stopwatch end time: %s\n",
				string);
			return 1;
		}
	} else if (*sp != '\0') {
		fprintf(stderr, "Invalid stopwatch start timer: %s\n", string);
		return 1;
	}
	stopwatch_end = DOUBLE_TO_NANO_ULL(value);
	if (stopwatch_end <= stopwatch_start) {
		fprintf(stderr, "Invalid stopwatch interval: %Lu -> %Lu\n",
			stopwatch_start, stopwatch_end);
		return 1;
	}

	return 0;
}
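
/*
 * For example, "-w 10" shows only the first 10 seconds of the trace,
 * while "-w 2.5:10" shows traces stamped between 2.5 and 10 seconds.
 */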

static char usage_str[] = \
	"[ -i <input name> ] [ -o <output name> ] [ -s ] [ -t ] [ -q ]\n" \
	"[ -w start:stop ] [ -f output format ] [ -F format spec ] [ -v ]\n\n" \
	"\t-i Input file containing trace data, or '-' for stdin\n" \
	"\t-D Directory to prepend to input file names\n" \
	"\t-o Output file. If not given, output is stdout\n" \
	"\t-b stdin read batching\n" \
	"\t-s Show per-program io statistics\n" \
	"\t-h Hash processes by name, not pid\n" \
	"\t-t Track individual ios. Will tell you the time a request took\n" \
	"\t to get queued, to get dispatched, and to get completed\n" \
	"\t-q Quiet. Don't display any stats at the end of the trace\n" \
	"\t-w Only parse data between the given time interval in seconds.\n" \
	"\t If 'start' isn't given, blkparse defaults the start time to 0\n" \
	"\t-f Output format. Customize the output format. The format field\n" \
	"\t identifiers can be found in the documentation\n" \
	"\t-F Format specification. Can be found in the documentation\n" \
	"\t-v More verbose for marginal errors\n" \
	"\t-V Print program version info\n\n";

static void usage(char *prog)
{
	fprintf(stderr, "Usage: %s %s %s", prog, blkparse_version, usage_str);
}

int main(int argc, char *argv[])
{
	char *ofp_buffer;
	int i, c, ret, mode;
	int act_mask_tmp = 0;

	while ((c = getopt_long(argc, argv, S_OPTS, l_opts, NULL)) != -1) {
		switch (c) {
		case 'a':
			i = find_mask_map(optarg);
			if (i < 0) {
				fprintf(stderr, "Invalid action mask %s\n",
					optarg);
				return 1;
			}
			act_mask_tmp |= i;
			break;

		case 'A':
			if ((sscanf(optarg, "%x", &i) != 1) ||
			    !valid_act_opt(i)) {
				fprintf(stderr,
					"Invalid set action mask %s/0x%x\n",
					optarg, i);
				return 1;
			}
			act_mask_tmp = i;
			break;
		case 'i':
			if (!strcmp(optarg, "-") && !pipeline)
				pipeline = 1;
			else if (resize_devices(optarg) != 0)
				return 1;
			break;
		case 'D':
			input_dir = optarg;
			break;
		case 'o':
			output_name = optarg;
			break;
		case 'b':
			rb_batch = atoi(optarg);
			if (rb_batch <= 0)
				rb_batch = RB_BATCH_DEFAULT;
			break;
		case 's':
			per_process_stats = 1;
			break;
		case 't':
			track_ios = 1;
			break;
		case 'q':
			per_device_and_cpu_stats = 0;
			break;
		case 'w':
			if (find_stopwatch_interval(optarg) != 0)
				return 1;
			break;
		case 'f':
			set_all_format_specs(optarg);
			break;
		case 'F':
			if (add_format_spec(optarg) != 0)
				return 1;
			break;
		case 'h':
			ppi_hash_by_pid = 0;
			break;
		case 'v':
			verbose++;
			break;
		case 'V':
			printf("%s version %s\n", argv[0], blkparse_version);
			return 0;
		default:
			usage(argv[0]);
			return 1;
		}
	}

	while (optind < argc) {
		if (!strcmp(argv[optind], "-") && !pipeline)
			pipeline = 1;
		else if (resize_devices(argv[optind]) != 0)
			return 1;
		optind++;
	}

	if (!pipeline && !ndevices) {
		usage(argv[0]);
		return 1;
	}

	if (act_mask_tmp != 0)
		act_mask = act_mask_tmp;

	memset(&rb_sort_root, 0, sizeof(rb_sort_root));

	signal(SIGINT, handle_sigint);
	signal(SIGHUP, handle_sigint);
	signal(SIGTERM, handle_sigint);

	setlocale(LC_NUMERIC, "en_US");

	if (!output_name) {
		ofp = fdopen(STDOUT_FILENO, "w");
		mode = _IOLBF;
	} else {
		char ofname[128];

		snprintf(ofname, sizeof(ofname) - 1, "%s", output_name);
		ofp = fopen(ofname, "w");
		mode = _IOFBF;
	}

	if (!ofp) {
		perror("fopen");
		return 1;
	}

	ofp_buffer = malloc(4096);
	if (setvbuf(ofp, ofp_buffer, mode, 4096)) {
		perror("setvbuf");
		return 1;
	}

	if (pipeline)
		ret = do_stdin();
	else
		ret = do_file();

	show_stats();
	free(ofp_buffer);
	return ret;
}