[PATCH] blktrace: fix get_subbuf() leak
[blktrace.git] / blkparse.c
/*
 * block queue tracing parse application
 *
 * Copyright (C) 2005 Jens Axboe <axboe@suse.de>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <stdio.h>
#include <fcntl.h>
#include <stdlib.h>
#include <string.h>
#include <getopt.h>
#include <errno.h>
#include <signal.h>
#include <locale.h>
#include <libgen.h>

#include "blktrace.h"
#include "rbtree.h"
#include "jhash.h"

static char blkparse_version[] = "0.99.1";

struct skip_info {
        unsigned long start, end;
        struct skip_info *prev, *next;
};

struct per_dev_info {
        dev_t dev;
        char *name;

        int backwards;
        unsigned long long events;
        unsigned long long first_reported_time;
        unsigned long long last_reported_time;
        unsigned long long last_read_time;
        struct io_stats io_stats;
        unsigned long skips;
        unsigned long long seq_skips;
        unsigned int max_depth[2];
        unsigned int cur_depth[2];

        struct rb_root rb_track;

        int nfiles;
        int ncpus;

        unsigned long *cpu_map;
        unsigned int cpu_map_max;

        struct per_cpu_info *cpus;
};

/*
 * some duplicated effort here, we can unify this hash and the ppi hash later
 */
struct process_pid_map {
        pid_t pid;
        char comm[16];
        struct process_pid_map *hash_next, *list_next;
};

#define PPM_HASH_SHIFT  (8)
#define PPM_HASH_SIZE   (1 << PPM_HASH_SHIFT)
#define PPM_HASH_MASK   (PPM_HASH_SIZE - 1)
static struct process_pid_map *ppm_hash_table[PPM_HASH_SIZE];

struct per_process_info {
        struct process_pid_map *ppm;
        struct io_stats io_stats;
        struct per_process_info *hash_next, *list_next;
        int more_than_one;

        /*
         * individual io stats
         */
        unsigned long long longest_allocation_wait[2];
        unsigned long long longest_dispatch_wait[2];
        unsigned long long longest_completion_wait[2];
};

#define PPI_HASH_SHIFT  (8)
#define PPI_HASH_SIZE   (1 << PPI_HASH_SHIFT)
#define PPI_HASH_MASK   (PPI_HASH_SIZE - 1)
static struct per_process_info *ppi_hash_table[PPI_HASH_SIZE];
static struct per_process_info *ppi_list;
static int ppi_list_entries;

#define S_OPTS  "a:A:i:o:b:stqw:f:F:vVhD:d:"
static struct option l_opts[] = {
        {
                .name = "act-mask",
                .has_arg = required_argument,
                .flag = NULL,
                .val = 'a'
        },
        {
                .name = "set-mask",
                .has_arg = required_argument,
                .flag = NULL,
                .val = 'A'
        },
        {
                .name = "input",
                .has_arg = required_argument,
                .flag = NULL,
                .val = 'i'
        },
        {
                .name = "output",
                .has_arg = required_argument,
                .flag = NULL,
                .val = 'o'
        },
        {
                .name = "batch",
                .has_arg = required_argument,
                .flag = NULL,
                .val = 'b'
        },
        {
                .name = "per-program-stats",
                .has_arg = no_argument,
                .flag = NULL,
                .val = 's'
        },
        {
                .name = "track-ios",
                .has_arg = no_argument,
                .flag = NULL,
                .val = 't'
        },
        {
                .name = "quiet",
                .has_arg = no_argument,
                .flag = NULL,
                .val = 'q'
        },
        {
                .name = "stopwatch",
                .has_arg = required_argument,
                .flag = NULL,
                .val = 'w'
        },
        {
                .name = "format",
                .has_arg = required_argument,
                .flag = NULL,
                .val = 'f'
        },
        {
                .name = "format-spec",
                .has_arg = required_argument,
                .flag = NULL,
                .val = 'F'
        },
        {
                .name = "hash-by-name",
                .has_arg = no_argument,
                .flag = NULL,
                .val = 'h'
        },
        {
                .name = "verbose",
                .has_arg = no_argument,
                .flag = NULL,
                .val = 'v'
        },
        {
                .name = "version",
                .has_arg = no_argument,
                .flag = NULL,
                .val = 'V'
        },
        {
                .name = "input-directory",
                .has_arg = required_argument,
                .flag = NULL,
                .val = 'D'
        },
        {
                .name = "dump-binary",
                .has_arg = required_argument,
                .flag = NULL,
                .val = 'd'
        },
        {
                .name = NULL,
        }
};

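/*
 * example invocations (flags as defined in l_opts above; the device
 * name 'sda' is assumed for illustration):
 *
 *      blkparse -i sda                 parse sda.blktrace.* from the cwd
 *      blkparse -i sda -o out.txt      write formatted output to out.txt
 *      blkparse -i sda -s              include per-program stats
 *      blkparse -i sda -d trace.bin    also dump binary events to trace.bin
 */
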
/*
 * for sorting the displayed output
 */
struct trace {
        struct blk_io_trace *bit;
        struct rb_node rb_node;
        struct trace *next;
        unsigned long read_sequence;
};

static struct rb_root rb_sort_root;
static unsigned long rb_sort_entries;

static struct trace *trace_list;

/*
 * allocation cache
 */
static struct blk_io_trace *bit_alloc_list;
static struct trace *t_alloc_list;

/*
 * for tracking individual ios
 */
struct io_track {
        struct rb_node rb_node;

        struct process_pid_map *ppm;
        __u64 sector;
        unsigned long long allocation_time;
        unsigned long long queue_time;
        unsigned long long dispatch_time;
        unsigned long long completion_time;
};

static int ndevices;
static struct per_dev_info *devices;
static char *get_dev_name(struct per_dev_info *, char *, int);
static int trace_rb_insert_last(struct per_dev_info *, struct trace *);

FILE *ofp = NULL;
static char *output_name;
static char *input_dir;

static unsigned long long genesis_time;
static unsigned long long last_allowed_time;
static unsigned long long stopwatch_start;      /* start from zero by default */
static unsigned long long stopwatch_end = -1ULL;        /* "infinity" */
static unsigned long read_sequence;

static int per_process_stats;
static int per_device_and_cpu_stats = 1;
static int track_ios;
static int ppi_hash_by_pid = 1;
static int verbose;
static unsigned int act_mask = -1U;
static int stats_printed;
int data_is_native = -1;

static int dump_fd;
static char *dump_binary;

static unsigned int t_alloc_cache;
static unsigned int bit_alloc_cache;

#define RB_BATCH_DEFAULT        (512)
static unsigned int rb_batch = RB_BATCH_DEFAULT;

static int pipeline;

#define is_done()       (*(volatile int *)(&done))
static volatile int done;

#define JHASH_RANDOM    (0x3af5f2ee)

#define CPUS_PER_LONG   (8 * sizeof(unsigned long))
#define CPU_IDX(cpu)    ((cpu) / CPUS_PER_LONG)
#define CPU_BIT(cpu)    ((cpu) & (CPUS_PER_LONG - 1))

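/*
 * append the raw event to the -d dump file; on a short write, report
 * the error and disable further binary dumping
 */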
static void output_binary(void *buf, int len)
{
        if (dump_binary) {
                int n = write(dump_fd, buf, len);
                if (n != len) {
                        perror(dump_binary);
                        close(dump_fd);
                        dump_binary = NULL;
                }
        }
}

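/*
 * grow the per-cpu info array of a device so it covers 'cpu',
 * initializing the newly added entries
 */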
static void resize_cpu_info(struct per_dev_info *pdi, int cpu)
{
        struct per_cpu_info *cpus = pdi->cpus;
        int ncpus = pdi->ncpus;
        int new_count = cpu + 1;
        int new_space, size;
        char *new_start;

        size = new_count * sizeof(struct per_cpu_info);
        cpus = realloc(cpus, size);
        if (!cpus) {
                char name[20];
                fprintf(stderr, "Out of memory, CPU info for device %s (%d)\n",
                        get_dev_name(pdi, name, sizeof(name)), size);
                exit(1);
        }

        new_start = (char *)cpus + (ncpus * sizeof(struct per_cpu_info));
        new_space = (new_count - ncpus) * sizeof(struct per_cpu_info);
        memset(new_start, 0, new_space);

        pdi->ncpus = new_count;
        pdi->cpus = cpus;

        for (new_count = 0; new_count < pdi->ncpus; new_count++) {
                struct per_cpu_info *pci = &pdi->cpus[new_count];

                if (!pci->fd) {
                        pci->fd = -1;
                        memset(&pci->rb_last, 0, sizeof(pci->rb_last));
                        pci->rb_last_entries = 0;
                        pci->last_sequence = -1;
                }
        }
}

static struct per_cpu_info *get_cpu_info(struct per_dev_info *pdi, int cpu)
{
        struct per_cpu_info *pci;

        if (cpu >= pdi->ncpus)
                resize_cpu_info(pdi, cpu);

        pci = &pdi->cpus[cpu];
        pci->cpu = cpu;
        return pci;
}


static int resize_devices(char *name)
{
        int size = (ndevices + 1) * sizeof(struct per_dev_info);

        devices = realloc(devices, size);
        if (!devices) {
                fprintf(stderr, "Out of memory, device %s (%d)\n", name, size);
                return 1;
        }
        memset(&devices[ndevices], 0, sizeof(struct per_dev_info));
        devices[ndevices].name = name;
        ndevices++;
        return 0;
}

static struct per_dev_info *get_dev_info(dev_t dev)
{
        struct per_dev_info *pdi;
        int i;

        for (i = 0; i < ndevices; i++) {
                if (!devices[i].dev)
                        devices[i].dev = dev;
                if (devices[i].dev == dev)
                        return &devices[i];
        }

        if (resize_devices(NULL))
                return NULL;

        pdi = &devices[ndevices - 1];
        pdi->dev = dev;
        pdi->first_reported_time = 0;
        pdi->last_read_time = 0;

        return pdi;
}

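/*
 * remember a gap [start, end] in the sequence numbers for this cpu,
 * merging with an adjacent skip range when the new range touches it
 */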
static void insert_skip(struct per_cpu_info *pci, unsigned long start,
                        unsigned long end)
{
        struct skip_info *sip;

        for (sip = pci->skips_tail; sip != NULL; sip = sip->prev) {
                if (end == (sip->start - 1)) {
                        sip->start = start;
                        return;
                } else if (start == (sip->end + 1)) {
                        sip->end = end;
                        return;
                }
        }

        sip = malloc(sizeof(struct skip_info));
        sip->start = start;
        sip->end = end;
        sip->prev = sip->next = NULL;
        if (pci->skips_tail == NULL)
                pci->skips_head = pci->skips_tail = sip;
        else {
                sip->prev = pci->skips_tail;
                pci->skips_tail->next = sip;
                pci->skips_tail = sip;
        }
}

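/*
 * unlink a skip entry from the per-cpu skip list and free it
 */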
static void remove_sip(struct per_cpu_info *pci, struct skip_info *sip)
{
        if (sip->prev == NULL) {
                if (sip->next == NULL)
                        pci->skips_head = pci->skips_tail = NULL;
                else {
                        pci->skips_head = sip->next;
                        sip->next->prev = NULL;
                }
        } else if (sip->next == NULL) {
                pci->skips_tail = sip->prev;
                sip->prev->next = NULL;
        } else {
                sip->prev->next = sip->next;
                sip->next->prev = sip->prev;
        }

        sip->prev = sip->next = NULL;
        free(sip);
}

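/*
 * if 'seq' falls inside a previously recorded skip range, trim or
 * split that range and report it as covered
 */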
#define IN_SKIP(sip,seq) (((sip)->start <= (seq)) && ((seq) <= (sip)->end))
static int check_current_skips(struct per_cpu_info *pci, unsigned long seq)
{
        struct skip_info *sip;

        for (sip = pci->skips_tail; sip != NULL; sip = sip->prev) {
                if (IN_SKIP(sip, seq)) {
                        if (sip->start == seq) {
                                if (sip->end == seq)
                                        remove_sip(pci, sip);
                                else
                                        sip->start += 1;
                        } else if (sip->end == seq)
                                sip->end -= 1;
                        else {
                                /*
                                 * splitting the range: save the old end
                                 * before truncating, or the second half
                                 * of the skip is lost
                                 */
                                unsigned long end = sip->end;

                                sip->end = seq - 1;
                                insert_skip(pci, seq + 1, end);
                        }
                        return 1;
                }
        }

        return 0;
}

static void collect_pdi_skips(struct per_dev_info *pdi)
{
        struct skip_info *sip;
        int cpu;

        pdi->skips = 0;
        pdi->seq_skips = 0;

        for (cpu = 0; cpu < pdi->ncpus; cpu++) {
                struct per_cpu_info *pci = &pdi->cpus[cpu];

                for (sip = pci->skips_head; sip != NULL; sip = sip->next) {
                        pdi->skips++;
                        pdi->seq_skips += (sip->end - sip->start + 1);
                        if (verbose)
                                fprintf(stderr, "(%d,%d): skipping %lu -> %lu\n",
                                        MAJOR(pdi->dev), MINOR(pdi->dev),
                                        sip->start, sip->end);
                }
        }
}

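/*
 * mark a cpu as seen for this device, growing the cpu bitmap in
 * CPUS_PER_LONG increments as needed
 */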
static void cpu_mark_online(struct per_dev_info *pdi, unsigned int cpu)
{
        if (cpu >= pdi->cpu_map_max || !pdi->cpu_map) {
                int new_max = (cpu + CPUS_PER_LONG) & ~(CPUS_PER_LONG - 1);
                unsigned long *map = malloc(new_max / sizeof(long));

                memset(map, 0, new_max / sizeof(long));

                if (pdi->cpu_map) {
                        memcpy(map, pdi->cpu_map, pdi->cpu_map_max / sizeof(long));
                        free(pdi->cpu_map);
                }

                pdi->cpu_map = map;
                pdi->cpu_map_max = new_max;
        }

        pdi->cpu_map[CPU_IDX(cpu)] |= (1UL << CPU_BIT(cpu));
}

static inline void cpu_mark_offline(struct per_dev_info *pdi, int cpu)
{
        pdi->cpu_map[CPU_IDX(cpu)] &= ~(1UL << CPU_BIT(cpu));
}

static inline int cpu_is_online(struct per_dev_info *pdi, int cpu)
{
        return (pdi->cpu_map[CPU_IDX(cpu)] & (1UL << CPU_BIT(cpu))) != 0;
}

static inline int ppm_hash_pid(pid_t pid)
{
        return jhash_1word(pid, JHASH_RANDOM) & PPM_HASH_MASK;
}

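/*
 * look up (and below, add) the pid -> command name mapping
 */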
static struct process_pid_map *find_ppm(pid_t pid)
{
        const int hash_idx = ppm_hash_pid(pid);
        struct process_pid_map *ppm;

        ppm = ppm_hash_table[hash_idx];
        while (ppm) {
                if (ppm->pid == pid)
                        return ppm;

                ppm = ppm->hash_next;
        }

        return NULL;
}

static void add_ppm_hash(pid_t pid, const char *name)
{
        const int hash_idx = ppm_hash_pid(pid);
        struct process_pid_map *ppm;

        ppm = find_ppm(pid);
        if (!ppm) {
                ppm = malloc(sizeof(*ppm));
                memset(ppm, 0, sizeof(*ppm));
                ppm->pid = pid;
                strncpy(ppm->comm, name, sizeof(ppm->comm) - 1);
                ppm->comm[sizeof(ppm->comm) - 1] = '\0';
                ppm->hash_next = ppm_hash_table[hash_idx];
                ppm_hash_table[hash_idx] = ppm;
        }
}

char *find_process_name(pid_t pid)
{
        struct process_pid_map *ppm = find_ppm(pid);

        if (ppm)
                return ppm->comm;

        return NULL;
}

static inline int ppi_hash_pid(pid_t pid)
{
        return jhash_1word(pid, JHASH_RANDOM) & PPI_HASH_MASK;
}

static inline int ppi_hash_name(const char *name)
{
        return jhash(name, 16, JHASH_RANDOM) & PPI_HASH_MASK;
}

static inline int ppi_hash(struct per_process_info *ppi)
{
        struct process_pid_map *ppm = ppi->ppm;

        if (ppi_hash_by_pid)
                return ppi_hash_pid(ppm->pid);

        return ppi_hash_name(ppm->comm);
}

static inline void add_ppi_to_hash(struct per_process_info *ppi)
{
        const int hash_idx = ppi_hash(ppi);

        ppi->hash_next = ppi_hash_table[hash_idx];
        ppi_hash_table[hash_idx] = ppi;
}

static inline void add_ppi_to_list(struct per_process_info *ppi)
{
        ppi->list_next = ppi_list;
        ppi_list = ppi;
        ppi_list_entries++;
}

static struct per_process_info *find_ppi_by_name(char *name)
{
        const int hash_idx = ppi_hash_name(name);
        struct per_process_info *ppi;

        ppi = ppi_hash_table[hash_idx];
        while (ppi) {
                struct process_pid_map *ppm = ppi->ppm;

                if (!strcmp(ppm->comm, name))
                        return ppi;

                ppi = ppi->hash_next;
        }

        return NULL;
}

static struct per_process_info *find_ppi_by_pid(pid_t pid)
{
        const int hash_idx = ppi_hash_pid(pid);
        struct per_process_info *ppi;

        ppi = ppi_hash_table[hash_idx];
        while (ppi) {
                struct process_pid_map *ppm = ppi->ppm;

                if (ppm->pid == pid)
                        return ppi;

                ppi = ppi->hash_next;
        }

        return NULL;
}

static struct per_process_info *find_ppi(pid_t pid)
{
        struct per_process_info *ppi;
        char *name;

        if (ppi_hash_by_pid)
                return find_ppi_by_pid(pid);

        name = find_process_name(pid);
        if (!name)
                return NULL;

        ppi = find_ppi_by_name(name);
        if (ppi && ppi->ppm->pid != pid)
                ppi->more_than_one = 1;

        return ppi;
}

/*
 * struct trace and blktrace allocation cache, we do potentially
 * millions of mallocs for these structures while only using at most
 * a few thousand at a time
 */
static inline void t_free(struct trace *t)
{
        if (t_alloc_cache < 1024) {
                t->next = t_alloc_list;
                t_alloc_list = t;
                t_alloc_cache++;
        } else
                free(t);
}

static inline struct trace *t_alloc(void)
{
        struct trace *t = t_alloc_list;

        if (t) {
                t_alloc_list = t->next;
                t_alloc_cache--;
                return t;
        }

        return malloc(sizeof(*t));
}

static inline void bit_free(struct blk_io_trace *bit)
{
        if (bit_alloc_cache < 1024 && !bit->pdu_len) {
                /*
                 * abuse a 64-bit field for a next pointer for the free item
                 */
                bit->time = (__u64) (unsigned long) bit_alloc_list;
                bit_alloc_list = (struct blk_io_trace *) bit;
                bit_alloc_cache++;
        } else
                free(bit);
}

static inline struct blk_io_trace *bit_alloc(void)
{
        struct blk_io_trace *bit = bit_alloc_list;

        if (bit) {
                bit_alloc_list = (struct blk_io_trace *) (unsigned long)
                                 bit->time;
                bit_alloc_cache--;
                return bit;
        }

        return malloc(sizeof(*bit));
}

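/*
 * remove a trace from the per-cpu rb_last tree and recycle its memory
 */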
static inline void __put_trace_last(struct per_dev_info *pdi, struct trace *t)
{
        struct per_cpu_info *pci = get_cpu_info(pdi, t->bit->cpu);

        rb_erase(&t->rb_node, &pci->rb_last);
        pci->rb_last_entries--;

        bit_free(t->bit);
        t_free(t);
}

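/*
 * once displayed, a trace moves from the sort tree to the per-cpu
 * rb_last tree, where sequence checks can still find it for a while
 */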
static void put_trace(struct per_dev_info *pdi, struct trace *t)
{
        rb_erase(&t->rb_node, &rb_sort_root);
        rb_sort_entries--;

        trace_rb_insert_last(pdi, t);
}

static inline int trace_rb_insert(struct trace *t, struct rb_root *root)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct trace *__t;

        while (*p) {
                parent = *p;

                __t = rb_entry(parent, struct trace, rb_node);

                if (t->bit->time < __t->bit->time)
                        p = &(*p)->rb_left;
                else if (t->bit->time > __t->bit->time)
                        p = &(*p)->rb_right;
                else if (t->bit->device < __t->bit->device)
                        p = &(*p)->rb_left;
                else if (t->bit->device > __t->bit->device)
                        p = &(*p)->rb_right;
                else if (t->bit->sequence < __t->bit->sequence)
                        p = &(*p)->rb_left;
                else    /* >= sequence */
                        p = &(*p)->rb_right;
        }

        rb_link_node(&t->rb_node, parent, p);
        rb_insert_color(&t->rb_node, root);
        return 0;
}

static inline int trace_rb_insert_sort(struct trace *t)
{
        if (!trace_rb_insert(t, &rb_sort_root)) {
                rb_sort_entries++;
                return 0;
        }

        return 1;
}

static int trace_rb_insert_last(struct per_dev_info *pdi, struct trace *t)
{
        struct per_cpu_info *pci = get_cpu_info(pdi, t->bit->cpu);

        if (trace_rb_insert(t, &pci->rb_last))
                return 1;

        pci->rb_last_entries++;

        if (pci->rb_last_entries > rb_batch * pdi->nfiles) {
                struct rb_node *n = rb_first(&pci->rb_last);

                t = rb_entry(n, struct trace, rb_node);
                __put_trace_last(pdi, t);
        }

        return 0;
}

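/*
 * find a trace by (device, sequence); with 'order' set, also scan a
 * few entries past the lookup point to cope with the ordering hack
 * described below
 */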
static struct trace *trace_rb_find(dev_t device, unsigned long sequence,
                                   struct rb_root *root, int order)
{
        struct rb_node *n = root->rb_node;
        struct rb_node *prev = NULL;
        struct trace *__t;

        while (n) {
                __t = rb_entry(n, struct trace, rb_node);
                prev = n;

                if (device < __t->bit->device)
                        n = n->rb_left;
                else if (device > __t->bit->device)
                        n = n->rb_right;
                else if (sequence < __t->bit->sequence)
                        n = n->rb_left;
                else if (sequence > __t->bit->sequence)
                        n = n->rb_right;
                else
                        return __t;
        }

        /*
         * hack - the list may not be sequence ordered because some
         * events don't have sequence and time matched. so we end up
         * being a little off in the rb lookup here, because we don't
         * know the time we are looking for. compensate by browsing
         * a little ahead from the last entry to find the match
         */
        if (order && prev) {
                int max = 5;

                while (((n = rb_next(prev)) != NULL) && max--) {
                        __t = rb_entry(n, struct trace, rb_node);

                        if (__t->bit->device == device &&
                            __t->bit->sequence == sequence)
                                return __t;

                        prev = n;
                }
        }

        return NULL;
}

static inline struct trace *trace_rb_find_last(struct per_dev_info *pdi,
                                               struct per_cpu_info *pci,
                                               unsigned long seq)
{
        return trace_rb_find(pdi->dev, seq, &pci->rb_last, 0);
}

static inline int track_rb_insert(struct per_dev_info *pdi, struct io_track *iot)
{
        struct rb_node **p = &pdi->rb_track.rb_node;
        struct rb_node *parent = NULL;
        struct io_track *__iot;

        while (*p) {
                parent = *p;
                __iot = rb_entry(parent, struct io_track, rb_node);

                if (iot->sector < __iot->sector)
                        p = &(*p)->rb_left;
                else if (iot->sector > __iot->sector)
                        p = &(*p)->rb_right;
                else {
                        fprintf(stderr,
                                "sector alias (%Lu) on device %d,%d!\n",
                                (unsigned long long) iot->sector,
                                MAJOR(pdi->dev), MINOR(pdi->dev));
                        return 1;
                }
        }

        rb_link_node(&iot->rb_node, parent, p);
        rb_insert_color(&iot->rb_node, &pdi->rb_track);
        return 0;
}

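/*
 * sector-indexed rbtree lookup of a tracked io on this device
 */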
static struct io_track *__find_track(struct per_dev_info *pdi, __u64 sector)
{
        struct rb_node *n = pdi->rb_track.rb_node;
        struct io_track *__iot;

        while (n) {
                __iot = rb_entry(n, struct io_track, rb_node);

                if (sector < __iot->sector)
                        n = n->rb_left;
                else if (sector > __iot->sector)
                        n = n->rb_right;
                else
                        return __iot;
        }

        return NULL;
}

static struct io_track *find_track(struct per_dev_info *pdi, pid_t pid,
                                   __u64 sector)
{
        struct io_track *iot;

        iot = __find_track(pdi, sector);
        if (!iot) {
                iot = malloc(sizeof(*iot));
                iot->ppm = find_ppm(pid);
                iot->sector = sector;
                track_rb_insert(pdi, iot);
        }

        return iot;
}

static void log_track_frontmerge(struct per_dev_info *pdi,
                                 struct blk_io_trace *t)
{
        struct io_track *iot;

        if (!track_ios)
                return;

        iot = __find_track(pdi, t->sector + t_sec(t));
        if (!iot) {
                if (verbose)
                        fprintf(stderr, "merge not found for (%d,%d): %llu\n",
                                MAJOR(pdi->dev), MINOR(pdi->dev),
                                (unsigned long long) t->sector + t_sec(t));
                return;
        }

        rb_erase(&iot->rb_node, &pdi->rb_track);
        iot->sector -= t_sec(t);
        track_rb_insert(pdi, iot);
}

static void log_track_getrq(struct per_dev_info *pdi, struct blk_io_trace *t)
{
        struct io_track *iot;

        if (!track_ios)
                return;

        iot = find_track(pdi, t->pid, t->sector);
        iot->allocation_time = t->time;
}

static inline int is_remapper(struct per_dev_info *pdi)
{
        int major = MAJOR(pdi->dev);

        return (major == 253 || major == 9);
}

/*
 * for md/dm setups, the interesting cycle is Q -> C. So track queueing
 * time here, as dispatch time
 */
static void log_track_queue(struct per_dev_info *pdi, struct blk_io_trace *t)
{
        struct io_track *iot;

        if (!track_ios)
                return;
        if (!is_remapper(pdi))
                return;

        iot = find_track(pdi, t->pid, t->sector);
        iot->dispatch_time = t->time;
}

/*
 * return time between rq allocation and insertion
 */
static unsigned long long log_track_insert(struct per_dev_info *pdi,
                                           struct blk_io_trace *t)
{
        unsigned long long elapsed;
        struct io_track *iot;

        if (!track_ios)
                return -1;

        iot = find_track(pdi, t->pid, t->sector);
        iot->queue_time = t->time;

        if (!iot->allocation_time)
                return -1;

        elapsed = iot->queue_time - iot->allocation_time;

        if (per_process_stats) {
                struct per_process_info *ppi = find_ppi(iot->ppm->pid);
                int w = (t->action & BLK_TC_ACT(BLK_TC_WRITE)) != 0;

                if (ppi && elapsed > ppi->longest_allocation_wait[w])
                        ppi->longest_allocation_wait[w] = elapsed;
        }

        return elapsed;
}

/*
 * return time between queue and issue
 */
static unsigned long long log_track_issue(struct per_dev_info *pdi,
                                          struct blk_io_trace *t)
{
        unsigned long long elapsed;
        struct io_track *iot;

        if (!track_ios)
                return -1;
        if ((t->action & BLK_TC_ACT(BLK_TC_FS)) == 0)
                return -1;

        iot = __find_track(pdi, t->sector);
        if (!iot) {
                if (verbose)
                        fprintf(stderr, "issue not found for (%d,%d): %llu\n",
                                MAJOR(pdi->dev), MINOR(pdi->dev),
                                (unsigned long long) t->sector);
                return -1;
        }

        iot->dispatch_time = t->time;
        elapsed = iot->dispatch_time - iot->queue_time;

        if (per_process_stats) {
                struct per_process_info *ppi = find_ppi(iot->ppm->pid);
                int w = (t->action & BLK_TC_ACT(BLK_TC_WRITE)) != 0;

                if (ppi && elapsed > ppi->longest_dispatch_wait[w])
                        ppi->longest_dispatch_wait[w] = elapsed;
        }

        return elapsed;
}

/*
 * return time between dispatch and complete
 */
static unsigned long long log_track_complete(struct per_dev_info *pdi,
                                             struct blk_io_trace *t)
{
        unsigned long long elapsed;
        struct io_track *iot;

        if (!track_ios)
                return -1;

        iot = __find_track(pdi, t->sector);
        if (!iot) {
                if (verbose)
                        fprintf(stderr, "complete not found for (%d,%d): %llu\n",
                                MAJOR(pdi->dev), MINOR(pdi->dev),
                                (unsigned long long) t->sector);
                return -1;
        }

        iot->completion_time = t->time;
        elapsed = iot->completion_time - iot->dispatch_time;

        if (per_process_stats) {
                struct per_process_info *ppi = find_ppi(iot->ppm->pid);
                int w = (t->action & BLK_TC_ACT(BLK_TC_WRITE)) != 0;

                if (ppi && elapsed > ppi->longest_completion_wait[w])
                        ppi->longest_completion_wait[w] = elapsed;
        }

        /*
         * kill the trace, we don't need it after completion
         */
        rb_erase(&iot->rb_node, &pdi->rb_track);
        free(iot);

        return elapsed;
}


static struct io_stats *find_process_io_stats(pid_t pid)
{
        struct per_process_info *ppi = find_ppi(pid);

        if (!ppi) {
                ppi = malloc(sizeof(*ppi));
                memset(ppi, 0, sizeof(*ppi));
                ppi->ppm = find_ppm(pid);
                add_ppi_to_hash(ppi);
                add_ppi_to_list(ppi);
        }

        return &ppi->io_stats;
}

static char *get_dev_name(struct per_dev_info *pdi, char *buffer, int size)
{
        if (pdi->name)
                snprintf(buffer, size, "%s", pdi->name);
        else
                snprintf(buffer, size, "%d,%d", MAJOR(pdi->dev), MINOR(pdi->dev));
        return buffer;
}

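/*
 * flag the entry with 'B' if time went backwards since the last
 * reported event on this device
 */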
static void check_time(struct per_dev_info *pdi, struct blk_io_trace *bit)
{
        unsigned long long this = bit->time;
        unsigned long long last = pdi->last_reported_time;

        pdi->backwards = (this < last) ? 'B' : ' ';
        pdi->last_reported_time = this;
}

static inline void __account_m(struct io_stats *ios, struct blk_io_trace *t,
                               int rw)
{
        if (rw) {
                ios->mwrites++;
                ios->qwrite_kb += t_kb(t);
        } else {
                ios->mreads++;
                ios->qread_kb += t_kb(t);
        }
}

static inline void account_m(struct blk_io_trace *t, struct per_cpu_info *pci,
                             int rw)
{
        __account_m(&pci->io_stats, t, rw);

        if (per_process_stats) {
                struct io_stats *ios = find_process_io_stats(t->pid);

                __account_m(ios, t, rw);
        }
}

static inline void __account_queue(struct io_stats *ios, struct blk_io_trace *t,
                                   int rw)
{
        if (rw) {
                ios->qwrites++;
                ios->qwrite_kb += t_kb(t);
        } else {
                ios->qreads++;
                ios->qread_kb += t_kb(t);
        }
}

static inline void account_queue(struct blk_io_trace *t,
                                 struct per_cpu_info *pci, int rw)
{
        __account_queue(&pci->io_stats, t, rw);

        if (per_process_stats) {
                struct io_stats *ios = find_process_io_stats(t->pid);

                __account_queue(ios, t, rw);
        }
}

static inline void __account_c(struct io_stats *ios, int rw, int bytes)
{
        if (rw) {
                ios->cwrites++;
                ios->cwrite_kb += bytes >> 10;
        } else {
                ios->creads++;
                ios->cread_kb += bytes >> 10;
        }
}

static inline void account_c(struct blk_io_trace *t, struct per_cpu_info *pci,
                             int rw, int bytes)
{
        __account_c(&pci->io_stats, rw, bytes);

        if (per_process_stats) {
                struct io_stats *ios = find_process_io_stats(t->pid);

                __account_c(ios, rw, bytes);
        }
}

static inline void __account_issue(struct io_stats *ios, int rw,
                                   unsigned int bytes)
{
        if (rw) {
                ios->iwrites++;
                ios->iwrite_kb += bytes >> 10;
        } else {
                ios->ireads++;
                ios->iread_kb += bytes >> 10;
        }
}

static inline void account_issue(struct blk_io_trace *t,
                                 struct per_cpu_info *pci, int rw)
{
        __account_issue(&pci->io_stats, rw, t->bytes);

        if (per_process_stats) {
                struct io_stats *ios = find_process_io_stats(t->pid);

                __account_issue(ios, rw, t->bytes);
        }
}

static inline void __account_unplug(struct io_stats *ios, int timer)
{
        if (timer)
                ios->timer_unplugs++;
        else
                ios->io_unplugs++;
}

static inline void account_unplug(struct blk_io_trace *t,
                                  struct per_cpu_info *pci, int timer)
{
        __account_unplug(&pci->io_stats, timer);

        if (per_process_stats) {
                struct io_stats *ios = find_process_io_stats(t->pid);

                __account_unplug(ios, timer);
        }
}

static inline void __account_requeue(struct io_stats *ios,
                                     struct blk_io_trace *t, int rw)
{
        if (rw) {
                ios->wrqueue++;
                ios->iwrite_kb -= t_kb(t);
        } else {
                ios->rrqueue++;
                ios->iread_kb -= t_kb(t);
        }
}

static inline void account_requeue(struct blk_io_trace *t,
                                   struct per_cpu_info *pci, int rw)
{
        __account_requeue(&pci->io_stats, t, rw);

        if (per_process_stats) {
                struct io_stats *ios = find_process_io_stats(t->pid);

                __account_requeue(ios, t, rw);
        }
}

static void log_complete(struct per_dev_info *pdi, struct per_cpu_info *pci,
                         struct blk_io_trace *t, char *act)
{
        process_fmt(act, pci, t, log_track_complete(pdi, t), 0, NULL);
}

static void log_insert(struct per_dev_info *pdi, struct per_cpu_info *pci,
                       struct blk_io_trace *t, char *act)
{
        process_fmt(act, pci, t, log_track_insert(pdi, t), 0, NULL);
}

static void log_queue(struct per_cpu_info *pci, struct blk_io_trace *t,
                      char *act)
{
        process_fmt(act, pci, t, -1, 0, NULL);
}

static void log_issue(struct per_dev_info *pdi, struct per_cpu_info *pci,
                      struct blk_io_trace *t, char *act)
{
        process_fmt(act, pci, t, log_track_issue(pdi, t), 0, NULL);
}

static void log_merge(struct per_dev_info *pdi, struct per_cpu_info *pci,
                      struct blk_io_trace *t, char *act)
{
        if (act[0] == 'F')
                log_track_frontmerge(pdi, t);

        process_fmt(act, pci, t, -1ULL, 0, NULL);
}

static void log_action(struct per_cpu_info *pci, struct blk_io_trace *t,
                        char *act)
{
        process_fmt(act, pci, t, -1ULL, 0, NULL);
}

static void log_generic(struct per_cpu_info *pci, struct blk_io_trace *t,
                        char *act)
{
        process_fmt(act, pci, t, -1ULL, 0, NULL);
}

static void log_unplug(struct per_cpu_info *pci, struct blk_io_trace *t,
                      char *act)
{
        process_fmt(act, pci, t, -1ULL, 0, NULL);
}

static void log_split(struct per_cpu_info *pci, struct blk_io_trace *t,
                      char *act)
{
        process_fmt(act, pci, t, -1ULL, 0, NULL);
}

static void log_pc(struct per_cpu_info *pci, struct blk_io_trace *t, char *act)
{
        unsigned char *buf = (unsigned char *) t + sizeof(*t);

        process_fmt(act, pci, t, -1ULL, t->pdu_len, buf);
}

static void dump_trace_pc(struct blk_io_trace *t, struct per_cpu_info *pci)
{
        int act = t->action & 0xffff;

        switch (act) {
                case __BLK_TA_QUEUE:
                        log_generic(pci, t, "Q");
                        break;
                case __BLK_TA_GETRQ:
                        log_generic(pci, t, "G");
                        break;
                case __BLK_TA_SLEEPRQ:
                        log_generic(pci, t, "S");
                        break;
                case __BLK_TA_REQUEUE:
                        log_generic(pci, t, "R");
                        break;
                case __BLK_TA_ISSUE:
                        log_pc(pci, t, "D");
                        break;
                case __BLK_TA_COMPLETE:
                        log_pc(pci, t, "C");
                        break;
                case __BLK_TA_INSERT:
                        log_pc(pci, t, "I");
                        break;
                default:
                        fprintf(stderr, "Bad pc action %x\n", act);
                        break;
        }
}

static void dump_trace_fs(struct blk_io_trace *t, struct per_dev_info *pdi,
                          struct per_cpu_info *pci)
{
        int w = (t->action & BLK_TC_ACT(BLK_TC_WRITE)) != 0;
        int act = t->action & 0xffff;

        switch (act) {
                case __BLK_TA_QUEUE:
                        log_track_queue(pdi, t);
                        account_queue(t, pci, w);
                        log_queue(pci, t, "Q");
                        break;
                case __BLK_TA_INSERT:
                        log_insert(pdi, pci, t, "I");
                        break;
                case __BLK_TA_BACKMERGE:
                        account_m(t, pci, w);
                        log_merge(pdi, pci, t, "M");
                        break;
                case __BLK_TA_FRONTMERGE:
                        account_m(t, pci, w);
                        log_merge(pdi, pci, t, "F");
                        break;
                case __BLK_TA_GETRQ:
                        log_track_getrq(pdi, t);
                        log_generic(pci, t, "G");
                        break;
                case __BLK_TA_SLEEPRQ:
                        log_generic(pci, t, "S");
                        break;
                case __BLK_TA_REQUEUE:
                        /*
                         * can happen if we miss traces, don't let it go
                         * below zero
                         */
                        if (pdi->cur_depth[w])
                                pdi->cur_depth[w]--;
                        account_requeue(t, pci, w);
                        log_queue(pci, t, "R");
                        break;
                case __BLK_TA_ISSUE:
                        account_issue(t, pci, w);
                        pdi->cur_depth[w]++;
                        if (pdi->cur_depth[w] > pdi->max_depth[w])
                                pdi->max_depth[w] = pdi->cur_depth[w];
                        log_issue(pdi, pci, t, "D");
                        break;
                case __BLK_TA_COMPLETE:
                        if (pdi->cur_depth[w])
                                pdi->cur_depth[w]--;
                        account_c(t, pci, w, t->bytes);
                        log_complete(pdi, pci, t, "C");
                        break;
                case __BLK_TA_PLUG:
                        log_action(pci, t, "P");
                        break;
                case __BLK_TA_UNPLUG_IO:
                        account_unplug(t, pci, 0);
                        log_unplug(pci, t, "U");
                        break;
                case __BLK_TA_UNPLUG_TIMER:
                        account_unplug(t, pci, 1);
                        log_unplug(pci, t, "UT");
                        break;
                case __BLK_TA_SPLIT:
                        log_split(pci, t, "X");
                        break;
                case __BLK_TA_BOUNCE:
                        log_generic(pci, t, "B");
                        break;
                case __BLK_TA_REMAP:
                        log_generic(pci, t, "A");
                        break;
                default:
                        fprintf(stderr, "Bad fs action %x\n", t->action);
                        break;
        }
}

static void dump_trace(struct blk_io_trace *t, struct per_cpu_info *pci,
                       struct per_dev_info *pdi)
{
        if (t->action & BLK_TC_ACT(BLK_TC_PC))
                dump_trace_pc(t, pci);
        else
                dump_trace_fs(t, pdi, pci);

        if (!pdi->events)
                pdi->first_reported_time = t->time;

        pdi->events++;

        output_binary(t, sizeof(*t) + t->pdu_len);
}

/*
 * print in a proper way, not too small and not too big. if more than
 * 1,000,000K, turn into M and so on
 */
static char *size_cnv(char *dst, unsigned long long num, int in_kb)
{
        char suff[] = { '\0', 'K', 'M', 'G', 'P' };
        unsigned int i = 0;

        if (in_kb)
                i++;

        while (num > 1000 * 1000ULL && (i < sizeof(suff) - 1)) {
                i++;
                num /= 1000;
        }

        sprintf(dst, "%'8Lu%c", num, suff[i]);
        return dst;
}

static void dump_io_stats(struct per_dev_info *pdi, struct io_stats *ios,
                          char *msg)
{
        static char x[256], y[256];

        fprintf(ofp, "%s\n", msg);

        fprintf(ofp, " Reads Queued:    %s, %siB\t", size_cnv(x, ios->qreads, 0), size_cnv(y, ios->qread_kb, 1));
        fprintf(ofp, " Writes Queued:    %s, %siB\n", size_cnv(x, ios->qwrites, 0), size_cnv(y, ios->qwrite_kb, 1));

        fprintf(ofp, " Read Dispatches: %s, %siB\t", size_cnv(x, ios->ireads, 0), size_cnv(y, ios->iread_kb, 1));
        fprintf(ofp, " Write Dispatches: %s, %siB\n", size_cnv(x, ios->iwrites, 0), size_cnv(y, ios->iwrite_kb, 1));
        fprintf(ofp, " Reads Requeued:  %s\t\t", size_cnv(x, ios->rrqueue, 0));
        fprintf(ofp, " Writes Requeued:  %s\n", size_cnv(x, ios->wrqueue, 0));
        fprintf(ofp, " Reads Completed: %s, %siB\t", size_cnv(x, ios->creads, 0), size_cnv(y, ios->cread_kb, 1));
        fprintf(ofp, " Writes Completed: %s, %siB\n", size_cnv(x, ios->cwrites, 0), size_cnv(y, ios->cwrite_kb, 1));
        fprintf(ofp, " Read Merges:     %'8lu%8c\t", ios->mreads, ' ');
        fprintf(ofp, " Write Merges:     %'8lu\n", ios->mwrites);
        if (pdi) {
                fprintf(ofp, " Read depth:      %'8u%8c\t", pdi->max_depth[0], ' ');
                fprintf(ofp, " Write depth:      %'8u\n", pdi->max_depth[1]);
        }
        fprintf(ofp, " IO unplugs:      %'8lu%8c\t", ios->io_unplugs, ' ');
        fprintf(ofp, " Timer unplugs:    %'8lu\n", ios->timer_unplugs);
}

static void dump_wait_stats(struct per_process_info *ppi)
{
        unsigned long rawait = ppi->longest_allocation_wait[0] / 1000;
        unsigned long rdwait = ppi->longest_dispatch_wait[0] / 1000;
        unsigned long rcwait = ppi->longest_completion_wait[0] / 1000;
        unsigned long wawait = ppi->longest_allocation_wait[1] / 1000;
        unsigned long wdwait = ppi->longest_dispatch_wait[1] / 1000;
        unsigned long wcwait = ppi->longest_completion_wait[1] / 1000;

        fprintf(ofp, " Allocation wait: %'8lu%8c\t", rawait, ' ');
        fprintf(ofp, " Allocation wait:  %'8lu\n", wawait);
        fprintf(ofp, " Dispatch wait:   %'8lu%8c\t", rdwait, ' ');
        fprintf(ofp, " Dispatch wait:    %'8lu\n", wdwait);
        fprintf(ofp, " Completion wait: %'8lu%8c\t", rcwait, ' ');
        fprintf(ofp, " Completion wait:  %'8lu\n", wcwait);
}

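/*
 * order per-process stats by command name (version sort), with pid
 * breaking ties
 */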
static int ppi_name_compare(const void *p1, const void *p2)
{
        struct per_process_info *ppi1 = *((struct per_process_info **) p1);
        struct per_process_info *ppi2 = *((struct per_process_info **) p2);
        int res;

        res = strverscmp(ppi1->ppm->comm, ppi2->ppm->comm);
        if (!res)
                res = ppi1->ppm->pid > ppi2->ppm->pid;

        return res;
}

static void sort_process_list(void)
{
        struct per_process_info **ppis;
        struct per_process_info *ppi;
        int i = 0;

        ppis = malloc(ppi_list_entries * sizeof(struct per_process_info *));

        ppi = ppi_list;
        while (ppi) {
                ppis[i++] = ppi;
                ppi = ppi->list_next;
        }

        qsort(ppis, ppi_list_entries, sizeof(ppi), ppi_name_compare);

        i = ppi_list_entries - 1;
        ppi_list = NULL;
        while (i >= 0) {
                ppi = ppis[i];

                ppi->list_next = ppi_list;
                ppi_list = ppi;
                i--;
        }

        free(ppis);
}

static void show_process_stats(void)
{
        struct per_process_info *ppi;

        sort_process_list();

        ppi = ppi_list;
        while (ppi) {
                struct process_pid_map *ppm = ppi->ppm;
                char name[64];

                if (ppi->more_than_one)
                        sprintf(name, "%s (%u, ...)", ppm->comm, ppm->pid);
                else
                        sprintf(name, "%s (%u)", ppm->comm, ppm->pid);

                dump_io_stats(NULL, &ppi->io_stats, name);
                dump_wait_stats(ppi);
                ppi = ppi->list_next;
        }

        fprintf(ofp, "\n");
}

static void show_device_and_cpu_stats(void)
{
        struct per_dev_info *pdi;
        struct per_cpu_info *pci;
        struct io_stats total, *ios;
        unsigned long long rrate, wrate, msec;
        int i, j, pci_events;
        char line[3 + 8/*cpu*/ + 2 + 32/*dev*/ + 3];
        char name[32];

        for (pdi = devices, i = 0; i < ndevices; i++, pdi++) {

                memset(&total, 0, sizeof(total));
                pci_events = 0;

                if (i > 0)
                        fprintf(ofp, "\n");

                for (pci = pdi->cpus, j = 0; j < pdi->ncpus; j++, pci++) {
                        if (!pci->nelems)
                                continue;

                        ios = &pci->io_stats;
                        total.qreads += ios->qreads;
                        total.qwrites += ios->qwrites;
                        total.creads += ios->creads;
                        total.cwrites += ios->cwrites;
                        total.mreads += ios->mreads;
                        total.mwrites += ios->mwrites;
                        total.ireads += ios->ireads;
                        total.iwrites += ios->iwrites;
                        total.rrqueue += ios->rrqueue;
                        total.wrqueue += ios->wrqueue;
                        total.qread_kb += ios->qread_kb;
                        total.qwrite_kb += ios->qwrite_kb;
                        total.cread_kb += ios->cread_kb;
                        total.cwrite_kb += ios->cwrite_kb;
                        total.iread_kb += ios->iread_kb;
                        total.iwrite_kb += ios->iwrite_kb;
                        total.timer_unplugs += ios->timer_unplugs;
                        total.io_unplugs += ios->io_unplugs;

                        snprintf(line, sizeof(line) - 1, "CPU%d (%s):",
                                 j, get_dev_name(pdi, name, sizeof(name)));
                        dump_io_stats(pdi, ios, line);
                        pci_events++;
                }

                if (pci_events > 1) {
                        fprintf(ofp, "\n");
                        snprintf(line, sizeof(line) - 1, "Total (%s):",
                                 get_dev_name(pdi, name, sizeof(name)));
                        dump_io_stats(NULL, &total, line);
                }

                wrate = rrate = 0;
                msec = (pdi->last_reported_time - pdi->first_reported_time) / 1000000;
                if (msec) {
                        rrate = 1000 * total.cread_kb / msec;
                        wrate = 1000 * total.cwrite_kb / msec;
                }

                fprintf(ofp, "\nThroughput (R/W): %'LuKiB/s / %'LuKiB/s\n",
                        rrate, wrate);
                fprintf(ofp, "Events (%s): %'Lu entries\n",
                        get_dev_name(pdi, line, sizeof(line)), pdi->events);

                collect_pdi_skips(pdi);
                fprintf(ofp, "Skips: %'lu forward (%'llu - %5.1lf%%)\n",
                        pdi->skips, pdi->seq_skips,
                        100.0 * ((double)pdi->seq_skips /
                                (double)(pdi->events + pdi->seq_skips)));
        }
}

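/*
 * genesis_time is the earliest timestamp seen across all queued traces;
 * event times are reported relative to it
 */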
static void find_genesis(void)
{
        struct trace *t = trace_list;

        genesis_time = -1ULL;
        while (t != NULL) {
                if (t->bit->time < genesis_time)
                        genesis_time = t->bit->time;

                t = t->next;
        }
}

static inline int check_stopwatch(struct blk_io_trace *bit)
{
        if (bit->time < stopwatch_end &&
            bit->time >= stopwatch_start)
                return 0;

        return 1;
}

/*
 * return youngest entry read
 */
static int sort_entries(unsigned long long *youngest)
{
        struct per_dev_info *pdi = NULL;
        struct per_cpu_info *pci = NULL;
        struct trace *t;

        if (!genesis_time)
                find_genesis();

        *youngest = 0;
        while ((t = trace_list) != NULL) {
                struct blk_io_trace *bit = t->bit;

                trace_list = t->next;

                bit->time -= genesis_time;

                if (bit->time < *youngest || !*youngest)
                        *youngest = bit->time;

                if (!pdi || pdi->dev != bit->device) {
                        pdi = get_dev_info(bit->device);
                        pci = NULL;
                }

                if (!pci || pci->cpu != bit->cpu)
                        pci = get_cpu_info(pdi, bit->cpu);

                if (bit->sequence < pci->smallest_seq_read)
                        pci->smallest_seq_read = bit->sequence;

                if (check_stopwatch(bit)) {
                        bit_free(bit);
                        t_free(t);
                        continue;
                }

                if (trace_rb_insert_sort(t))
                        return -1;
        }

        return 0;
}

/*
 * to continue, we must have traces from all online cpus in the tree
 */
static int check_cpu_map(struct per_dev_info *pdi)
{
        unsigned long *cpu_map;
        struct rb_node *n;
        struct trace *__t;
        unsigned int i;
        int ret, cpu;

        /*
         * create a zeroed map of the cpus we have traces for
         */
        cpu_map = calloc(pdi->cpu_map_max / CPUS_PER_LONG, sizeof(long));
        n = rb_first(&rb_sort_root);
        while (n) {
                __t = rb_entry(n, struct trace, rb_node);
                cpu = __t->bit->cpu;

                cpu_map[CPU_IDX(cpu)] |= (1UL << CPU_BIT(cpu));
                n = rb_next(n);
        }

        /*
         * we can't continue if pdi->cpu_map has entries set that we don't
         * have in the sort rbtree. the opposite is not a problem, though
         */
        ret = 0;
        for (i = 0; i < pdi->cpu_map_max / CPUS_PER_LONG; i++) {
                if (pdi->cpu_map[i] & ~(cpu_map[i])) {
                        ret = 1;
                        break;
                }
        }

        free(cpu_map);
        return ret;
}

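/*
 * Check that this trace is next in sequence for its cpu. Returns 0 if
 * it may be displayed, 1 if we should wait for more entries first.
 * Missing sequences are either pulled out of the rbtree directly or,
 * on the forced final pass, recorded as skips.
 */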
static int check_sequence(struct per_dev_info *pdi, struct trace *t, int force)
{
        struct blk_io_trace *bit = t->bit;
        unsigned long expected_sequence;
        struct per_cpu_info *pci;
        struct trace *__t;

        pci = get_cpu_info(pdi, bit->cpu);
        expected_sequence = pci->last_sequence + 1;

        if (!expected_sequence) {
                /*
                 * 1 should be the first entry, just allow it
                 */
                if (bit->sequence == 1)
                        return 0;
                if (bit->sequence == pci->smallest_seq_read)
                        return 0;

                return check_cpu_map(pdi);
        }

        if (bit->sequence == expected_sequence)
                return 0;

        /*
         * we may not have seen that sequence yet. if we are not doing
         * the final run, break and wait for more entries.
         */
        if (expected_sequence < pci->smallest_seq_read) {
                __t = trace_rb_find_last(pdi, pci, expected_sequence);
                if (!__t)
                        goto skip;

                __put_trace_last(pdi, __t);
                return 0;
        } else if (!force) {
                return 1;
        } else {
skip:
                if (check_current_skips(pci, bit->sequence))
                        return 0;

                if (expected_sequence < bit->sequence)
                        insert_skip(pci, expected_sequence, bit->sequence - 1);
                return 0;
        }
}

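/*
 * Emit traces from the sort rbtree in time order. With force set (the
 * final pass), sequence gaps and time limits no longer hold entries back.
 */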
static void show_entries_rb(int force)
{
        struct per_dev_info *pdi = NULL;
        struct per_cpu_info *pci = NULL;
        struct blk_io_trace *bit;
        struct rb_node *n;
        struct trace *t;

        while ((n = rb_first(&rb_sort_root)) != NULL) {
                if (is_done() && !force && !pipeline)
                        break;

                t = rb_entry(n, struct trace, rb_node);
                bit = t->bit;

                if (read_sequence - t->read_sequence < 1 && !force)
                        break;

                if (!pdi || pdi->dev != bit->device) {
                        pdi = get_dev_info(bit->device);
                        pci = NULL;
                }

                if (!pdi) {
                        fprintf(stderr, "Unknown device ID? (%d,%d)\n",
                                MAJOR(bit->device), MINOR(bit->device));
                        break;
                }

                if (check_sequence(pdi, t, force))
                        break;

                if (!force && bit->time > last_allowed_time)
                        break;

                check_time(pdi, bit);

                if (!pci || pci->cpu != bit->cpu)
                        pci = get_cpu_info(pdi, bit->cpu);

                pci->last_sequence = bit->sequence;

                pci->nelems++;

                if (bit->action & (act_mask << BLK_TC_SHIFT))
                        dump_trace(bit, pci, pdi);

                put_trace(pdi, t);
        }
}

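/*
 * Read exactly 'bytes' bytes from fd, toggling O_NONBLOCK as requested.
 * Returns 0 on success, 1 on EOF (or no data yet in non-blocking mode),
 * -1 on error. Partial reads are retried until the full amount arrives.
 */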
static int read_data(int fd, void *buffer, int bytes, int block, int *fdblock)
{
        int ret, bytes_left, fl;
        void *p;

        if (block != *fdblock) {
                fl = fcntl(fd, F_GETFL);

                if (!block) {
                        *fdblock = 0;
                        fcntl(fd, F_SETFL, fl | O_NONBLOCK);
                } else {
                        *fdblock = 1;
                        fcntl(fd, F_SETFL, fl & ~O_NONBLOCK);
                }
        }

        bytes_left = bytes;
        p = buffer;
        while (bytes_left > 0) {
                ret = read(fd, p, bytes_left);
                if (!ret)
                        return 1;
                else if (ret < 0) {
                        if (errno != EAGAIN) {
                                perror("read");
                                return -1;
                        }

                        /*
                         * never do partial reads. we can return if we
                         * didn't read anything and we should not block,
                         * otherwise wait for data
                         */
                        if ((bytes_left == bytes) && !block)
                                return 1;

                        usleep(10);
                        continue;
                } else {
                        p += ret;
                        bytes_left -= ret;
                }
        }

        return 0;
}

static inline __u16 get_pdulen(struct blk_io_trace *bit)
{
        if (data_is_native)
                return bit->pdu_len;

        return __bswap_16(bit->pdu_len);
}

static inline __u32 get_magic(struct blk_io_trace *bit)
{
        if (data_is_native)
                return bit->magic;

        return __bswap_32(bit->magic);
}

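/*
 * Read up to rb_batch traces from fd onto the unsorted trace list.
 * Returns the number of events read; 0 or negative means EOF/error.
 */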
static int read_events(int fd, int always_block, int *fdblock)
{
        struct per_dev_info *pdi = NULL;
        int events = 0;

        while (!is_done() && events < rb_batch) {
                struct blk_io_trace *bit;
                struct trace *t;
                int pdu_len, should_block, ret;
                __u32 magic;

                bit = bit_alloc();

                should_block = !events || always_block;

                ret = read_data(fd, bit, sizeof(*bit), should_block, fdblock);
                if (ret) {
                        bit_free(bit);
                        if (!events && ret < 0)
                                events = ret;
                        break;
                }

                /*
                 * look at first trace to check whether we need to convert
                 * data in the future
                 */
                if (data_is_native == -1 && check_data_endianness(bit->magic)) {
                        bit_free(bit);
                        break;
                }

                magic = get_magic(bit);
                if ((magic & 0xffffff00) != BLK_IO_TRACE_MAGIC) {
                        fprintf(stderr, "Bad magic %x\n", magic);
                        bit_free(bit);
                        break;
                }

                pdu_len = get_pdulen(bit);
                if (pdu_len) {
                        void *ptr = realloc(bit, sizeof(*bit) + pdu_len);

                        if (read_data(fd, ptr + sizeof(*bit), pdu_len, 1, fdblock)) {
                                bit_free(ptr);
                                break;
                        }

                        bit = ptr;
                }

                trace_to_cpu(bit);

                if (verify_trace(bit)) {
                        bit_free(bit);
                        continue;
                }

                /*
                 * not a real trace, so grab and handle it here
                 */
                if (bit->action & BLK_TC_ACT(BLK_TC_NOTIFY)) {
                        add_ppm_hash(bit->pid, (char *) bit + sizeof(*bit));
                        output_binary(bit, sizeof(*bit) + bit->pdu_len);
                        bit_free(bit);
                        continue;
                }

                t = t_alloc();
                memset(t, 0, sizeof(*t));
                t->bit = bit;
                t->read_sequence = read_sequence;

                t->next = trace_list;
                trace_list = t;

                if (!pdi || pdi->dev != bit->device)
                        pdi = get_dev_info(bit->device);

                if (bit->time > pdi->last_read_time)
                        pdi->last_read_time = bit->time;

                events++;
        }

        return events;
}

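/*
 * Open the per-cpu trace files for every device, then repeatedly pull
 * in batches of events, sort them, and display what is safe to show.
 */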
static int do_file(void)
{
        struct per_cpu_info *pci;
        struct per_dev_info *pdi;
        int i, j, events, events_added;

        /*
         * first prepare all files for reading
         */
        for (i = 0; i < ndevices; i++) {
                pdi = &devices[i];
                pdi->nfiles = 0;

                for (j = 0;; j++) {
                        struct stat st;
                        int len = 0;
                        char *p, *dname;

                        pci = get_cpu_info(pdi, j);
                        pci->cpu = j;
                        pci->fd = -1;
                        pci->fdblock = -1;

                        p = strdup(pdi->name);
                        dname = dirname(p);
                        if (strcmp(dname, ".")) {
                                input_dir = dname;
                                p = strdup(pdi->name);
                                strcpy(pdi->name, basename(p));
                        }
                        free(p);

                        if (input_dir)
                                len = sprintf(pci->fname, "%s/", input_dir);

                        snprintf(pci->fname + len, sizeof(pci->fname)-1-len,
                                 "%s.blktrace.%d", pdi->name, pci->cpu);
                        if (stat(pci->fname, &st) < 0)
                                break;
                        if (st.st_size) {
                                pci->fd = open(pci->fname, O_RDONLY);
                                if (pci->fd < 0) {
                                        perror(pci->fname);
                                        continue;
                                }
                        }

                        printf("Input file %s added\n", pci->fname);
                        pdi->nfiles++;
                        cpu_mark_online(pdi, pci->cpu);
                }
        }

        /*
         * now loop over the files reading in the data
         */
        do {
                unsigned long long youngest;

                events_added = 0;
                last_allowed_time = -1ULL;
                read_sequence++;

                for (i = 0; i < ndevices; i++) {
                        pdi = &devices[i];
                        pdi->last_read_time = -1ULL;

                        for (j = 0; j < pdi->nfiles; j++) {

                                pci = get_cpu_info(pdi, j);

                                if (pci->fd == -1)
                                        continue;

                                pci->smallest_seq_read = -1;

                                events = read_events(pci->fd, 1, &pci->fdblock);
                                if (events <= 0) {
                                        cpu_mark_offline(pdi, pci->cpu);
                                        close(pci->fd);
                                        pci->fd = -1;
                                        continue;
                                }

                                if (pdi->last_read_time < last_allowed_time)
                                        last_allowed_time = pdi->last_read_time;

                                events_added += events;
                        }
                }

                if (sort_entries(&youngest))
                        break;

                if (youngest > stopwatch_end)
                        break;

                show_entries_rb(0);

        } while (events_added);

        if (rb_sort_entries)
                show_entries_rb(1);

        return 0;
}

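/*
 * Piped input: keep reading batches from stdin until EOF, sorting and
 * displaying as we go.
 */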
static int do_stdin(void)
{
        unsigned long long youngest;
        int fd, events, fdblock;

        last_allowed_time = -1ULL;
        fd = dup(STDIN_FILENO);
        if (fd == -1) {
                perror("dup stdin");
                return -1;
        }

        fdblock = -1;
        while ((events = read_events(fd, 0, &fdblock)) > 0) {
                read_sequence++;

#if 0
                smallest_seq_read = -1U;
#endif

                if (sort_entries(&youngest))
                        break;

                if (youngest > stopwatch_end)
                        break;

                show_entries_rb(0);
        }

        if (rb_sort_entries)
                show_entries_rb(1);

        close(fd);
        return 0;
}

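/*
 * Emit the end-of-run statistics exactly once, honoring -s (per-process)
 * and -q (suppress per-device/cpu stats).
 */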
static void show_stats(void)
{
        if (!ofp)
                return;
        if (stats_printed)
                return;

        stats_printed = 1;

        if (per_process_stats)
                show_process_stats();

        if (per_device_and_cpu_stats)
                show_device_and_cpu_stats();

        fflush(ofp);
}

static void handle_sigint(__attribute__((__unused__)) int sig)
{
        done = 1;
}

/*
 * Extract start and end times from a string, allowing us to specify
 * a time interval of interest within a trace.
 * Format: "end" (start is zero) or "start:end", both in seconds.
 */
static int find_stopwatch_interval(char *string)
{
        double value;
        char *sp;

        value = strtod(string, &sp);
        if (sp == string) {
                fprintf(stderr, "Invalid stopwatch timer: %s\n", string);
                return 1;
        }
        if (*sp == ':') {
                stopwatch_start = DOUBLE_TO_NANO_ULL(value);
                string = sp + 1;
                value = strtod(string, &sp);
                if (sp == string || *sp != '\0') {
                        fprintf(stderr, "Invalid stopwatch end time: %s\n",
                                string);
                        return 1;
                }
        } else if (*sp != '\0') {
                fprintf(stderr, "Invalid stopwatch start timer: %s\n", string);
                return 1;
        }
        stopwatch_end = DOUBLE_TO_NANO_ULL(value);
        if (stopwatch_end <= stopwatch_start) {
                fprintf(stderr, "Invalid stopwatch interval: %Lu -> %Lu\n",
                        stopwatch_start, stopwatch_end);
                return 1;
        }

        return 0;
}

static char usage_str[] = \
        "[ -i <input name> ] [ -o <output name> ] [ -s ] [ -t ] [ -q ]\n" \
        "[ -w start:stop ] [ -f output format ] [ -F format spec ] [ -v ]\n\n" \
        "\t-i Input file containing trace data, or '-' for stdin\n" \
        "\t-D Directory to prepend to input file names\n" \
        "\t-o Output file. If not given, output is stdout\n" \
        "\t-d Output file. If specified, binary data is written to file\n" \
        "\t-b stdin read batching\n" \
        "\t-s Show per-program io statistics\n" \
        "\t-h Hash processes by name, not pid\n" \
        "\t-t Track individual ios. Will tell you the time a request took\n" \
        "\t   to get queued, to get dispatched, and to get completed\n" \
        "\t-q Quiet. Don't display any stats at the end of the trace\n" \
        "\t-w Only parse data between the given time interval in seconds.\n" \
        "\t   If 'start' isn't given, blkparse defaults the start time to 0\n" \
        "\t-f Output format. Customize the output format. The format field\n" \
        "\t   identifiers can be found in the documentation\n" \
        "\t-F Format specification. Can be found in the documentation\n" \
        "\t-v More verbose for marginal errors\n" \
        "\t-V Print program version info\n\n";

static void usage(char *prog)
{
        fprintf(stderr, "Usage: %s %s %s", prog, blkparse_version, usage_str);
}

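/*
 * Typical invocations, e.g.:
 *
 *      blkparse -i sda                 parse sda.blktrace.* files
 *      blkparse -i sda -o out.txt -s   also emit per-process statistics
 *      blktrace -d /dev/sda -o - | blkparse -i -
 *                                      live parsing over a pipe
 */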
int main(int argc, char *argv[])
{
        char *ofp_buffer;
        int i, c, ret, mode;
        int act_mask_tmp = 0;

        while ((c = getopt_long(argc, argv, S_OPTS, l_opts, NULL)) != -1) {
                switch (c) {
                case 'a':
                        i = find_mask_map(optarg);
                        if (i < 0) {
                                fprintf(stderr, "Invalid action mask %s\n",
                                        optarg);
                                return 1;
                        }
                        act_mask_tmp |= i;
                        break;

                case 'A':
                        if ((sscanf(optarg, "%x", &i) != 1) ||
                                                        !valid_act_opt(i)) {
                                fprintf(stderr,
                                        "Invalid set action mask %s/0x%x\n",
                                        optarg, i);
                                return 1;
                        }
                        act_mask_tmp = i;
                        break;
                case 'i':
                        if (!strcmp(optarg, "-") && !pipeline)
                                pipeline = 1;
                        else if (resize_devices(optarg) != 0)
                                return 1;
                        break;
                case 'D':
                        input_dir = optarg;
                        break;
                case 'o':
                        output_name = optarg;
                        break;
                case 'b':
                        rb_batch = atoi(optarg);
                        if (rb_batch <= 0)
                                rb_batch = RB_BATCH_DEFAULT;
                        break;
                case 's':
                        per_process_stats = 1;
                        break;
                case 't':
                        track_ios = 1;
                        break;
                case 'q':
                        per_device_and_cpu_stats = 0;
                        break;
                case 'w':
                        if (find_stopwatch_interval(optarg) != 0)
                                return 1;
                        break;
                case 'f':
                        set_all_format_specs(optarg);
                        break;
                case 'F':
                        if (add_format_spec(optarg) != 0)
                                return 1;
                        break;
                case 'h':
                        ppi_hash_by_pid = 0;
                        break;
                case 'v':
                        verbose++;
                        break;
                case 'V':
                        printf("%s version %s\n", argv[0], blkparse_version);
                        return 0;
                case 'd':
                        dump_binary = optarg;
                        break;
                default:
                        usage(argv[0]);
                        return 1;
                }
        }

        while (optind < argc) {
                if (!strcmp(argv[optind], "-") && !pipeline)
                        pipeline = 1;
                else if (resize_devices(argv[optind]) != 0)
                        return 1;
                optind++;
        }

        if (!pipeline && !ndevices) {
                usage(argv[0]);
                return 1;
        }

        if (act_mask_tmp != 0)
                act_mask = act_mask_tmp;

        memset(&rb_sort_root, 0, sizeof(rb_sort_root));

        signal(SIGINT, handle_sigint);
        signal(SIGHUP, handle_sigint);
        signal(SIGTERM, handle_sigint);

        setlocale(LC_NUMERIC, "en_US");

        if (!output_name) {
                ofp = fdopen(STDOUT_FILENO, "w");
                mode = _IOLBF;
        } else {
                char ofname[128];

                snprintf(ofname, sizeof(ofname) - 1, "%s", output_name);
                ofp = fopen(ofname, "w");
                mode = _IOFBF;
        }

        if (!ofp) {
                perror("fopen");
                return 1;
        }

        ofp_buffer = malloc(4096);
        if (setvbuf(ofp, ofp_buffer, mode, 4096)) {
                perror("setvbuf");
                return 1;
        }

        if (dump_binary) {
                dump_fd = creat(dump_binary, 0666);
                if (dump_fd < 0) {
                        perror(dump_binary);
                        dump_binary = NULL;
                        return 1;
                }
        }

        if (pipeline)
                ret = do_stdin();
        else
                ret = do_file();

        show_stats();
        free(ofp_buffer);
        return ret;
}