[PATCH] blktrace: need to free ts->buf for networked transfer
[blktrace.git] / blkparse.c
CommitLineData
d956a2cd
JA
1/*
2 * block queue tracing parse application
3 *
4 * Copyright (C) 2005 Jens Axboe <axboe@suse.de>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 *
20 */
d0ca268b
JA
21#include <sys/types.h>
22#include <sys/stat.h>
23#include <unistd.h>
24#include <stdio.h>
25#include <fcntl.h>
26#include <stdlib.h>
8fc0abbc 27#include <string.h>
d5396421 28#include <getopt.h>
412819ce
JA
29#include <errno.h>
30#include <signal.h>
d69db225 31#include <locale.h>
6e0073ed 32#include <libgen.h>
d0ca268b 33
8fc0abbc
JA
34#include "blktrace.h"
35#include "rbtree.h"
bf0720af 36#include "jhash.h"
d0ca268b 37
13d928f0 38static char blkparse_version[] = "0.99";
52724a0e 39
492da111
AB
/*
 * One contiguous range [start, end] of trace sequence numbers that were
 * not (yet) seen on a cpu; kept in a doubly-linked list per cpu.
 */
struct skip_info {
	unsigned long start, end;
	struct skip_info *prev, *next;
};
44
/*
 * Collected state for one traced block device.
 */
struct per_dev_info {
	dev_t dev;			/* device number */
	char *name;			/* device name as supplied by the user */

	int backwards;			/* 'B' when a timestamp ran backwards */
	unsigned long long events;
	unsigned long long first_reported_time;
	unsigned long long last_reported_time;
	unsigned long long last_read_time;
	struct io_stats io_stats;
	unsigned long skips;		/* number of skipped sequence ranges */
	unsigned long long seq_skips;	/* total sequence numbers skipped */
	unsigned int max_depth[2];	/* peak queue depth, per direction */
	unsigned int cur_depth[2];

	struct rb_root rb_track;	/* in-flight io tracking, keyed by sector */

	int nfiles;
	int ncpus;

	unsigned long *cpu_map;		/* bitmap of cpus seen online */
	unsigned int cpu_map_max;	/* bitmap capacity, in bits */

	struct per_cpu_info *cpus;
};
70
/*
 * some duplicated effort here, we can unify this hash and the ppi hash later
 */
struct process_pid_map {
	pid_t pid;
	char comm[16];			/* process name, at most 15 chars + NUL */
	struct process_pid_map *hash_next, *list_next;
};

/* pid -> comm hash table, chained per bucket */
#define PPM_HASH_SHIFT (8)
#define PPM_HASH_SIZE (1 << PPM_HASH_SHIFT)
#define PPM_HASH_MASK (PPM_HASH_SIZE - 1)
static struct process_pid_map *ppm_hash_table[PPM_HASH_SIZE];
84
/*
 * Per-process io statistics, kept both in a hash (for lookup) and a
 * flat list (for the final stats dump).
 */
struct per_process_info {
	struct process_pid_map *ppm;
	struct io_stats io_stats;
	struct per_process_info *hash_next, *list_next;
	int more_than_one;	/* same comm seen under more than one pid */

	/*
	 * individual io stats
	 */
	unsigned long long longest_allocation_wait[2];
	unsigned long long longest_dispatch_wait[2];
	unsigned long long longest_completion_wait[2];
};

#define PPI_HASH_SHIFT (8)
#define PPI_HASH_SIZE (1 << PPI_HASH_SHIFT)
#define PPI_HASH_MASK (PPI_HASH_SIZE - 1)
static struct per_process_info *ppi_hash_table[PPI_HASH_SIZE];
static struct per_process_info *ppi_list;
static int ppi_list_entries;
152f6476 105
/* short option string; must stay in sync with l_opts below */
#define S_OPTS	"a:A:i:o:b:stqw:f:F:vVhD:"
static struct option l_opts[] = {
	{
		.name = "act-mask",
		.has_arg = required_argument,
		.flag = NULL,
		.val = 'a'
	},
	{
		.name = "set-mask",
		.has_arg = required_argument,
		.flag = NULL,
		.val = 'A'
	},
	{
		.name = "input",
		.has_arg = required_argument,
		.flag = NULL,
		.val = 'i'
	},
	{
		.name = "output",
		.has_arg = required_argument,
		.flag = NULL,
		.val = 'o'
	},
	{
		.name = "batch",
		.has_arg = required_argument,
		.flag = NULL,
		.val = 'b'
	},
	{
		.name = "per-program-stats",
		.has_arg = no_argument,
		.flag = NULL,
		.val = 's'
	},
	{
		.name = "track-ios",
		.has_arg = no_argument,
		.flag = NULL,
		.val = 't'
	},
	{
		.name = "quiet",
		.has_arg = no_argument,
		.flag = NULL,
		.val = 'q'
	},
	{
		.name = "stopwatch",
		.has_arg = required_argument,
		.flag = NULL,
		.val = 'w'
	},
	{
		.name = "format",
		.has_arg = required_argument,
		.flag = NULL,
		.val = 'f'
	},
	{
		.name = "format-spec",
		.has_arg = required_argument,
		.flag = NULL,
		.val = 'F'
	},
	{
		.name = "hash-by-name",
		.has_arg = no_argument,
		.flag = NULL,
		.val = 'h'
	},
	{
		.name = "verbose",
		.has_arg = no_argument,
		.flag = NULL,
		.val = 'v'
	},
	{
		.name = "version",
		.has_arg = no_argument,
		.flag = NULL,
		.val = 'V'
	},
	{
		.name = "input-directory",
		.has_arg = required_argument,
		.flag = NULL,
		.val = 'D'
	},
	{
		.name = NULL,	/* terminator */
	}
};
202
7997c5b0
JA
/*
 * for sorting the displayed output
 */
struct trace {
	struct blk_io_trace *bit;	/* the raw event */
	struct rb_node rb_node;		/* node in sort / last-seen trees */
	struct trace *next;		/* free-list and pending-list link */
	unsigned long read_sequence;
};
212
cb2a1a62 213static struct rb_root rb_sort_root;
a649216c
JA
214static unsigned long rb_sort_entries;
215
cb2a1a62
JA
216static struct trace *trace_list;
217
d36421e4
JA
218/*
219 * allocation cache
220 */
221static struct blk_io_trace *bit_alloc_list;
222static struct trace *t_alloc_list;
223
7997c5b0
JA
/*
 * for tracking individual ios
 */
struct io_track {
	struct rb_node rb_node;

	struct process_pid_map *ppm;
	__u64 sector;			/* tree key: current start sector */
	unsigned long long allocation_time;	/* G: request allocated */
	unsigned long long queue_time;		/* I: inserted on queue */
	unsigned long long dispatch_time;	/* D: issued to driver */
	unsigned long long completion_time;	/* C: completed */
};
237
e7c9f3ff
NS
238static int ndevices;
239static struct per_dev_info *devices;
240static char *get_dev_name(struct per_dev_info *, char *, int);
210824c3 241static int trace_rb_insert_last(struct per_dev_info *, struct trace *);
d0ca268b 242
71d5d4c9 243FILE *ofp = NULL;
e7c9f3ff 244static char *output_name;
d1d7f15f 245static char *input_dir;
e7c9f3ff
NS
246
247static unsigned long long genesis_time;
287fa3d6 248static unsigned long long last_allowed_time;
46e6968b 249static unsigned long long stopwatch_start; /* start from zero by default */
bc171579 250static unsigned long long stopwatch_end = -1ULL; /* "infinity" */
a43c1c17 251static unsigned long read_sequence;
152f6476
JA
252
253static int per_process_stats;
cbc927b6 254static int per_device_and_cpu_stats = 1;
7997c5b0 255static int track_ios;
bf0720af 256static int ppi_hash_by_pid = 1;
57ea8602 257static int verbose;
98f8386b 258static unsigned int act_mask = -1U;
cbc927b6 259static int stats_printed;
86368eb5 260int data_is_native = -1;
d0ca268b 261
1d24fc14
JA
262static unsigned int t_alloc_cache;
263static unsigned int bit_alloc_cache;
264
7d747d22 265#define RB_BATCH_DEFAULT (512)
e820abd7 266static unsigned int rb_batch = RB_BATCH_DEFAULT;
79f19470 267
e7c9f3ff
NS
268static int pipeline;
269
412819ce
JA
270#define is_done() (*(volatile int *)(&done))
271static volatile int done;
272
bf0720af
JA
273#define JHASH_RANDOM (0x3af5f2ee)
274
824c2b39
JA
275#define CPUS_PER_LONG (8 * sizeof(unsigned long))
276#define CPU_IDX(cpu) ((cpu) / CPUS_PER_LONG)
277#define CPU_BIT(cpu) ((cpu) & (CPUS_PER_LONG - 1))
278
210824c3
JA
/*
 * Grow pdi->cpus so that index "cpu" is valid (ncpus becomes cpu + 1).
 * Newly added slots are zeroed and then given their defaults:
 * fd == -1, empty rb_last tree, last_sequence == -1.
 * Exits the program on allocation failure.
 */
static void resize_cpu_info(struct per_dev_info *pdi, int cpu)
{
	struct per_cpu_info *cpus = pdi->cpus;
	int ncpus = pdi->ncpus;
	int new_count = cpu + 1;
	int new_space, size;
	char *new_start;

	size = new_count * sizeof(struct per_cpu_info);
	cpus = realloc(cpus, size);
	if (!cpus) {
		char name[20];
		fprintf(stderr, "Out of memory, CPU info for device %s (%d)\n",
			get_dev_name(pdi, name, sizeof(name)), size);
		exit(1);
	}

	/* zero only the tail that realloc added */
	new_start = (char *)cpus + (ncpus * sizeof(struct per_cpu_info));
	new_space = (new_count - ncpus) * sizeof(struct per_cpu_info);
	memset(new_start, 0, new_space);

	pdi->ncpus = new_count;
	pdi->cpus = cpus;

	/*
	 * fd == 0 marks a slot that was never set up (courtesy of the
	 * memset above); give those slots their defaults
	 */
	for (new_count = 0; new_count < pdi->ncpus; new_count++) {
		struct per_cpu_info *pci = &pdi->cpus[new_count];

		if (!pci->fd) {
			pci->fd = -1;
			memset(&pci->rb_last, 0, sizeof(pci->rb_last));
			pci->rb_last_entries = 0;
			pci->last_sequence = -1;
		}
	}
}
314
315static struct per_cpu_info *get_cpu_info(struct per_dev_info *pdi, int cpu)
316{
317 struct per_cpu_info *pci;
318
319 if (cpu >= pdi->ncpus)
320 resize_cpu_info(pdi, cpu);
321
322 pci = &pdi->cpus[cpu];
323 pci->cpu = cpu;
324 return pci;
325}
326
327
328static int resize_devices(char *name)
329{
330 int size = (ndevices + 1) * sizeof(struct per_dev_info);
331
332 devices = realloc(devices, size);
333 if (!devices) {
334 fprintf(stderr, "Out of memory, device %s (%d)\n", name, size);
335 return 1;
336 }
337 memset(&devices[ndevices], 0, sizeof(struct per_dev_info));
338 devices[ndevices].name = name;
339 ndevices++;
340 return 0;
341}
342
/*
 * Find the per_dev_info for "dev", creating one on first sight.
 * A slot whose dev is still 0 (name known, number not yet seen) is
 * claimed for this device. Returns NULL on allocation failure.
 */
static struct per_dev_info *get_dev_info(dev_t dev)
{
	struct per_dev_info *pdi;
	int i;

	for (i = 0; i < ndevices; i++) {
		if (!devices[i].dev)
			devices[i].dev = dev;
		if (devices[i].dev == dev)
			return &devices[i];
	}

	if (resize_devices(NULL))
		return NULL;

	pdi = &devices[ndevices - 1];
	pdi->dev = dev;
	pdi->first_reported_time = 0;
	pdi->last_read_time = 0;

	return pdi;
}
365
66930177 366static void insert_skip(struct per_cpu_info *pci, unsigned long start,
492da111
AB
367 unsigned long end)
368{
369 struct skip_info *sip;
370
66930177 371 for (sip = pci->skips_tail; sip != NULL; sip = sip->prev) {
492da111
AB
372 if (end == (sip->start - 1)) {
373 sip->start = start;
374 return;
375 } else if (start == (sip->end + 1)) {
376 sip->end = end;
377 return;
378 }
379 }
380
381 sip = malloc(sizeof(struct skip_info));
382 sip->start = start;
383 sip->end = end;
384 sip->prev = sip->next = NULL;
66930177
JA
385 if (pci->skips_tail == NULL)
386 pci->skips_head = pci->skips_tail = sip;
492da111 387 else {
66930177
JA
388 sip->prev = pci->skips_tail;
389 pci->skips_tail->next = sip;
390 pci->skips_tail = sip;
492da111
AB
391 }
392}
393
/*
 * Unlink "sip" from the cpu's doubly-linked skip list and free it,
 * fixing up head/tail pointers as needed.
 */
static void remove_sip(struct per_cpu_info *pci, struct skip_info *sip)
{
	if (sip->prev == NULL) {
		if (sip->next == NULL)
			pci->skips_head = pci->skips_tail = NULL;
		else {
			pci->skips_head = sip->next;
			sip->next->prev = NULL;
		}
	} else if (sip->next == NULL) {
		pci->skips_tail = sip->prev;
		sip->prev->next = NULL;
	} else {
		sip->prev->next = sip->next;
		sip->next->prev = sip->prev;
	}

	sip->prev = sip->next = NULL;
	free(sip);
}
414
415#define IN_SKIP(sip,seq) (((sip)->start <= (seq)) && ((seq) <= sip->end))
66930177 416static int check_current_skips(struct per_cpu_info *pci, unsigned long seq)
492da111
AB
417{
418 struct skip_info *sip;
419
66930177
JA
420 for (sip = pci->skips_tail; sip != NULL; sip = sip->prev) {
421 if (IN_SKIP(sip, seq)) {
492da111
AB
422 if (sip->start == seq) {
423 if (sip->end == seq)
66930177 424 remove_sip(pci, sip);
492da111
AB
425 else
426 sip->start += 1;
427 } else if (sip->end == seq)
428 sip->end -= 1;
429 else {
430 sip->end = seq - 1;
66930177 431 insert_skip(pci, seq + 1, sip->end);
492da111
AB
432 }
433 return 1;
434 }
435 }
66930177 436
492da111
AB
437 return 0;
438}
439
/*
 * Recount pdi->skips and pdi->seq_skips from all per-cpu skip lists,
 * reporting each skipped range on stderr in verbose mode.
 */
static void collect_pdi_skips(struct per_dev_info *pdi)
{
	struct skip_info *sip;
	int cpu;

	pdi->skips = 0;
	pdi->seq_skips = 0;

	for (cpu = 0; cpu < pdi->ncpus; cpu++) {
		struct per_cpu_info *pci = &pdi->cpus[cpu];

		for (sip = pci->skips_head; sip != NULL; sip = sip->next) {
			pdi->skips++;
			pdi->seq_skips += (sip->end - sip->start + 1);
			if (verbose)
				fprintf(stderr,"(%d,%d): skipping %lu -> %lu\n",
					MAJOR(pdi->dev), MINOR(pdi->dev),
					sip->start, sip->end);
		}
	}
}
461
824c2b39
JA
462static void cpu_mark_online(struct per_dev_info *pdi, unsigned int cpu)
463{
464 if (cpu >= pdi->cpu_map_max || !pdi->cpu_map) {
465 int new_max = (cpu + CPUS_PER_LONG) & ~(CPUS_PER_LONG - 1);
466 unsigned long *map = malloc(new_max / sizeof(long));
467
468 memset(map, 0, new_max / sizeof(long));
469
470 if (pdi->cpu_map) {
471 memcpy(map, pdi->cpu_map, pdi->cpu_map_max / sizeof(long));
472 free(pdi->cpu_map);
473 }
474
475 pdi->cpu_map = map;
476 pdi->cpu_map_max = new_max;
477 }
478
479 pdi->cpu_map[CPU_IDX(cpu)] |= (1UL << CPU_BIT(cpu));
480}
481
/* clear this cpu's bit in the device's online map */
static inline void cpu_mark_offline(struct per_dev_info *pdi, int cpu)
{
	pdi->cpu_map[CPU_IDX(cpu)] &= ~(1UL << CPU_BIT(cpu));
}
486
/* non-zero if this cpu's bit is set in the device's online map */
static inline int cpu_is_online(struct per_dev_info *pdi, int cpu)
{
	return (pdi->cpu_map[CPU_IDX(cpu)] & (1UL << CPU_BIT(cpu))) != 0;
}
491
bfc70ad5
JA
/* hash a pid into a process_pid_map bucket index */
static inline int ppm_hash_pid(pid_t pid)
{
	return jhash_1word(pid, JHASH_RANDOM) & PPM_HASH_MASK;
}
496
497static struct process_pid_map *find_ppm(pid_t pid)
498{
499 const int hash_idx = ppm_hash_pid(pid);
500 struct process_pid_map *ppm;
501
502 ppm = ppm_hash_table[hash_idx];
503 while (ppm) {
504 if (ppm->pid == pid)
505 return ppm;
506
507 ppm = ppm->hash_next;
508 }
509
510 return NULL;
511}
512
513static void add_ppm_hash(pid_t pid, const char *name)
514{
515 const int hash_idx = ppm_hash_pid(pid);
516 struct process_pid_map *ppm;
517
518 ppm = find_ppm(pid);
248eac8f
JA
519 if (!ppm) {
520 ppm = malloc(sizeof(*ppm));
521 memset(ppm, 0, sizeof(*ppm));
522 ppm->pid = pid;
523 strcpy(ppm->comm, name);
524 ppm->hash_next = ppm_hash_table[hash_idx];
525 ppm_hash_table[hash_idx] = ppm;
bfc70ad5 526 }
bfc70ad5
JA
527}
528
529char *find_process_name(pid_t pid)
530{
531 struct process_pid_map *ppm = find_ppm(pid);
532
533 if (ppm)
534 return ppm->comm;
535
536 return NULL;
537}
538
/* hash a pid into a per_process_info bucket index */
static inline int ppi_hash_pid(pid_t pid)
{
	return jhash_1word(pid, JHASH_RANDOM) & PPI_HASH_MASK;
}
543
/* hash a (16-byte) command name into a per_process_info bucket index */
static inline int ppi_hash_name(const char *name)
{
	return jhash(name, 16, JHASH_RANDOM) & PPI_HASH_MASK;
}
548
549static inline int ppi_hash(struct per_process_info *ppi)
550{
2990e589
JA
551 struct process_pid_map *ppm = ppi->ppm;
552
bf0720af 553 if (ppi_hash_by_pid)
2990e589 554 return ppi_hash_pid(ppm->pid);
bf0720af 555
2990e589 556 return ppi_hash_name(ppm->comm);
152f6476
JA
557}
558
/* push the entry onto the front of its hash chain */
static inline void add_ppi_to_hash(struct per_process_info *ppi)
{
	const int hash_idx = ppi_hash(ppi);

	ppi->hash_next = ppi_hash_table[hash_idx];
	ppi_hash_table[hash_idx] = ppi;
}
566
/* push the entry onto the global list and bump the entry count */
static inline void add_ppi_to_list(struct per_process_info *ppi)
{
	ppi->list_next = ppi_list;
	ppi_list = ppi;
	ppi_list_entries++;
}
573
bfc70ad5 574static struct per_process_info *find_ppi_by_name(char *name)
bf0720af
JA
575{
576 const int hash_idx = ppi_hash_name(name);
577 struct per_process_info *ppi;
578
579 ppi = ppi_hash_table[hash_idx];
580 while (ppi) {
2990e589
JA
581 struct process_pid_map *ppm = ppi->ppm;
582
583 if (!strcmp(ppm->comm, name))
bf0720af
JA
584 return ppi;
585
586 ppi = ppi->hash_next;
587 }
588
589 return NULL;
590}
591
9e4cd1b8 592static struct per_process_info *find_ppi_by_pid(pid_t pid)
152f6476 593{
bf0720af 594 const int hash_idx = ppi_hash_pid(pid);
152f6476
JA
595 struct per_process_info *ppi;
596
bf0720af 597 ppi = ppi_hash_table[hash_idx];
152f6476 598 while (ppi) {
2990e589
JA
599 struct process_pid_map *ppm = ppi->ppm;
600
601 if (ppm->pid == pid)
152f6476
JA
602 return ppi;
603
604 ppi = ppi->hash_next;
605 }
606
607 return NULL;
608}
609
/*
 * Find the per-process entry for "pid". In hash-by-name mode the match
 * is done on the command name instead; if the same name shows up under
 * more than one pid, flag the entry so the stats dump can say so.
 */
static struct per_process_info *find_ppi(pid_t pid)
{
	struct per_process_info *ppi;
	char *name;

	if (ppi_hash_by_pid)
		return find_ppi_by_pid(pid);

	name = find_process_name(pid);
	if (!name)
		return NULL;

	ppi = find_ppi_by_name(name);
	if (ppi && ppi->ppm->pid != pid)
		ppi->more_than_one = 1;

	return ppi;
}
628
210824c3
JA
629/*
630 * struct trace and blktrace allocation cache, we do potentially
631 * millions of mallocs for these structures while only using at most
632 * a few thousand at the time
633 */
634static inline void t_free(struct trace *t)
635{
636 if (t_alloc_cache < 1024) {
637 t->next = t_alloc_list;
638 t_alloc_list = t;
639 t_alloc_cache++;
640 } else
641 free(t);
642}
643
644static inline struct trace *t_alloc(void)
645{
646 struct trace *t = t_alloc_list;
647
648 if (t) {
649 t_alloc_list = t->next;
650 t_alloc_cache--;
651 return t;
652 }
653
654 return malloc(sizeof(*t));
655}
656
/*
 * Recycle a blk_io_trace into the allocation cache. Only fixed-size
 * traces (no trailing pdu payload) are cached, and the cache is capped
 * at 1024 entries; everything else is freed outright.
 */
static inline void bit_free(struct blk_io_trace *bit)
{
	if (bit_alloc_cache < 1024 && !bit->pdu_len) {
		/*
		 * abuse a 64-bit field for a next pointer for the free item
		 */
		bit->time = (__u64) (unsigned long) bit_alloc_list;
		bit_alloc_list = (struct blk_io_trace *) bit;
		bit_alloc_cache++;
	} else
		free(bit);
}
669
/*
 * Pop a blk_io_trace off the allocation cache (the next pointer lives
 * in the abused 64-bit time field - see bit_free()), falling back to
 * malloc when the cache is empty.
 */
static inline struct blk_io_trace *bit_alloc(void)
{
	struct blk_io_trace *bit = bit_alloc_list;

	if (bit) {
		bit_alloc_list = (struct blk_io_trace *) (unsigned long) \
				 bit->time;
		bit_alloc_cache--;
		return bit;
	}

	return malloc(sizeof(*bit));
}
683
/*
 * Drop a trace from its cpu's last-seen tree and release both the
 * trace and its blk_io_trace back to their caches.
 */
static inline void __put_trace_last(struct per_dev_info *pdi, struct trace *t)
{
	struct per_cpu_info *pci = get_cpu_info(pdi, t->bit->cpu);

	rb_erase(&t->rb_node, &pci->rb_last);
	pci->rb_last_entries--;

	bit_free(t->bit);
	t_free(t);
}
694
/*
 * Move a trace from the global sort tree into its cpu's last-seen
 * tree (kept around for sequence back-references).
 */
static void put_trace(struct per_dev_info *pdi, struct trace *t)
{
	rb_erase(&t->rb_node, &rb_sort_root);
	rb_sort_entries--;

	trace_rb_insert_last(pdi, t);
}
702
/*
 * Insert "t" into the given rb tree, ordered by (time, device,
 * sequence). Equal-or-greater sequence keys go right, so insertion
 * order is preserved among equals. Always succeeds (returns 0).
 */
static inline int trace_rb_insert(struct trace *t, struct rb_root *root)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct trace *__t;

	while (*p) {
		parent = *p;

		__t = rb_entry(parent, struct trace, rb_node);

		if (t->bit->time < __t->bit->time)
			p = &(*p)->rb_left;
		else if (t->bit->time > __t->bit->time)
			p = &(*p)->rb_right;
		else if (t->bit->device < __t->bit->device)
			p = &(*p)->rb_left;
		else if (t->bit->device > __t->bit->device)
			p = &(*p)->rb_right;
		else if (t->bit->sequence < __t->bit->sequence)
			p = &(*p)->rb_left;
		else /* >= sequence */
			p = &(*p)->rb_right;
	}

	rb_link_node(&t->rb_node, parent, p);
	rb_insert_color(&t->rb_node, root);
	return 0;
}
732
2a1b3424 733static inline int trace_rb_insert_sort(struct trace *t)
e3556946 734{
89482da6 735 if (!trace_rb_insert(t, &rb_sort_root)) {
2a1b3424
JA
736 rb_sort_entries++;
737 return 0;
738 }
739
740 return 1;
741}
742
/*
 * Add "t" to its cpu's last-seen tree; when the tree outgrows the
 * batch limit, retire the oldest (leftmost) entry. Returns 1 if the
 * insert failed, 0 otherwise.
 */
static int trace_rb_insert_last(struct per_dev_info *pdi, struct trace *t)
{
	struct per_cpu_info *pci = get_cpu_info(pdi, t->bit->cpu);

	if (trace_rb_insert(t, &pci->rb_last))
		return 1;

	pci->rb_last_entries++;

	if (pci->rb_last_entries > rb_batch * pdi->nfiles) {
		struct rb_node *n = rb_first(&pci->rb_last);

		t = rb_entry(n, struct trace, rb_node);
		__put_trace_last(pdi, t);
	}

	return 0;
}
761
/*
 * Look up a trace by (device, sequence) in "root". The tree is
 * primarily time-ordered, so an exact descent can miss; with "order"
 * set, scan a few entries past the last node visited to compensate.
 * Returns NULL when no match is found.
 */
static struct trace *trace_rb_find(dev_t device, unsigned long sequence,
				   struct rb_root *root, int order)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct trace *__t;

	while (n) {
		__t = rb_entry(n, struct trace, rb_node);
		prev = n;

		if (device < __t->bit->device)
			n = n->rb_left;
		else if (device > __t->bit->device)
			n = n->rb_right;
		else if (sequence < __t->bit->sequence)
			n = n->rb_left;
		else if (sequence > __t->bit->sequence)
			n = n->rb_right;
		else
			return __t;
	}

	/*
	 * hack - the list may not be sequence ordered because some
	 * events don't have sequence and time matched. so we end up
	 * being a little off in the rb lookup here, because we don't
	 * know the time we are looking for. compensate by browsing
	 * a little ahead from the last entry to find the match
	 */
	if (order && prev) {
		int max = 5;

		while (((n = rb_next(prev)) != NULL) && max--) {
			__t = rb_entry(n, struct trace, rb_node);

			if (__t->bit->device == device &&
			    __t->bit->sequence == sequence)
				return __t;

			prev = n;
		}
	}

	return NULL;
}
808
/* look up a sequence number in this cpu's last-seen tree */
static inline struct trace *trace_rb_find_last(struct per_dev_info *pdi,
					       struct per_cpu_info *pci,
					       unsigned long seq)
{
	return trace_rb_find(pdi->dev, seq, &pci->rb_last, 0);
}
815
/*
 * Insert an io_track keyed by start sector. A duplicate sector means
 * two in-flight ios on the same sector - report it and return 1
 * without inserting; 0 on success.
 */
static inline int track_rb_insert(struct per_dev_info *pdi,struct io_track *iot)
{
	struct rb_node **p = &pdi->rb_track.rb_node;
	struct rb_node *parent = NULL;
	struct io_track *__iot;

	while (*p) {
		parent = *p;
		__iot = rb_entry(parent, struct io_track, rb_node);

		if (iot->sector < __iot->sector)
			p = &(*p)->rb_left;
		else if (iot->sector > __iot->sector)
			p = &(*p)->rb_right;
		else {
			fprintf(stderr,
				"sector alias (%Lu) on device %d,%d!\n",
				(unsigned long long) iot->sector,
				MAJOR(pdi->dev), MINOR(pdi->dev));
			return 1;
		}
	}

	rb_link_node(&iot->rb_node, parent, p);
	rb_insert_color(&iot->rb_node, &pdi->rb_track);
	return 0;
}
843
f7bd1a9b 844static struct io_track *__find_track(struct per_dev_info *pdi, __u64 sector)
7997c5b0 845{
f7bd1a9b 846 struct rb_node *n = pdi->rb_track.rb_node;
7997c5b0
JA
847 struct io_track *__iot;
848
2a1b3424
JA
849 while (n) {
850 __iot = rb_entry(n, struct io_track, rb_node);
7997c5b0 851
f7bd1a9b 852 if (sector < __iot->sector)
2a1b3424 853 n = n->rb_left;
7997c5b0 854 else if (sector > __iot->sector)
2a1b3424 855 n = n->rb_right;
7997c5b0
JA
856 else
857 return __iot;
858 }
859
860 return NULL;
861}
862
9e4cd1b8 863static struct io_track *find_track(struct per_dev_info *pdi, pid_t pid,
bfc70ad5 864 __u64 sector)
7997c5b0 865{
916b5501 866 struct io_track *iot;
7997c5b0 867
f7bd1a9b 868 iot = __find_track(pdi, sector);
7997c5b0
JA
869 if (!iot) {
870 iot = malloc(sizeof(*iot));
2990e589 871 iot->ppm = find_ppm(pid);
7997c5b0 872 iot->sector = sector;
f7bd1a9b 873 track_rb_insert(pdi, iot);
7997c5b0
JA
874 }
875
876 return iot;
877}
878
f7bd1a9b
JA
/*
 * A front merge moves the tracked io's start sector down by the size
 * of the merged request: re-key the io_track under the new sector.
 */
static void log_track_frontmerge(struct per_dev_info *pdi,
				 struct blk_io_trace *t)
{
	struct io_track *iot;

	if (!track_ios)
		return;

	/* the existing io starts right after the merged request */
	iot = __find_track(pdi, t->sector + t_sec(t));
	if (!iot) {
		if (verbose)
			fprintf(stderr, "merge not found for (%d,%d): %llu\n",
				MAJOR(pdi->dev), MINOR(pdi->dev),
				(unsigned long long) t->sector + t_sec(t));
		return;
	}

	/* re-insert under the new, lower start sector */
	rb_erase(&iot->rb_node, &pdi->rb_track);
	iot->sector -= t_sec(t);
	track_rb_insert(pdi, iot);
}
900
/* getrq event - remember when this io's request was allocated */
static void log_track_getrq(struct per_dev_info *pdi, struct blk_io_trace *t)
{
	struct io_track *iot;

	if (!track_ios)
		return;

	iot = find_track(pdi, t->pid, t->sector);
	iot->allocation_time = t->time;
}
911
753f9091
JA
912static inline int is_remapper(struct per_dev_info *pdi)
913{
914 int major = MAJOR(pdi->dev);
915
916 return (major == 253 || major == 9);
917}
918
/*
 * for md/dm setups, the interesting cycle is Q -> C. So track queueing
 * time here, as dispatch time
 */
static void log_track_queue(struct per_dev_info *pdi, struct blk_io_trace *t)
{
	struct io_track *iot;

	if (!track_ios)
		return;
	/* only meaningful for remapping devices (dm/md) */
	if (!is_remapper(pdi))
		return;

	iot = find_track(pdi, t->pid, t->sector);
	iot->dispatch_time = t->time;
}
935
/*
 * return time between rq allocation and insertion
 */
static unsigned long long log_track_insert(struct per_dev_info *pdi,
					   struct blk_io_trace *t)
{
	unsigned long long elapsed;
	struct io_track *iot;

	if (!track_ios)
		return -1;

	iot = find_track(pdi, t->pid, t->sector);
	iot->queue_time = t->time;

	/* no preceding allocation event seen for this io */
	if (!iot->allocation_time)
		return -1;

	elapsed = iot->queue_time - iot->allocation_time;

	if (per_process_stats) {
		struct per_process_info *ppi = find_ppi(iot->ppm->pid);
		/* w: 1 for writes, 0 for reads */
		int w = (t->action & BLK_TC_ACT(BLK_TC_WRITE)) != 0;

		if (ppi && elapsed > ppi->longest_allocation_wait[w])
			ppi->longest_allocation_wait[w] = elapsed;
	}

	return elapsed;
}
966
/*
 * return time between queue and issue
 */
static unsigned long long log_track_issue(struct per_dev_info *pdi,
					  struct blk_io_trace *t)
{
	unsigned long long elapsed;
	struct io_track *iot;

	if (!track_ios)
		return -1;
	/* only fs requests are tracked */
	if ((t->action & BLK_TC_ACT(BLK_TC_FS)) == 0)
		return -1;

	iot = __find_track(pdi, t->sector);
	if (!iot) {
		if (verbose)
			fprintf(stderr, "issue not found for (%d,%d): %llu\n",
				MAJOR(pdi->dev), MINOR(pdi->dev),
				(unsigned long long) t->sector);
		return -1;
	}

	iot->dispatch_time = t->time;
	elapsed = iot->dispatch_time - iot->queue_time;

	if (per_process_stats) {
		struct per_process_info *ppi = find_ppi(iot->ppm->pid);
		/* w: 1 for writes, 0 for reads */
		int w = (t->action & BLK_TC_ACT(BLK_TC_WRITE)) != 0;

		if (ppi && elapsed > ppi->longest_dispatch_wait[w])
			ppi->longest_dispatch_wait[w] = elapsed;
	}

	return elapsed;
}
1003
/*
 * return time between dispatch and complete
 */
static unsigned long long log_track_complete(struct per_dev_info *pdi,
					     struct blk_io_trace *t)
{
	unsigned long long elapsed;
	struct io_track *iot;

	if (!track_ios)
		return -1;

	iot = __find_track(pdi, t->sector);
	if (!iot) {
		if (verbose)
			fprintf(stderr,"complete not found for (%d,%d): %llu\n",
				MAJOR(pdi->dev), MINOR(pdi->dev),
				(unsigned long long) t->sector);
		return -1;
	}

	iot->completion_time = t->time;
	elapsed = iot->completion_time - iot->dispatch_time;

	if (per_process_stats) {
		struct per_process_info *ppi = find_ppi(iot->ppm->pid);
		/* w: 1 for writes, 0 for reads */
		int w = (t->action & BLK_TC_ACT(BLK_TC_WRITE)) != 0;

		if (ppi && elapsed > ppi->longest_completion_wait[w])
			ppi->longest_completion_wait[w] = elapsed;
	}

	/*
	 * kill the trace, we don't need it after completion
	 */
	rb_erase(&iot->rb_node, &pdi->rb_track);
	free(iot);

	return elapsed;
}
1044
1045
9e4cd1b8 1046static struct io_stats *find_process_io_stats(pid_t pid)
152f6476 1047{
bfc70ad5 1048 struct per_process_info *ppi = find_ppi(pid);
152f6476
JA
1049
1050 if (!ppi) {
1051 ppi = malloc(sizeof(*ppi));
1052 memset(ppi, 0, sizeof(*ppi));
2990e589 1053 ppi->ppm = find_ppm(pid);
bfc70ad5
JA
1054 add_ppi_to_hash(ppi);
1055 add_ppi_to_list(ppi);
152f6476
JA
1056 }
1057
1058 return &ppi->io_stats;
1059}
1060
e7c9f3ff
NS
/*
 * Format a display name for the device into "buffer": the user-given
 * name when present, otherwise "major,minor". Returns "buffer".
 */
static char *get_dev_name(struct per_dev_info *pdi, char *buffer, int size)
{
	if (pdi->name)
		snprintf(buffer, size, "%s", pdi->name);
	else
		snprintf(buffer, size, "%d,%d",MAJOR(pdi->dev),MINOR(pdi->dev));
	return buffer;
}
1069
e7c9f3ff 1070static void check_time(struct per_dev_info *pdi, struct blk_io_trace *bit)
cfab07eb
AB
1071{
1072 unsigned long long this = bit->time;
e7c9f3ff 1073 unsigned long long last = pdi->last_reported_time;
cfab07eb 1074
e7c9f3ff
NS
1075 pdi->backwards = (this < last) ? 'B' : ' ';
1076 pdi->last_reported_time = this;
cfab07eb
AB
1077}
1078
152f6476
JA
/*
 * Merge accounting: bump the merge counter and fold the merged bytes
 * into the queued KB totals.
 */
static inline void __account_m(struct io_stats *ios, struct blk_io_trace *t,
			       int rw)
{
	if (rw) {
		ios->mwrites++;
		ios->qwrite_kb += t_kb(t);
	} else {
		ios->mreads++;
		ios->qread_kb += t_kb(t);
	}
}
1090
1091static inline void account_m(struct blk_io_trace *t, struct per_cpu_info *pci,
1092 int rw)
1093{
1094 __account_m(&pci->io_stats, t, rw);
1095
1096 if (per_process_stats) {
bfc70ad5 1097 struct io_stats *ios = find_process_io_stats(t->pid);
152f6476
JA
1098
1099 __account_m(ios, t, rw);
d0ca268b
JA
1100 }
1101}
1102
b6076a9b
JA
/* queue accounting: count the request and its KB by direction */
static inline void __account_queue(struct io_stats *ios, struct blk_io_trace *t,
				   int rw)
{
	if (rw) {
		ios->qwrites++;
		ios->qwrite_kb += t_kb(t);
	} else {
		ios->qreads++;
		ios->qread_kb += t_kb(t);
	}
}
1114
b6076a9b
JA
1115static inline void account_queue(struct blk_io_trace *t,
1116 struct per_cpu_info *pci, int rw)
152f6476 1117{
b6076a9b 1118 __account_queue(&pci->io_stats, t, rw);
152f6476
JA
1119
1120 if (per_process_stats) {
bfc70ad5 1121 struct io_stats *ios = find_process_io_stats(t->pid);
152f6476 1122
b6076a9b 1123 __account_queue(ios, t, rw);
d0ca268b
JA
1124 }
1125}
1126
/* completion accounting: count the request and its KB by direction */
static inline void __account_c(struct io_stats *ios, int rw, int bytes)
{
	if (rw) {
		ios->cwrites++;
		ios->cwrite_kb += bytes >> 10;
	} else {
		ios->creads++;
		ios->cread_kb += bytes >> 10;
	}
}
1137
1138static inline void account_c(struct blk_io_trace *t, struct per_cpu_info *pci,
1139 int rw, int bytes)
1140{
1141 __account_c(&pci->io_stats, rw, bytes);
1142
1143 if (per_process_stats) {
bfc70ad5 1144 struct io_stats *ios = find_process_io_stats(t->pid);
152f6476
JA
1145
1146 __account_c(ios, rw, bytes);
d0ca268b
JA
1147 }
1148}
1149
b6076a9b
JA
/* issue accounting: count the request and its KB by direction */
static inline void __account_issue(struct io_stats *ios, int rw,
				   unsigned int bytes)
{
	if (rw) {
		ios->iwrites++;
		ios->iwrite_kb += bytes >> 10;
	} else {
		ios->ireads++;
		ios->iread_kb += bytes >> 10;
	}
}
1161
b6076a9b
JA
1162static inline void account_issue(struct blk_io_trace *t,
1163 struct per_cpu_info *pci, int rw)
d0ca268b 1164{
b6076a9b 1165 __account_issue(&pci->io_stats, rw, t->bytes);
152f6476
JA
1166
1167 if (per_process_stats) {
bfc70ad5 1168 struct io_stats *ios = find_process_io_stats(t->pid);
d5396421 1169
b6076a9b 1170 __account_issue(ios, rw, t->bytes);
152f6476
JA
1171 }
1172}
1173
06639b27
JA
1174static inline void __account_unplug(struct io_stats *ios, int timer)
1175{
1176 if (timer)
1177 ios->timer_unplugs++;
1178 else
1179 ios->io_unplugs++;
1180}
1181
1182static inline void account_unplug(struct blk_io_trace *t,
1183 struct per_cpu_info *pci, int timer)
1184{
1185 __account_unplug(&pci->io_stats, timer);
1186
1187 if (per_process_stats) {
bfc70ad5 1188 struct io_stats *ios = find_process_io_stats(t->pid);
06639b27
JA
1189
1190 __account_unplug(ios, timer);
1191 }
1192}
1193
4054070a
JA
1194static inline void __account_requeue(struct io_stats *ios,
1195 struct blk_io_trace *t, int rw)
1196{
1197 if (rw) {
1198 ios->wrqueue++;
1199 ios->iwrite_kb -= t_kb(t);
1200 } else {
1201 ios->rrqueue++;
1202 ios->iread_kb -= t_kb(t);
1203 }
1204}
1205
1206static inline void account_requeue(struct blk_io_trace *t,
1207 struct per_cpu_info *pci, int rw)
1208{
1209 __account_requeue(&pci->io_stats, t, rw);
1210
1211 if (per_process_stats) {
bfc70ad5 1212 struct io_stats *ios = find_process_io_stats(t->pid);
4054070a
JA
1213
1214 __account_requeue(ios, t, rw);
1215 }
1216}
1217
f7bd1a9b
JA
1218static void log_complete(struct per_dev_info *pdi, struct per_cpu_info *pci,
1219 struct blk_io_trace *t, char *act)
ab197ca7 1220{
f7bd1a9b 1221 process_fmt(act, pci, t, log_track_complete(pdi, t), 0, NULL);
ab197ca7
AB
1222}
1223
f7bd1a9b
JA
1224static void log_insert(struct per_dev_info *pdi, struct per_cpu_info *pci,
1225 struct blk_io_trace *t, char *act)
b6076a9b 1226{
f7bd1a9b 1227 process_fmt(act, pci, t, log_track_insert(pdi, t), 0, NULL);
b6076a9b
JA
1228}
1229
ab197ca7
AB
1230static void log_queue(struct per_cpu_info *pci, struct blk_io_trace *t,
1231 char *act)
1232{
b6076a9b 1233 process_fmt(act, pci, t, -1, 0, NULL);
ab197ca7 1234}
2e3e8ded 1235
f7bd1a9b
JA
1236static void log_issue(struct per_dev_info *pdi, struct per_cpu_info *pci,
1237 struct blk_io_trace *t, char *act)
ab197ca7 1238{
f7bd1a9b 1239 process_fmt(act, pci, t, log_track_issue(pdi, t), 0, NULL);
d0ca268b
JA
1240}
1241
f7bd1a9b
JA
1242static void log_merge(struct per_dev_info *pdi, struct per_cpu_info *pci,
1243 struct blk_io_trace *t, char *act)
d0ca268b 1244{
a01516de 1245 if (act[0] == 'F')
f7bd1a9b 1246 log_track_frontmerge(pdi, t);
2e3e8ded 1247
ab197ca7 1248 process_fmt(act, pci, t, -1ULL, 0, NULL);
d0ca268b
JA
1249}
1250
dfe34da1 1251static void log_action(struct per_cpu_info *pci, struct blk_io_trace *t,
3639a11e 1252 char *act)
dfe34da1 1253{
ab197ca7 1254 process_fmt(act, pci, t, -1ULL, 0, NULL);
dfe34da1
JA
1255}
1256
d5396421 1257static void log_generic(struct per_cpu_info *pci, struct blk_io_trace *t,
3639a11e 1258 char *act)
d0ca268b 1259{
ab197ca7 1260 process_fmt(act, pci, t, -1ULL, 0, NULL);
d0ca268b
JA
1261}
1262
ab197ca7 1263static void log_unplug(struct per_cpu_info *pci, struct blk_io_trace *t,
3639a11e 1264 char *act)
67e14fdc 1265{
ab197ca7 1266 process_fmt(act, pci, t, -1ULL, 0, NULL);
67e14fdc
JA
1267}
1268
93f1c611
JA
1269static void log_split(struct per_cpu_info *pci, struct blk_io_trace *t,
1270 char *act)
1271{
1272 process_fmt(act, pci, t, -1ULL, 0, NULL);
1273}
1274
ab197ca7 1275static void log_pc(struct per_cpu_info *pci, struct blk_io_trace *t, char *act)
d0ca268b 1276{
ab197ca7 1277 unsigned char *buf = (unsigned char *) t + sizeof(*t);
d0ca268b 1278
ab197ca7 1279 process_fmt(act, pci, t, -1ULL, t->pdu_len, buf);
d0ca268b
JA
1280}
1281
ff3a732c 1282static void dump_trace_pc(struct blk_io_trace *t, struct per_cpu_info *pci)
d0ca268b 1283{
56f2af81
JA
1284 int act = t->action & 0xffff;
1285
1286 switch (act) {
d0ca268b 1287 case __BLK_TA_QUEUE:
3639a11e 1288 log_generic(pci, t, "Q");
d0ca268b
JA
1289 break;
1290 case __BLK_TA_GETRQ:
3639a11e 1291 log_generic(pci, t, "G");
d0ca268b
JA
1292 break;
1293 case __BLK_TA_SLEEPRQ:
3639a11e 1294 log_generic(pci, t, "S");
d0ca268b
JA
1295 break;
1296 case __BLK_TA_REQUEUE:
3639a11e 1297 log_generic(pci, t, "R");
d0ca268b
JA
1298 break;
1299 case __BLK_TA_ISSUE:
ab197ca7 1300 log_pc(pci, t, "D");
d0ca268b
JA
1301 break;
1302 case __BLK_TA_COMPLETE:
3639a11e 1303 log_pc(pci, t, "C");
d0ca268b 1304 break;
56f2af81
JA
1305 case __BLK_TA_INSERT:
1306 log_pc(pci, t, "I");
1307 break;
d0ca268b 1308 default:
56f2af81 1309 fprintf(stderr, "Bad pc action %x\n", act);
87b72777 1310 break;
d0ca268b 1311 }
d0ca268b
JA
1312}
1313
f7bd1a9b
JA
1314static void dump_trace_fs(struct blk_io_trace *t, struct per_dev_info *pdi,
1315 struct per_cpu_info *pci)
d0ca268b 1316{
649c7b66 1317 int w = (t->action & BLK_TC_ACT(BLK_TC_WRITE)) != 0;
7997c5b0 1318 int act = t->action & 0xffff;
d0ca268b 1319
7997c5b0 1320 switch (act) {
d0ca268b 1321 case __BLK_TA_QUEUE:
753f9091 1322 log_track_queue(pdi, t);
b6076a9b 1323 account_queue(t, pci, w);
3639a11e 1324 log_queue(pci, t, "Q");
d0ca268b 1325 break;
b6076a9b 1326 case __BLK_TA_INSERT:
f7bd1a9b 1327 log_insert(pdi, pci, t, "I");
b6076a9b 1328 break;
d0ca268b 1329 case __BLK_TA_BACKMERGE:
152f6476 1330 account_m(t, pci, w);
f7bd1a9b 1331 log_merge(pdi, pci, t, "M");
d0ca268b
JA
1332 break;
1333 case __BLK_TA_FRONTMERGE:
152f6476 1334 account_m(t, pci, w);
f7bd1a9b 1335 log_merge(pdi, pci, t, "F");
d0ca268b
JA
1336 break;
1337 case __BLK_TA_GETRQ:
f7bd1a9b 1338 log_track_getrq(pdi, t);
3639a11e 1339 log_generic(pci, t, "G");
d0ca268b
JA
1340 break;
1341 case __BLK_TA_SLEEPRQ:
3639a11e 1342 log_generic(pci, t, "S");
d0ca268b
JA
1343 break;
1344 case __BLK_TA_REQUEUE:
65f2deb5
JA
1345 /*
1346 * can happen if we miss traces, don't let it go
1347 * below zero
1348 */
1349 if (pdi->cur_depth[w])
1350 pdi->cur_depth[w]--;
4054070a 1351 account_requeue(t, pci, w);
3639a11e 1352 log_queue(pci, t, "R");
d0ca268b
JA
1353 break;
1354 case __BLK_TA_ISSUE:
b6076a9b 1355 account_issue(t, pci, w);
649c7b66
JA
1356 pdi->cur_depth[w]++;
1357 if (pdi->cur_depth[w] > pdi->max_depth[w])
1358 pdi->max_depth[w] = pdi->cur_depth[w];
f7bd1a9b 1359 log_issue(pdi, pci, t, "D");
d0ca268b
JA
1360 break;
1361 case __BLK_TA_COMPLETE:
65f2deb5
JA
1362 if (pdi->cur_depth[w])
1363 pdi->cur_depth[w]--;
152f6476 1364 account_c(t, pci, w, t->bytes);
f7bd1a9b 1365 log_complete(pdi, pci, t, "C");
d0ca268b 1366 break;
88b1a526 1367 case __BLK_TA_PLUG:
3639a11e 1368 log_action(pci, t, "P");
88b1a526 1369 break;
3639a11e 1370 case __BLK_TA_UNPLUG_IO:
06639b27 1371 account_unplug(t, pci, 0);
3639a11e
JA
1372 log_unplug(pci, t, "U");
1373 break;
1374 case __BLK_TA_UNPLUG_TIMER:
06639b27 1375 account_unplug(t, pci, 1);
3639a11e 1376 log_unplug(pci, t, "UT");
88b1a526 1377 break;
93f1c611
JA
1378 case __BLK_TA_SPLIT:
1379 log_split(pci, t, "X");
1380 break;
1381 case __BLK_TA_BOUNCE:
1382 log_generic(pci, t, "B");
1383 break;
a8f30e64
JA
1384 case __BLK_TA_REMAP:
1385 log_generic(pci, t, "A");
1386 break;
d0ca268b
JA
1387 default:
1388 fprintf(stderr, "Bad fs action %x\n", t->action);
1f79c4a0 1389 break;
d0ca268b 1390 }
d0ca268b
JA
1391}
1392
ff3a732c
JA
1393static void dump_trace(struct blk_io_trace *t, struct per_cpu_info *pci,
1394 struct per_dev_info *pdi)
d0ca268b
JA
1395{
1396 if (t->action & BLK_TC_ACT(BLK_TC_PC))
ff3a732c 1397 dump_trace_pc(t, pci);
d0ca268b 1398 else
f7bd1a9b 1399 dump_trace_fs(t, pdi, pci);
87b72777 1400
20ed6177
JA
1401 if (!pdi->events)
1402 pdi->first_reported_time = t->time;
1403
e7c9f3ff 1404 pdi->events++;
d0ca268b
JA
1405}
1406
/*
 * print in a proper way, not too small and not too big. if more than
 * 1000,000K, turn into M and so on
 */
static char *size_cnv(char *dst, unsigned long long num, int in_kb)
{
	static const char suffixes[] = { '\0', 'K', 'M', 'G', 'P' };
	unsigned int idx = in_kb ? 1 : 0;

	while (num > 1000 * 1000ULL && idx < sizeof(suffixes) - 1) {
		idx++;
		num /= 1000;
	}

	sprintf(dst, "%'8Lu%c", num, suffixes[idx]);
	return dst;
}
1427
649c7b66
JA
1428static void dump_io_stats(struct per_dev_info *pdi, struct io_stats *ios,
1429 char *msg)
5c017e4b 1430{
4c523165
JA
1431 static char x[256], y[256];
1432
152f6476
JA
1433 fprintf(ofp, "%s\n", msg);
1434
4c523165
JA
1435 fprintf(ofp, " Reads Queued: %s, %siB\t", size_cnv(x, ios->qreads, 0), size_cnv(y, ios->qread_kb, 1));
1436 fprintf(ofp, " Writes Queued: %s, %siB\n", size_cnv(x, ios->qwrites, 0), size_cnv(y, ios->qwrite_kb, 1));
0a6b8fc4 1437
4c523165
JA
1438 fprintf(ofp, " Read Dispatches: %s, %siB\t", size_cnv(x, ios->ireads, 0), size_cnv(y, ios->iread_kb, 1));
1439 fprintf(ofp, " Write Dispatches: %s, %siB\n", size_cnv(x, ios->iwrites, 0), size_cnv(y, ios->iwrite_kb, 1));
4054070a
JA
1440 fprintf(ofp, " Reads Requeued: %s\t\t", size_cnv(x, ios->rrqueue, 0));
1441 fprintf(ofp, " Writes Requeued: %s\n", size_cnv(x, ios->wrqueue, 0));
4c523165
JA
1442 fprintf(ofp, " Reads Completed: %s, %siB\t", size_cnv(x, ios->creads, 0), size_cnv(y, ios->cread_kb, 1));
1443 fprintf(ofp, " Writes Completed: %s, %siB\n", size_cnv(x, ios->cwrites, 0), size_cnv(y, ios->cwrite_kb, 1));
152f6476 1444 fprintf(ofp, " Read Merges: %'8lu%8c\t", ios->mreads, ' ');
152f6476 1445 fprintf(ofp, " Write Merges: %'8lu\n", ios->mwrites);
649c7b66
JA
1446 if (pdi) {
1447 fprintf(ofp, " Read depth: %'8u%8c\t", pdi->max_depth[0], ' ');
1448 fprintf(ofp, " Write depth: %'8u\n", pdi->max_depth[1]);
1449 }
06639b27
JA
1450 fprintf(ofp, " IO unplugs: %'8lu%8c\t", ios->io_unplugs, ' ');
1451 fprintf(ofp, " Timer unplugs: %'8lu\n", ios->timer_unplugs);
5c017e4b
JA
1452}
1453
50adc0ba
JA
1454static void dump_wait_stats(struct per_process_info *ppi)
1455{
b9d40d6f
JA
1456 unsigned long rawait = ppi->longest_allocation_wait[0] / 1000;
1457 unsigned long rdwait = ppi->longest_dispatch_wait[0] / 1000;
1458 unsigned long rcwait = ppi->longest_completion_wait[0] / 1000;
1459 unsigned long wawait = ppi->longest_allocation_wait[1] / 1000;
1460 unsigned long wdwait = ppi->longest_dispatch_wait[1] / 1000;
1461 unsigned long wcwait = ppi->longest_completion_wait[1] / 1000;
1462
1463 fprintf(ofp, " Allocation wait: %'8lu%8c\t", rawait, ' ');
1464 fprintf(ofp, " Allocation wait: %'8lu\n", wawait);
1465 fprintf(ofp, " Dispatch wait: %'8lu%8c\t", rdwait, ' ');
1466 fprintf(ofp, " Dispatch wait: %'8lu\n", wdwait);
1467 fprintf(ofp, " Completion wait: %'8lu%8c\t", rcwait, ' ');
1468 fprintf(ofp, " Completion wait: %'8lu\n", wcwait);
50adc0ba
JA
1469}
1470
886ecf0e
JA
1471static int ppi_name_compare(const void *p1, const void *p2)
1472{
1473 struct per_process_info *ppi1 = *((struct per_process_info **) p1);
1474 struct per_process_info *ppi2 = *((struct per_process_info **) p2);
1475 int res;
1476
2990e589 1477 res = strverscmp(ppi1->ppm->comm, ppi2->ppm->comm);
886ecf0e 1478 if (!res)
2990e589 1479 res = ppi1->ppm->pid > ppi2->ppm->pid;
886ecf0e
JA
1480
1481 return res;
1482}
1483
1484static void sort_process_list(void)
1485{
1486 struct per_process_info **ppis;
1487 struct per_process_info *ppi;
1488 int i = 0;
1489
1490 ppis = malloc(ppi_list_entries * sizeof(struct per_process_info *));
1491
1492 ppi = ppi_list;
1493 while (ppi) {
06e6f286 1494 ppis[i++] = ppi;
886ecf0e
JA
1495 ppi = ppi->list_next;
1496 }
1497
06e6f286 1498 qsort(ppis, ppi_list_entries, sizeof(ppi), ppi_name_compare);
886ecf0e
JA
1499
1500 i = ppi_list_entries - 1;
1501 ppi_list = NULL;
1502 while (i >= 0) {
1503 ppi = ppis[i];
1504
1505 ppi->list_next = ppi_list;
1506 ppi_list = ppi;
1507 i--;
1508 }
50c38702
JA
1509
1510 free(ppis);
886ecf0e
JA
1511}
1512
152f6476
JA
1513static void show_process_stats(void)
1514{
1515 struct per_process_info *ppi;
1516
886ecf0e
JA
1517 sort_process_list();
1518
152f6476
JA
1519 ppi = ppi_list;
1520 while (ppi) {
2990e589 1521 struct process_pid_map *ppm = ppi->ppm;
ce8b6b4f
JA
1522 char name[64];
1523
715d8021 1524 if (ppi->more_than_one)
2990e589 1525 sprintf(name, "%s (%u, ...)", ppm->comm, ppm->pid);
715d8021 1526 else
2990e589 1527 sprintf(name, "%s (%u)", ppm->comm, ppm->pid);
bf0720af 1528
649c7b66 1529 dump_io_stats(NULL, &ppi->io_stats, name);
50adc0ba 1530 dump_wait_stats(ppi);
152f6476
JA
1531 ppi = ppi->list_next;
1532 }
1533
1534 fprintf(ofp, "\n");
1535}
1536
e7c9f3ff 1537static void show_device_and_cpu_stats(void)
d0ca268b 1538{
e7c9f3ff
NS
1539 struct per_dev_info *pdi;
1540 struct per_cpu_info *pci;
1541 struct io_stats total, *ios;
20ed6177 1542 unsigned long long rrate, wrate, msec;
e7c9f3ff
NS
1543 int i, j, pci_events;
1544 char line[3 + 8/*cpu*/ + 2 + 32/*dev*/ + 3];
1545 char name[32];
1546
1547 for (pdi = devices, i = 0; i < ndevices; i++, pdi++) {
1548
1549 memset(&total, 0, sizeof(total));
1550 pci_events = 0;
1551
1552 if (i > 0)
1553 fprintf(ofp, "\n");
1554
1555 for (pci = pdi->cpus, j = 0; j < pdi->ncpus; j++, pci++) {
1556 if (!pci->nelems)
1557 continue;
1558
1559 ios = &pci->io_stats;
1560 total.qreads += ios->qreads;
1561 total.qwrites += ios->qwrites;
1562 total.creads += ios->creads;
1563 total.cwrites += ios->cwrites;
1564 total.mreads += ios->mreads;
1565 total.mwrites += ios->mwrites;
1566 total.ireads += ios->ireads;
1567 total.iwrites += ios->iwrites;
4054070a
JA
1568 total.rrqueue += ios->rrqueue;
1569 total.wrqueue += ios->wrqueue;
e7c9f3ff
NS
1570 total.qread_kb += ios->qread_kb;
1571 total.qwrite_kb += ios->qwrite_kb;
1572 total.cread_kb += ios->cread_kb;
1573 total.cwrite_kb += ios->cwrite_kb;
1574 total.iread_kb += ios->iread_kb;
1575 total.iwrite_kb += ios->iwrite_kb;
06639b27
JA
1576 total.timer_unplugs += ios->timer_unplugs;
1577 total.io_unplugs += ios->io_unplugs;
e7c9f3ff
NS
1578
1579 snprintf(line, sizeof(line) - 1, "CPU%d (%s):",
1580 j, get_dev_name(pdi, name, sizeof(name)));
649c7b66 1581 dump_io_stats(pdi, ios, line);
e7c9f3ff
NS
1582 pci_events++;
1583 }
5c017e4b 1584
e7c9f3ff
NS
1585 if (pci_events > 1) {
1586 fprintf(ofp, "\n");
1587 snprintf(line, sizeof(line) - 1, "Total (%s):",
1588 get_dev_name(pdi, name, sizeof(name)));
649c7b66 1589 dump_io_stats(NULL, &total, line);
e7c9f3ff 1590 }
d0ca268b 1591
20ed6177 1592 wrate = rrate = 0;
20ed6177
JA
1593 msec = (pdi->last_reported_time - pdi->first_reported_time) / 1000000;
1594 if (msec) {
1595 rrate = 1000 * total.cread_kb / msec;
1596 wrate = 1000 * total.cwrite_kb / msec;
1597 }
1598
dce0f678
AB
1599 fprintf(ofp, "\nThroughput (R/W): %'LuKiB/s / %'LuKiB/s\n",
1600 rrate, wrate);
1601 fprintf(ofp, "Events (%s): %'Lu entries\n",
1602 get_dev_name(pdi, line, sizeof(line)), pdi->events);
492da111
AB
1603
1604 collect_pdi_skips(pdi);
1605 fprintf(ofp, "Skips: %'lu forward (%'llu - %5.1lf%%)\n",
dce0f678
AB
1606 pdi->skips,pdi->seq_skips,
1607 100.0 * ((double)pdi->seq_skips /
dce0f678 1608 (double)(pdi->events + pdi->seq_skips)));
e7c9f3ff 1609 }
d0ca268b
JA
1610}
1611
4f0ae44f
JA
1612static void find_genesis(void)
1613{
1614 struct trace *t = trace_list;
1615
1616 genesis_time = -1ULL;
1617 while (t != NULL) {
1618 if (t->bit->time < genesis_time)
1619 genesis_time = t->bit->time;
1620
1621 t = t->next;
1622 }
1623}
1624
7f4d89e6 1625static inline int check_stopwatch(struct blk_io_trace *bit)
4f0ae44f 1626{
7f4d89e6
JA
1627 if (bit->time < stopwatch_end &&
1628 bit->time >= stopwatch_start)
4f0ae44f
JA
1629 return 0;
1630
1631 return 1;
1632}
1633
53c68c88
JA
1634/*
1635 * return youngest entry read
1636 */
1637static int sort_entries(unsigned long long *youngest)
4f0ae44f 1638{
210824c3
JA
1639 struct per_dev_info *pdi = NULL;
1640 struct per_cpu_info *pci = NULL;
4f0ae44f 1641 struct trace *t;
4f0ae44f
JA
1642
1643 if (!genesis_time)
1644 find_genesis();
1645
d6222db8 1646 *youngest = 0;
4f0ae44f
JA
1647 while ((t = trace_list) != NULL) {
1648 struct blk_io_trace *bit = t->bit;
1649
1650 trace_list = t->next;
1651
7f4d89e6 1652 bit->time -= genesis_time;
4f0ae44f 1653
d6222db8
JA
1654 if (bit->time < *youngest || !*youngest)
1655 *youngest = bit->time;
1656
210824c3
JA
1657 if (!pdi || pdi->dev != bit->device) {
1658 pdi = get_dev_info(bit->device);
1659 pci = NULL;
1660 }
1661
1662 if (!pci || pci->cpu != bit->cpu)
1663 pci = get_cpu_info(pdi, bit->cpu);
1664
1665 if (bit->sequence < pci->smallest_seq_read)
1666 pci->smallest_seq_read = bit->sequence;
774a1a10 1667
7f4d89e6 1668 if (check_stopwatch(bit)) {
4f0ae44f
JA
1669 bit_free(bit);
1670 t_free(t);
1671 continue;
1672 }
1673
2a1b3424 1674 if (trace_rb_insert_sort(t))
53c68c88 1675 return -1;
4f0ae44f
JA
1676 }
1677
53c68c88 1678 return 0;
4f0ae44f
JA
1679}
1680
824c2b39
JA
1681/*
1682 * to continue, we must have traces from all online cpus in the tree
1683 */
1684static int check_cpu_map(struct per_dev_info *pdi)
1685{
1686 unsigned long *cpu_map;
1687 struct rb_node *n;
1688 struct trace *__t;
1689 unsigned int i;
1690 int ret, cpu;
1691
1692 /*
1693 * create a map of the cpus we have traces for
1694 */
1695 cpu_map = malloc(pdi->cpu_map_max / sizeof(long));
1696 n = rb_first(&rb_sort_root);
1697 while (n) {
1698 __t = rb_entry(n, struct trace, rb_node);
1699 cpu = __t->bit->cpu;
1700
1701 cpu_map[CPU_IDX(cpu)] |= (1UL << CPU_BIT(cpu));
1702 n = rb_next(n);
1703 }
1704
1705 /*
b1c8e614
JA
1706 * we can't continue if pdi->cpu_map has entries set that we don't
1707 * have in the sort rbtree. the opposite is not a problem, though
824c2b39
JA
1708 */
1709 ret = 0;
1710 for (i = 0; i < pdi->cpu_map_max / CPUS_PER_LONG; i++) {
1711 if (pdi->cpu_map[i] & ~(cpu_map[i])) {
1712 ret = 1;
1713 break;
1714 }
1715 }
1716
1717 free(cpu_map);
1718 return ret;
1719}
1720
a141a7cd 1721static int check_sequence(struct per_dev_info *pdi, struct trace *t, int force)
2a1b3424 1722{
1ca323a5 1723 struct blk_io_trace *bit = t->bit;
210824c3
JA
1724 unsigned long expected_sequence;
1725 struct per_cpu_info *pci;
1ca323a5 1726 struct trace *__t;
492da111 1727
210824c3
JA
1728 pci = get_cpu_info(pdi, bit->cpu);
1729 expected_sequence = pci->last_sequence + 1;
1730
774a1a10 1731 if (!expected_sequence) {
774a1a10
JA
1732 /*
1733 * 1 should be the first entry, just allow it
1734 */
1735 if (bit->sequence == 1)
1736 return 0;
210824c3 1737 if (bit->sequence == pci->smallest_seq_read)
79ee9704 1738 return 0;
774a1a10 1739
824c2b39 1740 return check_cpu_map(pdi);
774a1a10 1741 }
2a1b3424
JA
1742
1743 if (bit->sequence == expected_sequence)
1744 return 0;
1745
2a1b3424 1746 /*
1c7c54aa
JA
1747 * we may not have seen that sequence yet. if we are not doing
1748 * the final run, break and wait for more entries.
1c24add6 1749 */
210824c3
JA
1750 if (expected_sequence < pci->smallest_seq_read) {
1751 __t = trace_rb_find_last(pdi, pci, expected_sequence);
1ca323a5 1752 if (!__t)
1c7c54aa 1753 goto skip;
2a1b3424 1754
1ca323a5 1755 __put_trace_last(pdi, __t);
2a1b3424 1756 return 0;
a141a7cd
JA
1757 } else if (!force) {
1758 return 1;
0b07f23e 1759 } else {
1c7c54aa 1760skip:
66930177 1761 if (check_current_skips(pci, bit->sequence))
492da111
AB
1762 return 0;
1763
965eca2d 1764 if (expected_sequence < bit->sequence)
66930177 1765 insert_skip(pci, expected_sequence, bit->sequence - 1);
1c7c54aa
JA
1766 return 0;
1767 }
2a1b3424
JA
1768}
1769
a649216c 1770static void show_entries_rb(int force)
8fc0abbc 1771{
1f7afa72
JA
1772 struct per_dev_info *pdi = NULL;
1773 struct per_cpu_info *pci = NULL;
8fc0abbc 1774 struct blk_io_trace *bit;
3aabcd89 1775 struct rb_node *n;
8fc0abbc 1776 struct trace *t;
1f7afa72 1777
7d747d22 1778 while ((n = rb_first(&rb_sort_root)) != NULL) {
dd90748f 1779 if (is_done() && !force && !pipeline)
1f7afa72 1780 break;
8fc0abbc
JA
1781
1782 t = rb_entry(n, struct trace, rb_node);
1783 bit = t->bit;
1784
a43c1c17
JA
1785 if (read_sequence - t->read_sequence < 1 && !force)
1786 break;
1787
210824c3 1788 if (!pdi || pdi->dev != bit->device) {
287fa3d6 1789 pdi = get_dev_info(bit->device);
210824c3
JA
1790 pci = NULL;
1791 }
1f7afa72 1792
e7c9f3ff
NS
1793 if (!pdi) {
1794 fprintf(stderr, "Unknown device ID? (%d,%d)\n",
1795 MAJOR(bit->device), MINOR(bit->device));
1796 break;
1797 }
1f7afa72 1798
a141a7cd
JA
1799 if (check_sequence(pdi, t, force))
1800 break;
cb2a1a62 1801
a141a7cd
JA
1802 if (!force && bit->time > last_allowed_time)
1803 break;
8fc0abbc 1804
4f0ae44f 1805 check_time(pdi, bit);
8fc0abbc 1806
4f0ae44f
JA
1807 if (!pci || pci->cpu != bit->cpu)
1808 pci = get_cpu_info(pdi, bit->cpu);
287fa3d6 1809
210824c3
JA
1810 pci->last_sequence = bit->sequence;
1811
cbc927b6
JA
1812 pci->nelems++;
1813
66930177 1814 if (bit->action & (act_mask << BLK_TC_SHIFT))
98f8386b 1815 dump_trace(bit, pci, pdi);
87b72777 1816
2a1b3424 1817 put_trace(pdi, t);
cb2a1a62 1818 }
8fc0abbc
JA
1819}
1820
/*
 * Read exactly 'bytes' bytes from fd into buffer. *fdblock caches the
 * descriptor's current blocking mode so O_NONBLOCK is only toggled
 * when 'block' changes. Returns 0 on success, 1 on EOF or when a
 * non-blocking read found nothing, -1 on a hard read error.
 */
static int read_data(int fd, void *buffer, int bytes, int block, int *fdblock)
{
	int ret, left, flags;
	void *p;

	if (block != *fdblock) {
		flags = fcntl(fd, F_GETFL);

		if (block) {
			*fdblock = 1;
			fcntl(fd, F_SETFL, flags & ~O_NONBLOCK);
		} else {
			*fdblock = 0;
			fcntl(fd, F_SETFL, flags | O_NONBLOCK);
		}
	}

	left = bytes;
	p = buffer;
	while (left > 0) {
		ret = read(fd, p, left);
		if (!ret)
			return 1;
		if (ret > 0) {
			p += ret;
			left -= ret;
			continue;
		}
		if (errno != EAGAIN) {
			perror("read");
			return -1;
		}

		/*
		 * never do partial reads. we can return if we
		 * didn't read anything and we should not block,
		 * otherwise wait for data
		 */
		if ((left == bytes) && !block)
			return 1;

		usleep(10);
	}

	return 0;
}
1868
017d1660
JA
1869static inline __u16 get_pdulen(struct blk_io_trace *bit)
1870{
1871 if (data_is_native)
1872 return bit->pdu_len;
1873
1874 return __bswap_16(bit->pdu_len);
1875}
1876
1877static inline __u32 get_magic(struct blk_io_trace *bit)
1878{
1879 if (data_is_native)
1880 return bit->magic;
1881
1882 return __bswap_32(bit->magic);
1883}
1884
c0e0dbc2 1885static int read_events(int fd, int always_block, int *fdblock)
cb2a1a62 1886{
287fa3d6 1887 struct per_dev_info *pdi = NULL;
e820abd7 1888 unsigned int events = 0;
7d747d22
JA
1889
1890 while (!is_done() && events < rb_batch) {
1891 struct blk_io_trace *bit;
1892 struct trace *t;
db7e0552 1893 int pdu_len, should_block, ret;
7d747d22
JA
1894 __u32 magic;
1895
d36421e4 1896 bit = bit_alloc();
cb2a1a62 1897
c0e0dbc2
JA
1898 should_block = !events || always_block;
1899
db7e0552
JA
1900 ret = read_data(fd, bit, sizeof(*bit), should_block, fdblock);
1901 if (ret) {
eb9bd4e9 1902 bit_free(bit);
db7e0552
JA
1903 if (!events && ret < 0)
1904 events = ret;
cb2a1a62 1905 break;
eb9bd4e9 1906 }
cb2a1a62 1907
017d1660
JA
1908 /*
1909 * look at first trace to check whether we need to convert
1910 * data in the future
1911 */
9e4cd1b8 1912 if (data_is_native == -1 && check_data_endianness(bit->magic))
017d1660
JA
1913 break;
1914
1915 magic = get_magic(bit);
7d747d22
JA
1916 if ((magic & 0xffffff00) != BLK_IO_TRACE_MAGIC) {
1917 fprintf(stderr, "Bad magic %x\n", magic);
1918 break;
1919 }
1920
017d1660 1921 pdu_len = get_pdulen(bit);
7d747d22
JA
1922 if (pdu_len) {
1923 void *ptr = realloc(bit, sizeof(*bit) + pdu_len);
1924
c0e0dbc2 1925 if (read_data(fd, ptr + sizeof(*bit), pdu_len, 1, fdblock)) {
eb9bd4e9 1926 bit_free(ptr);
7d747d22 1927 break;
eb9bd4e9 1928 }
7d747d22
JA
1929
1930 bit = ptr;
1931 }
1932
d6222db8
JA
1933 trace_to_cpu(bit);
1934
1935 if (verify_trace(bit)) {
1936 bit_free(bit);
1937 continue;
1938 }
1939
bfc70ad5
JA
1940 /*
1941 * not a real trace, so grab and handle it here
1942 */
1943 if (bit->action & BLK_TC_ACT(BLK_TC_NOTIFY)) {
1944 add_ppm_hash(bit->pid, (char *) bit + sizeof(*bit));
1945 continue;
1946 }
1947
d36421e4 1948 t = t_alloc();
cb2a1a62
JA
1949 memset(t, 0, sizeof(*t));
1950 t->bit = bit;
a43c1c17 1951 t->read_sequence = read_sequence;
cb2a1a62 1952
7d747d22
JA
1953 t->next = trace_list;
1954 trace_list = t;
1f7afa72 1955
f7bd1a9b 1956 if (!pdi || pdi->dev != bit->device)
287fa3d6
JA
1957 pdi = get_dev_info(bit->device);
1958
1959 if (bit->time > pdi->last_read_time)
1960 pdi->last_read_time = bit->time;
1961
7d747d22 1962 events++;
cb2a1a62
JA
1963 }
1964
7d747d22 1965 return events;
cb2a1a62
JA
1966}
1967
d5396421 1968static int do_file(void)
d0ca268b 1969{
7d747d22 1970 struct per_cpu_info *pci;
73877e12
NS
1971 struct per_dev_info *pdi;
1972 int i, j, events, events_added;
d0ca268b 1973
7d747d22
JA
1974 /*
1975 * first prepare all files for reading
1976 */
e8741a4a 1977 for (i = 0; i < ndevices; i++) {
73877e12
NS
1978 pdi = &devices[i];
1979 pdi->nfiles = 0;
73877e12 1980
74feaeb5 1981 for (j = 0;; j++) {
e7c9f3ff 1982 struct stat st;
d1d7f15f 1983 int len = 0;
6e0073ed 1984 char *p, *dname;
87b72777 1985
e7c9f3ff
NS
1986 pci = get_cpu_info(pdi, j);
1987 pci->cpu = j;
7d747d22 1988 pci->fd = -1;
c0e0dbc2 1989 pci->fdblock = -1;
6e0073ed
JA
1990
1991 p = strdup(pdi->name);
1992 dname = dirname(p);
1993 if (strcmp(dname, ".")) {
1994 input_dir = dname;
1995 p = strdup(pdi->name);
1996 strcpy(pdi->name, basename(p));
1997 }
1998 free(p);
d0ca268b 1999
d1d7f15f
JA
2000 if (input_dir)
2001 len = sprintf(pci->fname, "%s/", input_dir);
2002
2003 snprintf(pci->fname + len, sizeof(pci->fname)-1-len,
7d747d22 2004 "%s.blktrace.%d", pdi->name, pci->cpu);
e7c9f3ff
NS
2005 if (stat(pci->fname, &st) < 0)
2006 break;
74feaeb5
AB
2007 if (st.st_size) {
2008 pci->fd = open(pci->fname, O_RDONLY);
2009 if (pci->fd < 0) {
2010 perror(pci->fname);
2011 continue;
2012 }
e7c9f3ff
NS
2013 }
2014
7d747d22 2015 printf("Input file %s added\n", pci->fname);
73877e12 2016 pdi->nfiles++;
824c2b39 2017 cpu_mark_online(pdi, pci->cpu);
d0ca268b 2018 }
d5396421
JA
2019 }
2020
7d747d22
JA
2021 /*
2022 * now loop over the files reading in the data
2023 */
412819ce 2024 do {
53c68c88
JA
2025 unsigned long long youngest;
2026
7d747d22 2027 events_added = 0;
287fa3d6 2028 last_allowed_time = -1ULL;
a43c1c17 2029 read_sequence++;
d5396421 2030
7d747d22 2031 for (i = 0; i < ndevices; i++) {
73877e12 2032 pdi = &devices[i];
66930177 2033 pdi->last_read_time = -1ULL;
73877e12
NS
2034
2035 for (j = 0; j < pdi->nfiles; j++) {
d5396421 2036
73877e12 2037 pci = get_cpu_info(pdi, j);
d5396421 2038
7d747d22
JA
2039 if (pci->fd == -1)
2040 continue;
51128a28 2041
210824c3
JA
2042 pci->smallest_seq_read = -1;
2043
c0e0dbc2 2044 events = read_events(pci->fd, 1, &pci->fdblock);
db7e0552 2045 if (events <= 0) {
824c2b39 2046 cpu_mark_offline(pdi, pci->cpu);
7d747d22
JA
2047 close(pci->fd);
2048 pci->fd = -1;
2049 continue;
2050 }
d5396421 2051
287fa3d6
JA
2052 if (pdi->last_read_time < last_allowed_time)
2053 last_allowed_time = pdi->last_read_time;
d5396421 2054
7d747d22
JA
2055 events_added += events;
2056 }
2ff323b0 2057 }
d5396421 2058
53c68c88
JA
2059 if (sort_entries(&youngest))
2060 break;
2061
2062 if (youngest > stopwatch_end)
287fa3d6
JA
2063 break;
2064
a649216c 2065 show_entries_rb(0);
cb2a1a62 2066
7d747d22 2067 } while (events_added);
d5396421 2068
a649216c
JA
2069 if (rb_sort_entries)
2070 show_entries_rb(1);
2071
7d747d22 2072 return 0;
412819ce 2073}
d5396421 2074
412819ce
JA
2075static int do_stdin(void)
2076{
53c68c88 2077 unsigned long long youngest;
c0e0dbc2 2078 int fd, events, fdblock;
d5396421 2079
be925321 2080 last_allowed_time = -1ULL;
1f79c4a0 2081 fd = dup(STDIN_FILENO);
0b07f23e
JA
2082 if (fd == -1) {
2083 perror("dup stdin");
2084 return -1;
2085 }
d5396421 2086
c0e0dbc2 2087 fdblock = -1;
db7e0552 2088 while ((events = read_events(fd, 0, &fdblock)) > 0) {
4ab42801 2089 read_sequence++;
412819ce 2090
210824c3 2091#if 0
0b07f23e 2092 smallest_seq_read = -1U;
210824c3 2093#endif
0b07f23e 2094
53c68c88
JA
2095 if (sort_entries(&youngest))
2096 break;
2097
2098 if (youngest > stopwatch_end)
2ff323b0
JA
2099 break;
2100
763d936e 2101 show_entries_rb(0);
0b07f23e 2102 }
d5396421 2103
a649216c
JA
2104 if (rb_sort_entries)
2105 show_entries_rb(1);
2106
d5396421 2107 close(fd);
d5396421
JA
2108 return 0;
2109}
d0ca268b 2110
cbc927b6 2111static void show_stats(void)
412819ce 2112{
cbc927b6
JA
2113 if (!ofp)
2114 return;
2115 if (stats_printed)
2116 return;
2117
2118 stats_printed = 1;
2119
2120 if (per_process_stats)
2121 show_process_stats();
2122
2123 if (per_device_and_cpu_stats)
2124 show_device_and_cpu_stats();
2125
152f6476 2126 fflush(ofp);
412819ce
JA
2127}
2128
e820abd7 2129static void handle_sigint(__attribute__((__unused__)) int sig)
412819ce
JA
2130{
2131 done = 1;
412819ce
JA
2132}
2133
46e6968b
NS
2134/*
2135 * Extract start and duration times from a string, allowing
2136 * us to specify a time interval of interest within a trace.
2137 * Format: "duration" (start is zero) or "start:duration".
2138 */
2139static int find_stopwatch_interval(char *string)
2140{
2141 double value;
2142 char *sp;
2143
2144 value = strtod(string, &sp);
2145 if (sp == string) {
2146 fprintf(stderr,"Invalid stopwatch timer: %s\n", string);
2147 return 1;
2148 }
2149 if (*sp == ':') {
2150 stopwatch_start = DOUBLE_TO_NANO_ULL(value);
2151 string = sp + 1;
2152 value = strtod(string, &sp);
2153 if (sp == string || *sp != '\0') {
2154 fprintf(stderr,"Invalid stopwatch duration time: %s\n",
2155 string);
2156 return 1;
2157 }
2158 } else if (*sp != '\0') {
2159 fprintf(stderr,"Invalid stopwatch start timer: %s\n", string);
2160 return 1;
2161 }
1b928247
JA
2162 stopwatch_end = DOUBLE_TO_NANO_ULL(value);
2163 if (stopwatch_end <= stopwatch_start) {
2164 fprintf(stderr, "Invalid stopwatch interval: %Lu -> %Lu\n",
2165 stopwatch_start, stopwatch_end);
2166 return 1;
2167 }
2168
46e6968b
NS
2169 return 0;
2170}
2171
/*
 * Help text printed by usage(). Fix: the first line had unbalanced
 * brackets ("[-o <output name> [ -s ]" never closed, and "[ -v]"
 * was inconsistently spaced).
 */
static char usage_str[] = \
	"[ -i <input name> ] [ -o <output name> ] [ -s ] [ -t ] [ -q ]\n" \
	"[ -w start:stop ] [ -f output format ] [ -F format spec ] [ -v ]\n\n" \
	"\t-i Input file containing trace data, or '-' for stdin\n" \
	"\t-D Directory to prepend to input file names\n" \
	"\t-o Output file. If not given, output is stdout\n" \
	"\t-b stdin read batching\n" \
	"\t-s Show per-program io statistics\n" \
	"\t-h Hash processes by name, not pid\n" \
	"\t-t Track individual ios. Will tell you the time a request took\n" \
	"\t   to get queued, to get dispatched, and to get completed\n" \
	"\t-q Quiet. Don't display any stats at the end of the trace\n" \
	"\t-w Only parse data between the given time interval in seconds.\n" \
	"\t   If 'start' isn't given, blkparse defaults the start time to 0\n" \
	"\t-f Output format. Customize the output format. The format field\n" \
	"\t   identifies can be found in the documentation\n" \
	"\t-F Format specification. Can be found in the documentation\n" \
	"\t-v More verbose for marginal errors\n" \
	"\t-V Print program version info\n\n";
52724a0e 2191
1f79c4a0
JA
2192static void usage(char *prog)
2193{
52724a0e 2194 fprintf(stderr, "Usage: %s %s %s", prog, blkparse_version, usage_str);
1f79c4a0
JA
2195}
2196
d5396421
JA
2197int main(int argc, char *argv[])
2198{
152f6476 2199 char *ofp_buffer;
98f8386b 2200 int i, c, ret, mode;
98f8386b 2201 int act_mask_tmp = 0;
d5396421
JA
2202
2203 while ((c = getopt_long(argc, argv, S_OPTS, l_opts, NULL)) != -1) {
2204 switch (c) {
98f8386b
AB
2205 case 'a':
2206 i = find_mask_map(optarg);
2207 if (i < 0) {
2208 fprintf(stderr,"Invalid action mask %s\n",
2209 optarg);
2210 return 1;
2211 }
2212 act_mask_tmp |= i;
2213 break;
2214
2215 case 'A':
2216 if ((sscanf(optarg, "%x", &i) != 1) ||
2217 !valid_act_opt(i)) {
2218 fprintf(stderr,
2219 "Invalid set action mask %s/0x%x\n",
2220 optarg, i);
2221 return 1;
2222 }
2223 act_mask_tmp = i;
2224 break;
d5396421 2225 case 'i':
e7c9f3ff
NS
2226 if (!strcmp(optarg, "-") && !pipeline)
2227 pipeline = 1;
2228 else if (resize_devices(optarg) != 0)
2229 return 1;
d5396421 2230 break;
d1d7f15f
JA
2231 case 'D':
2232 input_dir = optarg;
2233 break;
d5396421 2234 case 'o':
66efebf8 2235 output_name = optarg;
d5396421 2236 break;
79f19470
JA
2237 case 'b':
2238 rb_batch = atoi(optarg);
2239 if (rb_batch <= 0)
2240 rb_batch = RB_BATCH_DEFAULT;
2241 break;
152f6476
JA
2242 case 's':
2243 per_process_stats = 1;
2244 break;
7997c5b0
JA
2245 case 't':
2246 track_ios = 1;
2247 break;
1e1c60f1
NS
2248 case 'q':
2249 per_device_and_cpu_stats = 0;
2250 break;
46e6968b
NS
2251 case 'w':
2252 if (find_stopwatch_interval(optarg) != 0)
2253 return 1;
2254 break;
ab197ca7
AB
2255 case 'f':
2256 set_all_format_specs(optarg);
2257 break;
2258 case 'F':
2259 if (add_format_spec(optarg) != 0)
2260 return 1;
2261 break;
d915dee6 2262 case 'h':
715d8021 2263 ppi_hash_by_pid = 0;
bf0720af 2264 break;
52724a0e 2265 case 'v':
57ea8602
JA
2266 verbose++;
2267 break;
2268 case 'V':
52724a0e
JA
2269 printf("%s version %s\n", argv[0], blkparse_version);
2270 return 0;
d5396421 2271 default:
1f79c4a0 2272 usage(argv[0]);
d5396421
JA
2273 return 1;
2274 }
d0ca268b
JA
2275 }
2276
e7c9f3ff
NS
2277 while (optind < argc) {
2278 if (!strcmp(argv[optind], "-") && !pipeline)
2279 pipeline = 1;
2280 else if (resize_devices(argv[optind]) != 0)
2281 return 1;
2282 optind++;
2283 }
2284
2285 if (!pipeline && !ndevices) {
1f79c4a0 2286 usage(argv[0]);
d5396421
JA
2287 return 1;
2288 }
2289
98f8386b
AB
2290 if (act_mask_tmp != 0)
2291 act_mask = act_mask_tmp;
2292
7997c5b0 2293 memset(&rb_sort_root, 0, sizeof(rb_sort_root));
412819ce
JA
2294
2295 signal(SIGINT, handle_sigint);
2296 signal(SIGHUP, handle_sigint);
2297 signal(SIGTERM, handle_sigint);
d5396421 2298
d69db225
JA
2299 setlocale(LC_NUMERIC, "en_US");
2300
a66877e6 2301 if (!output_name) {
152f6476 2302 ofp = fdopen(STDOUT_FILENO, "w");
a66877e6
JA
2303 mode = _IOLBF;
2304 } else {
152f6476
JA
2305 char ofname[128];
2306
7a9690c0 2307 snprintf(ofname, sizeof(ofname) - 1, "%s", output_name);
152f6476 2308 ofp = fopen(ofname, "w");
a66877e6 2309 mode = _IOFBF;
152f6476
JA
2310 }
2311
2312 if (!ofp) {
2313 perror("fopen");
2314 return 1;
2315 }
2316
2317 ofp_buffer = malloc(4096);
a66877e6 2318 if (setvbuf(ofp, ofp_buffer, mode, 4096)) {
152f6476
JA
2319 perror("setvbuf");
2320 return 1;
2321 }
2322
e7c9f3ff 2323 if (pipeline)
d5396421
JA
2324 ret = do_stdin();
2325 else
2326 ret = do_file();
2327
cbc927b6 2328 show_stats();
eb9bd4e9 2329 free(ofp_buffer);
d5396421 2330 return ret;
d0ca268b 2331}