[PATCH] blkparse: add per-process stats for longest waits
[blktrace.git] / blkparse.c
... / ...
CommitLineData
1/*
2 * block queue tracing parse application
3 *
4 * Copyright (C) 2005 Jens Axboe <axboe@suse.de>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 *
20 */
21#include <sys/types.h>
22#include <sys/stat.h>
23#include <unistd.h>
24#include <stdio.h>
25#include <fcntl.h>
26#include <stdlib.h>
27#include <string.h>
28#include <getopt.h>
29#include <errno.h>
30#include <signal.h>
31#include <locale.h>
32
33#include "blktrace.h"
34#include "rbtree.h"
35
36#define SECONDS(x) ((unsigned long long)(x) / 1000000000)
37#define NANO_SECONDS(x) ((unsigned long long)(x) % 1000000000)
38
39static int backwards;
40static unsigned long long genesis_time, last_reported_time;
41
42struct io_stats {
43 unsigned long qreads, qwrites, creads, cwrites, mreads, mwrites;
44 unsigned long ireads, iwrites;
45 unsigned long long qread_kb, qwrite_kb, cread_kb, cwrite_kb;
46 unsigned long long iread_kb, iwrite_kb;
47};
48
49struct per_cpu_info {
50 int cpu;
51 int nelems;
52
53 int fd;
54 char fname[128];
55
56 struct io_stats io_stats;
57};
58
59struct per_process_info {
60 char name[16];
61 __u32 pid;
62 struct io_stats io_stats;
63 struct per_process_info *hash_next, *list_next;
64
65 /*
66 * individual io stats
67 */
68 unsigned long long longest_allocation_wait;
69 unsigned long long longest_dispatch_wait;
70 unsigned long long longest_completion_wait;
71};
72
73#define PPI_HASH_SHIFT (8)
74static struct per_process_info *ppi_hash[1 << PPI_HASH_SHIFT];
75static struct per_process_info *ppi_list;
76
77#define S_OPTS "i:o:b:st"
78static struct option l_opts[] = {
79 {
80 .name = "input",
81 .has_arg = 1,
82 .flag = NULL,
83 .val = 'i'
84 },
85 {
86 .name = "output",
87 .has_arg = 1,
88 .flag = NULL,
89 .val = 'o'
90 },
91 {
92 .name = "batch",
93 .has_arg = 1,
94 .flag = NULL,
95 .val = 'b'
96 },
97 {
98 .name = "per program stats",
99 .has_arg = 0,
100 .flag = NULL,
101 .val = 's'
102 },
103 {
104 .name = "track ios",
105 .has_arg = 0,
106 .flag = NULL,
107 .val = 't'
108 },
109 {
110 .name = NULL,
111 .has_arg = 0,
112 .flag = NULL,
113 .val = 0
114 }
115};
116
117static struct rb_root rb_sort_root;
118static struct rb_root rb_track_root;
119
120/*
121 * for sorting the displayed output
122 */
123struct trace {
124 struct blk_io_trace *bit;
125 struct rb_node rb_node;
126};
127
128/*
129 * for tracking individual ios
130 */
131struct io_track {
132 struct rb_node rb_node;
133
134 __u64 sector;
135 __u32 pid;
136 unsigned long long allocation_time;
137 unsigned long long queue_time;
138 unsigned long long dispatch_time;
139 unsigned long long completion_time;
140};
141
142static int max_cpus;
143static struct per_cpu_info *per_cpu_info;
144
145static unsigned long long events;
146
147static char *dev, *output_name;
148static FILE *ofp;
149
150static int per_process_stats;
151static int track_ios;
152
153#define RB_BATCH_DEFAULT (1024)
154static int rb_batch = RB_BATCH_DEFAULT;
155
156#define is_done() (*(volatile int *)(&done))
157static volatile int done;
158
/*
 * Fold a value (a pid here) into a PPI_HASH_SHIFT-bit bucket index
 * using a multiplicative hash; the multiplier is a golden-ratio-style
 * prime chosen per machine word size.
 */
static inline unsigned long hash_long(unsigned long val)
{
#if __WORDSIZE == 32
	val *= 0x9e370001UL;
#elif __WORDSIZE == 64
	val *= 0x9e37fffffffc0001UL;
#else
#error unknown word size
#endif

	/* keep the top PPI_HASH_SHIFT bits of the product */
	return val >> (__WORDSIZE - PPI_HASH_SHIFT);
}
171
172static inline void add_process_to_hash(struct per_process_info *ppi)
173{
174 const int hash_idx = hash_long(ppi->pid);
175
176 ppi->hash_next = ppi_hash[hash_idx];
177 ppi_hash[hash_idx] = ppi;
178}
179
180static inline void add_process_to_list(struct per_process_info *ppi)
181{
182 ppi->list_next = ppi_list;
183 ppi_list = ppi;
184}
185
186static struct per_process_info *find_process_by_pid(__u32 pid)
187{
188 const int hash_idx = hash_long(pid);
189 struct per_process_info *ppi;
190
191 ppi = ppi_hash[hash_idx];
192 while (ppi) {
193 if (ppi->pid == pid)
194 return ppi;
195
196 ppi = ppi->hash_next;
197 }
198
199 return NULL;
200}
201
202static inline int trace_rb_insert(struct trace *t)
203{
204 struct rb_node **p = &rb_sort_root.rb_node;
205 struct rb_node *parent = NULL;
206 struct trace *__t;
207
208 while (*p) {
209 parent = *p;
210 __t = rb_entry(parent, struct trace, rb_node);
211
212 if (t->bit->sequence < __t->bit->sequence)
213 p = &(*p)->rb_left;
214 else if (t->bit->sequence > __t->bit->sequence)
215 p = &(*p)->rb_right;
216 else {
217 fprintf(stderr, "sequence alias!\n");
218 return 1;
219 }
220 }
221
222 rb_link_node(&t->rb_node, parent, p);
223 rb_insert_color(&t->rb_node, &rb_sort_root);
224 return 0;
225}
226
227static inline int track_rb_insert(struct io_track *iot)
228{
229 struct rb_node **p = &rb_track_root.rb_node;
230 struct rb_node *parent = NULL;
231 struct io_track *__iot;
232
233 while (*p) {
234 parent = *p;
235
236 __iot = rb_entry(parent, struct io_track, rb_node);
237
238 if (iot->sector < __iot->sector)
239 p = &(*p)->rb_left;
240 else if (iot->sector > __iot->sector)
241 p = &(*p)->rb_right;
242 else {
243 fprintf(stderr, "sequence alias!\n");
244 return 1;
245 }
246 }
247
248 rb_link_node(&iot->rb_node, parent, p);
249 rb_insert_color(&iot->rb_node, &rb_track_root);
250 return 0;
251}
252
253static struct io_track *__find_track(__u64 sector)
254{
255 struct rb_node **p = &rb_track_root.rb_node;
256 struct rb_node *parent = NULL;
257 struct io_track *__iot;
258
259 while (*p) {
260 parent = *p;
261
262 __iot = rb_entry(parent, struct io_track, rb_node);
263
264 if (sector < __iot->sector)
265 p = &(*p)->rb_left;
266 else if (sector > __iot->sector)
267 p = &(*p)->rb_right;
268 else
269 return __iot;
270 }
271
272 return NULL;
273}
274
275static struct io_track *find_track(__u32 pid, __u64 sector)
276{
277 struct io_track *iot = __find_track(sector);
278
279 iot = __find_track(sector);
280 if (!iot) {
281 iot = malloc(sizeof(*iot));
282 iot->pid = pid;
283 iot->sector = sector;
284 track_rb_insert(iot);
285 }
286
287 return iot;
288}
289
/*
 * A queued io was merged with an already-tracked request: re-key the
 * tracked entry so it starts at the combined io's start sector.
 */
static void log_track_merge(struct blk_io_trace *t)
{
	struct io_track *iot;

	if (!track_ios)
		return;
	if ((t->action & BLK_TC_ACT(BLK_TC_FS)) == 0)
		return;

	/*
	 * NOTE(review): t->bytes >> 10 converts bytes to KiB, but
	 * t->sector counts 512-byte sectors -- this looks like it
	 * should be >> 9.  TODO confirm the intended units.
	 */
	iot = __find_track(t->sector - (t->bytes >> 10));
	if (!iot) {
		fprintf(stderr, "Trying to merge on non-existing request\n");
		return;
	}

	/* sector is the rb key: remove, adjust, re-insert */
	rb_erase(&iot->rb_node, &rb_track_root);
	iot->sector -= t->bytes >> 10;
	track_rb_insert(iot);
}
309
310static void log_track_getrq(struct blk_io_trace *t)
311{
312 struct io_track *iot;
313
314 if (!track_ios)
315 return;
316
317 iot = find_track(t->pid, t->sector);
318 iot->allocation_time = t->time;
319}
320
321
322/*
323 * return time between rq allocation and queue
324 */
325static unsigned long long log_track_queue(struct blk_io_trace *t)
326{
327 unsigned long long elapsed;
328 struct io_track *iot;
329
330 if (!track_ios)
331 return -1;
332
333 iot = find_track(t->pid, t->sector);
334 iot->queue_time = t->time;
335 elapsed = iot->queue_time - iot->allocation_time;
336
337 if (per_process_stats) {
338 struct per_process_info *ppi = find_process_by_pid(iot->pid);
339
340 if (ppi && elapsed > ppi->longest_allocation_wait)
341 ppi->longest_allocation_wait = elapsed;
342 }
343
344 return elapsed;
345}
346
347/*
348 * return time between queue and issue
349 */
350static unsigned long long log_track_issue(struct blk_io_trace *t)
351{
352 unsigned long long elapsed;
353 struct io_track *iot;
354
355 if (!track_ios)
356 return -1;
357 if ((t->action & BLK_TC_ACT(BLK_TC_FS)) == 0)
358 return -1;
359
360 iot = __find_track(t->sector);
361 if (!iot) {
362 fprintf(stderr, "Trying to issue on non-existing request\n");
363 return -1;
364 }
365
366 iot->dispatch_time = t->time;
367 elapsed = iot->dispatch_time - iot->queue_time;
368
369 if (per_process_stats) {
370 struct per_process_info *ppi = find_process_by_pid(iot->pid);
371
372 if (ppi && elapsed > ppi->longest_dispatch_wait)
373 ppi->longest_dispatch_wait = elapsed;
374 }
375
376 return elapsed;
377}
378
379/*
380 * return time between dispatch and complete
381 */
382static unsigned long long log_track_complete(struct blk_io_trace *t)
383{
384 unsigned long long elapsed;
385 struct io_track *iot;
386
387 if (!track_ios)
388 return -1;
389 if ((t->action & BLK_TC_ACT(BLK_TC_FS)) == 0)
390 return -1;
391
392 iot = __find_track(t->sector);
393 if (!iot) {
394 fprintf(stderr, "Trying to dispatch on non-existing request\n");
395 return -1;
396 }
397
398 iot->completion_time = t->time;
399 elapsed = iot->completion_time - iot->dispatch_time;
400
401 if (per_process_stats) {
402 struct per_process_info *ppi = find_process_by_pid(iot->pid);
403
404 if (ppi && elapsed > ppi->longest_completion_wait)
405 ppi->longest_completion_wait = elapsed;
406 }
407
408 /*
409 * kill the trace, we don't need it after completion
410 */
411 rb_erase(&iot->rb_node, &rb_track_root);
412 free(iot);
413
414 return elapsed;
415}
416
417
418static struct io_stats *find_process_io_stats(__u32 pid, char *name)
419{
420 struct per_process_info *ppi = find_process_by_pid(pid);
421
422 if (!ppi) {
423 ppi = malloc(sizeof(*ppi));
424 memset(ppi, 0, sizeof(*ppi));
425 strncpy(ppi->name, name, sizeof(ppi->name));
426 ppi->pid = pid;
427 add_process_to_hash(ppi);
428 add_process_to_list(ppi);
429 }
430
431 return &ppi->io_stats;
432}
433
434static void resize_cpu_info(int cpuid)
435{
436 int new_space, new_max = cpuid + 1;
437 char *new_start;
438
439 per_cpu_info = realloc(per_cpu_info, new_max * sizeof(*per_cpu_info));
440 if (!per_cpu_info) {
441 fprintf(stderr, "Cannot allocate CPU info -- %d x %d bytes\n",
442 new_max, (int) sizeof(*per_cpu_info));
443 exit(1);
444 }
445
446 new_start = (char *)per_cpu_info + (max_cpus * sizeof(*per_cpu_info));
447 new_space = (new_max - max_cpus) * sizeof(*per_cpu_info);
448 memset(new_start, 0, new_space);
449 max_cpus = new_max;
450}
451
452static struct per_cpu_info *get_cpu_info(int cpu)
453{
454 struct per_cpu_info *pci;
455
456 if (cpu >= max_cpus)
457 resize_cpu_info(cpu);
458
459 /*
460 * ->cpu might already be set, but just set it unconditionally
461 */
462 pci = &per_cpu_info[cpu];
463 pci->cpu = cpu;
464
465 return pci;
466}
467
468static inline void check_time(struct blk_io_trace *bit)
469{
470 unsigned long long this = bit->time;
471 unsigned long long last = last_reported_time;
472
473 backwards = (this < last) ? 'B' : ' ';
474 last_reported_time = this;
475}
476
477static inline void __account_m(struct io_stats *ios, struct blk_io_trace *t,
478 int rw)
479{
480 if (rw) {
481 ios->mwrites++;
482 ios->qwrite_kb += t->bytes >> 10;
483 } else {
484 ios->mreads++;
485 ios->qread_kb += t->bytes >> 10;
486 }
487}
488
489static inline void account_m(struct blk_io_trace *t, struct per_cpu_info *pci,
490 int rw)
491{
492 __account_m(&pci->io_stats, t, rw);
493
494 if (per_process_stats) {
495 struct io_stats *ios = find_process_io_stats(t->pid, t->comm);
496
497 __account_m(ios, t, rw);
498 }
499}
500
501static inline void __account_q(struct io_stats *ios, struct blk_io_trace *t,
502 int rw)
503{
504 if (rw) {
505 ios->qwrites++;
506 ios->qwrite_kb += t->bytes >> 10;
507 } else {
508 ios->qreads++;
509 ios->qread_kb += t->bytes >> 10;
510 }
511}
512
513static inline void account_q(struct blk_io_trace *t, struct per_cpu_info *pci,
514 int rw)
515{
516 __account_q(&pci->io_stats, t, rw);
517
518 if (per_process_stats) {
519 struct io_stats *ios = find_process_io_stats(t->pid, t->comm);
520
521 __account_q(ios, t, rw);
522 }
523}
524
525static inline void __account_c(struct io_stats *ios, int rw, unsigned int bytes)
526{
527 if (rw) {
528 ios->cwrites++;
529 ios->cwrite_kb += bytes >> 10;
530 } else {
531 ios->creads++;
532 ios->cread_kb += bytes >> 10;
533 }
534}
535
536static inline void account_c(struct blk_io_trace *t, struct per_cpu_info *pci,
537 int rw, int bytes)
538{
539 __account_c(&pci->io_stats, rw, bytes);
540
541 if (per_process_stats) {
542 struct io_stats *ios = find_process_io_stats(t->pid, t->comm);
543
544 __account_c(ios, rw, bytes);
545 }
546}
547
548static inline void __account_i(struct io_stats *ios, int rw, unsigned int bytes)
549{
550 if (rw) {
551 ios->iwrites++;
552 ios->iwrite_kb += bytes >> 10;
553 } else {
554 ios->ireads++;
555 ios->iread_kb += bytes >> 10;
556 }
557}
558
559static inline void account_i(struct blk_io_trace *t, struct per_cpu_info *pci,
560 int rw)
561{
562 __account_i(&pci->io_stats, rw, t->bytes);
563
564 if (per_process_stats) {
565 struct io_stats *ios = find_process_io_stats(t->pid, t->comm);
566
567 __account_i(ios, rw, t->bytes);
568 }
569}
570
571static void output(struct per_cpu_info *pci, char *s)
572{
573 fprintf(ofp, "%s", s);
574}
575
576static char hstring[256];
577static char tstring[256];
578
579static inline char *setup_header(struct per_cpu_info *pci,
580 struct blk_io_trace *t, char act)
581{
582 int w = t->action & BLK_TC_ACT(BLK_TC_WRITE);
583 int b = t->action & BLK_TC_ACT(BLK_TC_BARRIER);
584 int s = t->action & BLK_TC_ACT(BLK_TC_SYNC);
585 char rwbs[4];
586 int i = 0;
587
588 if (w)
589 rwbs[i++] = 'W';
590 else
591 rwbs[i++] = 'R';
592 if (b)
593 rwbs[i++] = 'B';
594 if (s)
595 rwbs[i++] = 'S';
596
597 rwbs[i] = '\0';
598
599 sprintf(hstring, "%2d %8ld %5Lu.%09Lu %5u %c %3s",
600 pci->cpu,
601 (unsigned long)t->sequence, SECONDS(t->time),
602 NANO_SECONDS(t->time), t->pid, act, rwbs);
603
604 return hstring;
605}
606
607static void log_complete(struct per_cpu_info *pci, struct blk_io_trace *t,
608 char act)
609{
610 unsigned long long elapsed = log_track_complete(t);
611
612 if (elapsed != -1ULL) {
613 double usec = (double) elapsed / 1000;
614
615 sprintf(tstring,"%s %Lu + %u (%8.2f) [%d]\n",
616 setup_header(pci, t, act),
617 (unsigned long long)t->sector, t->bytes >> 9,
618 usec, t->error);
619 } else {
620 sprintf(tstring,"%s %Lu + %u [%d]\n", setup_header(pci, t, act),
621 (unsigned long long)t->sector, t->bytes >> 9, t->error);
622 }
623
624 output(pci, tstring);
625}
626
627static void log_queue(struct per_cpu_info *pci, struct blk_io_trace *t,
628 char act)
629{
630 unsigned long long elapsed = log_track_queue(t);
631
632 if (elapsed != -1ULL) {
633 double usec = (double) elapsed / 1000;
634
635 sprintf(tstring,"%s %Lu + %u (%8.2f) [%s]\n",
636 setup_header(pci, t, act),
637 (unsigned long long)t->sector, t->bytes >> 9,
638 usec, t->comm);
639 } else {
640 sprintf(tstring,"%s %Lu + %u [%s]\n", setup_header(pci, t, act),
641 (unsigned long long)t->sector, t->bytes >> 9, t->comm);
642 }
643 output(pci, tstring);
644}
645
646static void log_issue(struct per_cpu_info *pci, struct blk_io_trace *t,
647 char act)
648{
649 unsigned long long elapsed = log_track_issue(t);
650
651 if (elapsed != -1ULL) {
652 double usec = (double) elapsed / 1000;
653
654 sprintf(tstring,"%s %Lu + %u (%8.2f) [%s]\n",
655 setup_header(pci, t, act),
656 (unsigned long long)t->sector, t->bytes >> 9,
657 usec, t->comm);
658 } else {
659 sprintf(tstring,"%s %Lu + %u [%s]\n", setup_header(pci, t, act),
660 (unsigned long long)t->sector, t->bytes >> 9, t->comm);
661 }
662
663 output(pci, tstring);
664}
665
666static void log_merge(struct per_cpu_info *pci, struct blk_io_trace *t,
667 char act)
668{
669 log_track_merge(t);
670
671 sprintf(tstring,"%s %Lu + %u [%s]\n", setup_header(pci, t, act),
672 (unsigned long long)t->sector, t->bytes >> 9, t->comm);
673 output(pci, tstring);
674}
675
676static void log_generic(struct per_cpu_info *pci, struct blk_io_trace *t,
677 char act)
678{
679 sprintf(tstring,"%s %Lu + %u [%s]\n", setup_header(pci, t, act),
680 (unsigned long long)t->sector, t->bytes >> 9, t->comm);
681 output(pci, tstring);
682}
683
684static int log_pc(struct per_cpu_info *pci, struct blk_io_trace *t, char act)
685{
686 unsigned char *buf;
687 int i;
688
689 sprintf(tstring,"%s ", setup_header(pci, t, act));
690 output(pci, tstring);
691
692 buf = (unsigned char *) t + sizeof(*t);
693 for (i = 0; i < t->pdu_len; i++) {
694 sprintf(tstring,"%02x ", buf[i]);
695 output(pci, tstring);
696 }
697
698 if (act == 'C') {
699 sprintf(tstring,"[%d]\n", t->error);
700 output(pci, tstring);
701 } else {
702 sprintf(tstring,"[%s]\n", t->comm);
703 output(pci, tstring);
704 }
705 return 0;
706}
707
708static int dump_trace_pc(struct blk_io_trace *t, struct per_cpu_info *pci)
709{
710 int ret = 0;
711
712 switch (t->action & 0xffff) {
713 case __BLK_TA_QUEUE:
714 log_generic(pci, t, 'Q');
715 break;
716 case __BLK_TA_GETRQ:
717 log_generic(pci, t, 'G');
718 break;
719 case __BLK_TA_SLEEPRQ:
720 log_generic(pci, t, 'S');
721 break;
722 case __BLK_TA_REQUEUE:
723 log_generic(pci, t, 'R');
724 break;
725 case __BLK_TA_ISSUE:
726 ret = log_pc(pci, t, 'D');
727 break;
728 case __BLK_TA_COMPLETE:
729 log_pc(pci, t, 'C');
730 break;
731 default:
732 fprintf(stderr, "Bad pc action %x\n", t->action);
733 ret = 1;
734 break;
735 }
736
737 return ret;
738}
739
740static void dump_trace_fs(struct blk_io_trace *t, struct per_cpu_info *pci)
741{
742 int w = t->action & BLK_TC_ACT(BLK_TC_WRITE);
743 int act = t->action & 0xffff;
744
745 switch (act) {
746 case __BLK_TA_QUEUE:
747 account_q(t, pci, w);
748 log_queue(pci, t, 'Q');
749 break;
750 case __BLK_TA_BACKMERGE:
751 account_m(t, pci, w);
752 log_merge(pci, t, 'M');
753 break;
754 case __BLK_TA_FRONTMERGE:
755 account_m(t, pci, w);
756 log_merge(pci, t, 'F');
757 break;
758 case __BLK_TA_GETRQ:
759 log_track_getrq(t);
760 log_generic(pci, t, 'G');
761 break;
762 case __BLK_TA_SLEEPRQ:
763 log_generic(pci, t, 'S');
764 break;
765 case __BLK_TA_REQUEUE:
766 account_c(t, pci, w, -t->bytes);
767 log_queue(pci, t, 'R');
768 break;
769 case __BLK_TA_ISSUE:
770 account_i(t, pci, w);
771 log_issue(pci, t, 'D');
772 break;
773 case __BLK_TA_COMPLETE:
774 account_c(t, pci, w, t->bytes);
775 log_complete(pci, t, 'C');
776 break;
777 default:
778 fprintf(stderr, "Bad fs action %x\n", t->action);
779 break;
780 }
781}
782
783static int dump_trace(struct blk_io_trace *t, struct per_cpu_info *pci)
784{
785 int ret = 0;
786
787 if (t->action & BLK_TC_ACT(BLK_TC_PC))
788 ret = dump_trace_pc(t, pci);
789 else
790 dump_trace_fs(t, pci);
791
792 events++;
793 return ret;
794}
795
/*
 * Print one io_stats counter block under the given heading.  The %'
 * printf flag adds locale thousands grouping (LC_NUMERIC is set in
 * main()).
 */
static void dump_io_stats(struct io_stats *ios, char *msg)
{
	fprintf(ofp, "%s\n", msg);

	fprintf(ofp, " Reads Queued: %'8lu, %'8LuKiB\t", ios->qreads, ios->qread_kb);
	fprintf(ofp, " Writes Queued: %'8lu, %'8LuKiB\n", ios->qwrites,ios->qwrite_kb);

	fprintf(ofp, " Read Dispatches: %'8lu, %'8LuKiB\t", ios->ireads, ios->iread_kb);
	fprintf(ofp, " Write Dispatches: %'8lu, %'8LuKiB\n", ios->iwrites,ios->iwrite_kb);
	fprintf(ofp, " Reads Completed: %'8lu, %'8LuKiB\t", ios->creads, ios->cread_kb);
	fprintf(ofp, " Writes Completed: %'8lu, %'8LuKiB\n", ios->cwrites,ios->cwrite_kb);
	/* merges have no KiB column; pad with spaces to stay aligned */
	fprintf(ofp, " Read Merges: %'8lu%8c\t", ios->mreads, ' ');

	fprintf(ofp, " Write Merges: %'8lu\n", ios->mwrites);
}
811
812static void dump_wait_stats(struct per_process_info *ppi)
813{
814 double await = (double) ppi->longest_allocation_wait / 1000;
815 double dwait = (double) ppi->longest_dispatch_wait / 1000;
816 double cwait = (double) ppi->longest_completion_wait / 1000;
817
818 fprintf(ofp, " Wait: Alloc=%f, Dispatch=%f, Completion=%f\n",
819 await, dwait, cwait);
820}
821
822static void show_process_stats(void)
823{
824 struct per_process_info *ppi;
825
826 ppi = ppi_list;
827 while (ppi) {
828 dump_io_stats(&ppi->io_stats, ppi->name);
829 dump_wait_stats(ppi);
830 ppi = ppi->list_next;
831 }
832
833 fprintf(ofp, "\n");
834}
835
/*
 * Print per-CPU stats for every CPU that contributed events, a summed
 * "Total:" block when more than one CPU was active, and the overall
 * event count.
 */
static void show_cpu_stats(void)
{
	/* foo accumulates the grand totals across CPUs */
	struct per_cpu_info foo, *pci;
	struct io_stats *ios;
	int i, pci_events = 0;

	memset(&foo, 0, sizeof(foo));

	for (i = 0; i < max_cpus; i++) {
		char cpu[8];

		pci = &per_cpu_info[i];
		ios = &pci->io_stats;

		/* skip CPUs that produced no events */
		if (!pci->nelems)
			continue;

		foo.io_stats.qreads += ios->qreads;
		foo.io_stats.qwrites += ios->qwrites;
		foo.io_stats.creads += ios->creads;
		foo.io_stats.cwrites += ios->cwrites;
		foo.io_stats.mreads += ios->mreads;
		foo.io_stats.mwrites += ios->mwrites;
		foo.io_stats.ireads += ios->ireads;
		foo.io_stats.iwrites += ios->iwrites;
		foo.io_stats.qread_kb += ios->qread_kb;
		foo.io_stats.qwrite_kb += ios->qwrite_kb;
		foo.io_stats.cread_kb += ios->cread_kb;
		foo.io_stats.cwrite_kb += ios->cwrite_kb;
		foo.io_stats.iread_kb += ios->iread_kb;
		foo.io_stats.iwrite_kb += ios->iwrite_kb;

		snprintf(cpu, sizeof(cpu) - 1, "CPU%d:", i);
		dump_io_stats(ios, cpu);
		pci_events++;
	}

	/* a total is only interesting when more than one CPU reported */
	if (pci_events > 1) {
		fprintf(ofp, "\n");
		dump_io_stats(&foo.io_stats, "Total:");
	}

	fprintf(ofp, "\nEvents: %'Lu\n", events);
}
880
881#define min(a, b) ((a) < (b) ? (a) : (b))
882
883static struct blk_io_trace *find_trace(void *p, unsigned long offset, int nr)
884{
885 unsigned long max_offset = min(offset,nr * sizeof(struct blk_io_trace));
886 unsigned long off;
887 struct blk_io_trace *bit;
888 __u32 magic;
889
890 for (off = 0; off < max_offset; off++) {
891 bit = p + off;
892
893 magic = be32_to_cpu(bit->magic);
894 if ((magic & 0xffffff00) == BLK_IO_TRACE_MAGIC)
895 return bit;
896 }
897
898 return NULL;
899}
900
901static int sort_entries(void *traces, unsigned long offset, int nr)
902{
903 struct per_cpu_info *pci;
904 struct blk_io_trace *bit;
905 struct trace *t;
906 void *start = traces;
907 int nelems = 0;
908
909 while (traces - start <= offset - sizeof(*bit)) {
910 if (!nr)
911 break;
912
913 bit = find_trace(traces, offset - (traces - start), nr);
914 if (!bit)
915 break;
916
917 t = malloc(sizeof(*t));
918 t->bit = bit;
919 memset(&t->rb_node, 0, sizeof(t->rb_node));
920
921 trace_to_cpu(bit);
922
923 if (verify_trace(bit))
924 break;
925
926 pci = get_cpu_info(bit->cpu);
927 pci->nelems++;
928
929 if (trace_rb_insert(t))
930 return -1;
931
932 traces += sizeof(*bit) + bit->pdu_len;
933 nelems++;
934 nr--;
935 }
936
937 return nelems;
938}
939
940static void free_entries_rb(void)
941{
942 struct rb_node *n;
943
944 while ((n = rb_first(&rb_sort_root)) != NULL) {
945 struct trace *t = rb_entry(n, struct trace, rb_node);
946
947 rb_erase(&t->rb_node, &rb_sort_root);
948 free(t);
949 }
950}
951
952static void show_entries_rb(void)
953{
954 struct blk_io_trace *bit;
955 struct rb_node *n;
956 struct trace *t;
957 int cpu;
958
959 n = rb_first(&rb_sort_root);
960 if (!n)
961 return;
962
963 do {
964 t = rb_entry(n, struct trace, rb_node);
965 bit = t->bit;
966
967 cpu = bit->cpu;
968 if (cpu > max_cpus) {
969 fprintf(stderr, "CPU number too large (%d)\n", cpu);
970 break;
971 }
972
973 if (genesis_time == 0)
974 genesis_time = bit->time;
975 bit->time -= genesis_time;
976
977 check_time(bit);
978
979 if (dump_trace(bit, &per_cpu_info[cpu]))
980 break;
981
982 } while ((n = rb_next(n)) != NULL);
983}
984
/*
 * Read exactly 'bytes' bytes from fd into buffer.  With block == 0 the
 * fd is switched to non-blocking mode first (and left that way -- the
 * original flags are not restored afterwards).
 *
 * Returns 0 on a full read, 1 on EOF, -1 on error or EAGAIN.
 * NOTE(review): a partial read followed by EAGAIN returns -1 and the
 * partially filled buffer is abandoned by callers -- presumably
 * non-blocking mode is only used at a trace boundary; confirm against
 * read_sort_events().
 */
static int read_data(int fd, void *buffer, int bytes, int block)
{
	int ret, bytes_left, fl;
	void *p;

	fl = fcntl(fd, F_GETFL);

	if (!block)
		fcntl(fd, F_SETFL, fl | O_NONBLOCK);
	else
		fcntl(fd, F_SETFL, fl & ~O_NONBLOCK);

	bytes_left = bytes;
	p = buffer;
	while (bytes_left > 0) {
		ret = read(fd, p, bytes_left);
		if (!ret)
			return 1;	/* EOF */
		else if (ret < 0) {
			/* EAGAIN is expected in non-blocking mode */
			if (errno != EAGAIN)
				perror("read");
			return -1;
		} else {
			p += ret;
			bytes_left -= ret;
		}
	}

	return 0;
}
1015
/*
 * Process the per-cpu dump files "<dev>_out.<cpu>" in ascending cpu
 * order until the first missing file.  Each file is slurped into
 * memory, its events are inserted into the global sort tree, and the
 * merged stream is displayed once at the end.  Returns 0 on success,
 * 1 when no input files were found.
 */
static int do_file(void)
{
	int i, nfiles;

	for (i = 0, nfiles = 0;; i++, nfiles++) {
		struct per_cpu_info *pci;
		struct stat st;
		void *tb;

		pci = get_cpu_info(i);

		snprintf(pci->fname, sizeof(pci->fname)-1,"%s_out.%d", dev, i);
		if (stat(pci->fname, &st) < 0)
			break;
		/* empty file: count it, but nothing to parse */
		if (!st.st_size)
			continue;

		printf("Processing %s\n", pci->fname);

		/*
		 * tb is deliberately never freed: the sort tree keeps
		 * pointers into this buffer until display is done.
		 * NOTE(review): the malloc result is not checked.
		 */
		tb = malloc(st.st_size);

		pci->fd = open(pci->fname, O_RDONLY);
		if (pci->fd < 0) {
			perror(pci->fname);
			break;
		}

		/* whole file, blocking read */
		if (read_data(pci->fd, tb, st.st_size, 1))
			break;

		if (sort_entries(tb, st.st_size, ~0U) == -1)
			break;

		close(pci->fd);
		printf("\t%2d %10s %15d\n", i, pci->fname, pci->nelems);

	}

	if (!nfiles) {
		fprintf(stderr, "No files found\n");
		return 1;
	}

	show_entries_rb();
	return 0;
}
1062
/*
 * Double the event buffer, zeroing the bytes beyond 'offset'.  Exits
 * on allocation failure (the old code ignored a failed realloc and
 * then dereferenced the NULL result in memset).
 */
static void resize_buffer(void **buffer, long *size, long offset)
{
	long old_size = *size;
	void *tmp;

	*size *= 2;
	tmp = realloc(*buffer, *size);
	if (!tmp) {
		fprintf(stderr, "Cannot grow buffer to %ld bytes\n", *size);
		exit(1);
	}
	*buffer = tmp;
	memset(*buffer + offset, 0, *size - old_size);
}
1071
/*
 * Pull up to rb_batch events from fd into *buffer (allocated on first
 * call, grown on demand), keeping them in raw on-the-wire byte order.
 * The first event of a batch is read blocking; later reads are
 * non-blocking so a short batch is returned promptly.  Returns the
 * number of whole events buffered.
 */
static int read_sort_events(int fd, void **buffer, long *max_offset)
{
	long offset;
	int events;

	if (*max_offset == 0) {
		*max_offset = 128 * sizeof(struct blk_io_trace);
		*buffer = malloc(*max_offset);
	}

	events = 0;
	offset = 0;

	do {
		struct blk_io_trace *t;
		int pdu_len;

		/* make room for at least one more event header */
		if (*max_offset - offset < sizeof(*t))
			resize_buffer(buffer, max_offset, offset);

		if (read_data(fd, *buffer + offset, sizeof(*t), !events)) {
			/* nothing available: return what we have, or retry */
			if (events)
				break;

			usleep(1000);
			continue;
		}

		t = *buffer + offset;
		offset += sizeof(*t);

		/* the header is still big endian at this point */
		pdu_len = be16_to_cpu(t->pdu_len);
		if (pdu_len) {
			if (*max_offset - offset < pdu_len)
				resize_buffer(buffer, max_offset, offset);

			/* pdu follows the header; read it blocking */
			if (read_data(fd, *buffer + offset, pdu_len, 1))
				break;

			offset += pdu_len;
		}

		events++;
	} while (!is_done() && events < rb_batch);

	return events;
}
1119
/*
 * Live mode: consume a blktrace stream on stdin in batches, sorting
 * and displaying each batch before reading the next.  Returns 0.
 */
static int do_stdin(void)
{
	long max_offset = 0;
	void *buf = NULL;
	int fd = dup(STDIN_FILENO);

	for (;;) {
		int nevents = read_sort_events(fd, &buf, &max_offset);

		if (!nevents)
			break;

		if (sort_entries(buf, ~0UL, nevents) == -1)
			break;

		show_entries_rb();
		free_entries_rb();
	}

	free(buf);
	close(fd);
	return 0;
}
1146
1147static void flush_output(void)
1148{
1149 fflush(ofp);
1150}
1151
1152static void handle_sigint(int sig)
1153{
1154 done = 1;
1155 flush_output();
1156}
1157
/* print a one-line usage summary to stderr */
static void usage(char *prog)
{
	fprintf(stderr, "Usage: %s -i <name> [-o <output>][-s]\n", prog);
}
1162
/*
 * Parse options, set up signals/locale/output buffering, then parse
 * either the per-cpu dump files for -i <dev> or a live stream when
 * the input name is "-", and print the accumulated statistics.
 */
int main(int argc, char *argv[])
{
	char *ofp_buffer;
	int c, ret, mode;

	/* -i input base, -o output base, -b batch size,
	 * -s per-process stats, -t per-io tracking */
	while ((c = getopt_long(argc, argv, S_OPTS, l_opts, NULL)) != -1) {
		switch (c) {
		case 'i':
			dev = optarg;
			break;
		case 'o':
			output_name = optarg;
			break;
		case 'b':
			rb_batch = atoi(optarg);
			if (rb_batch <= 0)
				rb_batch = RB_BATCH_DEFAULT;
			break;
		case 's':
			per_process_stats = 1;
			break;
		case 't':
			track_ios = 1;
			break;
		default:
			usage(argv[0]);
			return 1;
		}
	}

	if (!dev) {
		usage(argv[0]);
		return 1;
	}

	memset(&rb_sort_root, 0, sizeof(rb_sort_root));
	memset(&rb_track_root, 0, sizeof(rb_track_root));

	signal(SIGINT, handle_sigint);
	signal(SIGHUP, handle_sigint);
	signal(SIGTERM, handle_sigint);

	/* enables the %' thousands grouping used in the stats output */
	setlocale(LC_NUMERIC, "en_US");

	/* line-buffer to the terminal, fully buffer to a log file */
	if (!output_name) {
		ofp = fdopen(STDOUT_FILENO, "w");
		mode = _IOLBF;
	} else {
		char ofname[128];

		snprintf(ofname, sizeof(ofname) - 1, "%s.log", output_name);
		ofp = fopen(ofname, "w");
		mode = _IOFBF;
	}

	if (!ofp) {
		perror("fopen");
		return 1;
	}

	/* NOTE(review): malloc result is not checked before setvbuf */
	ofp_buffer = malloc(4096);
	if (setvbuf(ofp, ofp_buffer, mode, 4096)) {
		perror("setvbuf");
		return 1;
	}

	/* "-" means a live pipe from blktrace on stdin */
	if (!strcmp(dev, "-"))
		ret = do_stdin();
	else
		ret = do_file();

	if (per_process_stats)
		show_process_stats();

	show_cpu_stats();

	flush_output();
	return ret;
}