iowatcher: Check program exit code properly
[blktrace.git] / iowatcher / blkparse.c
1 /*
2  * Copyright (C) 2012 Fusion-io
3  *
4  *  This program is free software; you can redistribute it and/or
5  *  modify it under the terms of the GNU General Public
6  *  License v2 as published by the Free Software Foundation.
7  *
8  *  This program is distributed in the hope that it will be useful,
9  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
10  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
11  *  GNU General Public License for more details.
12  *
13  *  You should have received a copy of the GNU General Public License
14  *  along with this program; if not, write to the Free Software
15  *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
16  *
17  *  Parts of this file were imported from Jens Axboe's blktrace sources (also GPL)
18  */
19 #include <sys/types.h>
20 #include <sys/stat.h>
21 #include <fcntl.h>
22 #include <unistd.h>
23 #include <stdlib.h>
24 #include <stdio.h>
25 #include <math.h>
26 #include <inttypes.h>
27 #include <string.h>
28 #include <asm/types.h>
29 #include <errno.h>
30 #include <sys/mman.h>
31 #include <time.h>
32 #include <math.h>
33 #include <dirent.h>
34
35 #include "plot.h"
36 #include "blkparse.h"
37 #include "list.h"
38 #include "tracers.h"
39
40 #define IO_HASH_TABLE_BITS  11
41 #define IO_HASH_TABLE_SIZE (1 << IO_HASH_TABLE_BITS)
42 static struct list_head io_hash_table[IO_HASH_TABLE_SIZE];
43 static u64 ios_in_flight = 0;
44
45 #define PROCESS_HASH_TABLE_BITS 7
46 #define PROCESS_HASH_TABLE_SIZE (1 << PROCESS_HASH_TABLE_BITS)
47 static struct list_head process_hash_table[PROCESS_HASH_TABLE_SIZE];
48
49 extern int plot_io_action;
50 extern int io_per_process;
51
52 static const int line_len = 1024;
53 static char line[1024];
54
55 /*
56  * Trace categories
57  */
58 enum {
59         BLK_TC_READ     = 1 << 0,       /* reads */
60         BLK_TC_WRITE    = 1 << 1,       /* writes */
61         BLK_TC_FLUSH    = 1 << 2,       /* flush */
62         BLK_TC_SYNC     = 1 << 3,       /* sync */
63         BLK_TC_QUEUE    = 1 << 4,       /* queueing/merging */
64         BLK_TC_REQUEUE  = 1 << 5,       /* requeueing */
65         BLK_TC_ISSUE    = 1 << 6,       /* issue */
66         BLK_TC_COMPLETE = 1 << 7,       /* completions */
67         BLK_TC_FS       = 1 << 8,       /* fs requests */
68         BLK_TC_PC       = 1 << 9,       /* pc requests */
69         BLK_TC_NOTIFY   = 1 << 10,      /* special message */
70         BLK_TC_AHEAD    = 1 << 11,      /* readahead */
71         BLK_TC_META     = 1 << 12,      /* metadata */
72         BLK_TC_DISCARD  = 1 << 13,      /* discard requests */
73         BLK_TC_DRV_DATA = 1 << 14,      /* binary driver data */
74         BLK_TC_FUA      = 1 << 15,      /* fua requests */
75
76         BLK_TC_END      = 1 << 15,      /* we've run out of bits! */
77 };
78
79 #define BLK_TC_SHIFT            (16)
80 #define BLK_TC_ACT(act)         ((act) << BLK_TC_SHIFT)
81 #define BLK_DATADIR(a) (((a) >> BLK_TC_SHIFT) & (BLK_TC_READ | BLK_TC_WRITE))
82
83 /*
84  * Basic trace actions
85  */
86 enum {
87         __BLK_TA_QUEUE = 1,             /* queued */
88         __BLK_TA_BACKMERGE,             /* back merged to existing rq */
89         __BLK_TA_FRONTMERGE,            /* front merge to existing rq */
90         __BLK_TA_GETRQ,                 /* allocated new request */
91         __BLK_TA_SLEEPRQ,               /* sleeping on rq allocation */
92         __BLK_TA_REQUEUE,               /* request requeued */
93         __BLK_TA_ISSUE,                 /* sent to driver */
94         __BLK_TA_COMPLETE,              /* completed by driver */
95         __BLK_TA_PLUG,                  /* queue was plugged */
96         __BLK_TA_UNPLUG_IO,             /* queue was unplugged by io */
97         __BLK_TA_UNPLUG_TIMER,          /* queue was unplugged by timer */
98         __BLK_TA_INSERT,                /* insert request */
99         __BLK_TA_SPLIT,                 /* bio was split */
100         __BLK_TA_BOUNCE,                /* bio was bounced */
101         __BLK_TA_REMAP,                 /* bio was remapped */
102         __BLK_TA_ABORT,                 /* request aborted */
103         __BLK_TA_DRV_DATA,              /* binary driver data */
104 };
105
106 #define BLK_TA_MASK ((1 << BLK_TC_SHIFT) - 1)
107
108 /*
109  * Notify events.
110  */
111 enum blktrace_notify {
112         __BLK_TN_PROCESS = 0,           /* establish pid/name mapping */
113         __BLK_TN_TIMESTAMP,             /* include system clock */
114         __BLK_TN_MESSAGE,               /* Character string message */
115 };
116
117 /*
118  * Trace actions in full. Additionally, read or write is masked
119  */
120 #define BLK_TA_QUEUE            (__BLK_TA_QUEUE | BLK_TC_ACT(BLK_TC_QUEUE))
121 #define BLK_TA_BACKMERGE        (__BLK_TA_BACKMERGE | BLK_TC_ACT(BLK_TC_QUEUE))
122 #define BLK_TA_FRONTMERGE       (__BLK_TA_FRONTMERGE | BLK_TC_ACT(BLK_TC_QUEUE))
123 #define BLK_TA_GETRQ            (__BLK_TA_GETRQ | BLK_TC_ACT(BLK_TC_QUEUE))
124 #define BLK_TA_SLEEPRQ          (__BLK_TA_SLEEPRQ | BLK_TC_ACT(BLK_TC_QUEUE))
125 #define BLK_TA_REQUEUE          (__BLK_TA_REQUEUE | BLK_TC_ACT(BLK_TC_REQUEUE))
126 #define BLK_TA_ISSUE            (__BLK_TA_ISSUE | BLK_TC_ACT(BLK_TC_ISSUE))
127 #define BLK_TA_COMPLETE         (__BLK_TA_COMPLETE| BLK_TC_ACT(BLK_TC_COMPLETE))
128 #define BLK_TA_PLUG             (__BLK_TA_PLUG | BLK_TC_ACT(BLK_TC_QUEUE))
129 #define BLK_TA_UNPLUG_IO        (__BLK_TA_UNPLUG_IO | BLK_TC_ACT(BLK_TC_QUEUE))
130 #define BLK_TA_UNPLUG_TIMER     (__BLK_TA_UNPLUG_TIMER | BLK_TC_ACT(BLK_TC_QUEUE))
131 #define BLK_TA_INSERT           (__BLK_TA_INSERT | BLK_TC_ACT(BLK_TC_QUEUE))
132 #define BLK_TA_SPLIT            (__BLK_TA_SPLIT)
133 #define BLK_TA_BOUNCE           (__BLK_TA_BOUNCE)
134 #define BLK_TA_REMAP            (__BLK_TA_REMAP | BLK_TC_ACT(BLK_TC_QUEUE))
135 #define BLK_TA_ABORT            (__BLK_TA_ABORT | BLK_TC_ACT(BLK_TC_QUEUE))
136 #define BLK_TA_DRV_DATA         (__BLK_TA_DRV_DATA | BLK_TC_ACT(BLK_TC_DRV_DATA))
137
138 #define BLK_TN_PROCESS          (__BLK_TN_PROCESS | BLK_TC_ACT(BLK_TC_NOTIFY))
139 #define BLK_TN_TIMESTAMP        (__BLK_TN_TIMESTAMP | BLK_TC_ACT(BLK_TC_NOTIFY))
140 #define BLK_TN_MESSAGE          (__BLK_TN_MESSAGE | BLK_TC_ACT(BLK_TC_NOTIFY))
141
142 #define BLK_IO_TRACE_MAGIC      0x65617400
143 #define BLK_IO_TRACE_VERSION    0x07
144 /*
145  * The trace itself
146  */
147 struct blk_io_trace {
148         __u32 magic;            /* MAGIC << 8 | version */
149         __u32 sequence;         /* event number */
150         __u64 time;             /* in nanoseconds */
151         __u64 sector;           /* disk offset */
152         __u32 bytes;            /* transfer length */
153         __u32 action;           /* what happened */
154         __u32 pid;              /* who did it */
155         __u32 device;           /* device identifier (dev_t) */
156         __u32 cpu;              /* on what cpu did it happen */
157         __u16 error;            /* completion error */
158         __u16 pdu_len;          /* length of data after this trace */
159 };
160
161 struct pending_io {
162         /* sector offset of this IO */
163         u64 sector;
164
165         /* dev_t for this IO */
166         u32 device;
167
168         /* time this IO was dispatched */
169         u64 dispatch_time;
170         /* time this IO was finished */
171         u64 completion_time;
172         struct list_head hash_list;
173         /* process which queued this IO */
174         u32 pid;
175 };
176
177 struct pid_map {
178         struct list_head hash_list;
179         u32 pid;
180         int index;
181         char name[0];
182 };
183
/*
 * Return the timestamp (nanoseconds, from blk_io_trace->time) of the
 * record the trace cursor currently points at.
 */
u64 get_record_time(struct trace *trace)
{
        return trace->io->time;
}
188
189 void init_io_hash_table(void)
190 {
191         int i;
192         struct list_head *head;
193
194         for (i = 0; i < IO_HASH_TABLE_SIZE; i++) {
195                 head = io_hash_table + i;
196                 INIT_LIST_HEAD(head);
197         }
198 }
199
/* taken from the kernel hash.h */
/*
 * Hash a 64-bit sector number down to an IO_HASH_TABLE_BITS-wide
 * bucket index.  The shift/add/sub chain is an unrolled multiply by
 * the kernel's 64-bit golden-ratio prime (see kernel hash.h); the
 * steps are order-sensitive, don't rearrange them.
 */
static inline u64 hash_sector(u64 val)
{
        u64 hash = val;

        /*  Sigh, gcc can't optimise this alone like it does for 32 bits. */
        u64 n = hash;
        n <<= 18;
        hash -= n;
        n <<= 33;
        hash -= n;
        n <<= 3;
        hash += n;
        n <<= 3;
        hash -= n;
        n <<= 4;
        hash += n;
        n <<= 2;
        hash += n;

        /* High bits are more random, so use them. */
        return hash >> (64 - IO_HASH_TABLE_BITS);
}
223
224 static int io_hash_table_insert(struct pending_io *ins_pio)
225 {
226         u64 sector = ins_pio->sector;
227         u32 dev = ins_pio->device;
228         int slot = hash_sector(sector);
229         struct list_head *head;
230         struct pending_io *pio;
231
232         head = io_hash_table + slot;
233         list_for_each_entry(pio, head, hash_list) {
234                 if (pio->sector == sector && pio->device == dev)
235                         return -EEXIST;
236         }
237         list_add_tail(&ins_pio->hash_list, head);
238         return 0;
239 }
240
241 static struct pending_io *io_hash_table_search(u64 sector, u32 dev)
242 {
243         int slot = hash_sector(sector);
244         struct list_head *head;
245         struct pending_io *pio;
246
247         head = io_hash_table + slot;
248         list_for_each_entry(pio, head, hash_list) {
249                 if (pio->sector == sector && pio->device == dev)
250                         return pio;
251         }
252         return NULL;
253 }
254
255 static struct pending_io *hash_queued_io(struct blk_io_trace *io)
256 {
257         struct pending_io *pio;
258         int ret;
259
260         pio = calloc(1, sizeof(*pio));
261         pio->sector = io->sector;
262         pio->device = io->device;
263         pio->pid = io->pid;
264
265         ret = io_hash_table_insert(pio);
266         if (ret < 0) {
267                 /* crud, the IO is there already */
268                 free(pio);
269                 return NULL;
270         }
271         return pio;
272 }
273
274 static struct pending_io *hash_dispatched_io(struct blk_io_trace *io)
275 {
276         struct pending_io *pio;
277
278         pio = io_hash_table_search(io->sector, io->device);
279         if (!pio) {
280                 pio = hash_queued_io(io);
281                 if (!pio)
282                         return NULL;
283         }
284         pio->dispatch_time = io->time;
285         return pio;
286 }
287
288 static struct pending_io *hash_completed_io(struct blk_io_trace *io)
289 {
290         struct pending_io *pio;
291
292         pio = io_hash_table_search(io->sector, io->device);
293
294         if (!pio)
295                 return NULL;
296         return pio;
297 }
298
299 void init_process_hash_table(void)
300 {
301         int i;
302         struct list_head *head;
303
304         for (i = 0; i < PROCESS_HASH_TABLE_SIZE; i++) {
305                 head = process_hash_table + i;
306                 INIT_LIST_HEAD(head);
307         }
308 }
309
310 static u32 hash_pid(u32 pid)
311 {
312         u32 hash = pid;
313
314         hash ^= pid >> 3;
315         hash ^= pid >> 3;
316         hash ^= pid >> 4;
317         hash ^= pid >> 6;
318         return (hash & (PROCESS_HASH_TABLE_SIZE - 1));
319 }
320
321 static struct pid_map *process_hash_search(u32 pid)
322 {
323         int slot = hash_pid(pid);
324         struct list_head *head;
325         struct pid_map *pm;
326
327         head = process_hash_table + slot;
328         list_for_each_entry(pm, head, hash_list) {
329                 if (pm->pid == pid)
330                         return pm;
331         }
332         return NULL;
333 }
334
335 static struct pid_map *process_hash_insert(u32 pid, char *name)
336 {
337         int slot = hash_pid(pid);
338         struct pid_map *pm;
339         int old_index = 0;
340         char buf[16];
341
342         pm = process_hash_search(pid);
343         if (pm) {
344                 /* Entry exists and name shouldn't be changed? */
345                 if (!name || !strcmp(name, pm->name))
346                         return pm;
347                 list_del(&pm->hash_list);
348                 old_index = pm->index;
349                 free(pm);
350         }
351         if (!name) {
352                 sprintf(buf, "[%u]", pid);
353                 name = buf;
354         }
355         pm = malloc(sizeof(struct pid_map) + strlen(name) + 1);
356         pm->pid = pid;
357         pm->index = old_index;
358         strcpy(pm->name, name);
359         list_add_tail(&pm->hash_list, process_hash_table + slot);
360
361         return pm;
362 }
363
/*
 * Handle BLK_TC_NOTIFY records: pid->name mappings and the wall-clock
 * timestamp note that anchors the trace's relative times.
 */
static void handle_notify(struct trace *trace)
{
        struct blk_io_trace *io = trace->io;
        void *payload = (char *)io + sizeof(*io);
        u32 two32[2];

        if (io->action == BLK_TN_PROCESS) {
                /* payload is the command name for io->pid */
                if (io_per_process)
                        process_hash_insert(io->pid, payload);
                return;
        }

        if (io->action != BLK_TN_TIMESTAMP)
                return;

        /* timestamp payload must be exactly two 32-bit words */
        if (io->pdu_len != sizeof(two32))
                return;

        /* payload is { tv_sec, tv_nsec } of the trace's start wall time */
        memcpy(two32, payload, sizeof(two32));
        trace->start_timestamp = io->time;
        trace->abs_start_time.tv_sec = two32[0];
        trace->abs_start_time.tv_nsec = two32[1];
        /* normalize if tv_nsec went negative in the u32 -> long store */
        if (trace->abs_start_time.tv_nsec < 0) {
                trace->abs_start_time.tv_sec--;
                trace->abs_start_time.tv_nsec += 1000000000;
        }
}
391
392 int next_record(struct trace *trace)
393 {
394         int skip = trace->io->pdu_len;
395         u64 offset;
396
397         trace->cur += sizeof(*trace->io) + skip;
398         offset = trace->cur - trace->start;
399         if (offset >= trace->len)
400                 return 1;
401
402         trace->io = (struct blk_io_trace *)trace->cur;
403         return 0;
404 }
405
/* Rewind the trace cursor to the first record in the mapped file. */
void first_record(struct trace *trace)
{
        trace->cur = trace->start;
        trace->io = (struct blk_io_trace *)trace->cur;
}
411
412 int is_io_event(struct blk_io_trace *test)
413 {
414         char *message;
415         if (!(test->action & BLK_TC_ACT(BLK_TC_NOTIFY)))
416                 return 1;
417         if (test->action == BLK_TN_MESSAGE) {
418                 int len = test->pdu_len;
419                 if (len < 3)
420                         return 0;
421                 message = (char *)(test + 1);
422                 if (strncmp(message, "fio ", 4) == 0) {
423                         return 1;
424                 }
425         }
426         return 0;
427 }
428
/*
 * Find the timestamp of the last IO event in the trace.
 *
 * Fast path: walk backwards from EOF looking for a record whose magic
 * checks out and whose header + payload end exactly at EOF.  If that
 * fails within 8192 byte positions (e.g. trailing garbage), fall back
 * to a full forward scan remembering the last IO event time seen.
 * The cursor is rewound before returning.
 */
u64 find_last_time(struct trace *trace)
{
        char *p = trace->start + trace->len;
        struct blk_io_trace *test;
        int search_len = 0;
        u64 found = 0;

        if (trace->len < sizeof(*trace->io))
                return 0;
        p -= sizeof(*trace->io);
        while (p >= trace->start) {
                test = (struct blk_io_trace *)p;
                if (CHECK_MAGIC(test) && is_io_event(test)) {
                        u64 offset = p - trace->start;
                        /* only a record ending exactly at EOF can be last */
                        if (offset + sizeof(*test) + test->pdu_len == trace->len) {
                                return test->time;
                        }
                }
                p--;
                search_len++;
                if (search_len > 8192) {
                        break;
                }
        }

        /* searching backwards didn't work out, we'll have to scan the file */
        first_record(trace);
        while (1) {
                if (is_io_event(trace->io))
                        found = trace->io->time;
                if (next_record(trace))
                        break;
        }
        first_record(trace);
        return found;
}
465
466 int parse_fio_bank_message(struct trace *trace, u64 *bank_ret, u64 *offset_ret,
467                            u64 *num_banks_ret)
468 {
469         char *s;
470         char *next;
471         char *message;
472         struct blk_io_trace *test = trace->io;
473         int len = test->pdu_len;
474         u64 bank;
475         u64 offset;
476         u64 num_banks;
477
478         if (!(test->action & BLK_TC_ACT(BLK_TC_NOTIFY)))
479                 return -1;
480         if (test->action != BLK_TN_MESSAGE)
481                 return -1;
482
483         /* the message is fio rw bank offset num_banks */
484         if (len < 3)
485                 return -1;
486         message = (char *)(test + 1);
487         if (strncmp(message, "fio r ", 6) != 0)
488                 return -1;
489
490         message = strndup(message, len);
491         s = strchr(message, ' ');
492         if (!s)
493                 goto out;
494         s++;
495         s = strchr(s, ' ');
496         if (!s)
497                 goto out;
498
499         bank = strtoll(s, &next, 10);
500         if (s == next)
501                 goto out;
502         s = next;
503
504         offset = strtoll(s, &next, 10);
505         if (s == next)
506                 goto out;
507         s = next;
508
509         num_banks = strtoll(s, &next, 10);
510         if (s == next)
511                 goto out;
512
513         *bank_ret = bank;
514         *offset_ret = offset;
515         *num_banks_ret = num_banks;
516
517         return 0;
518 out:
519         free(message);
520         return -1;
521 }
522
523 static struct dev_info *lookup_dev(struct trace *trace, struct blk_io_trace *io)
524 {
525         u32 dev = io->device;
526         int i;
527         struct dev_info *di = NULL;
528
529         for (i = 0; i < trace->num_devices; i++) {
530                 if (trace->devices[i].device == dev) {
531                         di = trace->devices + i;
532                         goto found;
533                 }
534         }
535         i = trace->num_devices++;
536         if (i >= MAX_DEVICES_PER_TRACE) {
537                 fprintf(stderr, "Trace contains too many devices (%d)\n", i);
538                 exit(1);
539         }
540         di = trace->devices + i;
541         di->device = dev;
542 found:
543         return di;
544 }
545
/*
 * Scan every IO record to learn each device's byte-offset range
 * [min, max], then assign each device a base (di->map) so all devices
 * pack into one contiguous logical range for plotting.  The cursor is
 * rewound before returning.
 *
 * NOTE(review): di->min starts at 0 (the trace struct is calloc'd in
 * open_trace), so "found < di->min" never fires for sectors >= 0 and
 * min effectively stays 0 -- confirm that is intended.
 */
static void map_devices(struct trace *trace)
{
        struct dev_info *di;
        u64 found;
        u64 map_start = 0;
        int i;

        first_record(trace);
        while (1) {
                if (!(trace->io->action & BLK_TC_ACT(BLK_TC_NOTIFY))) {
                        di = lookup_dev(trace, trace->io);
                        /* sector << 9: convert 512-byte sectors to bytes */
                        found = trace->io->sector << 9;
                        if (found < di->min)
                                di->min = found;

                        found += trace->io->bytes;
                        if (di->max < found)
                                di->max = found;
                }
                if (next_record(trace))
                        break;
        }
        first_record(trace);
        /* lay the devices out end-to-end in the combined space */
        for (i = 0; i < trace->num_devices; i++) {
                di = trace->devices + i;
                di->map = map_start;
                map_start += di->max - di->min;
        }
}
575
576 u64 map_io(struct trace *trace, struct blk_io_trace *io)
577 {
578         struct dev_info *di = lookup_dev(trace, io);
579         u64 val = trace->io->sector << 9;
580         return di->map + val - di->min;
581 }
582
/*
 * One full pass over the trace to find the smallest and largest mapped
 * byte offsets touched by any IO, plus the highest bank and bank
 * offset reported in fio notify messages.  Results are returned
 * through the *_ret arguments; the cursor is rewound before returning.
 */
void find_extreme_offsets(struct trace *trace, u64 *min_ret, u64 *max_ret, u64 *max_bank_ret,
                          u64 *max_offset_ret)
{
        u64 found = 0;
        u64 max = 0, min = ~(u64)0;
        u64 max_bank = 0;
        u64 max_bank_offset = 0;
        u64 num_banks = 0;

        /* build the per-device offset map first; map_io() depends on it */
        map_devices(trace);

        first_record(trace);
        while (1) {
                if (!(trace->io->action & BLK_TC_ACT(BLK_TC_NOTIFY))) {
                        found = map_io(trace, trace->io);
                        if (found < min)
                                min = found;

                        /* the IO ends bytes later; that bounds the max */
                        found += trace->io->bytes;
                        if (max < found)
                                max = found;
                } else {
                        u64 bank;
                        u64 offset;
                        if (!parse_fio_bank_message(trace, &bank,
                                                    &offset, &num_banks)) {
                                if (bank > max_bank)
                                        max_bank = bank;
                                if (offset > max_bank_offset)
                                        max_bank_offset = offset;
                        }
                }
                if (next_record(trace))
                        break;
        }
        first_record(trace);
        *min_ret = min;
        *max_ret = max;
        *max_bank_ret = max_bank;
        *max_offset_ret = max_bank_offset;
}
624
625 static void check_io_types(struct trace *trace)
626 {
627         struct blk_io_trace *io = trace->io;
628         int action = io->action & BLK_TA_MASK;
629
630         if (!(io->action & BLK_TC_ACT(BLK_TC_NOTIFY))) {
631                 switch (action) {
632                 case __BLK_TA_COMPLETE:
633                         trace->found_completion = 1;
634                         break;
635                 case __BLK_TA_ISSUE:
636                         trace->found_issue = 1;
637                         break;
638                 case __BLK_TA_QUEUE:
639                         trace->found_queue = 1;
640                         break;
641                 };
642         }
643 }
644
645
646 int filter_outliers(struct trace *trace, u64 min_offset, u64 max_offset,
647                     u64 *yzoom_min, u64 *yzoom_max)
648 {
649         int hits[11];
650         u64 max_per_bucket[11];
651         u64 min_per_bucket[11];
652         u64 bytes_per_bucket = (max_offset - min_offset + 1) / 10;
653         int slot;
654         int fat_count = 0;
655
656         memset(hits, 0, sizeof(int) * 11);
657         memset(max_per_bucket, 0, sizeof(u64) * 11);
658         memset(min_per_bucket, 0xff, sizeof(u64) * 11);
659         first_record(trace);
660         while (1) {
661                 check_io_types(trace);
662                 if (!(trace->io->action & BLK_TC_ACT(BLK_TC_NOTIFY)) &&
663                     (trace->io->action & BLK_TA_MASK) == __BLK_TA_QUEUE) {
664                         u64 off = map_io(trace, trace->io) - min_offset;
665
666                         slot = (int)(off / bytes_per_bucket);
667                         hits[slot]++;
668                         if (off < min_per_bucket[slot])
669                                 min_per_bucket[slot] = off;
670
671                         off += trace->io->bytes;
672                         slot = (int)(off / bytes_per_bucket);
673                         hits[slot]++;
674                         if (off > max_per_bucket[slot])
675                                 max_per_bucket[slot] = off;
676                 }
677                 if (next_record(trace))
678                         break;
679         }
680         first_record(trace);
681         for (slot = 0; slot < 11; slot++) {
682                 if (hits[slot] > fat_count) {
683                         fat_count = hits[slot];
684                 }
685         }
686
687         *yzoom_max = max_offset;
688         for (slot = 10; slot >= 0; slot--) {
689                 double d = hits[slot];
690
691                 if (d >= (double)fat_count * .05) {
692                         *yzoom_max = max_per_bucket[slot] + min_offset;
693                         break;
694                 }
695         }
696
697         *yzoom_min = min_offset;
698         for (slot = 0; slot < 10; slot++) {
699                 double d = hits[slot];
700
701                 if (d >= (double)fat_count * .05) {
702                         *yzoom_min = min_per_bucket[slot] + min_offset;
703                         break;
704                 }
705         }
706         return 0;
707 }
708
static char footer[] = ".blktrace.0";
static int footer_len = sizeof(footer) - 1;

/*
 * Does name end in ".blktrace.0" with a non-empty stem?  If so,
 * optionally report the full name length through len and return 1;
 * otherwise return 0.
 */
static int match_trace(char *name, int *len)
{
        int name_len = strlen(name);

        if (name_len <= footer_len)
                return 0;
        if (strcmp(name + name_len - footer_len, footer) != 0)
                return 0;

        if (len)
                *len = name_len;
        return 1;
}
729
/*
 * Singly-linked list of blktrace file names found in a directory.
 * name points into the same allocation as the node itself (see
 * traces_list()), so freeing the node frees the name too.
 */
struct tracelist {
        struct tracelist *next;
        char *name;
};
734
735 static struct tracelist *traces_list(char *dir_name, int *len)
736 {
737         int count = 0;
738         struct tracelist *traces = NULL;
739         DIR *dir = opendir(dir_name);
740         if (!dir)
741                 return NULL;
742
743         while (1) {
744                 int len;
745                 struct tracelist *tl;
746                 struct dirent *d = readdir(dir);
747                 if (!d)
748                         break;
749
750                 if (!match_trace(d->d_name, &len))
751                         continue;
752
753                 /* Allocate space for tracelist + filename */
754                 tl = calloc(1, sizeof(struct tracelist) + (sizeof(char) * (len + 1)));
755                 if (!tl)
756                         return NULL;
757                 tl->next = traces;
758                 tl->name = (char *)(tl + 1);
759                 strncpy(tl->name, d->d_name, len);
760                 traces = tl;
761                 count++;
762         }
763
764         closedir(dir);
765
766         if (len)
767                 *len = count;
768
769         return traces;
770 }
771
772 static void traces_free(struct tracelist *traces)
773 {
774         while (traces) {
775                 struct tracelist *tl = traces;
776                 traces = traces->next;
777                 free(tl);
778         }
779 }
780
781 static char *combine_blktrace_devs(char *dir_name)
782 {
783         struct tracelist *traces = NULL;
784         struct tracelist *tl;
785         char *ret = NULL;
786         char **argv = NULL;
787         char *dumpfile;
788         int argc = 0;
789         int i;
790         int err;
791
792         if (!asprintf(&dumpfile, "%s.dump", dir_name))
793                 goto out;
794
795         traces = traces_list(dir_name, &argc);
796         if (!traces)
797                 goto out;
798
799         argc *= 2; /* {"-i", trace } */
800         argc += 6; /* See below */
801         argv = calloc(argc + 1, sizeof(char *));
802         if (!argv)
803                 goto out;
804
805         i = 0;
806         argv[i++] = "blkparse";
807         argv[i++] = "-O";
808         argv[i++] = "-D";
809         argv[i++] = dir_name;
810         argv[i++] = "-d";
811         argv[i++] = dumpfile;
812         for (tl = traces; tl != NULL; tl = tl->next) {
813                 argv[i++] = "-i";
814                 argv[i++] = tl->name;
815         }
816
817         err = run_program2(argc, argv);
818         free(argv);
819         if (err) {
820                 fprintf(stderr, "blkparse failed with exit code %d\n", err);
821                 exit(1);
822         }
823         ret = dumpfile;
824 out:
825         traces_free(traces);
826         return ret;
827 }
828
/*
 * Resolve the user-supplied name into a blkparse dump file, generating
 * one from raw blktrace output when needed.  Returns a malloc'd path
 * the caller owns, or NULL if nothing usable is found.  Uses the
 * file-scope line[] scratch buffer for candidate paths, and trims
 * trailing '/' characters from filename in place when it names a
 * directory.
 */
static char *find_trace_file(char *filename)
{
        int ret;
        struct stat st;
        char *dot;
        char *try;
        int found_dir = 0;

        /* look for an exact match of whatever they pass in.
         * If it is a file, assume it is the dump file.
         * If a directory, remember that it existed so we
         * can combine traces in that directory later
         */
        ret = stat(filename, &st);
        if (ret == 0) {
                if (S_ISREG(st.st_mode))
                        return strdup(filename);

                if (S_ISDIR(st.st_mode))
                        found_dir = 1;
        }

        if (found_dir) {
                int i;
                /* Eat up trailing '/'s */
                for (i = strlen(filename) - 1; filename[i] == '/'; i--)
                        filename[i] = '\0';
        }

        /*
         * try tacking .dump onto the end and see if that already
         * has been generated
         */
        snprintf(line, line_len, "%s.%s", filename, "dump");
        ret = stat(line, &st);
        if (ret == 0)
                return strdup(line);

        /*
         * try to generate the .dump from all the traces in
         * a single dir.
         */
        if (found_dir) {
                try = combine_blktrace_devs(filename);
                if (try)
                        return try;
        }

        /*
         * try to generate the .dump from all the blktrace
         * files for a named trace
         */
        try = strdup(filename);
        dot = strrchr(try, '.');
        if (!dot || strcmp(".dump", dot) != 0) {
                /* strip a trailing extension before probing .blktrace.0 */
                if (dot && dot != try)
                        *dot = '\0';
                snprintf(line, line_len, "%s%s", try, ".blktrace.0");
                ret = stat(line, &st);
                if (ret == 0) {
                        /* raw blktrace output exists: convert it */
                        blktrace_to_dump(try);
                        snprintf(line, line_len, "%s.%s", try, "dump");
                        ret = stat(line, &st);
                        if (ret == 0) {
                                free(try);
                                return strdup(line);
                        }
                }
        }
        free(try);
        return NULL;
}
901 struct trace *open_trace(char *filename)
902 {
903         int fd;
904         char *p;
905         struct stat st;
906         int ret;
907         struct trace *trace;
908         char *found_filename;
909
910         trace = calloc(1, sizeof(*trace));
911         if (!trace) {
912                 fprintf(stderr, "unable to allocate memory for trace\n");
913                 return NULL;
914         }
915
916         found_filename = find_trace_file(filename);
917         if (!found_filename) {
918                 fprintf(stderr, "Unable to find trace file %s\n", filename);
919                 goto fail;
920         }
921         filename = found_filename;
922
923         fd = open(filename, O_RDONLY);
924         if (fd < 0) {
925                 fprintf(stderr, "Unable to open trace file %s err %s\n", filename, strerror(errno));
926                 goto fail;
927         }
928         ret = fstat(fd, &st);
929         if (ret < 0) {
930                 fprintf(stderr, "stat failed on %s err %s\n", filename, strerror(errno));
931                 goto fail_fd;
932         }
933         p = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
934         if (p == MAP_FAILED) {
935                 fprintf(stderr, "Unable to mmap trace file %s, err %s\n", filename, strerror(errno));
936                 goto fail_fd;
937         }
938         trace->fd = fd;
939         trace->len = st.st_size;
940         trace->start = p;
941         trace->cur = p;
942         trace->io = (struct blk_io_trace *)p;
943         return trace;
944
945 fail_fd:
946         close(fd);
947 fail:
948         free(trace);
949         return NULL;
950 }
951 static inline int tput_event(struct trace *trace)
952 {
953         if (trace->found_completion)
954                 return __BLK_TA_COMPLETE;
955         if (trace->found_issue)
956                 return __BLK_TA_ISSUE;
957         if (trace->found_queue)
958                 return __BLK_TA_QUEUE;
959
960         return __BLK_TA_COMPLETE;
961 }
962
963 int action_char_to_num(char action)
964 {
965         switch (action) {
966         case 'Q':
967                 return __BLK_TA_QUEUE;
968         case 'D':
969                 return __BLK_TA_ISSUE;
970         case 'C':
971                 return __BLK_TA_COMPLETE;
972         }
973         return -1;
974 }
975
976 static inline int io_event(struct trace *trace)
977 {
978         if (plot_io_action)
979                 return plot_io_action;
980         if (trace->found_queue)
981                 return __BLK_TA_QUEUE;
982         if (trace->found_issue)
983                 return __BLK_TA_ISSUE;
984         if (trace->found_completion)
985                 return __BLK_TA_COMPLETE;
986
987         return __BLK_TA_COMPLETE;
988 }
989
990 void add_tput(struct trace *trace, struct graph_line_data *writes_gld,
991               struct graph_line_data *reads_gld)
992 {
993         struct blk_io_trace *io = trace->io;
994         struct graph_line_data *gld;
995         int action = io->action & BLK_TA_MASK;
996         int seconds;
997
998         if (io->action & BLK_TC_ACT(BLK_TC_NOTIFY))
999                 return;
1000
1001         if (action != tput_event(trace))
1002                 return;
1003
1004         if (BLK_DATADIR(io->action) & BLK_TC_READ)
1005                 gld = reads_gld;
1006         else
1007                 gld = writes_gld;
1008
1009         seconds = SECONDS(io->time);
1010         gld->data[seconds].sum += io->bytes;
1011
1012         gld->data[seconds].count = 1;
1013         if (gld->data[seconds].sum > gld->max)
1014                 gld->max = gld->data[seconds].sum;
1015 }
1016
/* how many graph_dot_data pointer slots to grow the per-process arrays by */
#define GDD_PTR_ALLOC_STEP 16

/*
 * Look up (or create) the per-process plot slot for @pid.
 *
 * When per-process plotting is disabled, all IO shares plot index 0 and
 * NULL is returned (callers then use index 0 with an empty label).
 * Otherwise the pid is inserted into the process hash table and, for a
 * new entry, the tf->gdd_reads/gdd_writes pointer arrays are grown as
 * needed and the next free plot index is assigned.
 */
static struct pid_map *get_pid_map(struct trace_file *tf, u32 pid)
{
	struct pid_map *pm;

	if (!io_per_process) {
		/* single shared plot; make sure the slot count covers index 0 */
		if (!tf->io_plots)
			tf->io_plots = 1;
		return NULL;
	}

	pm = process_hash_insert(pid, NULL);
	/* New entry? */
	/*
	 * NOTE(review): "new" is detected by index == 0, which is also a
	 * valid assigned index if io_plots starts at 0 — presumably an
	 * existing entry never legitimately holds index 0 here; confirm
	 * against process_hash_insert()/io_plots initialization.
	 */
	if (!pm->index) {
		/* grow both pointer arrays in lock step, zeroing the new tail */
		if (tf->io_plots == tf->io_plots_allocated) {
			tf->io_plots_allocated += GDD_PTR_ALLOC_STEP;
			tf->gdd_reads = realloc(tf->gdd_reads, tf->io_plots_allocated * sizeof(struct graph_dot_data *));
			if (!tf->gdd_reads)
				abort();
			tf->gdd_writes = realloc(tf->gdd_writes, tf->io_plots_allocated * sizeof(struct graph_dot_data *));
			if (!tf->gdd_writes)
				abort();
			memset(tf->gdd_reads + tf->io_plots_allocated - GDD_PTR_ALLOC_STEP,
			       0, GDD_PTR_ALLOC_STEP * sizeof(struct graph_dot_data *));
			memset(tf->gdd_writes + tf->io_plots_allocated - GDD_PTR_ALLOC_STEP,
			       0, GDD_PTR_ALLOC_STEP * sizeof(struct graph_dot_data *));
		}
		pm->index = tf->io_plots++;

		return pm;
	}
	return pm;
}
1051
1052 void add_io(struct trace *trace, struct trace_file *tf)
1053 {
1054         struct blk_io_trace *io = trace->io;
1055         int action = io->action & BLK_TA_MASK;
1056         u64 offset;
1057         int index;
1058         char *label;
1059         struct pid_map *pm;
1060
1061         if (io->action & BLK_TC_ACT(BLK_TC_NOTIFY))
1062                 return;
1063
1064         if (action != io_event(trace))
1065                 return;
1066
1067         offset = map_io(trace, io);
1068
1069         pm = get_pid_map(tf, io->pid);
1070         if (!pm) {
1071                 index = 0;
1072                 label = "";
1073         } else {
1074                 index = pm->index;
1075                 label = pm->name;
1076         }
1077         if (BLK_DATADIR(io->action) & BLK_TC_READ) {
1078                 if (!tf->gdd_reads[index])
1079                         tf->gdd_reads[index] = alloc_dot_data(tf->min_seconds, tf->max_seconds, tf->min_offset, tf->max_offset, tf->stop_seconds, pick_color(), strdup(label));
1080                 set_gdd_bit(tf->gdd_reads[index], offset, io->bytes, io->time);
1081         } else if (BLK_DATADIR(io->action) & BLK_TC_WRITE) {
1082                 if (!tf->gdd_writes[index])
1083                         tf->gdd_writes[index] = alloc_dot_data(tf->min_seconds, tf->max_seconds, tf->min_offset, tf->max_offset, tf->stop_seconds, pick_color(), strdup(label));
1084                 set_gdd_bit(tf->gdd_writes[index], offset, io->bytes, io->time);
1085         }
1086 }
1087
/*
 * Track the number of IOs in flight and fold the running count into the
 * per-second pending-IO graph.
 *
 * Queue events are only hashed (so a later issue can find them) when the
 * trace also contains issue or completion events; requeues decrement the
 * in-flight count; only issue events actually add a sample to @gld.
 */
void add_pending_io(struct trace *trace, struct graph_line_data *gld)
{
	unsigned int seconds;
	struct blk_io_trace *io = trace->io;
	int action = io->action & BLK_TA_MASK;
	double avg;
	struct pending_io *pio;

	/* notify records carry no I/O payload */
	if (io->action & BLK_TC_ACT(BLK_TC_NOTIFY))
		return;

	if (action == __BLK_TA_QUEUE) {
		/* remember the queue time so the dispatch can match it up */
		if (trace->found_issue || trace->found_completion)
			hash_queued_io(trace->io);
		return;
	}
	if (action == __BLK_TA_REQUEUE) {
		if (ios_in_flight > 0)
			ios_in_flight--;
		return;
	}
	if (action != __BLK_TA_ISSUE)
		return;

	pio = hash_dispatched_io(trace->io);
	if (!pio)
		return;

	/*
	 * with no completions in the trace nothing will ever retire this
	 * entry, so drop it now rather than leak it in the hash table
	 */
	if (!trace->found_completion) {
		list_del(&pio->hash_list);
		free(pio);
	}

	ios_in_flight++;

	seconds = SECONDS(io->time);
	gld->data[seconds].sum += ios_in_flight;
	gld->data[seconds].count++;

	/* the plotted value is the average in-flight depth for this second */
	avg = (double)gld->data[seconds].sum / gld->data[seconds].count;
	if (gld->max < (u64)avg) {
		gld->max = avg;
	}
}
1132
1133 void add_completed_io(struct trace *trace,
1134                       struct graph_line_data *latency_gld)
1135 {
1136         struct blk_io_trace *io = trace->io;
1137         int seconds;
1138         int action = io->action & BLK_TA_MASK;
1139         struct pending_io *pio;
1140         double avg;
1141         u64 latency;
1142
1143         if (io->action & BLK_TC_ACT(BLK_TC_NOTIFY))
1144                 return;
1145
1146         if (action != __BLK_TA_COMPLETE)
1147                 return;
1148
1149         seconds = SECONDS(io->time);
1150
1151         pio = hash_completed_io(trace->io);
1152         if (!pio)
1153                 return;
1154
1155         if (ios_in_flight > 0)
1156                 ios_in_flight--;
1157         if (io->time >= pio->dispatch_time) {
1158                 latency = io->time - pio->dispatch_time;
1159                 latency_gld->data[seconds].sum += latency;
1160                 latency_gld->data[seconds].count++;
1161         }
1162
1163         list_del(&pio->hash_list);
1164         free(pio);
1165
1166         avg = (double)latency_gld->data[seconds].sum /
1167                 latency_gld->data[seconds].count;
1168         if (latency_gld->max < (u64)avg) {
1169                 latency_gld->max = avg;
1170         }
1171 }
1172
1173 void add_iop(struct trace *trace, struct graph_line_data *gld)
1174 {
1175         struct blk_io_trace *io = trace->io;
1176         int action = io->action & BLK_TA_MASK;
1177         int seconds;
1178
1179         if (io->action & BLK_TC_ACT(BLK_TC_NOTIFY))
1180                 return;
1181
1182         /* iops and tput use the same events */
1183         if (action != tput_event(trace))
1184                 return;
1185
1186         seconds = SECONDS(io->time);
1187         gld->data[seconds].sum += 1;
1188         gld->data[seconds].count = 1;
1189         if (gld->data[seconds].sum > gld->max)
1190                 gld->max = gld->data[seconds].sum;
1191 }
1192
/*
 * Per-record hook run while scanning a trace; currently only forwards
 * the record to notify handling (process-name records etc.).
 */
void check_record(struct trace *trace)
{
	handle_notify(trace);
}