/*
 * iowatcher/blkparse.c
 *
 * Copyright (C) 2012 Fusion-io
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * Parts of this file were imported from Jens Axboe's blktrace sources (also GPL)
 */
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <inttypes.h>
#include <string.h>
#include <asm/types.h>
#include <errno.h>
#include <sys/mman.h>
#include <time.h>

#include "plot.h"
#include "blkparse.h"
#include "list.h"
#include "tracers.h"

#define IO_HASH_TABLE_BITS 11
#define IO_HASH_TABLE_SIZE (1 << IO_HASH_TABLE_BITS)
static struct list_head io_hash_table[IO_HASH_TABLE_SIZE];
static u64 ios_in_flight = 0;

#define PROCESS_HASH_TABLE_BITS 7
#define PROCESS_HASH_TABLE_SIZE (1 << PROCESS_HASH_TABLE_BITS)
static struct list_head process_hash_table[PROCESS_HASH_TABLE_SIZE];

extern int plot_io_action;
extern int io_per_process;

/*
 * Trace categories
 */
enum {
        BLK_TC_READ     = 1 << 0,       /* reads */
        BLK_TC_WRITE    = 1 << 1,       /* writes */
        BLK_TC_FLUSH    = 1 << 2,       /* flush */
        BLK_TC_SYNC     = 1 << 3,       /* sync */
        BLK_TC_QUEUE    = 1 << 4,       /* queueing/merging */
        BLK_TC_REQUEUE  = 1 << 5,       /* requeueing */
        BLK_TC_ISSUE    = 1 << 6,       /* issue */
        BLK_TC_COMPLETE = 1 << 7,       /* completions */
        BLK_TC_FS       = 1 << 8,       /* fs requests */
        BLK_TC_PC       = 1 << 9,       /* pc requests */
        BLK_TC_NOTIFY   = 1 << 10,      /* special message */
        BLK_TC_AHEAD    = 1 << 11,      /* readahead */
        BLK_TC_META     = 1 << 12,      /* metadata */
        BLK_TC_DISCARD  = 1 << 13,      /* discard requests */
        BLK_TC_DRV_DATA = 1 << 14,      /* binary driver data */
        BLK_TC_FUA      = 1 << 15,      /* fua requests */

        BLK_TC_END      = 1 << 15,      /* we've run out of bits! */
};

#define BLK_TC_SHIFT            (16)
#define BLK_TC_ACT(act)         ((act) << BLK_TC_SHIFT)
#define BLK_DATADIR(a)          (((a) >> BLK_TC_SHIFT) & (BLK_TC_READ | BLK_TC_WRITE))

/*
 * Basic trace actions
 */
enum {
        __BLK_TA_QUEUE = 1,             /* queued */
        __BLK_TA_BACKMERGE,             /* back merged to existing rq */
        __BLK_TA_FRONTMERGE,            /* front merge to existing rq */
        __BLK_TA_GETRQ,                 /* allocated new request */
        __BLK_TA_SLEEPRQ,               /* sleeping on rq allocation */
        __BLK_TA_REQUEUE,               /* request requeued */
        __BLK_TA_ISSUE,                 /* sent to driver */
        __BLK_TA_COMPLETE,              /* completed by driver */
        __BLK_TA_PLUG,                  /* queue was plugged */
        __BLK_TA_UNPLUG_IO,             /* queue was unplugged by io */
        __BLK_TA_UNPLUG_TIMER,          /* queue was unplugged by timer */
        __BLK_TA_INSERT,                /* insert request */
        __BLK_TA_SPLIT,                 /* bio was split */
        __BLK_TA_BOUNCE,                /* bio was bounced */
        __BLK_TA_REMAP,                 /* bio was remapped */
        __BLK_TA_ABORT,                 /* request aborted */
        __BLK_TA_DRV_DATA,              /* binary driver data */
};

#define BLK_TA_MASK ((1 << BLK_TC_SHIFT) - 1)

/*
 * Notify events.
 */
enum blktrace_notify {
        __BLK_TN_PROCESS = 0,           /* establish pid/name mapping */
        __BLK_TN_TIMESTAMP,             /* include system clock */
        __BLK_TN_MESSAGE,               /* Character string message */
};

/*
 * Trace actions in full. Additionally, read or write is masked
 */
#define BLK_TA_QUEUE            (__BLK_TA_QUEUE | BLK_TC_ACT(BLK_TC_QUEUE))
#define BLK_TA_BACKMERGE        (__BLK_TA_BACKMERGE | BLK_TC_ACT(BLK_TC_QUEUE))
#define BLK_TA_FRONTMERGE       (__BLK_TA_FRONTMERGE | BLK_TC_ACT(BLK_TC_QUEUE))
#define BLK_TA_GETRQ            (__BLK_TA_GETRQ | BLK_TC_ACT(BLK_TC_QUEUE))
#define BLK_TA_SLEEPRQ          (__BLK_TA_SLEEPRQ | BLK_TC_ACT(BLK_TC_QUEUE))
#define BLK_TA_REQUEUE          (__BLK_TA_REQUEUE | BLK_TC_ACT(BLK_TC_REQUEUE))
#define BLK_TA_ISSUE            (__BLK_TA_ISSUE | BLK_TC_ACT(BLK_TC_ISSUE))
#define BLK_TA_COMPLETE         (__BLK_TA_COMPLETE | BLK_TC_ACT(BLK_TC_COMPLETE))
#define BLK_TA_PLUG             (__BLK_TA_PLUG | BLK_TC_ACT(BLK_TC_QUEUE))
#define BLK_TA_UNPLUG_IO        (__BLK_TA_UNPLUG_IO | BLK_TC_ACT(BLK_TC_QUEUE))
#define BLK_TA_UNPLUG_TIMER     (__BLK_TA_UNPLUG_TIMER | BLK_TC_ACT(BLK_TC_QUEUE))
#define BLK_TA_INSERT           (__BLK_TA_INSERT | BLK_TC_ACT(BLK_TC_QUEUE))
#define BLK_TA_SPLIT            (__BLK_TA_SPLIT)
#define BLK_TA_BOUNCE           (__BLK_TA_BOUNCE)
#define BLK_TA_REMAP            (__BLK_TA_REMAP | BLK_TC_ACT(BLK_TC_QUEUE))
#define BLK_TA_ABORT            (__BLK_TA_ABORT | BLK_TC_ACT(BLK_TC_QUEUE))
#define BLK_TA_DRV_DATA         (__BLK_TA_DRV_DATA | BLK_TC_ACT(BLK_TC_DRV_DATA))

#define BLK_TN_PROCESS          (__BLK_TN_PROCESS | BLK_TC_ACT(BLK_TC_NOTIFY))
#define BLK_TN_TIMESTAMP        (__BLK_TN_TIMESTAMP | BLK_TC_ACT(BLK_TC_NOTIFY))
#define BLK_TN_MESSAGE          (__BLK_TN_MESSAGE | BLK_TC_ACT(BLK_TC_NOTIFY))

#define BLK_IO_TRACE_MAGIC      0x65617400
#define BLK_IO_TRACE_VERSION    0x07

/*
 * The trace itself
 */
struct blk_io_trace {
        __u32 magic;            /* MAGIC << 8 | version */
        __u32 sequence;         /* event number */
        __u64 time;             /* in nanoseconds */
        __u64 sector;           /* disk offset */
        __u32 bytes;            /* transfer length */
        __u32 action;           /* what happened */
        __u32 pid;              /* who did it */
        __u32 device;           /* device identifier (dev_t) */
        __u32 cpu;              /* on what cpu did it happen */
        __u16 error;            /* completion error */
        __u16 pdu_len;          /* length of data after this trace */
};

struct pending_io {
        /* sector offset of this IO */
        u64 sector;

        /* time this IO was dispatched */
        u64 dispatch_time;
        /* time this IO was finished */
        u64 completion_time;
        struct list_head hash_list;
        /* process which queued this IO */
        u32 pid;
};

struct pid_map {
        struct list_head hash_list;
        u32 pid;
        int index;
        char name[0];
};

#define MINORBITS 20
#define MINORMASK ((1 << MINORBITS) - 1)
#define SECONDS(x)              ((unsigned long long)(x) / 1000000000)
#define NANO_SECONDS(x)         ((unsigned long long)(x) % 1000000000)
#define DOUBLE_TO_NANO_ULL(d)   ((unsigned long long)((d) * 1000000000))
#define CHECK_MAGIC(t)          (((t)->magic & 0xffffff00) == BLK_IO_TRACE_MAGIC)

void init_io_hash_table(void)
{
        int i;
        struct list_head *head;

        for (i = 0; i < IO_HASH_TABLE_SIZE; i++) {
                head = io_hash_table + i;
                INIT_LIST_HEAD(head);
        }
}

/* taken from the kernel hash.h */
static inline u64 hash_sector(u64 val)
{
        u64 hash = val;

        /* Sigh, gcc can't optimise this alone like it does for 32 bits. */
        u64 n = hash;
        n <<= 18;
        hash -= n;
        n <<= 33;
        hash -= n;
        n <<= 3;
        hash += n;
        n <<= 3;
        hash -= n;
        n <<= 4;
        hash += n;
        n <<= 2;
        hash += n;

        /* High bits are more random, so use them. */
        return hash >> (64 - IO_HASH_TABLE_BITS);
}
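
/*
 * Track a pending IO by its sector.  Fails with -EEXIST if an IO at the
 * same sector is already in the hash.
 */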
static int io_hash_table_insert(struct pending_io *ins_pio)
{
        u64 sector = ins_pio->sector;
        int slot = hash_sector(sector);
        struct list_head *head;
        struct pending_io *pio;

        head = io_hash_table + slot;
        list_for_each_entry(pio, head, hash_list) {
                if (pio->sector == sector)
                        return -EEXIST;
        }
        list_add_tail(&ins_pio->hash_list, head);
        return 0;
}

static struct pending_io *io_hash_table_search(u64 sector)
{
        int slot = hash_sector(sector);
        struct list_head *head;
        struct pending_io *pio;

        head = io_hash_table + slot;
        list_for_each_entry(pio, head, hash_list) {
                if (pio->sector == sector)
                        return pio;
        }
        return NULL;
}
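
/*
 * Allocate a pending_io for a queued IO and add it to the sector hash.
 * Returns NULL if that sector is already being tracked.
 */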
static struct pending_io *hash_queued_io(struct blk_io_trace *io)
{
        struct pending_io *pio;
        int ret;

        pio = calloc(1, sizeof(*pio));
        if (!pio)
                return NULL;
        pio->sector = io->sector;
        pio->pid = io->pid;

        ret = io_hash_table_insert(pio);
        if (ret < 0) {
                /* crud, the IO is there already */
                free(pio);
                return NULL;
        }
        return pio;
}
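
/*
 * Look up (or create) the pending_io for a dispatched IO and stamp its
 * dispatch time.
 */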
static struct pending_io *hash_dispatched_io(struct blk_io_trace *io)
{
        struct pending_io *pio;

        pio = io_hash_table_search(io->sector);
        if (!pio) {
                pio = hash_queued_io(io);
                if (!pio)
                        return NULL;
        }
        pio->dispatch_time = io->time;
        return pio;
}

static struct pending_io *hash_completed_io(struct blk_io_trace *io)
{
        struct pending_io *pio;

        pio = io_hash_table_search(io->sector);

        if (!pio)
                return NULL;
        return pio;
}

void init_process_hash_table(void)
{
        int i;
        struct list_head *head;

        for (i = 0; i < PROCESS_HASH_TABLE_SIZE; i++) {
                head = process_hash_table + i;
                INIT_LIST_HEAD(head);
        }
}

static u32 hash_pid(u32 pid)
{
        u32 hash = pid;

        hash ^= pid >> 3;
        hash ^= pid >> 3;
        hash ^= pid >> 4;
        hash ^= pid >> 6;
        return (hash & (PROCESS_HASH_TABLE_SIZE - 1));
}

static struct pid_map *process_hash_search(u32 pid)
{
        int slot = hash_pid(pid);
        struct list_head *head;
        struct pid_map *pm;

        head = process_hash_table + slot;
        list_for_each_entry(pm, head, hash_list) {
                if (pm->pid == pid)
                        return pm;
        }
        return NULL;
}
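
/*
 * Insert or refresh the pid -> name mapping.  An existing entry keeps its
 * plot index; a NULL name falls back to "[pid]".
 */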
static struct pid_map *process_hash_insert(u32 pid, char *name)
{
        int slot = hash_pid(pid);
        struct pid_map *pm;
        int old_index = 0;
        char buf[16];

        pm = process_hash_search(pid);
        if (pm) {
                /* Entry exists and name shouldn't be changed? */
                if (!name || !strcmp(name, pm->name))
                        return pm;
                list_del(&pm->hash_list);
                old_index = pm->index;
                free(pm);
        }
        if (!name) {
                sprintf(buf, "[%u]", pid);
                name = buf;
        }
        pm = malloc(sizeof(struct pid_map) + strlen(name) + 1);
        pm->pid = pid;
        pm->index = old_index;
        strcpy(pm->name, name);
        list_add_tail(&pm->hash_list, process_hash_table + slot);

        return pm;
}
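
/*
 * Handle notify records: remember pid/name mappings for per-process plots
 * and capture the absolute start time of the trace.
 */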
static void handle_notify(struct trace *trace)
{
        struct blk_io_trace *io = trace->io;
        void *payload = (char *)io + sizeof(*io);
        u32 two32[2];

        if (io->action == BLK_TN_PROCESS) {
                if (io_per_process)
                        process_hash_insert(io->pid, payload);
                return;
        }

        if (io->action != BLK_TN_TIMESTAMP)
                return;

        if (io->pdu_len != sizeof(two32))
                return;

        memcpy(two32, payload, sizeof(two32));
        trace->start_timestamp = io->time;
        trace->abs_start_time.tv_sec = two32[0];
        trace->abs_start_time.tv_nsec = two32[1];
        if (trace->abs_start_time.tv_nsec < 0) {
                trace->abs_start_time.tv_sec--;
                trace->abs_start_time.tv_nsec += 1000000000;
        }
}

int next_record(struct trace *trace)
{
        int skip = trace->io->pdu_len;
        u64 offset;

        trace->cur += sizeof(*trace->io) + skip;
        offset = trace->cur - trace->start;
        if (offset >= trace->len)
                return 1;

        trace->io = (struct blk_io_trace *)trace->cur;
        return 0;
}

void first_record(struct trace *trace)
{
        trace->cur = trace->start;
        trace->io = (struct blk_io_trace *)trace->cur;
}
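
/*
 * A record counts as an IO event if it isn't a notify, or if it is a
 * message record that starts with "fio ".
 */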
int is_io_event(struct blk_io_trace *test)
{
        char *message;

        if (!(test->action & BLK_TC_ACT(BLK_TC_NOTIFY)))
                return 1;
        if (test->action == BLK_TN_MESSAGE) {
                int len = test->pdu_len;

                if (len < 3)
                        return 0;
                message = (char *)(test + 1);
                if (strncmp(message, "fio ", 4) == 0) {
                        return 1;
                }
        }
        return 0;
}
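
/*
 * Find the timestamp of the last IO event in the trace.  First walk
 * backwards from the end of the file looking for a record that lines up
 * exactly with EOF; if that fails, scan the whole file forwards.
 */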
u64 find_last_time(struct trace *trace)
{
        char *p = trace->start + trace->len;
        struct blk_io_trace *test;
        int search_len = 0;
        u64 found = 0;

        if (trace->len < sizeof(*trace->io))
                return 0;
        p -= sizeof(*trace->io);
        while (p >= trace->start) {
                test = (struct blk_io_trace *)p;
                if (CHECK_MAGIC(test) && is_io_event(test)) {
                        u64 offset = p - trace->start;
                        if (offset + sizeof(*test) + test->pdu_len == trace->len) {
                                return test->time;
                        }
                }
                p--;
                search_len++;
                if (search_len > 8192) {
                        break;
                }
        }

        /* searching backwards didn't work out, we'll have to scan the file */
        first_record(trace);
        while (1) {
                if (is_io_event(trace->io))
                        found = trace->io->time;
                if (next_record(trace))
                        break;
        }
        first_record(trace);
        return found;
}
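
/*
 * Parse an fio notify message of the form "fio r <bank> <offset> <num_banks>".
 * Returns 0 and fills the out parameters on success, -1 otherwise.
 */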
int parse_fio_bank_message(struct trace *trace, u64 *bank_ret, u64 *offset_ret,
                           u64 *num_banks_ret)
{
        char *s;
        char *next;
        char *message;
        struct blk_io_trace *test = trace->io;
        int len = test->pdu_len;
        u64 bank;
        u64 offset;
        u64 num_banks;

        if (!(test->action & BLK_TC_ACT(BLK_TC_NOTIFY)))
                return -1;
        if (test->action != BLK_TN_MESSAGE)
                return -1;

        /* the message is fio rw bank offset num_banks */
        if (len < 3)
                return -1;
        message = (char *)(test + 1);
        if (strncmp(message, "fio r ", 6) != 0)
                return -1;

        message = strndup(message, len);
        s = strchr(message, ' ');
        if (!s)
                goto out;
        s++;
        s = strchr(s, ' ');
        if (!s)
                goto out;

        bank = strtoll(s, &next, 10);
        if (s == next)
                goto out;
        s = next;

        offset = strtoll(s, &next, 10);
        if (s == next)
                goto out;
        s = next;

        num_banks = strtoll(s, &next, 10);
        if (s == next)
                goto out;

        *bank_ret = bank;
        *offset_ret = offset;
        *num_banks_ret = num_banks;
        free(message);

        return 0;
out:
        free(message);
        return -1;
}
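
/*
 * Scan every record to find the smallest and largest byte offsets touched,
 * along with the highest fio bank and bank offset seen in notify messages.
 */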
void find_extreme_offsets(struct trace *trace, u64 *min_ret, u64 *max_ret,
                          u64 *max_bank_ret, u64 *max_offset_ret)
{
        u64 found = 0;
        u64 max = 0, min = ~(u64)0;
        u64 max_bank = 0;
        u64 max_bank_offset = 0;
        u64 num_banks = 0;

        first_record(trace);
        while (1) {
                if (!(trace->io->action & BLK_TC_ACT(BLK_TC_NOTIFY))) {
                        found = trace->io->sector << 9;
                        if (found < min)
                                min = found;

                        found += trace->io->bytes;
                        if (max < found)
                                max = found;
                } else {
                        u64 bank;
                        u64 offset;

                        if (!parse_fio_bank_message(trace, &bank,
                                                    &offset, &num_banks)) {
                                if (bank > max_bank)
                                        max_bank = bank;
                                if (offset > max_bank_offset)
                                        max_bank_offset = offset;
                        }
                }
                if (next_record(trace))
                        break;
        }
        first_record(trace);
        *min_ret = min;
        *max_ret = max;
        *max_bank_ret = max_bank;
        *max_offset_ret = max_bank_offset;
}
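
/*
 * Record which trace actions (queue, issue, completion) appear at all so
 * later passes can pick the best event type to plot.
 */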
static void check_io_types(struct trace *trace)
{
        struct blk_io_trace *io = trace->io;
        int action = io->action & BLK_TA_MASK;

        if (!(io->action & BLK_TC_ACT(BLK_TC_NOTIFY))) {
                switch (action) {
                case __BLK_TA_COMPLETE:
                        trace->found_completion = 1;
                        break;
                case __BLK_TA_ISSUE:
                        trace->found_issue = 1;
                        break;
                case __BLK_TA_QUEUE:
                        trace->found_queue = 1;
                        break;
                }
        }
}
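
/*
 * Split the offset range into ten buckets and pick y-axis zoom limits that
 * drop sparsely used buckets at either end (anything under 5% of the
 * busiest bucket).
 */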
int filter_outliers(struct trace *trace, u64 min_offset, u64 max_offset,
                    u64 *yzoom_min, u64 *yzoom_max)
{
        int hits[11];
        u64 max_per_bucket[11];
        u64 min_per_bucket[11];
        u64 bytes_per_bucket = (max_offset - min_offset + 1) / 10;
        int slot;
        int fat_count = 0;

        memset(hits, 0, sizeof(int) * 11);
        memset(max_per_bucket, 0, sizeof(u64) * 11);
        memset(min_per_bucket, 0xff, sizeof(u64) * 11);
        first_record(trace);
        while (1) {
                check_io_types(trace);
                if (!(trace->io->action & BLK_TC_ACT(BLK_TC_NOTIFY)) &&
                    (trace->io->action & BLK_TA_MASK) == __BLK_TA_QUEUE) {
                        u64 off = (trace->io->sector << 9) - min_offset;

                        slot = (int)(off / bytes_per_bucket);
                        hits[slot]++;
                        if (off < min_per_bucket[slot])
                                min_per_bucket[slot] = off;

                        off += trace->io->bytes;
                        slot = (int)(off / bytes_per_bucket);
                        hits[slot]++;
                        if (off > max_per_bucket[slot])
                                max_per_bucket[slot] = off;
                }
                if (next_record(trace))
                        break;
        }
        first_record(trace);
        for (slot = 0; slot < 11; slot++) {
                if (hits[slot] > fat_count) {
                        fat_count = hits[slot];
                }
        }

        *yzoom_max = max_offset;
        for (slot = 10; slot >= 0; slot--) {
                double d = hits[slot];

                if (d >= (double)fat_count * .05) {
                        *yzoom_max = max_per_bucket[slot] + min_offset;
                        break;
                }
        }

        *yzoom_min = min_offset;
        for (slot = 0; slot < 10; slot++) {
                double d = hits[slot];

                if (d >= (double)fat_count * .05) {
                        *yzoom_min = min_per_bucket[slot] + min_offset;
                        break;
                }
        }
        return 0;
}
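
/*
 * Resolve the file to open: the name as given, then <name>.dump, and
 * finally a dump generated from <name>.blktrace.0 if one exists.
 */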
static char *find_trace_file(char *filename)
{
        int ret;
        struct stat st;
        char line[1024];
        char *dot;
        char *try;

        ret = stat(filename, &st);
        if (ret == 0)
                return strdup(filename);

        snprintf(line, 1024, "%s.%s", filename, "dump");
        ret = stat(line, &st);
        if (ret == 0)
                return strdup(line);

        try = strdup(filename);
        dot = strrchr(try, '.');
        if (!dot || strcmp(".dump", dot) != 0) {
                if (dot && dot != try)
                        *dot = '\0';
                snprintf(line, 1024, "%s%s", try, ".blktrace.0");
                ret = stat(line, &st);
                if (ret == 0) {
                        blktrace_to_dump(try);
                        snprintf(line, 1024, "%s.%s", try, "dump");
                        ret = stat(line, &st);
                        if (ret == 0) {
                                free(try);
                                return strdup(line);
                        }
                }
        }
        free(try);
        return NULL;
}
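
/*
 * Open the trace file (converting blktrace output to a dump if needed) and
 * mmap it read-only; records are then walked straight out of the mapping.
 */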
struct trace *open_trace(char *filename)
{
        int fd;
        char *p;
        struct stat st;
        int ret;
        struct trace *trace;
        char *found_filename;

        trace = calloc(1, sizeof(*trace));
        if (!trace) {
                fprintf(stderr, "unable to allocate memory for trace\n");
                return NULL;
        }

        found_filename = find_trace_file(filename);
        if (!found_filename) {
                fprintf(stderr, "Unable to find trace file %s\n", filename);
                goto fail;
        }
        filename = found_filename;

        fd = open(filename, O_RDONLY);
        if (fd < 0) {
                fprintf(stderr, "Unable to open trace file %s err %s\n",
                        filename, strerror(errno));
                goto fail;
        }
        ret = fstat(fd, &st);
        if (ret < 0) {
                fprintf(stderr, "stat failed on %s err %s\n",
                        filename, strerror(errno));
                goto fail_fd;
        }
        p = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
        if (p == MAP_FAILED) {
                fprintf(stderr, "Unable to mmap trace file %s, err %s\n",
                        filename, strerror(errno));
                goto fail_fd;
        }
        trace->fd = fd;
        trace->len = st.st_size;
        trace->start = p;
        trace->cur = p;
        trace->io = (struct blk_io_trace *)p;
        return trace;

fail_fd:
        close(fd);
fail:
        free(trace);
        return NULL;
}
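
/*
 * Throughput and iops prefer completion events, then issue, then queue.
 */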
static inline int tput_event(struct trace *trace)
{
        if (trace->found_completion)
                return __BLK_TA_COMPLETE;
        if (trace->found_issue)
                return __BLK_TA_ISSUE;
        if (trace->found_queue)
                return __BLK_TA_QUEUE;

        return __BLK_TA_COMPLETE;
}

int action_char_to_num(char action)
{
        switch (action) {
        case 'Q':
                return __BLK_TA_QUEUE;
        case 'D':
                return __BLK_TA_ISSUE;
        case 'C':
                return __BLK_TA_COMPLETE;
        }
        return -1;
}
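
/*
 * IO plots use plot_io_action if it was set, otherwise queue, then issue,
 * then completion events.
 */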
static inline int io_event(struct trace *trace)
{
        if (plot_io_action)
                return plot_io_action;
        if (trace->found_queue)
                return __BLK_TA_QUEUE;
        if (trace->found_issue)
                return __BLK_TA_ISSUE;
        if (trace->found_completion)
                return __BLK_TA_COMPLETE;

        return __BLK_TA_COMPLETE;
}

void add_tput(struct trace *trace, struct graph_line_data *gld)
{
        struct blk_io_trace *io = trace->io;
        int action = io->action & BLK_TA_MASK;
        int seconds;

        if (io->action & BLK_TC_ACT(BLK_TC_NOTIFY))
                return;

        if (action != tput_event(trace))
                return;

        seconds = SECONDS(io->time);
        if (seconds > gld->max_seconds)
                return;

        gld->data[seconds].sum += io->bytes;
        gld->data[seconds].count = 1;
        if (gld->data[seconds].sum > gld->max)
                gld->max = gld->data[seconds].sum;
}

#define GDD_PTR_ALLOC_STEP 16
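
/*
 * Map a pid to its per-process plot slot, growing the gdd_reads and
 * gdd_writes arrays in steps of GDD_PTR_ALLOC_STEP.  Returns NULL when
 * per-process plotting is disabled.
 */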
static struct pid_map *get_pid_map(struct trace_file *tf, u32 pid)
{
        struct pid_map *pm;

        if (!io_per_process) {
                if (!tf->io_plots)
                        tf->io_plots = 1;
                return NULL;
        }

        pm = process_hash_insert(pid, NULL);
        /* New entry? */
        if (!pm->index) {
                if (tf->io_plots == tf->io_plots_allocated) {
                        tf->io_plots_allocated += GDD_PTR_ALLOC_STEP;
                        tf->gdd_reads = realloc(tf->gdd_reads,
                                tf->io_plots_allocated * sizeof(struct graph_dot_data *));
                        if (!tf->gdd_reads)
                                abort();
                        tf->gdd_writes = realloc(tf->gdd_writes,
                                tf->io_plots_allocated * sizeof(struct graph_dot_data *));
                        if (!tf->gdd_writes)
                                abort();
                        memset(tf->gdd_reads + tf->io_plots_allocated - GDD_PTR_ALLOC_STEP,
                               0, GDD_PTR_ALLOC_STEP * sizeof(struct graph_dot_data *));
                        memset(tf->gdd_writes + tf->io_plots_allocated - GDD_PTR_ALLOC_STEP,
                               0, GDD_PTR_ALLOC_STEP * sizeof(struct graph_dot_data *));
                }
                pm->index = tf->io_plots++;

                return pm;
        }
        return pm;
}
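
/*
 * Plot a single IO: find (or create) the per-process dot data for its
 * direction and mark the sector range at this time.
 */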
void add_io(struct trace *trace, struct trace_file *tf)
{
        struct blk_io_trace *io = trace->io;
        int action = io->action & BLK_TA_MASK;
        u64 offset;
        int index;
        char *label;
        struct pid_map *pm;

        if (io->action & BLK_TC_ACT(BLK_TC_NOTIFY))
                return;

        if (action != io_event(trace))
                return;

        offset = io->sector << 9;

        pm = get_pid_map(tf, io->pid);
        if (!pm) {
                index = 0;
                label = "";
        } else {
                index = pm->index;
                label = pm->name;
        }
        if (BLK_DATADIR(io->action) & BLK_TC_READ) {
                if (!tf->gdd_reads[index])
                        tf->gdd_reads[index] = alloc_dot_data(tf->min_seconds, tf->max_seconds,
                                                              tf->min_offset, tf->max_offset,
                                                              tf->stop_seconds, pick_color(),
                                                              strdup(label));
                set_gdd_bit(tf->gdd_reads[index], offset, io->bytes, io->time);
        } else if (BLK_DATADIR(io->action) & BLK_TC_WRITE) {
                if (!tf->gdd_writes[index])
                        tf->gdd_writes[index] = alloc_dot_data(tf->min_seconds, tf->max_seconds,
                                                               tf->min_offset, tf->max_offset,
                                                               tf->stop_seconds, pick_color(),
                                                               strdup(label));
                set_gdd_bit(tf->gdd_writes[index], offset, io->bytes, io->time);
        }
}
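
/*
 * Track IOs in flight: queue events are hashed so a later dispatch can find
 * them, and issue events bump the in-flight count that feeds the per-second
 * average.
 */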
void add_pending_io(struct trace *trace, struct graph_line_data *gld)
{
        int seconds;
        struct blk_io_trace *io = trace->io;
        int action = io->action & BLK_TA_MASK;
        double avg;
        struct pending_io *pio;

        if (io->action & BLK_TC_ACT(BLK_TC_NOTIFY))
                return;

        if (action == __BLK_TA_QUEUE) {
                if (trace->found_issue || trace->found_completion)
                        hash_queued_io(trace->io);
                return;
        }
        if (action != __BLK_TA_ISSUE)
                return;

        seconds = SECONDS(io->time);
        if (seconds > gld->max_seconds)
                return;

        pio = hash_dispatched_io(trace->io);
        if (!pio)
                return;

        if (!trace->found_completion) {
                list_del(&pio->hash_list);
                free(pio);
        }

        ios_in_flight++;

        gld->data[seconds].sum += ios_in_flight;
        gld->data[seconds].count++;

        avg = (double)gld->data[seconds].sum / gld->data[seconds].count;
        if (gld->max < (u64)avg) {
                gld->max = avg;
        }
}
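
/*
 * On completion, charge the dispatch-to-completion latency to this second's
 * average and drop the pending IO from the hash.
 */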
void add_completed_io(struct trace *trace,
                      struct graph_line_data *latency_gld)
{
        struct blk_io_trace *io = trace->io;
        int seconds;
        int action = io->action & BLK_TA_MASK;
        struct pending_io *pio;
        double avg;
        u64 latency;

        if (io->action & BLK_TC_ACT(BLK_TC_NOTIFY))
                return;

        if (action != __BLK_TA_COMPLETE)
                return;

        seconds = SECONDS(io->time);

        pio = hash_completed_io(trace->io);
        if (!pio)
                return;

        if (ios_in_flight > 0)
                ios_in_flight--;
        if (io->time >= pio->dispatch_time) {
                latency = io->time - pio->dispatch_time;
                latency_gld->data[seconds].sum += latency;
                latency_gld->data[seconds].count++;
        }

        list_del(&pio->hash_list);
        free(pio);

        avg = (double)latency_gld->data[seconds].sum /
                latency_gld->data[seconds].count;
        if (latency_gld->max < (u64)avg) {
                latency_gld->max = avg;
        }
}

void add_iop(struct trace *trace, struct graph_line_data *gld)
{
        struct blk_io_trace *io = trace->io;
        int action = io->action & BLK_TA_MASK;
        int seconds;

        if (io->action & BLK_TC_ACT(BLK_TC_NOTIFY))
                return;

        /* iops and tput use the same events */
        if (action != tput_event(trace))
                return;

        seconds = SECONDS(io->time);
        if (seconds > gld->max_seconds)
                return;

        gld->data[seconds].sum += 1;
        gld->data[seconds].count = 1;
        if (gld->data[seconds].sum > gld->max)
                gld->max = gld->data[seconds].sum;
}

void check_record(struct trace *trace)
{
        handle_notify(trace);
}