2 * Copyright (C) 2012 Fusion-io
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 * Parts of this file were imported from Jens Axboe's blktrace sources (also GPL)
19 #include <sys/types.h>
28 #include <asm/types.h>
/* in-flight IO tracking: open-chained hash table keyed by sector */
39 #define IO_HASH_TABLE_BITS 11
40 #define IO_HASH_TABLE_SIZE (1 << IO_HASH_TABLE_BITS)
/* one bucket head per slot; chains struct pending_io via ->hash_list */
41 static struct list_head io_hash_table[IO_HASH_TABLE_SIZE];
/* IOs seen dispatched (__BLK_TA_ISSUE) but not yet completed */
42 static u64 ios_in_flight = 0;
/*
 * Trace category bits, mirroring the kernel blktrace API.  These live
 * in the high 16 bits of blk_io_trace->action (see BLK_TC_SHIFT).
 */
49 BLK_TC_READ = 1 << 0, /* reads */
50 BLK_TC_WRITE = 1 << 1, /* writes */
51 BLK_TC_FLUSH = 1 << 2, /* flush */
52 BLK_TC_SYNC = 1 << 3, /* sync */
53 BLK_TC_QUEUE = 1 << 4, /* queueing/merging */
54 BLK_TC_REQUEUE = 1 << 5, /* requeueing */
55 BLK_TC_ISSUE = 1 << 6, /* issue */
56 BLK_TC_COMPLETE = 1 << 7, /* completions */
57 BLK_TC_FS = 1 << 8, /* fs requests */
58 BLK_TC_PC = 1 << 9, /* pc requests */
59 BLK_TC_NOTIFY = 1 << 10, /* special message */
60 BLK_TC_AHEAD = 1 << 11, /* readahead */
61 BLK_TC_META = 1 << 12, /* metadata */
62 BLK_TC_DISCARD = 1 << 13, /* discard requests */
63 BLK_TC_DRV_DATA = 1 << 14, /* binary driver data */
64 BLK_TC_FUA = 1 << 15, /* fua requests */
66 BLK_TC_END = 1 << 15, /* we've run out of bits! */
/* category bits sit above the 16-bit action code in ->action */
69 #define BLK_TC_SHIFT (16)
/* place a category value into the upper half of an action word */
70 #define BLK_TC_ACT(act) ((act) << BLK_TC_SHIFT)
/* extract just the read/write direction bits from an action word */
71 #define BLK_DATADIR(a) (((a) >> BLK_TC_SHIFT) & (BLK_TC_READ | BLK_TC_WRITE))
/*
 * Action codes carried in the low 16 bits of blk_io_trace->action,
 * mirroring the kernel's enum blktrace_act.
 */
77 __BLK_TA_QUEUE = 1, /* queued */
78 __BLK_TA_BACKMERGE, /* back merged to existing rq */
79 __BLK_TA_FRONTMERGE, /* front merge to existing rq */
80 __BLK_TA_GETRQ, /* allocated new request */
81 __BLK_TA_SLEEPRQ, /* sleeping on rq allocation */
82 __BLK_TA_REQUEUE, /* request requeued */
83 __BLK_TA_ISSUE, /* sent to driver */
84 __BLK_TA_COMPLETE, /* completed by driver */
85 __BLK_TA_PLUG, /* queue was plugged */
86 __BLK_TA_UNPLUG_IO, /* queue was unplugged by io */
87 __BLK_TA_UNPLUG_TIMER, /* queue was unplugged by timer */
88 __BLK_TA_INSERT, /* insert request */
89 __BLK_TA_SPLIT, /* bio was split */
90 __BLK_TA_BOUNCE, /* bio was bounced */
91 __BLK_TA_REMAP, /* bio was remapped */
92 __BLK_TA_ABORT, /* request aborted */
93 __BLK_TA_DRV_DATA, /* binary driver data */
/* mask isolating the action code from the category bits above it */
96 #define BLK_TA_MASK ((1 << BLK_TC_SHIFT) - 1)
/* sub-types of BLK_TC_NOTIFY messages embedded in the trace stream */
101 enum blktrace_notify {
102 __BLK_TN_PROCESS = 0, /* establish pid/name mapping */
103 __BLK_TN_TIMESTAMP, /* include system clock */
104 __BLK_TN_MESSAGE, /* Character string message */
/*
 * Full action words: action code in the low half combined with the
 * category that generates it in the high half.
 */
108 * Trace actions in full. Additionally, read or write is masked
110 #define BLK_TA_QUEUE (__BLK_TA_QUEUE | BLK_TC_ACT(BLK_TC_QUEUE))
111 #define BLK_TA_BACKMERGE (__BLK_TA_BACKMERGE | BLK_TC_ACT(BLK_TC_QUEUE))
112 #define BLK_TA_FRONTMERGE (__BLK_TA_FRONTMERGE | BLK_TC_ACT(BLK_TC_QUEUE))
113 #define BLK_TA_GETRQ (__BLK_TA_GETRQ | BLK_TC_ACT(BLK_TC_QUEUE))
114 #define BLK_TA_SLEEPRQ (__BLK_TA_SLEEPRQ | BLK_TC_ACT(BLK_TC_QUEUE))
115 #define BLK_TA_REQUEUE (__BLK_TA_REQUEUE | BLK_TC_ACT(BLK_TC_REQUEUE))
116 #define BLK_TA_ISSUE (__BLK_TA_ISSUE | BLK_TC_ACT(BLK_TC_ISSUE))
117 #define BLK_TA_COMPLETE (__BLK_TA_COMPLETE| BLK_TC_ACT(BLK_TC_COMPLETE))
118 #define BLK_TA_PLUG (__BLK_TA_PLUG | BLK_TC_ACT(BLK_TC_QUEUE))
119 #define BLK_TA_UNPLUG_IO (__BLK_TA_UNPLUG_IO | BLK_TC_ACT(BLK_TC_QUEUE))
120 #define BLK_TA_UNPLUG_TIMER (__BLK_TA_UNPLUG_TIMER | BLK_TC_ACT(BLK_TC_QUEUE))
121 #define BLK_TA_INSERT (__BLK_TA_INSERT | BLK_TC_ACT(BLK_TC_QUEUE))
122 #define BLK_TA_SPLIT (__BLK_TA_SPLIT)
123 #define BLK_TA_BOUNCE (__BLK_TA_BOUNCE)
124 #define BLK_TA_REMAP (__BLK_TA_REMAP | BLK_TC_ACT(BLK_TC_QUEUE))
125 #define BLK_TA_ABORT (__BLK_TA_ABORT | BLK_TC_ACT(BLK_TC_QUEUE))
126 #define BLK_TA_DRV_DATA (__BLK_TA_DRV_DATA | BLK_TC_ACT(BLK_TC_DRV_DATA))
/* notify messages are tagged with the NOTIFY category */
128 #define BLK_TN_PROCESS (__BLK_TN_PROCESS | BLK_TC_ACT(BLK_TC_NOTIFY))
129 #define BLK_TN_TIMESTAMP (__BLK_TN_TIMESTAMP | BLK_TC_ACT(BLK_TC_NOTIFY))
130 #define BLK_TN_MESSAGE (__BLK_TN_MESSAGE | BLK_TC_ACT(BLK_TC_NOTIFY))
/* magic occupies the top 3 bytes of ->magic; the low byte is the version */
132 #define BLK_IO_TRACE_MAGIC 0x65617400
133 #define BLK_IO_TRACE_VERSION 0x07
/*
 * On-disk layout of one blktrace event record.  pdu_len bytes of
 * payload follow the struct directly in the trace file.
 */
137 struct blk_io_trace {
138 __u32 magic; /* MAGIC << 8 | version */
139 __u32 sequence; /* event number */
140 __u64 time; /* in nanoseconds */
141 __u64 sector; /* disk offset */
142 __u32 bytes; /* transfer length */
143 __u32 action; /* what happened */
144 __u32 pid; /* who did it */
145 __u32 device; /* device identifier (dev_t) */
146 __u32 cpu; /* on what cpu did it happen */
147 __u16 error; /* completion error */
148 __u16 pdu_len; /* length of data after this trace */
/*
 * struct pending_io: one dispatched IO we are waiting to see complete,
 * hashed by sector in io_hash_table.
 */
152 /* sector offset of this IO */
155 /* time this IO was dispatched */
157 /* time this IO was finished */
/* links this entry into its io_hash_table bucket */
159 struct list_head hash_list;
163 #define MINORMASK ((1 << MINORBITS) - 1)
/* split a nanosecond timestamp into whole seconds / remainder */
164 #define SECONDS(x) ((unsigned long long)(x) / 1000000000)
165 #define NANO_SECONDS(x) ((unsigned long long)(x) % 1000000000)
166 #define DOUBLE_TO_NANO_ULL(d) ((unsigned long long)((d) * 1000000000))
/* true when a record's magic matches, ignoring the version byte */
167 #define CHECK_MAGIC(t) (((t)->magic & 0xffffff00) == BLK_IO_TRACE_MAGIC)
/* initialize every io_hash_table bucket to an empty list; call before use */
169 void init_io_hash_table(void)
172 struct list_head *head;
174 for (i = 0; i < IO_HASH_TABLE_SIZE; i++) {
175 head = io_hash_table + i;
176 INIT_LIST_HEAD(head);
/* multiplicative hash of a 64-bit sector number down to a table slot */
180 /* taken from the kernel hash.h */
181 static inline u64 hash_sector(u64 val)
185 /* Sigh, gcc can't optimise this alone like it does for 32 bits. */
200 /* High bits are more random, so use them. */
201 return hash >> (64 - IO_HASH_TABLE_BITS);
/*
 * Add ins_pio to the bucket for its sector.  If an IO with the same
 * sector is already pending, the insert is rejected (the caller in
 * hash_dispatched_io checks for -EEXIST).
 */
204 static int hash_table_insert(struct pending_io *ins_pio)
206 u64 sector = ins_pio->sector;
207 int slot = hash_sector(sector);
208 struct list_head *head;
209 struct pending_io *pio;
211 head = io_hash_table + slot;
/* scan the chain for a duplicate sector before linking in */
212 list_for_each_entry(pio, head, hash_list) {
213 if (pio->sector == sector)
216 list_add_tail(&ins_pio->hash_list, head);
/* look up the pending IO tracked at @sector in its hash bucket */
220 static struct pending_io *hash_table_search(u64 sector)
222 int slot = hash_sector(sector);
223 struct list_head *head;
224 struct pending_io *pio;
226 head = io_hash_table + slot;
227 list_for_each_entry(pio, head, hash_list) {
228 if (pio->sector == sector)
/*
 * Record a freshly dispatched IO so its completion latency can be
 * computed later from dispatch_time.
 */
234 static int hash_dispatched_io(struct blk_io_trace *io)
236 struct pending_io *pio;
/* NOTE(review): calloc result is dereferenced without a NULL check —
 * would crash on allocation failure; confirm against the full source */
239 pio = calloc(1, sizeof(*pio));
240 pio->sector = io->sector;
241 pio->dispatch_time = io->time;
243 ret = hash_table_insert(pio);
244 if (ret == -EEXIST) {
245 /* crud, the IO isn't here */
/* find the pending_io matching a completion event by its sector */
251 static struct pending_io *hash_completed_io(struct blk_io_trace *io)
253 struct pending_io *pio;
255 pio = hash_table_search(io->sector);
/*
 * Process a notify record.  Only BLK_TN_TIMESTAMP is handled here: its
 * payload is two 32-bit words (seconds, nanoseconds) giving the absolute
 * wall-clock time that corresponds to io->time, stored on the trace so
 * timestamps can be reported in real time.
 */
262 static void handle_notify(struct trace *trace)
264 struct blk_io_trace *io = trace->io;
/* payload (pdu) begins immediately after the fixed-size record */
265 void *payload = (char *)io + sizeof(*io);
269 if (io->action != BLK_TN_TIMESTAMP)
/* reject malformed timestamp payloads */
272 if (io->pdu_len != sizeof(two32))
275 memcpy(two32, payload, sizeof(two32));
276 trace->start_timestamp = io->time;
277 trace->abs_start_time.tv_sec = two32[0];
278 trace->abs_start_time.tv_nsec = two32[1];
/* normalize a negative nanosecond field into the previous second */
279 if (trace->abs_start_time.tv_nsec < 0) {
280 trace->abs_start_time.tv_sec--;
281 trace->abs_start_time.tv_nsec += 1000000000;
/*
 * Advance the trace cursor past the current record and its pdu payload.
 * Returns non-zero once the cursor runs past the end of the mapping
 * (presumably; the return statements are outside this view).
 */
285 int next_record(struct trace *trace)
287 int skip = trace->io->pdu_len;
290 trace->cur += sizeof(*trace->io) + skip;
291 offset = trace->cur - trace->start;
292 if (offset >= trace->len)
295 trace->io = (struct blk_io_trace *)trace->cur;
/* rewind the cursor to the first record of the mapped trace */
299 void first_record(struct trace *trace)
301 trace->cur = trace->start;
302 trace->io = (struct blk_io_trace *)trace->cur;
/*
 * Decide whether a record represents real IO activity.  Non-notify
 * records qualify; notify records only qualify when they are fio
 * benchmark messages (string payload starting with "fio ").
 */
305 int is_io_event(struct blk_io_trace *test)
308 if (!(test->action & BLK_TC_ACT(BLK_TC_NOTIFY)))
310 if (test->action == BLK_TN_MESSAGE) {
311 int len = test->pdu_len;
/* message text starts right after the record header */
314 message = (char *)(test + 1);
315 if (strncmp(message, "fio ", 4) == 0) {
/*
 * Find the timestamp of the last IO event in the trace.  First walk
 * backwards byte-by-byte from the end looking for a record whose magic
 * checks out and whose length lands exactly on the end of the file;
 * if that fails within the search window, fall back to scanning the
 * whole file forwards.
 */
322 u64 find_last_time(struct trace *trace)
324 char *p = trace->start + trace->len;
325 struct blk_io_trace *test;
/* a trace shorter than one record can't contain any events */
329 if (trace->len < sizeof(*trace->io))
331 p -= sizeof(*trace->io);
332 while (p >= trace->start) {
333 test = (struct blk_io_trace *)p;
334 if (CHECK_MAGIC(test) && is_io_event(test)) {
335 u64 offset = p - trace->start;
/* only trust a candidate that ends exactly at EOF */
336 if (offset + sizeof(*test) + test->pdu_len == trace->len) {
/* bound the backwards search so we don't crawl huge files */
342 if (search_len > 8192) {
347 /* searching backwards didn't work out, we'll have to scan the file */
350 if (is_io_event(trace->io))
351 found = trace->io->time;
352 if (next_record(trace))
/*
 * Parse an fio notify message of the form "fio r bank offset num_banks"
 * into its three numeric fields.  Non-notify records and messages not
 * starting with "fio r " are rejected early.
 */
359 int parse_fio_bank_message(struct trace *trace, u64 *bank_ret, u64 *offset_ret,
365 struct blk_io_trace *test = trace->io;
366 int len = test->pdu_len;
371 if (!(test->action & BLK_TC_ACT(BLK_TC_NOTIFY)))
373 if (test->action != BLK_TN_MESSAGE)
376 /* the message is fio rw bank offset num_banks */
379 message = (char *)(test + 1);
380 if (strncmp(message, "fio r ", 6) != 0)
/* NOTE(review): strndup allocates; the free presumably happens in the
 * elided tail of this function — confirm no leak on early returns */
383 message = strndup(message, len);
384 s = strchr(message, ' ');
/* walk the space-separated fields: bank, offset, num_banks */
392 bank = strtoll(s, &next, 10);
397 offset = strtoll(s, &next, 10);
402 num_banks = strtoll(s, &next, 10);
407 *offset_ret = offset;
408 *num_banks_ret = num_banks;
/*
 * Scan the whole trace to find the highest byte offset touched by any
 * IO (sector << 9 plus transfer length), and the highest bank/offset
 * reported by fio bank messages.
 */
416 void find_highest_offset(struct trace *trace, u64 *max_ret, u64 *max_bank_ret,
422 u64 max_bank_offset = 0;
426 if (!(trace->io->action & BLK_TC_ACT(BLK_TC_NOTIFY))) {
/* end of this IO in bytes: sector converted to bytes plus length */
427 found = trace->io->sector << 9;
428 found += trace->io->bytes;
436 if (!parse_fio_bank_message(trace, &bank,
437 &offset, &num_banks)) {
440 if (offset > max_bank_offset)
441 max_bank_offset = offset;
444 if (next_record(trace))
449 *max_bank_ret = max_bank;
450 *max_offset_ret = max_bank_offset;
/*
 * Compute a sensible vertical zoom range by bucketing IO end offsets
 * into 11 buckets (0..10; top == max_offset lands in slot 10) and
 * discarding buckets holding fewer than 5% of the hits of the fattest
 * bucket.  yzoom_max comes from the highest surviving bucket, yzoom_min
 * from the lowest.
 */
453 int filter_outliers(struct trace *trace, u64 max_offset,
454 u64 *yzoom_min, u64 *yzoom_max)
457 u64 max_per_bucket[11];
/* NOTE(review): bytes_per_bucket is 0 when max_offset < 10, which would
 * make the slot division below divide by zero — confirm callers
 * guarantee a large max_offset */
458 u64 bytes_per_bucket = max_offset / 10;
462 memset(hits, 0, sizeof(int) * 11);
463 memset(max_per_bucket, 0, sizeof(u64) * 11);
466 if (!(trace->io->action & BLK_TC_ACT(BLK_TC_NOTIFY))) {
467 u64 top = (trace->io->sector << 9) + trace->io->bytes;
468 slot = (int)(top / bytes_per_bucket);
470 if (top > max_per_bucket[slot])
471 max_per_bucket[slot] = top;
473 if (next_record(trace))
/* find the bucket with the most hits; it anchors the 5% threshold */
477 for (slot = 0; slot < 11; slot++) {
478 if (hits[slot] > fat_count) {
479 fat_count = hits[slot];
483 *yzoom_max = max_offset;
/* walk down from the top for the highest significant bucket */
484 for (slot = 10; slot >= 0; slot--) {
485 double d = hits[slot];
487 if (d >= (double)fat_count * .05) {
488 *yzoom_max = max_per_bucket[slot];
/* walk up from the bottom for the lowest significant bucket */
494 for (slot = 0; slot < 10; slot++) {
495 double d = hits[slot];
497 if (d >= (double)fat_count * .05) {
498 *yzoom_min = slot * bytes_per_bucket;
/*
 * Resolve the user-supplied name to an actual trace dump file.  Tries,
 * in order: the name as given, "<name>.dump", and finally converting
 * raw blktrace output ("<base>.blktrace.0") into "<base>.dump" via
 * blktrace_to_dump().  Returns a heap-allocated path the caller owns.
 */
505 static char *find_trace_file(char *filename)
513 ret = stat(filename, &st);
515 return strdup(filename);
/* assumes `line` is a char[1024] declared in the elided lines — confirm */
517 snprintf(line, 1024, "%s.%s", filename, "dump");
518 ret = stat(line, &st);
/* strip any trailing ".dump" before probing for blktrace output */
522 try = strdup(filename);
523 dot = strrchr(try, '.');
524 if (!dot || strcmp(".dump", dot) != 0) {
527 snprintf(line, 1024, "%s%s", try, ".blktrace.0");
528 ret = stat(line, &st);
/* raw blktrace data exists: convert it into a dump we can mmap */
530 blktrace_to_dump(try);
531 snprintf(line, 1024, "%s.%s", try, "dump");
532 ret = stat(line, &st);
/*
 * Open and mmap a trace dump, returning a fully populated struct trace
 * positioned at the first record, or NULL-ish failure handled in the
 * elided error paths.  Caller owns the returned trace.
 */
542 struct trace *open_trace(char *filename)
549 char *found_filename;
551 trace = calloc(1, sizeof(*trace));
553 fprintf(stderr, "unable to allocate memory for trace\n");
557 found_filename = find_trace_file(filename);
558 if (!found_filename) {
559 fprintf(stderr, "Unable to find trace file %s\n", filename);
562 filename = found_filename;
564 fd = open(filename, O_RDONLY);
566 fprintf(stderr, "Unable to open trace file %s err %s\n", filename, strerror(errno));
569 ret = fstat(fd, &st);
571 fprintf(stderr, "stat failed on %s err %s\n", filename, strerror(errno));
/* map the whole file read-only; records are parsed in place */
574 p = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
575 if (p == MAP_FAILED) {
576 fprintf(stderr, "Unable to mmap trace file %s, err %s\n", filename, strerror(errno));
580 trace->len = st.st_size;
583 trace->io = (struct blk_io_trace *)p;
/*
 * Pick which event type should drive throughput accounting, preferring
 * completions, then issues, then queue events, depending on what the
 * trace actually contains.
 */
592 static inline int tput_event(struct trace *trace)
594 if (trace->found_completion)
595 return __BLK_TA_COMPLETE;
596 if (trace->found_issue)
597 return __BLK_TA_ISSUE;
598 if (trace->found_queue)
599 return __BLK_TA_QUEUE;
601 return __BLK_TA_COMPLETE;
/*
 * Pick which event type should drive IO-pattern plotting.  Note the
 * preference order is reversed relative to tput_event(): queue events
 * first, then issue, then completion.
 */
604 static inline int io_event(struct trace *trace)
606 if (trace->found_queue)
607 return __BLK_TA_QUEUE;
608 if (trace->found_issue)
609 return __BLK_TA_ISSUE;
610 if (trace->found_completion)
611 return __BLK_TA_COMPLETE;
613 return __BLK_TA_COMPLETE;
/*
 * Accumulate the current record's byte count into the per-second
 * throughput graph, tracking the running maximum for axis scaling.
 */
616 void add_tput(struct trace *trace, struct graph_line_data *gld)
618 struct blk_io_trace *io = trace->io;
619 int action = io->action & BLK_TA_MASK;
/* notify records carry no IO payload */
622 if (io->action & BLK_TC_ACT(BLK_TC_NOTIFY))
/* only count the one event type chosen for this trace */
625 if (action != tput_event(trace))
628 seconds = SECONDS(io->time);
629 if (seconds > gld->seconds) {
630 fprintf(stderr, "Bad record %d %d %d\n", seconds, gld->seconds, action);
/* count stays 1: throughput plots raw sums, not averages */
634 gld->data[seconds].sum += io->bytes;
635 gld->data[seconds].count = 1;
636 if (gld->data[seconds].sum > gld->max)
637 gld->max = gld->data[seconds].sum;
/*
 * Mark the byte range touched by this record in the read or write
 * scatter-plot bitmap, chosen by the record's data direction.
 */
640 void add_io(struct trace *trace, struct graph_dot_data *gdd_writes,
641 struct graph_dot_data *gdd_reads)
643 struct blk_io_trace *io = trace->io;
644 int action = io->action & BLK_TA_MASK;
647 if (io->action & BLK_TC_ACT(BLK_TC_NOTIFY))
650 if (action != io_event(trace))
/* convert sector to a byte offset */
653 offset = io->sector << 9;
655 if (BLK_DATADIR(io->action) & BLK_TC_READ)
656 set_gdd_bit(gdd_reads, offset, io->bytes, io->time);
657 else if (BLK_DATADIR(io->action) & BLK_TC_WRITE)
658 set_gdd_bit(gdd_writes, offset, io->bytes, io->time);
/*
 * On each issue event, register the IO as in flight and fold the
 * current in-flight depth into the per-second average for the queue
 * depth graph.
 */
661 void add_pending_io(struct trace *trace, struct graph_line_data *gld)
665 struct blk_io_trace *io = trace->io;
666 int action = io->action & BLK_TA_MASK;
669 if (io->action & BLK_TC_ACT(BLK_TC_NOTIFY))
/* only dispatch events change the in-flight depth here */
672 if (action != __BLK_TA_ISSUE)
675 seconds = SECONDS(io->time);
676 if (seconds > gld->seconds) {
677 fprintf(stderr, "Bad record %d %d\n", seconds, gld->seconds);
681 ret = hash_dispatched_io(trace->io);
/* ios_in_flight is presumably incremented in the elided lines between
 * the insert and this accumulation — confirm against the full source */
687 gld->data[seconds].sum += ios_in_flight;
688 gld->data[seconds].count++;
/* depth graph plots the per-second average, so track max of the average */
690 avg = (double)gld->data[seconds].sum / gld->data[seconds].count;
691 if (gld->max < (u64)avg) {
/*
 * On each completion event, match the IO against its dispatch record,
 * accumulate its latency into the per-second latency graph, and drop
 * it from the in-flight hash.
 */
696 void add_completed_io(struct trace *trace,
697 struct graph_line_data *latency_gld)
699 struct blk_io_trace *io = trace->io;
701 int action = io->action & BLK_TA_MASK;
702 struct pending_io *pio;
706 if (io->action & BLK_TC_ACT(BLK_TC_NOTIFY))
709 if (action != __BLK_TA_COMPLETE)
712 seconds = SECONDS(io->time);
/* completions with no matching dispatch are handled in elided lines */
714 pio = hash_completed_io(trace->io);
/* guard against underflow; the decrement itself is in an elided line */
718 if (ios_in_flight > 0)
/* ignore records with timestamps that went backwards */
720 if (io->time >= pio->dispatch_time) {
721 latency = io->time - pio->dispatch_time;
722 latency_gld->data[seconds].sum += latency;
723 latency_gld->data[seconds].count++;
/* unhash the completed IO; presumably freed in an elided line — confirm */
726 list_del(&pio->hash_list);
729 avg = (double)latency_gld->data[seconds].sum /
730 latency_gld->data[seconds].count;
731 if (latency_gld->max < (u64)avg) {
732 latency_gld->max = avg;
/*
 * Count one IO operation in the per-second IOPS graph, tracking the
 * running maximum for axis scaling.
 */
736 void add_iop(struct trace *trace, struct graph_line_data *gld)
738 struct blk_io_trace *io = trace->io;
739 int action = io->action & BLK_TA_MASK;
742 if (io->action & BLK_TC_ACT(BLK_TC_NOTIFY))
745 /* iops and tput use the same events */
746 if (action != tput_event(trace))
749 seconds = SECONDS(io->time);
750 if (seconds > gld->seconds) {
751 fprintf(stderr, "Bad record %d %d\n", seconds, gld->seconds);
/* count stays 1: IOPS plots raw per-second sums, not averages */
755 gld->data[seconds].sum += 1;
756 gld->data[seconds].count = 1;
757 if (gld->data[seconds].sum > gld->max)
758 gld->max = gld->data[seconds].sum;
/*
 * Inspect one record to learn which event classes this trace contains
 * (completion/issue/queue flags feed tput_event()/io_event()), and
 * hand notify records to handle_notify().
 */
761 void check_record(struct trace *trace)
763 struct blk_io_trace *io = trace->io;
764 int action = io->action & BLK_TA_MASK;
766 if (!(io->action & BLK_TC_ACT(BLK_TC_NOTIFY))) {
768 case __BLK_TA_COMPLETE:
769 trace->found_completion = 1;
/* the ISSUE and QUEUE case labels are in elided lines */
772 trace->found_issue = 1;
775 trace->found_queue = 1;
/* notify records (timestamps etc.) take the non-IO path */
779 handle_notify(trace);