2 * Copyright (C) 2012 Fusion-io
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 * Parts of this file were imported from Jens Axboe's blktrace sources (also GPL)
19 #include <sys/types.h>
28 #include <asm/types.h>
39 #define IO_HASH_TABLE_BITS 11
40 #define IO_HASH_TABLE_SIZE (1 << IO_HASH_TABLE_BITS)
/* Open-chained hash of in-flight IOs (struct pending_io), keyed by sector. */
41 static struct list_head io_hash_table[IO_HASH_TABLE_SIZE];
/* Number of IOs dispatched but not yet completed; maintained while parsing. */
42 static u64 ios_in_flight = 0;
44 #define PROCESS_HASH_TABLE_BITS 7
45 #define PROCESS_HASH_TABLE_SIZE (1 << PROCESS_HASH_TABLE_BITS)
/* Open-chained hash of pid -> name/plot-index mappings (struct pid_map). */
46 static struct list_head process_hash_table[PROCESS_HASH_TABLE_SIZE];
/* Plot configuration knobs defined in another translation unit. */
48 extern int plot_io_action;
49 extern int io_per_process;
/*
 * blktrace action-category bits; they occupy the upper 16 bits of
 * blk_io_trace->action (shifted up via BLK_TC_ACT below).
 * NOTE(review): the enum's opening line is not visible in this excerpt.
 */
55 BLK_TC_READ = 1 << 0, /* reads */
56 BLK_TC_WRITE = 1 << 1, /* writes */
57 BLK_TC_FLUSH = 1 << 2, /* flush */
58 BLK_TC_SYNC = 1 << 3, /* sync */
59 BLK_TC_QUEUE = 1 << 4, /* queueing/merging */
60 BLK_TC_REQUEUE = 1 << 5, /* requeueing */
61 BLK_TC_ISSUE = 1 << 6, /* issue */
62 BLK_TC_COMPLETE = 1 << 7, /* completions */
63 BLK_TC_FS = 1 << 8, /* fs requests */
64 BLK_TC_PC = 1 << 9, /* pc requests */
65 BLK_TC_NOTIFY = 1 << 10, /* special message */
66 BLK_TC_AHEAD = 1 << 11, /* readahead */
67 BLK_TC_META = 1 << 12, /* metadata */
68 BLK_TC_DISCARD = 1 << 13, /* discard requests */
69 BLK_TC_DRV_DATA = 1 << 14, /* binary driver data */
70 BLK_TC_FUA = 1 << 15, /* fua requests */
72 BLK_TC_END = 1 << 15, /* we've run out of bits! */
/* Category bits live above bit 16 of the action word. */
75 #define BLK_TC_SHIFT (16)
76 #define BLK_TC_ACT(act) ((act) << BLK_TC_SHIFT)
/* Extract just the READ/WRITE direction bits from an action word. */
77 #define BLK_DATADIR(a) (((a) >> BLK_TC_SHIFT) & (BLK_TC_READ | BLK_TC_WRITE))
/*
 * blktrace per-event action codes; stored in the low 16 bits of
 * blk_io_trace->action (see BLK_TA_MASK below).
 * NOTE(review): the enum's opening line is not visible in this excerpt.
 */
83 __BLK_TA_QUEUE = 1, /* queued */
84 __BLK_TA_BACKMERGE, /* back merged to existing rq */
85 __BLK_TA_FRONTMERGE, /* front merge to existing rq */
86 __BLK_TA_GETRQ, /* allocated new request */
87 __BLK_TA_SLEEPRQ, /* sleeping on rq allocation */
88 __BLK_TA_REQUEUE, /* request requeued */
89 __BLK_TA_ISSUE, /* sent to driver */
90 __BLK_TA_COMPLETE, /* completed by driver */
91 __BLK_TA_PLUG, /* queue was plugged */
92 __BLK_TA_UNPLUG_IO, /* queue was unplugged by io */
93 __BLK_TA_UNPLUG_TIMER, /* queue was unplugged by timer */
94 __BLK_TA_INSERT, /* insert request */
95 __BLK_TA_SPLIT, /* bio was split */
96 __BLK_TA_BOUNCE, /* bio was bounced */
97 __BLK_TA_REMAP, /* bio was remapped */
98 __BLK_TA_ABORT, /* request aborted */
99 __BLK_TA_DRV_DATA, /* binary driver data */
/* Mask selecting the action code (low 16 bits) out of the action word. */
102 #define BLK_TA_MASK ((1 << BLK_TC_SHIFT) - 1)
/* Notification sub-codes used when BLK_TC_NOTIFY is set in the action. */
107 enum blktrace_notify {
108 __BLK_TN_PROCESS = 0, /* establish pid/name mapping */
109 __BLK_TN_TIMESTAMP, /* include system clock */
110 __BLK_TN_MESSAGE, /* Character string message */
114 * Trace actions in full. Additionally, read or write is masked
/* Each full action = low-bits action code | its category bit shifted high. */
116 #define BLK_TA_QUEUE (__BLK_TA_QUEUE | BLK_TC_ACT(BLK_TC_QUEUE))
117 #define BLK_TA_BACKMERGE (__BLK_TA_BACKMERGE | BLK_TC_ACT(BLK_TC_QUEUE))
118 #define BLK_TA_FRONTMERGE (__BLK_TA_FRONTMERGE | BLK_TC_ACT(BLK_TC_QUEUE))
119 #define BLK_TA_GETRQ (__BLK_TA_GETRQ | BLK_TC_ACT(BLK_TC_QUEUE))
120 #define BLK_TA_SLEEPRQ (__BLK_TA_SLEEPRQ | BLK_TC_ACT(BLK_TC_QUEUE))
121 #define BLK_TA_REQUEUE (__BLK_TA_REQUEUE | BLK_TC_ACT(BLK_TC_REQUEUE))
122 #define BLK_TA_ISSUE (__BLK_TA_ISSUE | BLK_TC_ACT(BLK_TC_ISSUE))
123 #define BLK_TA_COMPLETE (__BLK_TA_COMPLETE| BLK_TC_ACT(BLK_TC_COMPLETE))
124 #define BLK_TA_PLUG (__BLK_TA_PLUG | BLK_TC_ACT(BLK_TC_QUEUE))
125 #define BLK_TA_UNPLUG_IO (__BLK_TA_UNPLUG_IO | BLK_TC_ACT(BLK_TC_QUEUE))
126 #define BLK_TA_UNPLUG_TIMER (__BLK_TA_UNPLUG_TIMER | BLK_TC_ACT(BLK_TC_QUEUE))
127 #define BLK_TA_INSERT (__BLK_TA_INSERT | BLK_TC_ACT(BLK_TC_QUEUE))
128 #define BLK_TA_SPLIT (__BLK_TA_SPLIT)
129 #define BLK_TA_BOUNCE (__BLK_TA_BOUNCE)
130 #define BLK_TA_REMAP (__BLK_TA_REMAP | BLK_TC_ACT(BLK_TC_QUEUE))
131 #define BLK_TA_ABORT (__BLK_TA_ABORT | BLK_TC_ACT(BLK_TC_QUEUE))
132 #define BLK_TA_DRV_DATA (__BLK_TA_DRV_DATA | BLK_TC_ACT(BLK_TC_DRV_DATA))
/* Notify events always carry the BLK_TC_NOTIFY category bit. */
134 #define BLK_TN_PROCESS (__BLK_TN_PROCESS | BLK_TC_ACT(BLK_TC_NOTIFY))
135 #define BLK_TN_TIMESTAMP (__BLK_TN_TIMESTAMP | BLK_TC_ACT(BLK_TC_NOTIFY))
136 #define BLK_TN_MESSAGE (__BLK_TN_MESSAGE | BLK_TC_ACT(BLK_TC_NOTIFY))
/* On-disk record magic: stored as (MAGIC << 8 | version) in trace->magic. */
138 #define BLK_IO_TRACE_MAGIC 0x65617400
139 #define BLK_IO_TRACE_VERSION 0x07
/*
 * One binary trace record as written by blktrace; followed in the file by
 * pdu_len bytes of payload (process name, message text, remap data, ...).
 */
143 struct blk_io_trace {
144 __u32 magic; /* MAGIC << 8 | version */
145 __u32 sequence; /* event number */
146 __u64 time; /* in nanoseconds */
147 __u64 sector; /* disk offset */
148 __u32 bytes; /* transfer length */
149 __u32 action; /* what happened */
150 __u32 pid; /* who did it */
151 __u32 device; /* device identifier (dev_t) */
152 __u32 cpu; /* on what cpu did it happen */
153 __u16 error; /* completion error */
154 __u16 pdu_len; /* length of data after this trace */
/*
 * struct pending_io: tracks one IO from queue to completion so dispatch
 * latency can be computed. NOTE(review): several field declarations are
 * missing from this excerpt.
 */
158 /* sector offset of this IO */
161 /* time this IO was dispatched */
163 /* time this IO was finished */
165 struct list_head hash_list;
166 /* process which queued this IO */
/* struct pid_map: hash-chained pid -> name/plot-index entry. */
171 struct list_head hash_list;
178 #define MINORMASK ((1 << MINORBITS) - 1)
/* Split a nanosecond timestamp into whole seconds / remainder nanoseconds. */
179 #define SECONDS(x) ((unsigned long long)(x) / 1000000000)
180 #define NANO_SECONDS(x) ((unsigned long long)(x) % 1000000000)
181 #define DOUBLE_TO_NANO_ULL(d) ((unsigned long long)((d) * 1000000000))
/* True when the record's magic (version byte ignored) matches blktrace's. */
182 #define CHECK_MAGIC(t) (((t)->magic & 0xffffff00) == BLK_IO_TRACE_MAGIC)
/* Initialize every bucket of the in-flight IO hash table to an empty list. */
184 void init_io_hash_table(void)
187 struct list_head *head;
189 for (i = 0; i < IO_HASH_TABLE_SIZE; i++) {
190 head = io_hash_table + i;
191 INIT_LIST_HEAD(head);
195 /* taken from the kernel hash.h */
/*
 * Map a 64-bit sector number to an IO_HASH_TABLE_BITS-wide bucket index.
 * NOTE(review): the multiply-by-golden-ratio body is elided in this excerpt.
 */
196 static inline u64 hash_sector(u64 val)
200 /* Sigh, gcc can't optimise this alone like it does for 32 bits. */
215 /* High bits are more random, so use them. */
216 return hash >> (64 - IO_HASH_TABLE_BITS);
/*
 * Insert a pending IO into the sector-keyed hash table.
 * Scans the bucket first so the same sector is never inserted twice;
 * the duplicate-return path is elided in this excerpt.
 */
219 static int io_hash_table_insert(struct pending_io *ins_pio)
221 u64 sector = ins_pio->sector;
222 int slot = hash_sector(sector);
223 struct list_head *head;
224 struct pending_io *pio;
226 head = io_hash_table + slot;
227 list_for_each_entry(pio, head, hash_list) {
228 if (pio->sector == sector)
/* Not present yet: chain it onto the bucket. */
231 list_add_tail(&ins_pio->hash_list, head);
/*
 * Look up a pending IO by sector. Returns the entry on a match;
 * the not-found return path is elided in this excerpt.
 */
235 static struct pending_io *io_hash_table_search(u64 sector)
237 int slot = hash_sector(sector);
238 struct list_head *head;
239 struct pending_io *pio;
241 head = io_hash_table + slot;
242 list_for_each_entry(pio, head, hash_list) {
243 if (pio->sector == sector)
/*
 * Record a newly queued IO: allocate a pending_io for the trace record's
 * sector and insert it into the in-flight hash.
 * NOTE(review): the calloc NULL-check and error paths are elided here.
 */
249 static int hash_queued_io(struct blk_io_trace *io)
251 struct pending_io *pio;
254 pio = calloc(1, sizeof(*pio));
255 pio->sector = io->sector;
258 ret = io_hash_table_insert(pio);
260 /* crud, the IO is there already */
/*
 * Mark a previously queued IO as dispatched: find it by sector and stamp
 * its dispatch time from the issue record.
 */
267 static int hash_dispatched_io(struct blk_io_trace *io)
269 struct pending_io *pio;
271 pio = io_hash_table_search(io->sector);
273 /* crud, the IO isn't here */
276 pio->dispatch_time = io->time;
/* Find the pending IO matching a completion record (by sector), if any. */
280 static struct pending_io *hash_completed_io(struct blk_io_trace *io)
282 struct pending_io *pio;
284 pio = io_hash_table_search(io->sector);
/* Initialize every bucket of the pid->name hash table to an empty list. */
291 void init_process_hash_table(void)
294 struct list_head *head;
296 for (i = 0; i < PROCESS_HASH_TABLE_SIZE; i++) {
297 head = process_hash_table + i;
298 INIT_LIST_HEAD(head);
/*
 * Map a pid to a PROCESS_HASH_TABLE_SIZE bucket index.
 * NOTE(review): the mixing steps are elided in this excerpt.
 */
302 static u32 hash_pid(u32 pid)
310 return (hash & (PROCESS_HASH_TABLE_SIZE - 1));
/*
 * Look up a pid in the process hash. Returns the pid_map entry on a match;
 * the comparison and not-found paths are elided in this excerpt.
 */
313 static struct pid_map *process_hash_search(u32 pid)
315 int slot = hash_pid(pid);
316 struct list_head *head;
319 head = process_hash_table + slot;
320 list_for_each_entry(pm, head, hash_list) {
/*
 * Insert or update a pid->name mapping. If an entry exists and the name is
 * unchanged (or not supplied) it is reused; otherwise the old entry is
 * replaced while preserving its plot index. A NULL name falls back to the
 * synthetic "[pid]" label.
 * NOTE(review): allocation checks and the old entry's free() are elided here.
 */
327 static struct pid_map *process_hash_insert(u32 pid, char *name)
329 int slot = hash_pid(pid);
334 pm = process_hash_search(pid);
336 /* Entry exists and name shouldn't be changed? */
337 if (!name || !strcmp(name, pm->name))
/* Name changed: unlink the stale entry but keep its plot index. */
339 list_del(&pm->hash_list);
340 old_index = pm->index;
/* No name supplied for a new pid: label it "[pid]". */
344 sprintf(buf, "[%u]", pid);
/* Flexible trailing storage: struct plus NUL-terminated name. */
347 pm = malloc(sizeof(struct pid_map) + strlen(name) + 1);
349 pm->index = old_index;
350 strcpy(pm->name, name);
351 list_add_tail(&pm->hash_list, process_hash_table + slot);
/*
 * Process a BLK_TC_NOTIFY record: PROCESS notifies register the pid/name
 * mapping; TIMESTAMP notifies anchor the trace's absolute start time
 * (payload is two u32s: seconds, nanoseconds). Other notifies are ignored.
 */
356 static void handle_notify(struct trace *trace)
358 struct blk_io_trace *io = trace->io;
/* Payload bytes immediately follow the fixed-size record. */
359 void *payload = (char *)io + sizeof(*io);
362 if (io->action == BLK_TN_PROCESS) {
364 process_hash_insert(io->pid, payload);
368 if (io->action != BLK_TN_TIMESTAMP)
/* Sanity: timestamp payload must be exactly two u32s. */
371 if (io->pdu_len != sizeof(two32))
374 memcpy(two32, payload, sizeof(two32));
375 trace->start_timestamp = io->time;
376 trace->abs_start_time.tv_sec = two32[0];
377 trace->abs_start_time.tv_nsec = two32[1];
/* Normalize a negative nsec field into the canonical timespec range. */
378 if (trace->abs_start_time.tv_nsec < 0) {
379 trace->abs_start_time.tv_sec--;
380 trace->abs_start_time.tv_nsec += 1000000000;
/*
 * Advance trace->cur/trace->io to the next record (skipping the current
 * record's payload). The non-zero end-of-buffer return is elided here.
 */
384 int next_record(struct trace *trace)
386 int skip = trace->io->pdu_len;
389 trace->cur += sizeof(*trace->io) + skip;
390 offset = trace->cur - trace->start;
391 if (offset >= trace->len)
394 trace->io = (struct blk_io_trace *)trace->cur;
/* Rewind the trace cursor to the first record in the mapped buffer. */
398 void first_record(struct trace *trace)
400 trace->cur = trace->start;
401 trace->io = (struct blk_io_trace *)trace->cur;
/*
 * Decide whether a record counts as a real IO event. Non-notify records
 * qualify; among notifies, only "fio " status messages are treated as IO
 * events (they carry timing used elsewhere). Return paths are elided here.
 */
404 int is_io_event(struct blk_io_trace *test)
407 if (!(test->action & BLK_TC_ACT(BLK_TC_NOTIFY)))
409 if (test->action == BLK_TN_MESSAGE) {
410 int len = test->pdu_len;
/* Message text follows the fixed-size record. */
413 message = (char *)(test + 1);
414 if (strncmp(message, "fio ", 4) == 0) {
/*
 * Find the timestamp of the last IO event in the trace. First scan
 * backwards from the end of the buffer looking for a record whose magic
 * checks out and whose length lands exactly on the buffer end; give up
 * after 8192 bytes and fall back to a full forward scan.
 */
421 u64 find_last_time(struct trace *trace)
423 char *p = trace->start + trace->len;
424 struct blk_io_trace *test;
428 if (trace->len < sizeof(*trace->io))
430 p -= sizeof(*trace->io);
431 while (p >= trace->start) {
432 test = (struct blk_io_trace *)p;
433 if (CHECK_MAGIC(test) && is_io_event(test)) {
434 u64 offset = p - trace->start;
/* A genuine final record ends exactly at the buffer end. */
435 if (offset + sizeof(*test) + test->pdu_len == trace->len) {
/* Bound the backwards search so garbage tails don't cost O(len). */
441 if (search_len > 8192) {
446 /* searching backwards didn't work out, we'll have to scan the file */
449 if (is_io_event(trace->io))
450 found = trace->io->time;
451 if (next_record(trace))
/*
 * Parse a "fio r <bank> <offset> <num_banks>" notify message into the three
 * out parameters. Returns non-zero for records that are not such a message.
 * NOTE(review): field-separator advancing, error paths and the free of the
 * strndup'd copy are elided in this excerpt.
 */
458 int parse_fio_bank_message(struct trace *trace, u64 *bank_ret, u64 *offset_ret,
464 struct blk_io_trace *test = trace->io;
465 int len = test->pdu_len;
470 if (!(test->action & BLK_TC_ACT(BLK_TC_NOTIFY)))
472 if (test->action != BLK_TN_MESSAGE)
475 /* the message is fio rw bank offset num_banks */
478 message = (char *)(test + 1);
479 if (strncmp(message, "fio r ", 6) != 0)
/* Work on a NUL-terminated copy; pdu payload need not be terminated. */
482 message = strndup(message, len);
483 s = strchr(message, ' ');
491 bank = strtoll(s, &next, 10);
496 offset = strtoll(s, &next, 10);
501 num_banks = strtoll(s, &next, 10);
506 *offset_ret = offset;
507 *num_banks_ret = num_banks;
/*
 * Scan the whole trace to find the minimum and maximum byte offsets touched
 * by IO records, plus the largest bank/bank-offset reported by fio notify
 * messages. Results are returned through the out parameters.
 */
515 void find_extreme_offsets(struct trace *trace, u64 *min_ret, u64 *max_ret, u64 *max_bank_ret,
519 u64 max = 0, min = ~(u64)0;
521 u64 max_bank_offset = 0;
525 if (!(trace->io->action & BLK_TC_ACT(BLK_TC_NOTIFY))) {
/* Sectors are 512 bytes: convert sector number to a byte offset. */
526 found = trace->io->sector << 9;
/* End of the IO = start + transfer length. */
530 found += trace->io->bytes;
536 if (!parse_fio_bank_message(trace, &bank,
537 &offset, &num_banks)) {
540 if (offset > max_bank_offset)
541 max_bank_offset = offset;
544 if (next_record(trace))
550 *max_bank_ret = max_bank;
551 *max_offset_ret = max_bank_offset;
/*
 * Compute a y-axis zoom range that excludes offset outliers. The offset
 * span is divided into 10 buckets (plus one overflow slot); queue events
 * are histogrammed per bucket, then the zoom range is trimmed from both
 * ends to the outermost bucket holding at least 5% of the busiest bucket's
 * hit count.
 */
554 int filter_outliers(struct trace *trace, u64 min_offset, u64 max_offset,
555 u64 *yzoom_min, u64 *yzoom_max)
558 u64 max_per_bucket[11];
559 u64 min_per_bucket[11];
560 u64 bytes_per_bucket = (max_offset - min_offset + 1) / 10;
564 memset(hits, 0, sizeof(int) * 11);
565 memset(max_per_bucket, 0, sizeof(u64) * 11);
/* 0xff fill yields an all-ones (maximal) u64 in every min slot. */
566 memset(min_per_bucket, 0xff, sizeof(u64) * 11);
/* Only count real queue events, not notifies. */
569 if (!(trace->io->action & BLK_TC_ACT(BLK_TC_NOTIFY)) &&
570 (trace->io->action & BLK_TA_MASK) == __BLK_TA_QUEUE) {
571 u64 off = (trace->io->sector << 9) - min_offset;
573 slot = (int)(off / bytes_per_bucket);
575 if (off < min_per_bucket[slot])
576 min_per_bucket[slot] = off;
/* The IO's end offset may land in a later bucket. */
578 off += trace->io->bytes;
579 slot = (int)(off / bytes_per_bucket);
581 if (off > max_per_bucket[slot])
582 max_per_bucket[slot] = off;
584 if (next_record(trace))
/* Find the busiest bucket to use as the reference count. */
588 for (slot = 0; slot < 11; slot++) {
589 if (hits[slot] > fat_count) {
590 fat_count = hits[slot];
/* Trim the top: highest bucket with >= 5% of the busiest count. */
594 *yzoom_max = max_offset;
595 for (slot = 10; slot >= 0; slot--) {
596 double d = hits[slot];
598 if (d >= (double)fat_count * .05) {
599 *yzoom_max = max_per_bucket[slot] + min_offset;
/* Trim the bottom symmetrically. */
604 *yzoom_min = min_offset;
605 for (slot = 0; slot < 10; slot++) {
606 double d = hits[slot];
608 if (d >= (double)fat_count * .05) {
609 *yzoom_min = min_per_bucket[slot] + min_offset;
/*
 * Resolve the trace filename the user gave to an actual dump file:
 * use it directly if it exists, then try "<name>.dump", then strip any
 * non-".dump" extension, look for "<name>.blktrace.0" and convert it to a
 * dump via blktrace_to_dump(). Returns a heap-allocated path (caller frees)
 * or, presumably, NULL on failure — the elided paths would confirm.
 */
616 static char *find_trace_file(char *filename)
624 ret = stat(filename, &st);
626 return strdup(filename);
628 snprintf(line, 1024, "%s.%s", filename, "dump");
629 ret = stat(line, &st);
633 try = strdup(filename);
634 dot = strrchr(try, '.');
/* Only attempt conversion when the name isn't already a .dump file. */
635 if (!dot || strcmp(".dump", dot) != 0) {
636 if (dot && dot != try)
638 snprintf(line, 1024, "%s%s", try, ".blktrace.0");
639 ret = stat(line, &st);
641 blktrace_to_dump(try);
642 snprintf(line, 1024, "%s.%s", try, "dump");
643 ret = stat(line, &st);
/*
 * Open a trace dump and map it read-only into memory. Allocates the trace
 * handle, resolves the filename via find_trace_file(), mmaps the whole file
 * and points trace->io at the first record. Error paths print to stderr;
 * their cleanup/return lines are elided in this excerpt.
 */
653 struct trace *open_trace(char *filename)
660 char *found_filename;
662 trace = calloc(1, sizeof(*trace));
664 fprintf(stderr, "unable to allocate memory for trace\n");
668 found_filename = find_trace_file(filename);
669 if (!found_filename) {
670 fprintf(stderr, "Unable to find trace file %s\n", filename);
673 filename = found_filename;
675 fd = open(filename, O_RDONLY);
677 fprintf(stderr, "Unable to open trace file %s err %s\n", filename, strerror(errno));
680 ret = fstat(fd, &st);
682 fprintf(stderr, "stat failed on %s err %s\n", filename, strerror(errno));
/* Map the entire file; records are then read in place, no copies. */
685 p = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
686 if (p == MAP_FAILED) {
687 fprintf(stderr, "Unable to mmap trace file %s, err %s\n", filename, strerror(errno));
691 trace->len = st.st_size;
694 trace->io = (struct blk_io_trace *)p;
/*
 * Pick which event type throughput graphs should count, preferring the
 * latest stage seen in the trace: completion, then issue, then queue.
 */
703 static inline int tput_event(struct trace *trace)
705 if (trace->found_completion)
706 return __BLK_TA_COMPLETE;
707 if (trace->found_issue)
708 return __BLK_TA_ISSUE;
709 if (trace->found_queue)
710 return __BLK_TA_QUEUE;
/* Default when no stage was detected. */
712 return __BLK_TA_COMPLETE;
/*
 * Translate a user-supplied action letter (presumably 'Q'/'D'/'C' — the
 * comparison lines are elided here) to the corresponding __BLK_TA_* code.
 */
715 int action_char_to_num(char action)
719 return __BLK_TA_QUEUE;
721 return __BLK_TA_ISSUE;
723 return __BLK_TA_COMPLETE;
/*
 * Pick which event type IO-offset plots should use: an explicit
 * plot_io_action override first, otherwise the earliest stage seen in the
 * trace (queue, then issue, then completion) — the opposite preference
 * order from tput_event().
 */
728 static inline int io_event(struct trace *trace)
731 return plot_io_action;
732 if (trace->found_queue)
733 return __BLK_TA_QUEUE;
734 if (trace->found_issue)
735 return __BLK_TA_ISSUE;
736 if (trace->found_completion)
737 return __BLK_TA_COMPLETE;
/* Default when no stage was detected. */
739 return __BLK_TA_COMPLETE;
/*
 * Accumulate the current record's bytes into the per-second throughput
 * graph data, tracking the running maximum for y-axis scaling. Notify
 * records and non-selected event types are skipped.
 */
742 void add_tput(struct trace *trace, struct graph_line_data *gld)
744 struct blk_io_trace *io = trace->io;
745 int action = io->action & BLK_TA_MASK;
748 if (io->action & BLK_TC_ACT(BLK_TC_NOTIFY))
751 if (action != tput_event(trace))
754 seconds = SECONDS(io->time);
/* Ignore records past the graph's time range. */
755 if (seconds > gld->max_seconds)
758 gld->data[seconds].sum += io->bytes;
759 gld->data[seconds].count = 1;
760 if (gld->data[seconds].sum > gld->max)
761 gld->max = gld->data[seconds].sum;
764 #define GDD_PTR_ALLOC_STEP 16
/*
 * Return the pid_map (and thus per-process plot index) for a pid, creating
 * it on first sight. When per-process plotting is off the early-return path
 * (elided here) is taken. Grows the per-file gdd_reads/gdd_writes pointer
 * arrays in GDD_PTR_ALLOC_STEP chunks, zeroing the new tail entries.
 * NOTE(review): realloc failure handling is not visible in this excerpt.
 */
766 static struct pid_map *get_pid_map(struct trace_file *tf, u32 pid)
770 if (!io_per_process) {
776 pm = process_hash_insert(pid, NULL);
/* Out of plot slots: grow both pointer arrays together. */
779 if (tf->io_plots == tf->io_plots_allocated) {
780 tf->io_plots_allocated += GDD_PTR_ALLOC_STEP;
781 tf->gdd_reads = realloc(tf->gdd_reads, tf->io_plots_allocated * sizeof(struct graph_dot_data *));
784 tf->gdd_writes = realloc(tf->gdd_writes, tf->io_plots_allocated * sizeof(struct graph_dot_data *));
787 memset(tf->gdd_reads + tf->io_plots_allocated - GDD_PTR_ALLOC_STEP,
788 0, GDD_PTR_ALLOC_STEP * sizeof(struct graph_dot_data *));
789 memset(tf->gdd_writes + tf->io_plots_allocated - GDD_PTR_ALLOC_STEP,
790 0, GDD_PTR_ALLOC_STEP * sizeof(struct graph_dot_data *));
/* Hand out the next free plot index to this process. */
792 pm->index = tf->io_plots++;
/*
 * Plot one IO record as a dot on the offset-vs-time graph for its process,
 * lazily allocating the read or write dot-data series on first use.
 * Notifies and non-selected event types are skipped.
 */
799 void add_io(struct trace *trace, struct trace_file *tf)
801 struct blk_io_trace *io = trace->io;
802 int action = io->action & BLK_TA_MASK;
808 if (io->action & BLK_TC_ACT(BLK_TC_NOTIFY))
811 if (action != io_event(trace))
/* Convert sector to byte offset (512-byte sectors). */
814 offset = io->sector << 9;
816 pm = get_pid_map(tf, io->pid);
824 if (BLK_DATADIR(io->action) & BLK_TC_READ) {
825 if (!tf->gdd_reads[index])
826 tf->gdd_reads[index] = alloc_dot_data(tf->min_seconds, tf->max_seconds, tf->min_offset, tf->max_offset, tf->stop_seconds, pick_color(), strdup(label));
827 set_gdd_bit(tf->gdd_reads[index], offset, io->bytes, io->time);
828 } else if (BLK_DATADIR(io->action) & BLK_TC_WRITE) {
829 if (!tf->gdd_writes[index])
830 tf->gdd_writes[index] = alloc_dot_data(tf->min_seconds, tf->max_seconds, tf->min_offset, tf->max_offset, tf->stop_seconds, pick_color(), strdup(label));
831 set_gdd_bit(tf->gdd_writes[index], offset, io->bytes, io->time);
/*
 * Track queue depth: queue events add the IO to the pending hash, issue
 * events stamp its dispatch time, and the per-second graph accumulates the
 * running ios_in_flight average for plotting.
 * NOTE(review): the ios_in_flight increment line is elided in this excerpt.
 */
835 void add_pending_io(struct trace *trace, struct graph_line_data *gld)
839 struct blk_io_trace *io = trace->io;
840 int action = io->action & BLK_TA_MASK;
843 if (io->action & BLK_TC_ACT(BLK_TC_NOTIFY))
846 if (action == __BLK_TA_QUEUE) {
847 hash_queued_io(trace->io);
850 if (action != __BLK_TA_ISSUE)
853 seconds = SECONDS(io->time);
854 if (seconds > gld->max_seconds)
857 ret = hash_dispatched_io(trace->io);
/* Sample the current queue depth into this second's bucket. */
863 gld->data[seconds].sum += ios_in_flight;
864 gld->data[seconds].count++;
/* Keep the graph maximum in step with the running average. */
866 avg = (double)gld->data[seconds].sum / gld->data[seconds].count;
867 if (gld->max < (u64)avg) {
/*
 * Handle a completion record: match it to its pending IO, accumulate
 * dispatch-to-completion latency into the per-second latency graph, drop
 * the pending entry, and update the running maximum average latency.
 */
872 void add_completed_io(struct trace *trace,
873 struct graph_line_data *latency_gld)
875 struct blk_io_trace *io = trace->io;
877 int action = io->action & BLK_TA_MASK;
878 struct pending_io *pio;
882 if (io->action & BLK_TC_ACT(BLK_TC_NOTIFY))
885 if (action != __BLK_TA_COMPLETE)
888 seconds = SECONDS(io->time);
890 pio = hash_completed_io(trace->io);
/* One fewer IO is in flight now (guard against underflow). */
894 if (ios_in_flight > 0)
/* Skip latency if timestamps went backwards (clock/cpu skew). */
896 if (io->time >= pio->dispatch_time) {
897 latency = io->time - pio->dispatch_time;
898 latency_gld->data[seconds].sum += latency;
899 latency_gld->data[seconds].count++;
/* The IO is finished: remove it from the pending hash. */
902 list_del(&pio->hash_list);
905 avg = (double)latency_gld->data[seconds].sum /
906 latency_gld->data[seconds].count;
907 if (latency_gld->max < (u64)avg) {
908 latency_gld->max = avg;
/*
 * Count one IO operation into the per-second IOPS graph (same event
 * selection as throughput), tracking the running maximum for scaling.
 */
912 void add_iop(struct trace *trace, struct graph_line_data *gld)
914 struct blk_io_trace *io = trace->io;
915 int action = io->action & BLK_TA_MASK;
918 if (io->action & BLK_TC_ACT(BLK_TC_NOTIFY))
921 /* iops and tput use the same events */
922 if (action != tput_event(trace))
925 seconds = SECONDS(io->time);
926 if (seconds > gld->max_seconds)
929 gld->data[seconds].sum += 1;
930 gld->data[seconds].count = 1;
931 if (gld->data[seconds].sum > gld->max)
932 gld->max = gld->data[seconds].sum;
/*
 * Pre-scan pass: note which event stages (queue/issue/complete) appear in
 * the trace so tput_event()/io_event() can pick sensibly, and route notify
 * records to handle_notify(). The switch's case labels for issue/queue are
 * elided in this excerpt.
 */
935 void check_record(struct trace *trace)
937 struct blk_io_trace *io = trace->io;
938 int action = io->action & BLK_TA_MASK;
940 if (!(io->action & BLK_TC_ACT(BLK_TC_NOTIFY))) {
942 case __BLK_TA_COMPLETE:
943 trace->found_completion = 1;
946 trace->found_issue = 1;
949 trace->found_queue = 1;
953 handle_notify(trace);