2 * Copyright (C) 2012 Fusion-io
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
17 * Parts of this file were imported from Jens Axboe's blktrace sources (also GPL)
19 #include <sys/types.h>
28 #include <asm/types.h>
/*
 * Chained hash table of in-flight IOs, keyed by sector (see hash_sector()).
 * Buckets hold struct pending_io linked via pending_io->hash_list.
 */
40 #define IO_HASH_TABLE_BITS 11
41 #define IO_HASH_TABLE_SIZE (1 << IO_HASH_TABLE_BITS)
42 static struct list_head io_hash_table[IO_HASH_TABLE_SIZE];
/* IOs issued but not yet completed; read by add_pending_io/add_completed_io */
43 static u64 ios_in_flight = 0;
/* Chained hash table mapping pid -> struct pid_map (see hash_pid()) */
45 #define PROCESS_HASH_TABLE_BITS 7
46 #define PROCESS_HASH_TABLE_SIZE (1 << PROCESS_HASH_TABLE_BITS)
47 static struct list_head process_hash_table[PROCESS_HASH_TABLE_SIZE];
/* Set elsewhere (command line parsing, presumably) — TODO confirm */
49 extern int plot_io_action;
50 extern int io_per_process;
/* Scratch buffer for building candidate filenames in find_trace_file() */
52 static const int line_len = 1024;
53 static char line[1024];
/*
 * blktrace category bits.  These occupy the HIGH 16 bits of
 * blk_io_trace->action (see BLK_TC_SHIFT / BLK_TC_ACT below); the low 16
 * bits hold the action code (__BLK_TA_*).
 */
59 BLK_TC_READ = 1 << 0, /* reads */
60 BLK_TC_WRITE = 1 << 1, /* writes */
61 BLK_TC_FLUSH = 1 << 2, /* flush */
62 BLK_TC_SYNC = 1 << 3, /* sync */
63 BLK_TC_QUEUE = 1 << 4, /* queueing/merging */
64 BLK_TC_REQUEUE = 1 << 5, /* requeueing */
65 BLK_TC_ISSUE = 1 << 6, /* issue */
66 BLK_TC_COMPLETE = 1 << 7, /* completions */
67 BLK_TC_FS = 1 << 8, /* fs requests */
68 BLK_TC_PC = 1 << 9, /* pc requests */
69 BLK_TC_NOTIFY = 1 << 10, /* special message */
70 BLK_TC_AHEAD = 1 << 11, /* readahead */
71 BLK_TC_META = 1 << 12, /* metadata */
72 BLK_TC_DISCARD = 1 << 13, /* discard requests */
73 BLK_TC_DRV_DATA = 1 << 14, /* binary driver data */
/* NOTE: BLK_TC_FUA and BLK_TC_END deliberately share bit 15 (no bits left) */
74 BLK_TC_FUA = 1 << 15, /* fua requests */
76 BLK_TC_END = 1 << 15, /* we've run out of bits! */
/* Shift a category into the high half of the action word */
79 #define BLK_TC_SHIFT (16)
80 #define BLK_TC_ACT(act) ((act) << BLK_TC_SHIFT)
/* Extract the read/write direction bits from an action word */
81 #define BLK_DATADIR(a) (((a) >> BLK_TC_SHIFT) & (BLK_TC_READ | BLK_TC_WRITE))
/*
 * blktrace action codes, stored in the LOW 16 bits of
 * blk_io_trace->action.  Use BLK_TA_MASK (below) to extract them.
 */
87 __BLK_TA_QUEUE = 1, /* queued */
88 __BLK_TA_BACKMERGE, /* back merged to existing rq */
89 __BLK_TA_FRONTMERGE, /* front merge to existing rq */
90 __BLK_TA_GETRQ, /* allocated new request */
91 __BLK_TA_SLEEPRQ, /* sleeping on rq allocation */
92 __BLK_TA_REQUEUE, /* request requeued */
93 __BLK_TA_ISSUE, /* sent to driver */
94 __BLK_TA_COMPLETE, /* completed by driver */
95 __BLK_TA_PLUG, /* queue was plugged */
96 __BLK_TA_UNPLUG_IO, /* queue was unplugged by io */
97 __BLK_TA_UNPLUG_TIMER, /* queue was unplugged by timer */
98 __BLK_TA_INSERT, /* insert request */
99 __BLK_TA_SPLIT, /* bio was split */
100 __BLK_TA_BOUNCE, /* bio was bounced */
101 __BLK_TA_REMAP, /* bio was remapped */
102 __BLK_TA_ABORT, /* request aborted */
103 __BLK_TA_DRV_DATA, /* binary driver data */
/* Mask selecting the action code (low 16 bits) of an action word */
106 #define BLK_TA_MASK ((1 << BLK_TC_SHIFT) - 1)
/* Notify sub-codes carried in records tagged BLK_TC_NOTIFY */
111 enum blktrace_notify {
112 __BLK_TN_PROCESS = 0, /* establish pid/name mapping */
113 __BLK_TN_TIMESTAMP, /* include system clock */
114 __BLK_TN_MESSAGE, /* Character string message */
118 * Trace actions in full. Additionally, read or write is masked
/* Full action words: action code OR'd with its originating category bit */
120 #define BLK_TA_QUEUE (__BLK_TA_QUEUE | BLK_TC_ACT(BLK_TC_QUEUE))
121 #define BLK_TA_BACKMERGE (__BLK_TA_BACKMERGE | BLK_TC_ACT(BLK_TC_QUEUE))
122 #define BLK_TA_FRONTMERGE (__BLK_TA_FRONTMERGE | BLK_TC_ACT(BLK_TC_QUEUE))
123 #define BLK_TA_GETRQ (__BLK_TA_GETRQ | BLK_TC_ACT(BLK_TC_QUEUE))
124 #define BLK_TA_SLEEPRQ (__BLK_TA_SLEEPRQ | BLK_TC_ACT(BLK_TC_QUEUE))
125 #define BLK_TA_REQUEUE (__BLK_TA_REQUEUE | BLK_TC_ACT(BLK_TC_REQUEUE))
126 #define BLK_TA_ISSUE (__BLK_TA_ISSUE | BLK_TC_ACT(BLK_TC_ISSUE))
127 #define BLK_TA_COMPLETE (__BLK_TA_COMPLETE| BLK_TC_ACT(BLK_TC_COMPLETE))
128 #define BLK_TA_PLUG (__BLK_TA_PLUG | BLK_TC_ACT(BLK_TC_QUEUE))
129 #define BLK_TA_UNPLUG_IO (__BLK_TA_UNPLUG_IO | BLK_TC_ACT(BLK_TC_QUEUE))
130 #define BLK_TA_UNPLUG_TIMER (__BLK_TA_UNPLUG_TIMER | BLK_TC_ACT(BLK_TC_QUEUE))
131 #define BLK_TA_INSERT (__BLK_TA_INSERT | BLK_TC_ACT(BLK_TC_QUEUE))
132 #define BLK_TA_SPLIT (__BLK_TA_SPLIT)
133 #define BLK_TA_BOUNCE (__BLK_TA_BOUNCE)
134 #define BLK_TA_REMAP (__BLK_TA_REMAP | BLK_TC_ACT(BLK_TC_QUEUE))
135 #define BLK_TA_ABORT (__BLK_TA_ABORT | BLK_TC_ACT(BLK_TC_QUEUE))
136 #define BLK_TA_DRV_DATA (__BLK_TA_DRV_DATA | BLK_TC_ACT(BLK_TC_DRV_DATA))
/* Notify records always carry the BLK_TC_NOTIFY category bit */
138 #define BLK_TN_PROCESS (__BLK_TN_PROCESS | BLK_TC_ACT(BLK_TC_NOTIFY))
139 #define BLK_TN_TIMESTAMP (__BLK_TN_TIMESTAMP | BLK_TC_ACT(BLK_TC_NOTIFY))
140 #define BLK_TN_MESSAGE (__BLK_TN_MESSAGE | BLK_TC_ACT(BLK_TC_NOTIFY))
/* On-disk record header magic/version (magic << 8 | version in 'magic') */
142 #define BLK_IO_TRACE_MAGIC 0x65617400
143 #define BLK_IO_TRACE_VERSION 0x07
/*
 * One binary trace record as emitted by blktrace.  A variable-length
 * payload of pdu_len bytes follows the struct in the file.
 */
147 struct blk_io_trace {
148 __u32 magic; /* MAGIC << 8 | version */
149 __u32 sequence; /* event number */
150 __u64 time; /* in nanoseconds */
151 __u64 sector; /* disk offset */
152 __u32 bytes; /* transfer length */
153 __u32 action; /* what happened */
154 __u32 pid; /* who did it */
155 __u32 device; /* device identifier (dev_t) */
156 __u32 cpu; /* on what cpu did it happen */
157 __u16 error; /* completion error */
158 __u16 pdu_len; /* length of data after this trace */
/*
 * struct pending_io: one IO tracked from queue/issue until completion,
 * hashed into io_hash_table by sector.
 */
162 /* sector offset of this IO */
165 /* dev_t for this IO */
168 /* time this IO was dispatched */
170 /* time this IO was finished */
172 struct list_head hash_list;
173 /* process which queued this IO */
/*
 * struct pid_map: pid -> process-name mapping, hashed into
 * process_hash_table; 'index' selects the per-process plot slot.
 */
178 struct list_head hash_list;
/* Return the timestamp (ns) of the record the trace cursor points at. */
184 u64 get_record_time(struct trace *trace)
186 return trace->io->time;
/* Initialize every bucket of the in-flight IO hash table to empty. */
189 void init_io_hash_table(void)
192 struct list_head *head;
194 for (i = 0; i < IO_HASH_TABLE_SIZE; i++) {
195 head = io_hash_table + i;
196 INIT_LIST_HEAD(head);
200 /* taken from the kernel hash.h */
/* Multiplicative hash of a sector number down to IO_HASH_TABLE_BITS bits. */
201 static inline u64 hash_sector(u64 val)
205 /* Sigh, gcc can't optimise this alone like it does for 32 bits. */
220 /* High bits are more random, so use them. */
221 return hash >> (64 - IO_HASH_TABLE_BITS);
/*
 * Insert ins_pio into the in-flight hash table, keyed by (sector, device).
 * Walks the bucket first to reject duplicates; presumably returns an error
 * on a duplicate and 0 on success (those lines not visible here — confirm).
 */
224 static int io_hash_table_insert(struct pending_io *ins_pio)
226 u64 sector = ins_pio->sector;
227 u32 dev = ins_pio->device;
228 int slot = hash_sector(sector);
229 struct list_head *head;
230 struct pending_io *pio;
232 head = io_hash_table + slot;
233 list_for_each_entry(pio, head, hash_list) {
234 if (pio->sector == sector && pio->device == dev)
237 list_add_tail(&ins_pio->hash_list, head);
/*
 * Find the pending IO for (sector, dev) in the in-flight hash table.
 * Returns the entry, or presumably NULL when absent (return not visible).
 */
241 static struct pending_io *io_hash_table_search(u64 sector, u32 dev)
243 int slot = hash_sector(sector);
244 struct list_head *head;
245 struct pending_io *pio;
247 head = io_hash_table + slot;
248 list_for_each_entry(pio, head, hash_list) {
249 if (pio->sector == sector && pio->device == dev)
/*
 * Allocate a pending_io for a queued record and insert it into the
 * in-flight table.
 * NOTE(review): the calloc() return is used without a visible NULL check —
 * confirm OOM handling against the full source.
 */
255 static struct pending_io *hash_queued_io(struct blk_io_trace *io)
257 struct pending_io *pio;
260 pio = calloc(1, sizeof(*pio));
261 pio->sector = io->sector;
262 pio->device = io->device;
265 ret = io_hash_table_insert(pio);
267 /* crud, the IO is there already */
/*
 * Look up (or lazily create, for traces without queue events) the pending
 * IO matching a dispatch record, and stamp its dispatch time.
 */
274 static struct pending_io *hash_dispatched_io(struct blk_io_trace *io)
276 struct pending_io *pio;
278 pio = io_hash_table_search(io->sector, io->device);
280 pio = hash_queued_io(io);
284 pio->dispatch_time = io->time;
/* Find the pending IO matching a completion record (by sector + device). */
288 static struct pending_io *hash_completed_io(struct blk_io_trace *io)
290 struct pending_io *pio;
292 pio = io_hash_table_search(io->sector, io->device);
/* Initialize every bucket of the pid -> name hash table to empty. */
299 void init_process_hash_table(void)
302 struct list_head *head;
304 for (i = 0; i < PROCESS_HASH_TABLE_SIZE; i++) {
305 head = process_hash_table + i;
306 INIT_LIST_HEAD(head);
/* Hash a pid into a process_hash_table bucket index. */
310 static u32 hash_pid(u32 pid)
318 return (hash & (PROCESS_HASH_TABLE_SIZE - 1));
/* Look up the pid_map entry for a pid; presumably NULL when not found. */
321 static struct pid_map *process_hash_search(u32 pid)
323 int slot = hash_pid(pid);
324 struct list_head *head;
327 head = process_hash_table + slot;
328 list_for_each_entry(pm, head, hash_list) {
/*
 * Insert or update the pid -> name mapping.  If an entry already exists
 * with the same name (or no new name is given) it is kept; otherwise the
 * old entry is unlinked, its plot index preserved, and a new entry with
 * the new name takes its place.  A NULL name falls back to "[pid]".
 */
335 static struct pid_map *process_hash_insert(u32 pid, char *name)
337 int slot = hash_pid(pid);
342 pm = process_hash_search(pid);
344 /* Entry exists and name shouldn't be changed? */
345 if (!name || !strcmp(name, pm->name))
347 list_del(&pm->hash_list);
348 old_index = pm->index;
/* Synthesize a "[pid]" display name when the trace gave us none */
352 sprintf(buf, "[%u]", pid);
/* name is copied into storage allocated past the struct (flexible tail) */
355 pm = malloc(sizeof(struct pid_map) + strlen(name) + 1);
357 pm->index = old_index;
358 strcpy(pm->name, name);
359 list_add_tail(&pm->hash_list, process_hash_table + slot);
/*
 * Process a BLK_TC_NOTIFY record: BLK_TN_PROCESS establishes the pid/name
 * mapping; BLK_TN_TIMESTAMP anchors the trace's relative start time to the
 * wall clock (payload = two __u32s: seconds, nanoseconds).
 */
364 static void handle_notify(struct trace *trace)
366 struct blk_io_trace *io = trace->io;
367 void *payload = (char *)io + sizeof(*io);
370 if (io->action == BLK_TN_PROCESS) {
372 process_hash_insert(io->pid, payload);
376 if (io->action != BLK_TN_TIMESTAMP)
/* Malformed timestamp payload — ignore it */
379 if (io->pdu_len != sizeof(two32))
382 memcpy(two32, payload, sizeof(two32));
383 trace->start_timestamp = io->time;
384 trace->abs_start_time.tv_sec = two32[0];
385 trace->abs_start_time.tv_nsec = two32[1];
/* Normalize a negative nsec field into canonical timespec form */
386 if (trace->abs_start_time.tv_nsec < 0) {
387 trace->abs_start_time.tv_sec--;
388 trace->abs_start_time.tv_nsec += 1000000000;
/*
 * Advance the cursor past the current record (header + its pdu payload).
 * Presumably returns non-zero at end of trace, 0 otherwise — confirm.
 */
392 int next_record(struct trace *trace)
394 int skip = trace->io->pdu_len;
397 trace->cur += sizeof(*trace->io) + skip;
398 offset = trace->cur - trace->start;
399 if (offset >= trace->len)
402 trace->io = (struct blk_io_trace *)trace->cur;
/* Rewind the cursor to the first record of the mmapped trace. */
406 void first_record(struct trace *trace)
408 trace->cur = trace->start;
409 trace->io = (struct blk_io_trace *)trace->cur;
/*
 * Decide whether a record describes real IO (vs. a notify message).
 * Non-notify records count as IO; a BLK_TN_MESSAGE beginning with "fio "
 * gets special treatment (lines not fully visible — confirm intent).
 */
412 int is_io_event(struct blk_io_trace *test)
415 if (!(test->action & BLK_TC_ACT(BLK_TC_NOTIFY)))
417 if (test->action == BLK_TN_MESSAGE) {
418 int len = test->pdu_len;
/* message payload immediately follows the fixed-size header */
421 message = (char *)(test + 1);
422 if (strncmp(message, "fio ", 4) == 0) {
/*
 * Find the timestamp of the last IO event in the trace.  First scans
 * backwards from the end looking for a record whose magic checks out and
 * whose length lands exactly on EOF; if nothing is found within ~8192
 * bytes, falls back to a full forward scan tracking the latest time.
 */
429 u64 find_last_time(struct trace *trace)
431 char *p = trace->start + trace->len;
432 struct blk_io_trace *test;
436 if (trace->len < sizeof(*trace->io))
438 p -= sizeof(*trace->io);
439 while (p >= trace->start) {
440 test = (struct blk_io_trace *)p;
441 if (CHECK_MAGIC(test) && is_io_event(test)) {
442 u64 offset = p - trace->start;
/* Only trust a candidate whose payload ends exactly at EOF */
443 if (offset + sizeof(*test) + test->pdu_len == trace->len) {
/* Give up on the backwards search after a bounded window */
449 if (search_len > 8192) {
454 /* searching backwards didn't work out, we'll have to scan the file */
457 if (is_io_event(trace->io))
458 found = trace->io->time;
459 if (next_record(trace))
/*
 * Parse an fio-generated notify message of the form
 * "fio r <bank> <offset> <num_banks>" and return the three values through
 * the out parameters.  Presumably returns 0 on success, non-zero on any
 * non-matching record (returns not fully visible — confirm).
 */
466 int parse_fio_bank_message(struct trace *trace, u64 *bank_ret, u64 *offset_ret,
472 struct blk_io_trace *test = trace->io;
473 int len = test->pdu_len;
478 if (!(test->action & BLK_TC_ACT(BLK_TC_NOTIFY)))
480 if (test->action != BLK_TN_MESSAGE)
483 /* the message is fio rw bank offset num_banks */
486 message = (char *)(test + 1);
487 if (strncmp(message, "fio r ", 6) != 0)
/* Work on a bounded copy; the pdu is not guaranteed NUL-terminated */
490 message = strndup(message, len);
491 s = strchr(message, ' ');
499 bank = strtoll(s, &next, 10);
504 offset = strtoll(s, &next, 10);
509 num_banks = strtoll(s, &next, 10);
514 *offset_ret = offset;
515 *num_banks_ret = num_banks;
/*
 * Find (or append) the per-device bookkeeping slot for io->device in
 * trace->devices.  Aborts the lookup when MAX_DEVICES_PER_TRACE is hit.
 */
523 static struct dev_info *lookup_dev(struct trace *trace, struct blk_io_trace *io)
525 u32 dev = io->device;
527 struct dev_info *di = NULL;
529 for (i = 0; i < trace->num_devices; i++) {
530 if (trace->devices[i].device == dev) {
531 di = trace->devices + i;
/* Not found: claim the next free slot */
535 i = trace->num_devices++;
536 if (i >= MAX_DEVICES_PER_TRACE) {
537 fprintf(stderr, "Trace contains too many devices (%d)\n", i);
540 di = trace->devices + i;
/*
 * First pass over the trace: record the min/max byte offset seen on each
 * device, then assign each device a contiguous region in a single global
 * offset space (di->map accumulated from the spans).
 */
546 static void map_devices(struct trace *trace)
555 if (!(trace->io->action & BLK_TC_ACT(BLK_TC_NOTIFY))) {
556 di = lookup_dev(trace, trace->io);
/* sector -> byte offset (512-byte sectors) */
557 found = trace->io->sector << 9;
561 found += trace->io->bytes;
565 if (next_record(trace))
/* Lay the devices end-to-end in the combined offset space */
569 for (i = 0; i < trace->num_devices; i++) {
570 di = trace->devices + i;
572 map_start += di->max - di->min;
/*
 * Translate an IO's sector into the combined multi-device offset space
 * built by map_devices().
 * NOTE(review): this reads trace->io->sector rather than io->sector even
 * though 'io' is looked up for the device mapping — if a caller ever
 * passes io != trace->io the wrong sector is used.  Confirm against
 * callers (add_io/filter_outliers pass trace->io, so it is benign there).
 */
576 u64 map_io(struct trace *trace, struct blk_io_trace *io)
578 struct dev_info *di = lookup_dev(trace, io);
579 u64 val = trace->io->sector << 9;
580 return di->map + val - di->min;
/*
 * Scan the whole trace to find the smallest and largest mapped byte
 * offsets of any IO, plus the largest fio bank / bank offset reported in
 * notify messages.  Results go through the *_ret out parameters.
 */
583 void find_extreme_offsets(struct trace *trace, u64 *min_ret, u64 *max_ret, u64 *max_bank_ret,
587 u64 max = 0, min = ~(u64)0;
589 u64 max_bank_offset = 0;
596 if (!(trace->io->action & BLK_TC_ACT(BLK_TC_NOTIFY))) {
597 found = map_io(trace, trace->io);
/* track the end offset of the IO, not just its start */
601 found += trace->io->bytes;
607 if (!parse_fio_bank_message(trace, &bank,
608 &offset, &num_banks)) {
611 if (offset > max_bank_offset)
612 max_bank_offset = offset;
615 if (next_record(trace))
621 *max_bank_ret = max_bank;
622 *max_offset_ret = max_bank_offset;
/*
 * Note which event classes (queue / issue / completion) appear in the
 * trace, so later passes can pick the best available event type.
 */
625 static void check_io_types(struct trace *trace)
627 struct blk_io_trace *io = trace->io;
628 int action = io->action & BLK_TA_MASK;
630 if (!(io->action & BLK_TC_ACT(BLK_TC_NOTIFY))) {
632 case __BLK_TA_COMPLETE:
633 trace->found_completion = 1;
636 trace->found_issue = 1;
639 trace->found_queue = 1;
/*
 * Compute a y-axis zoom range that excludes offset outliers.  The offset
 * span is cut into 10 buckets (plus one catch-all slot, hence the arrays
 * of 11); buckets holding fewer than 5% of the busiest bucket's hits are
 * trimmed from the top and bottom of the returned range.
 */
646 int filter_outliers(struct trace *trace, u64 min_offset, u64 max_offset,
647 u64 *yzoom_min, u64 *yzoom_max)
650 u64 max_per_bucket[11];
651 u64 min_per_bucket[11];
652 u64 bytes_per_bucket = (max_offset - min_offset + 1) / 10;
656 memset(hits, 0, sizeof(int) * 11);
657 memset(max_per_bucket, 0, sizeof(u64) * 11);
/* 0xff fill yields all-ones u64s, i.e. "no minimum seen yet" */
658 memset(min_per_bucket, 0xff, sizeof(u64) * 11);
661 check_io_types(trace);
/* Only queue events feed the histogram */
662 if (!(trace->io->action & BLK_TC_ACT(BLK_TC_NOTIFY)) &&
663 (trace->io->action & BLK_TA_MASK) == __BLK_TA_QUEUE) {
664 u64 off = map_io(trace, trace->io) - min_offset;
666 slot = (int)(off / bytes_per_bucket);
668 if (off < min_per_bucket[slot])
669 min_per_bucket[slot] = off;
/* the IO's end offset may land in a later bucket */
671 off += trace->io->bytes;
672 slot = (int)(off / bytes_per_bucket);
674 if (off > max_per_bucket[slot])
675 max_per_bucket[slot] = off;
677 if (next_record(trace))
/* find the busiest bucket to scale the 5% threshold against */
681 for (slot = 0; slot < 11; slot++) {
682 if (hits[slot] > fat_count) {
683 fat_count = hits[slot];
/* shrink the top of the range past near-empty buckets */
687 *yzoom_max = max_offset;
688 for (slot = 10; slot >= 0; slot--) {
689 double d = hits[slot];
691 if (d >= (double)fat_count * .05) {
692 *yzoom_max = max_per_bucket[slot] + min_offset;
/* shrink the bottom of the range the same way */
697 *yzoom_min = min_offset;
698 for (slot = 0; slot < 10; slot++) {
699 double d = hits[slot];
701 if (d >= (double)fat_count * .05) {
702 *yzoom_min = min_per_bucket[slot] + min_offset;
/* Filename suffix blktrace gives its per-cpu dump files */
709 static char footer[] = ".blktrace.0";
710 static int footer_len = sizeof(footer) - 1;
/*
 * Test whether 'name' ends in ".blktrace.0"; on a match, presumably the
 * base-name length is returned through *len (line not visible — confirm).
 */
712 static int match_trace(char *name, int *len)
717 match_len = strlen(name);
718 if (match_len <= footer_len)
721 footer_start = match_len - footer_len;
722 if (strcmp(name + footer_start, footer) != 0)
/* singly-linked list node naming one blktrace file (name stored inline) */
731 struct tracelist *next;
/*
 * Build a linked list of every ".blktrace.0" file in dir_name.  The
 * filename storage is co-allocated right after each node.  *len is the
 * list length on return — confirm, as the increment is not visible here.
 */
735 static struct tracelist *traces_list(char *dir_name, int *len)
738 struct tracelist *traces = NULL;
739 DIR *dir = opendir(dir_name);
745 struct tracelist *tl;
746 struct dirent *d = readdir(dir);
750 if (!match_trace(d->d_name, &len))
753 /* Allocate space for tracelist + filename */
754 tl = calloc(1, sizeof(struct tracelist) + (sizeof(char) * (len + 1)));
758 tl->name = (char *)(tl + 1);
759 strncpy(tl->name, d->d_name, len);
/* Free every node of a traces_list() result (names are inline, one free each). */
772 static void traces_free(struct tracelist *traces)
775 struct tracelist *tl = traces;
776 traces = traces->next;
/*
 * Run blkparse over every trace file in dir_name to produce a single
 * "<dir_name>.dump" file, returning the dump filename (caller frees).
 * Builds an argv of the form: blkparse ... <dir_name> ... <dumpfile>
 * -i <trace> -i <trace> ... and executes it via run_program2().
 */
781 static char *combine_blktrace_devs(char *dir_name)
783 struct tracelist *traces = NULL;
784 struct tracelist *tl;
792 if (!asprintf(&dumpfile, "%s.dump", dir_name))
795 traces = traces_list(dir_name, &argc)
799 argc *= 2; /* {"-i", trace } */
800 argc += 6; /* See below */
801 argv = calloc(argc + 1, sizeof(char *));
806 argv[i++] = "blkparse";
809 argv[i++] = dir_name;
811 argv[i++] = dumpfile;
812 for (tl = traces; tl != NULL; tl = tl->next) {
814 argv[i++] = tl->name;
817 err = run_program2(argc, argv);
820 fprintf(stderr, "blkparse failed with exit code %d\n", err);
/*
 * Resolve the user-supplied name into an actual dump file, trying in
 * order: the name itself (file => use as-is; directory => combine its
 * blktrace files), "<name>.dump", and finally "<base>.blktrace.0"
 * regenerated into "<base>.dump".  Returns a heap-allocated path.
 */
829 static char *find_trace_file(char *filename)
837 /* look for an exact match of whatever they pass in.
838 * If it is a file, assume it is the dump file.
839 * If a directory, remember that it existed so we
840 * can combine traces in that directory later
842 ret = stat(filename, &st);
844 if (S_ISREG(st.st_mode))
845 return strdup(filename);
847 if (S_ISDIR(st.st_mode))
853 /* Eat up trailing '/'s */
854 for (i = strlen(filename) - 1; filename[i] == '/'; i--)
859 * try tacking .dump onto the end and see if that already
862 snprintf(line, line_len, "%s.%s", filename, "dump");
863 ret = stat(line, &st);
868 * try to generate the .dump from all the traces in
872 try = combine_blktrace_devs(filename);
878 * try to generate the .dump from all the blktrace
879 * files for a named trace
881 try = strdup(filename);
882 dot = strrchr(try, '.');
/* strip a trailing extension (unless it is already ".dump") */
883 if (!dot || strcmp(".dump", dot) != 0) {
884 if (dot && dot != try)
886 snprintf(line, line_len, "%s%s", try, ".blktrace.0");
887 ret = stat(line, &st);
889 blktrace_to_dump(try);
890 snprintf(line, line_len, "%s.%s", try, "dump");
891 ret = stat(line, &st);
/*
 * Locate the dump file for 'filename', open it, and mmap the whole file
 * read-only.  Returns a freshly allocated struct trace with the cursor at
 * the first record; presumably NULL on any failure (error paths end in
 * fprintf, the returns themselves are not visible here).
 */
901 struct trace *open_trace(char *filename)
908 char *found_filename;
910 trace = calloc(1, sizeof(*trace));
912 fprintf(stderr, "unable to allocate memory for trace\n");
916 found_filename = find_trace_file(filename);
917 if (!found_filename) {
918 fprintf(stderr, "Unable to find trace file %s\n", filename);
921 filename = found_filename;
923 fd = open(filename, O_RDONLY);
925 fprintf(stderr, "Unable to open trace file %s err %s\n", filename, strerror(errno));
928 ret = fstat(fd, &st);
930 fprintf(stderr, "stat failed on %s err %s\n", filename, strerror(errno));
933 p = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
934 if (p == MAP_FAILED) {
935 fprintf(stderr, "Unable to mmap trace file %s, err %s\n", filename, strerror(errno));
939 trace->len = st.st_size;
942 trace->io = (struct blk_io_trace *)p;
/*
 * Pick the event type to base throughput/iops graphs on, preferring
 * completion, then issue, then queue — whichever the trace contains.
 */
951 static inline int tput_event(struct trace *trace)
953 if (trace->found_completion)
954 return __BLK_TA_COMPLETE;
955 if (trace->found_issue)
956 return __BLK_TA_ISSUE;
957 if (trace->found_queue)
958 return __BLK_TA_QUEUE;
960 return __BLK_TA_COMPLETE;
/*
 * Map a user-supplied action letter (presumably 'Q'/'D'/'C' — the case
 * labels are not visible here) to the corresponding __BLK_TA_* code.
 */
963 int action_char_to_num(char action)
967 return __BLK_TA_QUEUE;
969 return __BLK_TA_ISSUE;
971 return __BLK_TA_COMPLETE;
/*
 * Pick the event type used for the IO scatter plot: an explicit
 * plot_io_action override wins, then queue, issue, completion in that
 * order of preference (note: opposite preference to tput_event()).
 */
976 static inline int io_event(struct trace *trace)
979 return plot_io_action;
980 if (trace->found_queue)
981 return __BLK_TA_QUEUE;
982 if (trace->found_issue)
983 return __BLK_TA_ISSUE;
984 if (trace->found_completion)
985 return __BLK_TA_COMPLETE;
987 return __BLK_TA_COMPLETE;
/*
 * Accumulate this record's byte count into the per-second throughput
 * graph (reads and writes tracked in separate graph_line_data), skipping
 * notify records and any action other than the chosen tput event.
 */
990 void add_tput(struct trace *trace, struct graph_line_data *writes_gld,
991 struct graph_line_data *reads_gld)
993 struct blk_io_trace *io = trace->io;
994 struct graph_line_data *gld;
995 int action = io->action & BLK_TA_MASK;
998 if (io->action & BLK_TC_ACT(BLK_TC_NOTIFY))
1001 if (action != tput_event(trace))
/* direction bit picks which graph this sample belongs to */
1004 if (BLK_DATADIR(io->action) & BLK_TC_READ)
1009 seconds = SECONDS(io->time);
1010 gld->data[seconds].sum += io->bytes;
1012 gld->data[seconds].count = 1;
1013 if (gld->data[seconds].sum > gld->max)
1014 gld->max = gld->data[seconds].sum;
/* grow the per-process plot arrays this many slots at a time */
1017 #define GDD_PTR_ALLOC_STEP 16
/*
 * Return the pid_map entry for 'pid', creating it (and assigning it the
 * next per-process plot index) on first sight.  Grows tf->gdd_reads /
 * tf->gdd_writes in GDD_PTR_ALLOC_STEP chunks, zeroing the new tail.
 * Skipped entirely when per-process plotting is disabled.
 * NOTE(review): realloc() result is assigned straight back to the
 * original pointer — on failure the old array leaks (error path partially
 * visible; confirm against full source).
 */
1019 static struct pid_map *get_pid_map(struct trace_file *tf, u32 pid)
1023 if (!io_per_process) {
1029 pm = process_hash_insert(pid, NULL);
1032 if (tf->io_plots == tf->io_plots_allocated) {
1033 tf->io_plots_allocated += GDD_PTR_ALLOC_STEP;
1034 tf->gdd_reads = realloc(tf->gdd_reads, tf->io_plots_allocated * sizeof(struct graph_dot_data *));
1037 tf->gdd_writes = realloc(tf->gdd_writes, tf->io_plots_allocated * sizeof(struct graph_dot_data *));
1038 if (!tf->gdd_writes)
/* zero only the freshly grown tail of each array */
1040 memset(tf->gdd_reads + tf->io_plots_allocated - GDD_PTR_ALLOC_STEP,
1041 0, GDD_PTR_ALLOC_STEP * sizeof(struct graph_dot_data *));
1042 memset(tf->gdd_writes + tf->io_plots_allocated - GDD_PTR_ALLOC_STEP,
1043 0, GDD_PTR_ALLOC_STEP * sizeof(struct graph_dot_data *));
1045 pm->index = tf->io_plots++;
/*
 * Plot one IO on the offset-vs-time scatter graph.  The record is mapped
 * into the combined offset space, attributed to its process (per-process
 * plots allocated lazily per direction), and its bits set in the
 * appropriate graph_dot_data.
 */
1052 void add_io(struct trace *trace, struct trace_file *tf)
1054 struct blk_io_trace *io = trace->io;
1055 int action = io->action & BLK_TA_MASK;
1061 if (io->action & BLK_TC_ACT(BLK_TC_NOTIFY))
1064 if (action != io_event(trace))
1067 offset = map_io(trace, io);
1069 pm = get_pid_map(tf, io->pid);
1077 if (BLK_DATADIR(io->action) & BLK_TC_READ) {
/* lazily allocate the per-process read plot on first use */
1078 if (!tf->gdd_reads[index])
1079 tf->gdd_reads[index] = alloc_dot_data(tf->min_seconds, tf->max_seconds, tf->min_offset, tf->max_offset, tf->stop_seconds, pick_color(), strdup(label));
1080 set_gdd_bit(tf->gdd_reads[index], offset, io->bytes, io->time);
1081 } else if (BLK_DATADIR(io->action) & BLK_TC_WRITE) {
1082 if (!tf->gdd_writes[index])
1083 tf->gdd_writes[index] = alloc_dot_data(tf->min_seconds, tf->max_seconds, tf->min_offset, tf->max_offset, tf->stop_seconds, pick_color(), strdup(label));
1084 set_gdd_bit(tf->gdd_writes[index], offset, io->bytes, io->time);
/*
 * Track queue depth over time.  Queue events register the IO in the
 * in-flight table (only needed when issue/completion events will follow),
 * requeues decrement the in-flight count, and issue events hash the
 * dispatch; the running ios_in_flight average is folded into the
 * per-second graph (increment of ios_in_flight not visible — confirm).
 */
1088 void add_pending_io(struct trace *trace, struct graph_line_data *gld)
1090 unsigned int seconds;
1091 struct blk_io_trace *io = trace->io;
1092 int action = io->action & BLK_TA_MASK;
1094 struct pending_io *pio;
1096 if (io->action & BLK_TC_ACT(BLK_TC_NOTIFY))
1099 if (action == __BLK_TA_QUEUE) {
/* only worth hashing if later events will look the IO up */
1100 if (trace->found_issue || trace->found_completion)
1101 hash_queued_io(trace->io);
1104 if (action == __BLK_TA_REQUEUE) {
1105 if (ios_in_flight > 0)
1109 if (action != __BLK_TA_ISSUE)
1112 pio = hash_dispatched_io(trace->io);
/* without completion events the entry would leak — drop it now */
1116 if (!trace->found_completion) {
1117 list_del(&pio->hash_list);
1123 seconds = SECONDS(io->time);
1124 gld->data[seconds].sum += ios_in_flight;
1125 gld->data[seconds].count++;
1127 avg = (double)gld->data[seconds].sum / gld->data[seconds].count;
1128 if (gld->max < (u64)avg) {
/*
 * On a completion event, match the record against the in-flight table,
 * add (completion time - dispatch time) to the per-second latency graph,
 * drop the pending entry, and keep the running max of the per-second
 * average latency.
 */
1133 void add_completed_io(struct trace *trace,
1134 struct graph_line_data *latency_gld)
1136 struct blk_io_trace *io = trace->io;
1138 int action = io->action & BLK_TA_MASK;
1139 struct pending_io *pio;
1143 if (io->action & BLK_TC_ACT(BLK_TC_NOTIFY))
1146 if (action != __BLK_TA_COMPLETE)
1149 seconds = SECONDS(io->time);
1151 pio = hash_completed_io(trace->io);
1155 if (ios_in_flight > 0)
/* guard against clock skew: only count non-negative latencies */
1157 if (io->time >= pio->dispatch_time) {
1158 latency = io->time - pio->dispatch_time;
1159 latency_gld->data[seconds].sum += latency;
1160 latency_gld->data[seconds].count++;
1163 list_del(&pio->hash_list);
1166 avg = (double)latency_gld->data[seconds].sum /
1167 latency_gld->data[seconds].count;
1168 if (latency_gld->max < (u64)avg) {
1169 latency_gld->max = avg;
/*
 * Count one IO per matching record into the per-second IOPS graph.
 * Uses the same event selection as throughput (tput_event()).
 */
1173 void add_iop(struct trace *trace, struct graph_line_data *gld)
1175 struct blk_io_trace *io = trace->io;
1176 int action = io->action & BLK_TA_MASK;
1179 if (io->action & BLK_TC_ACT(BLK_TC_NOTIFY))
1182 /* iops and tput use the same events */
1183 if (action != tput_event(trace))
1186 seconds = SECONDS(io->time);
1187 gld->data[seconds].sum += 1;
1188 gld->data[seconds].count = 1;
1189 if (gld->data[seconds].sum > gld->max)
1190 gld->max = gld->data[seconds].sum;
1193 void check_record(struct trace *trace)
1195 handle_notify(trace);