7 #include "../io_ddir.h"
11 #include "../blktrace_api.h"
14 #include "../lib/linux-dev-lookup.h"
16 #define TRACE_FIFO_SIZE 8192
18 static unsigned int rt_threshold = 1000000;
19 static unsigned int ios_threshold = 10;
20 static unsigned int rate_threshold;
21 static unsigned int set_rate;
22 static unsigned int max_depth = 256;
23 static int output_ascii = 1;
24 static char *filename;
38 unsigned long ios[DDIR_RWDIR_CNT];
39 unsigned long merges[DDIR_RWDIR_CNT];
41 uint64_t last_end[DDIR_RWDIR_CNT];
42 uint64_t seq[DDIR_RWDIR_CNT];
44 struct bs *bs[DDIR_RWDIR_CNT];
45 unsigned int nr_bs[DDIR_RWDIR_CNT];
52 uint64_t first_ttime[DDIR_RWDIR_CNT];
53 uint64_t last_ttime[DDIR_RWDIR_CNT];
54 uint64_t kb[DDIR_RWDIR_CNT];
60 struct flist_head hash_list;
61 struct flist_head pid_list;
64 struct trace_file *files;
66 unsigned int last_major, last_minor;
72 struct flist_head list;
77 #define PID_HASH_BITS 10
78 #define PID_HASH_SIZE (1U << PID_HASH_BITS)
80 static struct flist_head pid_hash[PID_HASH_SIZE];
81 static FLIST_HEAD(pid_list);
83 #define INFLIGHT_HASH_BITS 8
84 #define INFLIGHT_HASH_SIZE (1U << INFLIGHT_HASH_BITS)
85 static struct flist_head inflight_hash[INFLIGHT_HASH_SIZE];
87 static uint64_t first_ttime = -1ULL;
/*
 * Look up an in-flight I/O by the sector where it ends.  Walks the
 * inflight hash bucket for 'sector'; matching on end_sector lets
 * back/front merges and completions locate the queued request they
 * extend or finish.  (Tail of the function is not visible in this
 * view — presumably returns the match or NULL.)
 */
89 static struct inflight *inflight_find(uint64_t sector)
91 struct flist_head *inflight_list;
94 inflight_list = &inflight_hash[hash_long(sector, INFLIGHT_HASH_BITS)];
96 flist_for_each(e, inflight_list) {
97 struct inflight *i = flist_entry(e, struct inflight, list);
99 if (i->end_sector == sector)
/*
 * Remove an inflight entry, updating the owning pid's accounting.
 * The assert guards against the per-pid inflight count going
 * negative (the decrement itself is not visible in this view).
 */
106 static void inflight_remove(struct inflight *i)
108 struct btrace_out *o = &i->p->o;
111 assert(o->inflight >= 0);
/*
 * Insert 'i' into the inflight hash, bucketed by its end sector so
 * inflight_find() can later match merges/completions against it.
 */
116 static void __inflight_add(struct inflight *i)
118 struct flist_head *list;
120 list = &inflight_hash[hash_long(i->end_sector, INFLIGHT_HASH_BITS)];
121 flist_add_tail(&i->list, list);
/*
 * Record a newly queued I/O of 'len' bytes starting at 'sector' for
 * pid tracker 'p'.  Also tracks the observed queue depth: once the
 * depth hits max_depth without any completion trace having been seen,
 * depth tracking is disabled and the depth is capped — without
 * completions the inflight count would otherwise grow without bound.
 */
124 static void inflight_add(struct btrace_pid *p, uint64_t sector, uint32_t len)
126 struct btrace_out *o = &p->o;
129 i = calloc(1, sizeof(*i));
132 if (!o->depth_disabled) {
133 o->depth = max((int) o->depth, o->inflight);
134 if (o->depth >= max_depth && !o->complete_seen) {
135 o->depth_disabled = 1;
136 o->depth = max_depth;
/* end sector in 512-byte units; key used by inflight_find() */
139 i->end_sector = sector + (len >> 9);
/*
 * Account a merge of 'size' bytes into existing inflight I/O 'i' for
 * direction 'rw', and extend its end sector accordingly (size is 0
 * for a front merge, which does not move the end).
 */
143 static void inflight_merge(struct inflight *i, int rw, unsigned int size)
145 i->p->o.merges[rw]++;
147 i->end_sector += (size >> 9);
154 * fifo refill frontend, to avoid reading data in trace sized bites
/*
 * Read up to TRACE_FIFO_SIZE bytes from 'fd' (clamped to the room
 * left in the fifo) and append them to 'fifo'.  Returns the fifo_put
 * result; read() errors are reported via perror (error-return path
 * not fully visible in this view).
 */
156 static int refill_fifo(struct fifo *fifo, int fd)
158 char buf[TRACE_FIFO_SIZE];
163 if (total > fifo_room(fifo))
164 total = fifo_room(fifo);
166 ret = read(fd, buf, total);
168 perror("read refill");
173 ret = fifo_put(fifo, buf, ret);
179 * Retrieve 'len' bytes from the fifo, refilling if necessary.
181 static int trace_fifo_get(struct fifo *fifo, int fd, void *buf,
/* only hit the disk when the fifo cannot satisfy the request */
184 if (fifo_len(fifo) < len) {
185 int ret = refill_fifo(fifo, fd);
191 return fifo_get(fifo, buf, len);
195 * Just discard the pdu by seeking past it.
/*
 * Consume and throw away the per-trace payload (t->pdu_len bytes)
 * that follows the fixed blk_io_trace header; NULL dest makes
 * trace_fifo_get drop the data.
 */
197 static int discard_pdu(struct fifo *fifo, int fd, struct blk_io_trace *t)
202 return trace_fifo_get(fifo, fd, NULL, t->pdu_len)
/*
 * Handle a BLK_TC_NOTIFY trace (process/timestamp notifications).
 * Known notify actions are ignored; unknown actions are logged.
 * (The switch statement itself is not visible in this view.)
 */
205 static int handle_trace_notify(struct blk_io_trace *t)
209 //printf("got process notify: %x, %d\n", t->action, t->pid);
211 case BLK_TN_TIMESTAMP:
212 //printf("got timestamp notify: %x, %d\n", t->action, t->pid);
217 log_err("unknown trace act %x\n", t->action);
/*
 * Append a new block-size bucket of size 'len' (count 1) to the
 * per-direction bs array, growing it by one slot.
 * NOTE(review): realloc return is assigned straight back to o->bs[rw]
 * and is unchecked — on failure the old array leaks and NULL is
 * dereferenced; cannot fix here without the missing surrounding lines.
 */
224 static void __add_bs(struct btrace_out *o, unsigned int len, int rw)
226 o->bs[rw] = realloc(o->bs[rw], (o->nr_bs[rw] + 1) * sizeof(struct bs));
227 o->bs[rw][o->nr_bs[rw]].bs = len;
228 o->bs[rw][o->nr_bs[rw]].nr = 1;
/*
 * Account one I/O of 'len' bytes for direction 'rw': bump the count
 * of an existing matching block-size bucket, or create a new bucket
 * via __add_bs() if none exists (or the array is still empty).
 */
232 static void add_bs(struct btrace_out *o, unsigned int len, int rw)
234 struct bs *bs = o->bs[rw];
238 __add_bs(o, len, rw);
/* linear scan; block-size variety per pid is expected to be small */
242 for (i = 0; i < o->nr_bs[rw]; i++) {
243 if (bs[i].bs == len) {
249 __add_bs(o, len, rw);
252 #define FMINORBITS 20
253 #define FMINORMASK ((1U << FMINORBITS) - 1)
254 #define FMAJOR(dev) ((unsigned int) ((dev) >> FMINORBITS))
255 #define FMINOR(dev) ((unsigned int) ((dev) & FMINORMASK))
/*
 * Ensure the device 'devno' (packed major:minor) is in p's file list.
 * Fast path: same device as the previous call.  Otherwise scan the
 * existing list, and if absent resolve the dev node path via
 * blktrace_lookup_device() and append a new trace_file entry.
 * Returns non-zero on lookup failure (exact return lines not visible
 * in this view).
 */
257 static int btrace_add_file(struct btrace_pid *p, uint32_t devno)
259 unsigned int maj = FMAJOR(devno);
260 unsigned int min = FMINOR(devno);
261 struct trace_file *f;
/* cache hit: same device as last time, nothing to do */
267 if (p->last_major == maj && p->last_minor == min)
274 * check for this file in our list
276 for (i = 0; i < p->nr_files; i++) {
279 if (f->major == maj && f->minor == min)
284 if (!blktrace_lookup_device(NULL, dev, maj, min)) {
285 log_err("fio: failed to find device %u/%u\n", maj, min);
287 log_err("fio: use -d to specify device\n");
/* grow the list by one and take ownership of a strdup'd path */
293 p->files = realloc(p->files, (p->nr_files + 1) * sizeof(*f));
294 f = &p->files[p->nr_files];
295 f->name = strdup(dev);
/*
 * Map a trace to an fio data direction index: discard traces map to
 * DDIR_TRIM (return not visible in this view), otherwise 1 for a
 * write, 0 for a read.
 */
302 static int t_to_rwdir(struct blk_io_trace *t)
304 if (t->action & BLK_TC_ACT(BLK_TC_DISCARD))
307 return (t->action & BLK_TC_ACT(BLK_TC_WRITE)) != 0;
/*
 * Account a discard (trim) queue trace for pid tracker 'p':
 * register the device, latch the first-seen timestamp for the trim
 * direction (index 2 == DDIR_TRIM), and record the block size.
 */
310 static int handle_trace_discard(struct blk_io_trace *t, struct btrace_pid *p)
312 struct btrace_out *o = &p->o;
314 if (btrace_add_file(p, t->device))
/* -1ULL is the "unset" sentinel set at pid_hash_get() time */
317 if (o->first_ttime[2] == -1ULL)
318 o->first_ttime[2] = t->time;
321 add_bs(o, t->bytes, DDIR_TRIM);
/*
 * Account a regular read/write queue trace: register the device,
 * track global and per-direction first timestamps, record the block
 * size, detect sequential access (this I/O starts where the previous
 * one in the same direction ended), and remember the new end sector.
 */
325 static int handle_trace_fs(struct blk_io_trace *t, struct btrace_pid *p)
327 struct btrace_out *o = &p->o;
330 if (btrace_add_file(p, t->device))
/* global earliest timestamp across all pids, used for start delays */
333 first_ttime = min(first_ttime, (uint64_t) t->time);
335 rw = (t->action & BLK_TC_ACT(BLK_TC_WRITE)) != 0;
337 if (o->first_ttime[rw] == -1ULL)
338 o->first_ttime[rw] = t->time;
340 add_bs(o, t->bytes, rw);
/* sequential if contiguous with the last I/O (or the very first) */
343 if (t->sector == o->last_end[rw] || o->last_end[rw] == -1ULL)
346 o->last_end[rw] = t->sector + (t->bytes >> 9);
/*
 * Dispatch a queue trace by category: notify, discard, or regular
 * filesystem read/write.
 */
350 static int handle_queue_trace(struct blk_io_trace *t, struct btrace_pid *p)
352 if (t->action & BLK_TC_ACT(BLK_TC_NOTIFY))
353 return handle_trace_notify(t);
354 else if (t->action & BLK_TC_ACT(BLK_TC_DISCARD))
355 return handle_trace_discard(t, p);
357 return handle_trace_fs(t, p);
/*
 * Main per-trace dispatcher, keyed on the low 16 action bits:
 *  - QUEUE: start tracking the I/O as inflight and account it;
 *  - REQUEUE: (handling not visible in this view);
 *  - BACK/FRONTMERGE: find the inflight I/O the merge attaches to
 *    (trying both the end-sector and start-sector keys) and account
 *    the merge — a front merge extends by 0 since the end is fixed;
 *  - COMPLETE: find the inflight I/O, credit completed KB, and mark
 *    that completions are present so depth tracking stays enabled.
 */
360 static int handle_trace(struct blk_io_trace *t, struct btrace_pid *p)
362 unsigned int act = t->action & 0xffff;
365 if (act == __BLK_TA_QUEUE) {
366 inflight_add(p, t->sector, t->bytes);
367 ret = handle_queue_trace(t, p);
368 } else if (act == __BLK_TA_REQUEUE) {
370 } else if (act == __BLK_TA_BACKMERGE) {
373 i = inflight_find(t->sector + (t->bytes >> 9));
377 i = inflight_find(t->sector);
379 inflight_merge(i, t_to_rwdir(t), t->bytes);
380 } else if (act == __BLK_TA_FRONTMERGE) {
383 i = inflight_find(t->sector + (t->bytes >> 9));
387 i = inflight_find(t->sector);
389 inflight_merge(i, t_to_rwdir(t), 0);
390 } else if (act == __BLK_TA_COMPLETE) {
393 i = inflight_find(t->sector + (t->bytes >> 9));
395 i->p->o.kb[t_to_rwdir(t)] += (t->bytes >> 10);
396 i->p->o.complete_seen = 1;
/*
 * Byte-swap every field of a blk_io_trace header in place, for
 * traces recorded on a machine of the opposite endianness (see
 * trace_needs_swap()).
 */
404 static void byteswap_trace(struct blk_io_trace *t)
406 t->magic = fio_swap32(t->magic);
407 t->sequence = fio_swap32(t->sequence);
408 t->time = fio_swap64(t->time);
409 t->sector = fio_swap64(t->sector);
410 t->bytes = fio_swap32(t->bytes);
411 t->action = fio_swap32(t->action);
412 t->pid = fio_swap32(t->pid);
413 t->device = fio_swap32(t->device);
414 t->cpu = fio_swap32(t->cpu);
415 t->error = fio_swap16(t->error);
416 t->pdu_len = fio_swap16(t->pdu_len);
/*
 * Scan one pid hash bucket for the tracker belonging to 'pid'.
 * (The pid comparison and return lines are not visible in this
 * view — presumably returns the match or NULL.)
 */
419 static struct btrace_pid *pid_hash_find(pid_t pid, struct flist_head *list)
421 struct flist_head *e;
422 struct btrace_pid *p;
424 flist_for_each(e, list) {
425 p = flist_entry(e, struct btrace_pid, hash_list);
/*
 * Find-or-create the per-pid tracker: look 'pid' up in the hash;
 * if absent, allocate one, initialize the per-direction timestamp
 * and end-sector fields to the -1ULL "unset" sentinel, and link it
 * into both the hash bucket and the global pid_list.
 */
433 static struct btrace_pid *pid_hash_get(pid_t pid)
435 struct flist_head *hash_list;
436 struct btrace_pid *p;
438 hash_list = &pid_hash[hash_long(pid, PID_HASH_BITS)];
440 p = pid_hash_find(pid, hash_list);
444 p = calloc(1, sizeof(*p));
446 for (i = 0; i < DDIR_RWDIR_CNT; i++) {
447 p->o.first_ttime[i] = -1ULL;
448 p->o.last_ttime[i] = -1ULL;
449 p->o.last_end[i] = -1ULL;
453 flist_add_tail(&p->hash_list, hash_list);
454 flist_add_tail(&p->pid_list, &pid_list);
461 * Load a blktrace file by reading all the blk_io_trace entries, and storing
462 * them as io_pieces like the fio text version would do.
/*
 * Main loop: read fixed-size trace headers through a fifo, validate
 * magic/version, discard each trace's trailing pdu, then dispatch to
 * handle_trace() under the per-pid tracker.  'need_swap' selects
 * byte-swapping for foreign-endian traces (swap call itself is not
 * visible in this view).
 */
464 static int load_blktrace(const char *fname, int need_swap)
466 struct btrace_pid *p;
467 unsigned long traces;
468 struct blk_io_trace t;
472 fd = open(fname, O_RDONLY);
474 perror("open trace file\n");
478 fifo = fifo_alloc(TRACE_FIFO_SIZE);
482 ret = trace_fifo_get(fifo, fd, &t, sizeof(t));
/* partial header at EOF is corruption, not a clean end */
487 else if (ret < (int) sizeof(t)) {
488 log_err("fio: short fifo get\n");
/* low byte of magic carries the trace format version */
495 if ((t.magic & 0xffffff00) != BLK_IO_TRACE_MAGIC) {
496 log_err("fio: bad magic in blktrace data: %x\n", t.magic);
499 if ((t.magic & 0xff) != BLK_IO_TRACE_VERSION) {
500 log_err("fio: bad blktrace version %d\n", t.magic & 0xff);
503 ret = discard_pdu(fifo, fd, &t);
505 log_err("blktrace lseek\n");
507 } else if (t.pdu_len != ret) {
508 log_err("fio: discarded %d of %d\n", ret, t.pdu_len);
512 p = pid_hash_get(t.pid);
513 ret = handle_trace(&t, p);
/* last timestamp per direction, for rate/runtime computation */
516 p->o.last_ttime[t_to_rwdir(&t)] = t.time;
527 printf("Traces loaded: %lu\n", traces);
/*
 * qsort comparator: order block-size buckets by descending hit
 * count, so the most common block size comes first.
 * NOTE(review): the subtraction can overflow int for huge counts;
 * harmless for typical trace sizes but worth confirming upstream.
 */
536 static int bs_cmp(const void *ba, const void *bb)
538 const struct bs *bsa = ba;
539 const struct bs *bsb = bb;
541 return bsb->nr - bsa->nr;
/*
 * Compute the achieved KB/sec rate for direction 'rw' from the
 * first/last timestamps (ns -> usec) and the completed KB count.
 * (Division by the elapsed time is not visible in this view;
 * presumably guards against a zero interval.)
 */
544 static unsigned long o_to_kb_rate(struct btrace_out *o, int rw)
546 uint64_t usec = (o->last_ttime[rw] - o->first_ttime[rw]) / 1000ULL;
556 val = o->kb[rw] * 1000ULL;
/*
 * Earliest first-seen timestamp across the three directions
 * (read/write/trim); unset slots hold -1ULL so they lose the min.
 */
560 static uint64_t o_first_ttime(struct btrace_out *o)
564 first = min(o->first_ttime[0], o->first_ttime[1]);
565 return min(first, o->first_ttime[2]);
/*
 * Longest active time span (last - first timestamp, in ns) over all
 * directions; used as the job's effective runtime.  (Presumably the
 * missing lines skip directions with unset sentinel timestamps.)
 */
568 static uint64_t o_longest_ttime(struct btrace_out *o)
573 for (i = 0; i < DDIR_RWDIR_CNT; i++) {
576 diff = o->last_ttime[i] - o->first_ttime[i];
577 ret = max(diff, ret);
/*
 * Human-readable report for one pid: per-direction I/O and merge
 * counts with percentages, sequential ratio, rate, block-size
 * histogram, then observed depth, runtime and the device list.
 * Accumulates per-direction totals into the caller's 'ios' array.
 */
583 static void __output_p_ascii(struct btrace_pid *p, unsigned long *ios)
585 const char *msg[] = { "reads", "writes", "trims" };
586 struct btrace_out *o = &p->o;
587 unsigned long total, usec;
590 printf("[pid:\t%u]\n", p->pid);
592 total = ddir_rw_sum(o->ios);
593 for (i = 0; i < DDIR_RWDIR_CNT; i++) {
599 ios[i] += o->ios[i] + o->merges[i];
600 printf("%s\n", msg[i]);
601 perc = ((float) o->ios[i] * 100.0) / (float) total;
602 printf("\tios: %lu (perc=%3.2f%%)\n", o->ios[i], perc);
603 perc = ((float) o->merges[i] * 100.0) / (float) total;
604 printf("\tmerges: %lu (perc=%3.2f%%)\n", o->merges[i], perc);
605 perc = ((float) o->seq[i] * 100.0) / (float) o->ios[i];
606 printf("\tseq: %lu (perc=%3.2f%%)\n", (unsigned long) o->seq[i], perc);
607 printf("\trate: %lu KB/sec\n", o_to_kb_rate(o, i));
/* bs buckets were sorted most-frequent-first in __output_p() */
609 for (j = 0; j < o->nr_bs[i]; j++) {
610 struct bs *bs = &o->bs[i][j];
612 perc = (((float) bs->nr * 100.0) / (float) o->ios[i]);
613 printf("\tbs=%u, perc=%3.2f%%\n", bs->bs, perc);
617 printf("depth:\t%u\n", o->depth);
618 usec = o_longest_ttime(o) / 1000ULL;
619 printf("usec:\t%lu (delay=%llu)\n", usec, (unsigned long long) o->start_delay);
622 for (i = 0; i < p->nr_files; i++)
623 printf("%s,", p->files[i].name);
/*
 * Emit an fio job-file section reproducing this pid's I/O pattern:
 * engine/depth, rw mode with mix percentage, randomness percentage,
 * device list, start delay, runtime, block-size split, and rate.
 * Fails if the trace mixes read/write with trim (fio cannot express
 * that in one job) or if no devices were resolved.
 */
629 static int __output_p_fio(struct btrace_pid *p, unsigned long *ios)
631 struct btrace_out *o = &p->o;
633 unsigned long long time;
/* trim cannot be combined with read/write in a single fio job */
637 if ((o->ios[0] + o->ios[1]) && o->ios[2]) {
638 log_err("fio: trace has both read/write and trim\n");
642 log_err("fio: no devices found\n");
646 printf("[pid%u]\n", p->pid);
647 printf("direct=1\n");
/* sync engine for depth-1 jobs, libaio with iodepth otherwise */
649 printf("ioengine=sync\n");
651 printf("ioengine=libaio\niodepth=%u\n", o->depth);
653 if (o->ios[0] && !o->ios[1])
654 printf("rw=randread\n");
655 else if (!o->ios[0] && o->ios[1])
656 printf("rw=randwrite\n");
658 printf("rw=randtrim\n");
660 printf("rw=randrw\n");
661 total = ddir_rw_sum(o->ios);
662 perc = ((float) o->ios[0] * 100.0) / (float) total;
/* +0.99 rounds the read mix up so it never truncates to 0 */
663 printf("rwmixread=%u\n", (int) (perc + 0.99));
666 printf("percentage_random=");
667 for (i = 0; i < DDIR_RWDIR_CNT; i++) {
668 if (o->seq[i] && o->ios[i]) {
669 perc = ((float) o->seq[i] * 100.0) / (float) o->ios[i];
678 printf("%u", (int) perc);
683 for (i = 0; i < p->nr_files; i++) {
686 printf("%s", p->files[i].name);
690 printf("startdelay=%llus\n", o->start_delay / 1000000ULL);
692 time = o_longest_ttime(o);
/* round runtime up to whole seconds */
693 time = (time + 1000000000ULL - 1) / 1000000000ULL;
694 printf("runtime=%llus\n", time);
697 for (i = 0; i < DDIR_RWDIR_CNT; i++) {
699 if (i && o->nr_bs[i - 1] && o->nr_bs[i])
702 for (j = 0; j < o->nr_bs[i]; j++) {
703 struct bs *bs = &o->bs[i][j];
705 perc = (((float) bs->nr * 100.0) / (float) o->ios[i]);
/* last bucket gets no explicit split; fio assigns the remainder */
710 if (j + 1 == o->nr_bs[i])
711 printf("%u/", bs->bs);
713 printf("%u/%u", bs->bs, (int) perc);
720 for (i = 0; i < DDIR_RWDIR_CNT; i++) {
723 rate = o_to_kb_rate(o, i);
727 printf("%luk", rate);
/*
 * Prepare and emit output for one pid: sort each direction's
 * block-size buckets most-frequent-first, substitute the -d override
 * filename for the device list when given (presumably gated on the
 * 'filename' global — the condition line is not visible here), then
 * produce either ASCII or fio-job output per 'output_ascii'.
 */
736 static int __output_p(struct btrace_pid *p, unsigned long *ios)
738 struct btrace_out *o = &p->o;
741 for (i = 0; i < DDIR_RWDIR_CNT; i++) {
742 if (o->nr_bs[i] <= 1)
744 qsort(o->bs[i], o->nr_bs[i], sizeof(struct bs), bs_cmp);
748 p->files = malloc(sizeof(struct trace_file));
750 p->files[0].name = filename;
754 __output_p_ascii(p, ios);
756 ret = __output_p_fio(p, ios);
/*
 * Zero out one direction's statistics in 'o' (body not visible in
 * this view); used by prune_entry() when a direction falls below
 * the rate threshold.
 */
761 static void remove_ddir(struct btrace_out *o, int rw)
/*
 * Decide whether a pid's trace is too insignificant to output:
 * prune when total I/O count is under ios_threshold, runtime is
 * under rt_threshold usec, or (per direction, then overall) the
 * rate is under rate_threshold KB/sec.  Non-zero means prune.
 */
766 static int prune_entry(struct btrace_out *o)
772 if (ddir_rw_sum(o->ios) < ios_threshold)
775 time = o_longest_ttime(o) / 1000ULL;
776 if (time < rt_threshold)
780 for (i = 0; i < DDIR_RWDIR_CNT; i++) {
781 unsigned long this_rate;
783 this_rate = o_to_kb_rate(o, i);
784 if (this_rate < rate_threshold) {
791 if (rate < rate_threshold)
/*
 * flist_sort comparator: order pid entries by descending total I/O
 * count, so the busiest pids are emitted first.
 */
797 static int entry_cmp(void *priv, struct flist_head *a, struct flist_head *b)
799 struct btrace_pid *pa = flist_entry(a, struct btrace_pid, pid_list);
800 struct btrace_pid *pb = flist_entry(b, struct btrace_pid, pid_list);
802 return ddir_rw_sum(pb->o.ios) - ddir_rw_sum(pa->o.ios);
/*
 * Free one pid tracker: device name strings (skipping the shared -d
 * 'filename' override, which is not owned per-pid), per-direction
 * bs arrays (free call not visible here), then unlink from both the
 * hash bucket and the global pid list.
 */
805 static void free_p(struct btrace_pid *p)
807 struct btrace_out *o = &p->o;
810 for (i = 0; i < p->nr_files; i++) {
811 if (p->files[i].name && p->files[i].name != filename)
812 free(p->files[i].name);
815 for (i = 0; i < DDIR_RWDIR_CNT; i++)
819 flist_del(&p->pid_list);
820 flist_del(&p->hash_list);
/*
 * Top-level output pass: prune insignificant pid entries, compute
 * each survivor's start delay relative to the global first
 * timestamp, warn once if any depth tracking was capped due to
 * missing completion traces, sort pids by activity, then emit each
 * one and a grand total.  fio-job output aborts on the first error.
 */
824 static int output_p(void)
826 unsigned long ios[DDIR_RWDIR_CNT];
827 struct flist_head *e, *tmp;
828 int depth_disabled = 0;
/* _safe iteration: pruned entries are unlinked while walking */
831 flist_for_each_safe(e, tmp, &pid_list) {
832 struct btrace_pid *p;
834 p = flist_entry(e, struct btrace_pid, pid_list);
835 if (prune_entry(&p->o)) {
/* first_ttime was already scaled to usec by main() */
839 p->o.start_delay = (o_first_ttime(&p->o) / 1000ULL) - first_ttime;
840 depth_disabled += p->o.depth_disabled;
844 log_err("fio: missing completion traces, depths capped at %u\n", max_depth);
846 memset(ios, 0, sizeof(ios));
848 flist_sort(NULL, &pid_list, entry_cmp);
850 flist_for_each(e, &pid_list) {
851 struct btrace_pid *p;
853 p = flist_entry(e, struct btrace_pid, pid_list);
854 ret |= __output_p(p, ios);
855 if (ret && !output_ascii)
860 printf("Total: reads=%lu, writes=%lu\n", ios[0], ios[1]);
/*
 * Print command-line usage to stderr; return value (not visible
 * here) is presumably a non-zero exit status for main().
 */
865 static int usage(char *argv[])
867 log_err("%s: <blktrace bin file>\n", argv[0]);
868 log_err("\t-t\tUsec threshold to ignore task\n");
869 log_err("\t-n\tNumber IOS threshold to ignore task\n");
870 log_err("\t-f\tFio job file output\n");
871 log_err("\t-d\tUse this file/device for replay\n");
872 log_err("\t-r\tIgnore jobs with less than this KB/sec rate\n");
873 log_err("\t-R\tSet rate in fio job\n");
874 log_err("\t-D\tCap queue depth at this value (def=%u)\n", max_depth);
/*
 * Probe the trace file's endianness: read one header and test the
 * magic as-is, then byte-swapped.  Sets *swap accordingly (the
 * assignments are not visible in this view) and fails if neither
 * orientation matches — the file is not valid blktrace data.
 */
878 static int trace_needs_swap(const char *trace_file, int *swap)
880 struct blk_io_trace t;
885 fd = open(trace_file, O_RDONLY);
891 ret = read(fd, &t, sizeof(t));
896 } else if (ret != sizeof(t)) {
898 log_err("fio: short read on trace file\n");
904 if ((t.magic & 0xffffff00) == BLK_IO_TRACE_MAGIC)
908 * Maybe it needs to be endian swapped...
910 t.magic = fio_swap32(t.magic);
911 if ((t.magic & 0xffffff00) == BLK_IO_TRACE_MAGIC)
916 log_err("fio: blktrace appears corrupt\n");
923 int main(int argc, char *argv[])
930 while ((c = getopt(argc, argv, "t:n:fd:r:RD:")) != -1) {
936 rate_threshold = atoi(optarg);
939 rt_threshold = atoi(optarg);
942 ios_threshold = atoi(optarg);
948 filename = strdup(optarg);
951 max_depth = atoi(optarg);
962 if (trace_needs_swap(argv[optind], &need_swap))
965 for (i = 0; i < PID_HASH_SIZE; i++)
966 INIT_FLIST_HEAD(&pid_hash[i]);
967 for (i = 0; i < INFLIGHT_HASH_SIZE; i++)
968 INIT_FLIST_HEAD(&inflight_hash[i]);
970 load_blktrace(argv[optind], need_swap);
971 first_ttime /= 1000ULL;