7 #include "../io_ddir.h"
11 #include "../blktrace_api.h"
14 #include "../lib/linux-dev-lookup.h"
/*
 * Command-line tunables (set in main()'s getopt loop):
 *  - rt_threshold:  runtime threshold in usec; tasks below it are pruned
 *                   (see prune_entry()).
 *  - ios_threshold: minimum total IO count; tasks below it are pruned.
 *  - output_ascii:  nonzero selects the human-readable summary output,
 *                   otherwise fio job-file output is produced
 *                   (presumably cleared by the -f option — confirm, the
 *                   getopt case is not visible in this excerpt).
 */
16 #define TRACE_FIFO_SIZE 8192
18 static unsigned int rt_threshold = 1000000;
19 static unsigned int ios_threshold = 10;
20 static int output_ascii = 1;
/*
 * Per-task accounting fields. NOTE(review): the enclosing struct
 * declaration is not visible in this excerpt; usage elsewhere
 * (o->ios, o->merges, o->files, ...) suggests these belong to
 * struct btrace_out — confirm against the full file.
 * The DDIR_RWDIR_CNT-sized arrays are indexed by data direction
 * (read/write/trim, per fio's io_ddir.h).
 */
/* IO counts, block sizes and merge counts per direction */
34 unsigned long ios[DDIR_RWDIR_CNT];
35 unsigned long rw_bs[DDIR_RWDIR_CNT];
36 unsigned long merges[DDIR_RWDIR_CNT];
/* last end sector seen and sequential-IO count, per direction */
38 uint64_t last_end[DDIR_RWDIR_CNT];
39 uint64_t seq[DDIR_RWDIR_CNT];
/* dynamically grown histogram of distinct block sizes, per direction */
41 struct bs *bs[DDIR_RWDIR_CNT];
42 unsigned int nr_bs[DDIR_RWDIR_CNT];
/* device files this task touched, plus a one-entry lookup cache */
49 struct trace_file *files;
51 unsigned int last_major, last_minor;
/*
 * Linkage fields. NOTE(review): enclosing struct declarations are not
 * visible here; pid_hash_find()/pid_hash_get() indicate hash_list and
 * pid_list belong to struct btrace_pid (chained on pid_hash[] and the
 * global pid_list respectively), and inflight_add()/inflight_find()
 * indicate 'list' belongs to struct inflight (chained on
 * inflight_list). Confirm against the full file.
 */
57 struct flist_head hash_list;
58 struct flist_head pid_list;
64 struct flist_head list;
/*
 * Global state: a 1024-bucket pid hash for fast per-pid lookup, a flat
 * list of all tracked pids, the list of IOs currently in flight, and
 * the earliest trace timestamp seen across all tasks (used as the base
 * for per-task start delays; -1ULL means "none seen yet").
 */
69 #define PID_HASH_BITS 10
70 #define PID_HASH_SIZE (1U << PID_HASH_BITS)
72 static struct flist_head pid_hash[PID_HASH_SIZE];
73 static FLIST_HEAD(pid_list);
75 static FLIST_HEAD(inflight_list);
77 static uint64_t first_ttime = -1ULL;
/*
 * Find the in-flight IO whose end sector equals 'sector'.
 * Used to match back/front merges and completions against a previously
 * queued IO. Linear scan of the global inflight_list; returns NULL
 * when no match is found (return paths not visible in this excerpt).
 */
79 static struct inflight *inflight_find(uint64_t sector)
83 flist_for_each(e, &inflight_list) {
84 struct inflight *i = flist_entry(e, struct inflight, list);
86 if (i->end_sector == sector)
/*
 * Retire an in-flight IO: presumably decrements the owning task's
 * inflight count and unlinks/frees the entry (those statements are not
 * visible in this excerpt — confirm). The assert guards against the
 * depth counter going negative.
 */
93 static void inflight_remove(struct inflight *i)
95 struct btrace_out *o = &i->p->o;
98 assert(o->inflight >= 0);
/*
 * Account a merge into an existing in-flight IO: bump the owning
 * task's merge counter for direction 'rw' and extend the IO's end
 * sector by 'size' bytes (converted to 512-byte sectors). A size of 0
 * (front merge case in handle_trace()) leaves the end sector alone.
 */
103 static void inflight_merge(struct inflight *i, int rw, unsigned int size)
105 i->p->o.merges[rw]++;
107 i->end_sector += (size >> 9);
/*
 * Track a newly queued IO for task 'p': allocate an inflight entry,
 * record where the IO ends (sector + len bytes as 512-byte sectors),
 * update the task's observed max queue depth, and append to the global
 * inflight_list. NOTE(review): calloc() result is used without a NULL
 * check (intervening lines not visible — confirm).
 */
110 static void inflight_add(struct btrace_pid *p, uint64_t sector, uint32_t len)
112 struct btrace_out *o = &p->o;
115 i = calloc(1, sizeof(*i));
118 o->depth = max((int) o->depth, o->inflight);
119 i->end_sector = sector + (len >> 9);
120 flist_add_tail(&i->list, &inflight_list);
/*
 * fifo refill frontend, to avoid reading data in trace sized bites.
 * Reads up to the fifo's free space (capped at TRACE_FIFO_SIZE) from
 * 'fd' and pushes it into the fifo. Returns the read() error path via
 * perror; exact return values are not all visible in this excerpt.
 */
126 static int refill_fifo(struct fifo *fifo, int fd)
128 char buf[TRACE_FIFO_SIZE];
/* never read more than the fifo can currently hold */
133 if (total > fifo_room(fifo))
134 total = fifo_room(fifo);
136 ret = read(fd, buf, total);
138 perror("read refill");
143 ret = fifo_put(fifo, buf, ret);
/*
 * Retrieve 'len' bytes from the fifo, refilling if necessary.
 * Refills once when fewer than 'len' bytes are buffered, then hands
 * the request to fifo_get() (which copies into 'buf'; a NULL buf is
 * used by discard_pdu() to skip bytes).
 */
151 static int trace_fifo_get(struct fifo *fifo, int fd, void *buf,
154 if (fifo_len(fifo) < len) {
155 int ret = refill_fifo(fifo, fd);
161 return fifo_get(fifo, buf, len);
/*
 * Just discard the pdu by seeking past it.
 * Consumes t->pdu_len payload bytes from the fifo without copying
 * them anywhere (NULL destination buffer).
 */
167 static int discard_pdu(struct fifo *fifo, int fd, struct blk_io_trace *t)
172 return trace_fifo_get(fifo, fd, NULL, t->pdu_len);
/*
 * Handle a notify trace (BLK_TC_NOTIFY actions). Recognized notify
 * types such as BLK_TN_TIMESTAMP are ignored here (debug printfs left
 * commented out); unknown actions are reported to stderr. The switch
 * statement itself is not visible in this excerpt.
 */
175 static void handle_trace_notify(struct blk_io_trace *t)
179 //printf("got process notify: %x, %d\n", t->action, t->pid);
181 case BLK_TN_TIMESTAMP:
182 //printf("got timestamp notify: %x, %d\n", t->action, t->pid);
187 fprintf(stderr, "unknown trace act %x\n", t->action);
/*
 * Append a new block-size bucket {len, count=1} to the task's bs
 * histogram for direction 'rw', growing the array by one entry.
 * NOTE(review): realloc() result is assigned straight back to
 * o->bs[rw] with no NULL check — on allocation failure the old
 * pointer is lost and the following stores dereference NULL.
 */
192 static void __add_bs(struct btrace_out *o, unsigned int len, int rw)
194 o->bs[rw] = realloc(o->bs[rw], (o->nr_bs[rw] + 1) * sizeof(struct bs));
195 o->bs[rw][o->nr_bs[rw]].bs = len;
196 o->bs[rw][o->nr_bs[rw]].nr = 1;
/*
 * Account one IO of 'len' bytes in direction 'rw': if a bucket with
 * this exact size already exists its counter is bumped (the increment
 * statement is not visible in this excerpt), otherwise a new bucket is
 * appended via __add_bs(). The first call (empty histogram) takes the
 * early __add_bs() path.
 */
200 static void add_bs(struct btrace_out *o, unsigned int len, int rw)
202 struct bs *bs = o->bs[rw];
206 __add_bs(o, len, rw);
210 for (i = 0; i < o->nr_bs[rw]; i++) {
211 if (bs[i].bs == len) {
217 __add_bs(o, len, rw);
/*
 * Split a 32-bit encoded device number into major/minor using a
 * 20-bit minor field (matches the kernel's extended dev_t layout:
 * major in the high 12 bits, minor in the low 20).
 */
220 #define FMINORBITS 20
221 #define FMINORMASK ((1U << FMINORBITS) - 1)
222 #define FMAJOR(dev) ((unsigned int) ((dev) >> FMINORBITS))
223 #define FMINOR(dev) ((unsigned int) ((dev) & FMINORMASK))
/*
 * Record the device a trace entry belongs to in the task's file list.
 * Fast path: skip entirely if it is the same major/minor as the last
 * call (one-entry cache). Otherwise scan the existing file list, and
 * if the device is new, resolve major/minor to a /dev path via
 * blktrace_lookup_device() and append it.
 * NOTE(review): realloc()/strdup() results are not NULL-checked
 * (consistent with __add_bs(); intervening lines not visible).
 */
225 static void btrace_add_file(struct btrace_out *o, uint32_t devno)
227 unsigned int maj = FMAJOR(devno);
228 unsigned int min = FMINOR(devno);
229 struct trace_file *f;
/* same device as last time, nothing to do */
233 if (o->last_major == maj && o->last_minor == min)
240 * check for this file in our list
242 for (i = 0; i < o->nr_files; i++) {
245 if (f->major == maj && f->minor == min)
250 if (!blktrace_lookup_device(NULL, dev, maj, min)) {
251 log_err("fio: failed to find device %u/%u\n", maj, min);
255 o->files = realloc(o->files, (o->nr_files + 1) * sizeof(*f));
256 f = &o->files[o->nr_files];
257 f->name = strdup(dev);
/*
 * Account a discard/trim trace: record the device, latch the task's
 * first trace timestamp if not yet set (-1ULL sentinel), and add the
 * byte count to the TRIM block-size histogram.
 */
263 static void handle_trace_discard(struct blk_io_trace *t, struct btrace_out *o)
265 btrace_add_file(o, t->device);
267 if (o->first_ttime == -1ULL)
268 o->first_ttime = t->time;
271 add_bs(o, t->bytes, DDIR_TRIM);
/*
 * Account a regular read/write trace: record the device, update the
 * global earliest timestamp and the task's first timestamp, classify
 * the direction from the WRITE action bit, add to the block-size
 * histogram, and detect sequential IO by comparing the start sector
 * against where the previous IO in the same direction ended
 * (last_end == -1ULL means "no previous IO", counted as sequential
 * per the visible condition).
 */
274 static void handle_trace_fs(struct blk_io_trace *t, struct btrace_out *o)
278 btrace_add_file(o, t->device);
280 first_ttime = min(first_ttime, (uint64_t) t->time);
282 if (o->first_ttime == -1ULL)
283 o->first_ttime = t->time;
/* rw: 0 = read, 1 = write */
285 rw = (t->action & BLK_TC_ACT(BLK_TC_WRITE)) != 0;
287 add_bs(o, t->bytes, rw);
290 if (t->sector == o->last_end[rw] || o->last_end[rw] == -1ULL)
293 o->last_end[rw] = t->sector + (t->bytes >> 9);
/*
 * Dispatch a queued trace by action class: notify traces, then
 * discards, then everything else as regular filesystem read/write IO.
 */
296 static void handle_queue_trace(struct blk_io_trace *t, struct btrace_out *o)
298 if (t->action & BLK_TC_ACT(BLK_TC_NOTIFY))
299 handle_trace_notify(t);
300 else if (t->action & BLK_TC_ACT(BLK_TC_DISCARD))
301 handle_trace_discard(t, o);
303 handle_trace_fs(t, o);
/*
 * Main per-trace dispatcher, keyed on the low 16 action bits:
 *  - QUEUE:      start tracking the IO in flight and do the per-task
 *                accounting via handle_queue_trace().
 *  - REQUEUE:    body not visible in this excerpt.
 *  - BACKMERGE:  the new IO starts where an in-flight IO ends; find
 *                that IO by the new IO's *end* first, falling back to
 *                its start sector, and account a merge that extends
 *                the in-flight IO by t->bytes.
 *  - FRONTMERGE: same two-step lookup, but the merge is accounted
 *                with size 0 (end sector unchanged).
 *  - COMPLETE:   look up the finished IO by its end sector
 *                (presumably followed by inflight_remove() — that
 *                line is not visible here).
 */
306 static void handle_trace(struct blk_io_trace *t, struct btrace_pid *p)
308 unsigned int act = t->action & 0xffff;
310 if (act == __BLK_TA_QUEUE) {
311 inflight_add(p, t->sector, t->bytes);
312 handle_queue_trace(t, &p->o);
313 } else if (act == __BLK_TA_REQUEUE) {
315 } else if (act == __BLK_TA_BACKMERGE) {
318 i = inflight_find(t->sector + (t->bytes >> 9));
322 i = inflight_find(t->sector);
324 int rw = (t->action & BLK_TC_ACT(BLK_TC_WRITE)) != 0;
326 inflight_merge(i, rw, t->bytes);
328 } else if (act == __BLK_TA_FRONTMERGE) {
331 i = inflight_find(t->sector + (t->bytes >> 9));
335 i = inflight_find(t->sector);
337 int rw = (t->action & BLK_TC_ACT(BLK_TC_WRITE)) != 0;
339 inflight_merge(i, rw, 0);
341 } else if (act == __BLK_TA_COMPLETE) {
344 i = inflight_find(t->sector + (t->bytes >> 9));
/*
 * Endian-swap every field of a blk_io_trace read from a trace file
 * whose byte order differs from the host's (need_swap detected in
 * main() via the magic number).
 */
350 static void byteswap_trace(struct blk_io_trace *t)
352 t->magic = fio_swap32(t->magic);
353 t->sequence = fio_swap32(t->sequence);
354 t->time = fio_swap64(t->time);
355 t->sector = fio_swap64(t->sector);
356 t->bytes = fio_swap32(t->bytes);
357 t->action = fio_swap32(t->action);
358 t->pid = fio_swap32(t->pid);
359 t->device = fio_swap32(t->device);
360 t->cpu = fio_swap32(t->cpu);
361 t->error = fio_swap16(t->error);
362 t->pdu_len = fio_swap16(t->pdu_len);
/*
 * Scan one hash bucket for the entry matching 'pid'. Returns the
 * matching btrace_pid or NULL (the comparison and return statements
 * are not visible in this excerpt).
 */
365 static struct btrace_pid *pid_hash_find(pid_t pid, struct flist_head *list)
367 struct flist_head *e;
368 struct btrace_pid *p;
370 flist_for_each(e, list) {
371 p = flist_entry(e, struct btrace_pid, hash_list);
/*
 * Find-or-create the per-pid tracking entry. On a miss, allocate a
 * zeroed btrace_pid, initialize the timestamp and last_end sentinels
 * to -1ULL ("not seen yet"), and link it into both the hash bucket
 * and the flat pid_list. NOTE(review): calloc() result is used
 * without a NULL check (intervening lines not visible).
 */
379 static struct btrace_pid *pid_hash_get(pid_t pid)
381 struct flist_head *hash_list;
382 struct btrace_pid *p;
384 hash_list = &pid_hash[hash_long(pid, PID_HASH_BITS)];
386 p = pid_hash_find(pid, hash_list);
390 p = calloc(1, sizeof(*p));
391 p->o.first_ttime = -1ULL;
392 p->o.last_ttime = -1ULL;
394 for (i = 0; i < DDIR_RWDIR_CNT; i++)
395 p->o.last_end[i] = -1ULL;
398 flist_add_tail(&p->hash_list, hash_list);
399 flist_add_tail(&p->pid_list, &pid_list);
/*
 * Load a blktrace file by reading all the blk_io_trace entries, and storing
 * them as io_pieces like the fio text version would do.
 * Loop (partially visible): pull sizeof(blk_io_trace) bytes through the
 * fifo, validate magic and version, byteswap if 'need_swap' (swap call
 * not visible in this excerpt), skip the per-trace pdu payload, then
 * route the trace to its pid's entry and track its last timestamp.
 * Prints the total trace count when done.
 */
409 static int load_blktrace(const char *filename, int need_swap)
411 struct btrace_pid *p;
412 unsigned long traces;
413 struct blk_io_trace t;
417 fd = open(filename, O_RDONLY);
419 perror("open trace file\n");
423 fifo = fifo_alloc(TRACE_FIFO_SIZE);
427 int ret = trace_fifo_get(fifo, fd, &t, sizeof(t));
/* a short read here means a truncated trace record */
433 else if (ret < (int) sizeof(t)) {
434 fprintf(stderr, "fio: short fifo get\n");
/* upper 24 bits carry the magic, low byte the version */
441 if ((t.magic & 0xffffff00) != BLK_IO_TRACE_MAGIC) {
442 fprintf(stderr, "fio: bad magic in blktrace data: %x\n",
446 if ((t.magic & 0xff) != BLK_IO_TRACE_VERSION) {
447 fprintf(stderr, "fio: bad blktrace version %d\n",
451 ret = discard_pdu(fifo, fd, &t);
453 fprintf(stderr, "blktrace lseek\n");
455 } else if (t.pdu_len != ret) {
456 fprintf(stderr, "fio: discarded %d of %d\n", ret, t.pdu_len);
460 p = pid_hash_get(t.pid);
462 p->o.last_ttime = t.time;
470 printf("Traces loaded: %lu\n", traces);
/*
 * qsort comparator: order block-size buckets by descending hit count,
 * so the most common size comes first.
 * NOTE(review): subtraction-style compare can misbehave if nr's type
 * is unsigned or the difference overflows int — the declared type of
 * struct bs is not visible here; confirm.
 */
479 static int bs_cmp(const void *ba, const void *bb)
481 const struct bs *bsa = ba;
482 const struct bs *bsb = bb;
484 return bsb->nr - bsa->nr;
/*
 * Human-readable per-task report: pid header, then for each data
 * direction the IO/merge/sequential counts with percentages (ios and
 * merges relative to the task's total IO count, seq relative to that
 * direction's IO count), the block-size distribution, observed queue
 * depth, runtime in usec with start delay, and the device files
 * touched. Also accumulates ios+merges into the caller's 'ios' totals
 * for the final summary printed by output_p().
 */
487 static void __output_p_ascii(struct btrace_pid *p, unsigned long *ios)
489 const char *msg[] = { "reads", "writes", "trims" };
490 struct btrace_out *o = &p->o;
494 printf("[pid:\t%u]\n", p->pid);
496 total = ddir_rw_sum(o->ios);
497 for (i = 0; i < DDIR_RWDIR_CNT; i++) {
503 ios[i] += o->ios[i] + o->merges[i];
504 printf("%s\n", msg[i]);
505 perc = ((float) o->ios[i] * 100.0) / (float) total;
506 printf("\tios: %lu (perc=%3.2f%%)\n", o->ios[i], perc);
507 perc = ((float) o->merges[i] * 100.0) / (float) total;
508 printf("\tmerges: %lu (perc=%3.2f%%)\n", o->merges[i], perc);
509 perc = ((float) o->seq[i] * 100.0) / (float) o->ios[i];
510 printf("\tseq: %lu (perc=%3.2f%%)\n", o->seq[i], perc);
512 for (j = 0; j < o->nr_bs[i]; j++) {
513 struct bs *bs = &o->bs[i][j];
515 perc = (((float) bs->nr * 100.0) / (float) o->ios[i]);
516 printf("\tbs=%u, perc=%3.2f%%\n", bs->bs, perc);
520 printf("depth:\t%u\n", o->depth);
521 printf("usec:\t%llu (delay=%llu)\n", (o->last_ttime - o->first_ttime) / 1000ULL, (unsigned long long) o->start_delay);
524 for (i = 0; i < o->nr_files; i++)
525 printf("%s,", o->files[i].name);
/*
 * Emit one fio job section for this task. Rejects traces mixing
 * read/write with trim (fio can't express that in one job). Picks the
 * rw= mode from which directions saw IO, derives rwmixread from the
 * read share (rounded up via +0.99), emits sequential percentages,
 * the device file list, the start delay in whole seconds, and a
 * bssplit-style block-size distribution per direction. Returns
 * nonzero on the mixed trim error (visible path); other returns not
 * visible in this excerpt.
 */
531 static int __output_p_fio(struct btrace_pid *p, unsigned long *ios)
533 struct btrace_out *o = &p->o;
/* ios[0]=read, ios[1]=write, ios[2]=trim: fio jobs can't mix rw+trim */
538 if ((o->ios[0] + o->ios[1]) && o->ios[2]) {
539 log_err("fio: trace has both read/write and trim\n");
543 printf("[pid%u]\n", p->pid);
544 printf("direct=1\n");
/* depth 1 maps to sync IO, anything deeper to libaio */
546 printf("ioengine=sync\n");
548 printf("ioengine=libaio\niodepth=%u\n", o->depth);
550 if (o->ios[0] && !o->ios[1])
551 printf("rw=randread\n");
552 else if (!o->ios[0] && o->ios[1])
553 printf("rw=randwrite\n");
555 printf("rw=randtrim\n");
557 printf("rw=randrw\n");
558 total = ddir_rw_sum(o->ios);
559 perc = ((float) o->ios[0] * 100.0) / (float) total;
560 printf("rwmixread=%u\n", (int) (perc + 0.99));
563 printf("percentage_sequential=");
564 for (i = 0; i < DDIR_RWDIR_CNT; i++) {
565 if (o->seq[i] && o->ios[i]) {
566 perc = ((float) o->seq[i] * 100.0) / (float) o->ios[i];
574 printf("%u", (int) perc);
579 for (i = 0; i < o->nr_files; i++) {
582 printf("%s", o->files[i].name);
586 printf("startdelay=%llus\n", o->start_delay / 1000000ULL);
589 for (i = 0; i < DDIR_RWDIR_CNT; i++) {
/* separator between directions that both have block sizes */
591 if (i && o->nr_bs[i - 1] && o->nr_bs[i])
594 for (j = 0; j < o->nr_bs[i]; j++) {
595 struct bs *bs = &o->bs[i][j];
597 perc = (((float) bs->nr * 100.0) / (float) o->ios[i]);
/* last bucket omits the explicit percentage (fio fills the rest) */
602 if (j + 1 == o->nr_bs[i])
603 printf("%u/", bs->bs);
605 printf("%u/%u", bs->bs, (int) perc);
/*
 * Output one task: first sort each direction's block-size histogram
 * by descending frequency (skipping trivially-sorted 0/1 entry
 * cases), then dispatch to the ascii or fio-format writer (the
 * selecting condition — presumably output_ascii — is not visible in
 * this excerpt). Returns the fio writer's status.
 */
613 static int __output_p(struct btrace_pid *p, unsigned long *ios)
615 struct btrace_out *o = &p->o;
618 for (i = 0; i < DDIR_RWDIR_CNT; i++) {
619 if (o->nr_bs[i] <= 1)
621 qsort(o->bs[i], o->nr_bs[i], sizeof(struct bs), bs_cmp);
625 __output_p_ascii(p, ios);
627 ret = __output_p_fio(p, ios);
/*
 * Decide whether a task is too insignificant to report: prune when
 * its total IO count is below ios_threshold or its runtime
 * (last - first timestamp, in usec) is below rt_threshold. The
 * nonzero "prune" return statements are not visible in this excerpt.
 */
632 static int prune_entry(struct btrace_out *o)
636 if (ddir_rw_sum(o->ios) < ios_threshold)
/* nanoseconds -> microseconds */
639 time = (o->last_ttime - o->first_ttime) / 1000ULL;
640 if (time < rt_threshold)
/*
 * flist_sort comparator: order tasks by descending total IO count so
 * the busiest tasks are printed first. 'priv' is unused.
 * NOTE(review): subtraction compare — fine while counts fit in int,
 * could overflow for extreme counts.
 */
646 static int entry_cmp(void *priv, struct flist_head *a, struct flist_head *b)
648 struct btrace_pid *pa = flist_entry(a, struct btrace_pid, pid_list);
649 struct btrace_pid *pb = flist_entry(b, struct btrace_pid, pid_list);
651 return ddir_rw_sum(pb->o.ios) - ddir_rw_sum(pa->o.ios);
/*
 * Final reporting pass: drop pruned tasks from both lists (the
 * matching free() is not visible in this excerpt — possible leak,
 * confirm), compute each survivor's start delay relative to the
 * global first timestamp (first_ttime already scaled to usec by
 * main()), sort tasks by activity, emit each one, and print the
 * grand read/write totals. Returns the OR of per-task statuses.
 */
654 static int output_p(void)
656 unsigned long ios[DDIR_RWDIR_CNT];
657 struct flist_head *e, *tmp;
660 flist_for_each_safe(e, tmp, &pid_list) {
661 struct btrace_pid *p;
663 p = flist_entry(e, struct btrace_pid, pid_list);
664 if (prune_entry(&p->o)) {
665 flist_del(&p->pid_list);
666 flist_del(&p->hash_list);
/* per-task delay = task start (usec) - earliest start overall */
670 p->o.start_delay = (p->o.first_ttime / 1000ULL) - first_ttime;
673 memset(ios, 0, sizeof(ios));
675 flist_sort(NULL, &pid_list, entry_cmp);
677 flist_for_each(e, &pid_list) {
678 struct btrace_pid *p;
680 p = flist_entry(e, struct btrace_pid, pid_list);
681 ret |= __output_p(p, ios);
685 printf("Total: reads=%lu, writes=%lu\n", ios[0], ios[1]);
/*
 * Print command-line usage to stderr. Return value/statement not
 * visible in this excerpt (callers presumably propagate it as the
 * exit status).
 */
690 static int usage(char *argv[])
692 fprintf(stderr, "%s: <blktrace bin file>\n", argv[0]);
693 fprintf(stderr, "\t-t\tUsec threshold to ignore task\n");
694 fprintf(stderr, "\t-n\tNumber IOS threshold to ignore task\n");
695 fprintf(stderr, "\t-f\tFio job file output\n");
/*
 * Entry point (continues past the end of this excerpt). Parses -t/-n
 * (numeric thresholds via atoi — no error checking; strtol would be
 * more robust) and -f, then sniffs the trace file's byte order:
 * reads one blk_io_trace header, accepts it as native if the magic
 * matches, otherwise swaps the magic and accepts as foreign-endian;
 * if neither matches the file is rejected as corrupt. Finally
 * initializes the pid hash buckets, loads the whole trace, and scales
 * the global first timestamp from nsec to usec for output_p().
 */
699 int main(int argc, char *argv[])
/* need_swap: -1 = undetected, set once the magic is recognized */
701 int fd, ret, need_swap = -1;
702 struct blk_io_trace t;
708 while ((c = getopt(argc, argv, "t:n:f")) != -1) {
711 rt_threshold = atoi(optarg);
714 ios_threshold = atoi(optarg);
728 fd = open(argv[optind], O_RDONLY);
734 ret = read(fd, &t, sizeof(t));
738 } else if (ret != sizeof(t)) {
739 fprintf(stderr, "fio: short read on trace file\n");
745 if ((t.magic & 0xffffff00) == BLK_IO_TRACE_MAGIC)
749 * Maybe it needs to be endian swapped...
751 t.magic = fio_swap32(t.magic);
752 if ((t.magic & 0xffffff00) == BLK_IO_TRACE_MAGIC)
756 if (need_swap == -1) {
757 fprintf(stderr, "fio: blktrace appears corrupt\n");
761 for (i = 0; i < PID_HASH_SIZE; i++)
762 INIT_FLIST_HEAD(&pid_hash[i]);
764 load_blktrace(argv[optind], need_swap);
/* convert global earliest timestamp from nsec to usec */
765 first_ttime /= 1000ULL;