8 #include "../io_ddir.h"
12 #include "../blktrace_api.h"
15 #include "../lib/linux-dev-lookup.h"
17 #define TRACE_FIFO_SIZE 8192
/*
 * Command-line tunables; option letters and defaults are listed in usage().
 */
19 static unsigned int rt_threshold = 1000000;
20 static unsigned int ios_threshold = 10;
21 static unsigned int rate_threshold;
22 static unsigned int set_rate;
23 static unsigned int max_depth = 256;
/* human-readable output by default; presumably cleared by -f (fio job output) -- confirm */
24 static int output_ascii = 1;
/* -d: file/device to substitute for replay when device lookup is skipped */
25 static char *filename;
30 static unsigned int collapse_entries = 0;
31 static unsigned int depth_diff = 1;
32 static unsigned int random_diff = 5;
/*
 * NOTE(review): the fields below are members of the per-PID accounting
 * structures (struct btrace_out / struct btrace_pid, going by their use in
 * the functions later in this file); the enclosing struct declarations are
 * not visible in this chunk. The arrays are indexed by data direction
 * (DDIR_RWDIR_CNT entries: read/write/trim).
 */
46 unsigned long ios[DDIR_RWDIR_CNT];
47 unsigned long merges[DDIR_RWDIR_CNT];
/* end sector of the last IO per direction, for sequential detection */
49 uint64_t last_end[DDIR_RWDIR_CNT];
50 uint64_t seq[DDIR_RWDIR_CNT];
/* observed block-size histogram per direction, nr_bs entries each */
52 struct bs *bs[DDIR_RWDIR_CNT];
53 unsigned int nr_bs[DDIR_RWDIR_CNT];
/* first/last trace timestamps per direction (ns, -1ULL until first IO) */
60 uint64_t first_ttime[DDIR_RWDIR_CNT];
61 uint64_t last_ttime[DDIR_RWDIR_CNT];
62 uint64_t kb[DDIR_RWDIR_CNT];
/* links on pid_hash[] bucket and on the global pid_list, respectively */
68 struct flist_head hash_list;
69 struct flist_head pid_list;
73 unsigned int nr_merge_pids;
/* devices this pid issued IO against */
75 struct trace_file *files;
/* cache of last device seen, to short-circuit repeated lookups */
77 unsigned int last_major, last_minor;
/* Hash of struct btrace_pid keyed by pid, plus a global list of all pids */
90 #define PID_HASH_BITS 10
91 #define PID_HASH_SIZE (1U << PID_HASH_BITS)
93 static struct flist_head pid_hash[PID_HASH_SIZE];
94 static FLIST_HEAD(pid_list);
/* Hash of in-flight IOs, keyed by end sector (see inflight_find()) */
96 #define INFLIGHT_HASH_BITS 8
97 #define INFLIGHT_HASH_SIZE (1U << INFLIGHT_HASH_BITS)
98 static struct flist_head inflight_hash[INFLIGHT_HASH_SIZE];
/* Earliest trace timestamp seen across all pids (ns; scaled to usec in main) */
100 static uint64_t first_ttime = -1ULL;
/*
 * Find the in-flight IO ending at 'sector', so merge and completion traces
 * can be matched back to the queue trace that started them.
 */
102 static struct inflight *inflight_find(uint64_t sector)
104 struct flist_head *inflight_list;
105 struct flist_head *e;
107 inflight_list = &inflight_hash[hash_long(sector, INFLIGHT_HASH_BITS)];
109 flist_for_each(e, inflight_list) {
110 struct inflight *i = flist_entry(e, struct inflight, list);
112 if (i->end_sector == sector)
/* Drop an IO from its owning pid's in-flight accounting */
119 static void inflight_remove(struct inflight *i)
121 struct btrace_out *o = &i->p->o;
124 assert(o->inflight >= 0);
/* Insert an in-flight IO into the hash, bucketed by its end sector */
129 static void __inflight_add(struct inflight *i)
131 struct flist_head *list;
133 list = &inflight_hash[hash_long(i->end_sector, INFLIGHT_HASH_BITS)];
134 flist_add_tail(&i->list, list);
/*
 * Track a newly queued IO and update the pid's observed queue depth.
 * If the depth reaches max_depth before any completion trace was seen,
 * depth tracking is disabled and the depth is capped at max_depth.
 */
137 static void inflight_add(struct btrace_pid *p, uint64_t sector, uint32_t len)
139 struct btrace_out *o = &p->o;
142 i = calloc(1, sizeof(*i));
145 if (!o->depth_disabled) {
146 o->depth = max((int) o->depth, o->inflight);
147 if (o->depth >= max_depth && !o->complete_seen) {
148 o->depth_disabled = 1;
149 o->depth = max_depth;
/* len is in bytes; sectors are 512-byte units */
152 i->end_sector = sector + (len >> 9);
/* Account a merge against an existing in-flight IO and extend its end sector */
156 static void inflight_merge(struct inflight *i, int rw, unsigned int size)
158 i->p->o.merges[rw]++;
160 i->end_sector += (size >> 9);
167 * fifo refill frontend, to avoid reading data in trace sized bites
169 static int refill_fifo(struct fifo *fifo, int fd)
171 char buf[TRACE_FIFO_SIZE];
/* never read more than the fifo can currently hold */
176 if (total > fifo_room(fifo))
177 total = fifo_room(fifo);
179 ret = read(fd, buf, total);
181 perror("read refill");
186 ret = fifo_put(fifo, buf, ret);
192 * Retrieve 'len' bytes from the fifo, refilling if necessary.
194 static int trace_fifo_get(struct fifo *fifo, int fd, void *buf,
/* top up the fifo first if it holds fewer than 'len' bytes */
197 if (fifo_len(fifo) < len) {
198 int ret = refill_fifo(fifo, fd);
204 return fifo_get(fifo, buf, len);
208 * Just discard the pdu by seeking past it.
210 static int discard_pdu(struct fifo *fifo, int fd, struct blk_io_trace *t)
/* presumably a NULL buf makes fifo_get() consume without copying -- confirm in lib/fifo.c */
215 return trace_fifo_get(fifo, fd, NULL, t->pdu_len);
/* Handle a notify trace; only recognized notify actions are accepted */
218 static int handle_trace_notify(struct blk_io_trace *t)
222 //printf("got process notify: %x, %d\n", t->action, t->pid);
224 case BLK_TN_TIMESTAMP:
225 //printf("got timestamp notify: %x, %d\n", t->action, t->pid);
/* anything else is unexpected in a blktrace stream */
230 log_err("unknown trace act %x\n", t->action);
/* Append a new block-size bucket (count 1) to the per-direction histogram */
237 static void __add_bs(struct btrace_out *o, unsigned int len, int rw)
239 o->bs[rw] = realloc(o->bs[rw], (o->nr_bs[rw] + 1) * sizeof(struct bs));
240 o->bs[rw][o->nr_bs[rw]].bs = len;
241 o->bs[rw][o->nr_bs[rw]].nr = 1;
/*
 * Account one IO of size 'len': bump a matching block-size bucket if one
 * exists, otherwise append a new bucket.
 */
245 static void add_bs(struct btrace_out *o, unsigned int len, int rw)
247 struct bs *bs = o->bs[rw];
/* empty histogram: start it */
251 __add_bs(o, len, rw);
255 for (i = 0; i < o->nr_bs[rw]; i++) {
256 if (bs[i].bs == len) {
/* no existing bucket matched */
262 __add_bs(o, len, rw);
/* Split a blktrace-encoded device number into major/minor (20-bit minor) */
265 #define FMINORBITS 20
266 #define FMINORMASK ((1U << FMINORBITS) - 1)
267 #define FMAJOR(dev) ((unsigned int) ((dev) >> FMINORBITS))
268 #define FMINOR(dev) ((unsigned int) ((dev) & FMINORMASK))
/*
 * Record the device a trace touched against its pid. The last maj/min pair
 * is cached to short-circuit repeated calls for the same device; unknown
 * devices are resolved to a path via blktrace_lookup_device().
 */
270 static int btrace_add_file(struct btrace_pid *p, uint32_t devno)
272 unsigned int maj = FMAJOR(devno);
273 unsigned int min = FMINOR(devno);
274 struct trace_file *f;
/* fast path: same device as last time */
280 if (p->last_major == maj && p->last_minor == min)
287 * check for this file in our list
289 for (i = 0; i < p->nr_files; i++) {
292 if (f->major == maj && f->minor == min)
297 if (!blktrace_lookup_device(NULL, dev, maj, min)) {
298 log_err("fio: failed to find device %u/%u\n", maj, min);
300 log_err("fio: use -d to specify device\n");
/* new device: grow the list and remember its resolved name */
306 p->files = realloc(p->files, (p->nr_files + 1) * sizeof(*f));
307 f = &p->files[p->nr_files];
308 f->name = strdup(dev);
/* Map a trace to a data direction: trim for discards, else 0=read / 1=write */
315 static int t_to_rwdir(struct blk_io_trace *t)
317 if (t->action & BLK_TC_ACT(BLK_TC_DISCARD))
320 return (t->action & BLK_TC_ACT(BLK_TC_WRITE)) != 0;
/* Account a discard (trim) queue trace against its pid */
323 static int handle_trace_discard(struct blk_io_trace *t, struct btrace_pid *p)
325 struct btrace_out *o = &p->o;
327 if (btrace_add_file(p, t->device))
/* index 2 is the trim slot of first_ttime[] */
330 if (o->first_ttime[2] == -1ULL)
331 o->first_ttime[2] = t->time;
334 add_bs(o, t->bytes, DDIR_TRIM);
/*
 * Account a read/write queue trace: track the global and per-pid first
 * timestamps, the block-size histogram, and sequential-IO detection via
 * the previous IO's end sector.
 */
338 static int handle_trace_fs(struct blk_io_trace *t, struct btrace_pid *p)
340 struct btrace_out *o = &p->o;
343 if (btrace_add_file(p, t->device))
/* global earliest timestamp, across all pids */
346 first_ttime = min(first_ttime, (uint64_t) t->time);
348 rw = (t->action & BLK_TC_ACT(BLK_TC_WRITE)) != 0;
350 if (o->first_ttime[rw] == -1ULL)
351 o->first_ttime[rw] = t->time;
353 add_bs(o, t->bytes, rw);
/* sequential if this IO starts where the previous one ended */
356 if (t->sector == o->last_end[rw] || o->last_end[rw] == -1ULL)
359 o->last_end[rw] = t->sector + (t->bytes >> 9);
/* Dispatch a queue trace by its category: notify, discard, or plain fs IO */
363 static int handle_queue_trace(struct blk_io_trace *t, struct btrace_pid *p)
365 if (t->action & BLK_TC_ACT(BLK_TC_NOTIFY))
366 return handle_trace_notify(t);
367 else if (t->action & BLK_TC_ACT(BLK_TC_DISCARD))
368 return handle_trace_discard(t, p);
370 return handle_trace_fs(t, p);
/*
 * Main per-trace dispatcher, keyed on the low 16 action bits:
 * queue traces start in-flight tracking, merge traces fold into an
 * existing in-flight IO, completions account throughput.
 */
373 static int handle_trace(struct blk_io_trace *t, struct btrace_pid *p)
375 unsigned int act = t->action & 0xffff;
378 if (act == __BLK_TA_QUEUE) {
379 inflight_add(p, t->sector, t->bytes);
380 ret = handle_queue_trace(t, p);
381 } else if (act == __BLK_TA_BACKMERGE) {
/* back merge: the existing IO ends where this one ends -- look up by new end,
 * then by start (intervening handling elided in this view) */
384 i = inflight_find(t->sector + (t->bytes >> 9));
388 i = inflight_find(t->sector);
390 inflight_merge(i, t_to_rwdir(t), t->bytes);
391 } else if (act == __BLK_TA_FRONTMERGE) {
394 i = inflight_find(t->sector + (t->bytes >> 9));
398 i = inflight_find(t->sector);
/* front merge grows the front, so the end sector does not move (size 0) */
400 inflight_merge(i, t_to_rwdir(t), 0);
401 } else if (act == __BLK_TA_COMPLETE) {
404 i = inflight_find(t->sector + (t->bytes >> 9));
/* completion: account KB and note that completions exist (enables depth tracking) */
406 i->p->o.kb[t_to_rwdir(t)] += (t->bytes >> 10);
407 i->p->o.complete_seen = 1;
/* Endian-swap every field of a blk_io_trace read from a foreign-endian file */
415 static void byteswap_trace(struct blk_io_trace *t)
417 t->magic = fio_swap32(t->magic);
418 t->sequence = fio_swap32(t->sequence);
419 t->time = fio_swap64(t->time);
420 t->sector = fio_swap64(t->sector);
421 t->bytes = fio_swap32(t->bytes);
422 t->action = fio_swap32(t->action);
423 t->pid = fio_swap32(t->pid);
424 t->device = fio_swap32(t->device);
425 t->cpu = fio_swap32(t->cpu);
426 t->error = fio_swap16(t->error);
427 t->pdu_len = fio_swap16(t->pdu_len);
/* Walk one pid_hash bucket looking for an existing entry for 'pid' */
430 static struct btrace_pid *pid_hash_find(pid_t pid, struct flist_head *list)
432 struct flist_head *e;
433 struct btrace_pid *p;
435 flist_for_each(e, list) {
436 p = flist_entry(e, struct btrace_pid, hash_list);
/*
 * Find-or-create the btrace_pid for 'pid'. New entries get their timestamp
 * and last_end sentinels set to -1ULL and are linked on both the hash
 * bucket and the global pid list.
 */
444 static struct btrace_pid *pid_hash_get(pid_t pid)
446 struct flist_head *hash_list;
447 struct btrace_pid *p;
449 hash_list = &pid_hash[hash_long(pid, PID_HASH_BITS)];
451 p = pid_hash_find(pid, hash_list);
/* not found: allocate and initialize a fresh entry */
455 p = calloc(1, sizeof(*p));
457 for (i = 0; i < DDIR_RWDIR_CNT; i++) {
458 p->o.first_ttime[i] = -1ULL;
459 p->o.last_ttime[i] = -1ULL;
460 p->o.last_end[i] = -1ULL;
465 flist_add_tail(&p->hash_list, hash_list);
466 flist_add_tail(&p->pid_list, &pid_list);
473 * Load a blktrace file by reading all the blk_io_trace entries, and storing
474 * them as io_pieces like the fio text version would do.
476 static int load_blktrace(const char *fname, int need_swap)
478 struct btrace_pid *p;
479 unsigned long traces;
480 struct blk_io_trace t;
484 fd = open(fname, O_RDONLY);
486 perror("open trace file\n");
490 fifo = fifo_alloc(TRACE_FIFO_SIZE);
/* read one fixed-size trace record at a time through the fifo */
494 ret = trace_fifo_get(fifo, fd, &t, sizeof(t));
499 else if (ret < (int) sizeof(t)) {
500 log_err("fio: short fifo get\n");
/* validate the record: magic in the high bytes, version in the low byte */
507 if ((t.magic & 0xffffff00) != BLK_IO_TRACE_MAGIC) {
508 log_err("fio: bad magic in blktrace data: %x\n", t.magic);
511 if ((t.magic & 0xff) != BLK_IO_TRACE_VERSION) {
512 log_err("fio: bad blktrace version %d\n", t.magic & 0xff);
/* skip the variable-length payload that follows the record */
515 ret = discard_pdu(fifo, fd, &t);
517 log_err("blktrace lseek\n");
519 } else if (t.pdu_len != ret) {
520 log_err("fio: discarded %d of %d\n", ret, t.pdu_len);
/* account the trace against its pid */
524 p = pid_hash_get(t.pid);
525 ret = handle_trace(&t, p);
528 p->o.last_ttime[t_to_rwdir(&t)] = t.time;
539 printf("Traces loaded: %lu\n", traces);
/*
 * qsort comparator: order block-size buckets by descending frequency.
 * NOTE(review): subtracting counts can overflow int for huge traces --
 * harmless for typical inputs, but worth confirming upstream.
 */
548 static int bs_cmp(const void *ba, const void *bb)
550 const struct bs *bsa = ba;
551 const struct bs *bsb = bb;
553 return bsb->nr - bsa->nr;
/* Compute the KB/sec rate for one direction over its first..last trace span */
556 static unsigned long o_to_kb_rate(struct btrace_out *o, int rw)
/* timestamps are ns; convert the span to usec */
558 uint64_t usec = (o->last_ttime[rw] - o->first_ttime[rw]) / 1000ULL;
568 val = o->kb[rw] * 1000ULL;
/* Earliest first-trace timestamp across read(0), write(1), and trim(2) */
572 static uint64_t o_first_ttime(struct btrace_out *o)
576 first = min(o->first_ttime[0], o->first_ttime[1]);
577 return min(first, o->first_ttime[2]);
/* Longest first..last trace span (ns) across the three data directions */
580 static uint64_t o_longest_ttime(struct btrace_out *o)
585 for (i = 0; i < DDIR_RWDIR_CNT; i++) {
588 diff = o->last_ttime[i] - o->first_ttime[i];
589 ret = max(diff, ret);
/*
 * Emit the human-readable summary for one pid: per-direction IO/merge/seq
 * counts with percentages, rates, block-size histogram, inferred depth,
 * runtime, and the device list. Also accumulates into the caller's ios[].
 */
595 static void __output_p_ascii(struct btrace_pid *p, unsigned long *ios)
597 const char *msg[] = { "reads", "writes", "trims" };
598 struct btrace_out *o = &p->o;
599 unsigned long total, usec;
602 printf("[pid:\t%u", p->pid);
/* list any pids that were collapsed into this entry */
603 if (p->nr_merge_pids)
604 for (i = 0; i < p->nr_merge_pids; i++)
605 printf(", %u", p->merge_pids[i]);
608 total = ddir_rw_sum(o->ios);
609 for (i = 0; i < DDIR_RWDIR_CNT; i++) {
615 ios[i] += o->ios[i] + o->merges[i];
616 printf("%s\n", msg[i]);
617 perc = ((float) o->ios[i] * 100.0) / (float) total;
618 printf("\tios: %lu (perc=%3.2f%%)\n", o->ios[i], perc);
619 perc = ((float) o->merges[i] * 100.0) / (float) total;
620 printf("\tmerges: %lu (perc=%3.2f%%)\n", o->merges[i], perc);
621 perc = ((float) o->seq[i] * 100.0) / (float) o->ios[i];
622 printf("\tseq: %lu (perc=%3.2f%%)\n", (unsigned long) o->seq[i], perc);
623 printf("\trate: %lu KB/sec\n", o_to_kb_rate(o, i));
/* block-size histogram for this direction */
625 for (j = 0; j < o->nr_bs[i]; j++) {
626 struct bs *bs = &o->bs[i][j];
628 perc = (((float) bs->nr * 100.0) / (float) o->ios[i]);
629 printf("\tbs=%u, perc=%3.2f%%\n", bs->bs, perc);
633 printf("depth:\t%u\n", o->depth);
634 usec = o_longest_ttime(o) / 1000ULL;
635 printf("usec:\t%lu (delay=%llu)\n", usec, (unsigned long long) o->start_delay);
638 for (i = 0; i < p->nr_files; i++)
639 printf("%s,", p->files[i].name);
/*
 * Emit one fio job section for this pid: job name, engine/depth, rw mode
 * with read mix, randomness percentages, file list, start delay, runtime,
 * block sizes, and rate caps. Fails if the trace mixes rw with trim.
 */
645 static int __output_p_fio(struct btrace_pid *p, unsigned long *ios)
647 struct btrace_out *o = &p->o;
649 unsigned long long time;
/* fio cannot express rw and trim in one job */
653 if ((o->ios[0] + o->ios[1]) && o->ios[2]) {
654 log_err("fio: trace has both read/write and trim\n");
658 log_err("fio: no devices found\n");
662 printf("[pid%u", p->pid);
663 if (p->nr_merge_pids)
664 for (i = 0; i < p->nr_merge_pids; i++)
665 printf(",pid%u", p->merge_pids[i]);
668 printf("numjobs=%u\n", p->numjobs);
669 printf("direct=1\n");
/* sync engine when no queueing was observed, else libaio at the seen depth */
671 printf("ioengine=sync\n");
673 printf("ioengine=libaio\niodepth=%u\n", o->depth);
/* pick the rw= mode from which directions actually saw IO */
675 if (o->ios[0] && !o->ios[1])
676 printf("rw=randread\n");
677 else if (!o->ios[0] && o->ios[1])
678 printf("rw=randwrite\n");
680 printf("rw=randtrim\n");
682 printf("rw=randrw\n");
683 total = ddir_rw_sum(o->ios);
684 perc = ((float) o->ios[0] * 100.0) / (float) total;
685 printf("rwmixread=%u\n", (int) floor(perc + 0.50));
/* per-direction randomness, derived from the sequential counters */
688 printf("percentage_random=");
689 for (i = 0; i < DDIR_RWDIR_CNT; i++) {
690 if (o->seq[i] && o->ios[i]) {
691 perc = ((float) o->seq[i] * 100.0) / (float) o->ios[i];
700 printf("%u", (int) floor(perc + 0.5));
705 for (i = 0; i < p->nr_files; i++) {
708 printf("%s", p->files[i].name);
712 if (o->start_delay / 1000000ULL)
713 printf("startdelay=%llus\n", o->start_delay / 1000000ULL);
/* round the runtime up to whole seconds */
715 time = o_longest_ttime(o);
716 time = (time + 1000000000ULL - 1) / 1000000000ULL;
717 printf("runtime=%llus\n", time);
/* bssplit-style block-size list per direction */
720 for (i = 0; i < DDIR_RWDIR_CNT; i++) {
722 if (i && o->nr_bs[i - 1] && o->nr_bs[i])
725 for (j = 0; j < o->nr_bs[i]; j++) {
726 struct bs *bs = &o->bs[i][j];
728 perc = (((float) bs->nr * 100.0) / (float) o->ios[i]);
/* the last entry omits its percentage; fio assigns the remainder */
733 if (j + 1 == o->nr_bs[i])
734 printf("%u/", bs->bs);
736 printf("%u/%u", bs->bs, (int) floor(perc + 0.5));
743 for (i = 0; i < DDIR_RWDIR_CNT; i++) {
746 rate = o_to_kb_rate(o, i);
750 printf("%luk", rate);
/*
 * Output one pid entry: sort each block-size histogram by frequency,
 * substitute the -d filename for the device list if given, then emit
 * either the ascii summary or a fio job section.
 */
759 static int __output_p(struct btrace_pid *p, unsigned long *ios)
761 struct btrace_out *o = &p->o;
764 for (i = 0; i < DDIR_RWDIR_CNT; i++) {
765 if (o->nr_bs[i] <= 1)
767 qsort(o->bs[i], o->nr_bs[i], sizeof(struct bs), bs_cmp);
/* replace discovered devices with the user-supplied replay file */
771 p->files = malloc(sizeof(struct trace_file));
773 p->files[0].name = filename;
777 __output_p_ascii(p, ios);
779 ret = __output_p_fio(p, ios);
/* Drop one data direction from a pid's accounting (used while pruning) */
784 static void remove_ddir(struct btrace_out *o, int rw)
/*
 * Decide whether a pid entry is too insignificant to output: too few IOs,
 * too short a runtime, or too low a rate (per direction and overall),
 * against the -n/-t/-r thresholds.
 */
789 static int prune_entry(struct btrace_out *o)
795 if (ddir_rw_sum(o->ios) < ios_threshold)
/* runtime in usec */
798 time = o_longest_ttime(o) / 1000ULL;
799 if (time < rt_threshold)
803 for (i = 0; i < DDIR_RWDIR_CNT; i++) {
804 unsigned long this_rate;
806 this_rate = o_to_kb_rate(o, i);
807 if (this_rate < rate_threshold) {
814 if (rate < rate_threshold)
/* flist_sort comparator: order pid entries by descending total IO count */
820 static int entry_cmp(void *priv, struct flist_head *a, struct flist_head *b)
822 struct btrace_pid *pa = flist_entry(a, struct btrace_pid, pid_list);
823 struct btrace_pid *pb = flist_entry(b, struct btrace_pid, pid_list);
825 return ddir_rw_sum(pb->o.ios) - ddir_rw_sum(pa->o.ios);
/*
 * Free a pid entry: device names (but not the shared -d filename),
 * per-direction state, and unlink from both lists.
 */
828 static void free_p(struct btrace_pid *p)
830 struct btrace_out *o = &p->o;
/* the -d filename is shared by all entries, so never free it here */
833 for (i = 0; i < p->nr_files; i++) {
834 if (p->files[i].name && p->files[i].name != filename)
835 free(p->files[i].name);
838 for (i = 0; i < DDIR_RWDIR_CNT; i++)
842 flist_del(&p->pid_list);
843 flist_del(&p->hash_list);
/*
 * Decide whether two pid entries are similar enough to collapse: same set
 * of active directions, randomness within random_diff percent, and depth
 * within depth_diff.
 */
847 static int entries_close(struct btrace_pid *pida, struct btrace_pid *pidb)
849 float perca, percb, fdiff;
852 for (i = 0; i < DDIR_RWDIR_CNT; i++) {
/* one has IO in this direction, the other does not: not close */
853 if ((pida->o.ios[i] && !pidb->o.ios[i]) ||
854 (pidb->o.ios[i] && !pida->o.ios[i]))
856 if (pida->o.ios[i] && pidb->o.ios[i]) {
857 perca = ((float) pida->o.seq[i] * 100.0) / (float) pida->o.ios[i];
858 percb = ((float) pidb->o.seq[i] * 100.0) / (float) pidb->o.ios[i];
859 fdiff = perca - percb;
860 if (fabs(fdiff) > random_diff)
864 idiff = pida->o.depth - pidb->o.depth;
865 if (abs(idiff) > depth_diff)
/*
 * Fold histogram b into histogram a (in place, via *bsap/*nr_bsap):
 * matching block sizes add their counts, unmatched ones are appended.
 */
872 static void merge_bs(struct bs **bsap, unsigned int *nr_bsap,
873 struct bs *bsb, unsigned int nr_bsb)
875 struct bs *bsa = *bsap;
876 unsigned int nr_bsa = *nr_bsap;
879 for (b = 0; b < nr_bsb; b++) {
882 for (a = 0; a < nr_bsa; a++) {
883 if (bsb[b].bs != bsa[a].bs)
/* same block size: merge the counters */
886 bsa[a].nr += bsb[b].nr;
887 bsa[a].merges += bsb[b].merges;
/* no match: append the bucket to a */
896 bsa = realloc(bsa, (next + 1) * sizeof(struct bs));
897 bsa[next].bs = bsb[b].bs;
898 bsa[next].nr = bsb[b].nr;
/*
 * Collapse pidb into pida if they are close enough (see entries_close()):
 * record pidb's pid, sum counters, widen the time span, merge histograms,
 * and average the depths.
 */
904 static int merge_entries(struct btrace_pid *pida, struct btrace_pid *pidb)
908 if (!entries_close(pida, pidb))
/* remember which pid got folded in, for the output header */
911 pida->nr_merge_pids++;
912 pida->merge_pids = realloc(pida->merge_pids, pida->nr_merge_pids * sizeof(pid_t));
913 pida->merge_pids[pida->nr_merge_pids - 1] = pidb->pid;
915 for (i = 0; i < DDIR_RWDIR_CNT; i++) {
916 struct btrace_out *oa = &pida->o;
917 struct btrace_out *ob = &pidb->o;
919 oa->ios[i] += ob->ios[i];
920 oa->merges[i] += ob->merges[i];
921 oa->seq[i] += ob->seq[i];
922 oa->kb[i] += ob->kb[i];
923 oa->first_ttime[i] = min(oa->first_ttime[i], ob->first_ttime[i]);
924 oa->last_ttime[i] = max(oa->last_ttime[i], ob->last_ttime[i]);
925 merge_bs(&oa->bs[i], &oa->nr_bs[i], ob->bs[i], ob->nr_bs[i]);
928 pida->o.start_delay = min(pida->o.start_delay, pidb->o.start_delay);
929 pida->o.depth = (pida->o.depth + pidb->o.depth) / 2;
/* Try to merge every other entry on the pid list into 'p' */
933 static void check_merges(struct btrace_pid *p, struct flist_head *pid_list)
935 struct flist_head *e, *tmp;
/* safe iteration: merged entries get removed from the list */
940 flist_for_each_safe(e, tmp, pid_list) {
941 struct btrace_pid *pidb;
943 pidb = flist_entry(e, struct btrace_pid, pid_list);
947 if (merge_entries(p, pidb)) {
/*
 * Top-level output pass: prune insignificant pids, compute start delays,
 * optionally collapse similar entries (-c), warn if depth tracking was
 * capped, sort by IO count, and emit every remaining entry.
 */
954 static int output_p(void)
956 unsigned long ios[DDIR_RWDIR_CNT];
957 struct flist_head *e, *tmp;
958 int depth_disabled = 0;
961 flist_for_each_safe(e, tmp, &pid_list) {
962 struct btrace_pid *p;
964 p = flist_entry(e, struct btrace_pid, pid_list);
965 if (prune_entry(&p->o)) {
/* delay relative to the earliest trace in the whole file (usec) */
969 p->o.start_delay = (o_first_ttime(&p->o) / 1000ULL) - first_ttime;
970 depth_disabled += p->o.depth_disabled;
973 if (collapse_entries) {
974 struct btrace_pid *p;
976 flist_for_each_safe(e, tmp, &pid_list) {
977 p = flist_entry(e, struct btrace_pid, pid_list);
978 check_merges(p, &pid_list);
981 flist_for_each_safe(e, tmp, &pid_list) {
982 p = flist_entry(e, struct btrace_pid, pid_list);
989 log_err("fio: missing completion traces, depths capped at %u\n", max_depth);
991 memset(ios, 0, sizeof(ios));
/* biggest IO producers first */
993 flist_sort(NULL, &pid_list, entry_cmp);
995 flist_for_each(e, &pid_list) {
996 struct btrace_pid *p;
998 p = flist_entry(e, struct btrace_pid, pid_list);
999 ret |= __output_p(p, ios);
/* fio job output must be all-or-nothing; stop on first failure */
1000 if (ret && !output_ascii)
1005 printf("Total: reads=%lu, writes=%lu\n", ios[0], ios[1]);
/* Print command-line usage to stderr (via log_err) */
1010 static int usage(char *argv[])
1012 log_err("%s: [options] <blktrace bin file>\n", argv[0]);
1013 log_err("\t-t\tUsec threshold to ignore task\n");
1014 log_err("\t-n\tNumber IOS threshold to ignore task\n");
1015 log_err("\t-f\tFio job file output\n");
1016 log_err("\t-d\tUse this file/device for replay\n");
1017 log_err("\t-r\tIgnore jobs with less than this KB/sec rate\n");
1018 log_err("\t-R\tSet rate in fio job (def=%u)\n", set_rate);
1019 log_err("\t-D\tCap queue depth at this value (def=%u)\n", max_depth);
1020 log_err("\t-c\tCollapse \"identical\" jobs (def=%u)\n", collapse_entries);
1021 log_err("\t-u\tDepth difference for collapse (def=%u)\n", depth_diff);
1022 log_err("\t-x\tRandom difference for collapse (def=%u)\n", random_diff);
/*
 * Probe the trace file's endianness: read the first record and check the
 * magic as-is, then byteswapped. Sets *swap accordingly; anything else
 * means the file is corrupt.
 */
1026 static int trace_needs_swap(const char *trace_file, int *swap)
1028 struct blk_io_trace t;
1033 fd = open(trace_file, O_RDONLY);
1039 ret = read(fd, &t, sizeof(t));
1044 } else if (ret != sizeof(t)) {
1046 log_err("fio: short read on trace file\n");
/* native endianness: magic matches directly */
1052 if ((t.magic & 0xffffff00) == BLK_IO_TRACE_MAGIC)
1056 * Maybe it needs to be endian swapped...
1058 t.magic = fio_swap32(t.magic);
1059 if ((t.magic & 0xffffff00) == BLK_IO_TRACE_MAGIC)
1064 log_err("fio: blktrace appears corrupt\n");
/*
 * Entry point: parse options, detect trace endianness, initialize the
 * hash tables, load the trace, then (presumably, past this view) emit
 * the results via output_p().
 */
1071 int main(int argc, char *argv[])
1073 int need_swap, i, c;
1078 while ((c = getopt(argc, argv, "t:n:fd:r:RD:c:u:x:")) != -1) {
1084 rate_threshold = atoi(optarg);
1087 rt_threshold = atoi(optarg);
1090 ios_threshold = atoi(optarg);
1096 filename = strdup(optarg);
1099 max_depth = atoi(optarg);
1102 collapse_entries = atoi(optarg);
1105 depth_diff = atoi(optarg);
1108 random_diff = atoi(optarg);
1119 if (trace_needs_swap(argv[optind], &need_swap))
1122 for (i = 0; i < PID_HASH_SIZE; i++)
1123 INIT_FLIST_HEAD(&pid_hash[i]);
1124 for (i = 0; i < INFLIGHT_HASH_SIZE; i++)
1125 INIT_FLIST_HEAD(&inflight_hash[i]);
1127 load_blktrace(argv[optind], need_swap);
/* scale the global earliest timestamp from ns to usec for start_delay math */
1128 first_ttime /= 1000ULL;