/*
 * block queue tracing parse application
 *
 * Copyright (C) 2005 Jens Axboe <axboe@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <sys/types.h>

static char blkparse_version[] = "0.99";
struct skip_info {
        unsigned long start, end;
        struct skip_info *prev, *next;
};

struct per_dev_info {
        dev_t dev;
        char *name;

        int backwards;
        unsigned long long events;
        unsigned long long first_reported_time;
        unsigned long long last_reported_time;
        unsigned long long last_read_time;
        struct io_stats io_stats;
        unsigned long skips, nskips;
        unsigned long long seq_skips, seq_nskips;
        unsigned int max_depth[2];
        unsigned int cur_depth[2];

        struct rb_root rb_track;

        int nfiles;
        int ncpus;

        unsigned long *cpu_map;
        unsigned int cpu_map_max;

        struct per_cpu_info *cpus;
};

struct per_process_info {
        char name[16];
        __u32 pid;
        struct io_stats io_stats;
        struct per_process_info *hash_next, *list_next;
        int more_than_one;

        unsigned long long longest_allocation_wait[2];
        unsigned long long longest_dispatch_wait[2];
        unsigned long long longest_completion_wait[2];
};

#define PPI_HASH_SHIFT  (8)
#define PPI_HASH_SIZE   (1 << PPI_HASH_SHIFT)
#define PPI_HASH_MASK   (PPI_HASH_SIZE - 1)
static struct per_process_info *ppi_hash_table[PPI_HASH_SIZE];
static struct per_process_info *ppi_list;
static int ppi_list_entries;
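/*
 * every process is tracked in two places at once: the hash table above
 * for fast pid/name lookup while parsing, and the flat ppi_list for
 * sorting and reporting once the run ends
 */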
#define S_OPTS  "a:A:i:o:b:stqw:f:F:vVhD:"
static struct option l_opts[] = {
                .has_arg = required_argument,
                .has_arg = required_argument,
                .has_arg = required_argument,
                .has_arg = required_argument,
                .has_arg = required_argument,
                .name = "per-program-stats",
                .has_arg = no_argument,
                .has_arg = no_argument,
                .has_arg = no_argument,
                .has_arg = required_argument,
                .has_arg = required_argument,
                .name = "format-spec",
                .has_arg = required_argument,
                .name = "hash-by-name",
                .has_arg = no_argument,
                .has_arg = no_argument,
                .has_arg = no_argument,
                .name = "input-directory",
                .has_arg = required_argument,
/*
 * for sorting the displayed output
 */
struct trace {
        struct blk_io_trace *bit;
        struct rb_node rb_node;
        struct trace *next;
};

static struct rb_root rb_sort_root;
static unsigned long rb_sort_entries;

static struct trace *trace_list;

static struct blk_io_trace *bit_alloc_list;
static struct trace *t_alloc_list;

/*
 * for tracking individual ios
 */
struct io_track {
        struct rb_node rb_node;
        __u64 sector;
        __u32 pid;
        char comm[16];
        unsigned long long allocation_time;
        unsigned long long queue_time;
        unsigned long long dispatch_time;
        unsigned long long completion_time;
};

static struct per_dev_info *devices;
static char *get_dev_name(struct per_dev_info *, char *, int);
static int trace_rb_insert_last(struct per_dev_info *, struct trace *);

static char *output_name;
static char *input_dir;

static unsigned long long genesis_time;
static unsigned long long last_allowed_time;
static unsigned long long stopwatch_start;      /* start from zero by default */
static unsigned long long stopwatch_end = -1ULL;        /* "infinity" */

static int per_process_stats;
static int per_device_and_cpu_stats = 1;
static int track_ios;
static int ppi_hash_by_pid = 1;

static unsigned int act_mask = -1U;
static int stats_printed;

static unsigned int t_alloc_cache;
static unsigned int bit_alloc_cache;

#define RB_BATCH_DEFAULT        (512)
static unsigned int rb_batch = RB_BATCH_DEFAULT;

#define is_done()       (*(volatile int *)(&done))
static volatile int done;

#define JHASH_RANDOM    (0x3af5f2ee)

#define CPUS_PER_LONG   (8 * sizeof(unsigned long))
#define CPU_IDX(cpu)    ((cpu) / CPUS_PER_LONG)
#define CPU_BIT(cpu)    ((cpu) & (CPUS_PER_LONG - 1))
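/*
 * the per-device cpu map keeps one bit per online cpu, packed into an
 * array of unsigned longs: CPU_IDX picks the word, CPU_BIT the bit
 * within it. e.g. on a 64-bit host, cpu 67 maps to word 1, bit 3
 */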
static void resize_cpu_info(struct per_dev_info *pdi, int cpu)
{
        struct per_cpu_info *cpus = pdi->cpus;
        int ncpus = pdi->ncpus;
        int new_count = cpu + 1;
        int new_space, size;
        char *new_start;

        size = new_count * sizeof(struct per_cpu_info);
        cpus = realloc(cpus, size);
        if (!cpus) {
                char name[20];
                fprintf(stderr, "Out of memory, CPU info for device %s (%d)\n",
                        get_dev_name(pdi, name, sizeof(name)), size);
                exit(1);
        }

        new_start = (char *)cpus + (ncpus * sizeof(struct per_cpu_info));
        new_space = (new_count - ncpus) * sizeof(struct per_cpu_info);
        memset(new_start, 0, new_space);

        pdi->ncpus = new_count;
        pdi->cpus = cpus;

        for (new_count = 0; new_count < pdi->ncpus; new_count++) {
                struct per_cpu_info *pci = &pdi->cpus[new_count];

                if (!pci->fd) {
                        pci->fd = -1;
                        memset(&pci->rb_last, 0, sizeof(pci->rb_last));
                        pci->rb_last_entries = 0;
                        pci->last_sequence = -1;
                }
        }
}

static struct per_cpu_info *get_cpu_info(struct per_dev_info *pdi, int cpu)
{
        struct per_cpu_info *pci;

        if (cpu >= pdi->ncpus)
                resize_cpu_info(pdi, cpu);

        pci = &pdi->cpus[cpu];
        pci->cpu = cpu;
        return pci;
}

static int resize_devices(char *name)
{
        int size = (ndevices + 1) * sizeof(struct per_dev_info);

        devices = realloc(devices, size);
        if (!devices) {
                fprintf(stderr, "Out of memory, device %s (%d)\n", name, size);
                return 1;
        }
        memset(&devices[ndevices], 0, sizeof(struct per_dev_info));
        devices[ndevices].name = name;
        ndevices++;
        return 0;
}

static struct per_dev_info *get_dev_info(dev_t dev)
{
        struct per_dev_info *pdi;
        int i;

        for (i = 0; i < ndevices; i++) {
                if (!devices[i].dev)
                        devices[i].dev = dev;
                if (devices[i].dev == dev)
                        return &devices[i];
        }

        if (resize_devices(NULL))
                return NULL;

        pdi = &devices[ndevices - 1];
        pdi->dev = dev;
        pdi->first_reported_time = 0;
        pdi->last_read_time = 0;
        return pdi;
}
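/*
 * skip ranges record sequence numbers that have not been seen yet for
 * a cpu; insert_skip() below coalesces a new range into an adjacent
 * one where it can
 */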
static void insert_skip(struct per_cpu_info *pci, unsigned long start,
                        unsigned long end)
{
        struct skip_info *sip;

        for (sip = pci->skips_tail; sip != NULL; sip = sip->prev) {
                if (end == (sip->start - 1)) {
                        sip->start = start;
                        return;
                } else if (start == (sip->end + 1)) {
                        sip->end = end;
                        return;
                }
        }

        sip = malloc(sizeof(struct skip_info));
        sip->start = start;
        sip->end = end;
        sip->prev = sip->next = NULL;
        if (pci->skips_tail == NULL)
                pci->skips_head = pci->skips_tail = sip;
        else {
                sip->prev = pci->skips_tail;
                pci->skips_tail->next = sip;
                pci->skips_tail = sip;
        }
}

static void remove_sip(struct per_cpu_info *pci, struct skip_info *sip)
{
        if (sip->prev == NULL) {
                if (sip->next == NULL)
                        pci->skips_head = pci->skips_tail = NULL;
                else {
                        pci->skips_head = sip->next;
                        sip->next->prev = NULL;
                }
        } else if (sip->next == NULL) {
                pci->skips_tail = sip->prev;
                sip->prev->next = NULL;
        } else {
                sip->prev->next = sip->next;
                sip->next->prev = sip->prev;
        }

        sip->prev = sip->next = NULL;
}
#define IN_SKIP(sip,seq) (((sip)->start <= (seq)) && ((seq) <= sip->end))
static int check_current_skips(struct per_cpu_info *pci, unsigned long seq)
        struct skip_info *sip;

        for (sip = pci->skips_tail; sip != NULL; sip = sip->prev) {
                if (IN_SKIP(sip, seq)) {
                        if (sip->start == seq) {
                                remove_sip(pci, sip);
                        } else if (sip->end == seq)
                                insert_skip(pci, seq + 1, sip->end);

static void collect_pdi_skips(struct per_dev_info *pdi)
        struct skip_info *sip;

        for (cpu = 0; cpu < pdi->ncpus; cpu++) {
                struct per_cpu_info *pci = &pdi->cpus[cpu];

                for (sip = pci->skips_head; sip != NULL; sip = sip->next) {
                        pdi->seq_skips += (sip->end - sip->start + 1);
                        fprintf(stderr, "(%d,%d): skipping %lu -> %lu\n",
                                MAJOR(pdi->dev), MINOR(pdi->dev),
                                sip->start, sip->end);
static void cpu_mark_online(struct per_dev_info *pdi, unsigned int cpu)
{
        if (cpu >= pdi->cpu_map_max || !pdi->cpu_map) {
                int new_max = (cpu + CPUS_PER_LONG) & ~(CPUS_PER_LONG - 1);
                unsigned long *map = malloc(new_max / sizeof(long));

                memset(map, 0, new_max / sizeof(long));

                if (pdi->cpu_map) {
                        memcpy(map, pdi->cpu_map, pdi->cpu_map_max / sizeof(long));
                        free(pdi->cpu_map);
                }

                pdi->cpu_map = map;
                pdi->cpu_map_max = new_max;
        }

        pdi->cpu_map[CPU_IDX(cpu)] |= (1UL << CPU_BIT(cpu));
}

static inline void cpu_mark_offline(struct per_dev_info *pdi, int cpu)
{
        pdi->cpu_map[CPU_IDX(cpu)] &= ~(1UL << CPU_BIT(cpu));
}

static inline int cpu_is_online(struct per_dev_info *pdi, int cpu)
{
        return (pdi->cpu_map[CPU_IDX(cpu)] & (1UL << CPU_BIT(cpu))) != 0;
}

static inline int ppi_hash_pid(__u32 pid)
{
        return jhash_1word(pid, JHASH_RANDOM) & PPI_HASH_MASK;
}

static inline int ppi_hash_name(const char *name)
{
        return jhash(name, 16, JHASH_RANDOM) & PPI_HASH_MASK;
}
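/*
 * name hashing covers the fixed 16 byte comm field, the same length
 * copied into per_process_info->name below
 */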
static inline int ppi_hash(struct per_process_info *ppi)
{
        if (ppi_hash_by_pid)
                return ppi_hash_pid(ppi->pid);

        return ppi_hash_name(ppi->name);
}

static inline void add_process_to_hash(struct per_process_info *ppi)
{
        const int hash_idx = ppi_hash(ppi);

        ppi->hash_next = ppi_hash_table[hash_idx];
        ppi_hash_table[hash_idx] = ppi;
}

static inline void add_process_to_list(struct per_process_info *ppi)
{
        ppi->list_next = ppi_list;
        ppi_list = ppi;
        ppi_list_entries++;
}

static struct per_process_info *find_process_by_name(char *name)
{
        const int hash_idx = ppi_hash_name(name);
        struct per_process_info *ppi;

        ppi = ppi_hash_table[hash_idx];
        while (ppi) {
                if (!strcmp(ppi->name, name))
                        return ppi;

                ppi = ppi->hash_next;
        }

        return NULL;
}

static struct per_process_info *find_process_by_pid(__u32 pid)
{
        const int hash_idx = ppi_hash_pid(pid);
        struct per_process_info *ppi;

        ppi = ppi_hash_table[hash_idx];
        while (ppi) {
                if (ppi->pid == pid)
                        return ppi;

                ppi = ppi->hash_next;
        }

        return NULL;
}

static struct per_process_info *find_process(__u32 pid, char *name)
{
        struct per_process_info *ppi;

        if (ppi_hash_by_pid)
                return find_process_by_pid(pid);

        ppi = find_process_by_name(name);
        if (ppi && ppi->pid != pid)
                ppi->more_than_one = 1;

        return ppi;
}

/*
 * struct trace and blktrace allocation cache, we do potentially
 * millions of mallocs for these structures while only using at most
 * a few thousand at a time
 */
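/*
 * both caches are simple singly linked free lists, capped at 1024
 * entries each; see t_free()/t_alloc() and bit_free()/bit_alloc()
 */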
static inline void t_free(struct trace *t)
{
        if (t_alloc_cache < 1024) {
                t->next = t_alloc_list;
                t_alloc_list = t;
                t_alloc_cache++;
        } else
                free(t);
}

static inline struct trace *t_alloc(void)
{
        struct trace *t = t_alloc_list;

        if (t) {
                t_alloc_list = t->next;
                t_alloc_cache--;
                return t;
        }

        return malloc(sizeof(*t));
}

static inline void bit_free(struct blk_io_trace *bit)
{
        if (bit_alloc_cache < 1024 && !bit->pdu_len) {
                /*
                 * abuse a 64-bit field for a next pointer for the free item
                 */
                bit->time = (__u64) (unsigned long) bit_alloc_list;
                bit_alloc_list = (struct blk_io_trace *) bit;
                bit_alloc_cache++;
        } else
                free(bit);
}

static inline struct blk_io_trace *bit_alloc(void)
{
        struct blk_io_trace *bit = bit_alloc_list;

        if (bit) {
                bit_alloc_list = (struct blk_io_trace *) (unsigned long) \
                                 bit->time;
                bit_alloc_cache--;
                return bit;
        }

        return malloc(sizeof(*bit));
}

static inline void __put_trace_last(struct per_dev_info *pdi, struct trace *t)
{
        struct per_cpu_info *pci = get_cpu_info(pdi, t->bit->cpu);

        rb_erase(&t->rb_node, &pci->rb_last);
        pci->rb_last_entries--;

        bit_free(t->bit);
        t_free(t);
}

static void put_trace(struct per_dev_info *pdi, struct trace *t)
{
        rb_erase(&t->rb_node, &rb_sort_root);
        rb_sort_entries--;

        trace_rb_insert_last(pdi, t);
}
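/*
 * traces sort on (time, device, sequence), so identical timestamps
 * from different devices still yield a stable, deterministic order
 */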
static inline int trace_rb_insert(struct trace *t, struct rb_root *root)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct trace *__t;

        while (*p) {
                parent = *p;

                __t = rb_entry(parent, struct trace, rb_node);

                if (t->bit->time < __t->bit->time)
                        p = &(*p)->rb_left;
                else if (t->bit->time > __t->bit->time)
                        p = &(*p)->rb_right;
                else if (t->bit->device < __t->bit->device)
                        p = &(*p)->rb_left;
                else if (t->bit->device > __t->bit->device)
                        p = &(*p)->rb_right;
                else if (t->bit->sequence < __t->bit->sequence)
                        p = &(*p)->rb_left;
                else /* >= sequence */
                        p = &(*p)->rb_right;
        }

        rb_link_node(&t->rb_node, parent, p);
        rb_insert_color(&t->rb_node, root);
        return 0;
}

static inline int trace_rb_insert_sort(struct trace *t)
{
        if (!trace_rb_insert(t, &rb_sort_root)) {
                rb_sort_entries++;
                return 0;
        }

        return 1;
}

static int trace_rb_insert_last(struct per_dev_info *pdi, struct trace *t)
{
        struct per_cpu_info *pci = get_cpu_info(pdi, t->bit->cpu);

        if (trace_rb_insert(t, &pci->rb_last))
                return 1;

        pci->rb_last_entries++;

        if (pci->rb_last_entries > rb_batch * pdi->nfiles) {
                struct rb_node *n = rb_first(&pci->rb_last);

                t = rb_entry(n, struct trace, rb_node);
                __put_trace_last(pdi, t);
        }

        return 0;
}
static struct trace *trace_rb_find(dev_t device, unsigned long sequence,
                                   struct rb_root *root, int order)
{
        struct rb_node *n = root->rb_node;
        struct rb_node *prev = NULL;
        struct trace *__t;

        while (n) {
                __t = rb_entry(n, struct trace, rb_node);
                prev = n;

                if (device < __t->bit->device)
                        n = n->rb_left;
                else if (device > __t->bit->device)
                        n = n->rb_right;
                else if (sequence < __t->bit->sequence)
                        n = n->rb_left;
                else if (sequence > __t->bit->sequence)
                        n = n->rb_right;
                else
                        return __t;
        }

        /*
         * hack - the list may not be sequence ordered because some
         * events don't have sequence and time matched. so we end up
         * being a little off in the rb lookup here, because we don't
         * know the time we are looking for. compensate by browsing
         * a little ahead from the last entry to find the match
         */
        if (order && prev) {
                int max = 5;

                while (((n = rb_next(prev)) != NULL) && max--) {
                        __t = rb_entry(n, struct trace, rb_node);

                        if (__t->bit->device == device &&
                            __t->bit->sequence == sequence)
                                return __t;

                        prev = n;
                }
        }

        return NULL;
}

static inline struct trace *trace_rb_find_last(struct per_dev_info *pdi,
                                               struct per_cpu_info *pci,
                                               unsigned long seq)
{
        return trace_rb_find(pdi->dev, seq, &pci->rb_last, 0);
}
static inline int track_rb_insert(struct per_dev_info *pdi, struct io_track *iot)
{
        struct rb_node **p = &pdi->rb_track.rb_node;
        struct rb_node *parent = NULL;
        struct io_track *__iot;

        while (*p) {
                parent = *p;
                __iot = rb_entry(parent, struct io_track, rb_node);

                if (iot->sector < __iot->sector)
                        p = &(*p)->rb_left;
                else if (iot->sector > __iot->sector)
                        p = &(*p)->rb_right;
                else {
                        fprintf(stderr,
                                "sector alias (%Lu) on device %d,%d!\n",
                                (unsigned long long) iot->sector,
                                MAJOR(pdi->dev), MINOR(pdi->dev));
                        return 1;
                }
        }

        rb_link_node(&iot->rb_node, parent, p);
        rb_insert_color(&iot->rb_node, &pdi->rb_track);
        return 0;
}
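/*
 * tracked ios are keyed by start sector: __find_track() is the plain
 * lookup, find_track() allocates and inserts a new entry on a miss
 */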
static struct io_track *__find_track(struct per_dev_info *pdi, __u64 sector)
{
        struct rb_node *n = pdi->rb_track.rb_node;
        struct io_track *__iot;

        while (n) {
                __iot = rb_entry(n, struct io_track, rb_node);

                if (sector < __iot->sector)
                        n = n->rb_left;
                else if (sector > __iot->sector)
                        n = n->rb_right;
                else
                        return __iot;
        }

        return NULL;
}

static struct io_track *find_track(struct per_dev_info *pdi, __u32 pid,
                                   char *comm, __u64 sector)
{
        struct io_track *iot;

        iot = __find_track(pdi, sector);
        if (!iot) {
                iot = malloc(sizeof(*iot));
                iot->pid = pid;
                memcpy(iot->comm, comm, sizeof(iot->comm));
                iot->sector = sector;
                track_rb_insert(pdi, iot);
        }

        return iot;
}
static void log_track_frontmerge(struct per_dev_info *pdi,
                                 struct blk_io_trace *t)
{
        struct io_track *iot;

        if (!track_ios)
                return;

        iot = __find_track(pdi, t->sector + t_sec(t));
        if (!iot) {
                if (verbose)
                        fprintf(stderr, "merge not found for (%d,%d): %llu\n",
                                MAJOR(pdi->dev), MINOR(pdi->dev),
                                (unsigned long long) t->sector + t_sec(t));
                return;
        }

        rb_erase(&iot->rb_node, &pdi->rb_track);
        iot->sector -= t_sec(t);
        track_rb_insert(pdi, iot);
}

static void log_track_getrq(struct per_dev_info *pdi, struct blk_io_trace *t)
{
        struct io_track *iot;

        if (!track_ios)
                return;

        iot = find_track(pdi, t->pid, t->comm, t->sector);
        iot->allocation_time = t->time;
}
/*
 * return time between rq allocation and insertion
 */
static unsigned long long log_track_insert(struct per_dev_info *pdi,
                                           struct blk_io_trace *t)
{
        unsigned long long elapsed;
        struct io_track *iot;

        if (!track_ios)
                return -1;

        iot = find_track(pdi, t->pid, t->comm, t->sector);
        iot->queue_time = t->time;

        if (!iot->allocation_time)
                return -1;

        elapsed = iot->queue_time - iot->allocation_time;

        if (per_process_stats) {
                struct per_process_info *ppi = find_process(iot->pid, iot->comm);
                int w = (t->action & BLK_TC_ACT(BLK_TC_WRITE)) != 0;

                if (ppi && elapsed > ppi->longest_allocation_wait[w])
                        ppi->longest_allocation_wait[w] = elapsed;
        }

        return elapsed;
}
/*
 * return time between queue and issue
 */
static unsigned long long log_track_issue(struct per_dev_info *pdi,
                                          struct blk_io_trace *t)
{
        unsigned long long elapsed;
        struct io_track *iot;

        if (!track_ios)
                return -1;
        if ((t->action & BLK_TC_ACT(BLK_TC_FS)) == 0)
                return -1;

        iot = __find_track(pdi, t->sector);
        if (!iot) {
                if (verbose)
                        fprintf(stderr, "issue not found for (%d,%d): %llu\n",
                                MAJOR(pdi->dev), MINOR(pdi->dev),
                                (unsigned long long) t->sector);
                return -1;
        }

        iot->dispatch_time = t->time;
        elapsed = iot->dispatch_time - iot->queue_time;

        if (per_process_stats) {
                struct per_process_info *ppi = find_process(iot->pid, iot->comm);
                int w = (t->action & BLK_TC_ACT(BLK_TC_WRITE)) != 0;

                if (ppi && elapsed > ppi->longest_dispatch_wait[w])
                        ppi->longest_dispatch_wait[w] = elapsed;
        }

        return elapsed;
}
/*
 * return time between dispatch and complete
 */
static unsigned long long log_track_complete(struct per_dev_info *pdi,
                                             struct blk_io_trace *t)
{
        unsigned long long elapsed;
        struct io_track *iot;

        if (!track_ios)
                return -1;
        if ((t->action & BLK_TC_ACT(BLK_TC_FS)) == 0)
                return -1;

        iot = __find_track(pdi, t->sector);
        if (!iot) {
                if (verbose)
                        fprintf(stderr, "complete not found for (%d,%d): %llu\n",
                                MAJOR(pdi->dev), MINOR(pdi->dev),
                                (unsigned long long) t->sector);
                return -1;
        }

        iot->completion_time = t->time;
        elapsed = iot->completion_time - iot->dispatch_time;

        if (per_process_stats) {
                struct per_process_info *ppi = find_process(iot->pid, iot->comm);
                int w = (t->action & BLK_TC_ACT(BLK_TC_WRITE)) != 0;

                if (ppi && elapsed > ppi->longest_completion_wait[w])
                        ppi->longest_completion_wait[w] = elapsed;
        }

        /*
         * kill the trace, we don't need it after completion
         */
        rb_erase(&iot->rb_node, &pdi->rb_track);
        free(iot);

        return elapsed;
}
static struct io_stats *find_process_io_stats(__u32 pid, char *name)
{
        struct per_process_info *ppi = find_process(pid, name);

        if (!ppi) {
                ppi = malloc(sizeof(*ppi));
                memset(ppi, 0, sizeof(*ppi));
                memcpy(ppi->name, name, 16);
                ppi->pid = pid;
                add_process_to_hash(ppi);
                add_process_to_list(ppi);
        }

        return &ppi->io_stats;
}

static char *get_dev_name(struct per_dev_info *pdi, char *buffer, int size)
{
        if (pdi->name)
                snprintf(buffer, size, "%s", pdi->name);
        else
                snprintf(buffer, size, "%d,%d", MAJOR(pdi->dev), MINOR(pdi->dev));

        return buffer;
}
static void check_time(struct per_dev_info *pdi, struct blk_io_trace *bit)
{
        unsigned long long this = bit->time;
        unsigned long long last = pdi->last_reported_time;

        pdi->backwards = (this < last) ? 'B' : ' ';
        pdi->last_reported_time = this;
}
static inline void __account_m(struct io_stats *ios, struct blk_io_trace *t,
                               int rw)
{
        if (rw) {
                ios->mwrites++;
                ios->qwrite_kb += t_kb(t);
        } else {
                ios->mreads++;
                ios->qread_kb += t_kb(t);
        }
}

static inline void account_m(struct blk_io_trace *t, struct per_cpu_info *pci,
                             int rw)
{
        __account_m(&pci->io_stats, t, rw);

        if (per_process_stats) {
                struct io_stats *ios = find_process_io_stats(t->pid, t->comm);

                __account_m(ios, t, rw);
        }
}

static inline void __account_queue(struct io_stats *ios, struct blk_io_trace *t,
                                   int rw)
{
        if (rw) {
                ios->qwrites++;
                ios->qwrite_kb += t_kb(t);
        } else {
                ios->qreads++;
                ios->qread_kb += t_kb(t);
        }
}

static inline void account_queue(struct blk_io_trace *t,
                                 struct per_cpu_info *pci, int rw)
{
        __account_queue(&pci->io_stats, t, rw);

        if (per_process_stats) {
                struct io_stats *ios = find_process_io_stats(t->pid, t->comm);

                __account_queue(ios, t, rw);
        }
}

static inline void __account_c(struct io_stats *ios, int rw, int bytes)
{
        if (rw) {
                ios->cwrites++;
                ios->cwrite_kb += bytes >> 10;
        } else {
                ios->creads++;
                ios->cread_kb += bytes >> 10;
        }
}

static inline void account_c(struct blk_io_trace *t, struct per_cpu_info *pci,
                             int rw, int bytes)
{
        __account_c(&pci->io_stats, rw, bytes);

        if (per_process_stats) {
                struct io_stats *ios = find_process_io_stats(t->pid, t->comm);

                __account_c(ios, rw, bytes);
        }
}

static inline void __account_issue(struct io_stats *ios, int rw,
                                   unsigned int bytes)
{
        if (rw) {
                ios->iwrites++;
                ios->iwrite_kb += bytes >> 10;
        } else {
                ios->ireads++;
                ios->iread_kb += bytes >> 10;
        }
}

static inline void account_issue(struct blk_io_trace *t,
                                 struct per_cpu_info *pci, int rw)
{
        __account_issue(&pci->io_stats, rw, t->bytes);

        if (per_process_stats) {
                struct io_stats *ios = find_process_io_stats(t->pid, t->comm);

                __account_issue(ios, rw, t->bytes);
        }
}

static inline void __account_unplug(struct io_stats *ios, int timer)
{
        if (timer)
                ios->timer_unplugs++;
        else
                ios->io_unplugs++;
}

static inline void account_unplug(struct blk_io_trace *t,
                                  struct per_cpu_info *pci, int timer)
{
        __account_unplug(&pci->io_stats, timer);

        if (per_process_stats) {
                struct io_stats *ios = find_process_io_stats(t->pid, t->comm);

                __account_unplug(ios, timer);
        }
}
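/*
 * a requeued request was already counted at issue time, so the requeue
 * accounting below backs those kilobytes out of the issue totals
 */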
static inline void __account_requeue(struct io_stats *ios,
                                     struct blk_io_trace *t, int rw)
{
        if (rw) {
                ios->wrqueue++;
                ios->iwrite_kb -= t_kb(t);
        } else {
                ios->rrqueue++;
                ios->iread_kb -= t_kb(t);
        }
}

static inline void account_requeue(struct blk_io_trace *t,
                                   struct per_cpu_info *pci, int rw)
{
        __account_requeue(&pci->io_stats, t, rw);

        if (per_process_stats) {
                struct io_stats *ios = find_process_io_stats(t->pid, t->comm);

                __account_requeue(ios, t, rw);
        }
}
static void log_complete(struct per_dev_info *pdi, struct per_cpu_info *pci,
                         struct blk_io_trace *t, char *act)
{
        process_fmt(act, pci, t, log_track_complete(pdi, t), 0, NULL);
}

static void log_insert(struct per_dev_info *pdi, struct per_cpu_info *pci,
                       struct blk_io_trace *t, char *act)
{
        process_fmt(act, pci, t, log_track_insert(pdi, t), 0, NULL);
}

static void log_queue(struct per_cpu_info *pci, struct blk_io_trace *t,
                      char *act)
{
        process_fmt(act, pci, t, -1, 0, NULL);
}

static void log_issue(struct per_dev_info *pdi, struct per_cpu_info *pci,
                      struct blk_io_trace *t, char *act)
{
        process_fmt(act, pci, t, log_track_issue(pdi, t), 0, NULL);
}

static void log_merge(struct per_dev_info *pdi, struct per_cpu_info *pci,
                      struct blk_io_trace *t, char *act)
{
        if (act[0] == 'F')
                log_track_frontmerge(pdi, t);

        process_fmt(act, pci, t, -1ULL, 0, NULL);
}

static void log_action(struct per_cpu_info *pci, struct blk_io_trace *t,
                       char *act)
{
        process_fmt(act, pci, t, -1ULL, 0, NULL);
}

static void log_generic(struct per_cpu_info *pci, struct blk_io_trace *t,
                        char *act)
{
        process_fmt(act, pci, t, -1ULL, 0, NULL);
}

static void log_unplug(struct per_cpu_info *pci, struct blk_io_trace *t,
                       char *act)
{
        process_fmt(act, pci, t, -1ULL, 0, NULL);
}

static void log_split(struct per_cpu_info *pci, struct blk_io_trace *t,
                      char *act)
{
        process_fmt(act, pci, t, -1ULL, 0, NULL);
}

static void log_pc(struct per_cpu_info *pci, struct blk_io_trace *t, char *act)
{
        unsigned char *buf = (unsigned char *) t + sizeof(*t);

        process_fmt(act, pci, t, -1ULL, t->pdu_len, buf);
}
static void dump_trace_pc(struct blk_io_trace *t, struct per_cpu_info *pci)
{
        int act = t->action & 0xffff;

        switch (act) {
                case __BLK_TA_QUEUE:
                        log_generic(pci, t, "Q");
                        break;
                case __BLK_TA_GETRQ:
                        log_generic(pci, t, "G");
                        break;
                case __BLK_TA_SLEEPRQ:
                        log_generic(pci, t, "S");
                        break;
                case __BLK_TA_REQUEUE:
                        log_generic(pci, t, "R");
                        break;
                case __BLK_TA_ISSUE:
                        log_pc(pci, t, "D");
                        break;
                case __BLK_TA_COMPLETE:
                        log_pc(pci, t, "C");
                        break;
                case __BLK_TA_INSERT:
                        log_pc(pci, t, "I");
                        break;
                default:
                        fprintf(stderr, "Bad pc action %x\n", act);
                        break;
        }
}
static void dump_trace_fs(struct blk_io_trace *t, struct per_dev_info *pdi,
                          struct per_cpu_info *pci)
{
        int w = (t->action & BLK_TC_ACT(BLK_TC_WRITE)) != 0;
        int act = t->action & 0xffff;

        switch (act) {
                case __BLK_TA_QUEUE:
                        account_queue(t, pci, w);
                        log_queue(pci, t, "Q");
                        break;
                case __BLK_TA_INSERT:
                        log_insert(pdi, pci, t, "I");
                        break;
                case __BLK_TA_BACKMERGE:
                        account_m(t, pci, w);
                        log_merge(pdi, pci, t, "M");
                        break;
                case __BLK_TA_FRONTMERGE:
                        account_m(t, pci, w);
                        log_merge(pdi, pci, t, "F");
                        break;
                case __BLK_TA_GETRQ:
                        log_track_getrq(pdi, t);
                        log_generic(pci, t, "G");
                        break;
                case __BLK_TA_SLEEPRQ:
                        log_generic(pci, t, "S");
                        break;
                case __BLK_TA_REQUEUE:
                        pdi->cur_depth[w]--;
                        account_requeue(t, pci, w);
                        log_queue(pci, t, "R");
                        break;
                case __BLK_TA_ISSUE:
                        account_issue(t, pci, w);
                        pdi->cur_depth[w]++;
                        if (pdi->cur_depth[w] > pdi->max_depth[w])
                                pdi->max_depth[w] = pdi->cur_depth[w];
                        log_issue(pdi, pci, t, "D");
                        break;
                case __BLK_TA_COMPLETE:
                        pdi->cur_depth[w]--;
                        account_c(t, pci, w, t->bytes);
                        log_complete(pdi, pci, t, "C");
                        break;
                case __BLK_TA_PLUG:
                        log_action(pci, t, "P");
                        break;
                case __BLK_TA_UNPLUG_IO:
                        account_unplug(t, pci, 0);
                        log_unplug(pci, t, "U");
                        break;
                case __BLK_TA_UNPLUG_TIMER:
                        account_unplug(t, pci, 1);
                        log_unplug(pci, t, "UT");
                        break;
                case __BLK_TA_SPLIT:
                        log_split(pci, t, "X");
                        break;
                case __BLK_TA_BOUNCE:
                        log_generic(pci, t, "B");
                        break;
                case __BLK_TA_REMAP:
                        log_generic(pci, t, "A");
                        break;
                default:
                        fprintf(stderr, "Bad fs action %x\n", t->action);
                        break;
        }
}
static void dump_trace(struct blk_io_trace *t, struct per_cpu_info *pci,
                       struct per_dev_info *pdi)
{
        if (t->action & BLK_TC_ACT(BLK_TC_PC))
                dump_trace_pc(t, pci);
        else
                dump_trace_fs(t, pdi, pci);

        if (!pdi->events)
                pdi->first_reported_time = t->time;

        pdi->events++;
}

/*
 * print in a proper way, not too small and not too big. if more than
 * 1,000,000K, turn into M and so on
 */
static char *size_cnv(char *dst, unsigned long long num, int in_kb)
{
        char suff[] = { '\0', 'K', 'M', 'G', 'P' };
        unsigned int i = 0;

        if (in_kb)
                i++;

        while (num > 1000 * 1000ULL && (i < sizeof(suff) - 1)) {
                num /= 1000;
                i++;
        }

        sprintf(dst, "%'8Lu%c", num, suff[i]);
        return dst;
}
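/*
 * e.g. size_cnv(buf, 2048, 1) formats as "2,048K" (the %' grouping
 * relies on the locale set in main()); anything above 1,000,000 in the
 * current unit is divided down and the suffix bumped
 */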
static void dump_io_stats(struct per_dev_info *pdi, struct io_stats *ios,
                          char *msg)
{
        static char x[256], y[256];

        fprintf(ofp, "%s\n", msg);

        fprintf(ofp, " Reads Queued: %s, %siB\t", size_cnv(x, ios->qreads, 0), size_cnv(y, ios->qread_kb, 1));
        fprintf(ofp, " Writes Queued: %s, %siB\n", size_cnv(x, ios->qwrites, 0), size_cnv(y, ios->qwrite_kb, 1));

        fprintf(ofp, " Read Dispatches: %s, %siB\t", size_cnv(x, ios->ireads, 0), size_cnv(y, ios->iread_kb, 1));
        fprintf(ofp, " Write Dispatches: %s, %siB\n", size_cnv(x, ios->iwrites, 0), size_cnv(y, ios->iwrite_kb, 1));
        fprintf(ofp, " Reads Requeued: %s\t\t", size_cnv(x, ios->rrqueue, 0));
        fprintf(ofp, " Writes Requeued: %s\n", size_cnv(x, ios->wrqueue, 0));
        fprintf(ofp, " Reads Completed: %s, %siB\t", size_cnv(x, ios->creads, 0), size_cnv(y, ios->cread_kb, 1));
        fprintf(ofp, " Writes Completed: %s, %siB\n", size_cnv(x, ios->cwrites, 0), size_cnv(y, ios->cwrite_kb, 1));
        fprintf(ofp, " Read Merges: %'8lu%8c\t", ios->mreads, ' ');
        fprintf(ofp, " Write Merges: %'8lu\n", ios->mwrites);
        if (pdi) {
                fprintf(ofp, " Read depth: %'8u%8c\t", pdi->max_depth[0], ' ');
                fprintf(ofp, " Write depth: %'8u\n", pdi->max_depth[1]);
        }
        fprintf(ofp, " IO unplugs: %'8lu%8c\t", ios->io_unplugs, ' ');
        fprintf(ofp, " Timer unplugs: %'8lu\n", ios->timer_unplugs);
}
static void dump_wait_stats(struct per_process_info *ppi)
{
        unsigned long rawait = ppi->longest_allocation_wait[0] / 1000;
        unsigned long rdwait = ppi->longest_dispatch_wait[0] / 1000;
        unsigned long rcwait = ppi->longest_completion_wait[0] / 1000;
        unsigned long wawait = ppi->longest_allocation_wait[1] / 1000;
        unsigned long wdwait = ppi->longest_dispatch_wait[1] / 1000;
        unsigned long wcwait = ppi->longest_completion_wait[1] / 1000;

        fprintf(ofp, " Allocation wait: %'8lu%8c\t", rawait, ' ');
        fprintf(ofp, " Allocation wait: %'8lu\n", wawait);
        fprintf(ofp, " Dispatch wait: %'8lu%8c\t", rdwait, ' ');
        fprintf(ofp, " Dispatch wait: %'8lu\n", wdwait);
        fprintf(ofp, " Completion wait: %'8lu%8c\t", rcwait, ' ');
        fprintf(ofp, " Completion wait: %'8lu\n", wcwait);
}
static int ppi_name_compare(const void *p1, const void *p2)
{
        struct per_process_info *ppi1 = *((struct per_process_info **) p1);
        struct per_process_info *ppi2 = *((struct per_process_info **) p2);
        int res;

        res = strverscmp(ppi1->name, ppi2->name);
        if (!res)
                res = ppi1->pid > ppi2->pid;

        return res;
}

static void sort_process_list(void)
{
        struct per_process_info **ppis;
        struct per_process_info *ppi;
        int i = 0;

        ppis = malloc(ppi_list_entries * sizeof(struct per_process_info *));

        ppi = ppi_list;
        while (ppi) {
                ppis[i++] = ppi;
                ppi = ppi->list_next;
        }

        qsort(ppis, ppi_list_entries, sizeof(ppi), ppi_name_compare);

        i = ppi_list_entries - 1;
        ppi_list = NULL;
        while (i >= 0) {
                ppi = ppis[i];
                ppi->list_next = ppi_list;
                ppi_list = ppi;
                i--;
        }

        free(ppis);
}
static void show_process_stats(void)
{
        struct per_process_info *ppi;

        sort_process_list();

        ppi = ppi_list;
        while (ppi) {
                char name[64];

                if (ppi->more_than_one)
                        sprintf(name, "%s (%u, ...)", ppi->name, ppi->pid);
                else
                        sprintf(name, "%s (%u)", ppi->name, ppi->pid);

                dump_io_stats(NULL, &ppi->io_stats, name);
                dump_wait_stats(ppi);
                ppi = ppi->list_next;
        }
}
static void show_device_and_cpu_stats(void)
{
        struct per_dev_info *pdi;
        struct per_cpu_info *pci;
        struct io_stats total, *ios;
        unsigned long long rrate, wrate, msec;
        int i, j, pci_events;
        char line[3 + 8/*cpu*/ + 2 + 32/*dev*/ + 3];
        char name[32];

        for (pdi = devices, i = 0; i < ndevices; i++, pdi++) {

                memset(&total, 0, sizeof(total));
                pci_events = 0;

                for (pci = pdi->cpus, j = 0; j < pdi->ncpus; j++, pci++) {

                        ios = &pci->io_stats;
                        total.qreads += ios->qreads;
                        total.qwrites += ios->qwrites;
                        total.creads += ios->creads;
                        total.cwrites += ios->cwrites;
                        total.mreads += ios->mreads;
                        total.mwrites += ios->mwrites;
                        total.ireads += ios->ireads;
                        total.iwrites += ios->iwrites;
                        total.rrqueue += ios->rrqueue;
                        total.wrqueue += ios->wrqueue;
                        total.qread_kb += ios->qread_kb;
                        total.qwrite_kb += ios->qwrite_kb;
                        total.cread_kb += ios->cread_kb;
                        total.cwrite_kb += ios->cwrite_kb;
                        total.iread_kb += ios->iread_kb;
                        total.iwrite_kb += ios->iwrite_kb;
                        total.timer_unplugs += ios->timer_unplugs;
                        total.io_unplugs += ios->io_unplugs;

                        snprintf(line, sizeof(line) - 1, "CPU%d (%s):",
                                 j, get_dev_name(pdi, name, sizeof(name)));
                        dump_io_stats(pdi, ios, line);
                        pci_events++;
                }

                if (pci_events > 1) {
                        fprintf(ofp, "\n");
                        snprintf(line, sizeof(line) - 1, "Total (%s):",
                                 get_dev_name(pdi, name, sizeof(name)));
                        dump_io_stats(NULL, &total, line);
                }

                wrate = rrate = 0;
                msec = (pdi->last_reported_time - pdi->first_reported_time) / 1000000;
                if (msec) {
                        rrate = 1000 * total.cread_kb / msec;
                        wrate = 1000 * total.cwrite_kb / msec;
                }

                fprintf(ofp, "\nThroughput (R/W): %'LuKiB/s / %'LuKiB/s\n",
                        rrate, wrate);
                fprintf(ofp, "Events (%s): %'Lu entries\n",
                        get_dev_name(pdi, line, sizeof(line)), pdi->events);

                collect_pdi_skips(pdi);
                fprintf(ofp, "Skips: %'lu forward (%'llu - %5.1lf%%)\n",
                        pdi->skips, pdi->seq_skips,
                        100.0 * ((double)pdi->seq_skips /
                                (double)(pdi->events + pdi->seq_skips)));
        }
}
static void find_genesis(void)
{
        struct trace *t = trace_list;

        genesis_time = -1ULL;
        while (t != NULL) {
                if (t->bit->time < genesis_time)
                        genesis_time = t->bit->time;

                t = t->next;
        }
}

static inline int check_stopwatch(struct blk_io_trace *bit)
{
        if (bit->time < stopwatch_end &&
            bit->time >= stopwatch_start)
                return 0;

        return 1;
}
/*
 * return youngest entry read
 */
static int sort_entries(unsigned long long *youngest)
{
        struct per_dev_info *pdi = NULL;
        struct per_cpu_info *pci = NULL;
        struct trace *t;

        if (!genesis_time)
                find_genesis();

        *youngest = 0;
        while ((t = trace_list) != NULL) {
                struct blk_io_trace *bit = t->bit;

                trace_list = t->next;

                bit->time -= genesis_time;

                if (bit->time < *youngest || !*youngest)
                        *youngest = bit->time;

                if (!pdi || pdi->dev != bit->device) {
                        pdi = get_dev_info(bit->device);
                        pci = NULL;
                }

                if (!pci || pci->cpu != bit->cpu)
                        pci = get_cpu_info(pdi, bit->cpu);

                if (bit->sequence < pci->smallest_seq_read)
                        pci->smallest_seq_read = bit->sequence;

                if (check_stopwatch(bit)) {
                        bit_free(bit);
                        t_free(t);
                        continue;
                }

                if (trace_rb_insert_sort(t))
                        return -1;
        }

        return 0;
}
/*
 * to continue, we must have traces from all online cpus in the tree
 */
static int check_cpu_map(struct per_dev_info *pdi)
{
        unsigned long *cpu_map;
        struct rb_node *n;
        struct trace *__t;
        unsigned int i;
        int ret, cpu;

        /*
         * create a map of the cpus we have traces for
         */
        cpu_map = malloc(pdi->cpu_map_max / sizeof(long));
        memset(cpu_map, 0, pdi->cpu_map_max / sizeof(long));
        n = rb_first(&rb_sort_root);
        while (n) {
                __t = rb_entry(n, struct trace, rb_node);
                cpu = __t->bit->cpu;

                cpu_map[CPU_IDX(cpu)] |= (1UL << CPU_BIT(cpu));
                n = rb_next(n);
        }

        /*
         * we can't continue if pdi->cpu_map has entries set that we don't
         * have in the sort rbtree. the opposite is not a problem, though
         */
        ret = 0;
        for (i = 0; i < pdi->cpu_map_max / CPUS_PER_LONG; i++) {
                if (pdi->cpu_map[i] & ~(cpu_map[i])) {
                        ret = 1;
                        break;
                }
        }

        free(cpu_map);
        return ret;
}
static int check_sequence(struct per_dev_info *pdi, struct trace *t, int force)
{
        struct blk_io_trace *bit = t->bit;
        unsigned long expected_sequence;
        struct per_cpu_info *pci;
        struct trace *__t;

        pci = get_cpu_info(pdi, bit->cpu);
        expected_sequence = pci->last_sequence + 1;

        if (!expected_sequence) {
                /*
                 * 1 should be the first entry, just allow it
                 */
                if (bit->sequence == 1)
                        return 0;
                if (bit->sequence == pci->smallest_seq_read)
                        return 0;

                return check_cpu_map(pdi);
        }

        if (bit->sequence == expected_sequence)
                return 0;

        /*
         * we may not have seen that sequence yet. if we are not doing
         * the final run, break and wait for more entries.
         */
        if (expected_sequence < pci->smallest_seq_read) {
                __t = trace_rb_find_last(pdi, pci, expected_sequence);
                if (!__t)
                        goto skip;

                __put_trace_last(pdi, __t);
                return 0;
        } else if (!force) {
                return 1;
        } else {
skip:
                if (check_current_skips(pci, bit->sequence))
                        return 0;

                if (expected_sequence < bit->sequence)
                        insert_skip(pci, expected_sequence, bit->sequence - 1);
                return 0;
        }
}
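/*
 * show_entries_rb() drains the sort tree in time order; unless force
 * is set, it stops at last_allowed_time or on a sequence gap
 */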
static void show_entries_rb(int force)
{
        struct per_dev_info *pdi = NULL;
        struct per_cpu_info *pci = NULL;
        struct blk_io_trace *bit;
        struct rb_node *n;
        struct trace *t;

        while ((n = rb_first(&rb_sort_root)) != NULL) {
                if (is_done() && !force && !pipeline)
                        break;

                t = rb_entry(n, struct trace, rb_node);
                bit = t->bit;

                if (!pdi || pdi->dev != bit->device) {
                        pdi = get_dev_info(bit->device);
                        pci = NULL;
                }

                if (!pdi) {
                        fprintf(stderr, "Unknown device ID? (%d,%d)\n",
                                MAJOR(bit->device), MINOR(bit->device));
                        break;
                }

                if (check_sequence(pdi, t, force))
                        break;

                if (!force && bit->time > last_allowed_time)
                        break;

                check_time(pdi, bit);

                if (!pci || pci->cpu != bit->cpu)
                        pci = get_cpu_info(pdi, bit->cpu);

                pci->last_sequence = bit->sequence;

                if (bit->action & (act_mask << BLK_TC_SHIFT))
                        dump_trace(bit, pci, pdi);

                put_trace(pdi, t);
        }
}
static int read_data(int fd, void *buffer, int bytes, int block, int *fdblock)
{
        int ret, bytes_left, fl;
        void *p;

        if (block != *fdblock) {
                fl = fcntl(fd, F_GETFL);

                if (!block) {
                        *fdblock = 0;
                        fcntl(fd, F_SETFL, fl | O_NONBLOCK);
                } else {
                        *fdblock = 1;
                        fcntl(fd, F_SETFL, fl & ~O_NONBLOCK);
                }
        }

        bytes_left = bytes;
        p = buffer;
        while (bytes_left > 0) {
                ret = read(fd, p, bytes_left);
                if (!ret)
                        return 1;
                else if (ret < 0) {
                        if (errno != EAGAIN) {
                                perror("read");
                                return -1;
                        }

                        /*
                         * never do partial reads. we can return if we
                         * didn't read anything and we should not block,
                         * otherwise wait for data
                         */
                        if ((bytes_left == bytes) && !block)
                                return 1;

                        usleep(10);
                } else {
                        p += ret;
                        bytes_left -= ret;
                }
        }

        return 0;
}
static int read_events(int fd, int always_block, int *fdblock)
{
        struct per_dev_info *pdi = NULL;
        unsigned int events = 0;

        while (!is_done() && events < rb_batch) {
                struct blk_io_trace *bit;
                struct trace *t;
                int pdu_len, should_block, ret;
                __u32 magic;

                bit = bit_alloc();

                should_block = !events || always_block;

                ret = read_data(fd, bit, sizeof(*bit), should_block, fdblock);
                if (ret) {
                        bit_free(bit);
                        if (!events && ret < 0)
                                events = ret;
                        break;
                }

                magic = be32_to_cpu(bit->magic);
                if ((magic & 0xffffff00) != BLK_IO_TRACE_MAGIC) {
                        fprintf(stderr, "Bad magic %x\n", magic);
                        break;
                }

                pdu_len = be16_to_cpu(bit->pdu_len);
                if (pdu_len) {
                        void *ptr = realloc(bit, sizeof(*bit) + pdu_len);

                        if (read_data(fd, ptr + sizeof(*bit), pdu_len, 1, fdblock)) {
                                bit_free(ptr);
                                break;
                        }

                        bit = ptr;
                }

                trace_to_cpu(bit);

                if (verify_trace(bit)) {
                        bit_free(bit);
                        continue;
                }

                t = t_alloc();
                memset(t, 0, sizeof(*t));
                t->bit = bit;

                t->next = trace_list;
                trace_list = t;

                if (!pdi || pdi->dev != bit->device)
                        pdi = get_dev_info(bit->device);

                if (bit->time > pdi->last_read_time)
                        pdi->last_read_time = bit->time;

                events++;
        }

        return events;
}
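/*
 * do_file() reads each per-cpu trace file in rb_batch sized chunks;
 * last_allowed_time tracks the smallest last_read_time over all
 * devices, bounding how far the sorted output may be emitted
 */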
static int do_file(void)
{
        struct per_cpu_info *pci;
        struct per_dev_info *pdi;
        int i, j, events, events_added;

        /*
         * first prepare all files for reading
         */
        for (i = 0; i < ndevices; i++) {
                pdi = &devices[i];

                pci = get_cpu_info(pdi, j);

                p = strdup(pdi->name);
                if (strcmp(dname, ".")) {
                        p = strdup(pdi->name);
                        strcpy(pdi->name, basename(p));
                }

                len = sprintf(pci->fname, "%s/", input_dir);
                snprintf(pci->fname + len, sizeof(pci->fname) - 1 - len,
                         "%s.blktrace.%d", pdi->name, pci->cpu);
                if (stat(pci->fname, &st) < 0)
                        continue;
                pci->fd = open(pci->fname, O_RDONLY);

                printf("Input file %s added\n", pci->fname);
                cpu_mark_online(pdi, pci->cpu);
        }

        /*
         * now loop over the files reading in the data
         */
        do {
                unsigned long long youngest;

                events_added = 0;
                last_allowed_time = -1ULL;

                for (i = 0; i < ndevices; i++) {
                        pdi = &devices[i];
                        pdi->last_read_time = -1ULL;

                        for (j = 0; j < pdi->nfiles; j++) {
                                pci = get_cpu_info(pdi, j);

                                pci->smallest_seq_read = -1;

                                events = read_events(pci->fd, 1, &pci->fdblock);
                                if (!events)
                                        cpu_mark_offline(pdi, pci->cpu);

                                if (pdi->last_read_time < last_allowed_time)
                                        last_allowed_time = pdi->last_read_time;

                                events_added += events;
                        }
                }

                if (sort_entries(&youngest))
                        break;

                if (youngest > stopwatch_end)
                        break;

                show_entries_rb(0);
        } while (events_added);

        if (rb_sort_entries)
                show_entries_rb(1);

        return 0;
}
static int do_stdin(void)
{
        unsigned long long youngest;
        int fd, events, fdblock;

        last_allowed_time = -1ULL;
        fd = dup(STDIN_FILENO);
        if (fd == -1) {
                perror("dup stdin");
                return -1;
        }

        fdblock = -1;
        while ((events = read_events(fd, 0, &fdblock)) > 0) {

                smallest_seq_read = -1U;

                if (sort_entries(&youngest))
                        break;

                if (youngest > stopwatch_end)
                        break;

                show_entries_rb(0);
        }

        if (rb_sort_entries)
                show_entries_rb(1);

        close(fd);
        return 0;
}
static void show_stats(void)
{
        if (stats_printed)
                return;
        stats_printed = 1;

        if (per_process_stats)
                show_process_stats();

        if (per_device_and_cpu_stats)
                show_device_and_cpu_stats();
}
static void handle_sigint(__attribute__((__unused__)) int sig)
{
        done = 1;
}
/*
 * Extract start and end times from a string, allowing us to specify
 * a time interval of interest within a trace.
 * Format: "end" (start is zero) or "start:end".
 */
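/*
 * e.g. "-w 10" keeps events in [0,10) seconds and "-w 2:10" keeps
 * [2,10); check_stopwatch() above applies the half-open comparison
 */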
static int find_stopwatch_interval(char *string)
{
        double value;
        char *sp;

        value = strtod(string, &sp);
        if (sp == string) {
                fprintf(stderr, "Invalid stopwatch timer: %s\n", string);
                return 1;
        }
        if (*sp == ':') {
                stopwatch_start = DOUBLE_TO_NANO_ULL(value);
                string = sp + 1;
                value = strtod(string, &sp);
                if (sp == string || *sp != '\0') {
                        fprintf(stderr, "Invalid stopwatch end time: %s\n",
                                string);
                        return 1;
                }
        } else if (*sp != '\0') {
                fprintf(stderr, "Invalid stopwatch start timer: %s\n", string);
                return 1;
        }
        stopwatch_end = DOUBLE_TO_NANO_ULL(value);
        if (stopwatch_end <= stopwatch_start) {
                fprintf(stderr, "Invalid stopwatch interval: %Lu -> %Lu\n",
                        stopwatch_start, stopwatch_end);
                return 1;
        }

        return 0;
}
static char usage_str[] = \
        "[ -i <input name> ] [ -o <output name> ] [ -s ] [ -t ] [ -q ]\n" \
        "[ -w start:stop ] [ -f output format ] [ -F format spec ] [ -v ]\n\n" \
        "\t-i Input file containing trace data, or '-' for stdin\n" \
        "\t-D Directory to prepend to input file names\n" \
        "\t-o Output file. If not given, output is stdout\n" \
        "\t-b stdin read batching\n" \
        "\t-s Show per-program io statistics\n" \
        "\t-h Hash processes by name, not pid\n" \
        "\t-t Track individual ios. Will tell you the time a request took\n" \
        "\t to get queued, to get dispatched, and to get completed\n" \
        "\t-q Quiet. Don't display any stats at the end of the trace\n" \
        "\t-w Only parse data between the given time interval in seconds.\n" \
        "\t If 'start' isn't given, blkparse defaults the start time to 0\n" \
        "\t-f Output format. Customize the output format. The format field\n" \
        "\t identifiers can be found in the documentation\n" \
        "\t-F Format specification. Can be found in the documentation\n" \
        "\t-v More verbose for marginal errors\n" \
        "\t-V Print program version info\n\n";
static void usage(char *prog)
{
        fprintf(stderr, "Usage: %s %s %s", prog, blkparse_version, usage_str);
}
int main(int argc, char *argv[])
        int i, c, ret, mode;
        int act_mask_tmp = 0;

        while ((c = getopt_long(argc, argv, S_OPTS, l_opts, NULL)) != -1) {
                i = find_mask_map(optarg);
                fprintf(stderr, "Invalid action mask %s\n",
                if ((sscanf(optarg, "%x", &i) != 1) ||
                    !valid_act_opt(i)) {
                        "Invalid set action mask %s/0x%x\n",
                if (!strcmp(optarg, "-") && !pipeline)
                else if (resize_devices(optarg) != 0)
                output_name = optarg;
                rb_batch = atoi(optarg);
                        rb_batch = RB_BATCH_DEFAULT;
                per_process_stats = 1;
                per_device_and_cpu_stats = 0;
                if (find_stopwatch_interval(optarg) != 0)
                set_all_format_specs(optarg);
                if (add_format_spec(optarg) != 0)
                ppi_hash_by_pid = 0;
                printf("%s version %s\n", argv[0], blkparse_version);

        while (optind < argc) {
                if (!strcmp(argv[optind], "-") && !pipeline)
                else if (resize_devices(argv[optind]) != 0)

        if (!pipeline && !ndevices) {

        if (act_mask_tmp != 0)
                act_mask = act_mask_tmp;

        memset(&rb_sort_root, 0, sizeof(rb_sort_root));

        signal(SIGINT, handle_sigint);
        signal(SIGHUP, handle_sigint);
        signal(SIGTERM, handle_sigint);

        setlocale(LC_NUMERIC, "en_US");

        ofp = fdopen(STDOUT_FILENO, "w");

        snprintf(ofname, sizeof(ofname) - 1, "%s", output_name);
        ofp = fopen(ofname, "w");

        ofp_buffer = malloc(4096);
        if (setvbuf(ofp, ofp_buffer, mode, 4096)) {