/*
 * block queue tracing parse application
 *
 * Copyright (C) 2005 Jens Axboe <axboe@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <fcntl.h>
#include <getopt.h>
#include <errno.h>
#include <signal.h>
#include <locale.h>
#include <libgen.h>

#include "blktrace.h"
#include "rbtree.h"
#include "jhash.h"
static char blkparse_version[] = "0.99";

struct skip_info {
	unsigned long start, end;
	struct skip_info *prev, *next;
};
struct per_dev_info {
	dev_t dev;
	char *name;

	int backwards;
	unsigned long long events;
	unsigned long long first_reported_time;
	unsigned long long last_reported_time;
	unsigned long long last_read_time;
	struct io_stats io_stats;
	unsigned long skips, nskips;
	unsigned long long seq_skips, seq_nskips;
	unsigned int max_depth[2];
	unsigned int cur_depth[2];

	struct rb_root rb_track;

	int nfiles;
	int ncpus;

	unsigned long *cpu_map;
	unsigned int cpu_map_max;

	struct per_cpu_info *cpus;
	struct skip_info *skips_head;
	struct skip_info *skips_tail;
};
struct per_process_info {
	char name[16];
	__u32 pid;
	struct io_stats io_stats;
	struct per_process_info *hash_next, *list_next;
	int more_than_one;

	unsigned long long longest_allocation_wait[2];
	unsigned long long longest_dispatch_wait[2];
	unsigned long long longest_completion_wait[2];
};
#define PPI_HASH_SHIFT	(8)
#define PPI_HASH_SIZE	(1 << PPI_HASH_SHIFT)
#define PPI_HASH_MASK	(PPI_HASH_SIZE - 1)
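/*
 * 256 hash buckets; processes are hashed by pid by default, or by
 * command name when hash-by-name (-h) is given.
 */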
static struct per_process_info *ppi_hash_table[PPI_HASH_SIZE];
static struct per_process_info *ppi_list;
static int ppi_list_entries;

#define S_OPTS	"a:A:i:o:b:stqw:f:F:vVhD:"
static struct option l_opts[] = {
		.has_arg = required_argument,
		.has_arg = required_argument,
		.has_arg = required_argument,
		.has_arg = required_argument,
		.has_arg = required_argument,
		.name = "per-program-stats",
		.has_arg = no_argument,
		.has_arg = no_argument,
		.has_arg = no_argument,
		.has_arg = required_argument,
		.has_arg = required_argument,
		.name = "format-spec",
		.has_arg = required_argument,
		.name = "hash-by-name",
		.has_arg = no_argument,
		.has_arg = no_argument,
		.has_arg = no_argument,
		.name = "input-directory",
		.has_arg = required_argument,
/*
 * for sorting the displayed output
 */
struct trace {
	struct blk_io_trace *bit;
	struct rb_node rb_node;
	struct trace *next;
};

static struct rb_root rb_sort_root;
static unsigned long rb_sort_entries;
static struct trace *trace_list;

static struct blk_io_trace *bit_alloc_list;
static struct trace *t_alloc_list;
/*
 * for tracking individual ios
 */
struct io_track {
	struct rb_node rb_node;
	__u64 sector;
	__u32 pid;
	char comm[16];
	unsigned long long allocation_time;
	unsigned long long queue_time;
	unsigned long long dispatch_time;
	unsigned long long completion_time;
};

static struct per_dev_info *devices;
static char *get_dev_name(struct per_dev_info *, char *, int);
static int trace_rb_insert_last(struct per_dev_info *, struct trace *);

static char *output_name;
static char *input_dir;

static unsigned long long genesis_time;
static unsigned long long last_allowed_time;
static unsigned long long stopwatch_start;	/* start from zero by default */
static unsigned long long stopwatch_end = -1ULL;	/* "infinity" */

static int per_process_stats;
static int per_device_and_cpu_stats = 1;
static int track_ios;
static int ppi_hash_by_pid = 1;

static unsigned int act_mask = -1U;
static int stats_printed;

static unsigned int t_alloc_cache;
static unsigned int bit_alloc_cache;

#define RB_BATCH_DEFAULT	(512)
static unsigned int rb_batch = RB_BATCH_DEFAULT;

#define is_done()	(*(volatile int *)(&done))
static volatile int done;

#define JHASH_RANDOM	(0x3af5f2ee)

#define CPUS_PER_LONG	(8 * sizeof(unsigned long))
#define CPU_IDX(cpu)	((cpu) / CPUS_PER_LONG)
#define CPU_BIT(cpu)	((cpu) & (CPUS_PER_LONG - 1))
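/*
 * Example: with 64-bit longs, CPU 70 lands in bitmap word CPU_IDX(70) == 1
 * at bit position CPU_BIT(70) == 6.
 */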
static void resize_cpu_info(struct per_dev_info *pdi, int cpu)
	struct per_cpu_info *cpus = pdi->cpus;
	int ncpus = pdi->ncpus;
	int new_count = cpu + 1;

	size = new_count * sizeof(struct per_cpu_info);
	cpus = realloc(cpus, size);
		fprintf(stderr, "Out of memory, CPU info for device %s (%d)\n",
			get_dev_name(pdi, name, sizeof(name)), size);

	new_start = (char *)cpus + (ncpus * sizeof(struct per_cpu_info));
	new_space = (new_count - ncpus) * sizeof(struct per_cpu_info);
	memset(new_start, 0, new_space);

	pdi->ncpus = new_count;

	for (new_count = 0; new_count < pdi->ncpus; new_count++) {
		struct per_cpu_info *pci = &pdi->cpus[new_count];

		memset(&pci->rb_last, 0, sizeof(pci->rb_last));
		pci->rb_last_entries = 0;
		pci->last_sequence = -1;
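/*
 * Return the per-CPU info slot for 'cpu' on this device, growing the
 * cpus array on demand.
 */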
static struct per_cpu_info *get_cpu_info(struct per_dev_info *pdi, int cpu)
	struct per_cpu_info *pci;

	if (cpu >= pdi->ncpus)
		resize_cpu_info(pdi, cpu);

	pci = &pdi->cpus[cpu];
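/*
 * Grow the global device table by one zeroed entry for 'name'.
 */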
static int resize_devices(char *name)
	int size = (ndevices + 1) * sizeof(struct per_dev_info);

	devices = realloc(devices, size);
		fprintf(stderr, "Out of memory, device %s (%d)\n", name, size);

	memset(&devices[ndevices], 0, sizeof(struct per_dev_info));
	devices[ndevices].name = name;
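/*
 * Look up the per-device state for 'dev', creating a fresh entry the
 * first time a device is seen.
 */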
static struct per_dev_info *get_dev_info(dev_t dev)
	struct per_dev_info *pdi;

	for (i = 0; i < ndevices; i++) {
		if (!devices[i].dev)
			devices[i].dev = dev;
		if (devices[i].dev == dev)
			return &devices[i];
	}

	if (resize_devices(NULL))

	pdi = &devices[ndevices - 1];
	pdi->first_reported_time = 0;
	pdi->last_read_time = 0;
	pdi->skips_head = pdi->skips_tail = NULL;
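/*
 * Remember a gap in the per-CPU sequence numbers, merging with an
 * adjacent skip_info entry when the ranges touch.
 */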
static void insert_skip(struct per_dev_info *pdi, unsigned long start,
			unsigned long end)
	struct skip_info *sip;

	for (sip = pdi->skips_tail; sip != NULL; sip = sip->prev) {
		if (end == (sip->start - 1)) {
		} else if (start == (sip->end + 1)) {

	sip = malloc(sizeof(struct skip_info));
	sip->prev = sip->next = NULL;
	if (pdi->skips_tail == NULL)
		pdi->skips_head = pdi->skips_tail = sip;
	else {
		sip->prev = pdi->skips_tail;
		pdi->skips_tail->next = sip;
		pdi->skips_tail = sip;
	}
static void remove_sip(struct per_dev_info *pdi, struct skip_info *sip)
	if (sip->prev == NULL) {
		if (sip->next == NULL)
			pdi->skips_head = pdi->skips_tail = NULL;
		else {
			pdi->skips_head = sip->next;
			sip->next->prev = NULL;
		}
	} else if (sip->next == NULL) {
		pdi->skips_tail = sip->prev;
		sip->prev->next = NULL;
	} else {
		sip->prev->next = sip->next;
		sip->next->prev = sip->prev;
	}

	sip->prev = sip->next = NULL;
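/*
 * IN_SKIP tests whether a sequence number lies inside a recorded gap;
 * check_current_skips() then shrinks or splits that gap once the
 * missing sequence number finally shows up.
 */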
#define IN_SKIP(sip, seq)	(((sip)->start <= (seq)) && ((seq) <= (sip)->end))

static int check_current_skips(struct per_dev_info *pdi, unsigned long seq)
	struct skip_info *sip;

	for (sip = pdi->skips_tail; sip != NULL; sip = sip->prev) {
		if (IN_SKIP(sip, seq)) {
			if (sip->start == seq) {
			} else if (sip->end == seq)
				insert_skip(pdi, seq + 1, sip->end);
static void collect_pdi_skips(struct per_dev_info *pdi)
	struct skip_info *sip;

	for (sip = pdi->skips_head; sip != NULL; sip = sip->next) {
		pdi->seq_skips += (sip->end - sip->start + 1);
		fprintf(stderr, "(%d,%d): skipping %lu -> %lu\n",
			MAJOR(pdi->dev), MINOR(pdi->dev),
			sip->start, sip->end);
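/*
 * Per-device bitmap of the CPUs we have seen trace data for.
 */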
static void cpu_mark_online(struct per_dev_info *pdi, unsigned int cpu)
	if (cpu >= pdi->cpu_map_max || !pdi->cpu_map) {
		int new_max = (cpu + CPUS_PER_LONG) & ~(CPUS_PER_LONG - 1);
		unsigned long *map = malloc(new_max / sizeof(long));

		memset(map, 0, new_max / sizeof(long));

			memcpy(map, pdi->cpu_map, pdi->cpu_map_max / sizeof(long));

		pdi->cpu_map_max = new_max;

	pdi->cpu_map[CPU_IDX(cpu)] |= (1UL << CPU_BIT(cpu));

static inline void cpu_mark_offline(struct per_dev_info *pdi, int cpu)
	pdi->cpu_map[CPU_IDX(cpu)] &= ~(1UL << CPU_BIT(cpu));

static inline int cpu_is_online(struct per_dev_info *pdi, int cpu)
	return (pdi->cpu_map[CPU_IDX(cpu)] & (1UL << CPU_BIT(cpu))) != 0;
static inline int ppi_hash_pid(__u32 pid)
	return jhash_1word(pid, JHASH_RANDOM) & PPI_HASH_MASK;

static inline int ppi_hash_name(const char *name)
	return jhash(name, 16, JHASH_RANDOM) & PPI_HASH_MASK;

static inline int ppi_hash(struct per_process_info *ppi)
		return ppi_hash_pid(ppi->pid);

	return ppi_hash_name(ppi->name);

static inline void add_process_to_hash(struct per_process_info *ppi)
	const int hash_idx = ppi_hash(ppi);

	ppi->hash_next = ppi_hash_table[hash_idx];
	ppi_hash_table[hash_idx] = ppi;

static inline void add_process_to_list(struct per_process_info *ppi)
	ppi->list_next = ppi_list;

static struct per_process_info *find_process_by_name(char *name)
	const int hash_idx = ppi_hash_name(name);
	struct per_process_info *ppi;

	ppi = ppi_hash_table[hash_idx];
		if (!strcmp(ppi->name, name))
		ppi = ppi->hash_next;

static struct per_process_info *find_process_by_pid(__u32 pid)
	const int hash_idx = ppi_hash_pid(pid);
	struct per_process_info *ppi;

	ppi = ppi_hash_table[hash_idx];
		ppi = ppi->hash_next;

static struct per_process_info *find_process(__u32 pid, char *name)
	struct per_process_info *ppi;

		return find_process_by_pid(pid);

	ppi = find_process_by_name(name);
	if (ppi && ppi->pid != pid)
		ppi->more_than_one = 1;
/*
 * struct trace and blktrace allocation cache, we do potentially
 * millions of mallocs for these structures while only using at most
 * a few thousand at the time
 */
static inline void t_free(struct trace *t)
	if (t_alloc_cache < 1024) {
		t->next = t_alloc_list;

static inline struct trace *t_alloc(void)
	struct trace *t = t_alloc_list;

		t_alloc_list = t->next;

	return malloc(sizeof(*t));

static inline void bit_free(struct blk_io_trace *bit)
	if (bit_alloc_cache < 1024 && !bit->pdu_len) {
		/*
		 * abuse a 64-bit field for a next pointer for the free item
		 */
		bit->time = (__u64) (unsigned long) bit_alloc_list;
		bit_alloc_list = (struct blk_io_trace *) bit;

static inline struct blk_io_trace *bit_alloc(void)
	struct blk_io_trace *bit = bit_alloc_list;

		bit_alloc_list = (struct blk_io_trace *) (unsigned long) \
				 bit->time;

	return malloc(sizeof(*bit));
static inline void __put_trace_last(struct per_dev_info *pdi, struct trace *t)
	struct per_cpu_info *pci = get_cpu_info(pdi, t->bit->cpu);

	rb_erase(&t->rb_node, &pci->rb_last);
	pci->rb_last_entries--;

static void put_trace(struct per_dev_info *pdi, struct trace *t)
	rb_erase(&t->rb_node, &rb_sort_root);

	trace_rb_insert_last(pdi, t);
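/*
 * Insert a trace into an rbtree ordered by time, then device, then
 * sequence number.
 */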
static inline int trace_rb_insert(struct trace *t, struct rb_root *root)
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;

		__t = rb_entry(parent, struct trace, rb_node);

		if (t->bit->time < __t->bit->time)
		else if (t->bit->time > __t->bit->time)
		else if (t->bit->device < __t->bit->device)
		else if (t->bit->device > __t->bit->device)
		else if (t->bit->sequence < __t->bit->sequence)
		else	/* >= sequence */

	rb_link_node(&t->rb_node, parent, p);
	rb_insert_color(&t->rb_node, root);
static inline int trace_rb_insert_sort(struct trace *t)
	if (!trace_rb_insert(t, &rb_sort_root)) {

static int trace_rb_insert_last(struct per_dev_info *pdi, struct trace *t)
	struct per_cpu_info *pci = get_cpu_info(pdi, t->bit->cpu);

	if (trace_rb_insert(t, &pci->rb_last))

	pci->rb_last_entries++;

	if (pci->rb_last_entries > rb_batch * pdi->nfiles) {
		struct rb_node *n = rb_first(&pci->rb_last);

		t = rb_entry(n, struct trace, rb_node);
		__put_trace_last(pdi, t);
static struct trace *trace_rb_find(dev_t device, unsigned long sequence,
				   struct rb_root *root, int order)
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;

		__t = rb_entry(n, struct trace, rb_node);

		if (device < __t->bit->device)
		else if (device > __t->bit->device)
		else if (sequence < __t->bit->sequence)
		else if (sequence > __t->bit->sequence)

	/*
	 * hack - the list may not be sequence ordered because some
	 * events don't have sequence and time matched. so we end up
	 * being a little off in the rb lookup here, because we don't
	 * know the time we are looking for. compensate by browsing
	 * a little ahead from the last entry to find the match
	 */
	while (((n = rb_next(prev)) != NULL) && max--) {
		__t = rb_entry(n, struct trace, rb_node);

		if (__t->bit->device == device &&
		    __t->bit->sequence == sequence)
static inline struct trace *trace_rb_find_last(struct per_dev_info *pdi,
					       struct per_cpu_info *pci,
					       unsigned long seq)
	return trace_rb_find(pdi->dev, seq, &pci->rb_last, 0);
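/*
 * rb_track holds one io_track per outstanding request, keyed by start
 * sector, so later events can be matched back to the original queue.
 */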
static inline int track_rb_insert(struct per_dev_info *pdi, struct io_track *iot)
	struct rb_node **p = &pdi->rb_track.rb_node;
	struct rb_node *parent = NULL;
	struct io_track *__iot;

		__iot = rb_entry(parent, struct io_track, rb_node);

		if (iot->sector < __iot->sector)
		else if (iot->sector > __iot->sector)

			fprintf(stderr,
				"sector alias (%Lu) on device %d,%d!\n",
				(unsigned long long) iot->sector,
				MAJOR(pdi->dev), MINOR(pdi->dev));

	rb_link_node(&iot->rb_node, parent, p);
	rb_insert_color(&iot->rb_node, &pdi->rb_track);
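/*
 * Find the io_track whose starting sector matches 'sector', if any.
 */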
static struct io_track *__find_track(struct per_dev_info *pdi, __u64 sector)
	struct rb_node *n = pdi->rb_track.rb_node;
	struct io_track *__iot;

		__iot = rb_entry(n, struct io_track, rb_node);

		if (sector < __iot->sector)
		else if (sector > __iot->sector)

static struct io_track *find_track(struct per_dev_info *pdi, __u32 pid,
				   char *comm, __u64 sector)
	struct io_track *iot;

	iot = __find_track(pdi, sector);
		iot = malloc(sizeof(*iot));
		memcpy(iot->comm, comm, sizeof(iot->comm));
		iot->sector = sector;
		track_rb_insert(pdi, iot);
static void log_track_frontmerge(struct per_dev_info *pdi,
				 struct blk_io_trace *t)
	struct io_track *iot;

	iot = __find_track(pdi, t->sector + t_sec(t));
		fprintf(stderr, "merge not found for (%d,%d): %llu\n",
			MAJOR(pdi->dev), MINOR(pdi->dev),
			(unsigned long long) t->sector + t_sec(t));

	rb_erase(&iot->rb_node, &pdi->rb_track);
	iot->sector -= t_sec(t);
	track_rb_insert(pdi, iot);

static void log_track_getrq(struct per_dev_info *pdi, struct blk_io_trace *t)
	struct io_track *iot;

	iot = find_track(pdi, t->pid, t->comm, t->sector);
	iot->allocation_time = t->time;
/*
 * return time between rq allocation and insertion
 */
static unsigned long long log_track_insert(struct per_dev_info *pdi,
					   struct blk_io_trace *t)
	unsigned long long elapsed;
	struct io_track *iot;

	iot = find_track(pdi, t->pid, t->comm, t->sector);
	iot->queue_time = t->time;

	if (!iot->allocation_time)

	elapsed = iot->queue_time - iot->allocation_time;

	if (per_process_stats) {
		struct per_process_info *ppi = find_process(iot->pid, iot->comm);
		int w = (t->action & BLK_TC_ACT(BLK_TC_WRITE)) != 0;

		if (ppi && elapsed > ppi->longest_allocation_wait[w])
			ppi->longest_allocation_wait[w] = elapsed;
/*
 * return time between queue and issue
 */
static unsigned long long log_track_issue(struct per_dev_info *pdi,
					  struct blk_io_trace *t)
	unsigned long long elapsed;
	struct io_track *iot;

	if ((t->action & BLK_TC_ACT(BLK_TC_FS)) == 0)

	iot = __find_track(pdi, t->sector);
		fprintf(stderr, "issue not found for (%d,%d): %llu\n",
			MAJOR(pdi->dev), MINOR(pdi->dev),
			(unsigned long long) t->sector);

	iot->dispatch_time = t->time;
	elapsed = iot->dispatch_time - iot->queue_time;

	if (per_process_stats) {
		struct per_process_info *ppi = find_process(iot->pid, iot->comm);
		int w = (t->action & BLK_TC_ACT(BLK_TC_WRITE)) != 0;

		if (ppi && elapsed > ppi->longest_dispatch_wait[w])
			ppi->longest_dispatch_wait[w] = elapsed;
/*
 * return time between dispatch and complete
 */
static unsigned long long log_track_complete(struct per_dev_info *pdi,
					     struct blk_io_trace *t)
	unsigned long long elapsed;
	struct io_track *iot;

	if ((t->action & BLK_TC_ACT(BLK_TC_FS)) == 0)

	iot = __find_track(pdi, t->sector);
		fprintf(stderr, "complete not found for (%d,%d): %llu\n",
			MAJOR(pdi->dev), MINOR(pdi->dev),
			(unsigned long long) t->sector);

	iot->completion_time = t->time;
	elapsed = iot->completion_time - iot->dispatch_time;

	if (per_process_stats) {
		struct per_process_info *ppi = find_process(iot->pid, iot->comm);
		int w = (t->action & BLK_TC_ACT(BLK_TC_WRITE)) != 0;

		if (ppi && elapsed > ppi->longest_completion_wait[w])
			ppi->longest_completion_wait[w] = elapsed;

	/*
	 * kill the trace, we don't need it after completion
	 */
	rb_erase(&iot->rb_node, &pdi->rb_track);
static struct io_stats *find_process_io_stats(__u32 pid, char *name)
	struct per_process_info *ppi = find_process(pid, name);

		ppi = malloc(sizeof(*ppi));
		memset(ppi, 0, sizeof(*ppi));
		memcpy(ppi->name, name, 16);
		add_process_to_hash(ppi);
		add_process_to_list(ppi);

	return &ppi->io_stats;
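/*
 * Format a device name: the user-supplied file name if we have one,
 * otherwise "major,minor".
 */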
static char *get_dev_name(struct per_dev_info *pdi, char *buffer, int size)
		snprintf(buffer, size, "%s", pdi->name);
		snprintf(buffer, size, "%d,%d", MAJOR(pdi->dev), MINOR(pdi->dev));
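/*
 * Flag events whose timestamps run backwards relative to the previous
 * event reported for this device.
 */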
static void check_time(struct per_dev_info *pdi, struct blk_io_trace *bit)
	unsigned long long this = bit->time;
	unsigned long long last = pdi->last_reported_time;

	pdi->backwards = (this < last) ? 'B' : ' ';
	pdi->last_reported_time = this;
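/*
 * The __account_* helpers update a single io_stats block; the account_*
 * wrappers apply them to the per-CPU stats and, with per-process stats
 * enabled, to the owning process as well.
 */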
static inline void __account_m(struct io_stats *ios, struct blk_io_trace *t,
			       int rw)
		ios->qwrite_kb += t_kb(t);
		ios->qread_kb += t_kb(t);

static inline void account_m(struct blk_io_trace *t, struct per_cpu_info *pci,
			     int rw)
	__account_m(&pci->io_stats, t, rw);

	if (per_process_stats) {
		struct io_stats *ios = find_process_io_stats(t->pid, t->comm);

		__account_m(ios, t, rw);

static inline void __account_queue(struct io_stats *ios, struct blk_io_trace *t,
				   int rw)
		ios->qwrite_kb += t_kb(t);
		ios->qread_kb += t_kb(t);

static inline void account_queue(struct blk_io_trace *t,
				 struct per_cpu_info *pci, int rw)
	__account_queue(&pci->io_stats, t, rw);

	if (per_process_stats) {
		struct io_stats *ios = find_process_io_stats(t->pid, t->comm);

		__account_queue(ios, t, rw);

static inline void __account_c(struct io_stats *ios, int rw, int bytes)
		ios->cwrite_kb += bytes >> 10;
		ios->cread_kb += bytes >> 10;

static inline void account_c(struct blk_io_trace *t, struct per_cpu_info *pci,
			     int rw, int bytes)
	__account_c(&pci->io_stats, rw, bytes);

	if (per_process_stats) {
		struct io_stats *ios = find_process_io_stats(t->pid, t->comm);

		__account_c(ios, rw, bytes);

static inline void __account_issue(struct io_stats *ios, int rw,
				   unsigned int bytes)
		ios->iwrite_kb += bytes >> 10;
		ios->iread_kb += bytes >> 10;

static inline void account_issue(struct blk_io_trace *t,
				 struct per_cpu_info *pci, int rw)
	__account_issue(&pci->io_stats, rw, t->bytes);

	if (per_process_stats) {
		struct io_stats *ios = find_process_io_stats(t->pid, t->comm);

		__account_issue(ios, rw, t->bytes);

static inline void __account_unplug(struct io_stats *ios, int timer)
		ios->timer_unplugs++;

static inline void account_unplug(struct blk_io_trace *t,
				  struct per_cpu_info *pci, int timer)
	__account_unplug(&pci->io_stats, timer);

	if (per_process_stats) {
		struct io_stats *ios = find_process_io_stats(t->pid, t->comm);

		__account_unplug(ios, timer);

static inline void __account_requeue(struct io_stats *ios,
				     struct blk_io_trace *t, int rw)
		ios->iwrite_kb -= t_kb(t);
		ios->iread_kb -= t_kb(t);

static inline void account_requeue(struct blk_io_trace *t,
				   struct per_cpu_info *pci, int rw)
	__account_requeue(&pci->io_stats, t, rw);

	if (per_process_stats) {
		struct io_stats *ios = find_process_io_stats(t->pid, t->comm);

		__account_requeue(ios, t, rw);
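/*
 * The log_* wrappers emit one formatted output line per event, passing
 * the tracked elapsed time to the formatter where one is available.
 */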
static void log_complete(struct per_dev_info *pdi, struct per_cpu_info *pci,
			 struct blk_io_trace *t, char *act)
	process_fmt(act, pci, t, log_track_complete(pdi, t), 0, NULL);

static void log_insert(struct per_dev_info *pdi, struct per_cpu_info *pci,
		       struct blk_io_trace *t, char *act)
	process_fmt(act, pci, t, log_track_insert(pdi, t), 0, NULL);

static void log_queue(struct per_cpu_info *pci, struct blk_io_trace *t,
		      char *act)
	process_fmt(act, pci, t, -1, 0, NULL);

static void log_issue(struct per_dev_info *pdi, struct per_cpu_info *pci,
		      struct blk_io_trace *t, char *act)
	process_fmt(act, pci, t, log_track_issue(pdi, t), 0, NULL);

static void log_merge(struct per_dev_info *pdi, struct per_cpu_info *pci,
		      struct blk_io_trace *t, char *act)
		log_track_frontmerge(pdi, t);

	process_fmt(act, pci, t, -1ULL, 0, NULL);

static void log_action(struct per_cpu_info *pci, struct blk_io_trace *t,
		       char *act)
	process_fmt(act, pci, t, -1ULL, 0, NULL);

static void log_generic(struct per_cpu_info *pci, struct blk_io_trace *t,
			char *act)
	process_fmt(act, pci, t, -1ULL, 0, NULL);

static void log_unplug(struct per_cpu_info *pci, struct blk_io_trace *t,
		       char *act)
	process_fmt(act, pci, t, -1ULL, 0, NULL);

static void log_split(struct per_cpu_info *pci, struct blk_io_trace *t,
		      char *act)
	process_fmt(act, pci, t, -1ULL, 0, NULL);

static void log_pc(struct per_cpu_info *pci, struct blk_io_trace *t, char *act)
	unsigned char *buf = (unsigned char *) t + sizeof(*t);

	process_fmt(act, pci, t, -1ULL, t->pdu_len, buf);
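/*
 * SCSI/pc command traces: dispatch on the action code and log with the
 * matching action character.
 */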
static void dump_trace_pc(struct blk_io_trace *t, struct per_cpu_info *pci)
	int act = t->action & 0xffff;

	case __BLK_TA_QUEUE:
		log_generic(pci, t, "Q");
	case __BLK_TA_GETRQ:
		log_generic(pci, t, "G");
	case __BLK_TA_SLEEPRQ:
		log_generic(pci, t, "S");
	case __BLK_TA_REQUEUE:
		log_generic(pci, t, "R");
	case __BLK_TA_ISSUE:
		log_pc(pci, t, "D");
	case __BLK_TA_COMPLETE:
		log_pc(pci, t, "C");
	case __BLK_TA_INSERT:
		log_pc(pci, t, "I");
		fprintf(stderr, "Bad pc action %x\n", act);
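/*
 * Filesystem request traces: update the accounting counters and emit
 * the per-action output line (Q, I, M/F, D, C, ...).
 */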
static void dump_trace_fs(struct blk_io_trace *t, struct per_dev_info *pdi,
			  struct per_cpu_info *pci)
	int w = (t->action & BLK_TC_ACT(BLK_TC_WRITE)) != 0;
	int act = t->action & 0xffff;

	case __BLK_TA_QUEUE:
		account_queue(t, pci, w);
		log_queue(pci, t, "Q");
	case __BLK_TA_INSERT:
		log_insert(pdi, pci, t, "I");
	case __BLK_TA_BACKMERGE:
		account_m(t, pci, w);
		log_merge(pdi, pci, t, "M");
	case __BLK_TA_FRONTMERGE:
		account_m(t, pci, w);
		log_merge(pdi, pci, t, "F");
	case __BLK_TA_GETRQ:
		log_track_getrq(pdi, t);
		log_generic(pci, t, "G");
	case __BLK_TA_SLEEPRQ:
		log_generic(pci, t, "S");
	case __BLK_TA_REQUEUE:
		pdi->cur_depth[w]--;
		account_requeue(t, pci, w);
		log_queue(pci, t, "R");
	case __BLK_TA_ISSUE:
		account_issue(t, pci, w);
		pdi->cur_depth[w]++;
		if (pdi->cur_depth[w] > pdi->max_depth[w])
			pdi->max_depth[w] = pdi->cur_depth[w];
		log_issue(pdi, pci, t, "D");
	case __BLK_TA_COMPLETE:
		pdi->cur_depth[w]--;
		account_c(t, pci, w, t->bytes);
		log_complete(pdi, pci, t, "C");
		log_action(pci, t, "P");
	case __BLK_TA_UNPLUG_IO:
		account_unplug(t, pci, 0);
		log_unplug(pci, t, "U");
	case __BLK_TA_UNPLUG_TIMER:
		account_unplug(t, pci, 1);
		log_unplug(pci, t, "UT");
	case __BLK_TA_SPLIT:
		log_split(pci, t, "X");
	case __BLK_TA_BOUNCE:
		log_generic(pci, t, "B");
	case __BLK_TA_REMAP:
		log_generic(pci, t, "A");
		fprintf(stderr, "Bad fs action %x\n", t->action);
static void dump_trace(struct blk_io_trace *t, struct per_cpu_info *pci,
		       struct per_dev_info *pdi)
	if (t->action & BLK_TC_ACT(BLK_TC_PC))
		dump_trace_pc(t, pci);
	else
		dump_trace_fs(t, pdi, pci);

	if (!pdi->events)
		pdi->first_reported_time = t->time;
/*
 * print in a proper way, not too small and not too big. if more than
 * 1,000,000K, turn into M and so on
 */
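/* e.g. with in_kb set, a count of 2500000 KiB is printed as roughly "2,500M" */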
static char *size_cnv(char *dst, unsigned long long num, int in_kb)
	char suff[] = { '\0', 'K', 'M', 'G', 'P' };

	while (num > 1000 * 1000ULL && (i < sizeof(suff) - 1)) {

	sprintf(dst, "%'8Lu%c", num, suff[i]);
static void dump_io_stats(struct per_dev_info *pdi, struct io_stats *ios,
			  char *msg)
	static char x[256], y[256];

	fprintf(ofp, "%s\n", msg);

	fprintf(ofp, " Reads Queued: %s, %siB\t", size_cnv(x, ios->qreads, 0), size_cnv(y, ios->qread_kb, 1));
	fprintf(ofp, " Writes Queued: %s, %siB\n", size_cnv(x, ios->qwrites, 0), size_cnv(y, ios->qwrite_kb, 1));
	fprintf(ofp, " Read Dispatches: %s, %siB\t", size_cnv(x, ios->ireads, 0), size_cnv(y, ios->iread_kb, 1));
	fprintf(ofp, " Write Dispatches: %s, %siB\n", size_cnv(x, ios->iwrites, 0), size_cnv(y, ios->iwrite_kb, 1));
	fprintf(ofp, " Reads Requeued: %s\t\t", size_cnv(x, ios->rrqueue, 0));
	fprintf(ofp, " Writes Requeued: %s\n", size_cnv(x, ios->wrqueue, 0));
	fprintf(ofp, " Reads Completed: %s, %siB\t", size_cnv(x, ios->creads, 0), size_cnv(y, ios->cread_kb, 1));
	fprintf(ofp, " Writes Completed: %s, %siB\n", size_cnv(x, ios->cwrites, 0), size_cnv(y, ios->cwrite_kb, 1));
	fprintf(ofp, " Read Merges: %'8lu%8c\t", ios->mreads, ' ');
	fprintf(ofp, " Write Merges: %'8lu\n", ios->mwrites);
	if (pdi) {
		fprintf(ofp, " Read depth: %'8u%8c\t", pdi->max_depth[0], ' ');
		fprintf(ofp, " Write depth: %'8u\n", pdi->max_depth[1]);
	}
	fprintf(ofp, " IO unplugs: %'8lu%8c\t", ios->io_unplugs, ' ');
	fprintf(ofp, " Timer unplugs: %'8lu\n", ios->timer_unplugs);
static void dump_wait_stats(struct per_process_info *ppi)
	unsigned long rawait = ppi->longest_allocation_wait[0] / 1000;
	unsigned long rdwait = ppi->longest_dispatch_wait[0] / 1000;
	unsigned long rcwait = ppi->longest_completion_wait[0] / 1000;
	unsigned long wawait = ppi->longest_allocation_wait[1] / 1000;
	unsigned long wdwait = ppi->longest_dispatch_wait[1] / 1000;
	unsigned long wcwait = ppi->longest_completion_wait[1] / 1000;

	fprintf(ofp, " Allocation wait: %'8lu%8c\t", rawait, ' ');
	fprintf(ofp, " Allocation wait: %'8lu\n", wawait);
	fprintf(ofp, " Dispatch wait: %'8lu%8c\t", rdwait, ' ');
	fprintf(ofp, " Dispatch wait: %'8lu\n", wdwait);
	fprintf(ofp, " Completion wait: %'8lu%8c\t", rcwait, ' ');
	fprintf(ofp, " Completion wait: %'8lu\n", wcwait);
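/*
 * Sort processes by name (version-aware compare), breaking ties by pid.
 */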
static int ppi_name_compare(const void *p1, const void *p2)
	struct per_process_info *ppi1 = *((struct per_process_info **) p1);
	struct per_process_info *ppi2 = *((struct per_process_info **) p2);

	res = strverscmp(ppi1->name, ppi2->name);
		res = ppi1->pid > ppi2->pid;
static void sort_process_list(void)
	struct per_process_info **ppis;
	struct per_process_info *ppi;

	ppis = malloc(ppi_list_entries * sizeof(struct per_process_info *));

		ppi = ppi->list_next;

	qsort(ppis, ppi_list_entries, sizeof(ppi), ppi_name_compare);

	i = ppi_list_entries - 1;
		ppi->list_next = ppi_list;
static void show_process_stats(void)
	struct per_process_info *ppi;

	sort_process_list();

		if (ppi->more_than_one)
			sprintf(name, "%s (%u, ...)", ppi->name, ppi->pid);
		else
			sprintf(name, "%s (%u)", ppi->name, ppi->pid);

		dump_io_stats(NULL, &ppi->io_stats, name);
		dump_wait_stats(ppi);
		ppi = ppi->list_next;
static void show_device_and_cpu_stats(void)
	struct per_dev_info *pdi;
	struct per_cpu_info *pci;
	struct io_stats total, *ios;
	unsigned long long rrate, wrate, msec;
	int i, j, pci_events;
	char line[3 + 8/*cpu*/ + 2 + 32/*dev*/ + 3];

	for (pdi = devices, i = 0; i < ndevices; i++, pdi++) {

		memset(&total, 0, sizeof(total));

		for (pci = pdi->cpus, j = 0; j < pdi->ncpus; j++, pci++) {

			ios = &pci->io_stats;
			total.qreads += ios->qreads;
			total.qwrites += ios->qwrites;
			total.creads += ios->creads;
			total.cwrites += ios->cwrites;
			total.mreads += ios->mreads;
			total.mwrites += ios->mwrites;
			total.ireads += ios->ireads;
			total.iwrites += ios->iwrites;
			total.rrqueue += ios->rrqueue;
			total.wrqueue += ios->wrqueue;
			total.qread_kb += ios->qread_kb;
			total.qwrite_kb += ios->qwrite_kb;
			total.cread_kb += ios->cread_kb;
			total.cwrite_kb += ios->cwrite_kb;
			total.iread_kb += ios->iread_kb;
			total.iwrite_kb += ios->iwrite_kb;
			total.timer_unplugs += ios->timer_unplugs;
			total.io_unplugs += ios->io_unplugs;

			snprintf(line, sizeof(line) - 1, "CPU%d (%s):",
				 j, get_dev_name(pdi, name, sizeof(name)));
			dump_io_stats(pdi, ios, line);

		if (pci_events > 1) {
			snprintf(line, sizeof(line) - 1, "Total (%s):",
				 get_dev_name(pdi, name, sizeof(name)));
			dump_io_stats(NULL, &total, line);

		msec = (pdi->last_reported_time - pdi->first_reported_time) / 1000000;
		rrate = 1000 * total.cread_kb / msec;
		wrate = 1000 * total.cwrite_kb / msec;

		fprintf(ofp, "\nThroughput (R/W): %'LuKiB/s / %'LuKiB/s\n",
			rrate, wrate);
		fprintf(ofp, "Events (%s): %'Lu entries\n",
			get_dev_name(pdi, line, sizeof(line)), pdi->events);

		collect_pdi_skips(pdi);
		fprintf(ofp, "Skips: %'lu forward (%'llu - %5.1lf%%)\n",
			pdi->skips, pdi->seq_skips,
			100.0 * ((double)pdi->seq_skips /
				(double)(pdi->events + pdi->seq_skips)));
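/*
 * genesis_time is the earliest timestamp seen across all traces; event
 * times are reported relative to it.
 */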
static void find_genesis(void)
	struct trace *t = trace_list;

	genesis_time = -1ULL;
		if (t->bit->time < genesis_time)
			genesis_time = t->bit->time;
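/*
 * Return non-zero if the event falls outside the -w stopwatch window.
 */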
static inline int check_stopwatch(struct blk_io_trace *bit)
	if (bit->time < stopwatch_end &&
	    bit->time >= stopwatch_start)
/*
 * return youngest entry read
 */
static int sort_entries(unsigned long long *youngest)
	struct per_dev_info *pdi = NULL;
	struct per_cpu_info *pci = NULL;

	while ((t = trace_list) != NULL) {
		struct blk_io_trace *bit = t->bit;

		trace_list = t->next;

		bit->time -= genesis_time;

		if (bit->time < *youngest || !*youngest)
			*youngest = bit->time;

		if (!pdi || pdi->dev != bit->device) {
			pdi = get_dev_info(bit->device);

		if (!pci || pci->cpu != bit->cpu)
			pci = get_cpu_info(pdi, bit->cpu);

		if (bit->sequence < pci->smallest_seq_read)
			pci->smallest_seq_read = bit->sequence;

		if (check_stopwatch(bit)) {

		if (trace_rb_insert_sort(t))
/*
 * to continue, we must have traces from all online cpus in the tree
 */
static int check_cpu_map(struct per_dev_info *pdi)
	unsigned long *cpu_map;

	/*
	 * create a map of the cpus we have traces for
	 */
	cpu_map = malloc(pdi->cpu_map_max / sizeof(long));
	n = rb_first(&rb_sort_root);
		__t = rb_entry(n, struct trace, rb_node);
		cpu = __t->bit->cpu;

		cpu_map[CPU_IDX(cpu)] |= (1UL << CPU_BIT(cpu));

	/*
	 * we can't continue if pdi->cpu_map has entries set that we don't
	 * have in the sort rbtree. the opposite is not a problem, though
	 */
	for (i = 0; i < pdi->cpu_map_max / CPUS_PER_LONG; i++) {
		if (pdi->cpu_map[i] & ~(cpu_map[i])) {
static int check_sequence(struct per_dev_info *pdi, struct trace *t, int force)
	struct blk_io_trace *bit = t->bit;
	unsigned long expected_sequence;
	struct per_cpu_info *pci;

	pci = get_cpu_info(pdi, bit->cpu);
	expected_sequence = pci->last_sequence + 1;

	if (!expected_sequence) {
		/*
		 * 1 should be the first entry, just allow it
		 */
		if (bit->sequence == 1)
		if (bit->sequence == pci->smallest_seq_read)

		return check_cpu_map(pdi);

	if (bit->sequence == expected_sequence)

	/*
	 * we may not have seen that sequence yet. if we are not doing
	 * the final run, break and wait for more entries.
	 */
	if (expected_sequence < pci->smallest_seq_read) {
		__t = trace_rb_find_last(pdi, pci, expected_sequence);

		__put_trace_last(pdi, __t);
	} else if (!force) {

	if (check_current_skips(pdi, bit->sequence))

	if (expected_sequence < bit->sequence)
		insert_skip(pdi, expected_sequence, bit->sequence - 1);
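/*
 * Walk the sort tree in time order and print every event that is safe
 * to emit, honouring sequence gaps and the last allowed timestamp.
 */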
static void show_entries_rb(int force)
	struct per_dev_info *pdi = NULL;
	struct per_cpu_info *pci = NULL;
	struct blk_io_trace *bit;

	while ((n = rb_first(&rb_sort_root)) != NULL) {
		if (is_done() && !force && !pipeline)

		t = rb_entry(n, struct trace, rb_node);

		if (!pdi || pdi->dev != bit->device) {
			pdi = get_dev_info(bit->device);

			fprintf(stderr, "Unknown device ID? (%d,%d)\n",
				MAJOR(bit->device), MINOR(bit->device));

		if (check_sequence(pdi, t, force))

		if (!force && bit->time > last_allowed_time)

		check_time(pdi, bit);

		if (!pci || pci->cpu != bit->cpu)
			pci = get_cpu_info(pdi, bit->cpu);

		pci->last_sequence = bit->sequence;

		if (bit->action & (act_mask << BLK_TC_SHIFT))
			dump_trace(bit, pci, pdi);
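/*
 * Read exactly 'bytes' bytes from fd, toggling O_NONBLOCK to match the
 * requested blocking behaviour and retrying short reads.
 */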
static int read_data(int fd, void *buffer, int bytes, int block, int *fdblock)
	int ret, bytes_left, fl;

	if (block != *fdblock) {
		fl = fcntl(fd, F_GETFL);

			fcntl(fd, F_SETFL, fl | O_NONBLOCK);
			fcntl(fd, F_SETFL, fl & ~O_NONBLOCK);

	while (bytes_left > 0) {
		ret = read(fd, p, bytes_left);
			if (errno != EAGAIN) {

			/*
			 * never do partial reads. we can return if we
			 * didn't read anything and we should not block,
			 * otherwise wait for data
			 */
			if ((bytes_left == bytes) && !block)
static int read_events(int fd, int always_block, int *fdblock)
	struct per_dev_info *pdi = NULL;
	unsigned int events = 0;

	while (!is_done() && events < rb_batch) {
		struct blk_io_trace *bit;
		int pdu_len, should_block, ret;

		should_block = !events || always_block;

		ret = read_data(fd, bit, sizeof(*bit), should_block, fdblock);

		if (!events && ret < 0)

		magic = be32_to_cpu(bit->magic);
		if ((magic & 0xffffff00) != BLK_IO_TRACE_MAGIC) {
			fprintf(stderr, "Bad magic %x\n", magic);

		pdu_len = be16_to_cpu(bit->pdu_len);
			void *ptr = realloc(bit, sizeof(*bit) + pdu_len);

			if (read_data(fd, ptr + sizeof(*bit), pdu_len, 1, fdblock)) {

		if (verify_trace(bit)) {

		memset(t, 0, sizeof(*t));

		t->next = trace_list;

		if (!pdi || pdi->dev != bit->device)
			pdi = get_dev_info(bit->device);

		if (bit->time > pdi->last_read_time)
			pdi->last_read_time = bit->time;
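/*
 * File mode: open every per-CPU trace file for every device, then read,
 * sort and emit events in rounds until the files are drained.
 */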
static int do_file(void)
	struct per_cpu_info *pci;
	struct per_dev_info *pdi;
	int i, j, events, events_added;

	/*
	 * first prepare all files for reading
	 */
	for (i = 0; i < ndevices; i++) {

			pci = get_cpu_info(pdi, j);

			p = strdup(pdi->name);
			if (strcmp(dname, ".")) {
				p = strdup(pdi->name);
				strcpy(pdi->name, basename(p));
			}

				len = sprintf(pci->fname, "%s/", input_dir);
			snprintf(pci->fname + len, sizeof(pci->fname)-1-len,
				 "%s.blktrace.%d", pdi->name, pci->cpu);
			if (stat(pci->fname, &st) < 0)
			pci->fd = open(pci->fname, O_RDONLY);

			printf("Input file %s added\n", pci->fname);
			cpu_mark_online(pdi, pci->cpu);

	/*
	 * now loop over the files reading in the data
	 */
	do {
		unsigned long long youngest;

		last_allowed_time = -1ULL;

		for (i = 0; i < ndevices; i++) {

			for (j = 0; j < pdi->nfiles; j++) {

				pci = get_cpu_info(pdi, j);

				pci->smallest_seq_read = -1;

				events = read_events(pci->fd, 1, &pci->fdblock);
					cpu_mark_offline(pdi, pci->cpu);

				if (pdi->last_read_time < last_allowed_time)
					last_allowed_time = pdi->last_read_time;

				events_added += events;

		if (sort_entries(&youngest))

		if (youngest > stopwatch_end)

	} while (events_added);
	if (rb_sort_entries)

static int do_stdin(void)
	unsigned long long youngest;
	int fd, events, fdblock;

	last_allowed_time = -1ULL;
	fd = dup(STDIN_FILENO);
		perror("dup stdin");

	while ((events = read_events(fd, 0, &fdblock)) > 0) {
		smallest_seq_read = -1U;

		if (sort_entries(&youngest))

		if (youngest > stopwatch_end)

	if (rb_sort_entries)
static void show_stats(void)
	if (per_process_stats)
		show_process_stats();

	if (per_device_and_cpu_stats)
		show_device_and_cpu_stats();

static void handle_sigint(__attribute__((__unused__)) int sig)
/*
 * Extract start and end times from a string, allowing us to specify a
 * time interval of interest within a trace.
 * Format: "end" (start is zero) or "start:end", both in seconds.
 */
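/*
 * e.g. "-w 2.5:10" keeps only events stamped between 2.5s and 10s after
 * the start of the trace.
 */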
static int find_stopwatch_interval(char *string)
	value = strtod(string, &sp);
		fprintf(stderr, "Invalid stopwatch timer: %s\n", string);

	stopwatch_start = DOUBLE_TO_NANO_ULL(value);
	value = strtod(string, &sp);
	if (sp == string || *sp != '\0') {
		fprintf(stderr, "Invalid stopwatch duration time: %s\n",
			string);
	} else if (*sp != '\0') {
		fprintf(stderr, "Invalid stopwatch start timer: %s\n", string);

	stopwatch_end = DOUBLE_TO_NANO_ULL(value);
	if (stopwatch_end <= stopwatch_start) {
		fprintf(stderr, "Invalid stopwatch interval: %Lu -> %Lu\n",
			stopwatch_start, stopwatch_end);
static char usage_str[] = \
	"[ -i <input name> ] [ -o <output name> ] [ -s ] [ -t ] [ -q ]\n" \
	"[ -w start:stop ] [ -f output format ] [ -F format spec ] [ -v ]\n\n" \
	"\t-i Input file containing trace data, or '-' for stdin\n" \
	"\t-D Directory to prepend to input file names\n" \
	"\t-o Output file. If not given, output is stdout\n" \
	"\t-b stdin read batching\n" \
	"\t-s Show per-program io statistics\n" \
	"\t-h Hash processes by name, not pid\n" \
	"\t-t Track individual ios. Will tell you the time a request took\n" \
	"\t   to get queued, to get dispatched, and to get completed\n" \
	"\t-q Quiet. Don't display any stats at the end of the trace\n" \
	"\t-w Only parse data between the given time interval in seconds.\n" \
	"\t   If 'start' isn't given, blkparse defaults the start time to 0\n" \
	"\t-f Output format. Customize the output format. The format field\n" \
	"\t   identifiers can be found in the documentation\n" \
	"\t-F Format specification. Can be found in the documentation\n" \
	"\t-v More verbose for marginal errors\n" \
	"\t-V Print program version info\n\n";
static void usage(char *prog)
	fprintf(stderr, "Usage: %s %s %s", prog, blkparse_version, usage_str);
int main(int argc, char *argv[])
	int i, c, ret, mode;
	int act_mask_tmp = 0;

	while ((c = getopt_long(argc, argv, S_OPTS, l_opts, NULL)) != -1) {
			i = find_mask_map(optarg);
				fprintf(stderr, "Invalid action mask %s\n",
			if ((sscanf(optarg, "%x", &i) != 1) ||
			    !valid_act_opt(i)) {
				fprintf(stderr,
					"Invalid set action mask %s/0x%x\n",
			if (!strcmp(optarg, "-") && !pipeline)
			else if (resize_devices(optarg) != 0)
			output_name = optarg;
			rb_batch = atoi(optarg);
				rb_batch = RB_BATCH_DEFAULT;
			per_process_stats = 1;
			per_device_and_cpu_stats = 0;
			if (find_stopwatch_interval(optarg) != 0)
			set_all_format_specs(optarg);
			if (add_format_spec(optarg) != 0)
			ppi_hash_by_pid = 0;
			printf("%s version %s\n", argv[0], blkparse_version);

	while (optind < argc) {
		if (!strcmp(argv[optind], "-") && !pipeline)
		else if (resize_devices(argv[optind]) != 0)

	if (!pipeline && !ndevices) {

	if (act_mask_tmp != 0)
		act_mask = act_mask_tmp;

	memset(&rb_sort_root, 0, sizeof(rb_sort_root));

	signal(SIGINT, handle_sigint);
	signal(SIGHUP, handle_sigint);
	signal(SIGTERM, handle_sigint);

	setlocale(LC_NUMERIC, "en_US");

		ofp = fdopen(STDOUT_FILENO, "w");

		snprintf(ofname, sizeof(ofname) - 1, "%s", output_name);
		ofp = fopen(ofname, "w");

	ofp_buffer = malloc(4096);
	if (setvbuf(ofp, ofp_buffer, mode, 4096)) {