2 * block queue tracing application
4 * Copyright (C) 2005 Jens Axboe <axboe@suse.de>
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 #include <sys/types.h>
29 #include <sys/ioctl.h>
30 #include <sys/param.h>
31 #include <sys/statfs.h>
/*
 * NOTE(review): this extract carries residual line-number prefixes and is
 * missing many interior lines — confirm against the full blktrace.c source.
 */
/* Tool version, printed by -V / -v. */
44 static char blktrace_version[] = "0.99";
/* Default relay sub-buffer sizing; raise via -b/-n if events are dropped. */
47 * You may want to increase this even more, if you are logging at a high
48 * rate and see skipped/missed events
/* Per-CPU relay sub-buffer size in bytes (overridable with -b, in KiB). */
50 #define BUF_SIZE (512 * 1024)
/* stdio output-buffer size used with setvbuf() for file output. */
53 #define OFILE_BUF (128 * 1024)
/* f_type magic used to verify the -r path is really a relayfs mount. */
55 #define RELAYFS_TYPE 0xF0B4A981
/* Ring sizing constants — usage not visible in this extract; see full source. */
57 #define RING_INIT_NR (2)
58 #define RING_MAX_NR (16UL)
/* getopt string; mirrors the l_opts[] long-option table below. */
60 #define S_OPTS "d:a:A:r:o:kw:Vb:n:D:"
/*
 * Long options matching S_OPTS ("d:a:A:r:o:kw:Vb:n:D:").  Most .name/.val
 * fields are missing from this extract; only the visible entries are
 * "buffer-size" (-b), "num-sub-buffers" (-n) and "output-dir" (-D).
 */
61 static struct option l_opts[] = {
64 .has_arg = required_argument,
70 .has_arg = required_argument,
76 .has_arg = required_argument,
82 .has_arg = required_argument,
88 .has_arg = required_argument,
94 .has_arg = no_argument,
100 .has_arg = required_argument,
106 .has_arg = no_argument,
/* -b: relay sub-buffer size in KiB */
111 .name = "buffer-size",
112 .has_arg = required_argument,
/* -n: number of relay sub-buffers */
117 .name = "num-sub-buffers",
118 .has_arg = required_argument,
/* -D: directory prepended to output file names */
123 .name = "output-dir",
124 .has_arg = required_argument,
/* Tail of struct tip_subbuf (start not visible here): capacity of its buf. */
136 unsigned int max_len;
/* Single-producer/single-consumer ring between reader and writer threads. */
139 #define FIFO_SIZE (1024) /* should be plenty big! */
140 #define CL_SIZE (128) /* cache line, any bigger? */
142 struct tip_subbuf_fifo {
/* head/tail on separate cache lines to avoid false sharing between threads */
143 int tail __attribute__((aligned(CL_SIZE)));
144 int head __attribute__((aligned(CL_SIZE)));
/* slots hold sub-buffers queued by get_subbuf(), drained by write_tip_events() */
145 struct tip_subbuf *q[FIFO_SIZE];
/*
 * Per-CPU reader-thread state; one instance per (device, cpu) pair.
 * Several fields (cpu, fd, ofile, ofile_stdout, exited, ...) are on lines
 * missing from this extract.
 */
148 struct thread_information {
/* path of the per-cpu relay file, e.g. <relay>/block/<dev>/trace<cpu> */
154 char fn[MAXPATHLEN + 64];
/* stats reported by show_stats() */
160 unsigned long events_processed;
161 unsigned long long data_read;
/* back-pointer to owning device */
162 struct device_information *device;
/* pipe-output path: queued sub-buffers plus partial-event carry-over */
169 struct tip_subbuf_fifo fifo;
170 struct tip_subbuf *leftover_ts;
173 * mmap controlled output files
/* file-output path: mmap window bookkeeping used by mmap_subbuf() */
175 unsigned long long fs_size;
176 unsigned long long fs_max_size;
177 unsigned long fs_off;
179 unsigned long fs_buf_len;
/* Per-traced-device state (path, fd etc. are on lines not in this extract). */
182 struct device_information {
/* set/cleared via dip_set_tracing(); read through dip_tracing() */
186 volatile int trace_started;
/* events dropped by the kernel, read from the relay "dropped" file */
187 unsigned long drop_count;
/* ncpus-sized slice of the global thread_information array */
188 struct thread_information *threads;
/* global arrays: ndevs * ncpus thread slots, ndevs device slots */
192 static struct thread_information *thread_information;
194 static struct device_information *device_information;
196 /* command line option globals */
197 static char *relay_path;
198 static char *output_name;
199 static char *output_dir;
200 static int act_mask = ~0U;
201 static int kill_running_trace;
202 static unsigned long buf_size = BUF_SIZE;
203 static unsigned long buf_nr = BUF_NR;
204 static unsigned int page_size;
/*
 * Signal/shutdown flags.  The macros force a volatile re-read on each use;
 * NOTE(review): this is pre-C11 signalling style, not full thread
 * synchronization — kept as-is.
 */
206 #define is_done() (*(volatile int *)(&done))
207 static volatile int done;
209 #define is_trace_stopped() (*(volatile int *)(&trace_stopped))
210 static volatile int trace_stopped;
212 #define is_stat_shown() (*(volatile int *)(&stat_shown))
213 static volatile int stat_shown;
215 static void exit_trace(int status);
/* per-device tracing-active flag accessors */
217 #define dip_tracing(dip) (*(volatile int *)(&(dip)->trace_started))
218 #define dip_set_tracing(dip, v) ((dip)->trace_started = (v))
/* iterate all devices (optionally only the first __e) and all per-cpu tips */
220 #define __for_each_dip(__d, __i, __e) \
221 for (__i = 0, __d = device_information; __i < __e; __i++, __d++)
223 #define for_each_dip(__d, __i) __for_each_dip(__d, __i, ndevs)
224 #define for_each_tip(__d, __t, __j) \
225 for (__j = 0, __t = (__d)->threads; __j < ncpus; __j++, __t++)
/*
 * Read the kernel's dropped-event counter for one trace from
 * <relay_path>/block/<buts_name>/dropped.  A missing file is tolerated
 * (older kernels lack it).  Return value handling is on lines missing
 * from this extract — presumably the parsed count, or 0/-1 on error.
 */
227 static int get_dropped_count(const char *buts_name)
230 char tmp[MAXPATHLEN + 64];
232 snprintf(tmp, sizeof(tmp), "%s/block/%s/dropped",
233 relay_path, buts_name);
235 fd = open(tmp, O_RDONLY);
238 * this may be ok, if the kernel doesn't support dropped counts
243 fprintf(stderr, "Couldn't open dropped file %s\n", tmp);
/* tmp is reused as the read buffer for the count text */
247 if (read(fd, tmp, sizeof(tmp)) < 0) {
/*
 * Arm kernel-side tracing on one device via the BLKSTARTTRACE ioctl,
 * passing the configured sub-buffer size/count and action mask.  On
 * success the kernel-assigned trace name is stashed in dip->buts_name
 * and the device is marked as tracing.
 */
258 static int start_trace(struct device_information *dip)
260 struct blk_user_trace_setup buts;
262 memset(&buts, 0, sizeof(buts));
263 buts.buf_size = buf_size;
264 buts.buf_nr = buf_nr;
265 buts.act_mask = act_mask;
267 if (ioctl(dip->fd, BLKSTARTTRACE, &buts) < 0) {
268 perror("BLKSTARTTRACE");
/* kernel fills buts.name; used later to locate relay files */
272 memcpy(dip->buts_name, buts.name, sizeof(dip->buts_name));
273 dip_set_tracing(dip, 1);
/*
 * Disarm kernel tracing for one device.  Also fires when -k was given,
 * so a stale trace left by a crashed run can be killed.
 */
277 static void stop_trace(struct device_information *dip)
279 if (dip_tracing(dip) || kill_running_trace) {
280 dip_set_tracing(dip, 0);
282 if (ioctl(dip->fd, BLKSTOPTRACE) < 0)
283 perror("BLKSTOPTRACE");
/*
 * Stop tracing on every device, snapshotting each device's kernel drop
 * count first (the counter goes away with the trace).
 */
290 static void stop_all_traces(void)
292 struct device_information *dip;
295 for_each_dip(dip, i) {
296 dip->drop_count = get_dropped_count(dip->buts_name);
/*
 * Block (via poll on the per-cpu relay fd) until trace data is readable
 * or shutdown is requested.  The stdout path appears to get extra
 * handling on a line missing from this extract.
 */
301 static void wait_for_data(struct thread_information *tip)
303 struct pollfd pfd = { .fd = tip->fd, .events = POLLIN };
307 if (pfd.revents & POLLIN)
309 if (tip->ofile_stdout)
311 } while (!is_done());
/*
 * Read up to len bytes of trace data from the per-cpu relay fd into buf,
 * retrying on EAGAIN (the fd is non-blocking) until data arrives or
 * shutdown.  Returns the read() result; exact return paths are on lines
 * missing from this extract.
 */
314 static int read_data(struct thread_information *tip, void *buf, int len)
321 ret = read(tip->fd, buf, len);
/* EAGAIN means no data yet — wait and retry; anything else is fatal */
327 if (errno != EAGAIN) {
329 fprintf(stderr,"Thread %d failed read of %s\n",
335 } while (!is_done());
/*
 * Pop one sub-buffer from the tip's SPSC ring (consumer side: the main
 * writer).  Empty when head == tail; head is advanced only after the
 * slot is fetched.  The empty-return path is on lines missing here.
 */
340 static inline struct tip_subbuf *subbuf_fifo_dequeue(struct thread_information *tip)
342 const int head = tip->fifo.head;
/* FIFO_SIZE is a power of two, so wrap with a mask */
343 const int next = (head + 1) & (FIFO_SIZE - 1);
345 if (head != tip->fifo.tail) {
346 struct tip_subbuf *ts = tip->fifo.q[head];
349 tip->fifo.head = next;
/*
 * Push a filled sub-buffer onto the tip's SPSC ring (producer side: the
 * per-cpu reader thread).  Full when advancing tail would collide with
 * head; that case is reported as a fatal sizing problem.
 */
356 static inline int subbuf_fifo_queue(struct thread_information *tip,
357 struct tip_subbuf *ts)
359 const int tail = tip->fifo.tail;
360 const int next = (tail + 1) & (FIFO_SIZE - 1);
362 if (next != tip->fifo.head) {
/* store the slot before publishing the new tail */
363 tip->fifo.q[tail] = ts;
365 tip->fifo.tail = next;
369 fprintf(stderr, "fifo too small!\n");
374 * For file output, truncate and mmap the file appropriately
/*
 * File-output path: read one sub-buffer of trace data directly into an
 * mmap'ed window of the output file, growing the file in 16-sub-buffer
 * chunks as needed.  Avoids a copy compared with the pipe path.
 */
376 static int mmap_subbuf(struct thread_information *tip)
378 int ofd = fileno(tip->ofile);
382 * extend file, if we have to. use chunks of 16 subbuffers.
384 if (tip->fs_off + buf_size > tip->fs_buf_len) {
386 munmap(tip->fs_buf, tip->fs_buf_len);
/* keep the mapping page-aligned; carry the sub-page remainder in fs_off */
390 tip->fs_off = tip->fs_size & (page_size - 1);
391 tip->fs_buf_len = (16 * buf_size) - tip->fs_off;
392 tip->fs_max_size += tip->fs_buf_len;
/* grow the file before mapping the new window */
394 if (ftruncate(ofd, tip->fs_max_size) < 0) {
399 tip->fs_buf = mmap(NULL, tip->fs_buf_len, PROT_WRITE,
400 MAP_SHARED, ofd, tip->fs_size - tip->fs_off);
401 if (tip->fs_buf == MAP_FAILED) {
/* relay data lands straight in the mapped file */
407 ret = read_data(tip, tip->fs_buf + tip->fs_off, buf_size);
409 tip->data_read += ret;
419 * Use the copy approach for pipes
/*
 * Pipe-output path: copy one sub-buffer's worth of data into a freshly
 * allocated tip_subbuf and queue it for the writer thread.
 * NOTE(review): the visible malloc() results are used unchecked here;
 * whether checks exist on the missing lines should be confirmed.
 */
421 static int get_subbuf(struct thread_information *tip)
423 struct tip_subbuf *ts;
426 ts = malloc(sizeof(*ts));
427 ts->buf = malloc(buf_size);
428 ts->max_len = buf_size;
430 ret = read_data(tip, ts->buf, ts->max_len);
433 return subbuf_fifo_queue(tip, ts);
/*
 * Release a reader thread's resources (fd/ofile closing is on lines
 * missing from this extract); frees and clears the setvbuf buffer.
 */
441 static void close_thread(struct thread_information *tip)
447 if (tip->ofile_buffer)
448 free(tip->ofile_buffer);
454 tip->ofile_buffer = NULL;
/*
 * Per-cpu reader thread body: pin to its CPU, open the matching relay
 * trace file, then loop pulling data — get_subbuf() for stdout/pipe
 * output, mmap_subbuf() for file output (loop structure partly on
 * missing lines).  On exit, file output is trimmed to the bytes
 * actually written.
 */
458 static void *thread_main(void *arg)
460 struct thread_information *tip = arg;
461 pid_t pid = getpid();
465 CPU_SET((tip->cpu), &cpu_mask);
467 if (sched_setaffinity(pid, sizeof(cpu_mask), &cpu_mask) == -1) {
468 perror("sched_setaffinity");
/* one relay file per (device, cpu): <relay>/block/<name>/trace<cpu> */
472 snprintf(tip->fn, sizeof(tip->fn), "%s/block/%s/trace%d",
473 relay_path, tip->device->buts_name, tip->cpu);
474 tip->fd = open(tip->fn, O_RDONLY);
477 fprintf(stderr,"Thread %d failed open of %s\n", tip->cpu,
483 if (tip->ofile_stdout) {
487 if (mmap_subbuf(tip))
493 * truncate to right size and cleanup mmap
495 if (!tip->ofile_stdout) {
496 int ofd = fileno(tip->ofile);
499 munmap(tip->fs_buf, tip->fs_buf_len);
/* the file was pre-grown in 16-sub-buffer chunks; cut back to real size */
501 ftruncate(ofd, tip->fs_size);
/*
 * Write buf_len bytes to the thread's output stream via stdio; the
 * stdout path gets extra handling (likely a flush) on a missing line.
 * Error checking of fwrite's return is on lines not in this extract.
 */
508 static int write_data(struct thread_information *tip,
509 void *buf, unsigned int buf_len)
517 ret = fwrite(buf, buf_len, 1, tip->ofile);
527 if (tip->ofile_stdout)
/*
 * Pipe-output flush: walk a sub-buffer event by event (blk_io_trace
 * header + variable pdu), validate each with verify_trace(), and write
 * out only whole events.  A trailing partial event is kept in
 * tip->leftover_ts and prepended to the next sub-buffer.
 */
533 static int flush_subbuf(struct thread_information *tip, struct tip_subbuf *ts)
535 unsigned int offset = 0;
536 struct blk_io_trace *t;
537 int pdu_len, events = 0;
540 * surplus from last run
542 if (tip->leftover_ts) {
543 struct tip_subbuf *prev_ts = tip->leftover_ts;
/* grow the carried buffer so old remainder + new data fit contiguously */
545 if (prev_ts->len + ts->len > prev_ts->max_len) {
546 prev_ts->max_len += ts->len;
547 prev_ts->buf = realloc(prev_ts->buf, prev_ts->max_len);
550 memcpy(prev_ts->buf + prev_ts->len, ts->buf, ts->len);
551 prev_ts->len += ts->len;
557 tip->leftover_ts = NULL;
/* scan complete events: fixed header then pdu_len payload bytes */
560 while (offset + sizeof(*t) <= ts->len) {
561 t = ts->buf + offset;
/* corrupt event: emit what was valid so far, then bail (missing lines) */
563 if (verify_trace(t)) {
564 write_data(tip, ts->buf, offset);
568 pdu_len = t->pdu_len;
/* event straddles the sub-buffer boundary — leave it for next time */
570 if (offset + sizeof(*t) + pdu_len > ts->len)
573 offset += sizeof(*t) + pdu_len;
574 tip->events_processed++;
575 tip->data_read += sizeof(*t) + pdu_len;
579 if (write_data(tip, ts->buf, offset))
583 * leftover bytes, save them for next time
585 if (offset != ts->len) {
586 tip->leftover_ts = ts;
/* NOTE(review): length arg looks like it should be ts->len - offset;
 * confirm against the full source before judging this a bug. */
588 memmove(ts->buf, ts->buf + offset, ts->len);
/*
 * Drain one queued sub-buffer for this tip, if any, and flush it to the
 * output stream (the NULL/empty case is on lines missing here).
 */
597 static int write_tip_events(struct thread_information *tip)
599 struct tip_subbuf *ts = subbuf_fifo_dequeue(tip);
602 return flush_subbuf(tip, ts);
608 * scans the tips we know and writes out the subbuffers we accumulate
/*
 * Main-thread loop for piped output: repeatedly sweep every device/cpu
 * tip and flush queued sub-buffers; after shutdown begins, keep
 * sweeping until no events remain and all reader threads have exited.
 */
610 static void get_and_write_events(void)
612 struct device_information *dip;
613 struct thread_information *tip;
614 int i, j, events, ret, tips_running;
/* first phase: normal operation sweep (loop framing on missing lines) */
619 for_each_dip(dip, i) {
620 for_each_tip(dip, tip, j) {
621 ret = write_tip_events(tip);
/* drain phase: also track how many reader threads are still alive */
637 for_each_dip(dip, i) {
638 for_each_tip(dip, tip, j) {
639 ret = write_tip_events(tip);
642 tips_running += !tip->exited;
646 } while (events || tips_running);
/*
 * Block until tracing finishes: piped output ("-o -") is serviced by
 * get_and_write_events(); file output just polls until every reader
 * thread has set tip->exited.
 */
649 static void wait_for_threads(void)
652 * for piped output, poll and fetch data for writeout. for files,
653 * we just wait around for trace threads to exit
655 if (output_name && !strcmp(output_name, "-"))
656 get_and_write_events();
658 struct device_information *dip;
659 struct thread_information *tip;
660 int i, j, tips_running;
667 for_each_tip(dip, tip, j)
668 tips_running += !tip->exited;
669 } while (tips_running);
/*
 * Spawn one reader thread per CPU for a device.  Output goes to stdout
 * when "-o -" was given, otherwise to per-cpu files named
 * [<output_dir>/][<output_name>|<buts_name>].blktrace.<cpu>, with a
 * large setvbuf buffer.  Error/cleanup paths are on missing lines.
 */
673 static int start_threads(struct device_information *dip)
675 struct thread_information *tip;
677 int j, pipeline = output_name && !strcmp(output_name, "-");
678 int len, mode, vbuf_size;
680 for_each_tip(dip, tip, j) {
683 tip->events_processed = 0;
684 memset(&tip->fifo, 0, sizeof(tip->fifo));
685 tip->leftover_ts = NULL;
/* pipeline mode: all threads share stdout */
688 tip->ofile = fdopen(STDOUT_FILENO, "w");
689 tip->ofile_stdout = 1;
/* file mode: build per-cpu output path (op declared on missing lines) */
696 len = sprintf(op, "%s/", output_dir);
699 sprintf(op + len, "%s.blktrace.%d", output_name,
702 sprintf(op + len, "%s.blktrace.%d",
703 dip->buts_name, tip->cpu);
705 tip->ofile = fopen(op, "w+");
706 tip->ofile_stdout = 0;
708 vbuf_size = OFILE_BUF;
711 if (tip->ofile == NULL) {
/* big stdio buffer cuts write() syscalls on the output path */
716 tip->ofile_buffer = malloc(vbuf_size);
717 if (setvbuf(tip->ofile, tip->ofile_buffer, mode, vbuf_size)) {
723 if (pthread_create(&tip->thread, NULL, thread_main, tip)) {
724 perror("pthread_create");
/*
 * Join every reader thread of a device; the join result itself is
 * discarded (cleanup presumably follows on missing lines).
 */
733 static void stop_threads(struct device_information *dip)
735 struct thread_information *tip;
739 for_each_tip(dip, tip, i) {
740 (void) pthread_join(tip->thread, (void *) &ret);
/* Iterate all devices and stop their threads (body on missing lines). */
745 static void stop_all_threads(void)
747 struct device_information *dip;
/* Iterate all devices and stop kernel tracing (body on missing lines). */
754 static void stop_all_tracing(void)
756 struct device_information *dip;
/* Orderly shutdown entry used by signal/error paths; stops tracing once. */
763 static void exit_trace(int status)
765 if (!is_trace_stopped()) {
/*
 * Grow the device_information array by one slot and record the new
 * device path (from -d or a trailing argv argument).  On realloc
 * failure the process presumably exits (missing lines), so the
 * overwrite-on-realloc pattern does not leak in practice.
 */
774 static int resize_devices(char *path)
776 int size = (ndevs + 1) * sizeof(struct device_information);
778 device_information = realloc(device_information, size);
779 if (!device_information) {
780 fprintf(stderr, "Out of memory, device %s (%d)\n", path, size);
783 device_information[ndevs].path = path;
/*
 * Open every configured device read-only/non-blocking; the fd is used
 * only for the BLKSTARTTRACE/BLKSTOPTRACE ioctls.
 */
788 static int open_devices(void)
790 struct device_information *dip;
793 for_each_dip(dip, i) {
794 dip->fd = open(dip->path, O_RDONLY | O_NONBLOCK);
/*
 * Allocate the ndevs*ncpus thread table, then arm kernel tracing and
 * start reader threads for every device.  On a mid-list failure, the
 * __for_each_dip(..., i) loops unwind only the devices already started.
 */
804 static int start_devices(void)
806 struct device_information *dip;
809 size = ncpus * sizeof(struct thread_information);
810 thread_information = malloc(size * ndevs);
811 if (!thread_information) {
812 fprintf(stderr, "Out of memory, threads (%d)\n", size * ndevs);
/* phase 1: arm kernel tracing per device */
816 for_each_dip(dip, i) {
817 if (start_trace(dip)) {
819 fprintf(stderr, "Failed to start trace on %s\n",
/* roll back traces already started */
826 __for_each_dip(dip, j, i)
/* phase 2: per-device reader threads, each owning an ncpus slice */
832 for_each_dip(dip, i) {
833 dip->threads = thread_information + (i * ncpus);
834 if (start_threads(dip)) {
835 fprintf(stderr, "Failed to start worker threads\n");
841 __for_each_dip(dip, j, i)
/*
 * Print per-device, per-cpu event/byte totals plus kernel drop counts.
 * With piped output ("-o -") the per-cpu lines go elsewhere/are
 * suppressed (no_stdout handling partly on missing lines).  Warns when
 * any events were dropped, suggesting a larger -b.
 */
852 static void show_stats(void)
854 struct device_information *dip;
855 struct thread_information *tip;
856 unsigned long long events_processed, data_read;
857 unsigned long total_drops;
858 int i, j, no_stdout = 0;
863 if (output_name && !strcmp(output_name, "-"))
869 for_each_dip(dip, i) {
871 printf("Device: %s\n", dip->path);
872 events_processed = 0;
874 for_each_tip(dip, tip, j) {
876 printf(" CPU%3d: %20lu events, %8llu KiB data\n",
877 tip->cpu, tip->events_processed,
878 tip->data_read >> 10);
879 events_processed += tip->events_processed;
880 data_read += tip->data_read;
882 total_drops += dip->drop_count;
884 printf(" Total: %20llu events (dropped %lu), %8llu KiB data\n",
885 events_processed, dip->drop_count,
890 fprintf(stderr, "You have dropped events, consider using a larger buffer size (-b)\n");
/* Usage text printed by show_usage(); mirrors S_OPTS/l_opts. */
893 static char usage_str[] = \
894 "-d <dev> [ -r relay path ] [ -o <output> ] [-k ] [ -w time ]\n" \
895 "[ -a action ] [ -A action mask ] [ -v ]\n\n" \
896 "\t-d Use specified device. May also be given last after options\n" \
897 "\t-r Path to mounted relayfs, defaults to /relay\n" \
898 "\t-o File(s) to send output to\n" \
899 "\t-D Directory to prepend to output file names\n" \
900 "\t-k Kill a running trace\n" \
901 "\t-w Stop after defined time, in seconds\n" \
902 "\t-a Only trace specified actions. See documentation\n" \
903 "\t-A Give trace mask as a single value. See documentation\n" \
904 "\t-b Sub buffer size in KiB\n" \
905 "\t-n Number of sub buffers\n" \
906 "\t-v Print program version info\n\n";
/* Print usage with program name and version to stderr. */
908 static void show_usage(char *program)
910 fprintf(stderr, "Usage: %s %s %s",program, blktrace_version, usage_str);
/* Signal handler (INT/HUP/TERM/ALRM); body on lines missing here. */
912 static void handle_sigint(__attribute__((__unused__)) int sig)
/*
 * Entry point: parse options, validate the relay mount, open devices,
 * handle -k (kill running trace), then start tracing and install
 * signal/exit handlers.  NOTE(review): this function continues past the
 * end of this extract; comments below cover only the visible portion.
 */
917 int main(int argc, char *argv[])
919 static char default_relay_path[] = "/relay";
923 int act_mask_tmp = 0;
925 while ((c = getopt_long(argc, argv, S_OPTS, l_opts, NULL)) >= 0) {
/* -a: accumulate named action masks */
928 i = find_mask_map(optarg);
930 fprintf(stderr,"Invalid action mask %s\n",
/* -A: whole mask as a hex value */
938 if ((sscanf(optarg, "%x", &i) != 1) ||
941 "Invalid set action mask %s/0x%x\n",
/* -d: add a device */
949 if (resize_devices(optarg) != 0)
958 output_name = optarg;
961 kill_running_trace = 1;
/* -w: stop watch in seconds, must be positive */
964 stop_watch = atoi(optarg);
965 if (stop_watch <= 0) {
967 "Invalid stopwatch value (%d secs)\n",
973 printf("%s version %s\n", argv[0], blktrace_version);
/* -b: sub-buffer size in KiB, capped at 16 MiB (scaling on missing lines) */
976 buf_size = strtoul(optarg, NULL, 10);
977 if (buf_size <= 0 || buf_size > 16*1024) {
979 "Invalid buffer size (%lu)\n",buf_size);
985 buf_nr = strtoul(optarg, NULL, 10);
988 "Invalid buffer nr (%lu)\n", buf_nr);
/* trailing non-option args are more devices */
1001 while (optind < argc) {
1002 if (resize_devices(argv[optind++]) != 0)
1007 show_usage(argv[0]);
1012 relay_path = default_relay_path;
1014 if (act_mask_tmp != 0)
1015 act_mask = act_mask_tmp;
/* verify -r path exists and is a relayfs mount (by f_type magic) */
1017 if (statfs(relay_path, &st) < 0) {
1019 fprintf(stderr,"%s does not appear to be a valid path\n",
1022 } else if (st.f_type != (long) RELAYFS_TYPE) {
1023 fprintf(stderr,"%s does not appear to be a relay filesystem\n",
1028 if (open_devices() != 0)
1031 if (kill_running_trace) {
1036 setlocale(LC_NUMERIC, "en_US");
1038 ncpus = sysconf(_SC_NPROCESSORS_ONLN);
1040 fprintf(stderr, "sysconf(_SC_NPROCESSORS_ONLN) failed\n");
1044 page_size = getpagesize();
1046 if (start_devices() != 0)
/* all four signals funnel into handle_sigint for orderly shutdown */
1049 signal(SIGINT, handle_sigint);
1050 signal(SIGHUP, handle_sigint);
1051 signal(SIGTERM, handle_sigint);
1052 signal(SIGALRM, handle_sigint);
1054 atexit(stop_all_tracing);
1061 if (!is_trace_stopped()) {