/*
 * block queue tracing application
 *
 * Copyright (C) 2005 Jens Axboe <axboe@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
22 #include <sys/types.h>
29 #include <sys/ioctl.h>
30 #include <sys/param.h>
31 #include <sys/statfs.h>
/* tool version string, printed by the -V option */
44 static char blktrace_version[] = "0.99";
/* (comment fragment — its opening line is elided from this view) */
47 * You may want to increase this even more, if you are logging at a high
48 * rate and see skipped/missed events
/* default size of one relay sub-buffer, in bytes (overridable via -b, KiB) */
50 #define BUF_SIZE (512 * 1024)
/* stdio buffer size given to setvbuf() for each per-CPU output file */
53 #define OFILE_BUF (128 * 1024)
/* statfs f_type magic used to verify -r points at a mounted relayfs */
55 #define RELAYFS_TYPE 0xF0B4A981
/* ring sizing constants — their use is not visible in this chunk; confirm */
57 #define RING_INIT_NR (2)
58 #define RING_MAX_NR (16UL)
/* short-option string for getopt_long(); must stay in sync with l_opts[] */
60 #define S_OPTS "d:a:A:r:o:kw:Vb:n:D:"
/*
 * long-option table mirroring S_OPTS.  Most .name/.val members are elided
 * from this view; presumably each entry pairs a long name with the matching
 * short option character — verify against the full file.
 */
61 static struct option l_opts[] = {
64 .has_arg = required_argument,
70 .has_arg = required_argument,
76 .has_arg = required_argument,
82 .has_arg = required_argument,
88 .has_arg = required_argument,
94 .has_arg = no_argument,
100 .has_arg = required_argument,
106 .has_arg = no_argument,
/* -b: sub-buffer size in KiB */
111 .name = "buffer-size",
112 .has_arg = required_argument,
/* -n: number of sub-buffers */
117 .name = "num-sub-buffers",
118 .has_arg = required_argument,
/* -D: directory prepended to output file names */
123 .name = "output-dir",
124 .has_arg = required_argument,
/* capacity of the buf allocation — field of struct tip_subbuf, whose
 * opening lines (and its len/buf members) are elided from this view */
136 unsigned int max_len;
139 #define FIFO_SIZE (1024) /* should be plenty big! */
140 #define CL_SIZE (128) /* cache line, any bigger? */
/*
 * single-producer/single-consumer ring of filled sub-buffers, handed from
 * the per-CPU reader thread to the writer.  head and tail live on separate
 * cache lines to avoid false sharing between producer and consumer.
 */
142 struct tip_subbuf_fifo {
143 int tail __attribute__((aligned(CL_SIZE)));
144 int head __attribute__((aligned(CL_SIZE)));
145 struct tip_subbuf *q[FIFO_SIZE];
/*
 * per-CPU trace reader state: one instance per (device, cpu) pair.
 * Several members (thread id, fd, cpu, ofile, exited flag, ...) are elided
 * from this view.
 */
148 struct thread_information {
/* current offset / size / cap of the relay trace file being read */
154 unsigned long fd_off;
155 unsigned long fd_size;
156 unsigned long fd_max_size;
/* path of the per-CPU relayfs trace file ("<relay>/block/<dev>/traceN") */
157 char fn[MAXPATHLEN + 64];
/* events successfully parsed and written out, for show_stats() */
163 unsigned long events_processed;
/* back-pointer to the owning device */
164 struct device_information *device;
/* sub-buffers queued from reader to writer, plus any partial trailing
 * event carried over between flushes */
168 struct tip_subbuf_fifo fifo;
169 struct tip_subbuf *leftover_ts;
/*
 * per-device trace state; fd/path/buts_name members are elided from this
 * view.  trace_started is volatile because it is polled via dip_tracing().
 */
172 struct device_information {
176 volatile int trace_started;
/* events the kernel reported dropped, read at stop time */
177 unsigned long drop_count;
/* ncpus-sized slice of the global thread_information array */
178 struct thread_information *threads;
/* flat array of ndevs*ncpus per-CPU reader states */
182 static struct thread_information *thread_information;
/* growable array of traced devices (see resize_devices) */
184 static struct device_information *device_information;
186 /* command line option globals */
187 static char *relay_path;
188 static char *output_name;
189 static char *output_dir;
/* NOTE(review): signed int initialized with ~0U — works, but an unsigned
 * type would match the mask semantics better */
190 static int act_mask = ~0U;
191 static int kill_running_trace;
192 static unsigned long buf_size = BUF_SIZE;
193 static unsigned long buf_nr = BUF_NR;
/* volatile re-reads of flags set from the SIGINT/SIGTERM handler; this
 * predates C11 atomics — volatile is not a real synchronization primitive */
195 #define is_done() (*(volatile int *)(&done))
196 static volatile int done;
198 #define is_trace_stopped() (*(volatile int *)(&trace_stopped))
199 static volatile int trace_stopped;
201 #define is_stat_shown() (*(volatile int *)(&stat_shown))
202 static volatile int stat_shown;
204 static void exit_trace(int status);
206 #define dip_tracing(dip) (*(volatile int *)(&(dip)->trace_started))
207 #define dip_set_tracing(dip, v) ((dip)->trace_started = (v))
/* iterate devices / per-device threads.  NOTE(review): identifiers starting
 * with double underscore are reserved for the implementation */
209 #define __for_each_dip(__d, __i, __e) \
210 for (__i = 0, __d = device_information; __i < __e; __i++, __d++)
212 #define for_each_dip(__d, __i) __for_each_dip(__d, __i, ndevs)
213 #define for_each_tip(__d, __t, __j) \
214 for (__j = 0, __t = (__d)->threads; __j < ncpus; __j++, __t++)
/*
 * Read the kernel's dropped-event counter from
 * <relay_path>/block/<buts_name>/dropped.  A missing file is tolerated
 * (older kernels don't export it) — the early-return branch for that case
 * is elided from this view.  NOTE(review): the buffer read here is
 * presumably parsed as a number afterwards; confirm it is NUL-terminated
 * before parsing.
 */
216 static int get_dropped_count(const char *buts_name)
219 char tmp[MAXPATHLEN + 64];
221 snprintf(tmp, sizeof(tmp), "%s/block/%s/dropped",
222 relay_path, buts_name);
224 fd = open(tmp, O_RDONLY);
227 * this may be ok, if the kernel doesn't support dropped counts
232 fprintf(stderr, "Couldn't open dropped file %s\n", tmp);
/* tmp is reused as the read buffer once the path is no longer needed */
236 if (read(fd, tmp, sizeof(tmp)) < 0) {
/*
 * Ask the kernel to start tracing this device via the BLKSTARTTRACE ioctl,
 * passing the configured sub-buffer size/count and action mask.  On success
 * the kernel fills buts.name, which names the relayfs directory; mark the
 * device as tracing.  Returns nonzero on ioctl failure (return statements
 * are elided from this view).
 */
247 static int start_trace(struct device_information *dip)
249 struct blk_user_trace_setup buts;
251 memset(&buts, 0, sizeof(buts));
252 buts.buf_size = buf_size;
253 buts.buf_nr = buf_nr;
254 buts.act_mask = act_mask;
256 if (ioctl(dip->fd, BLKSTARTTRACE, &buts) < 0) {
257 perror("BLKSTARTTRACE");
/* remember the kernel-assigned trace name for building relay paths */
261 memcpy(dip->buts_name, buts.name, sizeof(dip->buts_name));
262 dip_set_tracing(dip, 1);
/*
 * Stop tracing on a device.  Also entered when -k was given, so a trace
 * started by an earlier (possibly crashed) run can be torn down even though
 * this process never marked it as tracing.
 */
266 static void stop_trace(struct device_information *dip)
268 if (dip_tracing(dip) || kill_running_trace) {
/* clear the flag first so readers stop treating the device as live */
269 dip_set_tracing(dip, 0);
271 if (ioctl(dip->fd, BLKSTOPTRACE) < 0)
272 perror("BLKSTOPTRACE");
/*
 * Stop tracing on every device, snapshotting the kernel's dropped-event
 * count first (it must be read before the trace is torn down).
 */
279 static void stop_all_traces(void)
281 struct device_information *dip;
284 for_each_dip(dip, i) {
285 dip->drop_count = get_dropped_count(dip->buts_name);
/*
 * Block (poll) until the per-CPU relay file has data to read, re-polling
 * until either data arrives or the global done flag is raised by a signal.
 * The stdout path appears to get extra handling (line elided) —
 * presumably a flush; confirm against the full file.
 */
290 static void wait_for_data(struct thread_information *tip)
292 struct pollfd pfd = { .fd = tip->fd, .events = POLLIN };
296 if (pfd.revents & POLLIN)
298 if (tip->ofile_stdout)
300 } while (!is_done());
/*
 * Read up to len bytes of raw trace data from the per-CPU relay file.
 * The fd is non-blocking-ish in effect: EAGAIN loops back (via
 * wait_for_data, presumably — surrounding lines elided) until data arrives
 * or shutdown is requested; any other errno is reported as a hard failure.
 */
303 static int read_data(struct thread_information *tip, void *buf, int len)
310 ret = read(tip->fd, buf, len);
316 if (errno != EAGAIN) {
318 fprintf(stderr,"Thread %d failed read of %s\n",
324 } while (!is_done());
329 static inline struct tip_subbuf *subbuf_fifo_dequeue(struct thread_information *tip)
331 const int head = tip->fifo.head;
332 const int next = (head + 1) & (FIFO_SIZE - 1);
334 if (head != tip->fifo.tail) {
335 struct tip_subbuf *ts = tip->fifo.q[head];
338 tip->fifo.head = next;
345 static inline int subbuf_fifo_queue(struct thread_information *tip,
346 struct tip_subbuf *ts)
348 const int tail = tip->fifo.tail;
349 const int next = (tail + 1) & (FIFO_SIZE - 1);
351 if (next != tip->fifo.head) {
352 tip->fifo.q[tail] = ts;
354 tip->fifo.tail = next;
358 fprintf(stderr, "fifo too small!\n");
/*
 * Allocate a fresh sub-buffer, fill it with raw trace data from the relay
 * file, and hand it to the writer via the fifo.  The handling of
 * read_data()'s return value (setting ts->len, error path) is elided from
 * this view.  NOTE(review): both malloc() results are used unchecked —
 * an OOM here would crash; consider checking or using an xmalloc wrapper.
 */
362 static int get_subbuf(struct thread_information *tip)
364 struct tip_subbuf *ts;
367 ts = malloc(sizeof(*ts));
368 ts->buf = malloc(buf_size);
369 ts->max_len = buf_size;
371 ret = read_data(tip, ts->buf, ts->max_len);
374 return subbuf_fifo_queue(tip, ts);
/*
 * Release a reader thread's resources (fd/ofile closing lines are elided
 * from this view) and free its stdio buffer.  NOTE(review): the
 * `if (tip->ofile_buffer)` guard before free() is redundant — free(NULL)
 * is a no-op.
 */
382 static void close_thread(struct thread_information *tip)
388 if (tip->ofile_buffer)
389 free(tip->ofile_buffer);
/* clear so a second close_thread() can't double-free */
395 tip->ofile_buffer = NULL;
/*
 * Per-CPU reader thread entry point: pin the thread to its CPU so it reads
 * the matching relay buffer locally, open the per-CPU relay trace file,
 * then (in elided code) loop pulling sub-buffers until shutdown.
 * NOTE(review): sched_setaffinity() is given the process pid from
 * getpid(), not the thread's tid — with NPTL this affects the whole
 * process/main thread; gettid() is the per-thread choice.  Confirm intent.
 */
399 static void *thread_main(void *arg)
401 struct thread_information *tip = arg;
402 pid_t pid = getpid();
406 CPU_SET((tip->cpu), &cpu_mask);
408 if (sched_setaffinity(pid, sizeof(cpu_mask), &cpu_mask) == -1) {
409 perror("sched_setaffinity");
/* relayfs exposes one trace file per CPU under the buts_name directory */
413 snprintf(tip->fn, sizeof(tip->fn), "%s/block/%s/trace%d",
414 relay_path, tip->device->buts_name, tip->cpu);
415 tip->fd = open(tip->fn, O_RDONLY);
418 fprintf(stderr,"Thread %d failed open of %s\n", tip->cpu,
/*
 * Write buf_len bytes of parsed trace data to the thread's output stream.
 * Error/short-write handling around the fwrite is elided from this view;
 * the stdout path gets extra treatment (presumably a flush so piped
 * consumers see data promptly — confirm).
 */
432 static int write_data(struct thread_information *tip,
433 void *buf, unsigned int buf_len)
/* one item of buf_len bytes: fwrite returns 1 on full success */
441 ret = fwrite(buf, buf_len, 1, tip->ofile);
451 if (tip->ofile_stdout)
/*
 * Parse a raw sub-buffer into individual blk_io_trace events and write the
 * complete ones out.  Any partial trailing event is kept in
 * tip->leftover_ts and prepended to the next sub-buffer, so events split
 * across sub-buffer boundaries are reassembled.
 */
457 static int flush_subbuf(struct thread_information *tip, struct tip_subbuf *ts)
459 unsigned int offset = 0;
460 struct blk_io_trace *t;
461 int pdu_len, events = 0;
464 * surplus from last run
466 if (tip->leftover_ts) {
467 struct tip_subbuf *prev_ts = tip->leftover_ts;
/* grow the carried-over buffer so the new data fits behind the surplus */
469 if (prev_ts->len + ts->len > prev_ts->max_len) {
470 prev_ts->max_len += ts->len;
/* NOTE(review): realloc result overwrites the only pointer — on OOM the
 * old buffer leaks and the NULL is used; consider a temp + check */
471 prev_ts->buf = realloc(prev_ts->buf, prev_ts->max_len);
474 memcpy(prev_ts->buf + prev_ts->len, ts->buf, ts->len);
475 prev_ts->len += ts->len;
481 tip->leftover_ts = NULL;
/* walk whole events: fixed header plus variable-length pdu payload */
484 while (offset + sizeof(*t) <= ts->len) {
485 t = ts->buf + offset;
/* corrupt header: flush what parsed cleanly, then bail (elided) */
487 if (verify_trace(t)) {
488 write_data(tip, ts->buf, offset);
492 pdu_len = t->pdu_len;
/* event's pdu runs past this sub-buffer: stop, carry the rest over */
494 if (offset + sizeof(*t) + pdu_len > ts->len)
499 offset += sizeof(*t) + pdu_len;
500 tip->events_processed++;
504 if (write_data(tip, ts->buf, offset))
508 * leftover bytes, save them for next time
510 if (offset != ts->len) {
511 tip->leftover_ts = ts;
/* NOTE(review): correct only if an elided line first did
 * `ts->len -= offset` — otherwise this moves too many bytes; confirm */
513 memmove(ts->buf, ts->buf + offset, ts->len);
/*
 * Drain one queued sub-buffer for this thread, if any, and flush it to the
 * output file.  The no-subbuffer return path is elided from this view.
 */
522 static int write_tip_events(struct thread_information *tip)
524 struct tip_subbuf *ts = subbuf_fifo_dequeue(tip);
527 return flush_subbuf(tip, ts);
533 * scans the tips we know and writes out the subbuffers we accumulate
/*
 * Writer-side main loop: repeatedly sweep every (device, cpu) thread and
 * flush any queued sub-buffers.  After shutdown is requested, a second
 * sweep drains remaining buffers until no events are produced and all
 * reader threads have exited.
 */
535 static void get_and_write_events(void)
537 struct device_information *dip;
538 struct thread_information *tip;
539 int i, j, events, ret, tips_running;
544 for_each_dip(dip, i) {
545 for_each_tip(dip, tip, j) {
546 ret = write_tip_events(tip);
/* drain pass: also track how many reader threads are still alive */
562 for_each_dip(dip, i) {
563 for_each_tip(dip, tip, j) {
564 ret = write_tip_events(tip);
567 tips_running += !tip->exited;
571 } while (events || tips_running);
/*
 * Create one reader thread per CPU for this device and set up each
 * thread's output stream: stdout when -o - (pipeline mode), otherwise a
 * per-CPU file "<dir>/<name>.blktrace.<cpu>".  A stdio buffer is installed
 * via setvbuf so trace writes are batched.  Error-return statements are
 * elided from this view.
 */
574 static int start_threads(struct device_information *dip)
576 struct thread_information *tip;
/* "-o -" means all output multiplexed to stdout */
578 int j, pipeline = output_name && !strcmp(output_name, "-");
579 int len, mode, vbuf_size;
581 for_each_tip(dip, tip, j) {
584 tip->events_processed = 0;
585 memset(&tip->fifo, 0, sizeof(tip->fifo));
586 tip->leftover_ts = NULL;
589 tip->ofile = fdopen(STDOUT_FILENO, "w");
590 tip->ofile_stdout = 1;
/* build the per-CPU output path; op and its sizing are elided here.
 * NOTE(review): unbounded sprintf into op — snprintf would be safer */
597 len = sprintf(op, "%s/", output_dir);
600 sprintf(op + len, "%s.blktrace.%d", output_name,
/* no -o given: fall back to the kernel-assigned buts_name */
603 sprintf(op + len, "%s.blktrace.%d",
604 dip->buts_name, tip->cpu);
606 tip->ofile = fopen(op, "w");
607 tip->ofile_stdout = 0;
609 vbuf_size = OFILE_BUF;
612 if (tip->ofile == NULL) {
/* NOTE(review): malloc result used unchecked by setvbuf below */
617 tip->ofile_buffer = malloc(vbuf_size);
618 if (setvbuf(tip->ofile, tip->ofile_buffer, mode, vbuf_size)) {
624 if (pthread_create(&tip->thread, NULL, thread_main, tip)) {
625 perror("pthread_create");
/*
 * Join every reader thread for this device.  Join failures are ignored by
 * design (the cast-through-void* of &ret suggests the exit status is
 * collected but unused here — elided lines may use it).
 */
634 static void stop_threads(struct device_information *dip)
636 struct thread_information *tip;
640 for_each_tip(dip, tip, i) {
641 (void) pthread_join(tip->thread, (void *) &ret);
/* Join reader threads for every traced device (loop body elided). */
646 static void stop_all_threads(void)
648 struct device_information *dip;
/* Stop the kernel-side trace on every device; registered with atexit()
 * in main so traces are torn down even on abnormal exit (body elided). */
655 static void stop_all_tracing(void)
657 struct device_information *dip;
/*
 * Orderly shutdown path: if tracing has not already been stopped, stop it
 * (and presumably drain/exit with the given status — body elided).
 */
664 static void exit_trace(int status)
666 if (!is_trace_stopped()) {
/*
 * Grow the device_information array by one entry and record the new device
 * path (the ndevs increment and success return are elided from this view).
 * NOTE(review): realloc overwrites the only pointer — on failure the old
 * array leaks; harmless here only if the caller always exits on error.
 */
675 static int resize_devices(char *path)
677 int size = (ndevs + 1) * sizeof(struct device_information);
679 device_information = realloc(device_information, size);
680 if (!device_information) {
681 fprintf(stderr, "Out of memory, device %s (%d)\n", path, size);
684 device_information[ndevs].path = path;
/*
 * Open every requested block device read-only/non-blocking so the trace
 * ioctls can be issued against it (error handling elided from this view).
 */
689 static int open_devices(void)
691 struct device_information *dip;
694 for_each_dip(dip, i) {
695 dip->fd = open(dip->path, O_RDONLY | O_NONBLOCK);
</dip_update>
/*
 * Two-phase startup: first start the kernel trace on every device
 * (rolling back already-started devices on failure), then allocate the
 * flat ndevs*ncpus thread array, slice it per device, and spawn the reader
 * threads (again rolling back on failure).  Some cleanup/return lines are
 * elided from this view.
 */
705 static int start_devices(void)
707 struct device_information *dip;
710 size = ncpus * sizeof(struct thread_information);
711 thread_information = malloc(size * ndevs);
712 if (!thread_information) {
713 fprintf(stderr, "Out of memory, threads (%d)\n", size * ndevs);
717 for_each_dip(dip, i) {
718 if (start_trace(dip)) {
720 fprintf(stderr, "Failed to start trace on %s\n",
/* unwind: stop traces on the devices started so far (j < i) */
727 __for_each_dip(dip, j, i)
733 for_each_dip(dip, i) {
/* each device owns a contiguous ncpus-sized slice of the array */
734 dip->threads = thread_information + (i * ncpus);
735 if (start_threads(dip)) {
736 fprintf(stderr, "Failed to start worker threads\n");
/* unwind thread startup for earlier devices */
742 __for_each_dip(dip, j, i)
/*
 * Print per-CPU and per-device event counts plus kernel drop counts.
 * Suppressed (no_stdout) when output is piped to stdout so stats don't
 * corrupt the trace stream — presumably stats then go to stderr; the
 * elided lines decide that.
 */
753 static void show_stats(void)
755 struct device_information *dip;
756 struct thread_information *tip;
757 unsigned long long events_processed;
758 unsigned long total_drops;
759 int i, j, no_stdout = 0;
764 if (output_name && !strcmp(output_name, "-"))
770 for_each_dip(dip, i) {
772 printf("Device: %s\n", dip->path);
773 events_processed = 0;
774 for_each_tip(dip, tip, j) {
/* NOTE(review): %ld with an unsigned long argument — %lu would match */
776 printf(" CPU%3d: %20ld events\n",
777 tip->cpu, tip->events_processed);
778 events_processed += tip->events_processed;
780 total_drops += dip->drop_count;
782 printf(" Total: %20lld events (dropped %lu)\n",
783 events_processed, dip->drop_count);
787 fprintf(stderr, "You have dropped events, consider using a larger buffer size (-b)\n");
/*
 * Usage text printed by show_usage().  NOTE(review): it advertises "-v"
 * for version info but S_OPTS only declares 'V' — confirm which spelling
 * the option parser actually accepts.
 */
790 static char usage_str[] = \
791 "-d <dev> [ -r relay path ] [ -o <output> ] [-k ] [ -w time ]\n" \
792 "[ -a action ] [ -A action mask ] [ -v ]\n\n" \
793 "\t-d Use specified device. May also be given last after options\n" \
794 "\t-r Path to mounted relayfs, defaults to /relay\n" \
795 "\t-o File(s) to send output to\n" \
796 "\t-D Directory to prepend to output file names\n" \
797 "\t-k Kill a running trace\n" \
798 "\t-w Stop after defined time, in seconds\n" \
799 "\t-a Only trace specified actions. See documentation\n" \
800 "\t-A Give trace mask as a single value. See documentation\n" \
801 "\t-b Sub buffer size in KiB\n" \
802 "\t-n Number of sub buffers\n" \
803 "\t-v Print program version info\n\n";
/* Print program name, version and the usage text above to stderr. */
805 static void show_usage(char *program)
807 fprintf(stderr, "Usage: %s %s %s",program, blktrace_version, usage_str);
/* Signal handler for INT/HUP/TERM/ALRM — presumably just sets the
 * volatile done flag so the main loops unwind (body elided). */
809 static void handle_sigint(__attribute__((__unused__)) int sig)
814 int main(int argc, char *argv[])
816 static char default_relay_path[] = "/relay";
820 int act_mask_tmp = 0;
822 while ((c = getopt_long(argc, argv, S_OPTS, l_opts, NULL)) >= 0) {
825 i = find_mask_map(optarg);
827 fprintf(stderr,"Invalid action mask %s\n",
835 if ((sscanf(optarg, "%x", &i) != 1) ||
838 "Invalid set action mask %s/0x%x\n",
846 if (resize_devices(optarg) != 0)
855 output_name = optarg;
858 kill_running_trace = 1;
861 stop_watch = atoi(optarg);
862 if (stop_watch <= 0) {
864 "Invalid stopwatch value (%d secs)\n",
870 printf("%s version %s\n", argv[0], blktrace_version);
873 buf_size = strtoul(optarg, NULL, 10);
874 if (buf_size <= 0 || buf_size > 16*1024) {
876 "Invalid buffer size (%lu)\n",buf_size);
882 buf_nr = strtoul(optarg, NULL, 10);
885 "Invalid buffer nr (%lu)\n", buf_nr);
898 while (optind < argc) {
899 if (resize_devices(argv[optind++]) != 0)
909 relay_path = default_relay_path;
911 if (act_mask_tmp != 0)
912 act_mask = act_mask_tmp;
914 if (statfs(relay_path, &st) < 0) {
916 fprintf(stderr,"%s does not appear to be a valid path\n",
919 } else if (st.f_type != (long) RELAYFS_TYPE) {
920 fprintf(stderr,"%s does not appear to be a relay filesystem\n",
925 if (open_devices() != 0)
928 if (kill_running_trace) {
933 setlocale(LC_NUMERIC, "en_US");
935 ncpus = sysconf(_SC_NPROCESSORS_ONLN);
937 fprintf(stderr, "sysconf(_SC_NPROCESSORS_ONLN) failed\n");
941 if (start_devices() != 0)
944 signal(SIGINT, handle_sigint);
945 signal(SIGHUP, handle_sigint);
946 signal(SIGTERM, handle_sigint);
947 signal(SIGALRM, handle_sigint);
949 atexit(stop_all_tracing);
954 get_and_write_events();
956 if (!is_trace_stopped()) {