2 * block queue tracing application
4 * Copyright (C) 2005 Jens Axboe <axboe@suse.de>
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 #include <sys/types.h>
29 #include <sys/ioctl.h>
30 #include <sys/param.h>
31 #include <sys/statfs.h>
34 #include <sys/socket.h>
41 #include <netinet/in.h>
42 #include <arpa/inet.h>
48 static char blktrace_version[] = "0.99";
51 * You may want to increase this even more, if you are logging at a high
52 * rate and see skipped/missed events
54 #define BUF_SIZE (512 * 1024)
57 #define OFILE_BUF (128 * 1024)
59 #define RELAYFS_TYPE 0xF0B4A981
61 #define S_OPTS "d:a:A:r:o:kw:Vb:n:D:lh:p:"
/*
 * Long-option table consumed by getopt_long(); must stay in sync with
 * the short-option string S_OPTS and the usage_str help text.
 * NOTE(review): heavily truncated in this chunk -- most .name/.flag/.val
 * initializers and the NULL terminating entry are not visible.
 */
static struct option l_opts[] = {
	.has_arg = required_argument,
	.has_arg = required_argument,
	.has_arg = required_argument,
	.has_arg = required_argument,
	.has_arg = required_argument,
	.has_arg = no_argument,
	.has_arg = required_argument,
	.has_arg = no_argument,
	.name = "buffer-size",
	.has_arg = required_argument,
	.name = "num-sub-buffers",
	.has_arg = required_argument,
	.name = "output-dir",
	.has_arg = required_argument,
	.has_arg = no_argument,
	.has_arg = required_argument,
	.has_arg = required_argument,
155 unsigned int max_len;
158 #define FIFO_SIZE (1024) /* should be plenty big! */
159 #define CL_SIZE (128) /* cache line, any bigger? */
/*
 * Single-producer/single-consumer ring of filled sub-buffers, handed
 * from a per-cpu reader thread to the writeout path.  FIFO_SIZE is a
 * power of two so head/tail wrap with a simple mask.
 * NOTE(review): fragment -- the struct's closing brace is not visible
 * in this chunk.
 */
struct tip_subbuf_fifo {
	int tail __attribute__((aligned(CL_SIZE)));	/* producer index; own cache line to avoid false sharing */
	int head __attribute__((aligned(CL_SIZE)));	/* consumer index; own cache line */
	struct tip_subbuf *q[FIFO_SIZE];		/* ring storage of queued sub-buffers */
/*
 * Per-cpu reader thread state; one instance per (device, cpu) pair.
 * NOTE(review): fragment -- members referenced elsewhere in this file
 * (cpu, fd, ofile, ofile_stdout, ofile_mmap, ofile_buffer, thread,
 * exited, fs_buf, ...) and the closing brace are not visible here.
 */
struct thread_information {
	char fn[MAXPATHLEN + 64];		/* relayfs per-cpu trace file path */
	unsigned long events_processed;		/* events this thread has written out */
	unsigned long long data_read;		/* total trace bytes consumed */
	struct device_information *device;	/* owning device */
	struct tip_subbuf_fifo fifo;		/* queue of filled sub-buffers awaiting writeout */
	struct tip_subbuf *leftover_ts;		/* partial trace record saved between flushes */
	/* mmap controlled output files */
	unsigned long long fs_size;		/* bytes written to the output file so far */
	unsigned long long fs_max_size;		/* size the output file has been ftruncate()d to */
	unsigned long fs_off;			/* write offset inside the current mapping */
	unsigned long fs_buf_len;		/* length of the current mapping */
/*
 * Per-device trace state (one per -d argument, or per network client
 * device in server mode).
 * NOTE(review): fragment -- members used elsewhere in this file (fd,
 * path, buts_name) and the closing brace are not visible in this chunk.
 */
struct device_information {
	volatile int trace_started;		/* nonzero while tracing is live; see dip_tracing() */
	unsigned long drop_count;		/* dropped-event count sampled at stop time */
	struct thread_information *threads;	/* array of ncpus per-cpu reader threads */
212 static struct thread_information *thread_information;
214 static struct device_information *device_information;
216 /* command line option globals */
217 static char *relay_path;
218 static char *output_name;
219 static char *output_dir;
220 static int act_mask = ~0U;
221 static int kill_running_trace;
222 static unsigned long buf_size = BUF_SIZE;
223 static unsigned long buf_nr = BUF_NR;
224 static unsigned int page_size;
226 #define is_done() (*(volatile int *)(&done))
227 static volatile int done;
229 #define is_trace_stopped() (*(volatile int *)(&trace_stopped))
230 static volatile int trace_stopped;
232 #define is_stat_shown() (*(volatile int *)(&stat_shown))
233 static volatile int stat_shown;
235 int data_is_native = -1;
237 static void exit_trace(int status);
239 #define dip_tracing(dip) (*(volatile int *)(&(dip)->trace_started))
240 #define dip_set_tracing(dip, v) ((dip)->trace_started = (v))
242 #define __for_each_dip(__d, __i, __e) \
243 for (__i = 0, __d = device_information; __i < __e; __i++, __d++)
245 #define for_each_dip(__d, __i) __for_each_dip(__d, __i, ndevs)
246 #define for_each_tip(__d, __t, __j) \
247 for (__j = 0, __t = (__d)->threads; __j < ncpus; __j++, __t++)
250 * networking stuff follows. we include a magic number so we know whether
251 * to endianness convert or not
/*
 * Header preceding every sub-buffer transmitted over the network.  The
 * magic doubles as an endianness probe on the receiving side.
 * NOTE(review): fragment -- the max_cpus member assigned in
 * flush_subbuf_net() and the closing brace are not visible here.
 */
struct blktrace_net_hdr {
	u32 magic;		/* same as trace magic */
	char buts_name[32];	/* trace name */
	u32 cpu;		/* for which cpu */
	u32 len;		/* length of following trace data */
261 #define TRACE_NET_PORT (8462)
270 * network cmd line params
272 static char hostname[MAXHOSTNAMELEN];
273 static int net_port = TRACE_NET_PORT;
274 static int net_mode = 0;
276 static int net_in_fd = -1;
277 static int net_out_fd = -1;
279 static void handle_sigint(__attribute__((__unused__)) int sig)
284 static int get_dropped_count(const char *buts_name)
287 char tmp[MAXPATHLEN + 64];
289 snprintf(tmp, sizeof(tmp), "%s/block/%s/dropped",
290 relay_path, buts_name);
292 fd = open(tmp, O_RDONLY);
295 * this may be ok, if the kernel doesn't support dropped counts
300 fprintf(stderr, "Couldn't open dropped file %s\n", tmp);
304 if (read(fd, tmp, sizeof(tmp)) < 0) {
315 static int start_trace(struct device_information *dip)
317 struct blk_user_trace_setup buts;
319 memset(&buts, 0, sizeof(buts));
320 buts.buf_size = buf_size;
321 buts.buf_nr = buf_nr;
322 buts.act_mask = act_mask;
324 if (ioctl(dip->fd, BLKTRACESETUP, &buts) < 0) {
325 perror("BLKTRACESETUP");
329 if (ioctl(dip->fd, BLKTRACESTART) < 0) {
330 perror("BLKTRACESTART");
334 memcpy(dip->buts_name, buts.name, sizeof(dip->buts_name));
335 dip_set_tracing(dip, 1);
339 static void stop_trace(struct device_information *dip)
341 if (dip_tracing(dip) || kill_running_trace) {
342 dip_set_tracing(dip, 0);
344 if (ioctl(dip->fd, BLKTRACESTOP) < 0)
345 perror("BLKTRACESTOP");
346 if (ioctl(dip->fd, BLKTRACETEARDOWN) < 0)
347 perror("BLKTRACETEARDOWN");
354 static void stop_all_traces(void)
356 struct device_information *dip;
359 for_each_dip(dip, i) {
360 dip->drop_count = get_dropped_count(dip->buts_name);
365 static void wait_for_data(struct thread_information *tip)
367 struct pollfd pfd = { .fd = tip->fd, .events = POLLIN };
371 if (pfd.revents & POLLIN)
373 if (tip->ofile_stdout)
375 } while (!is_done());
378 static int read_data_file(struct thread_information *tip, void *buf, int len)
385 ret = read(tip->fd, buf, len);
391 if (errno != EAGAIN) {
393 fprintf(stderr,"Thread %d failed read of %s\n",
399 } while (!is_done());
405 static int read_data_net(struct thread_information *tip, void *buf, int len)
407 unsigned int bytes_left = len;
411 ret = recv(net_in_fd, buf, bytes_left, MSG_WAITALL);
416 if (errno != EAGAIN) {
418 fprintf(stderr, "server: failed read\n");
426 } while (!is_done() && bytes_left);
428 return len - bytes_left;
431 static int read_data(struct thread_information *tip, void *buf, int len)
435 if (net_mode == Net_server)
436 ret = read_data_net(tip, buf, len);
438 ret = read_data_file(tip, buf, len);
441 tip->data_read += ret;
446 static inline struct tip_subbuf *
447 subbuf_fifo_dequeue(struct thread_information *tip)
449 const int head = tip->fifo.head;
450 const int next = (head + 1) & (FIFO_SIZE - 1);
452 if (head != tip->fifo.tail) {
453 struct tip_subbuf *ts = tip->fifo.q[head];
456 tip->fifo.head = next;
463 static inline int subbuf_fifo_queue(struct thread_information *tip,
464 struct tip_subbuf *ts)
466 const int tail = tip->fifo.tail;
467 const int next = (tail + 1) & (FIFO_SIZE - 1);
469 if (next != tip->fifo.head) {
470 tip->fifo.q[tail] = ts;
472 tip->fifo.tail = next;
476 fprintf(stderr, "fifo too small!\n");
481 * For file output, truncate and mmap the file appropriately
483 static int mmap_subbuf(struct thread_information *tip, unsigned int maxlen)
485 int ofd = fileno(tip->ofile);
489 * extend file, if we have to. use chunks of 16 subbuffers.
491 if (tip->fs_off + buf_size > tip->fs_buf_len) {
493 munlock(tip->fs_buf, tip->fs_buf_len);
494 munmap(tip->fs_buf, tip->fs_buf_len);
498 tip->fs_off = tip->fs_size & (page_size - 1);
499 tip->fs_buf_len = (16 * buf_size) - tip->fs_off;
500 tip->fs_max_size += tip->fs_buf_len;
502 if (ftruncate(ofd, tip->fs_max_size) < 0) {
507 tip->fs_buf = mmap(NULL, tip->fs_buf_len, PROT_WRITE,
508 MAP_SHARED, ofd, tip->fs_size - tip->fs_off);
509 if (tip->fs_buf == MAP_FAILED) {
513 mlock(tip->fs_buf, tip->fs_buf_len);
516 ret = read_data(tip, tip->fs_buf + tip->fs_off, maxlen);
527 * Use the copy approach for pipes and network
529 static int get_subbuf(struct thread_information *tip)
531 struct tip_subbuf *ts = malloc(sizeof(*ts));
534 ts->buf = malloc(buf_size);
535 ts->max_len = buf_size;
537 ret = read_data(tip, ts->buf, ts->max_len);
540 return subbuf_fifo_queue(tip, ts);
546 static void close_thread(struct thread_information *tip)
552 if (tip->ofile_buffer)
553 free(tip->ofile_buffer);
559 tip->ofile_buffer = NULL;
563 static void tip_ftrunc_final(struct thread_information *tip)
566 * truncate to right size and cleanup mmap
568 if (tip->ofile_mmap) {
569 int ofd = fileno(tip->ofile);
572 munmap(tip->fs_buf, tip->fs_buf_len);
574 ftruncate(ofd, tip->fs_size);
578 static void *thread_main(void *arg)
580 struct thread_information *tip = arg;
581 pid_t pid = getpid();
585 CPU_SET((tip->cpu), &cpu_mask);
587 if (sched_setaffinity(pid, sizeof(cpu_mask), &cpu_mask) == -1) {
588 perror("sched_setaffinity");
592 snprintf(tip->fn, sizeof(tip->fn), "%s/block/%s/trace%d",
593 relay_path, tip->device->buts_name, tip->cpu);
594 tip->fd = open(tip->fn, O_RDONLY);
597 fprintf(stderr,"Thread %d failed open of %s\n", tip->cpu,
603 if (tip->ofile_mmap && net_mode != Net_client) {
604 if (mmap_subbuf(tip, buf_size))
612 tip_ftrunc_final(tip);
/*
 * Client-side send: push the whole buffer to the server, looping on
 * short sends.  Returns 0 on success, 1 on send failure.
 */
static int write_data_net(int fd, void *buf, unsigned int buf_len)
{
	unsigned int bytes_left = buf_len;
	int ret;

	while (bytes_left) {
		ret = send(fd, buf, bytes_left, 0);
		if (ret < 0) {
			perror("send");
			return 1;
		}

		buf += ret;	/* gcc void * arithmetic, as elsewhere in this file */
		bytes_left -= ret;
	}

	return 0;
}
636 static int flush_subbuf_net(struct thread_information *tip,
637 struct tip_subbuf *ts)
639 struct blktrace_net_hdr hdr;
641 hdr.magic = BLK_IO_TRACE_MAGIC;
642 strcpy(hdr.buts_name, tip->device->buts_name);
644 hdr.max_cpus = ncpus;
647 if (write_data_net(net_out_fd, &hdr, sizeof(hdr)))
650 if (write_data_net(net_out_fd, ts->buf, ts->len))
658 static int write_data(struct thread_information *tip, void *buf,
659 unsigned int buf_len)
667 ret = fwrite(buf, buf_len, 1, tip->ofile);
677 if (tip->ofile_stdout)
683 static int flush_subbuf_file(struct thread_information *tip,
684 struct tip_subbuf *ts)
686 unsigned int offset = 0;
687 struct blk_io_trace *t;
688 int pdu_len, events = 0;
691 * surplus from last run
693 if (tip->leftover_ts) {
694 struct tip_subbuf *prev_ts = tip->leftover_ts;
696 if (prev_ts->len + ts->len > prev_ts->max_len) {
697 prev_ts->max_len += ts->len;
698 prev_ts->buf = realloc(prev_ts->buf, prev_ts->max_len);
701 memcpy(prev_ts->buf + prev_ts->len, ts->buf, ts->len);
702 prev_ts->len += ts->len;
708 tip->leftover_ts = NULL;
711 while (offset + sizeof(*t) <= ts->len) {
712 t = ts->buf + offset;
714 if (verify_trace(t)) {
715 write_data(tip, ts->buf, offset);
719 pdu_len = t->pdu_len;
721 if (offset + sizeof(*t) + pdu_len > ts->len)
724 offset += sizeof(*t) + pdu_len;
725 tip->events_processed++;
726 tip->data_read += sizeof(*t) + pdu_len;
730 if (write_data(tip, ts->buf, offset))
734 * leftover bytes, save them for next time
736 if (offset != ts->len) {
737 tip->leftover_ts = ts;
739 memmove(ts->buf, ts->buf + offset, ts->len);
748 static int write_tip_events(struct thread_information *tip)
750 struct tip_subbuf *ts = subbuf_fifo_dequeue(tip);
753 if (net_mode == Net_client)
754 return flush_subbuf_net(tip, ts);
756 return flush_subbuf_file(tip, ts);
763 * scans the tips we know and writes out the subbuffers we accumulate
765 static void get_and_write_events(void)
767 struct device_information *dip;
768 struct thread_information *tip;
769 int i, j, events, ret, tips_running;
774 for_each_dip(dip, i) {
775 for_each_tip(dip, tip, j) {
776 ret = write_tip_events(tip);
792 for_each_dip(dip, i) {
793 for_each_tip(dip, tip, j) {
794 ret = write_tip_events(tip);
797 tips_running += !tip->exited;
801 } while (events || tips_running);
804 static void wait_for_threads(void)
807 * for piped or network output, poll and fetch data for writeout.
808 * for files, we just wait around for trace threads to exit
810 if ((output_name && !strcmp(output_name, "-")) ||
811 net_mode == Net_client)
812 get_and_write_events();
814 struct device_information *dip;
815 struct thread_information *tip;
816 int i, j, tips_running;
823 for_each_tip(dip, tip, j)
824 tips_running += !tip->exited;
825 } while (tips_running);
829 static void fill_ofname(char *dst, char *buts_name, int cpu)
834 len = sprintf(dst, "%s/", output_dir);
837 sprintf(dst + len, "%s.blktrace.%d", output_name, cpu);
839 sprintf(dst + len, "%s.blktrace.%d", buts_name, cpu);
842 static int start_threads(struct device_information *dip)
844 struct thread_information *tip;
845 int j, pipeline = output_name && !strcmp(output_name, "-");
849 for_each_tip(dip, tip, j) {
852 tip->events_processed = 0;
853 memset(&tip->fifo, 0, sizeof(tip->fifo));
854 tip->leftover_ts = NULL;
857 tip->ofile = fdopen(STDOUT_FILENO, "w");
858 tip->ofile_stdout = 1;
863 fill_ofname(op, dip->buts_name, tip->cpu);
864 tip->ofile = fopen(op, "w+");
865 tip->ofile_stdout = 0;
868 vbuf_size = OFILE_BUF;
871 if (tip->ofile == NULL) {
876 tip->ofile_buffer = malloc(vbuf_size);
877 if (setvbuf(tip->ofile, tip->ofile_buffer, mode, vbuf_size)) {
883 if (pthread_create(&tip->thread, NULL, thread_main, tip)) {
884 perror("pthread_create");
893 static void stop_threads(struct device_information *dip)
895 struct thread_information *tip;
899 for_each_tip(dip, tip, i) {
900 (void) pthread_join(tip->thread, (void *) &ret);
905 static void stop_all_threads(void)
907 struct device_information *dip;
914 static void stop_all_tracing(void)
916 struct device_information *dip;
923 static void exit_trace(int status)
925 if (!is_trace_stopped()) {
934 static int resize_devices(char *path)
936 int size = (ndevs + 1) * sizeof(struct device_information);
938 device_information = realloc(device_information, size);
939 if (!device_information) {
940 fprintf(stderr, "Out of memory, device %s (%d)\n", path, size);
943 device_information[ndevs].path = path;
948 static int open_devices(void)
950 struct device_information *dip;
953 for_each_dip(dip, i) {
954 dip->fd = open(dip->path, O_RDONLY | O_NONBLOCK);
964 static int start_devices(void)
966 struct device_information *dip;
969 size = ncpus * sizeof(struct thread_information);
970 thread_information = malloc(size * ndevs);
971 if (!thread_information) {
972 fprintf(stderr, "Out of memory, threads (%d)\n", size * ndevs);
976 for_each_dip(dip, i) {
977 if (start_trace(dip)) {
979 fprintf(stderr, "Failed to start trace on %s\n",
986 __for_each_dip(dip, j, i)
992 for_each_dip(dip, i) {
993 dip->threads = thread_information + (i * ncpus);
994 if (start_threads(dip)) {
995 fprintf(stderr, "Failed to start worker threads\n");
1001 __for_each_dip(dip, j, i)
1003 for_each_dip(dip, i)
1012 static void show_stats(void)
1014 struct device_information *dip;
1015 struct thread_information *tip;
1016 unsigned long long events_processed, data_read;
1017 unsigned long total_drops;
1018 int i, j, no_stdout = 0;
1020 if (is_stat_shown())
1023 if (output_name && !strcmp(output_name, "-"))
1029 for_each_dip(dip, i) {
1031 printf("Device: %s\n", dip->path);
1032 events_processed = 0;
1034 for_each_tip(dip, tip, j) {
1036 printf(" CPU%3d: %20lu events, %8llu KiB data\n",
1037 tip->cpu, tip->events_processed,
1038 tip->data_read >> 10);
1039 events_processed += tip->events_processed;
1040 data_read += tip->data_read;
1042 total_drops += dip->drop_count;
1044 printf(" Total: %20llu events (dropped %lu), %8llu KiB data\n",
1045 events_processed, dip->drop_count,
1050 fprintf(stderr, "You have dropped events, consider using a larger buffer size (-b)\n");
1053 static struct device_information *net_get_dip(char *buts_name)
1055 struct device_information *dip;
1058 for (i = 0; i < ndevs; i++) {
1059 dip = &device_information[i];
1061 if (!strcmp(dip->buts_name, buts_name))
1065 device_information = realloc(device_information, (ndevs + 1) * sizeof(*dip));
1066 dip = &device_information[ndevs];
1067 strcpy(dip->buts_name, buts_name);
1068 strcpy(dip->path, buts_name);
1070 dip->threads = malloc(ncpus * sizeof(struct thread_information));
1071 memset(dip->threads, 0, ncpus * sizeof(struct thread_information));
1076 for (i = 0; i < ncpus; i++) {
1077 struct thread_information *tip = &dip->threads[i];
1081 tip->ofile_stdout = 0;
1082 tip->ofile_mmap = 1;
1085 fill_ofname(op, dip->buts_name, tip->cpu);
1087 tip->ofile = fopen(op, "w+");
1097 static struct thread_information *net_get_tip(struct blktrace_net_hdr *bnh)
1099 struct device_information *dip;
1101 ncpus = bnh->max_cpus;
1102 dip = net_get_dip(bnh->buts_name);
1103 return &dip->threads[bnh->cpu];
1106 static int net_get_header(struct blktrace_net_hdr *bnh)
1108 int fl = fcntl(net_in_fd, F_GETFL);
1109 int bytes_left, ret;
1112 fcntl(net_in_fd, F_SETFL, fl | O_NONBLOCK);
1113 bytes_left = sizeof(*bnh);
1114 while (bytes_left && !is_done()) {
1115 ret = recv(net_in_fd, p, bytes_left, MSG_WAITALL);
1117 if (errno != EAGAIN) {
1118 perror("recv header");
1131 fcntl(net_in_fd, F_SETFL, fl & ~O_NONBLOCK);
1135 static int net_server_loop(void)
1137 struct thread_information *tip;
1138 struct blktrace_net_hdr bnh;
1140 if (net_get_header(&bnh))
1143 if (data_is_native == -1 && check_data_endianness(bnh.magic)) {
1144 fprintf(stderr, "server: received data is bad\n");
1148 if (!data_is_native) {
1149 bnh.cpu = be32_to_cpu(bnh.cpu);
1150 bnh.len = be32_to_cpu(bnh.len);
1153 tip = net_get_tip(&bnh);
1157 if (mmap_subbuf(tip, bnh.len))
1164 * Start here when we are in server mode - just fetch data from the network
1167 static int net_server(void)
1169 struct sockaddr_in addr;
1173 fd = socket(AF_INET, SOCK_STREAM, 0);
1175 perror("server: socket");
1180 if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &opt, sizeof(opt)) < 0) {
1181 perror("setsockopt");
1185 memset(&addr, 0, sizeof(addr));
1186 addr.sin_family = AF_INET;
1187 addr.sin_addr.s_addr = htonl(INADDR_ANY);
1188 addr.sin_port = htons(net_port);
1190 if (bind(fd, (struct sockaddr *) &addr, sizeof(addr)) < 0) {
1195 if (listen(fd, 1) < 0) {
1200 printf("blktrace: waiting for incoming connection...\n");
1202 socklen = sizeof(addr);
1203 net_in_fd = accept(fd, (struct sockaddr *) &addr, &socklen);
1204 if (net_in_fd < 0) {
1209 signal(SIGINT, handle_sigint);
1210 signal(SIGHUP, handle_sigint);
1211 signal(SIGTERM, handle_sigint);
1212 signal(SIGALRM, handle_sigint);
1214 printf("blktrace: connected!\n");
1216 while (!is_done()) {
1217 if (net_server_loop())
1221 for (i = 0; i < ndevs; i++) {
1222 struct device_information *dip = &device_information[i];
1224 for (j = 0; j < ncpus; j++)
1225 tip_ftrunc_final(&dip->threads[j]);
1233 * Setup outgoing network connection where we will transmit data
1235 static int net_setup_client(void)
1237 struct sockaddr_in addr;
1240 fd = socket(AF_INET, SOCK_STREAM, 0);
1242 perror("client: socket");
1246 memset(&addr, 0, sizeof(addr));
1247 addr.sin_family = AF_INET;
1248 addr.sin_port = htons(net_port);
1250 if (inet_aton(hostname, &addr.sin_addr) != 1) {
1251 struct hostent *hent = gethostbyname(hostname);
1253 perror("gethostbyname");
1257 memcpy(&addr.sin_addr, hent->h_addr, 4);
1258 strcpy(hostname, hent->h_name);
1261 printf("blktrace: connecting to %s\n", hostname);
1263 if (connect(fd, (struct sockaddr *) &addr, sizeof(addr)) < 0) {
1264 perror("client: connect");
1268 printf("blktrace: connected!\n");
/*
 * Help text printed by show_usage(); keep in sync with S_OPTS/l_opts.
 */
static char usage_str[] = \
	"-d <dev> [ -r relay path ] [ -o <output> ] [-k ] [ -w time ]\n" \
	"[ -a action ] [ -A action mask ] [ -v ]\n\n" \
	"\t-d Use specified device. May also be given last after options\n" \
	"\t-r Path to mounted relayfs, defaults to /relay\n" \
	"\t-o File(s) to send output to\n" \
	"\t-D Directory to prepend to output file names\n" \
	"\t-k Kill a running trace\n" \
	"\t-w Stop after defined time, in seconds\n" \
	"\t-a Only trace specified actions. See documentation\n" \
	"\t-A Give trace mask as a single value. See documentation\n" \
	"\t-b Sub buffer size in KiB\n" \
	"\t-n Number of sub buffers\n" \
	"\t-v Print program version info\n\n";
1288 static void show_usage(char *program)
1290 fprintf(stderr, "Usage: %s %s %s",program, blktrace_version, usage_str);
1293 int main(int argc, char *argv[])
1295 static char default_relay_path[] = "/relay";
1299 int act_mask_tmp = 0;
1301 while ((c = getopt_long(argc, argv, S_OPTS, l_opts, NULL)) >= 0) {
1304 i = find_mask_map(optarg);
1306 fprintf(stderr,"Invalid action mask %s\n",
1314 if ((sscanf(optarg, "%x", &i) != 1) ||
1315 !valid_act_opt(i)) {
1317 "Invalid set action mask %s/0x%x\n",
1325 if (resize_devices(optarg) != 0)
1330 relay_path = optarg;
1334 output_name = optarg;
1337 kill_running_trace = 1;
1340 stop_watch = atoi(optarg);
1341 if (stop_watch <= 0) {
1343 "Invalid stopwatch value (%d secs)\n",
1349 printf("%s version %s\n", argv[0], blktrace_version);
1352 buf_size = strtoul(optarg, NULL, 10);
1353 if (buf_size <= 0 || buf_size > 16*1024) {
1355 "Invalid buffer size (%lu)\n",buf_size);
1361 buf_nr = strtoul(optarg, NULL, 10);
1364 "Invalid buffer nr (%lu)\n", buf_nr);
1369 output_dir = optarg;
1372 net_mode = Net_client;
1373 strcpy(hostname, optarg);
1376 net_mode = Net_server;
1379 net_port = atoi(optarg);
1382 show_usage(argv[0]);
1387 setlocale(LC_NUMERIC, "en_US");
1389 page_size = getpagesize();
1391 if (net_mode == Net_server)
1392 return net_server();
1394 while (optind < argc) {
1395 if (resize_devices(argv[optind++]) != 0)
1400 show_usage(argv[0]);
1405 relay_path = default_relay_path;
1407 if (act_mask_tmp != 0)
1408 act_mask = act_mask_tmp;
1410 if (statfs(relay_path, &st) < 0) {
1412 fprintf(stderr,"%s does not appear to be a valid path\n",
1415 } else if (st.f_type != (long) RELAYFS_TYPE) {
1416 fprintf(stderr,"%s does not appear to be a relay filesystem\n",
1421 if (open_devices() != 0)
1424 if (kill_running_trace) {
1429 ncpus = sysconf(_SC_NPROCESSORS_ONLN);
1431 fprintf(stderr, "sysconf(_SC_NPROCESSORS_ONLN) failed\n");
1435 signal(SIGINT, handle_sigint);
1436 signal(SIGHUP, handle_sigint);
1437 signal(SIGTERM, handle_sigint);
1438 signal(SIGALRM, handle_sigint);
1440 if (net_mode == Net_client && net_setup_client())
1443 if (start_devices() != 0)
1446 atexit(stop_all_tracing);
1453 if (!is_trace_stopped()) {