[PATCH] blktrace: store server output in client-date directory
[blktrace.git] / blktrace.c
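For illustration of the client-date directory naming mentioned in the subject, below is a minimal, standalone sketch of how the server-side output directory name is composed (see fill_ofname() in the listing): the client's address from inet_ntoa() followed by the UTC receive time formatted with strftime("%F-%T"). The client address 192.168.0.5 and the timestamp shown are hypothetical values used only for this example.

#include <stdio.h>
#include <time.h>
#include <arpa/inet.h>

int main(void)
{
        char dst[128];
        struct in_addr client;
        time_t t = time(NULL);
        int len;

        /* hypothetical client address, for illustration only */
        client.s_addr = inet_addr("192.168.0.5");

        /* same steps as fill_ofname() in server mode: "<ip>-%F-%T/" */
        len = sprintf(dst, "%s-", inet_ntoa(client));
        len += strftime(dst + len, 64, "%F-%T/", gmtime(&t));

        /* prints something like "192.168.0.5-2006-01-18-14:05:09/" */
        printf("%s (len %d)\n", dst, len);
        return 0;
}

With this naming, traces received from that client end up under a path like 192.168.0.5-2006-01-18-14:05:09/<buts_name>.blktrace.<cpu>, one file per CPU.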
1 /*
2  * block queue tracing application
3  *
4  * Copyright (C) 2005 Jens Axboe <axboe@suse.de>
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, write to the Free Software
18  *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
19  *
20  */
21 #include <pthread.h>
22 #include <sys/types.h>
23 #include <sys/stat.h>
24 #include <unistd.h>
25 #include <locale.h>
26 #include <signal.h>
27 #include <fcntl.h>
28 #include <string.h>
29 #include <sys/ioctl.h>
30 #include <sys/param.h>
31 #include <sys/statfs.h>
32 #include <sys/poll.h>
33 #include <sys/mman.h>
34 #include <sys/socket.h>
35 #include <stdio.h>
36 #include <stdlib.h>
37 #include <sched.h>
38 #include <ctype.h>
39 #include <getopt.h>
40 #include <errno.h>
41 #include <netinet/in.h>
42 #include <arpa/inet.h>
43 #include <netdb.h>
44 #include <sys/sendfile.h>
45
46 #include "blktrace.h"
47 #include "barrier.h"
48
49 static char blktrace_version[] = "0.99";
50
51 /*
52  * You may want to increase this even more, if you are logging at a high
53  * rate and see skipped/missed events
54  */
55 #define BUF_SIZE        (512 * 1024)
56 #define BUF_NR          (4)
57
58 #define OFILE_BUF       (128 * 1024)
59
60 #define RELAYFS_TYPE    0xF0B4A981
61
62 #define S_OPTS  "d:a:A:r:o:kw:Vb:n:D:lh:p:s"
63 static struct option l_opts[] = {
64         {
65                 .name = "dev",
66                 .has_arg = required_argument,
67                 .flag = NULL,
68                 .val = 'd'
69         },
70         {
71                 .name = "act-mask",
72                 .has_arg = required_argument,
73                 .flag = NULL,
74                 .val = 'a'
75         },
76         {
77                 .name = "set-mask",
78                 .has_arg = required_argument,
79                 .flag = NULL,
80                 .val = 'A'
81         },
82         {
83                 .name = "relay",
84                 .has_arg = required_argument,
85                 .flag = NULL,
86                 .val = 'r'
87         },
88         {
89                 .name = "output",
90                 .has_arg = required_argument,
91                 .flag = NULL,
92                 .val = 'o'
93         },
94         {
95                 .name = "kill",
96                 .has_arg = no_argument,
97                 .flag = NULL,
98                 .val = 'k'
99         },
100         {
101                 .name = "stopwatch",
102                 .has_arg = required_argument,
103                 .flag = NULL,
104                 .val = 'w'
105         },
106         {
107                 .name = "version",
108                 .has_arg = no_argument,
109                 .flag = NULL,
110                 .val = 'V'
111         },
112         {
113                 .name = "buffer-size",
114                 .has_arg = required_argument,
115                 .flag = NULL,
116                 .val = 'b'
117         },
118         {
119                 .name = "num-sub-buffers",
120                 .has_arg = required_argument,
121                 .flag = NULL,
122                 .val = 'n'
123         },
124         {
125                 .name = "output-dir",
126                 .has_arg = required_argument,
127                 .flag = NULL,
128                 .val = 'D'
129         },
130         {
131                 .name = "listen",
132                 .has_arg = no_argument,
133                 .flag = NULL,
134                 .val = 'l'
135         },
136         {
137                 .name = "host",
138                 .has_arg = required_argument,
139                 .flag = NULL,
140                 .val = 'h'
141         },
142         {
143                 .name = "port",
144                 .has_arg = required_argument,
145                 .flag = NULL,
146                 .val = 'p'
147         },
148         {
149                 .name = "sendfile",
150                 .has_arg = no_argument,
151                 .flag = NULL,
152                 .val = 's'
153         },
154         {
155                 .name = NULL,
156         }
157 };
158
159 struct tip_subbuf {
160         void *buf;
161         unsigned int len;
162         unsigned int max_len;
163         off_t offset;
164 };
165
166 #define FIFO_SIZE       (1024)  /* should be plenty big! */
167 #define CL_SIZE         (128)   /* cache line, any bigger? */
168
169 struct tip_subbuf_fifo {
170         int tail __attribute__((aligned(CL_SIZE)));
171         int head __attribute__((aligned(CL_SIZE)));
172         struct tip_subbuf *q[FIFO_SIZE];
173 };
174
175 struct thread_information {
176         int cpu;
177         pthread_t thread;
178
179         int fd;
180         void *fd_buf;
181         char fn[MAXPATHLEN + 64];
182
183         int pfd;
184         size_t *pfd_buf;
185
186         struct in_addr cl_in_addr;
187
188         FILE *ofile;
189         char *ofile_buffer;
190         off_t ofile_offset;
191         int ofile_stdout;
192         int ofile_mmap;
193
194         int (*get_subbuf)(struct thread_information *, unsigned int);
195         int (*flush_subbuf)(struct thread_information *, struct tip_subbuf *);
196         int (*read_data)(struct thread_information *, void *, unsigned int);
197
198         unsigned long events_processed;
199         unsigned long long data_read;
200         struct device_information *device;
201
202         int exited;
203
204         /*
205          * piped fifo buffers
206          */
207         struct tip_subbuf_fifo fifo;
208         struct tip_subbuf *leftover_ts;
209
210         /*
211          * mmap controlled output files
212          */
213         unsigned long long fs_size;
214         unsigned long long fs_max_size;
215         unsigned long fs_off;
216         void *fs_buf;
217         unsigned long fs_buf_len;
218 };
219
220 struct device_information {
221         int fd;
222         char *path;
223         char buts_name[32];
224         volatile int trace_started;
225         unsigned long drop_count;
226         struct thread_information *threads;
227 };
228
229 static int ncpus;
230 static struct thread_information *thread_information;
231 static int ndevs;
232 static struct device_information *device_information;
233
234 /* command line option globals */
235 static char *relay_path;
236 static char *output_name;
237 static char *output_dir;
238 static int act_mask = ~0U;
239 static int kill_running_trace;
240 static unsigned long buf_size = BUF_SIZE;
241 static unsigned long buf_nr = BUF_NR;
242 static unsigned int page_size;
243
244 #define is_done()       (*(volatile int *)(&done))
245 static volatile int done;
246
247 #define is_trace_stopped()      (*(volatile int *)(&trace_stopped))
248 static volatile int trace_stopped;
249
250 #define is_stat_shown() (*(volatile int *)(&stat_shown))
251 static volatile int stat_shown;
252
253 int data_is_native = -1;
254
255 static void exit_trace(int status);
256
257 #define dip_tracing(dip)        (*(volatile int *)(&(dip)->trace_started))
258 #define dip_set_tracing(dip, v) ((dip)->trace_started = (v))
259
260 #define __for_each_dip(__d, __i, __e)   \
261         for (__i = 0, __d = device_information; __i < __e; __i++, __d++)
262
263 #define for_each_dip(__d, __i)  __for_each_dip(__d, __i, ndevs)
264 #define for_each_tip(__d, __t, __j)     \
265         for (__j = 0, __t = (__d)->threads; __j < ncpus; __j++, __t++)
266
267 /*
268  * networking stuff follows. we include a magic number so we know whether
269  * to endianness convert or not
270  */
271 struct blktrace_net_hdr {
272         u32 magic;              /* same as trace magic */
273         char buts_name[32];     /* trace name */
274         u32 cpu;                /* for which cpu */
275         u32 max_cpus;
276         u32 len;                /* length of following trace data */
277 };
278
279 #define TRACE_NET_PORT          (8462)
280
281 enum {
282         Net_none = 0,
283         Net_server,
284         Net_client,
285 };
286
287 /*
288  * network cmd line params
289  */
290 static char hostname[MAXHOSTNAMELEN];
291 static int net_port = TRACE_NET_PORT;
292 static int net_mode = 0;
293 static int net_sendfile;
294
295 static int net_in_fd = -1;
296 static int net_out_fd = -1;
297
298 static void handle_sigint(__attribute__((__unused__)) int sig)
299 {
300         struct device_information *dip;
301         int i;
302
303         /*
304          * stop trace so we can reap currently produced data
305          */
306         for_each_dip(dip, i) {
307                 if (ioctl(dip->fd, BLKTRACESTOP) < 0)
308                         perror("BLKTRACESTOP");
309         }
310
311         done = 1;
312 }
313
314 static int get_dropped_count(const char *buts_name)
315 {
316         int fd;
317         char tmp[MAXPATHLEN + 64];
318
319         snprintf(tmp, sizeof(tmp), "%s/block/%s/dropped",
320                  relay_path, buts_name);
321
322         fd = open(tmp, O_RDONLY);
323         if (fd < 0) {
324                 /*
325                  * this may be ok, if the kernel doesn't support dropped counts
326                  */
327                 if (errno == ENOENT)
328                         return 0;
329
330                 fprintf(stderr, "Couldn't open dropped file %s\n", tmp);
331                 return -1;
332         }
333
334         if (read(fd, tmp, sizeof(tmp)) < 0) {
335                 perror(tmp);
336                 close(fd);
337                 return -1;
338         }
339
340         close(fd);
341
342         return atoi(tmp);
343 }
344
345 static size_t get_subbuf_padding(struct thread_information *tip,
346                                  unsigned subbuf)
347 {
348         size_t padding_size = buf_nr * sizeof(size_t);
349         size_t ret;
350
351         if (read(tip->pfd, tip->pfd_buf, padding_size) < 0) {
352                 perror("tip pad read");
353                 ret = -1;
354         } else
355                 ret = tip->pfd_buf[subbuf];
356
357         return ret;
358 }
359
360 static int start_trace(struct device_information *dip)
361 {
362         struct blk_user_trace_setup buts;
363
364         memset(&buts, 0, sizeof(buts));
365         buts.buf_size = buf_size;
366         buts.buf_nr = buf_nr;
367         buts.act_mask = act_mask;
368
369         if (ioctl(dip->fd, BLKTRACESETUP, &buts) < 0) {
370                 perror("BLKTRACESETUP");
371                 return 1;
372         }
373
374         if (ioctl(dip->fd, BLKTRACESTART) < 0) {
375                 perror("BLKTRACESTART");
376                 return 1;
377         }
378
379         memcpy(dip->buts_name, buts.name, sizeof(dip->buts_name));
380         dip_set_tracing(dip, 1);
381         return 0;
382 }
383
384 static void stop_trace(struct device_information *dip)
385 {
386         if (dip_tracing(dip) || kill_running_trace) {
387                 dip_set_tracing(dip, 0);
388
389                 /*
390                  * should be stopped, just don't complain if it isn't
391                  */
392                 ioctl(dip->fd, BLKTRACESTOP);
393
394                 if (ioctl(dip->fd, BLKTRACETEARDOWN) < 0)
395                         perror("BLKTRACETEARDOWN");
396
397                 close(dip->fd);
398                 dip->fd = -1;
399         }
400 }
401
402 static void stop_all_traces(void)
403 {
404         struct device_information *dip;
405         int i;
406
407         for_each_dip(dip, i) {
408                 dip->drop_count = get_dropped_count(dip->buts_name);
409                 stop_trace(dip);
410         }
411 }
412
413 static void wait_for_data(struct thread_information *tip)
414 {
415         struct pollfd pfd = { .fd = tip->fd, .events = POLLIN };
416
417         do {
418                 poll(&pfd, 1, 100);
419                 if (pfd.revents & POLLIN)
420                         break;
421                 if (tip->ofile_stdout)
422                         break;
423         } while (!is_done());
424 }
425
426 static int read_data_file(struct thread_information *tip, void *buf,
427                           unsigned int len)
428 {
429         int ret = 0;
430
431         do {
432                 wait_for_data(tip);
433
434                 ret = read(tip->fd, buf, len);
435                 if (!ret)
436                         continue;
437                 else if (ret > 0)
438                         return ret;
439                 else {
440                         if (errno != EAGAIN) {
441                                 perror(tip->fn);
442                                 fprintf(stderr,"Thread %d failed read of %s\n",
443                                         tip->cpu, tip->fn);
444                                 break;
445                         }
446                         continue;
447                 }
448         } while (!is_done());
449
450         return ret;
451
452 }
453
454 static int read_data_net(struct thread_information *tip, void *buf,
455                          unsigned int len)
456 {
457         unsigned int bytes_left = len;
458         int ret = 0;
459
460         do {
461                 ret = recv(net_in_fd, buf, bytes_left, MSG_WAITALL);
462
463                 if (!ret)
464                         continue;
465                 else if (ret < 0) {
466                         if (errno != EAGAIN) {
467                                 perror(tip->fn);
468                                 fprintf(stderr, "server: failed read\n");
469                                 return 0;
470                         }
471                         continue;
472                 } else {
473                         buf += ret;
474                         bytes_left -= ret;
475                 }
476         } while (!is_done() && bytes_left);
477
478         return len - bytes_left;
479 }
480
481 static int read_data(struct thread_information *tip, void *buf,
482                      unsigned int len)
483 {
484         return tip->read_data(tip, buf, len);
485 }
486
487 static inline struct tip_subbuf *
488 subbuf_fifo_dequeue(struct thread_information *tip)
489 {
490         const int head = tip->fifo.head;
491         const int next = (head + 1) & (FIFO_SIZE - 1);
492
493         if (head != tip->fifo.tail) {
494                 struct tip_subbuf *ts = tip->fifo.q[head];
495
496                 store_barrier();
497                 tip->fifo.head = next;
498                 return ts;
499         }
500
501         return NULL;
502 }
503
504 static inline int subbuf_fifo_queue(struct thread_information *tip,
505                                     struct tip_subbuf *ts)
506 {
507         const int tail = tip->fifo.tail;
508         const int next = (tail + 1) & (FIFO_SIZE - 1);
509
510         if (next != tip->fifo.head) {
511                 tip->fifo.q[tail] = ts;
512                 store_barrier();
513                 tip->fifo.tail = next;
514                 return 0;
515         }
516
517         fprintf(stderr, "fifo too small!\n");
518         return 1;
519 }
520
521 /*
522  * For file output, truncate and mmap the file appropriately
523  */
524 static int mmap_subbuf(struct thread_information *tip, unsigned int maxlen)
525 {
526         int ofd = fileno(tip->ofile);
527         int ret;
528
529         /*
530          * extend file, if we have to. use chunks of 16 subbuffers.
531          */
532         if (tip->fs_off + buf_size > tip->fs_buf_len) {
533                 if (tip->fs_buf) {
534                         munlock(tip->fs_buf, tip->fs_buf_len);
535                         munmap(tip->fs_buf, tip->fs_buf_len);
536                         tip->fs_buf = NULL;
537                 }
538
539                 tip->fs_off = tip->fs_size & (page_size - 1);
540                 tip->fs_buf_len = (16 * buf_size) - tip->fs_off;
541                 tip->fs_max_size += tip->fs_buf_len;
542
543                 if (ftruncate(ofd, tip->fs_max_size) < 0) {
544                         perror("ftruncate");
545                         return -1;
546                 }
547
548                 tip->fs_buf = mmap(NULL, tip->fs_buf_len, PROT_WRITE,
549                                    MAP_SHARED, ofd, tip->fs_size - tip->fs_off);
550                 if (tip->fs_buf == MAP_FAILED) {
551                         perror("mmap");
552                         return -1;
553                 }
554                 mlock(tip->fs_buf, tip->fs_buf_len);
555         }
556
557         ret = read_data(tip, tip->fs_buf + tip->fs_off, maxlen);
558         if (ret >= 0) {
559                 tip->data_read += ret;
560                 tip->fs_size += ret;
561                 tip->fs_off += ret;
562                 return 0;
563         }
564
565         return -1;
566 }
567
568 /*
569  * Use the copy approach for pipes and network
570  */
571 static int get_subbuf(struct thread_information *tip, unsigned int maxlen)
572 {
573         struct tip_subbuf *ts = malloc(sizeof(*ts));
574         int ret;
575
576         ts->buf = malloc(buf_size);
577         ts->max_len = maxlen;
578
579         ret = read_data(tip, ts->buf, ts->max_len);
580         if (ret > 0) {
581                 ts->len = ret;
582                 tip->data_read += ret;
583                 if (subbuf_fifo_queue(tip, ts))
584                         return -1;
585         }
586
587         return ret;
588 }
589
590 static int get_subbuf_sendfile(struct thread_information *tip,
591                                unsigned int maxlen)
592 {
593         struct tip_subbuf *ts;
594         struct stat sb;
595         unsigned int ready, this_size, total;
596
597         wait_for_data(tip);
598
599         /*
600          * hack to get last data out, we can't use sendfile for that
601          */
602         if (is_done())
603                 return get_subbuf(tip, maxlen);
604
605         if (fstat(tip->fd, &sb) < 0) {
606                 perror("trace stat");
607                 return -1;
608         }
609
610         ready = sb.st_size - tip->ofile_offset;
611         if (!ready) {
612                 /*
613                  * delay a little, since poll() will return data available
614                  * until sendfile() is run
615                  */
616                 usleep(100);
617                 return 0;
618         }
619
620         this_size = buf_size;
621         total = ready;
622         while (ready) {
623                 if (this_size > ready)
624                         this_size = ready;
625
626                 ts = malloc(sizeof(*ts));
627
628                 ts->buf = NULL;
629                 ts->max_len = 0;
630
631                 ts->len = this_size;
632                 ts->offset = tip->ofile_offset;
633                 tip->ofile_offset += ts->len;
634
635                 if (subbuf_fifo_queue(tip, ts))
636                         return -1;
637
638                 ready -= this_size;
639         }
640
641         return total;
642 }
643
644 static void close_thread(struct thread_information *tip)
645 {
646         if (tip->fd != -1)
647                 close(tip->fd);
648         if (tip->pfd != -1)
649                 close(tip->pfd);
650         if (tip->ofile)
651                 fclose(tip->ofile);
652         if (tip->ofile_buffer)
653                 free(tip->ofile_buffer);
654         if (tip->fd_buf)
655                 free(tip->fd_buf);
656         if (tip->pfd_buf)
657                 free(tip->pfd_buf);
658
659         tip->fd = -1;
660         tip->pfd = -1;
661         tip->ofile = NULL;
662         tip->ofile_buffer = NULL;
663         tip->fd_buf = NULL;
664 }
665
666 static void tip_ftrunc_final(struct thread_information *tip)
667 {
668         /*
669          * truncate to right size and cleanup mmap
670          */
671         if (tip->ofile_mmap) {
672                 int ofd = fileno(tip->ofile);
673
674                 if (tip->fs_buf)
675                         munmap(tip->fs_buf, tip->fs_buf_len);
676
677                 ftruncate(ofd, tip->fs_size);
678         }
679 }
680
681 static void *thread_main(void *arg)
682 {
683         struct thread_information *tip = arg;
684         pid_t pid = getpid();
685         cpu_set_t cpu_mask;
686
687         CPU_ZERO(&cpu_mask);
688         CPU_SET((tip->cpu), &cpu_mask);
689
690         if (sched_setaffinity(pid, sizeof(cpu_mask), &cpu_mask) == -1) {
691                 perror("sched_setaffinity");
692                 exit_trace(1);
693         }
694
695         snprintf(tip->fn, sizeof(tip->fn), "%s/block/%s/trace%d",
696                         relay_path, tip->device->buts_name, tip->cpu);
697         tip->fd = open(tip->fn, O_RDONLY);
698         if (tip->fd < 0) {
699                 perror(tip->fn);
700                 fprintf(stderr,"Thread %d failed open of %s\n", tip->cpu,
701                         tip->fn);
702                 exit_trace(1);
703         }
704
705         if (net_mode == Net_client && net_sendfile) {
706                 char tmp[MAXPATHLEN + 64];
707
708                 snprintf(tmp, sizeof(tmp), "%s/block/%s/trace%d.padding",
709                          relay_path, tip->device->buts_name, tip->cpu);
710
711                 tip->pfd = open(tmp, O_RDONLY);
712                 if (tip->pfd < 0) {
713                         fprintf(stderr, "Couldn't open padding file %s\n", tmp);
714                         exit_trace(1);
715                 }
716
717                 tip->pfd_buf = malloc(buf_nr * sizeof(size_t));
718         }
719
720         while (!is_done()) {
721                 if (tip->get_subbuf(tip, buf_size) < 0)
722                         break;
723         }
724
725         /*
726          * trace is stopped, pull data until we get a short read
727          */
728         while (tip->get_subbuf(tip, buf_size) > 0)
729                 ;
730
731         tip_ftrunc_final(tip);
732         tip->exited = 1;
733         return NULL;
734 }
735
736 static int write_data_net(int fd, void *buf, unsigned int buf_len)
737 {
738         unsigned int bytes_left = buf_len;
739         int ret;
740
741         while (bytes_left) {
742                 ret = send(fd, buf, bytes_left, 0);
743                 if (ret < 0) {
744                         perror("send");
745                         return 1;
746                 }
747
748                 buf += ret;
749                 bytes_left -= ret;
750         }
751
752         return 0;
753 }
754
755 static int net_send_header(struct thread_information *tip, unsigned int len)
756 {
757         struct blktrace_net_hdr hdr;
758
759         hdr.magic = BLK_IO_TRACE_MAGIC;
760         strcpy(hdr.buts_name, tip->device->buts_name);
761         hdr.cpu = tip->cpu;
762         hdr.max_cpus = ncpus;
763         hdr.len = len;
764
765         return write_data_net(net_out_fd, &hdr, sizeof(hdr));
766 }
767
768 /*
769  * send header with 0 length to signal end-of-run
770  */
771 static void net_client_send_close(void)
772 {
773         struct blktrace_net_hdr hdr;
774
775         hdr.magic = BLK_IO_TRACE_MAGIC;
776         hdr.cpu = 0;
777         hdr.max_cpus = ncpus;
778         hdr.len = 0;
779
780         write_data_net(net_out_fd, &hdr, sizeof(hdr));
781 }
782
783 static int flush_subbuf_net(struct thread_information *tip,
784                             struct tip_subbuf *ts)
785 {
786         if (net_send_header(tip, ts->len))
787                 return 1;
788         if (write_data_net(net_out_fd, ts->buf, ts->len))
789                 return 1;
790
791         free(ts->buf);
792         free(ts);
793         return 0;
794 }
795
796 static int flush_subbuf_sendfile(struct thread_information *tip,
797                                  struct tip_subbuf *ts)
798 {
799         size_t padding;
800         unsigned subbuf;
801         unsigned len;
802
803         /*
804          * currently we cannot use sendfile() on the last bytes read, as they
805          * may not be a full subbuffer. get_subbuf_sendfile() falls back to
806          * the read approach for those, so use send() to ship them out
807          */
808         if (ts->buf)
809                 return flush_subbuf_net(tip, ts);
810         
811         subbuf = (ts->offset / buf_size) % buf_nr;
812         padding = get_subbuf_padding(tip, subbuf);
813         len = ts->len - padding;
814
815         if (net_send_header(tip, len))
816                 return 1;
817         if (sendfile(net_out_fd, tip->fd, &ts->offset, len) < 0) {
818                 perror("sendfile");
819                 return 1;
820         }
821
822         tip->data_read += len;
823         free(ts);
824         return 0;
825 }
826
827 static int write_data(struct thread_information *tip, void *buf,
828                       unsigned int buf_len)
829 {
830         int ret;
831
832         if (!buf_len)
833                 return 0;
834
835         while (1) {
836                 ret = fwrite(buf, buf_len, 1, tip->ofile);
837                 if (ret == 1)
838                         break;
839
840                 if (ret < 0) {
841                         perror("write");
842                         return 1;
843                 }
844         }
845
846         if (tip->ofile_stdout)
847                 fflush(tip->ofile);
848
849         return 0;
850 }
851
852 static int flush_subbuf_file(struct thread_information *tip,
853                              struct tip_subbuf *ts)
854 {
855         unsigned int offset = 0;
856         struct blk_io_trace *t;
857         int pdu_len, events = 0;
858
859         /*
860          * surplus from last run
861          */
862         if (tip->leftover_ts) {
863                 struct tip_subbuf *prev_ts = tip->leftover_ts;
864
865                 if (prev_ts->len + ts->len > prev_ts->max_len) {
866                         prev_ts->max_len += ts->len;
867                         prev_ts->buf = realloc(prev_ts->buf, prev_ts->max_len);
868                 }
869
870                 memcpy(prev_ts->buf + prev_ts->len, ts->buf, ts->len);
871                 prev_ts->len += ts->len;
872
873                 free(ts->buf);
874                 free(ts);
875
876                 ts = prev_ts;
877                 tip->leftover_ts = NULL;
878         }
879
880         while (offset + sizeof(*t) <= ts->len) {
881                 t = ts->buf + offset;
882
883                 if (verify_trace(t)) {
884                         write_data(tip, ts->buf, offset);
885                         return -1;
886                 }
887
888                 pdu_len = t->pdu_len;
889
890                 if (offset + sizeof(*t) + pdu_len > ts->len)
891                         break;
892
893                 offset += sizeof(*t) + pdu_len;
894                 tip->events_processed++;
895                 tip->data_read += sizeof(*t) + pdu_len;
896                 events++;
897         }
898
899         if (write_data(tip, ts->buf, offset))
900                 return -1;
901
902         /*
903          * leftover bytes, save them for next time
904          */
905         if (offset != ts->len) {
906                 tip->leftover_ts = ts;
907                 ts->len -= offset;
908                 memmove(ts->buf, ts->buf + offset, ts->len);
909         } else {
910                 free(ts->buf);
911                 free(ts);
912         }
913
914         return events;
915 }
916
917 static int write_tip_events(struct thread_information *tip)
918 {
919         struct tip_subbuf *ts = subbuf_fifo_dequeue(tip);
920
921         if (ts)
922                 return tip->flush_subbuf(tip, ts);
923
924         return 0;
925 }
926
927 /*
928  * scans the tips we know and writes out the subbuffers we accumulate
929  */
930 static void get_and_write_events(void)
931 {
932         struct device_information *dip;
933         struct thread_information *tip;
934         int i, j, events, ret, tips_running;
935
936         while (!is_done()) {
937                 events = 0;
938
939                 for_each_dip(dip, i) {
940                         for_each_tip(dip, tip, j) {
941                                 ret = write_tip_events(tip);
942                                 if (ret > 0)
943                                         events += ret;
944                         }
945                 }
946
947                 if (!events)
948                         usleep(10);
949         }
950
951         /*
952          * reap stored events
953          */
954         do {
955                 events = 0;
956                 tips_running = 0;
957                 for_each_dip(dip, i) {
958                         for_each_tip(dip, tip, j) {
959                                 ret = write_tip_events(tip);
960                                 if (ret > 0)
961                                         events += ret;
962                                 tips_running += !tip->exited;
963                         }
964                 }
965                 usleep(10);
966         } while (events || tips_running);
967 }
968
969 static void wait_for_threads(void)
970 {
971         /*
972          * for piped or network output, poll and fetch data for writeout.
973          * for files, we just wait around for trace threads to exit
974          */
975         if ((output_name && !strcmp(output_name, "-")) ||
976             net_mode == Net_client)
977                 get_and_write_events();
978         else {
979                 struct device_information *dip;
980                 struct thread_information *tip;
981                 int i, j, tips_running;
982
983                 do {
984                         tips_running = 0;
985                         usleep(1000);
986
987                         for_each_dip(dip, i)
988                                 for_each_tip(dip, tip, j)
989                                         tips_running += !tip->exited;
990                 } while (tips_running);
991         }
992
993         if (net_mode == Net_client)
994                 net_client_send_close();
995 }
996
997 static int fill_ofname(struct thread_information *tip, char *dst,
998                        char *buts_name)
999 {
1000         struct stat sb;
1001         int len = 0;
1002         time_t t;
1003
1004         if (output_dir)
1005                 len = sprintf(dst, "%s/", output_dir);
1006
1007         if (net_mode == Net_server) {
1008                 len += sprintf(dst + len, "%s-", inet_ntoa(tip->cl_in_addr));
1009                 time(&t);
1010                 len += strftime(dst + len, 64, "%F-%T/", gmtime(&t));
1011         }
1012
1013         if (stat(dst, &sb) < 0) {
1014                 if (errno != ENOENT) {
1015                         perror("stat");
1016                         return 1;
1017                 }
1018                 if (mkdir(dst, 0755) < 0) {
1019                         perror(dst);
1020                         fprintf(stderr, "Can't make output dir\n");
1021                         return 1;
1022                 }
1023         }
1024
1025         if (output_name)
1026                 sprintf(dst + len, "%s.blktrace.%d", output_name, tip->cpu);
1027         else
1028                 sprintf(dst + len, "%s.blktrace.%d", buts_name, tip->cpu);
1029
1030         return 0;
1031 }
1032
1033 static void fill_ops(struct thread_information *tip)
1034 {
1035         /*
1036          * setup ops
1037          */
1038         if (net_mode == Net_client) {
1039                 if (net_sendfile) {
1040                         tip->get_subbuf = get_subbuf_sendfile;
1041                         tip->flush_subbuf = flush_subbuf_sendfile;
1042                 } else {
1043                         tip->get_subbuf = get_subbuf;
1044                         tip->flush_subbuf = flush_subbuf_net;
1045                 }
1046         } else {
1047                 if (tip->ofile_mmap)
1048                         tip->get_subbuf = mmap_subbuf;
1049                 else
1050                         tip->get_subbuf = get_subbuf;
1051
1052                 tip->flush_subbuf = flush_subbuf_file;
1053         }
1054                         
1055         if (net_mode == Net_server)
1056                 tip->read_data = read_data_net;
1057         else
1058                 tip->read_data = read_data_file;
1059 }
1060
1061 static int tip_open_output(struct device_information *dip,
1062                            struct thread_information *tip)
1063 {
1064         int pipeline = output_name && !strcmp(output_name, "-");
1065         int mode, vbuf_size;
1066         char op[128];
1067
1068         if (net_mode == Net_client) {
1069                 tip->ofile = NULL;
1070                 tip->ofile_stdout = 0;
1071                 tip->ofile_mmap = 0;
1072                 vbuf_size = 0;
1073                 mode = 0; /* gcc 4.x issues a bogus warning */
1074         } else if (pipeline) {
1075                 tip->ofile = fdopen(STDOUT_FILENO, "w");
1076                 tip->ofile_stdout = 1;
1077                 tip->ofile_mmap = 0;
1078                 mode = _IOLBF;
1079                 vbuf_size = 512;
1080         } else {
1081                 if (fill_ofname(tip, op, dip->buts_name))
1082                         return 1;
1083                 tip->ofile = fopen(op, "w+");
1084                 tip->ofile_stdout = 0;
1085                 tip->ofile_mmap = 1;
1086                 mode = _IOFBF;
1087                 vbuf_size = OFILE_BUF;
1088         }
1089
1090         if (net_mode != Net_client && tip->ofile == NULL) {
1091                 perror(op);
1092                 return 1;
1093         }
1094
1095         if (vbuf_size) {
1096                 tip->ofile_buffer = malloc(vbuf_size);
1097                 if (setvbuf(tip->ofile, tip->ofile_buffer, mode, vbuf_size)) {
1098                         perror("setvbuf");
1099                         close_thread(tip);
1100                         return 1;
1101                 }
1102         }
1103
1104         fill_ops(tip);
1105         return 0;
1106 }
1107
1108 static int start_threads(struct device_information *dip)
1109 {
1110         struct thread_information *tip;
1111         int j;
1112
1113         for_each_tip(dip, tip, j) {
1114                 tip->cpu = j;
1115                 tip->device = dip;
1116                 tip->events_processed = 0;
1117                 tip->fd = -1;
1118                 tip->pfd = -1;
1119                 memset(&tip->fifo, 0, sizeof(tip->fifo));
1120                 tip->leftover_ts = NULL;
1121
1122                 if (tip_open_output(dip, tip))
1123                         return 1;
1124
1125                 if (pthread_create(&tip->thread, NULL, thread_main, tip)) {
1126                         perror("pthread_create");
1127                         close_thread(tip);
1128                         return 1;
1129                 }
1130         }
1131
1132         return 0;
1133 }
1134
1135 static void stop_threads(struct device_information *dip)
1136 {
1137         struct thread_information *tip;
1138         unsigned long ret;
1139         int i;
1140
1141         for_each_tip(dip, tip, i) {
1142                 (void) pthread_join(tip->thread, (void *) &ret);
1143                 close_thread(tip);
1144         }
1145 }
1146
1147 static void stop_all_threads(void)
1148 {
1149         struct device_information *dip;
1150         int i;
1151
1152         for_each_dip(dip, i)
1153                 stop_threads(dip);
1154 }
1155
1156 static void stop_all_tracing(void)
1157 {
1158         struct device_information *dip;
1159         int i;
1160
1161         for_each_dip(dip, i)
1162                 stop_trace(dip);
1163 }
1164
1165 static void exit_trace(int status)
1166 {
1167         if (!is_trace_stopped()) {
1168                 trace_stopped = 1;
1169                 stop_all_threads();
1170                 stop_all_tracing();
1171         }
1172
1173         exit(status);
1174 }
1175
1176 static int resize_devices(char *path)
1177 {
1178         int size = (ndevs + 1) * sizeof(struct device_information);
1179
1180         device_information = realloc(device_information, size);
1181         if (!device_information) {
1182                 fprintf(stderr, "Out of memory, device %s (%d)\n", path, size);
1183                 return 1;
1184         }
1185         device_information[ndevs].path = path;
1186         ndevs++;
1187         return 0;
1188 }
1189
1190 static int open_devices(void)
1191 {
1192         struct device_information *dip;
1193         int i;
1194
1195         for_each_dip(dip, i) {
1196                 dip->fd = open(dip->path, O_RDONLY | O_NONBLOCK);
1197                 if (dip->fd < 0) {
1198                         perror(dip->path);
1199                         return 1;
1200                 }
1201         }
1202
1203         return 0;
1204 }
1205
1206 static int start_devices(void)
1207 {
1208         struct device_information *dip;
1209         int i, j, size;
1210
1211         size = ncpus * sizeof(struct thread_information);
1212         thread_information = malloc(size * ndevs);
1213         if (!thread_information) {
1214                 fprintf(stderr, "Out of memory, threads (%d)\n", size * ndevs);
1215                 return 1;
1216         }
1217
1218         for_each_dip(dip, i) {
1219                 if (start_trace(dip)) {
1220                         close(dip->fd);
1221                         fprintf(stderr, "Failed to start trace on %s\n",
1222                                 dip->path);
1223                         break;
1224                 }
1225         }
1226
1227         if (i != ndevs) {
1228                 __for_each_dip(dip, j, i)
1229                         stop_trace(dip);
1230
1231                 return 1;
1232         }
1233
1234         for_each_dip(dip, i) {
1235                 dip->threads = thread_information + (i * ncpus);
1236                 if (start_threads(dip)) {
1237                         fprintf(stderr, "Failed to start worker threads\n");
1238                         break;
1239                 }
1240         }
1241
1242         if (i != ndevs) {
1243                 __for_each_dip(dip, j, i)
1244                         stop_threads(dip);
1245                 for_each_dip(dip, i)
1246                         stop_trace(dip);
1247
1248                 return 1;
1249         }
1250
1251         return 0;
1252 }
1253
1254 static void show_stats(void)
1255 {
1256         struct device_information *dip;
1257         struct thread_information *tip;
1258         unsigned long long events_processed, data_read;
1259         unsigned long total_drops;
1260         int i, j, no_stdout = 0;
1261
1262         if (is_stat_shown())
1263                 return;
1264
1265         if (output_name && !strcmp(output_name, "-"))
1266                 no_stdout = 1;
1267
1268         stat_shown = 1;
1269
1270         total_drops = 0;
1271         for_each_dip(dip, i) {
1272                 if (!no_stdout)
1273                         printf("Device: %s\n", dip->path);
1274                 events_processed = 0;
1275                 data_read = 0;
1276                 for_each_tip(dip, tip, j) {
1277                         if (!no_stdout)
1278                                 printf("  CPU%3d: %20lu events, %8llu KiB data\n",
1279                                         tip->cpu, tip->events_processed,
1280                                         (tip->data_read + 1023) >> 10);
1281                         events_processed += tip->events_processed;
1282                         data_read += tip->data_read;
1283                 }
1284                 total_drops += dip->drop_count;
1285                 if (!no_stdout)
1286                         printf("  Total:  %20llu events (dropped %lu), %8llu KiB data\n",
1287                                         events_processed, dip->drop_count,
1288                                         (data_read + 1023) >> 10);
1289         }
1290
1291         if (total_drops)
1292                 fprintf(stderr, "You have dropped events, consider using a larger buffer size (-b)\n");
1293 }
1294
1295 static struct device_information *net_get_dip(char *buts_name,
1296                                               struct in_addr *cl_in_addr)
1297 {
1298         struct device_information *dip;
1299         int i;
1300
1301         for (i = 0; i < ndevs; i++) {
1302                 dip = &device_information[i];
1303
1304                 if (!strcmp(dip->buts_name, buts_name))
1305                         return dip;
1306         }
1307
1308         device_information = realloc(device_information, (ndevs + 1) * sizeof(*dip));
1309         dip = &device_information[ndevs];
1310         strcpy(dip->buts_name, buts_name);
1311         strcpy(dip->path, buts_name);
1312         ndevs++;
1313         dip->threads = malloc(ncpus * sizeof(struct thread_information));
1314         memset(dip->threads, 0, ncpus * sizeof(struct thread_information));
1315
1316         /*
1317          * open all files
1318          */
1319         for (i = 0; i < ncpus; i++) {
1320                 struct thread_information *tip = &dip->threads[i];
1321
1322                 tip->cpu = i;
1323                 tip->device = dip;
1324                 tip->fd = -1;
1325                 tip->pfd = -1;
1326                 tip->cl_in_addr = *cl_in_addr;
1327
1328                 if (tip_open_output(dip, tip))
1329                         return NULL;
1330         }
1331
1332         return dip;
1333 }
1334
1335 static struct thread_information *net_get_tip(struct blktrace_net_hdr *bnh,
1336                                               struct in_addr *cl_in_addr)
1337 {
1338         struct device_information *dip;
1339
1340         ncpus = bnh->max_cpus;
1341         dip = net_get_dip(bnh->buts_name, cl_in_addr);
1342         return &dip->threads[bnh->cpu];
1343 }
1344
1345 static int net_get_header(struct blktrace_net_hdr *bnh)
1346 {
1347         int fl = fcntl(net_in_fd, F_GETFL);
1348         int bytes_left, ret;
1349         void *p = bnh;
1350
1351         fcntl(net_in_fd, F_SETFL, fl | O_NONBLOCK);
1352         bytes_left = sizeof(*bnh);
1353         while (bytes_left && !is_done()) {
1354                 ret = recv(net_in_fd, p, bytes_left, MSG_WAITALL);
1355                 if (ret < 0) {
1356                         if (errno != EAGAIN) {
1357                                 perror("recv header");
1358                                 return 1;
1359                         }
1360                         usleep(100);
1361                         continue;
1362                 } else if (!ret) {
1363                         usleep(100);
1364                         continue;
1365                 } else {
1366                         p += ret;
1367                         bytes_left -= ret;
1368                 }
1369         }
1370         fcntl(net_in_fd, F_SETFL, fl & ~O_NONBLOCK);
1371         return 0;
1372 }
1373
1374 static int net_server_loop(struct in_addr *cl_in_addr)
1375 {
1376         struct thread_information *tip;
1377         struct blktrace_net_hdr bnh;
1378
1379         if (net_get_header(&bnh))
1380                 return 1;
1381
1382         if (data_is_native == -1 && check_data_endianness(bnh.magic)) {
1383                 fprintf(stderr, "server: received data is bad\n");
1384                 return 1;
1385         }
1386
1387         if (!data_is_native) {
1388                 bnh.cpu = be32_to_cpu(bnh.cpu);
1389                 bnh.len = be32_to_cpu(bnh.len);
1390         }
1391
1392         /*
1393          * len == 0 means that the other end signalled end-of-run
1394          */
1395         if (!bnh.len) {
1396                 fprintf(stderr, "server: end of run\n");
1397                 return 1;
1398         }
1399
1400         tip = net_get_tip(&bnh, cl_in_addr);
1401         if (!tip)
1402                 return 1;
1403
1404         if (mmap_subbuf(tip, bnh.len))
1405                 return 1;
1406
1407         return 0;
1408 }
1409
1410 /*
1411  * Start here when we are in server mode - just fetch data from the network
1412  * and dump to files
1413  */
1414 static int net_server(void)
1415 {
1416         struct device_information *dip;
1417         struct thread_information *tip;
1418         struct sockaddr_in addr;
1419         socklen_t socklen;
1420         int fd, opt, i, j;
1421
1422         fd = socket(AF_INET, SOCK_STREAM, 0);
1423         if (fd < 0) {
1424                 perror("server: socket");
1425                 return 1;
1426         }
1427
1428         opt = 1;
1429         if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &opt, sizeof(opt)) < 0) {
1430                 perror("setsockopt");
1431                 return 1;
1432         }
1433
1434         memset(&addr, 0, sizeof(addr));
1435         addr.sin_family = AF_INET;
1436         addr.sin_addr.s_addr = htonl(INADDR_ANY);
1437         addr.sin_port = htons(net_port);
1438
1439         if (bind(fd, (struct sockaddr *) &addr, sizeof(addr)) < 0) {
1440                 perror("bind");
1441                 return 1;
1442         }
1443
1444         if (listen(fd, 1) < 0) {
1445                 perror("listen");
1446                 return 1;
1447         }
1448
1449 repeat:
1450         signal(SIGINT, NULL);
1451         signal(SIGHUP, NULL);
1452         signal(SIGTERM, NULL);
1453         signal(SIGALRM, NULL);
1454
1455         printf("blktrace: waiting for incoming connection...\n");
1456
1457         socklen = sizeof(addr);
1458         net_in_fd = accept(fd, (struct sockaddr *) &addr, &socklen);
1459         if (net_in_fd < 0) {
1460                 perror("accept");
1461                 return 1;
1462         }
1463
1464         signal(SIGINT, handle_sigint);
1465         signal(SIGHUP, handle_sigint);
1466         signal(SIGTERM, handle_sigint);
1467         signal(SIGALRM, handle_sigint);
1468
1469         printf("blktrace: connection from %s\n", inet_ntoa(addr.sin_addr));
1470
1471         while (!is_done()) {
1472                 if (net_server_loop(&addr.sin_addr))
1473                         break;
1474         }
1475
1476         for_each_dip(dip, i)
1477                 for_each_tip(dip, tip, j)
1478                         tip_ftrunc_final(tip);
1479
1480         show_stats();
1481
1482         if (is_done())
1483                 return 0;
1484
1485         /*
1486          * cleanup for next run
1487          */
1488         for_each_dip(dip, i) {
1489                 for_each_tip(dip, tip, j)
1490                         fclose(tip->ofile);
1491
1492                 free(dip->threads);
1493         }
1494
1495         free(device_information);
1496         device_information = NULL;
1497         ncpus = ndevs = 0;
1498
1499         close(net_in_fd);
1500         net_in_fd = -1;
1501         stat_shown = 0;
1502         goto repeat;
1503 }
1504
1505 /*
1506  * Setup outgoing network connection where we will transmit data
1507  */
1508 static int net_setup_client(void)
1509 {
1510         struct sockaddr_in addr;
1511         int fd;
1512
1513         fd = socket(AF_INET, SOCK_STREAM, 0);
1514         if (fd < 0) {
1515                 perror("client: socket");
1516                 return 1;
1517         }
1518
1519         memset(&addr, 0, sizeof(addr));
1520         addr.sin_family = AF_INET;
1521         addr.sin_port = htons(net_port);
1522
1523         if (inet_aton(hostname, &addr.sin_addr) != 1) {
1524                 struct hostent *hent = gethostbyname(hostname);
1525                 if (!hent) {
1526                         perror("gethostbyname");
1527                         return 1;
1528                 }
1529
1530                 memcpy(&addr.sin_addr, hent->h_addr, 4);
1531                 strcpy(hostname, hent->h_name);
1532         }
1533
1534         printf("blktrace: connecting to %s\n", hostname);
1535
1536         if (connect(fd, (struct sockaddr *) &addr, sizeof(addr)) < 0) {
1537                 perror("client: connect");
1538                 return 1;
1539         }
1540
1541         printf("blktrace: connected!\n");
1542         net_out_fd = fd;
1543         return 0;
1544 }
1545
1546 static char usage_str[] = \
1547         "-d <dev> [ -r relay path ] [ -o <output> ] [ -k ] [ -w time ]\n" \
1548         "[ -a action ] [ -A action mask ] [ -V ]\n\n" \
1549         "\t-d Use specified device. May also be given last after options\n" \
1550         "\t-r Path to mounted relayfs, defaults to /relay\n" \
1551         "\t-o File(s) to send output to\n" \
1552         "\t-D Directory to prepend to output file names\n" \
1553         "\t-k Kill a running trace\n" \
1554         "\t-w Stop after defined time, in seconds\n" \
1555         "\t-a Only trace specified actions. See documentation\n" \
1556         "\t-A Give trace mask as a single value. See documentation\n" \
1557         "\t-b Sub buffer size in KiB\n" \
1558         "\t-n Number of sub buffers\n" \
1559         "\t-l Run in network listen mode (blktrace server)\n" \
1560         "\t-h Run in network client mode, connecting to the given host\n" \
1561         "\t-p Network port to use (default 8462)\n" \
1562         "\t-s Make the network client use sendfile() to transfer data\n" \
1563         "\t-V Print program version info\n\n";
1564
1565 static void show_usage(char *program)
1566 {
1567         fprintf(stderr, "Usage: %s %s %s",program, blktrace_version, usage_str);
1568 }
1569
1570 int main(int argc, char *argv[])
1571 {
1572         static char default_relay_path[] = "/relay";
1573         struct statfs st;
1574         int i, c;
1575         int stop_watch = 0;
1576         int act_mask_tmp = 0;
1577
1578         while ((c = getopt_long(argc, argv, S_OPTS, l_opts, NULL)) >= 0) {
1579                 switch (c) {
1580                 case 'a':
1581                         i = find_mask_map(optarg);
1582                         if (i < 0) {
1583                                 fprintf(stderr,"Invalid action mask %s\n",
1584                                         optarg);
1585                                 return 1;
1586                         }
1587                         act_mask_tmp |= i;
1588                         break;
1589
1590                 case 'A':
1591                         if ((sscanf(optarg, "%x", &i) != 1) || 
1592                                                         !valid_act_opt(i)) {
1593                                 fprintf(stderr,
1594                                         "Invalid set action mask %s/0x%x\n",
1595                                         optarg, i);
1596                                 return 1;
1597                         }
1598                         act_mask_tmp = i;
1599                         break;
1600
1601                 case 'd':
1602                         if (resize_devices(optarg) != 0)
1603                                 return 1;
1604                         break;
1605
1606                 case 'r':
1607                         relay_path = optarg;
1608                         break;
1609
1610                 case 'o':
1611                         output_name = optarg;
1612                         break;
1613                 case 'k':
1614                         kill_running_trace = 1;
1615                         break;
1616                 case 'w':
1617                         stop_watch = atoi(optarg);
1618                         if (stop_watch <= 0) {
1619                                 fprintf(stderr,
1620                                         "Invalid stopwatch value (%d secs)\n",
1621                                         stop_watch);
1622                                 return 1;
1623                         }
1624                         break;
1625                 case 'V':
1626                         printf("%s version %s\n", argv[0], blktrace_version);
1627                         return 0;
1628                 case 'b':
1629                         buf_size = strtoul(optarg, NULL, 10);
1630                         if (buf_size <= 0 || buf_size > 16*1024) {
1631                                 fprintf(stderr,
1632                                         "Invalid buffer size (%lu)\n",buf_size);
1633                                 return 1;
1634                         }
1635                         buf_size <<= 10;
1636                         break;
1637                 case 'n':
1638                         buf_nr = strtoul(optarg, NULL, 10);
1639                         if (buf_nr <= 0) {
1640                                 fprintf(stderr,
1641                                         "Invalid buffer nr (%lu)\n", buf_nr);
1642                                 return 1;
1643                         }
1644                         break;
1645                 case 'D':
1646                         output_dir = optarg;
1647                         break;
1648                 case 'h':
1649                         net_mode = Net_client;
1650                         strcpy(hostname, optarg);
1651                         break;
1652                 case 'l':
1653                         net_mode = Net_server;
1654                         break;
1655                 case 'p':
1656                         net_port = atoi(optarg);
1657                         break;
1658                 case 's':
1659                         net_sendfile = 1;
1660                         break;
1661                 default:
1662                         show_usage(argv[0]);
1663                         return 1;
1664                 }
1665         }
1666
1667         setlocale(LC_NUMERIC, "en_US");
1668
1669         page_size = getpagesize();
1670
1671         if (net_mode == Net_server)
1672                 return net_server();
1673
1674         while (optind < argc) {
1675                 if (resize_devices(argv[optind++]) != 0)
1676                         return 1;
1677         }
1678
1679         if (ndevs == 0) {
1680                 show_usage(argv[0]);
1681                 return 1;
1682         }
1683
1684         if (!relay_path)
1685                 relay_path = default_relay_path;
1686
1687         if (act_mask_tmp != 0)
1688                 act_mask = act_mask_tmp;
1689
1690         if (statfs(relay_path, &st) < 0) {
1691                 perror("statfs");
1692                 fprintf(stderr,"%s does not appear to be a valid path\n",
1693                         relay_path);
1694                 return 1;
1695         } else if (st.f_type != (long) RELAYFS_TYPE) {
1696                 fprintf(stderr,"%s does not appear to be a relay filesystem\n",
1697                         relay_path);
1698                 return 1;
1699         }
1700
1701         if (open_devices() != 0)
1702                 return 1;
1703
1704         if (kill_running_trace) {
1705                 stop_all_traces();
1706                 return 0;
1707         }
1708
1709         ncpus = sysconf(_SC_NPROCESSORS_ONLN);
1710         if (ncpus < 0) {
1711                 fprintf(stderr, "sysconf(_SC_NPROCESSORS_ONLN) failed\n");
1712                 return 1;
1713         }
1714
1715         signal(SIGINT, handle_sigint);
1716         signal(SIGHUP, handle_sigint);
1717         signal(SIGTERM, handle_sigint);
1718         signal(SIGALRM, handle_sigint);
1719
1720         if (net_mode == Net_client && net_setup_client())
1721                 return 1;
1722
1723         if (start_devices() != 0)
1724                 return 1;
1725
1726         atexit(stop_all_tracing);
1727
1728         if (stop_watch)
1729                 alarm(stop_watch);
1730
1731         wait_for_threads();
1732
1733         if (!is_trace_stopped()) {
1734                 trace_stopped = 1;
1735                 stop_all_threads();
1736                 stop_all_traces();
1737         }
1738
1739         show_stats();
1740
1741         return 0;
1742 }
1743