[PATCH] blktrace: ->fd and ->pfd init in net client mode
[blktrace.git] / blktrace.c
1 /*
2  * block queue tracing application
3  *
4  * Copyright (C) 2005 Jens Axboe <axboe@suse.de>
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, write to the Free Software
18  *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
19  *
20  */
21 #include <pthread.h>
22 #include <sys/types.h>
23 #include <sys/stat.h>
24 #include <unistd.h>
25 #include <locale.h>
26 #include <signal.h>
27 #include <fcntl.h>
28 #include <string.h>
29 #include <sys/ioctl.h>
30 #include <sys/param.h>
31 #include <sys/statfs.h>
32 #include <sys/poll.h>
33 #include <sys/mman.h>
34 #include <sys/socket.h>
35 #include <stdio.h>
36 #include <stdlib.h>
37 #include <sched.h>
38 #include <ctype.h>
39 #include <getopt.h>
40 #include <errno.h>
41 #include <netinet/in.h>
42 #include <arpa/inet.h>
43 #include <netdb.h>
44 #include <sys/sendfile.h>
45
46 #include "blktrace.h"
47 #include "barrier.h"
48
49 static char blktrace_version[] = "0.99";
50
51 /*
52  * You may want to increase this even more, if you are logging at a high
53  * rate and see skipped/missed events
54  */
55 #define BUF_SIZE        (512 * 1024)
56 #define BUF_NR          (4)
57
58 #define OFILE_BUF       (128 * 1024)
59
60 #define RELAYFS_TYPE    0xF0B4A981
61
62 #define S_OPTS  "d:a:A:r:o:kw:Vb:n:D:lh:p:s"
63 static struct option l_opts[] = {
64         {
65                 .name = "dev",
66                 .has_arg = required_argument,
67                 .flag = NULL,
68                 .val = 'd'
69         },
70         {
71                 .name = "act-mask",
72                 .has_arg = required_argument,
73                 .flag = NULL,
74                 .val = 'a'
75         },
76         {
77                 .name = "set-mask",
78                 .has_arg = required_argument,
79                 .flag = NULL,
80                 .val = 'A'
81         },
82         {
83                 .name = "relay",
84                 .has_arg = required_argument,
85                 .flag = NULL,
86                 .val = 'r'
87         },
88         {
89                 .name = "output",
90                 .has_arg = required_argument,
91                 .flag = NULL,
92                 .val = 'o'
93         },
94         {
95                 .name = "kill",
96                 .has_arg = no_argument,
97                 .flag = NULL,
98                 .val = 'k'
99         },
100         {
101                 .name = "stopwatch",
102                 .has_arg = required_argument,
103                 .flag = NULL,
104                 .val = 'w'
105         },
106         {
107                 .name = "version",
108                 .has_arg = no_argument,
109                 .flag = NULL,
110                 .val = 'V'
111         },
112         {
113                 .name = "buffer-size",
114                 .has_arg = required_argument,
115                 .flag = NULL,
116                 .val = 'b'
117         },
118         {
119                 .name = "num-sub-buffers",
120                 .has_arg = required_argument,
121                 .flag = NULL,
122                 .val = 'n'
123         },
124         {
125                 .name = "output-dir",
126                 .has_arg = required_argument,
127                 .flag = NULL,
128                 .val = 'D'
129         },
130         {
131                 .name = "listen",
132                 .has_arg = no_argument,
133                 .flag = NULL,
134                 .val = 'l'
135         },
136         {
137                 .name = "host",
138                 .has_arg = required_argument,
139                 .flag = NULL,
140                 .val = 'h'
141         },
142         {
143                 .name = "port",
144                 .has_arg = required_argument,
145                 .flag = NULL,
146                 .val = 'p'
147         },
148         {
149                 .name = "sendfile",
150                 .has_arg = no_argument,
151                 .flag = NULL,
152                 .val = 's'
153         },
154         {
155                 .name = NULL,
156         }
157 };
158
159 struct tip_subbuf {
160         void *buf;
161         unsigned int len;
162         unsigned int max_len;
163         off_t offset;
164 };
165
166 #define FIFO_SIZE       (1024)  /* should be plenty big! */
167 #define CL_SIZE         (128)   /* cache line, any bigger? */
168
169 struct tip_subbuf_fifo {
170         int tail __attribute__((aligned(CL_SIZE)));
171         int head __attribute__((aligned(CL_SIZE)));
172         struct tip_subbuf *q[FIFO_SIZE];
173 };
174
175 struct thread_information {
176         int cpu;
177         pthread_t thread;
178
179         int fd;
180         void *fd_buf;
181         char fn[MAXPATHLEN + 64];
182
183         int pfd;
184         size_t *pfd_buf;
185
186         FILE *ofile;
187         char *ofile_buffer;
188         off_t ofile_offset;
189         int ofile_stdout;
190         int ofile_mmap;
191
192         int (*get_subbuf)(struct thread_information *, unsigned int);
193         int (*flush_subbuf)(struct thread_information *, struct tip_subbuf *);
194         int (*read_data)(struct thread_information *, void *, unsigned int);
195
196         unsigned long events_processed;
197         unsigned long long data_read;
198         struct device_information *device;
199
200         int exited;
201
202         /*
203          * piped fifo buffers
204          */
205         struct tip_subbuf_fifo fifo;
206         struct tip_subbuf *leftover_ts;
207
208         /*
209          * mmap controlled output files
210          */
211         unsigned long long fs_size;
212         unsigned long long fs_max_size;
213         unsigned long fs_off;
214         void *fs_buf;
215         unsigned long fs_buf_len;
216 };
217
218 struct device_information {
219         int fd;
220         char *path;
221         char buts_name[32];
222         volatile int trace_started;
223         unsigned long drop_count;
224         struct thread_information *threads;
225 };
226
227 static int ncpus;
228 static struct thread_information *thread_information;
229 static int ndevs;
230 static struct device_information *device_information;
231
232 /* command line option globals */
233 static char *relay_path;
234 static char *output_name;
235 static char *output_dir;
236 static int act_mask = ~0U;
237 static int kill_running_trace;
238 static unsigned long buf_size = BUF_SIZE;
239 static unsigned long buf_nr = BUF_NR;
240 static unsigned int page_size;
241
242 #define is_done()       (*(volatile int *)(&done))
243 static volatile int done;
244
245 #define is_trace_stopped()      (*(volatile int *)(&trace_stopped))
246 static volatile int trace_stopped;
247
248 #define is_stat_shown() (*(volatile int *)(&stat_shown))
249 static volatile int stat_shown;
250
251 int data_is_native = -1;
252
253 static void exit_trace(int status);
254
255 #define dip_tracing(dip)        (*(volatile int *)(&(dip)->trace_started))
256 #define dip_set_tracing(dip, v) ((dip)->trace_started = (v))
257
258 #define __for_each_dip(__d, __i, __e)   \
259         for (__i = 0, __d = device_information; __i < __e; __i++, __d++)
260
261 #define for_each_dip(__d, __i)  __for_each_dip(__d, __i, ndevs)
262 #define for_each_tip(__d, __t, __j)     \
263         for (__j = 0, __t = (__d)->threads; __j < ncpus; __j++, __t++)
264
265 /*
266  * networking stuff follows. we include a magic number so we know whether
267  * the data needs endianness conversion or not
268  */
269 struct blktrace_net_hdr {
270         u32 magic;              /* same as trace magic */
271         char buts_name[32];     /* trace name */
272         u32 cpu;                /* for which cpu */
273         u32 max_cpus;
274         u32 len;                /* length of following trace data */
275 };
276
277 #define TRACE_NET_PORT          (8462)
278
279 enum {
280         Net_none = 0,
281         Net_server,
282         Net_client,
283 };
284
285 /*
286  * network cmd line params
287  */
288 static char hostname[MAXHOSTNAMELEN];
289 static int net_port = TRACE_NET_PORT;
290 static int net_mode = 0;
291 static int net_sendfile;
292
293 static int net_in_fd = -1;
294 static int net_out_fd = -1;
295
296 static void handle_sigint(__attribute__((__unused__)) int sig)
297 {
298         done = 1;
299 }
300
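/*
 * fetch the number of dropped events for a trace from the relayfs
 * "dropped" file the kernel exports for it
 */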
301 static int get_dropped_count(const char *buts_name)
302 {
303         int fd;
304         char tmp[MAXPATHLEN + 64];
305
306         snprintf(tmp, sizeof(tmp), "%s/block/%s/dropped",
307                  relay_path, buts_name);
308
309         fd = open(tmp, O_RDONLY);
310         if (fd < 0) {
311                 /*
312                  * this may be ok, if the kernel doesn't support dropped counts
313                  */
314                 if (errno == ENOENT)
315                         return 0;
316
317                 fprintf(stderr, "Couldn't open dropped file %s\n", tmp);
318                 return -1;
319         }
320
321         if (read(fd, tmp, sizeof(tmp)) < 0) {
322                 perror(tmp);
323                 close(fd);
324                 return -1;
325         }
326
327         close(fd);
328
329         return atoi(tmp);
330 }
331
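/*
 * read the per sub buffer padding counts from the .padding relay file
 * and return the padding for the given sub buffer
 */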
332 static size_t get_subbuf_padding(struct thread_information *tip,
333                                  unsigned subbuf)
334 {
335         size_t padding_size = buf_nr * sizeof(size_t);
336         size_t ret;
337
338         if (read(tip->pfd, tip->pfd_buf, padding_size) < 0) {
339                 perror("tip pad read");
340                 ret = -1;
341         } else
342                 ret = tip->pfd_buf[subbuf];
343
344         return ret;
345 }
346
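/*
 * set up and start tracing on one device via the BLKTRACESETUP and
 * BLKTRACESTART ioctls
 */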
347 static int start_trace(struct device_information *dip)
348 {
349         struct blk_user_trace_setup buts;
350
351         memset(&buts, 0, sizeof(buts));
352         buts.buf_size = buf_size;
353         buts.buf_nr = buf_nr;
354         buts.act_mask = act_mask;
355
356         if (ioctl(dip->fd, BLKTRACESETUP, &buts) < 0) {
357                 perror("BLKTRACESETUP");
358                 return 1;
359         }
360
361         if (ioctl(dip->fd, BLKTRACESTART) < 0) {
362                 perror("BLKTRACESTART");
363                 return 1;
364         }
365
366         memcpy(dip->buts_name, buts.name, sizeof(dip->buts_name));
367         dip_set_tracing(dip, 1);
368         return 0;
369 }
370
371 static void stop_trace(struct device_information *dip)
372 {
373         if (dip_tracing(dip) || kill_running_trace) {
374                 dip_set_tracing(dip, 0);
375
376                 if (ioctl(dip->fd, BLKTRACESTOP) < 0)
377                         perror("BLKTRACESTOP");
378                 if (ioctl(dip->fd, BLKTRACETEARDOWN) < 0)
379                         perror("BLKTRACETEARDOWN");
380
381                 close(dip->fd);
382                 dip->fd = -1;
383         }
384 }
385
386 static void stop_all_traces(void)
387 {
388         struct device_information *dip;
389         int i;
390
391         for_each_dip(dip, i) {
392                 dip->drop_count = get_dropped_count(dip->buts_name);
393                 stop_trace(dip);
394         }
395 }
396
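/*
 * poll the per-cpu relay file until data arrives or we are told to stop.
 * for stdout output we break out after a single poll, so partial data
 * isn't held back
 */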
397 static void wait_for_data(struct thread_information *tip)
398 {
399         struct pollfd pfd = { .fd = tip->fd, .events = POLLIN };
400
401         do {
402                 poll(&pfd, 1, 100);
403                 if (pfd.revents & POLLIN)
404                         break;
405                 if (tip->ofile_stdout)
406                         break;
407         } while (!is_done());
408 }
409
410 static int read_data_file(struct thread_information *tip, void *buf,
411                           unsigned int len)
412 {
413         int ret = 0;
414
415         do {
416                 wait_for_data(tip);
417
418                 ret = read(tip->fd, buf, len);
419                 if (!ret)
420                         continue;
421                 else if (ret > 0)
422                         return ret;
423                 else {
424                         if (errno != EAGAIN) {
425                                 perror(tip->fn);
426                                 fprintf(stderr,"Thread %d failed read of %s\n",
427                                         tip->cpu, tip->fn);
428                                 break;
429                         }
430                         continue;
431                 }
432         } while (!is_done());
433
434         return ret;
435
436 }
437
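/*
 * network server read: keep receiving until 'len' bytes have arrived or
 * we are told to stop, returning how much was actually read
 */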
438 static int read_data_net(struct thread_information *tip, void *buf,
439                          unsigned int len)
440 {
441         unsigned int bytes_left = len;
442         int ret = 0;
443
444         do {
445                 ret = recv(net_in_fd, buf, bytes_left, MSG_WAITALL);
446
447                 if (!ret)
448                         continue;
449                 else if (ret < 0) {
450                         if (errno != EAGAIN) {
451                                 perror(tip->fn);
452                                 fprintf(stderr, "server: failed read\n");
453                                 return 0;
454                         }
455                         continue;
456                 } else {
457                         buf += ret;
458                         bytes_left -= ret;
459                 }
460         } while (!is_done() && bytes_left);
461
462         return len - bytes_left;
463 }
464
465 static int read_data(struct thread_information *tip, void *buf,
466                      unsigned int len)
467 {
468         return tip->read_data(tip, buf, len);
469 }
470
471 static inline struct tip_subbuf *
472 subbuf_fifo_dequeue(struct thread_information *tip)
473 {
474         const int head = tip->fifo.head;
475         const int next = (head + 1) & (FIFO_SIZE - 1);
476
477         if (head != tip->fifo.tail) {
478                 struct tip_subbuf *ts = tip->fifo.q[head];
479
480                 store_barrier();
481                 tip->fifo.head = next;
482                 return ts;
483         }
484
485         return NULL;
486 }
487
488 static inline int subbuf_fifo_queue(struct thread_information *tip,
489                                     struct tip_subbuf *ts)
490 {
491         const int tail = tip->fifo.tail;
492         const int next = (tail + 1) & (FIFO_SIZE - 1);
493
494         if (next != tip->fifo.head) {
495                 tip->fifo.q[tail] = ts;
496                 store_barrier();
497                 tip->fifo.tail = next;
498                 return 0;
499         }
500
501         fprintf(stderr, "fifo too small!\n");
502         return 1;
503 }
504
505 /*
506  * For file output, truncate and mmap the file appropriately
507  */
508 static int mmap_subbuf(struct thread_information *tip, unsigned int maxlen)
509 {
510         int ofd = fileno(tip->ofile);
511         int ret;
512
513         /*
514          * extend file, if we have to. use chunks of 16 subbuffers.
515          */
516         if (tip->fs_off + buf_size > tip->fs_buf_len) {
517                 if (tip->fs_buf) {
518                         munlock(tip->fs_buf, tip->fs_buf_len);
519                         munmap(tip->fs_buf, tip->fs_buf_len);
520                         tip->fs_buf = NULL;
521                 }
522
523                 tip->fs_off = tip->fs_size & (page_size - 1);
524                 tip->fs_buf_len = (16 * buf_size) - tip->fs_off;
525                 tip->fs_max_size += tip->fs_buf_len;
526
527                 if (ftruncate(ofd, tip->fs_max_size) < 0) {
528                         perror("ftruncate");
529                         return -1;
530                 }
531
532                 tip->fs_buf = mmap(NULL, tip->fs_buf_len, PROT_WRITE,
533                                    MAP_SHARED, ofd, tip->fs_size - tip->fs_off);
534                 if (tip->fs_buf == MAP_FAILED) {
535                         perror("mmap");
536                         return -1;
537                 }
538                 mlock(tip->fs_buf, tip->fs_buf_len);
539         }
540
541         ret = read_data(tip, tip->fs_buf + tip->fs_off, maxlen);
542         if (ret >= 0) {
543                 tip->data_read += ret;
544                 tip->fs_size += ret;
545                 tip->fs_off += ret;
546                 return 0;
547         }
548
549         return -1;
550 }
551
552 /*
553  * Use the copy approach for pipes and network
554  */
555 static int get_subbuf(struct thread_information *tip, unsigned int maxlen)
556 {
557         struct tip_subbuf *ts = malloc(sizeof(*ts));
558         int ret;
559
560         ts->buf = malloc(buf_size);
561         ts->max_len = maxlen;
562
563         ret = read_data(tip, ts->buf, ts->max_len);
564         if (ret > 0) {
565                 ts->len = ret;
566                 tip->data_read += ret;
567                 return subbuf_fifo_queue(tip, ts);
568         }
569
570         return ret;
571 }
572
573 static int get_subbuf_sendfile(struct thread_information *tip,
574                                unsigned int maxlen)
575 {
576         struct tip_subbuf *ts;
577         struct stat sb;
578         unsigned int ready, this_size;
579         int err;
580
581         wait_for_data(tip);
582
583         /*
584          * hack to get last data out, we can't use sendfile for that
585          */
586         if (is_done())
587                 return get_subbuf(tip, maxlen);
588
589         if (fstat(tip->fd, &sb) < 0) {
590                 perror("trace stat");
591                 return 1;
592         }
593
594         ready = sb.st_size - tip->ofile_offset;
595         if (!ready) {
596                 /*
597  * delay a little, since poll() will keep reporting data available
598  * until sendfile() has actually consumed it
599                  */
600                 usleep(100);
601                 return 0;
602         }
603
604         this_size = buf_size;
605         while (ready) {
606                 if (this_size > ready)
607                         this_size = ready;
608
609                 ts = malloc(sizeof(*ts));
610
611                 ts->max_len = maxlen;
612                 ts->buf = NULL;
613
614                 ts->len = this_size;
615                 ts->max_len = ts->len;
616                 ts->offset = tip->ofile_offset;
617                 tip->ofile_offset += ts->len;
618
619                 err = subbuf_fifo_queue(tip, ts);
620                 if (err)
621                         return err;
622
623                 ready -= this_size;
624         }
625
626         return 0;
627 }
628
629 static void close_thread(struct thread_information *tip)
630 {
631         if (tip->fd != -1)
632                 close(tip->fd);
633         if (tip->pfd != -1)
634                 close(tip->pfd);
635         if (tip->ofile)
636                 fclose(tip->ofile);
637         if (tip->ofile_buffer)
638                 free(tip->ofile_buffer);
639         if (tip->fd_buf)
640                 free(tip->fd_buf);
641         if (tip->pfd_buf)
642                 free(tip->pfd_buf);
643
644         tip->fd = -1;
645         tip->pfd = -1;
646         tip->ofile = NULL;
647         tip->ofile_buffer = NULL;
648         tip->fd_buf = NULL;
649 }
650
651 static void tip_ftrunc_final(struct thread_information *tip)
652 {
653         /*
654          * truncate to right size and cleanup mmap
655          */
656         if (tip->ofile_mmap) {
657                 int ofd = fileno(tip->ofile);
658
659                 if (tip->fs_buf)
660                         munmap(tip->fs_buf, tip->fs_buf_len);
661
662                 ftruncate(ofd, tip->fs_size);
663         }
664 }
665
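/*
 * per-cpu worker thread: pin itself to its cpu, open the relay trace
 * file (plus the .padding file for sendfile clients) and pull sub
 * buffers until we are told to stop
 */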
666 static void *thread_main(void *arg)
667 {
668         struct thread_information *tip = arg;
669         pid_t pid = getpid();
670         cpu_set_t cpu_mask;
671
672         CPU_ZERO(&cpu_mask);
673         CPU_SET((tip->cpu), &cpu_mask);
674
675         if (sched_setaffinity(pid, sizeof(cpu_mask), &cpu_mask) == -1) {
676                 perror("sched_setaffinity");
677                 exit_trace(1);
678         }
679
680         snprintf(tip->fn, sizeof(tip->fn), "%s/block/%s/trace%d",
681                         relay_path, tip->device->buts_name, tip->cpu);
682         tip->fd = open(tip->fn, O_RDONLY);
683         if (tip->fd < 0) {
684                 perror(tip->fn);
685                 fprintf(stderr,"Thread %d failed open of %s\n", tip->cpu,
686                         tip->fn);
687                 exit_trace(1);
688         }
689
690         if (net_mode == Net_client && net_sendfile) {
691                 char tmp[MAXPATHLEN + 64];
692
693                 snprintf(tmp, sizeof(tmp), "%s/block/%s/trace%d.padding",
694                          relay_path, tip->device->buts_name, tip->cpu);
695
696                 tip->pfd = open(tmp, O_RDONLY);
697                 if (tip->pfd < 0) {
698                         fprintf(stderr, "Couldn't open padding file %s\n", tmp);
699                         exit_trace(1);
700                 }
701
702                 tip->pfd_buf = malloc(buf_nr * sizeof(size_t));
703         }
704
705         while (!is_done()) {
706                 if (tip->get_subbuf(tip, buf_size))
707                         break;
708         }
709
710         tip_ftrunc_final(tip);
711         tip->exited = 1;
712         return NULL;
713 }
714
715 static int write_data_net(int fd, void *buf, unsigned int buf_len)
716 {
717         unsigned int bytes_left = buf_len;
718         int ret;
719
720         while (bytes_left) {
721                 ret = send(fd, buf, bytes_left, 0);
722                 if (ret < 0) {
723                         perror("send");
724                         return 1;
725                 }
726
727                 buf += ret;
728                 bytes_left -= ret;
729         }
730
731         return 0;
732 }
733
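/*
 * every chunk of trace data sent to the server is prefixed with a
 * blktrace_net_hdr telling it which trace/cpu the data is for and how
 * long it is
 */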
734 static int net_send_header(struct thread_information *tip, unsigned int len)
735 {
736         struct blktrace_net_hdr hdr;
737
738         hdr.magic = BLK_IO_TRACE_MAGIC;
739         strcpy(hdr.buts_name, tip->device->buts_name);
740         hdr.cpu = tip->cpu;
741         hdr.max_cpus = ncpus;
742         hdr.len = len;
743
744         return write_data_net(net_out_fd, &hdr, sizeof(hdr));
745 }
746
747 /*
748  * send header with 0 length to signal end-of-run
749  */
750 static void net_client_send_close(void)
751 {
752         struct blktrace_net_hdr hdr;
753
754         hdr.magic = BLK_IO_TRACE_MAGIC;
755         hdr.cpu = 0;
756         hdr.max_cpus = ncpus;
757         hdr.len = 0;
758
759         write_data_net(net_out_fd, &hdr, sizeof(hdr));
760 }
761
762 static int flush_subbuf_net(struct thread_information *tip,
763                             struct tip_subbuf *ts)
764 {
765         if (net_send_header(tip, ts->len))
766                 return 1;
767         if (write_data_net(net_out_fd, ts->buf, ts->len))
768                 return 1;
769
770         free(ts->buf);
771         free(ts);
772         return 0;
773 }
774
775 static int flush_subbuf_sendfile(struct thread_information *tip,
776                                  struct tip_subbuf *ts)
777 {
778         size_t padding;
779         unsigned subbuf;
780         unsigned len;
781
782         /*
783          * currently we cannot use sendfile() on the last bytes read, as they
784          * may not be a full subbuffer. get_subbuf_sendfile() falls back to
785          * the read approach for those, so use send() to ship them out
786          */
787         if (ts->buf)
788                 return flush_subbuf_net(tip, ts);
789
790         subbuf = (ts->offset / buf_size) % buf_nr;
791         padding = get_subbuf_padding(tip, subbuf);
792         len = ts->len - padding;
793
794         if (net_send_header(tip, len))
795                 return 1;
796         if (sendfile(net_out_fd, tip->fd, &ts->offset, len) < 0) {
797                 perror("sendfile");
798                 return 1;
799         }
800
801         tip->data_read += len;
802         free(ts);
803         return 0;
804 }
805
806 static int write_data(struct thread_information *tip, void *buf,
807                       unsigned int buf_len)
808 {
809         int ret;
810
811         if (!buf_len)
812                 return 0;
813
814         while (1) {
815                 ret = fwrite(buf, buf_len, 1, tip->ofile);
816                 if (ret == 1)
817                         break;
818
819                 if (ferror(tip->ofile)) {
820                         perror("fwrite");
821                         return 1;
822                 }
823         }
824
825         if (tip->ofile_stdout)
826                 fflush(tip->ofile);
827
828         return 0;
829 }
830
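/*
 * file/pipe output: write out the complete events in this sub buffer
 * and keep any trailing partial event around for the next flush
 */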
831 static int flush_subbuf_file(struct thread_information *tip,
832                              struct tip_subbuf *ts)
833 {
834         unsigned int offset = 0;
835         struct blk_io_trace *t;
836         int pdu_len, events = 0;
837
838         /*
839          * surplus from last run
840          */
841         if (tip->leftover_ts) {
842                 struct tip_subbuf *prev_ts = tip->leftover_ts;
843
844                 if (prev_ts->len + ts->len > prev_ts->max_len) {
845                         prev_ts->max_len += ts->len;
846                         prev_ts->buf = realloc(prev_ts->buf, prev_ts->max_len);
847                 }
848
849                 memcpy(prev_ts->buf + prev_ts->len, ts->buf, ts->len);
850                 prev_ts->len += ts->len;
851
852                 free(ts->buf);
853                 free(ts);
854
855                 ts = prev_ts;
856                 tip->leftover_ts = NULL;
857         }
858
859         while (offset + sizeof(*t) <= ts->len) {
860                 t = ts->buf + offset;
861
862                 if (verify_trace(t)) {
863                         write_data(tip, ts->buf, offset);
864                         return -1;
865                 }
866
867                 pdu_len = t->pdu_len;
868
869                 if (offset + sizeof(*t) + pdu_len > ts->len)
870                         break;
871
872                 offset += sizeof(*t) + pdu_len;
873                 tip->events_processed++;
874                 tip->data_read += sizeof(*t) + pdu_len;
875                 events++;
876         }
877
878         if (write_data(tip, ts->buf, offset))
879                 return -1;
880
881         /*
882          * leftover bytes, save them for next time
883          */
884         if (offset != ts->len) {
885                 tip->leftover_ts = ts;
886                 ts->len -= offset;
887                 memmove(ts->buf, ts->buf + offset, ts->len);
888         } else {
889                 free(ts->buf);
890                 free(ts);
891         }
892
893         return events;
894 }
895
896 static int write_tip_events(struct thread_information *tip)
897 {
898         struct tip_subbuf *ts = subbuf_fifo_dequeue(tip);
899
900         if (ts)
901                 return tip->flush_subbuf(tip, ts);
902
903         return 0;
904 }
905
906 /*
907  * scans the tips we know and writes out the subbuffers we accumulate
908  */
909 static void get_and_write_events(void)
910 {
911         struct device_information *dip;
912         struct thread_information *tip;
913         int i, j, events, ret, tips_running;
914
915         while (!is_done()) {
916                 events = 0;
917
918                 for_each_dip(dip, i) {
919                         for_each_tip(dip, tip, j) {
920                                 ret = write_tip_events(tip);
921                                 if (ret > 0)
922                                         events += ret;
923                         }
924                 }
925
926                 if (!events)
927                         usleep(10);
928         }
929
930         /*
931          * reap stored events
932          */
933         do {
934                 events = 0;
935                 tips_running = 0;
936                 for_each_dip(dip, i) {
937                         for_each_tip(dip, tip, j) {
938                                 ret = write_tip_events(tip);
939                                 if (ret > 0)
940                                         events += ret;
941                                 tips_running += !tip->exited;
942                         }
943                 }
944                 usleep(10);
945         } while (events || tips_running);
946 }
947
948 static void wait_for_threads(void)
949 {
950         /*
951          * for piped or network output, poll and fetch data for writeout.
952          * for files, we just wait around for trace threads to exit
953          */
954         if ((output_name && !strcmp(output_name, "-")) ||
955             net_mode == Net_client)
956                 get_and_write_events();
957         else {
958                 struct device_information *dip;
959                 struct thread_information *tip;
960                 int i, j, tips_running;
961
962                 do {
963                         tips_running = 0;
964                         usleep(1000);
965
966                         for_each_dip(dip, i)
967                                 for_each_tip(dip, tip, j)
968                                         tips_running += !tip->exited;
969                 } while (tips_running);
970         }
971
972         if (net_mode == Net_client)
973                 net_client_send_close();
974 }
975
976 static void fill_ofname(char *dst, char *buts_name, int cpu)
977 {
978         int len = 0;
979
980         if (output_dir)
981                 len = sprintf(dst, "%s/", output_dir);
982
983         if (output_name)
984                 sprintf(dst + len, "%s.blktrace.%d", output_name, cpu);
985         else
986                 sprintf(dst + len, "%s.blktrace.%d", buts_name, cpu);
987 }
988
989 static void fill_ops(struct thread_information *tip)
990 {
991         /*
992          * setup ops
993          */
994         if (net_mode == Net_client) {
995                 if (net_sendfile) {
996                         tip->get_subbuf = get_subbuf_sendfile;
997                         tip->flush_subbuf = flush_subbuf_sendfile;
998                 } else {
999                         tip->get_subbuf = get_subbuf;
1000                         tip->flush_subbuf = flush_subbuf_net;
1001                 }
1002         } else {
1003                 if (tip->ofile_mmap)
1004                         tip->get_subbuf = mmap_subbuf;
1005                 else
1006                         tip->get_subbuf = get_subbuf;
1007
1008                 tip->flush_subbuf = flush_subbuf_file;
1009         }
1010
1011         if (net_mode == Net_server)
1012                 tip->read_data = read_data_net;
1013         else
1014                 tip->read_data = read_data_file;
1015 }
1016
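/*
 * pick the output for this thread: none locally for a network client,
 * line buffered stdout when piping, or an mmap'ed per-cpu file otherwise
 */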
1017 static int tip_open_output(struct device_information *dip,
1018                            struct thread_information *tip)
1019 {
1020         int pipeline = output_name && !strcmp(output_name, "-");
1021         int mode, vbuf_size;
1022         char op[64];
1023
1024         if (net_mode == Net_client) {
1025                 tip->ofile = NULL;
1026                 tip->ofile_stdout = 0;
1027                 tip->ofile_mmap = 0;
1028                 vbuf_size = 0;
1029                 mode = 0; /* gcc 4.x issues a bogus warning */
1030         } else if (pipeline) {
1031                 tip->ofile = fdopen(STDOUT_FILENO, "w");
1032                 tip->ofile_stdout = 1;
1033                 tip->ofile_mmap = 0;
1034                 mode = _IOLBF;
1035                 vbuf_size = 512;
1036         } else {
1037                 fill_ofname(op, dip->buts_name, tip->cpu);
1038                 tip->ofile = fopen(op, "w+");
1039                 tip->ofile_stdout = 0;
1040                 tip->ofile_mmap = 1;
1041                 mode = _IOFBF;
1042                 vbuf_size = OFILE_BUF;
1043         }
1044
1045         if (net_mode != Net_client && tip->ofile == NULL) {
1046                 perror(op);
1047                 return 1;
1048         }
1049
1050         if (vbuf_size) {
1051                 tip->ofile_buffer = malloc(vbuf_size);
1052                 if (setvbuf(tip->ofile, tip->ofile_buffer, mode, vbuf_size)) {
1053                         perror("setvbuf");
1054                         close_thread(tip);
1055                         return 1;
1056                 }
1057         }
1058
1059         fill_ops(tip);
1060         return 0;
1061 }
1062
1063 static int start_threads(struct device_information *dip)
1064 {
1065         struct thread_information *tip;
1066         int j;
1067
1068         for_each_tip(dip, tip, j) {
1069                 tip->cpu = j;
1070                 tip->device = dip;
1071                 tip->events_processed = 0;
1072                 tip->fd = -1;
1073                 tip->pfd = -1;
1074                 memset(&tip->fifo, 0, sizeof(tip->fifo));
1075                 tip->leftover_ts = NULL;
1076
1077                 if (tip_open_output(dip, tip))
1078                         return 1;
1079
1080                 if (pthread_create(&tip->thread, NULL, thread_main, tip)) {
1081                         perror("pthread_create");
1082                         close_thread(tip);
1083                         return 1;
1084                 }
1085         }
1086
1087         return 0;
1088 }
1089
1090 static void stop_threads(struct device_information *dip)
1091 {
1092         struct thread_information *tip;
1093         unsigned long ret;
1094         int i;
1095
1096         for_each_tip(dip, tip, i) {
1097                 (void) pthread_join(tip->thread, (void *) &ret);
1098                 close_thread(tip);
1099         }
1100 }
1101
1102 static void stop_all_threads(void)
1103 {
1104         struct device_information *dip;
1105         int i;
1106
1107         for_each_dip(dip, i)
1108                 stop_threads(dip);
1109 }
1110
1111 static void stop_all_tracing(void)
1112 {
1113         struct device_information *dip;
1114         int i;
1115
1116         for_each_dip(dip, i)
1117                 stop_trace(dip);
1118 }
1119
1120 static void exit_trace(int status)
1121 {
1122         if (!is_trace_stopped()) {
1123                 trace_stopped = 1;
1124                 stop_all_threads();
1125                 stop_all_tracing();
1126         }
1127
1128         exit(status);
1129 }
1130
1131 static int resize_devices(char *path)
1132 {
1133         int size = (ndevs + 1) * sizeof(struct device_information);
1134
1135         device_information = realloc(device_information, size);
1136         if (!device_information) {
1137                 fprintf(stderr, "Out of memory, device %s (%d)\n", path, size);
1138                 return 1;
1139         }
1140         device_information[ndevs].path = path;
1141         ndevs++;
1142         return 0;
1143 }
1144
1145 static int open_devices(void)
1146 {
1147         struct device_information *dip;
1148         int i;
1149
1150         for_each_dip(dip, i) {
1151                 dip->fd = open(dip->path, O_RDONLY | O_NONBLOCK);
1152                 if (dip->fd < 0) {
1153                         perror(dip->path);
1154                         return 1;
1155                 }
1156         }
1157
1158         return 0;
1159 }
1160
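/*
 * allocate the per-device/per-cpu thread state, start the kernel side
 * of the trace on every device and then spawn the worker threads
 */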
1161 static int start_devices(void)
1162 {
1163         struct device_information *dip;
1164         int i, j, size;
1165
1166         size = ncpus * sizeof(struct thread_information);
1167         thread_information = malloc(size * ndevs);
1168         if (!thread_information) {
1169                 fprintf(stderr, "Out of memory, threads (%d)\n", size * ndevs);
1170                 return 1;
1171         }
1172
1173         for_each_dip(dip, i) {
1174                 if (start_trace(dip)) {
1175                         close(dip->fd);
1176                         fprintf(stderr, "Failed to start trace on %s\n",
1177                                 dip->path);
1178                         break;
1179                 }
1180         }
1181
1182         if (i != ndevs) {
1183                 __for_each_dip(dip, j, i)
1184                         stop_trace(dip);
1185
1186                 return 1;
1187         }
1188
1189         for_each_dip(dip, i) {
1190                 dip->threads = thread_information + (i * ncpus);
1191                 if (start_threads(dip)) {
1192                         fprintf(stderr, "Failed to start worker threads\n");
1193                         break;
1194                 }
1195         }
1196
1197         if (i != ndevs) {
1198                 __for_each_dip(dip, j, i)
1199                         stop_threads(dip);
1200                 for_each_dip(dip, i)
1201                         stop_trace(dip);
1202
1203                 return 1;
1204         }
1205
1206         return 0;
1207 }
1208
1209 static void show_stats(void)
1210 {
1211         struct device_information *dip;
1212         struct thread_information *tip;
1213         unsigned long long events_processed, data_read;
1214         unsigned long total_drops;
1215         int i, j, no_stdout = 0;
1216
1217         if (is_stat_shown())
1218                 return;
1219
1220         if (output_name && !strcmp(output_name, "-"))
1221                 no_stdout = 1;
1222
1223         stat_shown = 1;
1224
1225         total_drops = 0;
1226         for_each_dip(dip, i) {
1227                 if (!no_stdout)
1228                         printf("Device: %s\n", dip->path);
1229                 events_processed = 0;
1230                 data_read = 0;
1231                 for_each_tip(dip, tip, j) {
1232                         if (!no_stdout)
1233                                 printf("  CPU%3d: %20lu events, %8llu KiB data\n",
1234                                         tip->cpu, tip->events_processed,
1235                                         (tip->data_read + 1023) >> 10);
1236                         events_processed += tip->events_processed;
1237                         data_read += tip->data_read;
1238                 }
1239                 total_drops += dip->drop_count;
1240                 if (!no_stdout)
1241                         printf("  Total:  %20llu events (dropped %lu), %8llu KiB data\n",
1242                                         events_processed, dip->drop_count,
1243                                         (data_read + 1023) >> 10);
1244         }
1245
1246         if (total_drops)
1247                 fprintf(stderr, "You have dropped events, consider using a larger buffer size (-b)\n");
1248 }
1249
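/*
 * server side: find the device a received buts_name maps to, creating
 * it (and opening its per-cpu output files) the first time we see it
 */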
1250 static struct device_information *net_get_dip(char *buts_name)
1251 {
1252         struct device_information *dip;
1253         int i;
1254
1255         for (i = 0; i < ndevs; i++) {
1256                 dip = &device_information[i];
1257
1258                 if (!strcmp(dip->buts_name, buts_name))
1259                         return dip;
1260         }
1261
1262         device_information = realloc(device_information, (ndevs + 1) * sizeof(*dip));
1263         dip = &device_information[ndevs];
1264         strcpy(dip->buts_name, buts_name);
1265         dip->path = strdup(buts_name);
1266         ndevs++;
1267         dip->threads = malloc(ncpus * sizeof(struct thread_information));
1268         memset(dip->threads, 0, ncpus * sizeof(struct thread_information));
1269
1270         /*
1271          * open all files
1272          */
1273         for (i = 0; i < ncpus; i++) {
1274                 struct thread_information *tip = &dip->threads[i];
1275
1276                 tip->cpu = i;
1277                 tip->device = dip;
1278                 tip->fd = -1;
1279                 tip->pfd = -1;
1280
1281                 if (tip_open_output(dip, tip))
1282                         return NULL;
1283         }
1284
1285         return dip;
1286 }
1287
1288 static struct thread_information *net_get_tip(struct blktrace_net_hdr *bnh)
1289 {
1290         struct device_information *dip;
1291
1292         ncpus = bnh->max_cpus;
1293         dip = net_get_dip(bnh->buts_name);
1294         return &dip->threads[bnh->cpu];
1295 }
1296
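/*
 * receive a full blktrace_net_hdr, using non-blocking reads so a stop
 * request is honored while we wait
 */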
1297 static int net_get_header(struct blktrace_net_hdr *bnh)
1298 {
1299         int fl = fcntl(net_in_fd, F_GETFL);
1300         int bytes_left, ret;
1301         void *p = bnh;
1302
1303         fcntl(net_in_fd, F_SETFL, fl | O_NONBLOCK);
1304         bytes_left = sizeof(*bnh);
1305         while (bytes_left && !is_done()) {
1306                 ret = recv(net_in_fd, p, bytes_left, MSG_WAITALL);
1307                 if (ret < 0) {
1308                         if (errno != EAGAIN) {
1309                                 perror("recv header");
1310                                 return 1;
1311                         }
1312                         usleep(100);
1313                         continue;
1314                 } else if (!ret) {
1315                         usleep(100);
1316                         continue;
1317                 } else {
1318                         p += ret;
1319                         bytes_left -= ret;
1320                 }
1321         }
1322         fcntl(net_in_fd, F_SETFL, fl & ~O_NONBLOCK);
1323         return 0;
1324 }
1325
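/*
 * handle one header + payload from the client: validate it, find the
 * matching cpu thread and append the payload to its output file
 */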
1326 static int net_server_loop(void)
1327 {
1328         struct thread_information *tip;
1329         struct blktrace_net_hdr bnh;
1330
1331         if (net_get_header(&bnh))
1332                 return 1;
1333
1334         if (data_is_native == -1 && check_data_endianness(bnh.magic)) {
1335                 fprintf(stderr, "server: received data is bad\n");
1336                 return 1;
1337         }
1338
1339         if (!data_is_native) {
1340                 bnh.cpu = be32_to_cpu(bnh.cpu);
1341                 bnh.len = be32_to_cpu(bnh.len);
                bnh.max_cpus = be32_to_cpu(bnh.max_cpus);
1342         }
1343
1344         /*
1345          * len == 0 means that the other end signalled end-of-run
1346          */
1347         if (!bnh.len) {
1348                 fprintf(stderr, "server: end of run\n");
1349                 return 1;
1350         }
1351
1352         tip = net_get_tip(&bnh);
1353         if (!tip)
1354                 return 1;
1355
1356         if (mmap_subbuf(tip, bnh.len))
1357                 return 1;
1358
1359         return 0;
1360 }
1361
1362 /*
1363  * Start here when we are in server mode - just fetch data from the network
1364  * and dump to files
1365  */
1366 static int net_server(void)
1367 {
1368         struct device_information *dip;
1369         struct thread_information *tip;
1370         struct sockaddr_in addr;
1371         socklen_t socklen;
1372         int fd, opt, i, j;
1373
1374         fd = socket(AF_INET, SOCK_STREAM, 0);
1375         if (fd < 0) {
1376                 perror("server: socket");
1377                 return 1;
1378         }
1379
1380         opt = 1;
1381         if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &opt, sizeof(opt)) < 0) {
1382                 perror("setsockopt");
1383                 return 1;
1384         }
1385
1386         memset(&addr, 0, sizeof(addr));
1387         addr.sin_family = AF_INET;
1388         addr.sin_addr.s_addr = htonl(INADDR_ANY);
1389         addr.sin_port = htons(net_port);
1390
1391         if (bind(fd, (struct sockaddr *) &addr, sizeof(addr)) < 0) {
1392                 perror("bind");
1393                 return 1;
1394         }
1395
1396         if (listen(fd, 1) < 0) {
1397                 perror("listen");
1398                 return 1;
1399         }
1400
1401 repeat:
1402         signal(SIGINT, SIG_DFL);
1403         signal(SIGHUP, SIG_DFL);
1404         signal(SIGTERM, SIG_DFL);
1405         signal(SIGALRM, SIG_DFL);
1406
1407         printf("blktrace: waiting for incoming connection...\n");
1408
1409         socklen = sizeof(addr);
1410         net_in_fd = accept(fd, (struct sockaddr *) &addr, &socklen);
1411         if (net_in_fd < 0) {
1412                 perror("accept");
1413                 return 1;
1414         }
1415
1416         signal(SIGINT, handle_sigint);
1417         signal(SIGHUP, handle_sigint);
1418         signal(SIGTERM, handle_sigint);
1419         signal(SIGALRM, handle_sigint);
1420
1421         printf("blktrace: connection from %s\n", inet_ntoa(addr.sin_addr));
1422
1423         while (!is_done()) {
1424                 if (net_server_loop())
1425                         break;
1426         }
1427
1428         for_each_dip(dip, i)
1429                 for_each_tip(dip, tip, j)
1430                         tip_ftrunc_final(tip);
1431
1432         show_stats();
1433
1434         if (is_done())
1435                 return 0;
1436
1437         /*
1438          * cleanup for next run
1439          */
1440         for_each_dip(dip, i) {
1441                 for_each_tip(dip, tip, j)
1442                         fclose(tip->ofile);
1443
1444                 free(dip->threads);
1445         }
1446
1447         free(device_information);
1448         device_information = NULL;
1449         ncpus = ndevs = 0;
1450
1451         close(net_in_fd);
1452         net_in_fd = -1;
1453         stat_shown = 0;
1454         goto repeat;
1455 }
1456
1457 /*
1458  * Setup outgoing network connection where we will transmit data
1459  */
1460 static int net_setup_client(void)
1461 {
1462         struct sockaddr_in addr;
1463         int fd;
1464
1465         fd = socket(AF_INET, SOCK_STREAM, 0);
1466         if (fd < 0) {
1467                 perror("client: socket");
1468                 return 1;
1469         }
1470
1471         memset(&addr, 0, sizeof(addr));
1472         addr.sin_family = AF_INET;
1473         addr.sin_port = htons(net_port);
1474
1475         if (inet_aton(hostname, &addr.sin_addr) != 1) {
1476                 struct hostent *hent = gethostbyname(hostname);
1477                 if (!hent) {
1478                         perror("gethostbyname");
1479                         return 1;
1480                 }
1481
1482                 memcpy(&addr.sin_addr, hent->h_addr, 4);
1483                 strcpy(hostname, hent->h_name);
1484         }
1485
1486         printf("blktrace: connecting to %s\n", hostname);
1487
1488         if (connect(fd, (struct sockaddr *) &addr, sizeof(addr)) < 0) {
1489                 perror("client: connect");
1490                 return 1;
1491         }
1492
1493         printf("blktrace: connected!\n");
1494         net_out_fd = fd;
1495         return 0;
1496 }
1497
1498 static char usage_str[] = \
1499         "-d <dev> [ -r relay path ] [ -o <output> ] [ -k ] [ -w time ]\n" \
1500         "[ -a action ] [ -A action mask ] [ -V ]\n\n" \
1501         "\t-d Use specified device. May also be given last after options\n" \
1502         "\t-r Path to mounted relayfs, defaults to /relay\n" \
1503         "\t-o File(s) to send output to\n" \
1504         "\t-D Directory to prepend to output file names\n" \
1505         "\t-k Kill a running trace\n" \
1506         "\t-w Stop after defined time, in seconds\n" \
1507         "\t-a Only trace specified actions. See documentation\n" \
1508         "\t-A Give trace mask as a single value. See documentation\n" \
1509         "\t-b Sub buffer size in KiB\n" \
1510         "\t-n Number of sub buffers\n" \
1511         "\t-l Run in network listen mode (blktrace server)\n" \
1512         "\t-h Run in network client mode, connecting to the given host\n" \
1513         "\t-p Network port to use (default 8462)\n" \
1514         "\t-s Make the network client use sendfile() to transfer data\n" \
1515         "\t-V Print program version info\n\n";
1516
1517 static void show_usage(char *program)
1518 {
1519         fprintf(stderr, "Usage: %s %s %s", program, blktrace_version, usage_str);
1520 }
1521
1522 int main(int argc, char *argv[])
1523 {
1524         static char default_relay_path[] = "/relay";
1525         struct statfs st;
1526         int i, c;
1527         int stop_watch = 0;
1528         int act_mask_tmp = 0;
1529
1530         while ((c = getopt_long(argc, argv, S_OPTS, l_opts, NULL)) >= 0) {
1531                 switch (c) {
1532                 case 'a':
1533                         i = find_mask_map(optarg);
1534                         if (i < 0) {
1535                                 fprintf(stderr,"Invalid action mask %s\n",
1536                                         optarg);
1537                                 return 1;
1538                         }
1539                         act_mask_tmp |= i;
1540                         break;
1541
1542                 case 'A':
1543                         if ((sscanf(optarg, "%x", &i) != 1) || 
1544                                                         !valid_act_opt(i)) {
1545                                 fprintf(stderr,
1546                                         "Invalid set action mask %s/0x%x\n",
1547                                         optarg, i);
1548                                 return 1;
1549                         }
1550                         act_mask_tmp = i;
1551                         break;
1552
1553                 case 'd':
1554                         if (resize_devices(optarg) != 0)
1555                                 return 1;
1556                         break;
1557
1558                 case 'r':
1559                         relay_path = optarg;
1560                         break;
1561
1562                 case 'o':
1563                         output_name = optarg;
1564                         break;
1565                 case 'k':
1566                         kill_running_trace = 1;
1567                         break;
1568                 case 'w':
1569                         stop_watch = atoi(optarg);
1570                         if (stop_watch <= 0) {
1571                                 fprintf(stderr,
1572                                         "Invalid stopwatch value (%d secs)\n",
1573                                         stop_watch);
1574                                 return 1;
1575                         }
1576                         break;
1577                 case 'V':
1578                         printf("%s version %s\n", argv[0], blktrace_version);
1579                         return 0;
1580                 case 'b':
1581                         buf_size = strtoul(optarg, NULL, 10);
1582                         if (buf_size <= 0 || buf_size > 16*1024) {
1583                                 fprintf(stderr,
1584                                         "Invalid buffer size (%lu)\n",buf_size);
1585                                 return 1;
1586                         }
1587                         buf_size <<= 10;
1588                         break;
1589                 case 'n':
1590                         buf_nr = strtoul(optarg, NULL, 10);
1591                         if (buf_nr <= 0) {
1592                                 fprintf(stderr,
1593                                         "Invalid buffer nr (%lu)\n", buf_nr);
1594                                 return 1;
1595                         }
1596                         break;
1597                 case 'D':
1598                         output_dir = optarg;
1599                         break;
1600                 case 'h':
1601                         net_mode = Net_client;
1602                         strcpy(hostname, optarg);
1603                         break;
1604                 case 'l':
1605                         net_mode = Net_server;
1606                         break;
1607                 case 'p':
1608                         net_port = atoi(optarg);
1609                         break;
1610                 case 's':
1611                         net_sendfile = 1;
1612                         break;
1613                 default:
1614                         show_usage(argv[0]);
1615                         return 1;
1616                 }
1617         }
1618
1619         setlocale(LC_NUMERIC, "en_US");
1620
1621         page_size = getpagesize();
1622
1623         if (net_mode == Net_server)
1624                 return net_server();
1625
1626         while (optind < argc) {
1627                 if (resize_devices(argv[optind++]) != 0)
1628                         return 1;
1629         }
1630
1631         if (ndevs == 0) {
1632                 show_usage(argv[0]);
1633                 return 1;
1634         }
1635
1636         if (!relay_path)
1637                 relay_path = default_relay_path;
1638
1639         if (act_mask_tmp != 0)
1640                 act_mask = act_mask_tmp;
1641
1642         if (statfs(relay_path, &st) < 0) {
1643                 perror("statfs");
1644                 fprintf(stderr,"%s does not appear to be a valid path\n",
1645                         relay_path);
1646                 return 1;
1647         } else if (st.f_type != (long) RELAYFS_TYPE) {
1648                 fprintf(stderr,"%s does not appear to be a relay filesystem\n",
1649                         relay_path);
1650                 return 1;
1651         }
1652
1653         if (open_devices() != 0)
1654                 return 1;
1655
1656         if (kill_running_trace) {
1657                 stop_all_traces();
1658                 return 0;
1659         }
1660
1661         ncpus = sysconf(_SC_NPROCESSORS_ONLN);
1662         if (ncpus < 0) {
1663                 fprintf(stderr, "sysconf(_SC_NPROCESSORS_ONLN) failed\n");
1664                 return 1;
1665         }
1666
1667         signal(SIGINT, handle_sigint);
1668         signal(SIGHUP, handle_sigint);
1669         signal(SIGTERM, handle_sigint);
1670         signal(SIGALRM, handle_sigint);
1671
1672         if (net_mode == Net_client && net_setup_client())
1673                 return 1;
1674
1675         if (start_devices() != 0)
1676                 return 1;
1677
1678         atexit(stop_all_tracing);
1679
1680         if (stop_watch)
1681                 alarm(stop_watch);
1682
1683         wait_for_threads();
1684
1685         if (!is_trace_stopped()) {
1686                 trace_stopped = 1;
1687                 stop_all_threads();
1688                 stop_all_traces();
1689         }
1690
1691         show_stats();
1692
1693         return 0;
1694 }
1695