[PATCH] blktrace: stop trace on SIGINT to allow drain
[blktrace.git] / blktrace.c
1 /*
2  * block queue tracing application
3  *
4  * Copyright (C) 2005 Jens Axboe <axboe@suse.de>
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, write to the Free Software
18  *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
19  *
20  */
21 #include <pthread.h>
22 #include <sys/types.h>
23 #include <sys/stat.h>
24 #include <unistd.h>
25 #include <locale.h>
26 #include <signal.h>
27 #include <fcntl.h>
28 #include <string.h>
29 #include <sys/ioctl.h>
30 #include <sys/param.h>
31 #include <sys/statfs.h>
32 #include <sys/poll.h>
33 #include <sys/mman.h>
34 #include <sys/socket.h>
35 #include <stdio.h>
36 #include <stdlib.h>
37 #include <sched.h>
38 #include <ctype.h>
39 #include <getopt.h>
40 #include <errno.h>
41 #include <netinet/in.h>
42 #include <arpa/inet.h>
43 #include <netdb.h>
44 #include <sys/sendfile.h>
45
46 #include "blktrace.h"
47 #include "barrier.h"
48
/* version string reported to the user */
static char blktrace_version[] = "0.99";

/*
 * You may want to increase this even more, if you are logging at a high
 * rate and see skipped/missed events
 */
#define BUF_SIZE	(512 * 1024)	/* default relay sub-buffer size */
#define BUF_NR		(4)		/* default number of sub-buffers */

/* stdio buffer size for regular-file output (see tip_open_output) */
#define OFILE_BUF	(128 * 1024)

/* filesystem magic used to recognize a relayfs mount */
#define RELAYFS_TYPE	0xF0B4A981
#define S_OPTS	"d:a:A:r:o:kw:Vb:n:D:lh:p:s"

/*
 * long option table, kept in sync with S_OPTS above. .flag is left at
 * its default (NULL) for every entry, so getopt_long() returns .val.
 */
static struct option l_opts[] = {
	{ .name = "dev",		.has_arg = required_argument,	.val = 'd' },
	{ .name = "act-mask",		.has_arg = required_argument,	.val = 'a' },
	{ .name = "set-mask",		.has_arg = required_argument,	.val = 'A' },
	{ .name = "relay",		.has_arg = required_argument,	.val = 'r' },
	{ .name = "output",		.has_arg = required_argument,	.val = 'o' },
	{ .name = "kill",		.has_arg = no_argument,		.val = 'k' },
	{ .name = "stopwatch",		.has_arg = required_argument,	.val = 'w' },
	{ .name = "version",		.has_arg = no_argument,		.val = 'V' },
	{ .name = "buffer-size",	.has_arg = required_argument,	.val = 'b' },
	{ .name = "num-sub-buffers",	.has_arg = required_argument,	.val = 'n' },
	{ .name = "output-dir",		.has_arg = required_argument,	.val = 'D' },
	{ .name = "listen",		.has_arg = no_argument,		.val = 'l' },
	{ .name = "host",		.has_arg = required_argument,	.val = 'h' },
	{ .name = "port",		.has_arg = required_argument,	.val = 'p' },
	{ .name = "sendfile",		.has_arg = no_argument,		.val = 's' },
	{ .name = NULL }
};
158
/*
 * one chunk of trace data: either a copy of relay data (buf != NULL)
 * or, in sendfile mode, just an (offset, len) descriptor into the
 * relay file (buf == NULL, see get_subbuf_sendfile)
 */
struct tip_subbuf {
	void *buf;		/* copied data, NULL for sendfile descriptors */
	unsigned int len;	/* valid bytes */
	unsigned int max_len;	/* allocated capacity of buf */
	off_t offset;		/* relay file offset (sendfile mode) */
};

#define FIFO_SIZE	(1024)	/* should be plenty big! */
#define CL_SIZE		(128)	/* cache line, any bigger? */

/*
 * single-producer/single-consumer ring of sub-buffer pointers; head
 * and tail live on separate cache lines to avoid false sharing between
 * the reader thread (producer) and the writer loop (consumer)
 */
struct tip_subbuf_fifo {
	int tail __attribute__((aligned(CL_SIZE)));
	int head __attribute__((aligned(CL_SIZE)));
	struct tip_subbuf *q[FIFO_SIZE];
};
174
/*
 * per-cpu trace reader state; one per cpu for each traced device
 */
struct thread_information {
	int cpu;
	pthread_t thread;

	int fd;				/* relay trace file */
	void *fd_buf;
	char fn[MAXPATHLEN + 64];	/* path of the relay trace file */

	int pfd;			/* relay .padding file (sendfile mode) */
	size_t *pfd_buf;		/* buf_nr padding entries read from pfd */

	FILE *ofile;
	char *ofile_buffer;		/* setvbuf() buffer for ofile */
	off_t ofile_offset;		/* next offset to sendfile() from */
	int ofile_stdout;		/* output goes to stdout/pipe */
	int ofile_mmap;			/* output via mmap'ed file windows */

	/* input/output methods, selected by fill_ops() */
	int (*get_subbuf)(struct thread_information *, unsigned int);
	int (*flush_subbuf)(struct thread_information *, struct tip_subbuf *);
	int (*read_data)(struct thread_information *, void *, unsigned int);

	unsigned long events_processed;
	unsigned long long data_read;
	struct device_information *device;

	int exited;			/* set when thread_main() returns */

	/*
	 * piped fifo buffers
	 */
	struct tip_subbuf_fifo fifo;
	struct tip_subbuf *leftover_ts;	/* partial event bytes from last flush */

	/*
	 * mmap controlled output files
	 */
	unsigned long long fs_size;	/* bytes of trace data written so far */
	unsigned long long fs_max_size;	/* size the file is ftruncate()d to */
	unsigned long fs_off;		/* write offset within fs_buf */
	void *fs_buf;			/* current mmap'ed output window */
	unsigned long fs_buf_len;	/* length of that window */
};

/*
 * one per traced block device (-d argument)
 */
struct device_information {
	int fd;				/* open fd on the device node */
	char *path;			/* device path */
	char buts_name[32];		/* trace name returned by BLKTRACESETUP */
	volatile int trace_started;	/* accessed via dip_tracing() macros */
	unsigned long drop_count;	/* dropped-event count read at stop */
	struct thread_information *threads;	/* ncpus reader threads */
};
226
static int ncpus;			/* threads per device, see for_each_tip */
static struct thread_information *thread_information;
static int ndevs;			/* number of traced devices */
static struct device_information *device_information;

/* command line option globals */
static char *relay_path;		/* relayfs mount point */
static char *output_name;		/* output base name, "-" means stdout */
static char *output_dir;
static int act_mask = ~0U;		/* action filter mask, default: all */
static int kill_running_trace;
static unsigned long buf_size = BUF_SIZE;	/* relay sub-buffer size */
static unsigned long buf_nr = BUF_NR;		/* relay sub-buffer count */
static unsigned int page_size;

/*
 * run-state flags; always read through the volatile accessor macros.
 * "done" is set from the SIGINT handler.
 */
#define is_done()	(*(volatile int *)(&done))
static volatile int done;

#define is_trace_stopped()	(*(volatile int *)(&trace_stopped))
static volatile int trace_stopped;

#define is_stat_shown()	(*(volatile int *)(&stat_shown))
static volatile int stat_shown;

/* byte-order flag shared with the trace parsing code (non-static) */
int data_is_native = -1;

static void exit_trace(int status);

/* per-device "kernel side is tracing" flag accessors */
#define dip_tracing(dip)	(*(volatile int *)(&(dip)->trace_started))
#define dip_set_tracing(dip, v)	((dip)->trace_started = (v))

/* iterate over all devices / over one device's per-cpu threads */
#define __for_each_dip(__d, __i, __e)	\
	for (__i = 0, __d = device_information; __i < __e; __i++, __d++)

#define for_each_dip(__d, __i)	__for_each_dip(__d, __i, ndevs)
#define for_each_tip(__d, __t, __j)	\
	for (__j = 0, __t = (__d)->threads; __j < ncpus; __j++, __t++)

/*
 * networking stuff follows. we include a magic number so we know whether
 * to endianness convert or not
 */
struct blktrace_net_hdr {
	u32 magic;		/* same as trace magic */
	char buts_name[32];	/* trace name */
	u32 cpu;		/* for which cpu */
	u32 max_cpus;
	u32 len;		/* length of following trace data */
};

#define TRACE_NET_PORT		(8462)

/* which side of a network run this process is, if any */
enum {
	Net_none = 0,
	Net_server,
	Net_client,
};

/*
 * network cmd line params
 */
static char hostname[MAXHOSTNAMELEN];	/* server to send to (--host) */
static int net_port = TRACE_NET_PORT;	/* --port */
static int net_mode = 0;		/* Net_none, Net_server or Net_client */
static int net_sendfile;		/* ship data with sendfile() (--sendfile) */

static int net_in_fd = -1;		/* socket we recv() from (server) */
static int net_out_fd = -1;		/* socket we send() to (client) */
295
/*
 * SIGINT handler: stop the kernel side of every running trace so the
 * already-produced data can still be drained, then flag all loops to
 * wind down via "done".
 *
 * NOTE(review): ioctl() is async-signal-safe, perror() is not; the
 * perror() call here is best-effort error reporting only — confirm
 * this is acceptable.
 */
static void handle_sigint(__attribute__((__unused__)) int sig)
{
	struct device_information *dip;
	int i;

	/*
	 * stop trace so we can reap currently produced data
	 */
	for_each_dip(dip, i) {
		if (ioctl(dip->fd, BLKTRACESTOP) < 0)
			perror("BLKTRACESTOP");
	}

	done = 1;
}
311
312 static int get_dropped_count(const char *buts_name)
313 {
314         int fd;
315         char tmp[MAXPATHLEN + 64];
316
317         snprintf(tmp, sizeof(tmp), "%s/block/%s/dropped",
318                  relay_path, buts_name);
319
320         fd = open(tmp, O_RDONLY);
321         if (fd < 0) {
322                 /*
323                  * this may be ok, if the kernel doesn't support dropped counts
324                  */
325                 if (errno == ENOENT)
326                         return 0;
327
328                 fprintf(stderr, "Couldn't open dropped file %s\n", tmp);
329                 return -1;
330         }
331
332         if (read(fd, tmp, sizeof(tmp)) < 0) {
333                 perror(tmp);
334                 close(fd);
335                 return -1;
336         }
337
338         close(fd);
339
340         return atoi(tmp);
341 }
342
/*
 * return the padding byte count for relay sub-buffer @subbuf by reading
 * this thread's ".padding" file (one size_t per sub-buffer) into
 * tip->pfd_buf and indexing the requested entry.
 *
 * NOTE(review): the return type is size_t yet -1 is assigned on read
 * failure, producing SIZE_MAX; the caller (flush_subbuf_sendfile) does
 * not check for it — verify the error path upstream.
 */
static size_t get_subbuf_padding(struct thread_information *tip,
				 unsigned subbuf)
{
	size_t padding_size = buf_nr * sizeof(size_t);
	size_t ret;

	if (read(tip->pfd, tip->pfd_buf, padding_size) < 0) {
		perror("tip pad read");
		ret = -1;
	} else
		ret = tip->pfd_buf[subbuf];

	return ret;
}
357
358 static int start_trace(struct device_information *dip)
359 {
360         struct blk_user_trace_setup buts;
361
362         memset(&buts, 0, sizeof(buts));
363         buts.buf_size = buf_size;
364         buts.buf_nr = buf_nr;
365         buts.act_mask = act_mask;
366
367         if (ioctl(dip->fd, BLKTRACESETUP, &buts) < 0) {
368                 perror("BLKTRACESETUP");
369                 return 1;
370         }
371
372         if (ioctl(dip->fd, BLKTRACESTART) < 0) {
373                 perror("BLKTRACESTART");
374                 return 1;
375         }
376
377         memcpy(dip->buts_name, buts.name, sizeof(dip->buts_name));
378         dip_set_tracing(dip, 1);
379         return 0;
380 }
381
382 static void stop_trace(struct device_information *dip)
383 {
384         if (dip_tracing(dip) || kill_running_trace) {
385                 dip_set_tracing(dip, 0);
386
387                 /*
388                  * should be stopped, just don't complain if it isn't
389                  */
390                 ioctl(dip->fd, BLKTRACESTOP);
391
392                 if (ioctl(dip->fd, BLKTRACETEARDOWN) < 0)
393                         perror("BLKTRACETEARDOWN");
394
395                 close(dip->fd);
396                 dip->fd = -1;
397         }
398 }
399
400 static void stop_all_traces(void)
401 {
402         struct device_information *dip;
403         int i;
404
405         for_each_dip(dip, i) {
406                 dip->drop_count = get_dropped_count(dip->buts_name);
407                 stop_trace(dip);
408         }
409 }
410
411 static void wait_for_data(struct thread_information *tip)
412 {
413         struct pollfd pfd = { .fd = tip->fd, .events = POLLIN };
414
415         do {
416                 poll(&pfd, 1, 100);
417                 if (pfd.revents & POLLIN)
418                         break;
419                 if (tip->ofile_stdout)
420                         break;
421         } while (!is_done());
422 }
423
424 static int read_data_file(struct thread_information *tip, void *buf,
425                           unsigned int len)
426 {
427         int ret = 0;
428
429         do {
430                 wait_for_data(tip);
431
432                 ret = read(tip->fd, buf, len);
433                 if (!ret)
434                         continue;
435                 else if (ret > 0)
436                         return ret;
437                 else {
438                         if (errno != EAGAIN) {
439                                 perror(tip->fn);
440                                 fprintf(stderr,"Thread %d failed read of %s\n",
441                                         tip->cpu, tip->fn);
442                                 break;
443                         }
444                         continue;
445                 }
446         } while (!is_done());
447
448         return ret;
449
450 }
451
452 static int read_data_net(struct thread_information *tip, void *buf,
453                          unsigned int len)
454 {
455         unsigned int bytes_left = len;
456         int ret = 0;
457
458         do {
459                 ret = recv(net_in_fd, buf, bytes_left, MSG_WAITALL);
460
461                 if (!ret)
462                         continue;
463                 else if (ret < 0) {
464                         if (errno != EAGAIN) {
465                                 perror(tip->fn);
466                                 fprintf(stderr, "server: failed read\n");
467                                 return 0;
468                         }
469                         continue;
470                 } else {
471                         buf += ret;
472                         bytes_left -= ret;
473                 }
474         } while (!is_done() && bytes_left);
475
476         return len - bytes_left;
477 }
478
/*
 * dispatch to the input method selected by fill_ops(): relay file
 * reads normally, network recv() when running as a server
 */
static int read_data(struct thread_information *tip, void *buf,
		     unsigned int len)
{
	return tip->read_data(tip, buf, len);
}
484
/*
 * consumer side of the lock-free single-producer/single-consumer
 * fifo: pop the sub-buffer at head, or NULL if the ring is empty.
 * the store barrier orders reading q[head] before publishing the new
 * head — do not reorder these statements.
 */
static inline struct tip_subbuf *
subbuf_fifo_dequeue(struct thread_information *tip)
{
	const int head = tip->fifo.head;
	const int next = (head + 1) & (FIFO_SIZE - 1);

	if (head != tip->fifo.tail) {
		struct tip_subbuf *ts = tip->fifo.q[head];

		store_barrier();
		tip->fifo.head = next;
		return ts;
	}

	return NULL;
}
501
/*
 * producer side of the fifo: push @ts at tail. the store barrier makes
 * the q[tail] store visible before the new tail is published, so the
 * consumer never sees an unwritten slot — statement order matters.
 * returns 0 on success, 1 if the ring is full (entry NOT queued).
 */
static inline int subbuf_fifo_queue(struct thread_information *tip,
				    struct tip_subbuf *ts)
{
	const int tail = tip->fifo.tail;
	const int next = (tail + 1) & (FIFO_SIZE - 1);

	if (next != tip->fifo.head) {
		tip->fifo.q[tail] = ts;
		store_barrier();
		tip->fifo.tail = next;
		return 0;
	}

	fprintf(stderr, "fifo too small!\n");
	return 1;
}
518
/*
 * For file output, truncate and mmap the file appropriately
 */
static int mmap_subbuf(struct thread_information *tip, unsigned int maxlen)
{
	int ofd = fileno(tip->ofile);
	int ret;

	/*
	 * extend file, if we have to. use chunks of 16 subbuffers.
	 */
	if (tip->fs_off + buf_size > tip->fs_buf_len) {
		if (tip->fs_buf) {
			munlock(tip->fs_buf, tip->fs_buf_len);
			munmap(tip->fs_buf, tip->fs_buf_len);
			tip->fs_buf = NULL;
		}

		/*
		 * the mmap offset must be page aligned, so carry the
		 * sub-page remainder of fs_size as the starting offset
		 * into the new window
		 */
		tip->fs_off = tip->fs_size & (page_size - 1);
		tip->fs_buf_len = (16 * buf_size) - tip->fs_off;
		tip->fs_max_size += tip->fs_buf_len;

		if (ftruncate(ofd, tip->fs_max_size) < 0) {
			perror("ftruncate");
			return -1;
		}

		tip->fs_buf = mmap(NULL, tip->fs_buf_len, PROT_WRITE,
				   MAP_SHARED, ofd, tip->fs_size - tip->fs_off);
		if (tip->fs_buf == MAP_FAILED) {
			perror("mmap");
			return -1;
		}
		/* best effort; an mlock failure is not fatal here */
		mlock(tip->fs_buf, tip->fs_buf_len);
	}

	/* read trace data straight into the file-backed window */
	ret = read_data(tip, tip->fs_buf + tip->fs_off, maxlen);
	if (ret >= 0) {
		tip->data_read += ret;
		tip->fs_size += ret;
		tip->fs_off += ret;
		return 0;
	}

	return -1;
}
565
566 /*
567  * Use the copy approach for pipes and network
568  */
569 static int get_subbuf(struct thread_information *tip, unsigned int maxlen)
570 {
571         struct tip_subbuf *ts = malloc(sizeof(*ts));
572         int ret;
573
574         ts->buf = malloc(buf_size);
575         ts->max_len = maxlen;
576
577         ret = read_data(tip, ts->buf, ts->max_len);
578         if (ret > 0) {
579                 ts->len = ret;
580                 tip->data_read += ret;
581                 if (subbuf_fifo_queue(tip, ts))
582                         return -1;
583         }
584
585         return ret;
586 }
587
588 static int get_subbuf_sendfile(struct thread_information *tip,
589                                unsigned int maxlen)
590 {
591         struct tip_subbuf *ts;
592         struct stat sb;
593         unsigned int ready, this_size, total;
594
595         wait_for_data(tip);
596
597         /*
598          * hack to get last data out, we can't use sendfile for that
599          */
600         if (is_done())
601                 return get_subbuf(tip, maxlen);
602
603         if (fstat(tip->fd, &sb) < 0) {
604                 perror("trace stat");
605                 return -1;
606         }
607
608         ready = sb.st_size - tip->ofile_offset;
609         if (!ready) {
610                 /*
611                  * delay a little, since poll() will return data available
612                  * until sendfile() is run
613                  */
614                 usleep(100);
615                 return 0;
616         }
617
618         this_size = buf_size;
619         total = ready;
620         while (ready) {
621                 if (this_size > ready)
622                         this_size = ready;
623
624                 ts = malloc(sizeof(*ts));
625
626                 ts->buf = NULL;
627                 ts->max_len = 0;
628
629                 ts->len = this_size;
630                 ts->offset = tip->ofile_offset;
631                 tip->ofile_offset += ts->len;
632
633                 if (subbuf_fifo_queue(tip, ts))
634                         return -1;
635
636                 ready -= this_size;
637         }
638
639         return total;
640 }
641
642 static void close_thread(struct thread_information *tip)
643 {
644         if (tip->fd != -1)
645                 close(tip->fd);
646         if (tip->pfd != -1)
647                 close(tip->pfd);
648         if (tip->ofile)
649                 fclose(tip->ofile);
650         if (tip->ofile_buffer)
651                 free(tip->ofile_buffer);
652         if (tip->fd_buf)
653                 free(tip->fd_buf);
654         if (tip->pfd_buf)
655                 free(tip->pfd_buf);
656
657         tip->fd = -1;
658         tip->pfd = -1;
659         tip->ofile = NULL;
660         tip->ofile_buffer = NULL;
661         tip->fd_buf = NULL;
662 }
663
664 static void tip_ftrunc_final(struct thread_information *tip)
665 {
666         /*
667          * truncate to right size and cleanup mmap
668          */
669         if (tip->ofile_mmap) {
670                 int ofd = fileno(tip->ofile);
671
672                 if (tip->fs_buf)
673                         munmap(tip->fs_buf, tip->fs_buf_len);
674
675                 ftruncate(ofd, tip->fs_size);
676         }
677 }
678
679 static void *thread_main(void *arg)
680 {
681         struct thread_information *tip = arg;
682         pid_t pid = getpid();
683         cpu_set_t cpu_mask;
684
685         CPU_ZERO(&cpu_mask);
686         CPU_SET((tip->cpu), &cpu_mask);
687
688         if (sched_setaffinity(pid, sizeof(cpu_mask), &cpu_mask) == -1) {
689                 perror("sched_setaffinity");
690                 exit_trace(1);
691         }
692
693         snprintf(tip->fn, sizeof(tip->fn), "%s/block/%s/trace%d",
694                         relay_path, tip->device->buts_name, tip->cpu);
695         tip->fd = open(tip->fn, O_RDONLY);
696         if (tip->fd < 0) {
697                 perror(tip->fn);
698                 fprintf(stderr,"Thread %d failed open of %s\n", tip->cpu,
699                         tip->fn);
700                 exit_trace(1);
701         }
702
703         if (net_mode == Net_client && net_sendfile) {
704                 char tmp[MAXPATHLEN + 64];
705
706                 snprintf(tmp, sizeof(tmp), "%s/block/%s/trace%d.padding",
707                          relay_path, tip->device->buts_name, tip->cpu);
708
709                 tip->pfd = open(tmp, O_RDONLY);
710                 if (tip->pfd < 0) {
711                         fprintf(stderr, "Couldn't open padding file %s\n", tmp);
712                         exit_trace(1);
713                 }
714
715                 tip->pfd_buf = malloc(buf_nr * sizeof(size_t));
716         }
717
718         while (!is_done()) {
719                 if (tip->get_subbuf(tip, buf_size) < 0)
720                         break;
721         }
722
723         /*
724          * trace is stopped, pull data until we get a short read
725          */
726         while (tip->get_subbuf(tip, buf_size) > 0)
727                 ;
728
729         tip_ftrunc_final(tip);
730         tip->exited = 1;
731         return NULL;
732 }
733
/*
 * send exactly @buf_len bytes from @buf over socket @fd, looping on
 * partial sends. returns 0 on success, 1 on send failure.
 */
static int write_data_net(int fd, void *buf, unsigned int buf_len)
{
	/* use a char cursor; arithmetic on void * is a GCC extension */
	unsigned char *p = buf;
	unsigned int bytes_left = buf_len;
	int ret;

	while (bytes_left) {
		ret = send(fd, p, bytes_left, 0);
		if (ret < 0) {
			perror("send");
			return 1;
		}

		p += ret;
		bytes_left -= ret;
	}

	return 0;
}
752
753 static int net_send_header(struct thread_information *tip, unsigned int len)
754 {
755         struct blktrace_net_hdr hdr;
756
757         hdr.magic = BLK_IO_TRACE_MAGIC;
758         strcpy(hdr.buts_name, tip->device->buts_name);
759         hdr.cpu = tip->cpu;
760         hdr.max_cpus = ncpus;
761         hdr.len = len;
762
763         return write_data_net(net_out_fd, &hdr, sizeof(hdr));
764 }
765
766 /*
767  * send header with 0 length to signal end-of-run
768  */
769 static void net_client_send_close(void)
770 {
771         struct blktrace_net_hdr hdr;
772
773         hdr.magic = BLK_IO_TRACE_MAGIC;
774         hdr.cpu = 0;
775         hdr.max_cpus = ncpus;
776         hdr.len = 0;
777
778         write_data_net(net_out_fd, &hdr, sizeof(hdr));
779 }
780
781 static int flush_subbuf_net(struct thread_information *tip,
782                             struct tip_subbuf *ts)
783 {
784         if (net_send_header(tip, ts->len))
785                 return 1;
786         if (write_data_net(net_out_fd, ts->buf, ts->len))
787                 return 1;
788
789         free(ts->buf);
790         free(ts);
791         return 0;
792 }
793
/*
 * ship a queued sub-buffer descriptor with sendfile(): strip the relay
 * padding off the tail, announce the net length, then splice the bytes
 * straight from the trace file to the socket. sendfile() advances
 * ts->offset itself. returns 0 on success, 1 on error.
 */
static int flush_subbuf_sendfile(struct thread_information *tip,
				 struct tip_subbuf *ts)
{
	size_t padding;
	unsigned subbuf;
	unsigned len;

	/*
	 * currently we cannot use sendfile() on the last bytes read, as they
	 * may not be a full subbuffer. get_subbuf_sendfile() falls back to
	 * the read approach for those, so use send() to ship them out
	 */
	if (ts->buf)
		return flush_subbuf_net(tip, ts);

	subbuf = (ts->offset / buf_size) % buf_nr;
	/*
	 * NOTE(review): get_subbuf_padding() returns (size_t)-1 on read
	 * failure and that is not checked here — len would underflow.
	 * confirm the padding read cannot fail in practice.
	 */
	padding = get_subbuf_padding(tip, subbuf);
	len = ts->len - padding;

	if (net_send_header(tip, len))
		return 1;
	if (sendfile(net_out_fd, tip->fd, &ts->offset, len) < 0) {
		perror("sendfile");
		return 1;
	}

	tip->data_read += len;
	free(ts);
	return 0;
}
824
825 static int write_data(struct thread_information *tip, void *buf,
826                       unsigned int buf_len)
827 {
828         int ret;
829
830         if (!buf_len)
831                 return 0;
832
833         while (1) {
834                 ret = fwrite(buf, buf_len, 1, tip->ofile);
835                 if (ret == 1)
836                         break;
837
838                 if (ret < 0) {
839                         perror("write");
840                         return 1;
841                 }
842         }
843
844         if (tip->ofile_stdout)
845                 fflush(tip->ofile);
846
847         return 0;
848 }
849
/*
 * write a sub-buffer's worth of trace data to the output file, event
 * by event. whole events only: any trailing partial event is kept in
 * tip->leftover_ts and prepended to the next sub-buffer. returns the
 * number of events written, or -1 on error.
 */
static int flush_subbuf_file(struct thread_information *tip,
			     struct tip_subbuf *ts)
{
	unsigned int offset = 0;
	struct blk_io_trace *t;
	int pdu_len, events = 0;

	/*
	 * surplus from last run
	 */
	if (tip->leftover_ts) {
		struct tip_subbuf *prev_ts = tip->leftover_ts;

		if (prev_ts->len + ts->len > prev_ts->max_len) {
			prev_ts->max_len += ts->len;
			/*
			 * NOTE(review): realloc result overwrites the
			 * original pointer — on allocation failure the
			 * old buffer is lost and NULL is dereferenced
			 * below. confirm OOM policy.
			 */
			prev_ts->buf = realloc(prev_ts->buf, prev_ts->max_len);
		}

		memcpy(prev_ts->buf + prev_ts->len, ts->buf, ts->len);
		prev_ts->len += ts->len;

		free(ts->buf);
		free(ts);

		/* continue with the combined buffer */
		ts = prev_ts;
		tip->leftover_ts = NULL;
	}

	/* walk whole events while a full header fits in the buffer */
	while (offset + sizeof(*t) <= ts->len) {
		t = ts->buf + offset;

		if (verify_trace(t)) {
			/* flush what was valid before bailing out */
			write_data(tip, ts->buf, offset);
			return -1;
		}

		pdu_len = t->pdu_len;

		/* event payload extends past this buffer: stop here */
		if (offset + sizeof(*t) + pdu_len > ts->len)
			break;

		offset += sizeof(*t) + pdu_len;
		tip->events_processed++;
		tip->data_read += sizeof(*t) + pdu_len;
		events++;
	}

	if (write_data(tip, ts->buf, offset))
		return -1;

	/*
	 * leftover bytes, save them for next time
	 */
	if (offset != ts->len) {
		tip->leftover_ts = ts;
		ts->len -= offset;
		memmove(ts->buf, ts->buf + offset, ts->len);
	} else {
		free(ts->buf);
		free(ts);
	}

	return events;
}
914
915 static int write_tip_events(struct thread_information *tip)
916 {
917         struct tip_subbuf *ts = subbuf_fifo_dequeue(tip);
918
919         if (ts)
920                 return tip->flush_subbuf(tip, ts);
921
922         return 0;
923 }
924
925 /*
926  * scans the tips we know and writes out the subbuffers we accumulate
927  */
928 static void get_and_write_events(void)
929 {
930         struct device_information *dip;
931         struct thread_information *tip;
932         int i, j, events, ret, tips_running;
933
934         while (!is_done()) {
935                 events = 0;
936
937                 for_each_dip(dip, i) {
938                         for_each_tip(dip, tip, j) {
939                                 ret = write_tip_events(tip);
940                                 if (ret > 0)
941                                         events += ret;
942                         }
943                 }
944
945                 if (!events)
946                         usleep(10);
947         }
948
949         /*
950          * reap stored events
951          */
952         do {
953                 events = 0;
954                 tips_running = 0;
955                 for_each_dip(dip, i) {
956                         for_each_tip(dip, tip, j) {
957                                 ret = write_tip_events(tip);
958                                 if (ret > 0)
959                                         events += ret;
960                                 tips_running += !tip->exited;
961                         }
962                 }
963                 usleep(10);
964         } while (events || tips_running);
965 }
966
967 static void wait_for_threads(void)
968 {
969         /*
970          * for piped or network output, poll and fetch data for writeout.
971          * for files, we just wait around for trace threads to exit
972          */
973         if ((output_name && !strcmp(output_name, "-")) ||
974             net_mode == Net_client)
975                 get_and_write_events();
976         else {
977                 struct device_information *dip;
978                 struct thread_information *tip;
979                 int i, j, tips_running;
980
981                 do {
982                         tips_running = 0;
983                         usleep(1000);
984
985                         for_each_dip(dip, i)
986                                 for_each_tip(dip, tip, j)
987                                         tips_running += !tip->exited;
988                 } while (tips_running);
989         }
990
991         if (net_mode == Net_client)
992                 net_client_send_close();
993 }
994
/*
 * build the per-cpu output file name into @dst:
 * [<output_dir>/]<base>.blktrace.<cpu>, where <base> is the -o name if
 * given, else the kernel-assigned buts_name.
 *
 * NOTE(review): unbounded sprintf into a caller-supplied buffer (64
 * bytes at the call site) — a long output name or directory can
 * overflow it; consider passing the destination size down.
 */
static void fill_ofname(char *dst, char *buts_name, int cpu)
{
	int len = 0;

	if (output_dir)
		len = sprintf(dst, "%s/", output_dir);

	if (output_name)
		sprintf(dst + len, "%s.blktrace.%d", output_name, cpu);
	else
		sprintf(dst + len, "%s.blktrace.%d", buts_name, cpu);
}
1007
1008 static void fill_ops(struct thread_information *tip)
1009 {
1010         /*
1011          * setup ops
1012          */
1013         if (net_mode == Net_client) {
1014                 if (net_sendfile) {
1015                         tip->get_subbuf = get_subbuf_sendfile;
1016                         tip->flush_subbuf = flush_subbuf_sendfile;
1017                 } else {
1018                         tip->get_subbuf = get_subbuf;
1019                         tip->flush_subbuf = flush_subbuf_net;
1020                 }
1021         } else {
1022                 if (tip->ofile_mmap)
1023                         tip->get_subbuf = mmap_subbuf;
1024                 else
1025                         tip->get_subbuf = get_subbuf;
1026
1027                 tip->flush_subbuf = flush_subbuf_file;
1028         }
1029                         
1030         if (net_mode == Net_server)
1031                 tip->read_data = read_data_net;
1032         else
1033                 tip->read_data = read_data_file;
1034 }
1035
1036 static int tip_open_output(struct device_information *dip,
1037                            struct thread_information *tip)
1038 {
1039         int pipeline = output_name && !strcmp(output_name, "-");
1040         int mode, vbuf_size;
1041         char op[64];
1042
1043         if (net_mode == Net_client) {
1044                 tip->ofile = NULL;
1045                 tip->ofile_stdout = 0;
1046                 tip->ofile_mmap = 0;
1047                 vbuf_size = 0;
1048                 mode = 0; /* gcc 4.x issues a bogus warning */
1049         } else if (pipeline) {
1050                 tip->ofile = fdopen(STDOUT_FILENO, "w");
1051                 tip->ofile_stdout = 1;
1052                 tip->ofile_mmap = 0;
1053                 mode = _IOLBF;
1054                 vbuf_size = 512;
1055         } else {
1056                 fill_ofname(op, dip->buts_name, tip->cpu);
1057                 tip->ofile = fopen(op, "w+");
1058                 tip->ofile_stdout = 0;
1059                 tip->ofile_mmap = 1;
1060                 mode = _IOFBF;
1061                 vbuf_size = OFILE_BUF;
1062         }
1063
1064         if (net_mode != Net_client && tip->ofile == NULL) {
1065                 perror(op);
1066                 return 1;
1067         }
1068
1069         if (vbuf_size) {
1070                 tip->ofile_buffer = malloc(vbuf_size);
1071                 if (setvbuf(tip->ofile, tip->ofile_buffer, mode, vbuf_size)) {
1072                         perror("setvbuf");
1073                         close_thread(tip);
1074                         return 1;
1075                 }
1076         }
1077
1078         fill_ops(tip);
1079         return 0;
1080 }
1081
1082 static int start_threads(struct device_information *dip)
1083 {
1084         struct thread_information *tip;
1085         int j;
1086
1087         for_each_tip(dip, tip, j) {
1088                 tip->cpu = j;
1089                 tip->device = dip;
1090                 tip->events_processed = 0;
1091                 tip->fd = -1;
1092                 tip->pfd = -1;
1093                 memset(&tip->fifo, 0, sizeof(tip->fifo));
1094                 tip->leftover_ts = NULL;
1095
1096                 if (tip_open_output(dip, tip))
1097                         return 1;
1098
1099                 if (pthread_create(&tip->thread, NULL, thread_main, tip)) {
1100                         perror("pthread_create");
1101                         close_thread(tip);
1102                         return 1;
1103                 }
1104         }
1105
1106         return 0;
1107 }
1108
1109 static void stop_threads(struct device_information *dip)
1110 {
1111         struct thread_information *tip;
1112         unsigned long ret;
1113         int i;
1114
1115         for_each_tip(dip, tip, i) {
1116                 (void) pthread_join(tip->thread, (void *) &ret);
1117                 close_thread(tip);
1118         }
1119 }
1120
1121 static void stop_all_threads(void)
1122 {
1123         struct device_information *dip;
1124         int i;
1125
1126         for_each_dip(dip, i)
1127                 stop_threads(dip);
1128 }
1129
1130 static void stop_all_tracing(void)
1131 {
1132         struct device_information *dip;
1133         int i;
1134
1135         for_each_dip(dip, i)
1136                 stop_trace(dip);
1137 }
1138
1139 static void exit_trace(int status)
1140 {
1141         if (!is_trace_stopped()) {
1142                 trace_stopped = 1;
1143                 stop_all_threads();
1144                 stop_all_tracing();
1145         }
1146
1147         exit(status);
1148 }
1149
1150 static int resize_devices(char *path)
1151 {
1152         int size = (ndevs + 1) * sizeof(struct device_information);
1153
1154         device_information = realloc(device_information, size);
1155         if (!device_information) {
1156                 fprintf(stderr, "Out of memory, device %s (%d)\n", path, size);
1157                 return 1;
1158         }
1159         device_information[ndevs].path = path;
1160         ndevs++;
1161         return 0;
1162 }
1163
1164 static int open_devices(void)
1165 {
1166         struct device_information *dip;
1167         int i;
1168
1169         for_each_dip(dip, i) {
1170                 dip->fd = open(dip->path, O_RDONLY | O_NONBLOCK);
1171                 if (dip->fd < 0) {
1172                         perror(dip->path);
1173                         return 1;
1174                 }
1175         }
1176
1177         return 0;
1178 }
1179
static int start_devices(void)
{
	struct device_information *dip;
	int i, j, size;

	/*
	 * Allocate one flat array of thread_information, ncpus entries per
	 * device; each device gets its slice assigned below.
	 */
	size = ncpus * sizeof(struct thread_information);
	thread_information = malloc(size * ndevs);
	if (!thread_information) {
		fprintf(stderr, "Out of memory, threads (%d)\n", size * ndevs);
		return 1;
	}

	/* Arm kernel-side tracing on each device; stop at first failure. */
	for_each_dip(dip, i) {
		if (start_trace(dip)) {
			close(dip->fd);
			fprintf(stderr, "Failed to start trace on %s\n",
				dip->path);
			break;
		}
	}

	/* Partial success: unwind only the traces already started (0..i-1). */
	if (i != ndevs) {
		__for_each_dip(dip, j, i)
			stop_trace(dip);

		return 1;
	}

	/* Start the per-cpu reader threads for each device. */
	for_each_dip(dip, i) {
		dip->threads = thread_information + (i * ncpus);
		if (start_threads(dip)) {
			fprintf(stderr, "Failed to start worker threads\n");
			break;
		}
	}

	/*
	 * Partial success: stop threads on the devices that got them
	 * (0..i-1), then stop tracing everywhere.
	 */
	if (i != ndevs) {
		__for_each_dip(dip, j, i)
			stop_threads(dip);
		for_each_dip(dip, i)
			stop_trace(dip);

		return 1;
	}

	return 0;
}
1227
1228 static void show_stats(void)
1229 {
1230         struct device_information *dip;
1231         struct thread_information *tip;
1232         unsigned long long events_processed, data_read;
1233         unsigned long total_drops;
1234         int i, j, no_stdout = 0;
1235
1236         if (is_stat_shown())
1237                 return;
1238
1239         if (output_name && !strcmp(output_name, "-"))
1240                 no_stdout = 1;
1241
1242         stat_shown = 1;
1243
1244         total_drops = 0;
1245         for_each_dip(dip, i) {
1246                 if (!no_stdout)
1247                         printf("Device: %s\n", dip->path);
1248                 events_processed = 0;
1249                 data_read = 0;
1250                 for_each_tip(dip, tip, j) {
1251                         if (!no_stdout)
1252                                 printf("  CPU%3d: %20lu events, %8llu KiB data\n",
1253                                         tip->cpu, tip->events_processed,
1254                                         (tip->data_read + 1023) >> 10);
1255                         events_processed += tip->events_processed;
1256                         data_read += tip->data_read;
1257                 }
1258                 total_drops += dip->drop_count;
1259                 if (!no_stdout)
1260                         printf("  Total:  %20llu events (dropped %lu), %8llu KiB data\n",
1261                                         events_processed, dip->drop_count,
1262                                         (data_read + 1023) >> 10);
1263         }
1264
1265         if (total_drops)
1266                 fprintf(stderr, "You have dropped events, consider using a larger buffer size (-b)\n");
1267 }
1268
1269 static struct device_information *net_get_dip(char *buts_name)
1270 {
1271         struct device_information *dip;
1272         int i;
1273
1274         for (i = 0; i < ndevs; i++) {
1275                 dip = &device_information[i];
1276
1277                 if (!strcmp(dip->buts_name, buts_name))
1278                         return dip;
1279         }
1280
1281         device_information = realloc(device_information, (ndevs + 1) * sizeof(*dip));
1282         dip = &device_information[ndevs];
1283         strcpy(dip->buts_name, buts_name);
1284         strcpy(dip->path, buts_name);
1285         ndevs++;
1286         dip->threads = malloc(ncpus * sizeof(struct thread_information));
1287         memset(dip->threads, 0, ncpus * sizeof(struct thread_information));
1288
1289         /*
1290          * open all files
1291          */
1292         for (i = 0; i < ncpus; i++) {
1293                 struct thread_information *tip = &dip->threads[i];
1294
1295                 tip->cpu = i;
1296                 tip->device = dip;
1297                 tip->fd = -1;
1298                 tip->pfd = -1;
1299
1300                 if (tip_open_output(dip, tip))
1301                         return NULL;
1302         }
1303
1304         return dip;
1305 }
1306
1307 static struct thread_information *net_get_tip(struct blktrace_net_hdr *bnh)
1308 {
1309         struct device_information *dip;
1310
1311         ncpus = bnh->max_cpus;
1312         dip = net_get_dip(bnh->buts_name);
1313         return &dip->threads[bnh->cpu];
1314 }
1315
1316 static int net_get_header(struct blktrace_net_hdr *bnh)
1317 {
1318         int fl = fcntl(net_in_fd, F_GETFL);
1319         int bytes_left, ret;
1320         void *p = bnh;
1321
1322         fcntl(net_in_fd, F_SETFL, fl | O_NONBLOCK);
1323         bytes_left = sizeof(*bnh);
1324         while (bytes_left && !is_done()) {
1325                 ret = recv(net_in_fd, p, bytes_left, MSG_WAITALL);
1326                 if (ret < 0) {
1327                         if (errno != EAGAIN) {
1328                                 perror("recv header");
1329                                 return 1;
1330                         }
1331                         usleep(100);
1332                         continue;
1333                 } else if (!ret) {
1334                         usleep(100);
1335                         continue;
1336                 } else {
1337                         p += ret;
1338                         bytes_left -= ret;
1339                 }
1340         }
1341         fcntl(net_in_fd, F_SETFL, fl & ~O_NONBLOCK);
1342         return 0;
1343 }
1344
1345 static int net_server_loop(void)
1346 {
1347         struct thread_information *tip;
1348         struct blktrace_net_hdr bnh;
1349
1350         if (net_get_header(&bnh))
1351                 return 1;
1352
1353         if (data_is_native == -1 && check_data_endianness(bnh.magic)) {
1354                 fprintf(stderr, "server: received data is bad\n");
1355                 return 1;
1356         }
1357
1358         if (!data_is_native) {
1359                 bnh.cpu = be32_to_cpu(bnh.cpu);
1360                 bnh.len = be32_to_cpu(bnh.len);
1361         }
1362
1363         /*
1364          * len == 0 means that the other end signalled end-of-run
1365          */
1366         if (!bnh.len) {
1367                 fprintf(stderr, "server: end of run\n");
1368                 return 1;
1369         }
1370
1371         tip = net_get_tip(&bnh);
1372         if (!tip)
1373                 return 1;
1374
1375         if (mmap_subbuf(tip, bnh.len))
1376                 return 1;
1377
1378         return 0;
1379 }
1380
1381 /*
1382  * Start here when we are in server mode - just fetch data from the network
1383  * and dump to files
1384  */
1385 static int net_server(void)
1386 {
1387         struct device_information *dip;
1388         struct thread_information *tip;
1389         struct sockaddr_in addr;
1390         socklen_t socklen;
1391         int fd, opt, i, j;
1392
1393         fd = socket(AF_INET, SOCK_STREAM, 0);
1394         if (fd < 0) {
1395                 perror("server: socket");
1396                 return 1;
1397         }
1398
1399         opt = 1;
1400         if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &opt, sizeof(opt)) < 0) {
1401                 perror("setsockopt");
1402                 return 1;
1403         }
1404
1405         memset(&addr, 0, sizeof(addr));
1406         addr.sin_family = AF_INET;
1407         addr.sin_addr.s_addr = htonl(INADDR_ANY);
1408         addr.sin_port = htons(net_port);
1409
1410         if (bind(fd, (struct sockaddr *) &addr, sizeof(addr)) < 0) {
1411                 perror("bind");
1412                 return 1;
1413         }
1414
1415         if (listen(fd, 1) < 0) {
1416                 perror("listen");
1417                 return 1;
1418         }
1419
1420 repeat:
1421         signal(SIGINT, NULL);
1422         signal(SIGHUP, NULL);
1423         signal(SIGTERM, NULL);
1424         signal(SIGALRM, NULL);
1425
1426         printf("blktrace: waiting for incoming connection...\n");
1427
1428         socklen = sizeof(addr);
1429         net_in_fd = accept(fd, (struct sockaddr *) &addr, &socklen);
1430         if (net_in_fd < 0) {
1431                 perror("accept");
1432                 return 1;
1433         }
1434
1435         signal(SIGINT, handle_sigint);
1436         signal(SIGHUP, handle_sigint);
1437         signal(SIGTERM, handle_sigint);
1438         signal(SIGALRM, handle_sigint);
1439
1440         printf("blktrace: connection from %s\n", inet_ntoa(addr.sin_addr));
1441
1442         while (!is_done()) {
1443                 if (net_server_loop())
1444                         break;
1445         }
1446
1447         for_each_dip(dip, i)
1448                 for_each_tip(dip, tip, j)
1449                         tip_ftrunc_final(tip);
1450
1451         show_stats();
1452
1453         if (is_done())
1454                 return 0;
1455
1456         /*
1457          * cleanup for next run
1458          */
1459         for_each_dip(dip, i) {
1460                 for_each_tip(dip, tip, j)
1461                         fclose(tip->ofile);
1462
1463                 free(dip->threads);
1464         }
1465
1466         free(device_information);
1467         device_information = NULL;
1468         ncpus = ndevs = 0;
1469
1470         close(net_in_fd);
1471         net_in_fd = -1;
1472         stat_shown = 0;
1473         goto repeat;
1474 }
1475
1476 /*
1477  * Setup outgoing network connection where we will transmit data
1478  */
1479 static int net_setup_client(void)
1480 {
1481         struct sockaddr_in addr;
1482         int fd;
1483
1484         fd = socket(AF_INET, SOCK_STREAM, 0);
1485         if (fd < 0) {
1486                 perror("client: socket");
1487                 return 1;
1488         }
1489
1490         memset(&addr, 0, sizeof(addr));
1491         addr.sin_family = AF_INET;
1492         addr.sin_port = htons(net_port);
1493
1494         if (inet_aton(hostname, &addr.sin_addr) != 1) {
1495                 struct hostent *hent = gethostbyname(hostname);
1496                 if (!hent) {
1497                         perror("gethostbyname");
1498                         return 1;
1499                 }
1500
1501                 memcpy(&addr.sin_addr, hent->h_addr, 4);
1502                 strcpy(hostname, hent->h_name);
1503         }
1504
1505         printf("blktrace: connecting to %s\n", hostname);
1506
1507         if (connect(fd, (struct sockaddr *) &addr, sizeof(addr)) < 0) {
1508                 perror("client: connect");
1509                 return 1;
1510         }
1511
1512         printf("blktrace: connected!\n");
1513         net_out_fd = fd;
1514         return 0;
1515 }
1516
/*
 * Usage synopsis and per-option help. Note: the version flag is -V
 * (see S_OPTS); the synopsis previously showed a non-existent "-v".
 */
static char usage_str[] = \
	"-d <dev> [ -r relay path ] [ -o <output> ] [-k ] [ -w time ]\n" \
	"[ -a action ] [ -A action mask ] [ -V ]\n\n" \
	"\t-d Use specified device. May also be given last after options\n" \
	"\t-r Path to mounted relayfs, defaults to /relay\n" \
	"\t-o File(s) to send output to\n" \
	"\t-D Directory to prepend to output file names\n" \
	"\t-k Kill a running trace\n" \
	"\t-w Stop after defined time, in seconds\n" \
	"\t-a Only trace specified actions. See documentation\n" \
	"\t-A Give trace mask as a single value. See documentation\n" \
	"\t-b Sub buffer size in KiB\n" \
	"\t-n Number of sub buffers\n" \
	"\t-l Run in network listen mode (blktrace server)\n" \
	"\t-h Run in network client mode, connecting to the given host\n" \
	"\t-p Network port to use (default 8462)\n" \
	"\t-s Make the network client use sendfile() to transfer data\n" \
	"\t-V Print program version info\n\n";
1535
1536 static void show_usage(char *program)
1537 {
1538         fprintf(stderr, "Usage: %s %s %s",program, blktrace_version, usage_str);
1539 }
1540
int main(int argc, char *argv[])
{
	static char default_relay_path[] = "/relay";
	struct statfs st;
	int i, c;
	int stop_watch = 0;
	int act_mask_tmp = 0;

	/*
	 * Parse options (see usage_str). Devices may be given with -d or
	 * as trailing non-option arguments, handled after this loop.
	 */
	while ((c = getopt_long(argc, argv, S_OPTS, l_opts, NULL)) >= 0) {
		switch (c) {
		case 'a':
			/* named action: OR its bit into the mask */
			i = find_mask_map(optarg);
			if (i < 0) {
				fprintf(stderr,"Invalid action mask %s\n",
					optarg);
				return 1;
			}
			act_mask_tmp |= i;
			break;

		case 'A':
			/* raw hex mask: replaces any -a accumulation */
			if ((sscanf(optarg, "%x", &i) != 1) ||
							!valid_act_opt(i)) {
				fprintf(stderr,
					"Invalid set action mask %s/0x%x\n",
					optarg, i);
				return 1;
			}
			act_mask_tmp = i;
			break;

		case 'd':
			if (resize_devices(optarg) != 0)
				return 1;
			break;

		case 'r':
			relay_path = optarg;
			break;

		case 'o':
			output_name = optarg;
			break;
		case 'k':
			kill_running_trace = 1;
			break;
		case 'w':
			stop_watch = atoi(optarg);
			if (stop_watch <= 0) {
				fprintf(stderr,
					"Invalid stopwatch value (%d secs)\n",
					stop_watch);
				return 1;
			}
			break;
		case 'V':
			printf("%s version %s\n", argv[0], blktrace_version);
			return 0;
		case 'b':
			/* sub-buffer size, given in KiB, capped at 16 MiB */
			buf_size = strtoul(optarg, NULL, 10);
			if (buf_size <= 0 || buf_size > 16*1024) {
				fprintf(stderr,
					"Invalid buffer size (%lu)\n",buf_size);
				return 1;
			}
			buf_size <<= 10;	/* KiB -> bytes */
			break;
		case 'n':
			buf_nr = strtoul(optarg, NULL, 10);
			if (buf_nr <= 0) {
				fprintf(stderr,
					"Invalid buffer nr (%lu)\n", buf_nr);
				return 1;
			}
			break;
		case 'D':
			output_dir = optarg;
			break;
		case 'h':
			net_mode = Net_client;
			strcpy(hostname, optarg);
			break;
		case 'l':
			net_mode = Net_server;
			break;
		case 'p':
			net_port = atoi(optarg);
			break;
		case 's':
			net_sendfile = 1;
			break;
		default:
			show_usage(argv[0]);
			return 1;
		}
	}

	setlocale(LC_NUMERIC, "en_US");

	page_size = getpagesize();

	/* Server mode does no local tracing; hand off to the accept loop. */
	if (net_mode == Net_server)
		return net_server();

	/* Remaining positional arguments are device names. */
	while (optind < argc) {
		if (resize_devices(argv[optind++]) != 0)
			return 1;
	}

	if (ndevs == 0) {
		show_usage(argv[0]);
		return 1;
	}

	if (!relay_path)
		relay_path = default_relay_path;

	if (act_mask_tmp != 0)
		act_mask = act_mask_tmp;

	/* Sanity-check that relay_path is actually a mounted relayfs. */
	if (statfs(relay_path, &st) < 0) {
		perror("statfs");
		fprintf(stderr,"%s does not appear to be a valid path\n",
			relay_path);
		return 1;
	} else if (st.f_type != (long) RELAYFS_TYPE) {
		fprintf(stderr,"%s does not appear to be a relay filesystem\n",
			relay_path);
		return 1;
	}

	if (open_devices() != 0)
		return 1;

	/* -k: stop any running traces and exit immediately. */
	if (kill_running_trace) {
		stop_all_traces();
		return 0;
	}

	ncpus = sysconf(_SC_NPROCESSORS_ONLN);
	if (ncpus < 0) {
		fprintf(stderr, "sysconf(_SC_NPROCESSORS_ONLN) failed\n");
		return 1;
	}

	signal(SIGINT, handle_sigint);
	signal(SIGHUP, handle_sigint);
	signal(SIGTERM, handle_sigint);
	signal(SIGALRM, handle_sigint);

	if (net_mode == Net_client && net_setup_client())
		return 1;

	if (start_devices() != 0)
		return 1;

	atexit(stop_all_tracing);

	/* -w: arm the stopwatch; SIGALRM ends the run via handle_sigint. */
	if (stop_watch)
		alarm(stop_watch);

	wait_for_threads();

	/* Normal termination path: stop once if a signal hasn't already. */
	if (!is_trace_stopped()) {
		trace_stopped = 1;
		stop_all_threads();
		stop_all_traces();
	}

	show_stats();

	return 0;
}
1714