[PATCH] blktrace: improve live tracing
[blktrace.git] / blktrace.c
1 /*
2  * block queue tracing application
3  *
4  * Copyright (C) 2005 Jens Axboe <axboe@suse.de>
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, write to the Free Software
18  *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
19  *
20  */
21 #include <pthread.h>
22 #include <sys/types.h>
23 #include <sys/stat.h>
24 #include <unistd.h>
25 #include <locale.h>
26 #include <signal.h>
27 #include <fcntl.h>
28 #include <string.h>
29 #include <sys/ioctl.h>
30 #include <sys/param.h>
31 #include <sys/statfs.h>
32 #include <sys/poll.h>
33 #include <stdio.h>
34 #include <stdlib.h>
35 #include <sched.h>
36 #include <ctype.h>
37 #include <getopt.h>
38 #include <errno.h>
39 #include <assert.h>
40
41 #include "blktrace.h"
42
static char blktrace_version[] = "0.99";

/*
 * You may want to increase this even more, if you are logging at a high
 * rate and see skipped/missed events
 */
#define BUF_SIZE        (512 * 1024)    /* default kernel-side sub buffer size, bytes */
#define BUF_NR          (4)             /* default number of kernel-side sub buffers */

#define OFILE_BUF       (128 * 1024)    /* stdio buffer size for file output (setvbuf) */

/* f_type of a mounted relayfs, used to sanity check the -r path */
#define RELAYFS_TYPE    0xF0B4A981

/*
 * the user-side ring buffer starts at RING_INIT_NR times the kernel
 * buffer size and may double until it reaches RING_MAX_NR times it
 */
#define RING_INIT_NR    (2)
#define RING_MAX_NR     (16UL)
58
#define S_OPTS  "d:a:A:r:o:kw:Vb:n:D:"

/*
 * Long option table; each entry mirrors one of the short options in
 * S_OPTS above.
 */
static struct option l_opts[] = {
        { "dev",             required_argument, NULL, 'd' },
        { "act-mask",        required_argument, NULL, 'a' },
        { "set-mask",        required_argument, NULL, 'A' },
        { "relay",           required_argument, NULL, 'r' },
        { "output",          required_argument, NULL, 'o' },
        { "kill",            no_argument,       NULL, 'k' },
        { "stopwatch",       required_argument, NULL, 'w' },
        { "version",         no_argument,       NULL, 'V' },
        { "buffer-size",     required_argument, NULL, 'b' },
        { "num-sub-buffers", required_argument, NULL, 'n' },
        { "output-dir",      required_argument, NULL, 'D' },
        { NULL, 0, NULL, 0 }
};
131
/*
 * Per-cpu reader state: one of these exists for every (device, cpu)
 * pair, owned by the extract() thread servicing it.
 */
struct thread_information {
        int cpu;                        /* cpu this thread is pinned to */
        pthread_t thread;

        int fd;                         /* relayfs per-cpu trace file */
        void *fd_buf;                   /* user-side ring buffer of raw trace data */
        unsigned long fd_off;           /* ring read offset */
        unsigned long fd_size;          /* bytes currently buffered in the ring */
        unsigned long fd_max_size;      /* current ring capacity (power of two) */
        char fn[MAXPATHLEN + 64];       /* path of the relayfs trace file */

        pthread_mutex_t *fd_lock;       /* serializes output when sharing stdout */
        FILE *ofile;                    /* output stream */
        char *ofile_buffer;             /* stdio buffer handed to setvbuf() */
        int ofile_flush;                /* flush after every event (stdout mode) */

        unsigned long events_processed;
        struct device_information *device;      /* owning device */
};
151
/*
 * Per-device trace state, one entry per -d argument.
 */
struct device_information {
        int fd;                         /* open fd on the block device */
        char *path;                     /* device path from the command line */
        char buts_name[32];             /* trace name returned by BLKSTARTTRACE */
        volatile int trace_started;     /* kernel-side tracing active */
        unsigned long drop_count;       /* dropped events, sampled at stop time */
        struct thread_information *threads;     /* ncpus reader threads */
};
160
static int ncpus;                       /* number of online cpus */
static struct thread_information *thread_information;   /* ndevs * ncpus entries */
static int ndevs;                       /* number of devices being traced */
static struct device_information *device_information;

/* command line option globals */
static char *relay_path;                /* -r: relayfs mount point */
static char *output_name;               /* -o: output base name, "-" means stdout */
static char *output_dir;                /* -D: directory prepended to output names */
static int act_mask = ~0U;              /* -a/-A: action filter mask, default all */
static int kill_running_trace;          /* -k */
static unsigned long buf_size = BUF_SIZE;       /* -b, converted to bytes */
static unsigned long buf_nr = BUF_NR;           /* -n */

/*
 * these flags are set from the signal handler and polled elsewhere;
 * the volatile casts force a fresh load on every check
 */
#define is_done()       (*(volatile int *)(&done))
static volatile int done;

#define is_trace_stopped()      (*(volatile int *)(&trace_stopped))
static volatile int trace_stopped;

#define is_stat_shown() (*(volatile int *)(&stat_shown))
static volatile int stat_shown;

/* serializes interleaved per-cpu writes when output goes to stdout */
static pthread_mutex_t stdout_mutex = PTHREAD_MUTEX_INITIALIZER;

static void exit_trace(int status);

#define dip_tracing(dip)        (*(volatile int *)(&(dip)->trace_started))
#define dip_set_tracing(dip, v) ((dip)->trace_started = (v))

#define __for_each_dip(__d, __i, __e)   \
        for (__i = 0, __d = device_information; __i < __e; __i++, __d++)

#define for_each_dip(__d, __i)  __for_each_dip(__d, __i, ndevs)
#define for_each_tip(__d, __t, __i)     \
        for (__i = 0, __t = (__d)->threads; __i < ncpus; __i++, __t++)
197
198 static int get_dropped_count(const char *buts_name)
199 {
200         int fd;
201         char tmp[MAXPATHLEN + 64];
202
203         snprintf(tmp, sizeof(tmp), "%s/block/%s/dropped",
204                  relay_path, buts_name);
205
206         fd = open(tmp, O_RDONLY);
207         if (fd < 0) {
208                 /*
209                  * this may be ok, if the kernel doesn't support dropped counts
210                  */
211                 if (errno == ENOENT)
212                         return 0;
213
214                 fprintf(stderr, "Couldn't open dropped file %s\n", tmp);
215                 return -1;
216         }
217
218         if (read(fd, tmp, sizeof(tmp)) < 0) {
219                 perror(tmp);
220                 close(fd);
221                 return -1;
222         }
223
224         close(fd);
225
226         return atoi(tmp);
227 }
228
/*
 * Kick off kernel-side tracing on @dip via the BLKSTARTTRACE ioctl,
 * passing the globally configured sub buffer size/count and action
 * mask. On success the kernel fills in buts.name, which we record as
 * the device's trace name. Returns 0 on success, 1 on failure.
 */
static int start_trace(struct device_information *dip)
{
        struct blk_user_trace_setup buts;

        /* zero the whole struct (padding included) before handing it to the kernel */
        memset(&buts, 0, sizeof(buts));
        buts.buf_size = buf_size;
        buts.buf_nr = buf_nr;
        buts.act_mask = act_mask;

        if (ioctl(dip->fd, BLKSTARTTRACE, &buts) < 0) {
                perror("BLKSTARTTRACE");
                return 1;
        }

        memcpy(dip->buts_name, buts.name, sizeof(dip->buts_name));
        dip_set_tracing(dip, 1);
        return 0;
}
247
248 static void stop_trace(struct device_information *dip)
249 {
250         if (dip_tracing(dip) || kill_running_trace) {
251                 dip_set_tracing(dip, 0);
252
253                 if (ioctl(dip->fd, BLKSTOPTRACE) < 0)
254                         perror("BLKSTOPTRACE");
255
256                 close(dip->fd);
257                 dip->fd = -1;
258         }
259 }
260
261 static void stop_all_traces(void)
262 {
263         struct device_information *dip;
264         int i;
265
266         for_each_dip(dip, i) {
267                 dip->drop_count = get_dropped_count(dip->buts_name);
268                 stop_trace(dip);
269         }
270 }
271
272 static void wait_for_data(struct thread_information *tip)
273 {
274         struct pollfd pfd = { .fd = tip->fd, .events = POLLIN };
275
276         poll(&pfd, 1, 10);
277 }
278
/*
 * Read up to @len bytes from the thread's relay file into @buf. With
 * @block set, keep polling until data arrives or the trace is done;
 * otherwise return after a single attempt.
 *
 * Returns the positive byte count read, 0 if no data was available,
 * or a negative value on a hard read error.
 */
static int __read_data(struct thread_information *tip, void *buf, int len,
                       int block)
{
        int ret = 0;

        while (!is_done()) {
                ret = read(tip->fd, buf, len);
                if (ret > 0)
                        break;
                else if (!ret) {
                        /* EOF from relayfs just means "no data yet" */
                        if (!block)
                                break;

                        wait_for_data(tip);
                } else {
                        /* anything other than EAGAIN is a real error */
                        if (errno != EAGAIN) {
                                perror(tip->fn);
                                fprintf(stderr,"Thread %d failed read of %s\n",
                                        tip->cpu, tip->fn);
                                break;
                        }
                        /* EAGAIN in non-blocking mode: report "no data" */
                        if (!block) {
                                ret = 0;
                                break;
                        }

                        wait_for_data(tip);
                }
        }

        return ret;
}
311
312 #define can_grow_ring(tip)      ((tip)->fd_max_size < RING_MAX_NR * buf_size * buf_nr)
313
314 static int resize_ringbuffer(struct thread_information *tip)
315 {
316         if (!can_grow_ring(tip))
317                 return 1;
318
319         tip->fd_buf = realloc(tip->fd_buf, 2 * tip->fd_max_size);
320
321         /*
322          * if the ring currently wraps, copy range over
323          */
324         if (tip->fd_off + tip->fd_size > tip->fd_max_size) {
325                 unsigned long wrap_size = tip->fd_size - (tip->fd_max_size - tip->fd_off);
326                 memmove(tip->fd_buf + tip->fd_max_size, tip->fd_buf, wrap_size);
327         }
328
329         tip->fd_max_size <<= 1;
330         return 0;
331 }
332
/*
 * Read up to @len more bytes from the relay file into the ring at the
 * current fill point. The ring size is always a power of two, so the
 * fill offset is computed with a mask; a single read never wraps, it
 * is clipped at the end of the buffer instead.
 *
 * Returns the number of bytes added, or -1 on read error.
 */
static int __refill_ringbuffer(struct thread_information *tip, int len,
                               int block)
{
        unsigned long off;
        int ret;

        /* offset of the first free byte in the ring */
        off = (tip->fd_size + tip->fd_off) & (tip->fd_max_size - 1);
        if (off + len > tip->fd_max_size)
                len = tip->fd_max_size - off;

        assert(len > 0);

        ret = __read_data(tip, tip->fd_buf + off, len, block);
        if (ret < 0)
                return -1;

        tip->fd_size += ret;
        return ret;
}
352
353 /*
354  * keep filling ring until we get a short read
355  */
356 static void refill_ringbuffer(struct thread_information *tip, int block)
357 {
358         int len = buf_size;
359         int ret;
360
361         do {
362                 if (len + tip->fd_size > tip->fd_max_size)
363                         resize_ringbuffer(tip);
364
365                 ret = __refill_ringbuffer(tip, len, block);
366         } while ((ret == len) && !is_done());
367 }
368
/*
 * Copy @len bytes out of the ring into @buf, topping the ring up from
 * the relay file first. Blocks (polls) only when the ring does not
 * already hold @len bytes. Returns 0 on success, -1 if the data could
 * not be gathered.
 */
static int read_data(struct thread_information *tip, void *buf,
                     unsigned int len)
{
        unsigned int start_size, end_size;

        refill_ringbuffer(tip, len > tip->fd_size);

        if (len > tip->fd_size)
                return -1;

        /*
         * see if we wrap the ring
         */
        start_size = len;
        end_size = 0;
        if (len > (tip->fd_max_size - tip->fd_off)) {
                start_size = tip->fd_max_size - tip->fd_off;
                end_size = len - start_size;
        }

        memcpy(buf, tip->fd_buf + tip->fd_off, start_size);
        if (end_size)
                memcpy(buf + start_size, tip->fd_buf, end_size);

        /* consume: advance the read offset (the mask handles wrap) */
        tip->fd_off = (tip->fd_off + len) & (tip->fd_max_size - 1);
        tip->fd_size -= len;
        return 0;
}
397
398 static int write_data(struct thread_information *tip,
399                       void *buf, unsigned int buf_len)
400 {
401         int ret;
402
403         while (1) {
404                 ret = fwrite(buf, buf_len, 1, tip->ofile);
405                 if (ret == 1)
406                         break;
407
408                 if (ret < 0) {
409                         perror("write");
410                         return 1;
411                 }
412         }
413
414         if (tip->ofile_flush)
415                 fflush(tip->ofile);
416
417         return 0;
418 }
419
420 static void *extract_data(struct thread_information *tip, int nb)
421 {
422         unsigned char *buf;
423
424         buf = malloc(nb);
425         if (!read_data(tip, buf, nb))
426                 return buf;
427
428         free(buf);
429         return NULL;
430 }
431
/*
 * trace may start inside 'bit' or may need to be gotten further on
 */
static int get_event_slow(struct thread_information *tip,
                          struct blk_io_trace *bit)
{
        const int inc = sizeof(__u32);
        struct blk_io_trace foo;
        unsigned int offset;
        void *p;

        /*
         * check if trace is inside: scan forward a word at a time,
         * testing each position for the trace magic
         */
        offset = 0;
        p = bit;
        while (offset < sizeof(*bit)) {
                p += inc;
                offset += inc;

                /* copy into an aligned struct so the magic check is safe */
                memcpy(&foo, p, inc);

                if (CHECK_MAGIC(&foo))
                        break;
        }

        /*
         * part trace found inside, read the rest
         */
        if (offset < sizeof(*bit)) {
                int good_bytes = sizeof(*bit) - offset;

                /* slide the partial event to the front, then fill the tail */
                memmove(bit, p, good_bytes);
                p = (void *) bit + good_bytes;

                return read_data(tip, p, offset);
        }

        /*
         * nothing found, keep looking for start of trace: read magic-sized
         * chunks until one matches
         */
        do {
                if (read_data(tip, bit, sizeof(bit->magic)))
                        return -1;
        } while (!CHECK_MAGIC(bit));

        /*
         * now get the rest of it
         */
        p = &bit->sequence;
        if (read_data(tip, p, sizeof(*bit) - inc))
                return -1;

        return 0;
}
487
488 /*
489  * Sometimes relayfs screws us a little, if an event crosses a sub buffer
490  * boundary. So keep looking forward in the trace data until an event
491  * is found
492  */
493 static int get_event(struct thread_information *tip, struct blk_io_trace *bit)
494 {
495         /*
496          * optimize for the common fast case, a full trace read that
497          * succeeds
498          */
499         if (read_data(tip, bit, sizeof(*bit)))
500                 return -1;
501
502         if (CHECK_MAGIC(bit))
503                 return 0;
504
505         /*
506          * ok that didn't work, the event may start somewhere inside the
507          * trace itself
508          */
509         return get_event_slow(tip, bit);
510 }
511
512 static inline void tip_fd_unlock(struct thread_information *tip)
513 {
514         if (tip->fd_lock)
515                 pthread_mutex_unlock(tip->fd_lock);
516 }
517
518 static inline void tip_fd_lock(struct thread_information *tip)
519 {
520         if (tip->fd_lock)
521                 pthread_mutex_lock(tip->fd_lock);
522 }
523
524 static void close_thread(struct thread_information *tip)
525 {
526         if (tip->fd != -1)
527                 close(tip->fd);
528         if (tip->ofile)
529                 fclose(tip->ofile);
530         if (tip->ofile_buffer)
531                 free(tip->ofile_buffer);
532         if (tip->fd_buf)
533                 free(tip->fd_buf);
534
535         tip->fd = -1;
536         tip->ofile = NULL;
537         tip->ofile_buffer = NULL;
538         tip->fd_buf = NULL;
539 }
540
541 static void *extract(void *arg)
542 {
543         struct thread_information *tip = arg;
544         int pdu_len;
545         char *pdu_data;
546         struct blk_io_trace t;
547         pid_t pid = getpid();
548         cpu_set_t cpu_mask;
549
550         CPU_ZERO(&cpu_mask);
551         CPU_SET((tip->cpu), &cpu_mask);
552
553         if (sched_setaffinity(pid, sizeof(cpu_mask), &cpu_mask) == -1) {
554                 perror("sched_setaffinity");
555                 exit_trace(1);
556         }
557
558         snprintf(tip->fn, sizeof(tip->fn), "%s/block/%s/trace%d",
559                         relay_path, tip->device->buts_name, tip->cpu);
560         tip->fd = open(tip->fn, O_RDONLY | O_NONBLOCK);
561         if (tip->fd < 0) {
562                 perror(tip->fn);
563                 fprintf(stderr,"Thread %d failed open of %s\n", tip->cpu,
564                         tip->fn);
565                 exit_trace(1);
566         }
567
568         /*
569          * start with a ringbuffer that is twice the size of the kernel side
570          */
571         tip->fd_max_size = buf_size * buf_nr * RING_INIT_NR;
572         tip->fd_buf = malloc(tip->fd_max_size);
573         tip->fd_off = 0;
574         tip->fd_size = 0;
575
576         pdu_data = NULL;
577         while (1) {
578                 if (get_event(tip, &t))
579                         break;
580
581                 if (verify_trace(&t))
582                         break;
583
584                 pdu_len = t.pdu_len;
585
586                 trace_to_be(&t);
587
588                 if (pdu_len) {
589                         pdu_data = extract_data(tip, pdu_len);
590                         if (!pdu_data)
591                                 break;
592                 }
593
594                 /*
595                  * now we have both trace and payload, get a lock on the
596                  * output descriptor and send it off
597                  */
598                 tip_fd_lock(tip);
599
600                 if (write_data(tip, &t, sizeof(t))) {
601                         tip_fd_unlock(tip);
602                         break;
603                 }
604
605                 if (pdu_data && write_data(tip, pdu_data, pdu_len)) {
606                         tip_fd_unlock(tip);
607                         break;
608                 }
609
610                 tip_fd_unlock(tip);
611
612                 if (pdu_data) {
613                         free(pdu_data);
614                         pdu_data = NULL;
615                 }
616
617                 tip->events_processed++;
618         }
619
620         close_thread(tip);
621         return NULL;
622 }
623
624 static int start_threads(struct device_information *dip)
625 {
626         struct thread_information *tip;
627         char op[64];
628         int j, pipeline = output_name && !strcmp(output_name, "-");
629         int len, mode, vbuf_size;
630
631         for_each_tip(dip, tip, j) {
632                 tip->cpu = j;
633                 tip->device = dip;
634                 tip->fd_lock = NULL;
635                 tip->events_processed = 0;
636
637                 if (pipeline) {
638                         tip->ofile = fdopen(STDOUT_FILENO, "w");
639                         tip->fd_lock = &stdout_mutex;
640                         tip->ofile_flush = 1;
641                         mode = _IOLBF;
642                         vbuf_size = 512;
643                 } else {
644                         len = 0;
645
646                         if (output_dir)
647                                 len = sprintf(op, "%s/", output_dir);
648
649                         if (output_name) {
650                                 sprintf(op + len, "%s.blktrace.%d", output_name,
651                                         tip->cpu);
652                         } else {
653                                 sprintf(op + len, "%s.blktrace.%d",
654                                         dip->buts_name, tip->cpu);
655                         }
656                         tip->ofile = fopen(op, "w");
657                         tip->ofile_flush = 0;
658                         mode = _IOFBF;
659                         vbuf_size = OFILE_BUF;
660                 }
661
662                 if (tip->ofile == NULL) {
663                         perror(op);
664                         return 1;
665                 }
666
667                 tip->ofile_buffer = malloc(vbuf_size);
668                 if (setvbuf(tip->ofile, tip->ofile_buffer, mode, vbuf_size)) {
669                         perror("setvbuf");
670                         close_thread(tip);
671                         return 1;
672                 }
673
674                 if (pthread_create(&tip->thread, NULL, extract, tip)) {
675                         perror("pthread_create");
676                         close_thread(tip);
677                         return 1;
678                 }
679         }
680
681         return 0;
682 }
683
684 static void stop_threads(struct device_information *dip)
685 {
686         struct thread_information *tip;
687         unsigned long ret;
688         int i;
689
690         for_each_tip(dip, tip, i)
691                 (void) pthread_join(tip->thread, (void *) &ret);
692 }
693
694 static void stop_all_threads(void)
695 {
696         struct device_information *dip;
697         int i;
698
699         for_each_dip(dip, i)
700                 stop_threads(dip);
701 }
702
703 static void stop_all_tracing(void)
704 {
705         struct device_information *dip;
706         int i;
707
708         for_each_dip(dip, i)
709                 stop_trace(dip);
710 }
711
712 static void exit_trace(int status)
713 {
714         if (!is_trace_stopped()) {
715                 trace_stopped = 1;
716                 stop_all_threads();
717                 stop_all_tracing();
718         }
719
720         exit(status);
721 }
722
723 static int resize_devices(char *path)
724 {
725         int size = (ndevs + 1) * sizeof(struct device_information);
726
727         device_information = realloc(device_information, size);
728         if (!device_information) {
729                 fprintf(stderr, "Out of memory, device %s (%d)\n", path, size);
730                 return 1;
731         }
732         device_information[ndevs].path = path;
733         ndevs++;
734         return 0;
735 }
736
737 static int open_devices(void)
738 {
739         struct device_information *dip;
740         int i;
741
742         for_each_dip(dip, i) {
743                 dip->fd = open(dip->path, O_RDONLY | O_NONBLOCK);
744                 if (dip->fd < 0) {
745                         perror(dip->path);
746                         return 1;
747                 }
748         }
749
750         return 0;
751 }
752
/*
 * Allocate the (ndevs * ncpus) thread table, start kernel-side tracing
 * on every device and then spawn the reader threads. On partial
 * failure everything already started is unwound. Returns 0 on
 * success, 1 on failure.
 */
static int start_devices(void)
{
        struct device_information *dip;
        int i, j, size;

        size = ncpus * sizeof(struct thread_information);
        thread_information = malloc(size * ndevs);
        if (!thread_information) {
                fprintf(stderr, "Out of memory, threads (%d)\n", size * ndevs);
                return 1;
        }

        for_each_dip(dip, i) {
                if (start_trace(dip)) {
                        close(dip->fd);
                        fprintf(stderr, "Failed to start trace on %s\n",
                                dip->path);
                        break;
                }
        }

        /*
         * i < ndevs means the loop above bailed early: stop tracing on
         * the devices [0, i) that did start
         */
        if (i != ndevs) {
                __for_each_dip(dip, j, i)
                        stop_trace(dip);

                return 1;
        }

        for_each_dip(dip, i) {
                /* each device owns a contiguous ncpus slice of the table */
                dip->threads = thread_information + (i * ncpus);
                if (start_threads(dip)) {
                        fprintf(stderr, "Failed to start worker threads\n");
                        break;
                }
        }

        /*
         * same unwinding as above, but also join the worker threads of
         * the devices whose threads did start
         */
        if (i != ndevs) {
                __for_each_dip(dip, j, i)
                        stop_threads(dip);
                for_each_dip(dip, i)
                        stop_trace(dip);

                return 1;
        }

        return 0;
}
800
801 static void show_stats(void)
802 {
803         int i, j, no_stdout = 0;
804         struct device_information *dip;
805         struct thread_information *tip;
806         unsigned long long events_processed;
807         unsigned long total_drops;
808
809         if (is_stat_shown())
810                 return;
811
812         stat_shown = 1;
813
814         if (output_name && !strcmp(output_name, "-"))
815                 no_stdout = 1;
816
817         total_drops = 0;
818         for_each_dip(dip, i) {
819                 if (!no_stdout)
820                         printf("Device: %s\n", dip->path);
821                 events_processed = 0;
822                 for_each_tip(dip, tip, j) {
823                         if (!no_stdout)
824                                 printf("  CPU%3d: %20ld events\n",
825                                         tip->cpu, tip->events_processed);
826                         events_processed += tip->events_processed;
827                 }
828                 total_drops += dip->drop_count;
829                 if (!no_stdout)
830                         printf("  Total:  %20lld events (dropped %lu)\n",
831                                         events_processed, dip->drop_count);
832         }
833
834         if (total_drops)
835                 fprintf(stderr, "You have dropped events, consider using a larger buffer size (-b)\n");
836 }
837
/*
 * Option summary; note the version flag is -V (capital), matching
 * S_OPTS and the 'V' case in main() -- it was wrongly listed as -v.
 */
static char usage_str[] = \
        "-d <dev> [ -r relay path ] [ -o <output> ] [ -k ] [ -w time ]\n" \
        "[ -a action ] [ -A action mask ] [ -b size ] [ -n number ]\n" \
        "[ -D dir ] [ -V ]\n\n" \
        "\t-d Use specified device. May also be given last after options\n" \
        "\t-r Path to mounted relayfs, defaults to /relay\n" \
        "\t-o File(s) to send output to\n" \
        "\t-D Directory to prepend to output file names\n" \
        "\t-k Kill a running trace\n" \
        "\t-w Stop after defined time, in seconds\n" \
        "\t-a Only trace specified actions. See documentation\n" \
        "\t-A Give trace mask as a single value. See documentation\n" \
        "\t-b Sub buffer size in KiB\n" \
        "\t-n Number of sub buffers\n" \
        "\t-V Print program version info\n\n";
852
853 static void show_usage(char *program)
854 {
855         fprintf(stderr, "Usage: %s %s %s",program, blktrace_version, usage_str);
856 }
/*
 * Common handler for SIGINT/SIGHUP/SIGTERM/SIGALRM: flag the run as
 * done, tear everything down once, then print final stats.
 *
 * NOTE(review): this joins threads and uses stdio from signal context,
 * which is not async-signal-safe -- presumably tolerated because the
 * main loop only sleeps; verify before reusing this pattern.
 */
static void handle_sigint(__attribute__((__unused__)) int sig)
{
        done = 1;
        if (!is_trace_stopped()) {
                trace_stopped = 1;
                stop_all_threads();
                stop_all_traces();
        }

        show_stats();
}
868
/*
 * Parse options, validate the relayfs mount, open and start tracing
 * on all requested devices, then sleep until a signal (or the -w
 * alarm) ends the run and the stats are printed.
 */
int main(int argc, char *argv[])
{
        static char default_relay_path[] = "/relay";
        struct statfs st;
        int i, c;
        int stop_watch = 0;
        int act_mask_tmp = 0;

        while ((c = getopt_long(argc, argv, S_OPTS, l_opts, NULL)) >= 0) {
                switch (c) {
                case 'a':
                        /* -a accumulates named action masks */
                        i = find_mask_map(optarg);
                        if (i < 0) {
                                fprintf(stderr,"Invalid action mask %s\n",
                                        optarg);
                                return 1;
                        }
                        act_mask_tmp |= i;
                        break;

                case 'A':
                        /* -A sets the whole mask from a hex value */
                        if ((sscanf(optarg, "%x", &i) != 1) || 
                                                        !valid_act_opt(i)) {
                                fprintf(stderr,
                                        "Invalid set action mask %s/0x%x\n",
                                        optarg, i);
                                return 1;
                        }
                        act_mask_tmp = i;
                        break;

                case 'd':
                        if (resize_devices(optarg) != 0)
                                return 1;
                        break;

                case 'r':
                        relay_path = optarg;
                        break;

                case 'o':
                        output_name = optarg;
                        break;
                case 'k':
                        kill_running_trace = 1;
                        break;
                case 'w':
                        stop_watch = atoi(optarg);
                        if (stop_watch <= 0) {
                                fprintf(stderr,
                                        "Invalid stopwatch value (%d secs)\n",
                                        stop_watch);
                                return 1;
                        }
                        break;
                case 'V':
                        printf("%s version %s\n", argv[0], blktrace_version);
                        return 0;
                case 'b':
                        /* -b takes KiB, capped at 16 MiB per sub buffer */
                        buf_size = strtoul(optarg, NULL, 10);
                        if (buf_size <= 0 || buf_size > 16*1024) {
                                fprintf(stderr,
                                        "Invalid buffer size (%lu)\n",buf_size);
                                return 1;
                        }
                        /* convert KiB to bytes */
                        buf_size <<= 10;
                        break;
                case 'n':
                        buf_nr = strtoul(optarg, NULL, 10);
                        if (buf_nr <= 0) {
                                fprintf(stderr,
                                        "Invalid buffer nr (%lu)\n", buf_nr);
                                return 1;
                        }
                        break;
                case 'D':
                        output_dir = optarg;
                        break;
                default:
                        show_usage(argv[0]);
                        return 1;
                }
        }

        /* trailing non-option arguments are also treated as devices */
        while (optind < argc) {
                if (resize_devices(argv[optind++]) != 0)
                        return 1;
        }

        if (ndevs == 0) {
                show_usage(argv[0]);
                return 1;
        }

        if (!relay_path)
                relay_path = default_relay_path;

        if (act_mask_tmp != 0)
                act_mask = act_mask_tmp;

        /* make sure the -r path is actually a mounted relayfs */
        if (statfs(relay_path, &st) < 0) {
                perror("statfs");
                fprintf(stderr,"%s does not appear to be a valid path\n",
                        relay_path);
                return 1;
        } else if (st.f_type != (long) RELAYFS_TYPE) {
                fprintf(stderr,"%s does not appear to be a relay filesystem\n",
                        relay_path);
                return 1;
        }

        if (open_devices() != 0)
                return 1;

        /* -k: just stop any traces left running, then exit */
        if (kill_running_trace) {
                stop_all_traces();
                return 0;
        }

        setlocale(LC_NUMERIC, "en_US");

        ncpus = sysconf(_SC_NPROCESSORS_ONLN);
        if (ncpus < 0) {
                fprintf(stderr, "sysconf(_SC_NPROCESSORS_ONLN) failed\n");
                return 1;
        }

        if (start_devices() != 0)
                return 1;

        /* all of these end the run via the common handler */
        signal(SIGINT, handle_sigint);
        signal(SIGHUP, handle_sigint);
        signal(SIGTERM, handle_sigint);
        signal(SIGALRM, handle_sigint);

        /* safety net: make sure kernel tracing is stopped on any exit */
        atexit(stop_all_tracing);

        /* -w: SIGALRM after the given number of seconds stops the trace */
        if (stop_watch)
                alarm(stop_watch);

        while (!is_done())
                sleep(1);

        /* normally the signal handler has done this already */
        if (!is_trace_stopped()) {
                trace_stopped = 1;
                stop_all_threads();
                stop_all_traces();
        }

        show_stats();

        return 0;
}
1022