[PATCH] blktrace: bad ret = len assignment that should be ret == len
[blktrace.git] / blktrace.c
1 /*
2  * block queue tracing application
3  *
4  * Copyright (C) 2005 Jens Axboe <axboe@suse.de>
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, write to the Free Software
18  *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
19  *
20  */
21 #include <pthread.h>
22 #include <sys/types.h>
23 #include <sys/stat.h>
24 #include <unistd.h>
25 #include <locale.h>
26 #include <signal.h>
27 #include <fcntl.h>
28 #include <string.h>
29 #include <sys/ioctl.h>
30 #include <sys/param.h>
31 #include <sys/statfs.h>
32 #include <sys/poll.h>
33 #include <stdio.h>
34 #include <stdlib.h>
35 #include <sched.h>
36 #include <ctype.h>
37 #include <getopt.h>
38 #include <errno.h>
39 #include <assert.h>
40
41 #include "blktrace.h"
42
43 static char blktrace_version[] = "0.99";
44
45 /*
46  * You may want to increase this even more, if you are logging at a high
47  * rate and see skipped/missed events
48  */
49 #define BUF_SIZE        (512 * 1024)
50 #define BUF_NR          (4)
51
52 #define OFILE_BUF       (128 * 1024)
53
54 #define RELAYFS_TYPE    0xF0B4A981
55
56 #define RING_INIT_NR    (2)
57 #define RING_MAX_NR     (16UL)
58
59 #define S_OPTS  "d:a:A:r:o:kw:Vb:n:D:"
60 static struct option l_opts[] = {
61         {
62                 .name = "dev",
63                 .has_arg = required_argument,
64                 .flag = NULL,
65                 .val = 'd'
66         },
67         {
68                 .name = "act-mask",
69                 .has_arg = required_argument,
70                 .flag = NULL,
71                 .val = 'a'
72         },
73         {
74                 .name = "set-mask",
75                 .has_arg = required_argument,
76                 .flag = NULL,
77                 .val = 'A'
78         },
79         {
80                 .name = "relay",
81                 .has_arg = required_argument,
82                 .flag = NULL,
83                 .val = 'r'
84         },
85         {
86                 .name = "output",
87                 .has_arg = required_argument,
88                 .flag = NULL,
89                 .val = 'o'
90         },
91         {
92                 .name = "kill",
93                 .has_arg = no_argument,
94                 .flag = NULL,
95                 .val = 'k'
96         },
97         {
98                 .name = "stopwatch",
99                 .has_arg = required_argument,
100                 .flag = NULL,
101                 .val = 'w'
102         },
103         {
104                 .name = "version",
105                 .has_arg = no_argument,
106                 .flag = NULL,
107                 .val = 'V'
108         },
109         {
110                 .name = "buffer-size",
111                 .has_arg = required_argument,
112                 .flag = NULL,
113                 .val = 'b'
114         },
115         {
116                 .name = "num-sub-buffers",
117                 .has_arg = required_argument,
118                 .flag = NULL,
119                 .val = 'n'
120         },
121         {
122                 .name = "output-dir",
123                 .has_arg = required_argument,
124                 .flag = NULL,
125                 .val = 'D'
126         },
127         {
128                 .name = NULL,
129         }
130 };
131
132 struct thread_information {
133         int cpu;
134         pthread_t thread;
135
136         int fd;
137         void *fd_buf;
138         unsigned long fd_off;
139         unsigned long fd_size;
140         unsigned long fd_max_size;
141         char fn[MAXPATHLEN + 64];
142
143         pthread_mutex_t *fd_lock;
144         FILE *ofile;
145         char *ofile_buffer;
146
147         unsigned long events_processed;
148         struct device_information *device;
149 };
150
151 struct device_information {
152         int fd;
153         char *path;
154         char buts_name[32];
155         volatile int trace_started;
156         unsigned long drop_count;
157         struct thread_information *threads;
158 };
159
160 static int ncpus;
161 static struct thread_information *thread_information;
162 static int ndevs;
163 static struct device_information *device_information;
164
165 /* command line option globals */
166 static char *relay_path;
167 static char *output_name;
168 static char *output_dir;
169 static int act_mask = ~0U;
170 static int kill_running_trace;
171 static unsigned long buf_size = BUF_SIZE;
172 static unsigned long buf_nr = BUF_NR;
173
174 #define is_done()       (*(volatile int *)(&done))
175 static volatile int done;
176
177 #define is_trace_stopped()      (*(volatile int *)(&trace_stopped))
178 static volatile int trace_stopped;
179
180 #define is_stat_shown() (*(volatile int *)(&stat_shown))
181 static volatile int stat_shown;
182
183 static pthread_mutex_t stdout_mutex = PTHREAD_MUTEX_INITIALIZER;
184
185 static void exit_trace(int status);
186
187 #define dip_tracing(dip)        (*(volatile int *)(&(dip)->trace_started))
188 #define dip_set_tracing(dip, v) ((dip)->trace_started = (v))
189
190 #define __for_each_dip(__d, __i, __e)   \
191         for (__i = 0, __d = device_information; __i < __e; __i++, __d++)
192
193 #define for_each_dip(__d, __i)  __for_each_dip(__d, __i, ndevs)
194 #define for_each_tip(__d, __t, __i)     \
195         for (__i = 0, __t = (__d)->threads; __i < ncpus; __i++, __t++)
196
197 static int get_dropped_count(const char *buts_name)
198 {
199         int fd;
200         char tmp[MAXPATHLEN + 64];
201
202         snprintf(tmp, sizeof(tmp), "%s/block/%s/dropped",
203                  relay_path, buts_name);
204
205         fd = open(tmp, O_RDONLY);
206         if (fd < 0) {
207                 /*
208                  * this may be ok, if the kernel doesn't support dropped counts
209                  */
210                 if (errno == ENOENT)
211                         return 0;
212
213                 fprintf(stderr, "Couldn't open dropped file %s\n", tmp);
214                 return -1;
215         }
216
217         if (read(fd, tmp, sizeof(tmp)) < 0) {
218                 perror(tmp);
219                 close(fd);
220                 return -1;
221         }
222
223         close(fd);
224
225         return atoi(tmp);
226 }
227
228 static int start_trace(struct device_information *dip)
229 {
230         struct blk_user_trace_setup buts;
231
232         memset(&buts, 0, sizeof(buts));
233         buts.buf_size = buf_size;
234         buts.buf_nr = buf_nr;
235         buts.act_mask = act_mask;
236
237         if (ioctl(dip->fd, BLKSTARTTRACE, &buts) < 0) {
238                 perror("BLKSTARTTRACE");
239                 return 1;
240         }
241
242         memcpy(dip->buts_name, buts.name, sizeof(dip->buts_name));
243         dip_set_tracing(dip, 1);
244         return 0;
245 }
246
247 static void stop_trace(struct device_information *dip)
248 {
249         if (dip_tracing(dip) || kill_running_trace) {
250                 dip_set_tracing(dip, 0);
251
252                 if (ioctl(dip->fd, BLKSTOPTRACE) < 0)
253                         perror("BLKSTOPTRACE");
254
255                 close(dip->fd);
256                 dip->fd = -1;
257         }
258 }
259
260 static void stop_all_traces(void)
261 {
262         struct device_information *dip;
263         int i;
264
265         for_each_dip(dip, i) {
266                 dip->drop_count = get_dropped_count(dip->buts_name);
267                 stop_trace(dip);
268         }
269 }
270
271 static void wait_for_data(struct thread_information *tip)
272 {
273         struct pollfd pfd = { .fd = tip->fd, .events = POLLIN };
274
275         poll(&pfd, 1, 10);
276 }
277
278 static int __read_data(struct thread_information *tip, void *buf, int len,
279                        int block)
280 {
281         int ret = 0;
282
283         while (!is_done()) {
284                 ret = read(tip->fd, buf, len);
285                 if (ret > 0)
286                         break;
287                 else if (!ret) {
288                         if (!block)
289                                 break;
290
291                         wait_for_data(tip);
292                 } else {
293                         if (errno != EAGAIN) {
294                                 perror(tip->fn);
295                                 fprintf(stderr,"Thread %d failed read of %s\n",
296                                         tip->cpu, tip->fn);
297                                 break;
298                         }
299                         if (!block) {
300                                 ret = 0;
301                                 break;
302                         }
303
304                         wait_for_data(tip);
305                 }
306         }
307
308         return ret;
309 }
310
311 #define can_grow_ring(tip)      ((tip)->fd_max_size < RING_MAX_NR * buf_size * buf_nr)
312
313 static int resize_ringbuffer(struct thread_information *tip)
314 {
315         if (!can_grow_ring(tip))
316                 return 1;
317
318         tip->fd_buf = realloc(tip->fd_buf, 2 * tip->fd_max_size);
319
320         /*
321          * if the ring currently wraps, copy range over
322          */
323         if (tip->fd_off + tip->fd_size > tip->fd_max_size) {
324                 unsigned long wrap_size = tip->fd_size - (tip->fd_max_size - tip->fd_off);
325                 memmove(tip->fd_buf + tip->fd_off, tip->fd_buf, wrap_size);
326         }
327
328         tip->fd_max_size <<= 1;
329         return 0;
330 }
331
332 static int __refill_ringbuffer(struct thread_information *tip, int len,
333                                int block)
334 {
335         unsigned long off;
336         int ret;
337
338         off = (tip->fd_size + tip->fd_off) & (tip->fd_max_size - 1);
339         if (off + len > tip->fd_max_size)
340                 len = tip->fd_max_size - off;
341
342         assert(len > 0);
343
344         ret = __read_data(tip, tip->fd_buf + off, len, block);
345         if (ret < 0)
346                 return -1;
347
348         tip->fd_size += ret;
349         return ret;
350 }
351
352 /*
353  * keep filling ring until we get a short read
354  */
355 static void refill_ringbuffer(struct thread_information *tip, int block)
356 {
357         int len = buf_size;
358         int ret;
359
360         do {
361                 if (len + tip->fd_size > tip->fd_max_size)
362                         resize_ringbuffer(tip);
363
364                 ret = __refill_ringbuffer(tip, len, block);
365         } while ((ret == len) && !is_done());
366 }
367
368 static int read_data(struct thread_information *tip, void *buf,
369                      unsigned int len)
370 {
371         unsigned int start_size, end_size;
372
373         refill_ringbuffer(tip, len > tip->fd_size);
374
375         if (len > tip->fd_size)
376                 return -1;
377
378         /*
379          * see if we wrap the ring
380          */
381         start_size = len;
382         end_size = 0;
383         if (len > (tip->fd_max_size - tip->fd_off)) {
384                 start_size = tip->fd_max_size - tip->fd_off;
385                 end_size = len - start_size;
386         }
387
388         memcpy(buf, tip->fd_buf + tip->fd_off, start_size);
389         if (end_size)
390                 memcpy(buf + start_size, tip->fd_buf, end_size);
391
392         tip->fd_off = (tip->fd_off + len) & (tip->fd_max_size - 1);
393         tip->fd_size -= len;
394         return 0;
395 }
396
397 static int write_data(FILE *file, void *buf, unsigned int buf_len)
398 {
399         int ret, bytes_left;
400         char *p = buf;
401
402         bytes_left = buf_len;
403         while (bytes_left > 0) {
404                 ret = fwrite(p, bytes_left, 1, file);
405                 if (ret == 1)
406                         break;
407
408                 if (ret < 0) {
409                         perror("write");
410                         return 1;
411                 }
412         }
413
414         return 0;
415 }
416
417 static void *extract_data(struct thread_information *tip, int nb)
418 {
419         unsigned char *buf;
420
421         buf = malloc(nb);
422         if (!read_data(tip, buf, nb))
423                 return buf;
424
425         free(buf);
426         return NULL;
427 }
428
429 /*
430  * trace may start inside 'bit' or may need to be gotten further on
431  */
432 static int get_event_slow(struct thread_information *tip,
433                           struct blk_io_trace *bit)
434 {
435         const int inc = sizeof(__u32);
436         struct blk_io_trace foo;
437         unsigned int offset;
438         void *p;
439
440         /*
441          * check if trace is inside
442          */
443         offset = 0;
444         p = bit;
445         while (offset < sizeof(*bit)) {
446                 p += inc;
447                 offset += inc;
448
449                 memcpy(&foo, p, inc);
450
451                 if (CHECK_MAGIC(&foo))
452                         break;
453         }
454
455         /*
456          * part trace found inside, read the rest
457          */
458         if (offset < sizeof(*bit)) {
459                 int good_bytes = sizeof(*bit) - offset;
460
461                 memmove(bit, p, good_bytes);
462                 p = (void *) bit + good_bytes;
463
464                 return read_data(tip, p, offset);
465         }
466
467         /*
468          * nothing found, keep looking for start of trace
469          */
470         do {
471                 if (read_data(tip, bit, sizeof(bit->magic)))
472                         return -1;
473         } while (!CHECK_MAGIC(bit));
474
475         /*
476          * now get the rest of it
477          */
478         p = &bit->sequence;
479         if (read_data(tip, p, sizeof(*bit) - inc))
480                 return -1;
481
482         return 0;
483 }
484
485 /*
486  * Sometimes relayfs screws us a little, if an event crosses a sub buffer
487  * boundary. So keep looking forward in the trace data until an event
488  * is found
489  */
490 static int get_event(struct thread_information *tip, struct blk_io_trace *bit)
491 {
492         /*
493          * optimize for the common fast case, a full trace read that
494          * succeeds
495          */
496         if (read_data(tip, bit, sizeof(*bit)))
497                 return -1;
498
499         if (CHECK_MAGIC(bit))
500                 return 0;
501
502         /*
503          * ok that didn't work, the event may start somewhere inside the
504          * trace itself
505          */
506         return get_event_slow(tip, bit);
507 }
508
509 static inline void tip_fd_unlock(struct thread_information *tip)
510 {
511         if (tip->fd_lock)
512                 pthread_mutex_unlock(tip->fd_lock);
513 }
514
515 static inline void tip_fd_lock(struct thread_information *tip)
516 {
517         if (tip->fd_lock)
518                 pthread_mutex_lock(tip->fd_lock);
519 }
520
521 static void close_thread(struct thread_information *tip)
522 {
523         if (tip->fd != -1)
524                 close(tip->fd);
525         if (tip->ofile)
526                 fclose(tip->ofile);
527         if (tip->ofile_buffer)
528                 free(tip->ofile_buffer);
529         if (tip->fd_buf)
530                 free(tip->fd_buf);
531
532         tip->fd = -1;
533         tip->ofile = NULL;
534         tip->ofile_buffer = NULL;
535         tip->fd_buf = NULL;
536 }
537
538 static void *extract(void *arg)
539 {
540         struct thread_information *tip = arg;
541         int pdu_len;
542         char *pdu_data;
543         struct blk_io_trace t;
544         pid_t pid = getpid();
545         cpu_set_t cpu_mask;
546
547         CPU_ZERO(&cpu_mask);
548         CPU_SET((tip->cpu), &cpu_mask);
549
550         if (sched_setaffinity(pid, sizeof(cpu_mask), &cpu_mask) == -1) {
551                 perror("sched_setaffinity");
552                 exit_trace(1);
553         }
554
555         snprintf(tip->fn, sizeof(tip->fn), "%s/block/%s/trace%d",
556                         relay_path, tip->device->buts_name, tip->cpu);
557         tip->fd = open(tip->fn, O_RDONLY | O_NONBLOCK);
558         if (tip->fd < 0) {
559                 perror(tip->fn);
560                 fprintf(stderr,"Thread %d failed open of %s\n", tip->cpu,
561                         tip->fn);
562                 exit_trace(1);
563         }
564
565         /*
566          * start with a ringbuffer that is twice the size of the kernel side
567          */
568         tip->fd_max_size = buf_size * buf_nr * RING_INIT_NR;
569         tip->fd_buf = malloc(tip->fd_max_size);
570         tip->fd_off = 0;
571         tip->fd_size = 0;
572
573         pdu_data = NULL;
574         while (1) {
575                 if (get_event(tip, &t))
576                         break;
577
578                 if (verify_trace(&t))
579                         break;
580
581                 pdu_len = t.pdu_len;
582
583                 trace_to_be(&t);
584
585                 if (pdu_len) {
586                         pdu_data = extract_data(tip, pdu_len);
587                         if (!pdu_data)
588                                 break;
589                 }
590
591                 /*
592                  * now we have both trace and payload, get a lock on the
593                  * output descriptor and send it off
594                  */
595                 tip_fd_lock(tip);
596
597                 if (write_data(tip->ofile, &t, sizeof(t))) {
598                         tip_fd_unlock(tip);
599                         break;
600                 }
601
602                 if (pdu_data && write_data(tip->ofile, pdu_data, pdu_len)) {
603                         tip_fd_unlock(tip);
604                         break;
605                 }
606
607                 tip_fd_unlock(tip);
608
609                 if (pdu_data) {
610                         free(pdu_data);
611                         pdu_data = NULL;
612                 }
613
614                 tip->events_processed++;
615         }
616
617         close_thread(tip);
618         return NULL;
619 }
620
621 static int start_threads(struct device_information *dip)
622 {
623         struct thread_information *tip;
624         char op[64];
625         int j, pipeline = output_name && !strcmp(output_name, "-");
626         int len, mode;
627
628         for_each_tip(dip, tip, j) {
629                 tip->cpu = j;
630                 tip->device = dip;
631                 tip->fd_lock = NULL;
632                 tip->events_processed = 0;
633
634                 if (pipeline) {
635                         tip->ofile = fdopen(STDOUT_FILENO, "w");
636                         tip->fd_lock = &stdout_mutex;
637                         mode = _IOLBF;
638                         buf_size = 512;
639                 } else {
640                         len = 0;
641
642                         if (output_dir)
643                                 len = sprintf(op, "%s/", output_dir);
644
645                         if (output_name) {
646                                 sprintf(op + len, "%s.blktrace.%d", output_name,
647                                         tip->cpu);
648                         } else {
649                                 sprintf(op + len, "%s.blktrace.%d",
650                                         dip->buts_name, tip->cpu);
651                         }
652                         tip->ofile = fopen(op, "w");
653                         mode = _IOFBF;
654                         buf_size = OFILE_BUF;
655                 }
656
657                 if (tip->ofile == NULL) {
658                         perror(op);
659                         return 1;
660                 }
661
662                 tip->ofile_buffer = malloc(buf_size);
663                 if (setvbuf(tip->ofile, tip->ofile_buffer, mode, buf_size)) {
664                         perror("setvbuf");
665                         close_thread(tip);
666                         return 1;
667                 }
668
669                 if (pthread_create(&tip->thread, NULL, extract, tip)) {
670                         perror("pthread_create");
671                         close_thread(tip);
672                         return 1;
673                 }
674         }
675
676         return 0;
677 }
678
679 static void stop_threads(struct device_information *dip)
680 {
681         struct thread_information *tip;
682         unsigned long ret;
683         int i;
684
685         for_each_tip(dip, tip, i)
686                 (void) pthread_join(tip->thread, (void *) &ret);
687 }
688
689 static void stop_all_threads(void)
690 {
691         struct device_information *dip;
692         int i;
693
694         for_each_dip(dip, i)
695                 stop_threads(dip);
696 }
697
698 static void stop_all_tracing(void)
699 {
700         struct device_information *dip;
701         int i;
702
703         for_each_dip(dip, i)
704                 stop_trace(dip);
705 }
706
707 static void exit_trace(int status)
708 {
709         if (!is_trace_stopped()) {
710                 trace_stopped = 1;
711                 stop_all_threads();
712                 stop_all_tracing();
713         }
714
715         exit(status);
716 }
717
718 static int resize_devices(char *path)
719 {
720         int size = (ndevs + 1) * sizeof(struct device_information);
721
722         device_information = realloc(device_information, size);
723         if (!device_information) {
724                 fprintf(stderr, "Out of memory, device %s (%d)\n", path, size);
725                 return 1;
726         }
727         device_information[ndevs].path = path;
728         ndevs++;
729         return 0;
730 }
731
732 static int open_devices(void)
733 {
734         struct device_information *dip;
735         int i;
736
737         for_each_dip(dip, i) {
738                 dip->fd = open(dip->path, O_RDONLY | O_NONBLOCK);
739                 if (dip->fd < 0) {
740                         perror(dip->path);
741                         return 1;
742                 }
743         }
744
745         return 0;
746 }
747
748 static int start_devices(void)
749 {
750         struct device_information *dip;
751         int i, j, size;
752
753         size = ncpus * sizeof(struct thread_information);
754         thread_information = malloc(size * ndevs);
755         if (!thread_information) {
756                 fprintf(stderr, "Out of memory, threads (%d)\n", size * ndevs);
757                 return 1;
758         }
759
760         for_each_dip(dip, i) {
761                 if (start_trace(dip)) {
762                         close(dip->fd);
763                         fprintf(stderr, "Failed to start trace on %s\n",
764                                 dip->path);
765                         break;
766                 }
767         }
768
769         if (i != ndevs) {
770                 __for_each_dip(dip, j, i)
771                         stop_trace(dip);
772
773                 return 1;
774         }
775
776         for_each_dip(dip, i) {
777                 dip->threads = thread_information + (i * ncpus);
778                 if (start_threads(dip)) {
779                         fprintf(stderr, "Failed to start worker threads\n");
780                         break;
781                 }
782         }
783
784         if (i != ndevs) {
785                 __for_each_dip(dip, j, i)
786                         stop_threads(dip);
787                 for_each_dip(dip, i)
788                         stop_trace(dip);
789
790                 return 1;
791         }
792
793         return 0;
794 }
795
796 static void show_stats(void)
797 {
798         int i, j, no_stdout = 0;
799         struct device_information *dip;
800         struct thread_information *tip;
801         unsigned long long events_processed;
802         unsigned long total_drops;
803
804         if (is_stat_shown())
805                 return;
806
807         stat_shown = 1;
808
809         if (output_name && !strcmp(output_name, "-"))
810                 no_stdout = 1;
811
812         total_drops = 0;
813         for_each_dip(dip, i) {
814                 if (!no_stdout)
815                         printf("Device: %s\n", dip->path);
816                 events_processed = 0;
817                 for_each_tip(dip, tip, j) {
818                         if (!no_stdout)
819                                 printf("  CPU%3d: %20ld events\n",
820                                         tip->cpu, tip->events_processed);
821                         events_processed += tip->events_processed;
822                 }
823                 total_drops += dip->drop_count;
824                 if (!no_stdout)
825                         printf("  Total:  %20lld events (dropped %lu)\n",
826                                         events_processed, dip->drop_count);
827         }
828
829         if (total_drops)
830                 fprintf(stderr, "You have dropped events, consider using a larger buffer size (-b)\n");
831 }
832
833 static char usage_str[] = \
834         "-d <dev> [ -r relay path ] [ -o <output> ] [-k ] [ -w time ]\n" \
835         "[ -a action ] [ -A action mask ] [ -v ]\n\n" \
836         "\t-d Use specified device. May also be given last after options\n" \
837         "\t-r Path to mounted relayfs, defaults to /relay\n" \
838         "\t-o File(s) to send output to\n" \
839         "\t-D Directory to prepend to output file names\n" \
840         "\t-k Kill a running trace\n" \
841         "\t-w Stop after defined time, in seconds\n" \
842         "\t-a Only trace specified actions. See documentation\n" \
843         "\t-A Give trace mask as a single value. See documentation\n" \
844         "\t-b Sub buffer size in KiB\n" \
845         "\t-n Number of sub buffers\n" \
846         "\t-v Print program version info\n\n";
847
848 static void show_usage(char *program)
849 {
850         fprintf(stderr, "Usage: %s %s %s",program, blktrace_version, usage_str);
851 }
852 static void handle_sigint(__attribute__((__unused__)) int sig)
853 {
854         done = 1;
855         if (!is_trace_stopped()) {
856                 trace_stopped = 1;
857                 stop_all_threads();
858                 stop_all_traces();
859         }
860
861         show_stats();
862 }
863
864 int main(int argc, char *argv[])
865 {
866         static char default_relay_path[] = "/relay";
867         struct statfs st;
868         int i, c;
869         int stop_watch = 0;
870         int act_mask_tmp = 0;
871
872         while ((c = getopt_long(argc, argv, S_OPTS, l_opts, NULL)) >= 0) {
873                 switch (c) {
874                 case 'a':
875                         i = find_mask_map(optarg);
876                         if (i < 0) {
877                                 fprintf(stderr,"Invalid action mask %s\n",
878                                         optarg);
879                                 return 1;
880                         }
881                         act_mask_tmp |= i;
882                         break;
883
884                 case 'A':
885                         if ((sscanf(optarg, "%x", &i) != 1) || 
886                                                         !valid_act_opt(i)) {
887                                 fprintf(stderr,
888                                         "Invalid set action mask %s/0x%x\n",
889                                         optarg, i);
890                                 return 1;
891                         }
892                         act_mask_tmp = i;
893                         break;
894
895                 case 'd':
896                         if (resize_devices(optarg) != 0)
897                                 return 1;
898                         break;
899
900                 case 'r':
901                         relay_path = optarg;
902                         break;
903
904                 case 'o':
905                         output_name = optarg;
906                         break;
907                 case 'k':
908                         kill_running_trace = 1;
909                         break;
910                 case 'w':
911                         stop_watch = atoi(optarg);
912                         if (stop_watch <= 0) {
913                                 fprintf(stderr,
914                                         "Invalid stopwatch value (%d secs)\n",
915                                         stop_watch);
916                                 return 1;
917                         }
918                         break;
919                 case 'V':
920                         printf("%s version %s\n", argv[0], blktrace_version);
921                         return 0;
922                 case 'b':
923                         buf_size = strtoul(optarg, NULL, 10);
924                         if (buf_size <= 0 || buf_size > 16*1024) {
925                                 fprintf(stderr,
926                                         "Invalid buffer size (%lu)\n",buf_size);
927                                 return 1;
928                         }
929                         buf_size <<= 10;
930                         break;
931                 case 'n':
932                         buf_nr = strtoul(optarg, NULL, 10);
933                         if (buf_nr <= 0) {
934                                 fprintf(stderr,
935                                         "Invalid buffer nr (%lu)\n", buf_nr);
936                                 return 1;
937                         }
938                         break;
939                 case 'D':
940                         output_dir = optarg;
941                         break;
942                 default:
943                         show_usage(argv[0]);
944                         return 1;
945                 }
946         }
947
948         while (optind < argc) {
949                 if (resize_devices(argv[optind++]) != 0)
950                         return 1;
951         }
952
953         if (ndevs == 0) {
954                 show_usage(argv[0]);
955                 return 1;
956         }
957
958         if (!relay_path)
959                 relay_path = default_relay_path;
960
961         if (act_mask_tmp != 0)
962                 act_mask = act_mask_tmp;
963
964         if (statfs(relay_path, &st) < 0) {
965                 perror("statfs");
966                 fprintf(stderr,"%s does not appear to be a valid path\n",
967                         relay_path);
968                 return 1;
969         } else if (st.f_type != (long) RELAYFS_TYPE) {
970                 fprintf(stderr,"%s does not appear to be a relay filesystem\n",
971                         relay_path);
972                 return 1;
973         }
974
975         if (open_devices() != 0)
976                 return 1;
977
978         if (kill_running_trace) {
979                 stop_all_traces();
980                 return 0;
981         }
982
983         setlocale(LC_NUMERIC, "en_US");
984
985         ncpus = sysconf(_SC_NPROCESSORS_ONLN);
986         if (ncpus < 0) {
987                 fprintf(stderr, "sysconf(_SC_NPROCESSORS_ONLN) failed\n");
988                 return 1;
989         }
990
991         if (start_devices() != 0)
992                 return 1;
993
994         signal(SIGINT, handle_sigint);
995         signal(SIGHUP, handle_sigint);
996         signal(SIGTERM, handle_sigint);
997         signal(SIGALRM, handle_sigint);
998
999         atexit(stop_all_tracing);
1000
1001         if (stop_watch)
1002                 alarm(stop_watch);
1003
1004         while (!is_done())
1005                 sleep(1);
1006
1007         if (!is_trace_stopped()) {
1008                 trace_stopped = 1;
1009                 stop_all_threads();
1010                 stop_all_traces();
1011         }
1012
1013         show_stats();
1014
1015         return 0;
1016 }
1017