[PATCH] fio: stonewall needs to break to honor future stonewalls
[disktools.git] / fio.c
1 /*
2  * fio - the flexible io tester
3  *
4  * Copyright (C) 2005 Jens Axboe <axboe@suse.de>
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, write to the Free Software
18  *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
19  *
20  */
21 #include <stdio.h>
22 #include <stdlib.h>
23 #include <unistd.h>
24 #include <fcntl.h>
25 #include <string.h>
26 #include <errno.h>
27 #include <signal.h>
28 #include <time.h>
29 #include <ctype.h>
30 #include <sched.h>
31 #include <libaio.h>
32 #include <math.h>
33 #include <limits.h>
34 #include <assert.h>
35 #include <sys/time.h>
36 #include <sys/types.h>
37 #include <sys/stat.h>
38 #include <sys/wait.h>
39 #include <semaphore.h>
40 #include <sys/ipc.h>
41 #include <sys/shm.h>
42 #include <sys/ioctl.h>
43 #include <asm/unistd.h>
44 #include <asm/types.h>
45 #include <asm/bitops.h>
46
47 #include "arch.h"
48 #include "list.h"
49 #include "md5.h"
50
51 #ifndef BLKGETSIZE64
52 #define BLKGETSIZE64    _IOR(0x12,114,size_t)
53 #endif
54
55 #define MAX_JOBS        (1024)
56
57 static int ioprio_set(int which, int who, int ioprio)
58 {
59         return syscall(__NR_ioprio_set, which, who, ioprio);
60 }
61
62 /*
63  * we want fadvise64 really, but it's so tangled... later
64  */
65 static int fadvise(int fd, loff_t offset, size_t len, int advice)
66 {
67 #if 0
68         return syscall(__NR_fadvise64, fd, offset, offset >> 32, len, advice);
69 #else
70         return posix_fadvise(fd, (off_t) offset, len, advice);
71 #endif
72 }
73
74 enum {
75         IOPRIO_WHO_PROCESS = 1,
76         IOPRIO_WHO_PGRP,
77         IOPRIO_WHO_USER,
78 };
79
80 #define IOPRIO_CLASS_SHIFT      13
81
82 #define MASK    (4095)
83
84 #define DEF_BS          (4096)
85 #define DEF_TIMEOUT     (0)
86 #define DEF_RATE_CYCLE  (1000)
87 #define DEF_ODIRECT     (1)
88 #define DEF_SEQUENTIAL  (1)
89 #define DEF_RAND_REPEAT (1)
90 #define DEF_OVERWRITE   (0)
91 #define DEF_CREATE      (1)
92 #define DEF_INVALIDATE  (1)
93 #define DEF_SYNCIO      (0)
94 #define DEF_RANDSEED    (0xb1899bedUL)
95 #define DEF_BWAVGTIME   (500)
96 #define DEF_CREATE_SER  (1)
97 #define DEF_CREATE_FSYNC        (1)
98 #define DEF_LOOPS       (1)
99 #define DEF_VERIFY      (0)
100 #define DEF_STONEWALL   (0)
101
102 #define ALIGN(buf)      (char *) (((unsigned long) (buf) + MASK) & ~(MASK))
103
104 static int repeatable = DEF_RAND_REPEAT;
105 static int rate_quit = 1;
106 static int write_lat_log;
107 static int write_bw_log;
108 static int exitall_on_terminate;
109
110 static int thread_number;
111 static char *ini_file;
112
113 static int max_jobs = MAX_JOBS;
114
115 static char run_str[MAX_JOBS + 1];
116
117 static int shm_id;
118
119 enum {
120         DDIR_READ = 0,
121         DDIR_WRITE,
122 };
123
124 /*
125  * thread life cycle
126  */
127 enum {
128         TD_NOT_CREATED = 0,
129         TD_CREATED,
130         TD_RUNNING,
131         TD_VERIFYING,
132         TD_EXITED,
133         TD_REAPED,
134 };
135
136 enum {
137         MEM_MALLOC,
138         MEM_SHM,
139 };
140
141 /*
142  * The io unit
143  */
144 struct io_u {
145         struct iocb iocb;
146         struct timeval start_time;
147         struct timeval issue_time;
148
149         char *buf;
150         unsigned int buflen;
151         unsigned long long offset;
152
153         struct list_head list;
154 };
155
156 struct io_stat {
157         unsigned long val;
158         unsigned long val_sq;
159         unsigned long max_val;
160         unsigned long min_val;
161         unsigned long samples;
162 };
163
164 struct io_sample {
165         unsigned long time;
166         unsigned long val;
167 };
168
169 struct io_log {
170         unsigned long nr_samples;
171         unsigned long max_samples;
172         struct io_sample *log;
173 };
174
175 struct io_piece {
176         struct list_head list;
177         unsigned long long offset;
178         unsigned int len;
179 };
180
181 #define FIO_HDR_MAGIC   0xf00baaef
182
183 struct verify_header {
184         unsigned int fio_magic;
185         unsigned int len;
186         char md5_digest[MD5_HASH_WORDS * 4];
187 };
188
189 #define td_read(td)             ((td)->ddir == DDIR_READ)
190 #define td_write(td)            ((td)->ddir == DDIR_WRITE)
191 #define should_fsync(td)        (td_write(td) && !(td)->odirect)
192
193 #define BLOCKS_PER_MAP          (8 * sizeof(long))
194 #define TO_MAP_BLOCK(td, b)     ((b) - ((td)->file_offset / (td)->min_bs))
195 #define RAND_MAP_IDX(td, b)     (TO_MAP_BLOCK(td, b) / BLOCKS_PER_MAP)
196 #define RAND_MAP_BIT(td, b)     (TO_MAP_BLOCK(td, b) & (BLOCKS_PER_MAP - 1))
197
198 struct thread_data {
199         char file_name[256];
200         int thread_number;
201         int error;
202         int fd;
203         pid_t pid;
204         char *orig_buffer;
205         volatile int terminate;
206         volatile int runstate;
207         volatile int old_runstate;
208         unsigned int ddir;
209         unsigned int ioprio;
210         unsigned int sequential;
211         unsigned int bs;
212         unsigned int min_bs;
213         unsigned int max_bs;
214         unsigned int odirect;
215         unsigned int thinktime;
216         unsigned int fsync_blocks;
217         unsigned int start_delay;
218         unsigned int timeout;
219         unsigned int use_aio;
220         unsigned int create_file;
221         unsigned int overwrite;
222         unsigned int invalidate_cache;
223         unsigned int bw_avg_time;
224         unsigned int create_serialize;
225         unsigned int create_fsync;
226         unsigned int loops;
227         unsigned long long file_size;
228         unsigned long long file_offset;
229         unsigned int sync_io;
230         unsigned int mem_type;
231         unsigned int verify;
232         unsigned int stonewall;
233         cpu_set_t cpumask;
234
235         struct drand48_data bsrange_state;
236         struct drand48_data verify_state;
237
238         int shm_id;
239
240         off_t cur_off;
241
242         io_context_t aio_ctx;
243         unsigned int aio_depth;
244         struct io_event *aio_events;
245
246         unsigned int cur_depth;
247         struct list_head io_u_freelist;
248         struct list_head io_u_busylist;
249
250         unsigned int rate;
251         unsigned int ratemin;
252         unsigned int ratecycle;
253         unsigned long rate_usec_cycle;
254         long rate_pending_usleep;
255         unsigned long rate_bytes;
256         struct timeval lastrate;
257
258         unsigned long runtime;          /* sec */
259         unsigned long long io_size;
260
261         unsigned long io_blocks;
262         unsigned long io_bytes;
263         unsigned long this_io_bytes;
264         unsigned long last_bytes;
265         sem_t mutex;
266
267         struct drand48_data random_state;
268         unsigned long *file_map;
269         unsigned int num_maps;
270
271         /*
272          * bandwidth and latency stats
273          */
274         struct io_stat clat_stat;               /* completion latency */
275         struct io_stat slat_stat;               /* submission latency */
276
277         struct io_stat bw_stat;                 /* bandwidth stats */
278         unsigned long stat_io_bytes;
279         struct timeval stat_sample_time;
280
281         struct io_log *lat_log;
282         struct io_log *bw_log;
283
284         struct timeval start;
285         struct rusage ru_start;
286         struct rusage ru_end;
287
288         struct list_head io_hist_list;
289 };
290
291 static struct thread_data *threads;
292 static struct thread_data def_thread;
293
294 static sem_t startup_sem;
295
296 static void sig_handler(int sig)
297 {
298         int i;
299
300         for (i = 0; i < thread_number; i++) {
301                 struct thread_data *td = &threads[i];
302
303                 td->terminate = 1;
304                 td->start_delay = 0;
305         }
306 }
307
308 static int init_random_state(struct thread_data *td)
309 {
310         unsigned long seed;
311         int fd, num_maps, blocks;
312
313         fd = open("/dev/random", O_RDONLY);
314         if (fd == -1) {
315                 td->error = errno;
316                 return 1;
317         }
318
319         if (read(fd, &seed, sizeof(seed)) < (int) sizeof(seed)) {
320                 td->error = EIO;
321                 close(fd);
322                 return 1;
323         }
324
325         close(fd);
326
327         srand48_r(seed, &td->bsrange_state);
328         srand48_r(seed, &td->verify_state);
329
330         if (td->sequential)
331                 return 0;
332
333         if (repeatable)
334                 seed = DEF_RANDSEED;
335
336         blocks = (td->io_size + td->min_bs - 1) / td->min_bs;
337         num_maps = blocks / BLOCKS_PER_MAP;
338         td->file_map = malloc(num_maps * sizeof(long));
339         td->num_maps = num_maps;
340         memset(td->file_map, 0, num_maps * sizeof(long));
341
342         srand48_r(seed, &td->random_state);
343         return 0;
344 }
345
346 static unsigned long utime_since(struct timeval *s, struct timeval *e)
347 {
348         double sec, usec;
349
350         sec = e->tv_sec - s->tv_sec;
351         usec = e->tv_usec - s->tv_usec;
352         if (sec > 0 && usec < 0) {
353                 sec--;
354                 usec += 1000000;
355         }
356
357         sec *= (double) 1000000;
358
359         return sec + usec;
360 }
361
362 static unsigned long utime_since_now(struct timeval *s)
363 {
364         struct timeval t;
365
366         gettimeofday(&t, NULL);
367         return utime_since(s, &t);
368 }
369
370 static unsigned long mtime_since(struct timeval *s, struct timeval *e)
371 {
372         double sec, usec;
373
374         sec = e->tv_sec - s->tv_sec;
375         usec = e->tv_usec - s->tv_usec;
376         if (sec > 0 && usec < 0) {
377                 sec--;
378                 usec += 1000000;
379         }
380
381         sec *= (double) 1000;
382         usec /= (double) 1000;
383
384         return sec + usec;
385 }
386
387 static unsigned long mtime_since_now(struct timeval *s)
388 {
389         struct timeval t;
390
391         gettimeofday(&t, NULL);
392         return mtime_since(s, &t);
393 }
394
395 static inline unsigned long msec_now(struct timeval *s)
396 {
397         return s->tv_sec * 1000 + s->tv_usec / 1000;
398 }
399
400 static int random_map_free(struct thread_data *td, unsigned long long block)
401 {
402         unsigned int idx = RAND_MAP_IDX(td, block);
403         unsigned int bit = RAND_MAP_BIT(td, block);
404
405         return (td->file_map[idx] & (1UL << bit)) == 0;
406 }
407
408 static int get_next_free_block(struct thread_data *td, unsigned long long *b)
409 {
410         int i;
411
412         *b = 0;
413         i = 0;
414         while ((*b) * td->min_bs < td->io_size) {
415                 if (td->file_map[i] != -1UL) {
416                         *b += ffz(td->file_map[i]);
417                         return 0;
418                 }
419
420                 *b += BLOCKS_PER_MAP;
421                 i++;
422         }
423
424         return 1;
425 }
426
427 static void mark_random_map(struct thread_data *td, struct io_u *io_u)
428 {
429         unsigned long block = io_u->offset / td->min_bs;
430         unsigned int blocks = 0;
431
432         while (blocks < (io_u->buflen / td->min_bs)) {
433                 int idx, bit;
434
435                 if (!random_map_free(td, block))
436                         break;
437
438                 idx = RAND_MAP_IDX(td, block);
439                 bit = RAND_MAP_BIT(td, block);
440
441                 assert(idx < td->num_maps);
442
443                 td->file_map[idx] |= (1UL << bit);
444                 block++;
445                 blocks++;
446         }
447
448         if ((blocks * td->min_bs) < io_u->buflen)
449                 io_u->buflen = blocks * td->min_bs;
450 }
451
452 static int get_next_offset(struct thread_data *td, unsigned long long *offset)
453 {
454         unsigned long long b, rb;
455         long r;
456
457         if (!td->sequential) {
458                 unsigned long max_blocks = td->io_size / td->min_bs;
459                 int loops = 50;
460
461                 do {
462                         lrand48_r(&td->random_state, &r);
463                         b = ((max_blocks - 1) * r / (RAND_MAX+1.0));
464                         rb = b + (td->file_offset / td->min_bs);
465                         loops--;
466                 } while (!random_map_free(td, rb) && loops);
467
468                 if (!loops) {
469                         if (get_next_free_block(td, &b))
470                                 return 1;
471                 }
472         } else
473                 b = td->last_bytes / td->min_bs;
474
475         *offset = (b * td->min_bs) + td->file_offset;
476         if (*offset > td->file_size)
477                 return 1;
478
479         return 0;
480 }
481
482 static unsigned int get_next_buflen(struct thread_data *td)
483 {
484         unsigned int buflen;
485         long r;
486
487         if (td->min_bs == td->max_bs)
488                 buflen = td->min_bs;
489         else {
490                 lrand48_r(&td->bsrange_state, &r);
491                 buflen = (1 + (double) (td->max_bs - 1) * r / (RAND_MAX + 1.0));
492                 buflen = (buflen + td->min_bs - 1) & ~(td->min_bs - 1);
493         }
494
495         if (buflen > td->io_size - td->this_io_bytes)
496                 buflen = td->io_size - td->this_io_bytes;
497
498         return buflen;
499 }
500
501 static inline void add_stat_sample(struct thread_data *td, struct io_stat *is,
502                                    unsigned long val)
503 {
504         if (val > is->max_val)
505                 is->max_val = val;
506         if (val < is->min_val)
507                 is->min_val = val;
508
509         is->val += val;
510         is->val_sq += val * val;
511         is->samples++;
512 }
513
514 static void add_log_sample(struct thread_data *td, struct io_log *log,
515                            unsigned long val)
516 {
517         if (log->nr_samples == log->max_samples) {
518                 int new_size = sizeof(struct io_sample) * log->max_samples * 2;
519
520                 log->log = realloc(log->log, new_size);
521                 log->max_samples <<= 1;
522         }
523
524         log->log[log->nr_samples].val = val;
525         log->log[log->nr_samples].time = mtime_since_now(&td->start);
526         log->nr_samples++;
527 }
528
529 static void add_clat_sample(struct thread_data *td, unsigned long msec)
530 {
531         add_stat_sample(td, &td->clat_stat, msec);
532
533         if (td->lat_log)
534                 add_log_sample(td, td->lat_log, msec);
535 }
536
537 static void add_slat_sample(struct thread_data *td, unsigned long msec)
538 {
539         add_stat_sample(td, &td->slat_stat, msec);
540 }
541
542 static void add_bw_sample(struct thread_data *td)
543 {
544         unsigned long spent = mtime_since_now(&td->stat_sample_time);
545         unsigned long rate;
546
547         if (spent < td->bw_avg_time)
548                 return;
549
550         rate = (td->this_io_bytes - td->stat_io_bytes) / spent;
551         add_stat_sample(td, &td->bw_stat, rate);
552
553         if (td->bw_log)
554                 add_log_sample(td, td->bw_log, rate);
555
556         gettimeofday(&td->stat_sample_time, NULL);
557         td->stat_io_bytes = td->this_io_bytes;
558 }
559
560 /*
561  * busy looping version for the last few usec
562  */
563 static void __usec_sleep(int usec)
564 {
565         struct timeval start;
566
567         gettimeofday(&start, NULL);
568         while (utime_since_now(&start) < usec)
569                  __asm__ __volatile__("rep;nop": : :"memory");
570 }
571
572 static void usec_sleep(int usec)
573 {
574         struct timespec req = { .tv_sec = 0, .tv_nsec = usec * 1000 };
575         struct timespec rem;
576
577         do {
578                 if (usec < 5000) {
579                         __usec_sleep(usec);
580                         break;
581                 }
582                 rem.tv_sec = rem.tv_nsec = 0;
583                 nanosleep(&req, &rem);
584                 if (!rem.tv_nsec)
585                         break;
586
587                 req.tv_nsec = rem.tv_nsec;
588                 usec = rem.tv_nsec * 1000;
589         } while (1);
590 }
591
592 static void rate_throttle(struct thread_data *td, unsigned long time_spent,
593                           unsigned int bytes)
594 {
595         unsigned long usec_cycle;
596
597         if (!td->rate)
598                 return;
599
600         usec_cycle = td->rate_usec_cycle * (bytes / td->min_bs);
601
602         if (time_spent < usec_cycle) {
603                 unsigned long s = usec_cycle - time_spent;
604
605                 td->rate_pending_usleep += s;
606                 if (td->rate_pending_usleep >= 100000) {
607                         usec_sleep(td->rate_pending_usleep);
608                         td->rate_pending_usleep = 0;
609                 }
610         } else {
611                 long overtime = time_spent - usec_cycle;
612
613                 td->rate_pending_usleep -= overtime;
614         }
615 }
616
617 static int check_min_rate(struct thread_data *td, struct timeval *now)
618 {
619         unsigned long spent;
620         unsigned long rate;
621
622         /*
623          * allow a 2 second settle period in the beginning
624          */
625         if (mtime_since(&td->start, now) < 2000)
626                 return 0;
627
628         /*
629          * if rate blocks is set, sample is running
630          */
631         if (td->rate_bytes) {
632                 spent = mtime_since(&td->lastrate, now);
633                 if (spent < td->ratecycle)
634                         return 0;
635
636                 rate = (td->this_io_bytes - td->rate_bytes) / spent;
637                 if (rate < td->ratemin) {
638                         printf("Client%d: min rate %d not met, got %ldKiB/sec\n", td->thread_number, td->ratemin, rate);
639                         if (rate_quit)
640                                 sig_handler(0);
641                         return 1;
642                 }
643         }
644
645         td->rate_bytes = td->this_io_bytes;
646         memcpy(&td->lastrate, now, sizeof(*now));
647         return 0;
648 }
649
650 static inline int runtime_exceeded(struct thread_data *td, struct timeval *t)
651 {
652         if (!td->timeout)
653                 return 0;
654         if (mtime_since(&td->start, t) >= td->timeout * 1000)
655                 return 1;
656
657         return 0;
658 }
659
660 static void fill_random_bytes(struct thread_data *td,
661                               unsigned char *p, unsigned int len)
662 {
663         unsigned int todo;
664         double r;
665
666         while (len) {
667                 drand48_r(&td->verify_state, &r);
668
669                 /*
670                  * lrand48_r seems to be broken and only fill the bottom
671                  * 32-bits, even on 64-bit archs with 64-bit longs
672                  */
673                 todo = sizeof(r);
674                 if (todo > len)
675                         todo = len;
676
677                 memcpy(p, &r, todo);
678
679                 len -= todo;
680                 p += todo;
681         }
682 }
683
684 static void hexdump(void *buffer, int len)
685 {
686         unsigned char *p = buffer;
687         int i;
688
689         for (i = 0; i < len; i++)
690                 printf("%02x", p[i]);
691         printf("\n");
692 }
693
694 static int verify_io_u(struct io_u *io_u)
695 {
696         struct verify_header *hdr = (struct verify_header *) io_u->buf;
697         unsigned char *p = (unsigned char *) io_u->buf;
698         struct md5_ctx md5_ctx;
699         int ret;
700
701         if (hdr->fio_magic != FIO_HDR_MAGIC)
702                 return 1;
703
704         memset(&md5_ctx, 0, sizeof(md5_ctx));
705         p += sizeof(*hdr);
706         md5_update(&md5_ctx, p, hdr->len - sizeof(*hdr));
707
708         ret = memcmp(hdr->md5_digest, md5_ctx.hash, sizeof(md5_ctx.hash));
709         if (ret) {
710                 hexdump(hdr->md5_digest, sizeof(hdr->md5_digest));
711                 hexdump(md5_ctx.hash, sizeof(md5_ctx.hash));
712         }
713
714         return ret;
715 }
716
717 /*
718  * fill body of io_u->buf with random data and add a header with the
719  * (eg) sha1sum of that data.
720  */
721 static void populate_io_u(struct thread_data *td, struct io_u *io_u)
722 {
723         struct md5_ctx md5_ctx;
724         struct verify_header hdr;
725         unsigned char *p = (unsigned char *) io_u->buf;
726
727         hdr.fio_magic = FIO_HDR_MAGIC;
728         hdr.len = io_u->buflen;
729         p += sizeof(hdr);
730         fill_random_bytes(td, p, io_u->buflen - sizeof(hdr));
731
732         memset(&md5_ctx, 0, sizeof(md5_ctx));
733         md5_update(&md5_ctx, p, io_u->buflen - sizeof(hdr));
734         memcpy(hdr.md5_digest, md5_ctx.hash, sizeof(md5_ctx.hash));
735         memcpy(io_u->buf, &hdr, sizeof(hdr));
736 }
737
738 static void put_io_u(struct thread_data *td, struct io_u *io_u)
739 {
740         list_del(&io_u->list);
741         list_add(&io_u->list, &td->io_u_freelist);
742         td->cur_depth--;
743 }
744
745 #define queue_full(td)  (list_empty(&(td)->io_u_freelist))
746
747 static struct io_u *__get_io_u(struct thread_data *td)
748 {
749         struct io_u *io_u;
750
751         if (queue_full(td))
752                 return NULL;
753
754         io_u = list_entry(td->io_u_freelist.next, struct io_u, list);
755         list_del(&io_u->list);
756         list_add(&io_u->list, &td->io_u_busylist);
757         td->cur_depth++;
758         return io_u;
759 }
760
761 static struct io_u *get_io_u(struct thread_data *td)
762 {
763         struct io_u *io_u;
764
765         io_u = __get_io_u(td);
766         if (!io_u)
767                 return NULL;
768
769         if (get_next_offset(td, &io_u->offset)) {
770                 put_io_u(td, io_u);
771                 return NULL;
772         }
773
774         io_u->buflen = get_next_buflen(td);
775         if (!io_u->buflen) {
776                 put_io_u(td, io_u);
777                 return NULL;
778         }
779
780         if (io_u->buflen + io_u->offset > td->file_size)
781                 io_u->buflen = td->file_size - io_u->offset;
782
783         if (!td->sequential)
784                 mark_random_map(td, io_u);
785
786         td->last_bytes += io_u->buflen;
787
788         if (td->verify)
789                 populate_io_u(td, io_u);
790
791         if (td->use_aio) {
792                 if (td_read(td))
793                         io_prep_pread(&io_u->iocb, td->fd, io_u->buf, io_u->buflen, io_u->offset);
794                 else
795                         io_prep_pwrite(&io_u->iocb, td->fd, io_u->buf, io_u->buflen, io_u->offset);
796         }
797
798         gettimeofday(&io_u->start_time, NULL);
799         return io_u;
800 }
801
802 static inline void td_set_runstate(struct thread_data *td, int runstate)
803 {
804         td->old_runstate = td->runstate;
805         td->runstate = runstate;
806 }
807
808 static int get_next_verify(struct thread_data *td,
809                            unsigned long long *offset, unsigned int *len)
810 {
811         struct io_piece *ipo;
812
813         if (list_empty(&td->io_hist_list))
814                 return 1;
815
816         ipo = list_entry(td->io_hist_list.next, struct io_piece, list);
817         list_del(&ipo->list);
818
819         *offset = ipo->offset;
820         *len = ipo->len;
821         free(ipo);
822         return 0;
823 }
824
825 static void prune_io_piece_log(struct thread_data *td)
826 {
827         struct io_piece *ipo;
828
829         while (!list_empty(&td->io_hist_list)) {
830                 ipo = list_entry(td->io_hist_list.next, struct io_piece, list);
831
832                 list_del(&ipo->list);
833                 free(ipo);
834         }
835 }
836
837 /*
838  * log a succesful write, so we can unwind the log for verify
839  */
840 static void log_io_piece(struct thread_data *td, struct io_u *io_u)
841 {
842         struct io_piece *ipo = malloc(sizeof(struct io_piece));
843         struct list_head *entry;
844
845         INIT_LIST_HEAD(&ipo->list);
846         ipo->offset = io_u->offset;
847         ipo->len = io_u->buflen;
848
849         /*
850          * for random io where the writes extend the file, it will typically
851          * be laid out with the block scattered as written. it's faster to
852          * read them in in that order again, so don't sort
853          */
854         if (td->sequential || !td->overwrite) {
855                 list_add_tail(&ipo->list, &td->io_hist_list);
856                 return;
857         }
858
859         /*
860          * for random io, sort the list so verify will run faster
861          */
862         entry = &td->io_hist_list;
863         while ((entry = entry->prev) != &td->io_hist_list) {
864                 struct io_piece *__ipo = list_entry(entry, struct io_piece, list);
865
866                 if (__ipo->offset < ipo->offset)
867                         break;
868         }
869
870         list_add(&ipo->list, entry);
871 }
872
873 static void do_sync_verify(struct thread_data *td)
874 {
875         struct timeval t;
876         struct io_u *io_u = NULL;
877         int ret;
878
879         td_set_runstate(td, TD_VERIFYING);
880
881         io_u = __get_io_u(td);
882
883         if (!td->odirect) {
884                 if (fadvise(td->fd, td->file_offset, td->io_size, POSIX_FADV_DONTNEED) < 0) {
885                         td->error = errno;
886                         goto out;
887                 }
888         }
889
890         do {
891                 if (td->terminate)
892                         break;
893
894                 gettimeofday(&t, NULL);
895                 if (runtime_exceeded(td, &t))
896                         break;
897
898                 if (get_next_verify(td, &io_u->offset, &io_u->buflen))
899                         break;
900
901                 if (td->cur_off != io_u->offset) {
902                         if (lseek(td->fd, io_u->offset, SEEK_SET) == -1) {
903                                 td->error = errno;
904                                 break;
905                         }
906                 }
907
908                 ret = read(td->fd, io_u->buf, io_u->buflen);
909                 if (ret < (int) io_u->buflen) {
910                         if (ret == -1) {
911                                 td->error = errno;
912                                 break;
913                         } else if (!ret)
914                                 break;
915                         else
916                                 io_u->buflen = ret;
917                 }
918
919                 if (verify_io_u(io_u))
920                         break;
921
922                 td->cur_off = io_u->offset + io_u->buflen;
923         } while (1);
924
925 out:
926         td_set_runstate(td, TD_RUNNING);
927         put_io_u(td, io_u);
928 }
929
930 static void do_sync_io(struct thread_data *td)
931 {
932         unsigned long msec, usec;
933         struct io_u *io_u = NULL;
934         struct timeval e;
935
936         while (td->this_io_bytes < td->io_size) {
937                 int ret;
938
939                 if (td->terminate)
940                         break;
941
942                 io_u = get_io_u(td);
943                 if (!io_u)
944                         break;
945
946                 if (td->cur_off != io_u->offset) {
947                         if (lseek(td->fd, io_u->offset, SEEK_SET) == -1) {
948                                 td->error = errno;
949                                 break;
950                         }
951                 }
952
953                 if (td_read(td))
954                         ret = read(td->fd, io_u->buf, io_u->buflen);
955                 else
956                         ret = write(td->fd, io_u->buf, io_u->buflen);
957
958                 if (ret < (int) io_u->buflen) {
959                         if (ret == -1)
960                                 td->error = errno;
961                         break;
962                 }
963
964                 if (td_write(td))
965                         log_io_piece(td, io_u);
966
967                 td->io_blocks++;
968                 td->io_bytes += io_u->buflen;
969                 td->this_io_bytes += io_u->buflen;
970                 td->cur_off = io_u->offset + io_u->buflen;
971
972                 gettimeofday(&e, NULL);
973
974                 usec = utime_since(&io_u->start_time, &e);
975
976                 rate_throttle(td, usec, io_u->buflen);
977
978                 if (check_min_rate(td, &e)) {
979                         td->error = ENODATA;
980                         break;
981                 }
982
983                 msec = usec / 1000;
984                 add_clat_sample(td, msec);
985                 add_bw_sample(td);
986
987                 if (runtime_exceeded(td, &e))
988                         break;
989
990                 put_io_u(td, io_u);
991                 io_u = NULL;
992
993                 if (td->thinktime)
994                         usec_sleep(td->thinktime);
995
996                 if (should_fsync(td) && td->fsync_blocks &&
997                     (td->io_blocks % td->fsync_blocks) == 0)
998                         fsync(td->fd);
999         }
1000
1001         if (io_u)
1002                 put_io_u(td, io_u);
1003
1004         if (should_fsync(td))
1005                 fsync(td->fd);
1006 }
1007
1008 static int io_u_getevents(struct thread_data *td, int min, int max,
1009                           struct timespec *t)
1010 {
1011         int r;
1012
1013         do {
1014                 r = io_getevents(td->aio_ctx, min, max, td->aio_events, t);
1015                 if (r != -EAGAIN && r != -EINTR)
1016                         break;
1017         } while (1);
1018
1019         return r;
1020 }
1021
1022 static int io_u_queue(struct thread_data *td, struct io_u *io_u)
1023 {
1024         struct iocb *iocb = &io_u->iocb;
1025         int ret;
1026
1027         do {
1028                 ret = io_submit(td->aio_ctx, 1, &iocb);
1029                 if (ret == 1)
1030                         return 0;
1031                 else if (ret == -EAGAIN)
1032                         usleep(100);
1033                 else if (ret == -EINTR)
1034                         continue;
1035                 else
1036                         break;
1037         } while (1);
1038
1039         return ret;
1040 }
1041
1042 #define iocb_time(iocb) ((unsigned long) (iocb)->data)
1043 #define ev_to_iou(ev)   (struct io_u *) ((unsigned long) (ev)->obj)
1044
1045 static int ios_completed(struct thread_data *td, int nr)
1046 {
1047         unsigned long msec;
1048         struct io_u *io_u;
1049         struct timeval e;
1050         int i, bytes_done;
1051
1052         gettimeofday(&e, NULL);
1053
1054         for (i = 0, bytes_done = 0; i < nr; i++) {
1055                 io_u = ev_to_iou(td->aio_events + i);
1056
1057                 td->io_blocks++;
1058                 td->io_bytes += io_u->buflen;
1059                 td->this_io_bytes += io_u->buflen;
1060
1061                 msec = mtime_since(&io_u->issue_time, &e);
1062
1063                 add_clat_sample(td, msec);
1064                 add_bw_sample(td);
1065
1066                 if (td_write(td))
1067                         log_io_piece(td, io_u);
1068
1069                 bytes_done += io_u->buflen;
1070                 put_io_u(td, io_u);
1071         }
1072
1073         return bytes_done;
1074 }
1075
1076 static void cleanup_pending_aio(struct thread_data *td)
1077 {
1078         struct timespec ts = { .tv_sec = 0, .tv_nsec = 0};
1079         struct list_head *entry, *n;
1080         struct io_u *io_u;
1081         int r;
1082
1083         /*
1084          * get immediately available events, if any
1085          */
1086         r = io_u_getevents(td, 0, td->cur_depth, &ts);
1087         if (r > 0)
1088                 ios_completed(td, r);
1089
1090         /*
1091          * now cancel remaining active events
1092          */
1093         list_for_each_safe(entry, n, &td->io_u_busylist) {
1094                 io_u = list_entry(entry, struct io_u, list);
1095
1096                 r = io_cancel(td->aio_ctx, &io_u->iocb, td->aio_events);
1097                 if (!r)
1098                         put_io_u(td, io_u);
1099         }
1100
1101         if (td->cur_depth) {
1102                 r = io_u_getevents(td, td->cur_depth, td->cur_depth, NULL);
1103                 if (r > 0)
1104                         ios_completed(td, r);
1105         }
1106 }
1107
1108 static int async_do_verify(struct thread_data *td, struct io_u **io_u)
1109 {
1110         struct io_u *v_io_u = *io_u;
1111         int ret = 0;
1112
1113         if (v_io_u) {
1114                 ret = verify_io_u(v_io_u);
1115                 put_io_u(td, v_io_u);
1116                 *io_u = NULL;
1117         }
1118
1119         return ret;
1120 }
1121
1122 static void do_async_verify(struct thread_data *td)
1123 {
1124         struct timeval t;
1125         struct io_u *io_u, *v_io_u = NULL;
1126         int ret;
1127
1128         td_set_runstate(td, TD_VERIFYING);
1129
1130         do {
1131                 if (td->terminate)
1132                         break;
1133
1134                 gettimeofday(&t, NULL);
1135                 if (runtime_exceeded(td, &t))
1136                         break;
1137
1138                 io_u = __get_io_u(td);
1139                 if (!io_u)
1140                         break;
1141
1142                 if (get_next_verify(td, &io_u->offset, &io_u->buflen)) {
1143                         put_io_u(td, io_u);
1144                         break;
1145                 }
1146
1147                 io_prep_pread(&io_u->iocb, td->fd, io_u->buf, io_u->buflen, io_u->offset);
1148                 ret = io_u_queue(td, io_u);
1149                 if (ret) {
1150                         put_io_u(td, io_u);
1151                         td->error = ret;
1152                         break;
1153                 }
1154
1155                 /*
1156                  * we have one pending to verify, do that while the next
1157                  * we are doing io on the next one
1158                  */
1159                 if (async_do_verify(td, &v_io_u))
1160                         break;
1161
1162                 ret = io_u_getevents(td, 1, 1, NULL);
1163                 if (ret != 1) {
1164                         if (ret < 0)
1165                                 td->error = ret;
1166                         break;
1167                 }
1168
1169                 v_io_u = ev_to_iou(td->aio_events);
1170
1171                 td->cur_off = v_io_u->offset + v_io_u->buflen;
1172
1173                 /*
1174                  * if we can't submit more io, we need to verify now
1175                  */
1176                 if (queue_full(td) && async_do_verify(td, &v_io_u))
1177                         break;
1178
1179         } while (1);
1180
1181         async_do_verify(td, &v_io_u);
1182
1183         if (td->cur_depth)
1184                 cleanup_pending_aio(td);
1185
1186         td_set_runstate(td, TD_RUNNING);
1187 }
1188
1189 static void do_async_io(struct thread_data *td)
1190 {
1191         struct timeval s, e;
1192         unsigned long usec;
1193
1194         while (td->this_io_bytes < td->io_size) {
1195                 struct timespec ts = { .tv_sec = 0, .tv_nsec = 0};
1196                 struct timespec *timeout;
1197                 int ret, min_evts = 0;
1198                 struct io_u *io_u;
1199                 unsigned int bytes_done;
1200
1201                 if (td->terminate)
1202                         break;
1203
1204                 io_u = get_io_u(td);
1205                 if (!io_u)
1206                         break;
1207
1208                 memcpy(&s, &io_u->start_time, sizeof(s));
1209
1210                 ret = io_u_queue(td, io_u);
1211                 if (ret) {
1212                         put_io_u(td, io_u);
1213                         td->error = ret;
1214                         break;
1215                 }
1216
1217                 gettimeofday(&io_u->issue_time, NULL);
1218                 add_slat_sample(td, mtime_since(&io_u->start_time, &io_u->issue_time));
1219                 if (td->cur_depth < td->aio_depth) {
1220                         timeout = &ts;
1221                         min_evts = 0;
1222                 } else {
1223                         timeout = NULL;
1224                         min_evts = 1;
1225                 }
1226
1227                 ret = io_u_getevents(td, min_evts, td->cur_depth, timeout);
1228                 if (ret < 0) {
1229                         td->error = ret;
1230                         break;
1231                 } else if (!ret)
1232                         continue;
1233
1234                 bytes_done = ios_completed(td, ret);
1235
1236                 /*
1237                  * the rate is batched for now, it should work for batches
1238                  * of completions except the very first one which may look
1239                  * a little bursty
1240                  */
1241                 gettimeofday(&e, NULL);
1242                 usec = utime_since(&s, &e);
1243
1244                 rate_throttle(td, usec, bytes_done);
1245
1246                 if (check_min_rate(td, &e)) {
1247                         td->error = ENODATA;
1248                         break;
1249                 }
1250
1251                 if (runtime_exceeded(td, &e))
1252                         break;
1253
1254                 if (td->thinktime)
1255                         usec_sleep(td->thinktime);
1256
1257                 if (should_fsync(td) && td->fsync_blocks &&
1258                     (td->io_blocks % td->fsync_blocks) == 0)
1259                         fsync(td->fd);
1260         }
1261
1262         if (td->cur_depth)
1263                 cleanup_pending_aio(td);
1264
1265         if (should_fsync(td))
1266                 fsync(td->fd);
1267 }
1268
1269 static void cleanup_aio(struct thread_data *td)
1270 {
1271         io_destroy(td->aio_ctx);
1272
1273         if (td->aio_events)
1274                 free(td->aio_events);
1275 }
1276
1277 static int init_aio(struct thread_data *td)
1278 {
1279         if (io_queue_init(td->aio_depth, &td->aio_ctx)) {
1280                 td->error = errno;
1281                 return 1;
1282         }
1283
1284         td->aio_events = malloc(td->aio_depth * sizeof(struct io_event));
1285         return 0;
1286 }
1287
1288 static void cleanup_io_u(struct thread_data *td)
1289 {
1290         struct list_head *entry, *n;
1291         struct io_u *io_u;
1292
1293         list_for_each_safe(entry, n, &td->io_u_freelist) {
1294                 io_u = list_entry(entry, struct io_u, list);
1295
1296                 list_del(&io_u->list);
1297                 free(io_u);
1298         }
1299
1300         if (td->mem_type == MEM_MALLOC)
1301                 free(td->orig_buffer);
1302         else if (td->mem_type == MEM_SHM) {
1303                 struct shmid_ds sbuf;
1304
1305                 shmdt(td->orig_buffer);
1306                 shmctl(td->shm_id, IPC_RMID, &sbuf);
1307         }
1308 }
1309
1310 static int init_io_u(struct thread_data *td)
1311 {
1312         struct io_u *io_u;
1313         int i, max_units, mem_size;
1314         char *p;
1315
1316         if (!td->use_aio)
1317                 max_units = 1;
1318         else
1319                 max_units = td->aio_depth;
1320
1321         mem_size = td->max_bs * max_units + MASK;
1322
1323         if (td->mem_type == MEM_MALLOC)
1324                 td->orig_buffer = malloc(mem_size);
1325         else if (td->mem_type == MEM_SHM) {
1326                 td->shm_id = shmget(IPC_PRIVATE, mem_size, IPC_CREAT | 0600);
1327                 if (td->shm_id < 0) {
1328                         td->error = errno;
1329                         perror("shmget");
1330                         return 1;
1331                 }
1332
1333                 td->orig_buffer = shmat(td->shm_id, NULL, 0);
1334                 if (td->orig_buffer == (void *) -1) {
1335                         td->error = errno;
1336                         perror("shmat");
1337                         return 1;
1338                 }
1339         }
1340
1341         INIT_LIST_HEAD(&td->io_u_freelist);
1342         INIT_LIST_HEAD(&td->io_u_busylist);
1343         INIT_LIST_HEAD(&td->io_hist_list);
1344
1345         p = ALIGN(td->orig_buffer);
1346         for (i = 0; i < max_units; i++) {
1347                 io_u = malloc(sizeof(*io_u));
1348                 memset(io_u, 0, sizeof(*io_u));
1349                 INIT_LIST_HEAD(&io_u->list);
1350
1351                 io_u->buf = p + td->max_bs * i;
1352                 list_add(&io_u->list, &td->io_u_freelist);
1353         }
1354
1355         return 0;
1356 }
1357
1358 static void setup_log(struct io_log **log)
1359 {
1360         struct io_log *l = malloc(sizeof(*l));
1361
1362         l->nr_samples = 0;
1363         l->max_samples = 1024;
1364         l->log = malloc(l->max_samples * sizeof(struct io_sample));
1365         *log = l;
1366 }
1367
1368 static void finish_log(struct thread_data *td, struct io_log *log, char *name)
1369 {
1370         char file_name[128];
1371         FILE *f;
1372         unsigned int i;
1373
1374         sprintf(file_name, "client%d_%s.log", td->thread_number, name);
1375         f = fopen(file_name, "w");
1376         if (!f) {
1377                 perror("fopen log");
1378                 return;
1379         }
1380
1381         for (i = 0; i < log->nr_samples; i++)
1382                 fprintf(f, "%lu, %lu\n", log->log[i].time, log->log[i].val);
1383
1384         fclose(f);
1385         free(log->log);
1386         free(log);
1387 }
1388
1389 static int create_file(struct thread_data *td)
1390 {
1391         unsigned long long left;
1392         unsigned int bs;
1393         char *b;
1394         int r;
1395
1396         /*
1397          * unless specifically asked for overwrite, let normal io extend it
1398          */
1399         if (td_write(td) && !td->overwrite)
1400                 return 0;
1401
1402         if (!td->file_size) {
1403                 fprintf(stderr, "Need size for create\n");
1404                 td->error = EINVAL;
1405                 return 1;
1406         }
1407
1408         printf("Client%d: Laying out IO file\n", td->thread_number);
1409
1410         td->fd = open(td->file_name, O_WRONLY | O_CREAT | O_TRUNC, 0644);
1411         if (td->fd < 0) {
1412                 td->error = errno;
1413                 return 1;
1414         }
1415
1416         if (ftruncate(td->fd, td->file_size) == -1) {
1417                 td->error = errno;
1418                 return 1;
1419         }
1420
1421         td->io_size = td->file_size;
1422         b = malloc(td->max_bs);
1423         memset(b, 0, td->max_bs);
1424
1425         left = td->file_size;
1426         while (left) {
1427                 bs = td->max_bs;
1428                 if (bs > left)
1429                         bs = left;
1430
1431                 r = write(td->fd, b, bs);
1432
1433                 if (r == (int) bs) {
1434                         left -= bs;
1435                         continue;
1436                 } else {
1437                         if (r < 0)
1438                                 td->error = errno;
1439                         else
1440                                 td->error = EIO;
1441
1442                         break;
1443                 }
1444         }
1445
1446         if (td->create_fsync)
1447                 fsync(td->fd);
1448
1449         close(td->fd);
1450         td->fd = -1;
1451         free(b);
1452         return 0;
1453 }
1454
1455 static int file_exists(struct thread_data *td)
1456 {
1457         struct stat st;
1458
1459         if (stat(td->file_name, &st) != -1)
1460                 return 1;
1461
1462         return errno != ENOENT;
1463 }
1464
1465 static int get_file_size(struct thread_data *td)
1466 {
1467         size_t bytes = 0;
1468         struct stat st;
1469
1470         if (fstat(td->fd, &st) == -1) {
1471                 td->error = errno;
1472                 return 1;
1473         }
1474
1475         /*
1476          * if block device, get size via BLKGETSIZE64 ioctl. try that as well
1477          * if this is a link, fall back to st.st_size if it fails
1478          */
1479         if (S_ISBLK(st.st_mode) || S_ISLNK(st.st_mode)) {
1480                 if (ioctl(td->fd, BLKGETSIZE64, &bytes)) {
1481                         if (S_ISBLK(st.st_mode)) {
1482                                 td->error = errno;
1483                                 return 1;
1484                         } else
1485                                 bytes = st.st_size;
1486                 }
1487         } else
1488                 bytes = st.st_size;
1489
1490         if (td_read(td)) {
1491                 if (td->file_size > bytes)
1492                         bytes = td->file_size;
1493         } else {
1494                 if (!td->file_size)
1495                         td->file_size = 1024 * 1024 * 1024;
1496
1497                 bytes = td->file_size;
1498         }
1499
1500         if (td->file_offset > bytes) {
1501                 fprintf(stderr, "Client%d: offset larger than length\n", td->thread_number);
1502                 return 1;
1503         }
1504
1505         td->io_size = bytes - td->file_offset;
1506         if (td->io_size == 0) {
1507                 fprintf(stderr, "Client%d: no io blocks\n", td->thread_number);
1508                 td->error = EINVAL;
1509                 return 1;
1510         }
1511
1512         return 0;
1513 }
1514
1515 static int setup_file(struct thread_data *td)
1516 {
1517         int flags = 0;
1518
1519         if (!file_exists(td)) {
1520                 if (!td->create_file) {
1521                         td->error = ENOENT;
1522                         return 1;
1523                 }
1524                 if (create_file(td))
1525                         return 1;
1526         }
1527
1528         if (td->odirect)
1529                 flags |= O_DIRECT;
1530
1531         if (td_read(td))
1532                 td->fd = open(td->file_name, flags | O_RDONLY);
1533         else {
1534                 if (!td->overwrite)
1535                         flags |= O_TRUNC;
1536                 if (td->sync_io)
1537                         flags |= O_SYNC;
1538                 if (td->verify)
1539                         flags |= O_RDWR;
1540                 else
1541                         flags |= O_WRONLY;
1542
1543                 td->fd = open(td->file_name, flags | O_CREAT, 0600);
1544         }
1545
1546         if (td->fd == -1) {
1547                 td->error = errno;
1548                 return 1;
1549         }
1550
1551         if (get_file_size(td))
1552                 return 1;
1553
1554         if (td_write(td) && ftruncate(td->fd, td->file_size) == -1) {
1555                 td->error = errno;
1556                 return 1;
1557         }
1558
1559         if (td->invalidate_cache) {
1560                 if (fadvise(td->fd, td->file_offset, td->file_size, POSIX_FADV_DONTNEED) < 0) {
1561                         td->error = errno;
1562                         return 1;
1563                 }
1564         }
1565
1566         return 0;
1567 }
1568
1569 static void clear_io_state(struct thread_data *td)
1570 {
1571         if (!td->use_aio)
1572                 lseek(td->fd, SEEK_SET, 0);
1573
1574         td->cur_off = 0;
1575         td->last_bytes = 0;
1576         td->stat_io_bytes = 0;
1577         td->this_io_bytes = 0;
1578
1579         if (td->file_map)
1580                 memset(td->file_map, 0, td->num_maps * sizeof(long));
1581 }
1582
1583 static void *thread_main(int shm_id, int offset, char *argv[])
1584 {
1585         struct thread_data *td;
1586         int ret = 1;
1587         void *data;
1588
1589         setsid();
1590
1591         data = shmat(shm_id, NULL, 0);
1592         if (data == (void *) -1) {
1593                 perror("shmat");
1594                 return NULL;
1595         }
1596
1597         td = data + offset * sizeof(struct thread_data);
1598         td->pid = getpid();
1599
1600         if (init_io_u(td))
1601                 goto err;
1602
1603         if (sched_setaffinity(td->pid, sizeof(td->cpumask), &td->cpumask) == -1) {
1604                 td->error = errno;
1605                 goto err;
1606         }
1607
1608         sprintf(argv[0], "fio%d", offset);
1609
1610         if (td->use_aio && init_aio(td))
1611                 goto err;
1612
1613         if (td->ioprio) {
1614                 if (ioprio_set(IOPRIO_WHO_PROCESS, 0, td->ioprio) == -1) {
1615                         td->error = errno;
1616                         goto err;
1617                 }
1618         }
1619
1620         sem_post(&startup_sem);
1621         sem_wait(&td->mutex);
1622
1623         if (!td->create_serialize && setup_file(td))
1624                 goto err;
1625
1626         if (init_random_state(td))
1627                 goto err;
1628
1629         gettimeofday(&td->start, NULL);
1630
1631         getrusage(RUSAGE_SELF, &td->ru_start);
1632
1633         while (td->loops--) {
1634                 gettimeofday(&td->stat_sample_time, NULL);
1635
1636                 if (td->ratemin)
1637                         memcpy(&td->lastrate, &td->stat_sample_time, sizeof(td->lastrate));
1638
1639                 clear_io_state(td);
1640                 prune_io_piece_log(td);
1641
1642                 if (!td->use_aio)
1643                         do_sync_io(td);
1644                 else
1645                         do_async_io(td);
1646
1647                 if (td->error)
1648                         break;
1649
1650                 if (!td->verify)
1651                         continue;
1652
1653                 clear_io_state(td);
1654
1655                 if (!td->use_aio)
1656                         do_sync_verify(td);
1657                 else
1658                         do_async_verify(td);
1659
1660                 if (td->error)
1661                         break;
1662         }
1663
1664         td->runtime = mtime_since_now(&td->start);
1665         getrusage(RUSAGE_SELF, &td->ru_end);
1666         ret = 0;
1667
1668         if (td->bw_log)
1669                 finish_log(td, td->bw_log, "bw");
1670         if (td->lat_log)
1671                 finish_log(td, td->lat_log, "lat");
1672
1673         if (exitall_on_terminate)
1674                 sig_handler(0);
1675
1676 err:
1677         if (td->fd != -1) {
1678                 close(td->fd);
1679                 td->fd = -1;
1680         }
1681         if (td->use_aio)
1682                 cleanup_aio(td);
1683         cleanup_io_u(td);
1684         if (ret) {
1685                 sem_post(&startup_sem);
1686                 sem_wait(&td->mutex);
1687         }
1688         td_set_runstate(td, TD_EXITED);
1689         shmdt(data);
1690         return NULL;
1691 }
1692
1693 static void free_shm(void)
1694 {
1695         struct shmid_ds sbuf;
1696
1697         if (threads) {
1698                 shmdt(threads);
1699                 threads = NULL;
1700                 shmctl(shm_id, IPC_RMID, &sbuf);
1701         }
1702 }
1703
1704 static int calc_lat(struct io_stat *is, unsigned long *min, unsigned long *max,
1705                     double *mean, double *dev)
1706 {
1707         double n;
1708
1709         if (is->samples == 0)
1710                 return 0;
1711
1712         *min = is->min_val;
1713         *max = is->max_val;
1714
1715         n = (double) is->samples;
1716         *mean = (double) is->val / n;
1717         *dev = sqrt(((double) is->val_sq - (*mean * *mean) / n) / (n - 1));
1718         return 1;
1719 }
1720
1721 static void show_thread_status(struct thread_data *td)
1722 {
1723         int prio, prio_class;
1724         unsigned long min, max, bw = 0, ctx;
1725         double mean, dev, usr_cpu, sys_cpu;
1726
1727         if (!td->io_bytes && !td->error)
1728                 return;
1729
1730         if (td->runtime)
1731                 bw = td->io_bytes / td->runtime;
1732
1733         prio = td->ioprio & 0xff;
1734         prio_class = td->ioprio >> IOPRIO_CLASS_SHIFT;
1735
1736         printf("Client%d: err=%2d, io=%6luMiB, bw=%6luKiB/s, runt=%6lumsec\n", td->thread_number, td->error, td->io_bytes >> 20, bw, td->runtime);
1737
1738         if (calc_lat(&td->slat_stat, &min, &max, &mean, &dev))
1739                 printf("  slat (msec): min=%5lu, max=%5lu, avg=%5.02f, dev=%5.02f\n", min, max, mean, dev);
1740         if (calc_lat(&td->clat_stat, &min, &max, &mean, &dev))
1741                 printf("  clat (msec): min=%5lu, max=%5lu, avg=%5.02f, dev=%5.02f\n", min, max, mean, dev);
1742         if (calc_lat(&td->bw_stat, &min, &max, &mean, &dev))
1743                 printf("  bw (KiB/s) : min=%5lu, max=%5lu, avg=%5.02f, dev=%5.02f\n", min, max, mean, dev);
1744
1745         if (td->runtime) {
1746                 unsigned long t;
1747
1748                 t = mtime_since(&td->ru_start.ru_utime, &td->ru_end.ru_utime);
1749                 usr_cpu = (double) t * 100 / (double) td->runtime;
1750
1751                 t = mtime_since(&td->ru_start.ru_stime, &td->ru_end.ru_stime);
1752                 sys_cpu = (double) t * 100 / (double) td->runtime;
1753         } else {
1754                 usr_cpu = 0;
1755                 sys_cpu = 0;
1756         }
1757
1758         ctx = td->ru_end.ru_nvcsw + td->ru_end.ru_nivcsw - (td->ru_start.ru_nvcsw + td->ru_start.ru_nivcsw);
1759
1760         printf("  cpu        : usr=%3.2f%%, sys=%3.2f%%, ctx=%lu\n", usr_cpu, sys_cpu, ctx);
1761 }
1762
1763 static int setup_rate(struct thread_data *td)
1764 {
1765         int nr_reads_per_sec;
1766
1767         if (!td->rate)
1768                 return 0;
1769
1770         if (td->rate < td->ratemin) {
1771                 fprintf(stderr, "min rate larger than nominal rate\n");
1772                 return -1;
1773         }
1774
1775         nr_reads_per_sec = (td->rate * 1024) / td->min_bs;
1776         td->rate_usec_cycle = 1000000 / nr_reads_per_sec;
1777         td->rate_pending_usleep = 0;
1778         return 0;
1779 }
1780
1781 static struct thread_data *get_new_job(int global)
1782 {
1783         struct thread_data *td;
1784
1785         if (global)
1786                 return &def_thread;
1787         if (thread_number >= max_jobs)
1788                 return NULL;
1789
1790         td = &threads[thread_number++];
1791         memset(td, 0, sizeof(*td));
1792
1793         td->fd = -1;
1794         td->thread_number = thread_number;
1795
1796         td->ddir = def_thread.ddir;
1797         td->ioprio = def_thread.ioprio;
1798         td->sequential = def_thread.sequential;
1799         td->bs = def_thread.bs;
1800         td->min_bs = def_thread.min_bs;
1801         td->max_bs = def_thread.max_bs;
1802         td->odirect = def_thread.odirect;
1803         td->thinktime = def_thread.thinktime;
1804         td->fsync_blocks = def_thread.fsync_blocks;
1805         td->start_delay = def_thread.start_delay;
1806         td->timeout = def_thread.timeout;
1807         td->use_aio = def_thread.use_aio;
1808         td->create_file = def_thread.create_file;
1809         td->overwrite = def_thread.overwrite;
1810         td->invalidate_cache = def_thread.invalidate_cache;
1811         td->file_size = def_thread.file_size;
1812         td->file_offset = def_thread.file_offset;
1813         td->rate = def_thread.rate;
1814         td->ratemin = def_thread.ratemin;
1815         td->ratecycle = def_thread.ratecycle;
1816         td->aio_depth = def_thread.aio_depth;
1817         td->sync_io = def_thread.sync_io;
1818         td->mem_type = def_thread.mem_type;
1819         td->bw_avg_time = def_thread.bw_avg_time;
1820         td->create_serialize = def_thread.create_serialize;
1821         td->create_fsync = def_thread.create_fsync;
1822         td->loops = def_thread.loops;
1823         td->verify = def_thread.verify;
1824         td->stonewall = def_thread.stonewall;
1825         memcpy(&td->cpumask, &def_thread.cpumask, sizeof(td->cpumask));
1826
1827         return td;
1828 }
1829
1830 static void put_job(struct thread_data *td)
1831 {
1832         memset(&threads[td->thread_number - 1], 0, sizeof(*td));
1833         thread_number--;
1834 }
1835
1836 static int add_job(struct thread_data *td, const char *filename, int prioclass,
1837                    int prio)
1838 {
1839         if (td == &def_thread)
1840                 return 0;
1841
1842         strcpy(td->file_name, filename);
1843         sem_init(&td->mutex, 1, 0);
1844         td->ioprio = (prioclass << IOPRIO_CLASS_SHIFT) | prio;
1845
1846         td->clat_stat.min_val = ULONG_MAX;
1847         td->slat_stat.min_val = ULONG_MAX;
1848         td->bw_stat.min_val = ULONG_MAX;
1849
1850         run_str[td->thread_number - 1] = 'P';
1851
1852         if (td->use_aio && !td->aio_depth)
1853                 td->aio_depth = 1;
1854
1855         if (td->min_bs == -1U)
1856                 td->min_bs = td->bs;
1857         if (td->max_bs == -1U)
1858                 td->max_bs = td->bs;
1859         if (td_read(td))
1860                 td->verify = 0;
1861
1862         if (setup_rate(td))
1863                 return -1;
1864
1865         if (write_lat_log)
1866                 setup_log(&td->lat_log);
1867         if (write_bw_log)
1868                 setup_log(&td->bw_log);
1869
1870         printf("Client%d: file=%s, rw=%d, prio=%d/%d, seq=%d, odir=%d, bs=%d-%d, rate=%d, aio=%d, aio_depth=%d\n", td->thread_number, filename, td->ddir, prioclass, prio, td->sequential, td->odirect, td->min_bs, td->max_bs, td->rate, td->use_aio, td->aio_depth);
1871         return 0;
1872 }
1873
1874 static void fill_cpu_mask(cpu_set_t cpumask, int cpu)
1875 {
1876         unsigned int i;
1877
1878         CPU_ZERO(&cpumask);
1879
1880         for (i = 0; i < sizeof(int) * 8; i++) {
1881                 if ((1 << i) & cpu)
1882                         CPU_SET(i, &cpumask);
1883         }
1884 }
1885
1886 unsigned long get_mult(char c)
1887 {
1888         switch (c) {
1889                 case 'k':
1890                 case 'K':
1891                         return 1024;
1892                 case 'm':
1893                 case 'M':
1894                         return 1024 * 1024;
1895                 case 'g':
1896                 case 'G':
1897                         return 1024 * 1024 * 1024;
1898                 default:
1899                         return 1;
1900         }
1901 }
1902
1903 /*
1904  * convert string after '=' into decimal value, noting any size suffix
1905  */
1906 static int str_cnv(char *p, unsigned long long *val)
1907 {
1908         char *str;
1909         int len;
1910
1911         str = strstr(p, "=");
1912         if (!str)
1913                 return 1;
1914
1915         str++;
1916         len = strlen(str);
1917
1918         *val = strtoul(str, NULL, 10);
1919         if (*val == ULONG_MAX && errno == ERANGE)
1920                 return 1;
1921
1922         *val *= get_mult(str[len - 2]);
1923         return 0;
1924 }
1925
1926 static int check_strcnv(char *p, char *name, unsigned long long *val)
1927 {
1928         if (!strstr(p, name))
1929                 return 1;
1930
1931         return str_cnv(p, val);
1932 }
1933
1934 static int check_str(char *p, char *name, char *option)
1935 {
1936         char *s = strstr(p, name);
1937
1938         if (!s)
1939                 return 1;
1940
1941         s += strlen(name);
1942         if (strstr(s, option))
1943                 return 0;
1944
1945         return 1;
1946 }
1947
1948 static int check_range(char *p, char *name, unsigned long *s, unsigned long *e)
1949 {
1950         char str[128];
1951         char s1, s2;
1952
1953         sprintf(str, "%s=%%lu%%c-%%lu%%c", name);
1954         if (sscanf(p, str, s, &s1, e, &s2) == 4) {
1955                 *s *= get_mult(s1);
1956                 *e *= get_mult(s2);
1957                 return 0;
1958         }
1959
1960         sprintf(str, "%s = %%lu%%c-%%lu%%c", name);
1961         if (sscanf(p, str, s, &s1, e, &s2) == 4) {
1962                 *s *= get_mult(s1);
1963                 *e *= get_mult(s2);
1964                 return 0;
1965         }
1966
1967         sprintf(str, "%s=%%lu-%%lu", name);
1968         if (sscanf(p, str, s, e) == 2)
1969                 return 0;
1970
1971         sprintf(str, "%s = %%lu-%%lu", name);
1972         if (sscanf(p, str, s, e) == 2)
1973                 return 0;
1974
1975         return 1;
1976
1977 }
1978
1979 static int check_int(char *p, char *name, unsigned int *val)
1980 {
1981         char str[128];
1982
1983         sprintf(str, "%s=%%d", name);
1984         if (sscanf(p, str, val) == 1)
1985                 return 0;
1986
1987         sprintf(str, "%s = %%d", name);
1988         if (sscanf(p, str, val) == 1)
1989                 return 0;
1990
1991         return 1;
1992 }
1993
1994 static int is_empty_or_comment(char *line)
1995 {
1996         unsigned int i;
1997
1998         for (i = 0; i < strlen(line); i++) {
1999                 if (line[i] == ';')
2000                         return 1;
2001                 if (!isspace(line[i]) && !iscntrl(line[i]))
2002                         return 0;
2003         }
2004
2005         return 1;
2006 }
2007
2008 static int parse_jobs_ini(char *file)
2009 {
2010         unsigned int prioclass, prio, cpu, global;
2011         unsigned long long ull;
2012         unsigned long ul1, ul2;
2013         struct thread_data *td;
2014         char *string, *name;
2015         fpos_t off;
2016         FILE *f;
2017         char *p;
2018
2019         f = fopen(file, "r");
2020         if (!f) {
2021                 perror("fopen");
2022                 return 1;
2023         }
2024
2025         string = malloc(4096);
2026         name = malloc(256);
2027
2028         while ((p = fgets(string, 4096, f)) != NULL) {
2029                 if (is_empty_or_comment(p))
2030                         continue;
2031                 if (sscanf(p, "[%s]", name) != 1)
2032                         continue;
2033
2034                 global = !strncmp(name, "global", 6);
2035
2036                 name[strlen(name) - 1] = '\0';
2037
2038                 td = get_new_job(global);
2039                 if (!td)
2040                         break;
2041
2042                 prioclass = 2;
2043                 prio = 4;
2044
2045                 fgetpos(f, &off);
2046                 while ((p = fgets(string, 4096, f)) != NULL) {
2047                         if (is_empty_or_comment(p))
2048                                 continue;
2049                         if (strstr(p, "["))
2050                                 break;
2051                         if (!check_int(p, "rw", &td->ddir)) {
2052                                 fgetpos(f, &off);
2053                                 continue;
2054                         }
2055                         if (!check_int(p, "prio", &prio)) {
2056                                 fgetpos(f, &off);
2057                                 continue;
2058                         }
2059                         if (!check_int(p, "prioclass", &prioclass)) {
2060                                 fgetpos(f, &off);
2061                                 continue;
2062                         }
2063                         if (!check_int(p, "direct", &td->odirect)) {
2064                                 fgetpos(f, &off);
2065                                 continue;
2066                         }
2067                         if (!check_int(p, "rate", &td->rate)) {
2068                                 fgetpos(f, &off);
2069                                 continue;
2070                         }
2071                         if (!check_int(p, "ratemin", &td->ratemin)) {
2072                                 fgetpos(f, &off);
2073                                 continue;
2074                         }
2075                         if (!check_int(p, "ratecycle", &td->ratecycle)) {
2076                                 fgetpos(f, &off);
2077                                 continue;
2078                         }
2079                         if (!check_int(p, "thinktime", &td->thinktime)) {
2080                                 fgetpos(f, &off);
2081                                 continue;
2082                         }
2083                         if (!check_int(p, "cpumask", &cpu)) {
2084                                 fill_cpu_mask(td->cpumask, cpu);
2085                                 fgetpos(f, &off);
2086                                 continue;
2087                         }
2088                         if (!check_int(p, "fsync", &td->fsync_blocks)) {
2089                                 fgetpos(f, &off);
2090                                 continue;
2091                         }
2092                         if (!check_int(p, "startdelay", &td->start_delay)) {
2093                                 fgetpos(f, &off);
2094                                 continue;
2095                         }
2096                         if (!check_int(p, "timeout", &td->timeout)) {
2097                                 fgetpos(f, &off);
2098                                 continue;
2099                         }
2100                         if (!check_int(p, "invalidate",&td->invalidate_cache)) {
2101                                 fgetpos(f, &off);
2102                                 continue;
2103                         }
2104                         if (!check_int(p, "aio_depth", &td->aio_depth)) {
2105                                 fgetpos(f, &off);
2106                                 continue;
2107                         }
2108                         if (!check_int(p, "sync", &td->sync_io)) {
2109                                 fgetpos(f, &off);
2110                                 continue;
2111                         }
2112                         if (!check_int(p, "bwavgtime", &td->bw_avg_time)) {
2113                                 fgetpos(f, &off);
2114                                 continue;
2115                         }
2116                         if (!check_int(p, "create_serialize", &td->create_serialize)) {
2117                                 fgetpos(f, &off);
2118                                 continue;
2119                         }
2120                         if (!check_int(p, "create_fsync", &td->create_fsync)) {
2121                                 fgetpos(f, &off);
2122                                 continue;
2123                         }
2124                         if (!check_int(p, "loops", &td->loops)) {
2125                                 fgetpos(f, &off);
2126                                 continue;
2127                         }
2128                         if (!check_int(p, "verify", &td->verify)) {
2129                                 fgetpos(f, &off);
2130                                 continue;
2131                         }
2132                         if (!check_range(p, "bsrange", &ul1, &ul2)) {
2133                                 if (ul1 & 511)
2134                                         printf("bad min block size, must be a multiple of 512\n");
2135                                 else
2136                                         td->min_bs = ul1;
2137                                 if (ul2 & 511)
2138                                         printf("bad max block size, must be a multiple of 512\n");
2139                                 else
2140                                         td->max_bs = ul2;
2141                                 fgetpos(f, &off);
2142                                 continue;
2143                         }
2144                         if (!check_strcnv(p, "bs", &ull)) {
2145                                 if (ull & 511)
2146                                         printf("bad block size, must be a multiple of 512\n");
2147                                 else
2148                                         td->bs = ull;
2149                                 fgetpos(f, &off);
2150                                 continue;
2151                         }
2152                         if (!check_strcnv(p, "size", &td->file_size)) {
2153                                 fgetpos(f, &off);
2154                                 continue;
2155                         }
2156                         if (!check_strcnv(p, "offset", &td->file_offset)) {
2157                                 fgetpos(f, &off);
2158                                 continue;
2159                         }
2160                         if (!check_str(p, "mem", "malloc")) {
2161                                 td->mem_type = MEM_MALLOC;
2162                                 fgetpos(f, &off);
2163                                 continue;
2164                         }
2165                         if (!check_str(p, "mem", "shm")) {
2166                                 td->mem_type = MEM_SHM;
2167                                 fgetpos(f, &off);
2168                                 continue;
2169                         }
2170                         if (!strncmp(p, "sequential", 10)) {
2171                                 td->sequential = 1;
2172                                 fgetpos(f, &off);
2173                                 continue;
2174                         }
2175                         if (!strncmp(p, "random", 6)) {
2176                                 td->sequential = 0;
2177                                 fgetpos(f, &off);
2178                                 continue;
2179                         }
2180                         if (!strncmp(p, "aio", 3)) {
2181                                 td->use_aio = 1;
2182                                 fgetpos(f, &off);
2183                                 continue;
2184                         }
2185                         if (!strncmp(p, "create", 6)) {
2186                                 td->create_file = 1;
2187                                 fgetpos(f, &off);
2188                                 continue;
2189                         }
2190                         if (!strncmp(p, "overwrite", 9)) {
2191                                 td->overwrite = 1;
2192                                 fgetpos(f, &off);
2193                                 continue;
2194                         }
2195                         if (!strncmp(p, "exitall", 7)) {
2196                                 exitall_on_terminate = 1;
2197                                 fgetpos(f, &off);
2198                                 continue;
2199                         }
2200                         if (!strncmp(p, "stonewall", 9)) {
2201                                 td->stonewall = 1;
2202                                 fgetpos(f, &off);
2203                                 continue;
2204                         }
2205                         printf("Client%d: bad option %s\n",td->thread_number,p);
2206                 }
2207                 fsetpos(f, &off);
2208
2209                 if (add_job(td, name, prioclass, prio))
2210                         put_job(td);
2211         }
2212
2213         free(string);
2214         free(name);
2215         fclose(f);
2216         return 0;
2217 }
2218
2219 static int parse_options(int argc, char *argv[])
2220 {
2221         int i;
2222
2223         for (i = 1; i < argc; i++) {
2224                 char *parm = argv[i];
2225
2226                 if (parm[0] != '-')
2227                         break;
2228
2229                 parm++;
2230                 switch (*parm) {
2231                         case 's':
2232                                 parm++;
2233                                 def_thread.sequential = !!atoi(parm);
2234                                 break;
2235                         case 'b':
2236                                 parm++;
2237                                 def_thread.bs = atoi(parm);
2238                                 def_thread.bs <<= 10;
2239                                 if (!def_thread.bs) {
2240                                         printf("bad block size\n");
2241                                         def_thread.bs = DEF_BS;
2242                                 }
2243                                 break;
2244                         case 't':
2245                                 parm++;
2246                                 def_thread.timeout = atoi(parm);
2247                                 break;
2248                         case 'r':
2249                                 parm++;
2250                                 repeatable = !!atoi(parm);
2251                                 break;
2252                         case 'R':
2253                                 parm++;
2254                                 rate_quit = !!atoi(parm);
2255                                 break;
2256                         case 'o':
2257                                 parm++;
2258                                 def_thread.odirect = !!atoi(parm);
2259                                 break;
2260                         case 'f':
2261                                 if (i + 1 >= argc) {
2262                                         printf("-f needs file as arg\n");
2263                                         break;
2264                                 }
2265                                 ini_file = strdup(argv[i+1]);
2266                                 i++;
2267                                 break;
2268                         case 'l':
2269                                 write_lat_log = 1;
2270                                 break;
2271                         case 'w':
2272                                 write_bw_log = 1;
2273                                 break;
2274                         default:
2275                                 printf("bad option %s\n", argv[i]);
2276                                 break;
2277                 }
2278         }
2279
2280         return i;
2281 }
2282
2283 static void print_thread_status(struct thread_data *td, int nr_running,
2284                                 int t_rate, int m_rate)
2285 {
2286         printf("Threads now running: %d", nr_running);
2287         if (m_rate || t_rate)
2288                 printf(", commitrate %d/%dKiB/sec", t_rate, m_rate);
2289         printf(" : [%s]\r", run_str);
2290         fflush(stdout);
2291 }
2292
2293 static void check_str_update(struct thread_data *td, int n, int t, int m)
2294 {
2295         char c = run_str[td->thread_number - 1];
2296
2297         if (td->runstate == td->old_runstate)
2298                 return;
2299
2300         switch (td->runstate) {
2301                 case TD_REAPED:
2302                         c = '_';
2303                         break;
2304                 case TD_EXITED:
2305                         c = 'E';
2306                         break;
2307                 case TD_RUNNING:
2308                         if (td_read(td)) {
2309                                 if (td->sequential)
2310                                         c = 'R';
2311                                 else
2312                                         c = 'r';
2313                         } else {
2314                                 if (td->sequential)
2315                                         c = 'W';
2316                                 else
2317                                         c = 'w';
2318                         }
2319                         break;
2320                 case TD_VERIFYING:
2321                         c = 'V';
2322                         break;
2323                 case TD_CREATED:
2324                         c = 'C';
2325                         break;
2326                 case TD_NOT_CREATED:
2327                         c = 'P';
2328                         break;
2329                 default:
2330                         printf("state %d\n", td->runstate);
2331         }
2332
2333         run_str[td->thread_number - 1] = c;
2334         print_thread_status(td, n, t, m);
2335         td->old_runstate = td->runstate;
2336 }
2337
2338 static void reap_threads(int *nr_running, int *t_rate, int *m_rate)
2339 {
2340         int i;
2341
2342         /*
2343          * reap exited threads (TD_EXITED -> TD_REAPED)
2344          */
2345         for (i = 0; i < thread_number; i++) {
2346                 struct thread_data *td = &threads[i];
2347
2348                 check_str_update(td, *nr_running, *t_rate, *m_rate);
2349
2350                 if (td->runstate != TD_EXITED)
2351                         continue;
2352
2353                 td_set_runstate(td, TD_REAPED);
2354                 waitpid(td->pid, NULL, 0);
2355                 (*nr_running)--;
2356                 (*m_rate) -= td->ratemin;
2357                 (*t_rate) -= td->rate;
2358                 check_str_update(td, *nr_running, *t_rate, *m_rate);
2359
2360                 if (td->terminate)
2361                         continue;
2362         }
2363 }
2364
2365 static void run_threads(char *argv[])
2366 {
2367         struct timeval genesis;
2368         struct thread_data *td;
2369         unsigned long spent;
2370         int i, todo, nr_running, m_rate, t_rate, nr_started;
2371
2372         printf("Starting %d threads\n", thread_number);
2373         fflush(stdout);
2374
2375         signal(SIGINT, sig_handler);
2376
2377         todo = thread_number;
2378         nr_running = 0;
2379         nr_started = 0;
2380         m_rate = t_rate = 0;
2381
2382         for (i = 0; i < thread_number; i++) {
2383                 td = &threads[i];
2384
2385                 if (!td->create_serialize)
2386                         continue;
2387
2388                 /*
2389                  * do file setup here so it happens sequentially,
2390                  * we don't want X number of threads getting their
2391                  * client data interspersed on disk
2392                  */
2393                 if (setup_file(td)) {
2394                         td_set_runstate(td, TD_REAPED);
2395                         todo--;
2396                 }
2397         }
2398
2399         gettimeofday(&genesis, NULL);
2400
2401         while (todo) {
2402                 /*
2403                  * create threads (TD_NOT_CREATED -> TD_CREATED)
2404                  */
2405                 for (i = 0; i < thread_number; i++) {
2406                         td = &threads[i];
2407
2408                         if (td->runstate != TD_NOT_CREATED)
2409                                 continue;
2410
2411                         /*
2412                          * never got a chance to start, killed by other
2413                          * thread for some reason
2414                          */
2415                         if (td->terminate) {
2416                                 todo--;
2417                                 continue;
2418                         }
2419
2420                         if (td->start_delay) {
2421                                 spent = mtime_since_now(&genesis);
2422
2423                                 if (td->start_delay * 1000 > spent)
2424                                         continue;
2425                         }
2426
2427                         if (td->stonewall && (nr_started || nr_running))
2428                                 break;
2429
2430                         td_set_runstate(td, TD_CREATED);
2431                         check_str_update(td, nr_running, t_rate, m_rate);
2432                         sem_init(&startup_sem, 1, 1);
2433                         todo--;
2434                         nr_started++;
2435
2436                         if (fork())
2437                                 sem_wait(&startup_sem);
2438                         else {
2439                                 thread_main(shm_id, i, argv);
2440                                 exit(0);
2441                         }
2442                 }
2443
2444                 /*
2445                  * start created threads (TD_CREATED -> TD_RUNNING)
2446                  */
2447                 for (i = 0; i < thread_number; i++) {
2448                         struct thread_data *td = &threads[i];
2449
2450                         if (td->runstate != TD_CREATED)
2451                                 continue;
2452
2453                         td_set_runstate(td, TD_RUNNING);
2454                         nr_running++;
2455                         nr_started--;
2456                         m_rate += td->ratemin;
2457                         t_rate += td->rate;
2458                         check_str_update(td, nr_running, t_rate, m_rate);
2459                         sem_post(&td->mutex);
2460                 }
2461
2462                 for (i = 0; i < thread_number; i++) {
2463                         struct thread_data *td = &threads[i];
2464
2465                         if (td->runstate == TD_RUNNING)
2466                                 run_str[td->thread_number - 1] = '+';
2467                         else if (td->runstate == TD_VERIFYING)
2468                                 run_str[td->thread_number - 1] = 'V';
2469                         else
2470                                 continue;
2471
2472                         check_str_update(td, nr_running, t_rate, m_rate);
2473                 }
2474
2475                 reap_threads(&nr_running, &t_rate, &m_rate);
2476
2477                 if (todo)
2478                         usleep(100000);
2479         }
2480
2481         while (nr_running) {
2482                 reap_threads(&nr_running, &t_rate, &m_rate);
2483                 usleep(10000);
2484         }
2485 }
2486
2487 int setup_thread_area(void)
2488 {
2489         /*
2490          * 1024 is too much on some machines, scale max_jobs if
2491          * we get a failure that looks like too large a shm segment
2492          */
2493         do {
2494                 int s = max_jobs * sizeof(struct thread_data);
2495
2496                 shm_id = shmget(0, s, IPC_CREAT | 0600);
2497                 if (shm_id != -1)
2498                         break;
2499                 if (errno != EINVAL) {
2500                         perror("shmget");
2501                         break;
2502                 }
2503
2504                 max_jobs >>= 1;
2505         } while (max_jobs);
2506
2507         if (shm_id == -1)
2508                 return 1;
2509
2510         threads = shmat(shm_id, NULL, 0);
2511         if (threads == (void *) -1) {
2512                 perror("shmat");
2513                 return 1;
2514         }
2515
2516         atexit(free_shm);
2517         return 0;
2518 }
2519
2520 int main(int argc, char *argv[])
2521 {
2522         static unsigned long max_run[2], min_run[2];
2523         static unsigned long max_bw[2], min_bw[2];
2524         static unsigned long io_mb[2], agg[2];
2525         int i;
2526
2527         if (setup_thread_area())
2528                 return 1;
2529
2530         if (sched_getaffinity(getpid(), sizeof(cpu_set_t), &def_thread.cpumask) == -1) {
2531                 perror("sched_getaffinity");
2532                 return 1;
2533         }
2534
2535         /*
2536          * fill globals
2537          */
2538         def_thread.ddir = DDIR_READ;
2539         def_thread.bs = DEF_BS;
2540         def_thread.min_bs = -1;
2541         def_thread.max_bs = -1;
2542         def_thread.odirect = DEF_ODIRECT;
2543         def_thread.ratecycle = DEF_RATE_CYCLE;
2544         def_thread.sequential = DEF_SEQUENTIAL;
2545         def_thread.timeout = DEF_TIMEOUT;
2546         def_thread.create_file = DEF_CREATE;
2547         def_thread.overwrite = DEF_OVERWRITE;
2548         def_thread.invalidate_cache = DEF_INVALIDATE;
2549         def_thread.sync_io = DEF_SYNCIO;
2550         def_thread.mem_type = MEM_MALLOC;
2551         def_thread.bw_avg_time = DEF_BWAVGTIME;
2552         def_thread.create_serialize = DEF_CREATE_SER;
2553         def_thread.create_fsync = DEF_CREATE_FSYNC;
2554         def_thread.loops = DEF_LOOPS;
2555         def_thread.verify = DEF_VERIFY;
2556         def_thread.stonewall = DEF_STONEWALL;
2557
2558         i = parse_options(argc, argv);
2559
2560         if (!ini_file) {
2561                 printf("Need job file\n");
2562                 return 1;
2563         }
2564
2565         if (parse_jobs_ini(ini_file))
2566                 return 1;
2567
2568         if (!thread_number) {
2569                 printf("Nothing to do\n");
2570                 return 1;
2571         }
2572
2573         run_threads(argv);
2574
2575         min_bw[0] = min_run[0] = ~0UL;
2576         min_bw[1] = min_run[1] = ~0UL;
2577         io_mb[0] = io_mb[1] = 0;
2578         agg[0] = agg[1] = 0;
2579         for (i = 0; i < thread_number; i++) {
2580                 struct thread_data *td = &threads[i];
2581                 unsigned long bw = 0;
2582
2583                 if (!td->error) {
2584                         if (td->runtime < min_run[td->ddir])
2585                                 min_run[td->ddir] = td->runtime;
2586                         if (td->runtime > max_run[td->ddir])
2587                                 max_run[td->ddir] = td->runtime;
2588
2589                         if (td->runtime)
2590                                 bw = td->io_bytes / td->runtime;
2591                         if (bw < min_bw[td->ddir])
2592                                 min_bw[td->ddir] = bw;
2593                         if (bw > max_bw[td->ddir])
2594                                 max_bw[td->ddir] = bw;
2595
2596                         io_mb[td->ddir] += td->io_bytes >> 20;
2597                 }
2598
2599                 show_thread_status(td);
2600         }
2601         
2602         if (max_run[0])
2603                 agg[0] = (io_mb[0] * 1024 * 1000) / max_run[0];
2604         if (max_run[1])
2605                 agg[1] = (io_mb[1] * 1024 * 1000) / max_run[1];
2606
2607         printf("\nRun status:\n");
2608         if (max_run[DDIR_READ])
2609                 printf("   READ: io=%luMiB, aggrb=%lu, minb=%lu, maxb=%lu, mint=%lumsec, maxt=%lumsec\n", io_mb[0], agg[0], min_bw[0], max_bw[0], min_run[0], max_run[0]);
2610         if (max_run[DDIR_WRITE])
2611                 printf("  WRITE: io=%luMiB, aggrb=%lu, minb=%lu, maxb=%lu, mint=%lumsec, maxt=%lumsec\n", io_mb[1], agg[1], min_bw[1], max_bw[1], min_run[1], max_run[1]);
2612
2613         return 0;
2614 }