/*
 * fio - the flexible io tester
 *
 * Copyright (C) 2005 Jens Axboe <axboe@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
/*
 * headers beyond the handful visible in the original excerpt, added so
 * the code below has everything it references; list.h and md5.h are
 * fio's local list and md5 helpers
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>
#include <errno.h>
#include <signal.h>
#include <time.h>
#include <ctype.h>
#include <math.h>
#include <assert.h>
#include <limits.h>
#include <sched.h>
#include <libaio.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/wait.h>
#include <sys/ipc.h>
#include <sys/shm.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <semaphore.h>
#include <sys/ioctl.h>
#include <asm/unistd.h>
#include <asm/types.h>

#include "list.h"
#include "md5.h"
#define BLKGETSIZE64	_IOR(0x12,114,size_t)

#define MAX_JOBS	(1024)

static int ioprio_set(int which, int who, int ioprio)
{
	return syscall(__NR_ioprio_set, which, who, ioprio);
}
/*
 * we want fadvise64 really, but it's so tangled... later
 */
static int fadvise(int fd, loff_t offset, size_t len, int advice)
{
#ifdef __NR_fadvise64
	/*
	 * the raw syscall wants the 64-bit offset split into two words;
	 * the exact preprocessor guard used originally is elided here
	 */
	return syscall(__NR_fadvise64, fd, (unsigned long) offset,
			(unsigned long) (offset >> 32), len, advice);
#else
	return posix_fadvise(fd, (off_t) offset, len, advice);
#endif
}

enum {
	IOPRIO_WHO_PROCESS = 1,
};

#define IOPRIO_CLASS_SHIFT	13
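
/*
 * an io priority value packs the scheduling class into the top bits and
 * the within-class level into the bottom ones, i.e.
 * (class << IOPRIO_CLASS_SHIFT) | prio, which is how add_job() builds it
 */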
/*
 * DEF_BS and DEF_LOOPS are elided in this excerpt; the values below are
 * assumed defaults (4k blocks, a single loop)
 */
#define DEF_BS			(4096)
#define DEF_TIMEOUT		(0)
#define DEF_RATE_CYCLE		(1000)
#define DEF_ODIRECT		(1)
#define DEF_SEQUENTIAL		(1)
#define DEF_RAND_REPEAT		(1)
#define DEF_OVERWRITE		(0)
#define DEF_CREATE		(1)
#define DEF_INVALIDATE		(1)
#define DEF_SYNCIO		(0)
#define DEF_RANDSEED		(0xb1899bedUL)
#define DEF_BWAVGTIME		(500)
#define DEF_CREATE_SER		(1)
#define DEF_CREATE_FSYNC	(1)
#define DEF_LOOPS		(1)
#define DEF_VERIFY		(0)
#define DEF_STONEWALL		(0)
#define DEF_NUMJOBS		(1)

/*
 * io buffers are aligned to an assumed 4k page size
 */
#define MASK	(4095)
#define ALIGN(buf)	(char *) (((unsigned long) (buf) + MASK) & ~(MASK))
static int repeatable = DEF_RAND_REPEAT;
static int rate_quit = 1;
static int write_lat_log;
static int write_bw_log;
static int exitall_on_terminate;

static int thread_number;
static char *ini_file;

static int shm_id;
static int max_jobs = MAX_JOBS;

static char run_str[MAX_JOBS + 1];
/*
 * the io unit: one pending request. fields not shown in this excerpt
 * (iocb, buf, buflen) are reconstructed from how they are used below
 */
struct io_u {
	struct iocb iocb;
	struct timeval start_time;
	struct timeval issue_time;

	char *buf;
	unsigned int buflen;
	unsigned long long offset;

	struct list_head list;
};

struct io_stat {
	unsigned long val;
	unsigned long val_sq;
	unsigned long max_val;
	unsigned long min_val;
	unsigned long samples;
};

struct io_sample {
	unsigned long time;
	unsigned long val;
};

struct io_log {
	unsigned long nr_samples;
	unsigned long max_samples;
	struct io_sample *log;
};

struct io_piece {
	struct list_head list;
	unsigned long long offset;
	unsigned int len;
};

#define FIO_HDR_MAGIC	0xf00baaef

struct verify_header {
	unsigned int fio_magic;
	unsigned int len;
	char md5_digest[MD5_HASH_WORDS * 4];
};
#define td_read(td)		((td)->ddir == DDIR_READ)
#define td_write(td)		((td)->ddir == DDIR_WRITE)
#define should_fsync(td)	(td_write(td) && !(td)->odirect)

#define BLOCKS_PER_MAP		(8 * sizeof(long))
#define TO_MAP_BLOCK(td, b)	((b) - ((td)->file_offset / (td)->min_bs))
#define RAND_MAP_IDX(td, b)	(TO_MAP_BLOCK(td, b) / BLOCKS_PER_MAP)
#define RAND_MAP_BIT(td, b)	(TO_MAP_BLOCK(td, b) & (BLOCKS_PER_MAP - 1))
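
/*
 * example: with min_bs=4096 and file_offset=0, the io at offset 2MiB is
 * block 512; on a 64-bit host (BLOCKS_PER_MAP == 64) that is bitmap
 * word 512 / 64 = 8, bit 512 & 63 = 0 of file_map
 */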
/*
 * per-thread job state. the enums and the fields not visible in this
 * excerpt (file name, descriptor, priority, block sizes, etc) are
 * reconstructed from how the rest of the code uses them
 */
enum {
	DDIR_READ = 0,
	DDIR_WRITE,
};

enum {
	MEM_MALLOC = 0,
	MEM_SHM,
};

enum {
	TD_NOT_CREATED = 0,
	TD_CREATED,
	TD_RUNNING,
	TD_VERIFYING,
	TD_EXITED,
	TD_REAPED,
};

struct thread_data {
	char file_name[256];
	char directory[256];
	int thread_number;
	int error;
	int fd;
	pid_t pid;
	char *orig_buffer;
	volatile int terminate;
	volatile int runstate;
	volatile int old_runstate;
	unsigned int ddir;
	unsigned int ioprio;
	unsigned int sequential;
	unsigned int bs;
	unsigned int min_bs;
	unsigned int max_bs;
	unsigned int odirect;
	unsigned int thinktime;
	unsigned int fsync_blocks;
	unsigned int start_delay;
	unsigned int timeout;
	unsigned int use_aio;
	unsigned int create_file;
	unsigned int overwrite;
	unsigned int invalidate_cache;
	unsigned int bw_avg_time;
	unsigned int create_serialize;
	unsigned int create_fsync;
	unsigned int loops;
	unsigned long long file_size;
	unsigned long long file_offset;
	unsigned int sync_io;
	unsigned int mem_type;
	unsigned int verify;
	unsigned int stonewall;
	unsigned int numjobs;
	cpu_set_t cpumask;

	struct drand48_data bsrange_state;
	struct drand48_data verify_state;

	int shm_id;
	unsigned long long cur_off;

	io_context_t aio_ctx;
	unsigned int aio_depth;
	struct io_event *aio_events;

	unsigned int cur_depth;
	struct list_head io_u_freelist;
	struct list_head io_u_busylist;

	unsigned int rate;
	unsigned int ratemin;
	unsigned int ratecycle;
	unsigned long rate_usec_cycle;
	long rate_pending_usleep;
	unsigned long rate_bytes;
	struct timeval lastrate;

	unsigned long runtime;		/* msec, set via mtime_since_now() */
	unsigned long long io_size;

	unsigned long io_blocks;
	unsigned long io_bytes;
	unsigned long this_io_bytes;
	unsigned long last_bytes;
	sem_t mutex;

	struct drand48_data random_state;
	unsigned long *file_map;
	unsigned int num_maps;

	/*
	 * bandwidth and latency stats
	 */
	struct io_stat clat_stat;	/* completion latency */
	struct io_stat slat_stat;	/* submission latency */
	struct io_stat bw_stat;		/* bandwidth stats */
	unsigned long stat_io_bytes;
	struct timeval stat_sample_time;

	struct io_log *lat_log;
	struct io_log *bw_log;

	struct timeval start;
	struct rusage ru_start;
	struct rusage ru_end;

	struct list_head io_hist_list;
};
static struct thread_data *threads;
static struct thread_data def_thread;

static sem_t startup_sem;

static void sig_handler(int sig)
{
	int i;

	for (i = 0; i < thread_number; i++) {
		struct thread_data *td = &threads[i];

		td->terminate = 1;
	}
}
static int init_random_state(struct thread_data *td)
{
	unsigned long seed;
	int fd, num_maps, blocks;

	fd = open("/dev/random", O_RDONLY);
	if (fd == -1) {
		td->error = errno;
		return 1;
	}

	if (read(fd, &seed, sizeof(seed)) < (int) sizeof(seed)) {
		td->error = EIO;
		close(fd);
		return 1;
	}

	close(fd);

	srand48_r(seed, &td->bsrange_state);
	srand48_r(seed, &td->verify_state);

	if (td->sequential)
		return 0;

	if (repeatable)
		seed = DEF_RANDSEED;

	blocks = (td->io_size + td->min_bs - 1) / td->min_bs;
	/* round up, so a trailing partial word still gets a map slot */
	num_maps = (blocks + BLOCKS_PER_MAP - 1) / BLOCKS_PER_MAP;
	td->file_map = malloc(num_maps * sizeof(long));
	td->num_maps = num_maps;
	memset(td->file_map, 0, num_maps * sizeof(long));

	srand48_r(seed, &td->random_state);
	return 0;
}
static unsigned long utime_since(struct timeval *s, struct timeval *e)
{
	double sec, usec;

	sec = e->tv_sec - s->tv_sec;
	usec = e->tv_usec - s->tv_usec;
	if (sec > 0 && usec < 0) {
		sec--;
		usec += 1000000;
	}

	sec *= (double) 1000000;
	return sec + usec;
}

static unsigned long utime_since_now(struct timeval *s)
{
	struct timeval t;

	gettimeofday(&t, NULL);
	return utime_since(s, &t);
}

static unsigned long mtime_since(struct timeval *s, struct timeval *e)
{
	double sec, usec;

	sec = e->tv_sec - s->tv_sec;
	usec = e->tv_usec - s->tv_usec;
	if (sec > 0 && usec < 0) {
		sec--;
		usec += 1000000;
	}

	sec *= (double) 1000;
	usec /= (double) 1000;
	return sec + usec;
}

static unsigned long mtime_since_now(struct timeval *s)
{
	struct timeval t;

	gettimeofday(&t, NULL);
	return mtime_since(s, &t);
}

static inline unsigned long msec_now(struct timeval *s)
{
	return s->tv_sec * 1000 + s->tv_usec / 1000;
}
static int random_map_free(struct thread_data *td, unsigned long long block)
{
	unsigned int idx = RAND_MAP_IDX(td, block);
	unsigned int bit = RAND_MAP_BIT(td, block);

	return (td->file_map[idx] & (1UL << bit)) == 0;
}
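
/*
 * find-first-zero-bit; get_next_free_block() below relies on a kernel
 * style ffz() helper that this excerpt does not define elsewhere, so a
 * minimal portable stand-in is sketched here
 */
static inline unsigned long ffz(unsigned long word)
{
	unsigned long bit = 0;

	while (word & 1) {
		word >>= 1;
		bit++;
	}

	return bit;
}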
static int get_next_free_block(struct thread_data *td, unsigned long long *b)
{
	int i = 0;

	*b = 0;
	while ((*b) * td->min_bs < td->io_size) {
		if (td->file_map[i] != -1UL) {
			*b += ffz(td->file_map[i]);
			return 0;
		}

		*b += BLOCKS_PER_MAP;
		i++;
	}

	return 1;
}

static void mark_random_map(struct thread_data *td, struct io_u *io_u)
{
	unsigned long block = io_u->offset / td->min_bs;
	unsigned int blocks = 0;

	while (blocks < (io_u->buflen / td->min_bs)) {
		unsigned int idx, bit;

		if (!random_map_free(td, block))
			break;

		idx = RAND_MAP_IDX(td, block);
		bit = RAND_MAP_BIT(td, block);

		assert(idx < td->num_maps);

		td->file_map[idx] |= (1UL << bit);
		block++;
		blocks++;
	}

	if ((blocks * td->min_bs) < io_u->buflen)
		io_u->buflen = blocks * td->min_bs;
}
static int get_next_offset(struct thread_data *td, unsigned long long *offset)
{
	unsigned long long b, rb;
	long r;

	if (!td->sequential) {
		unsigned long max_blocks = td->io_size / td->min_bs;
		int loops = 50;	/* retry bound; the original value is elided */

		do {
			lrand48_r(&td->random_state, &r);
			b = ((max_blocks - 1) * r / (RAND_MAX+1.0));
			rb = b + (td->file_offset / td->min_bs);
			loops--;
		} while (!random_map_free(td, rb) && loops);

		if (!loops) {
			if (get_next_free_block(td, &b))
				return 1;
		}
	} else
		b = td->last_bytes / td->min_bs;

	*offset = (b * td->min_bs) + td->file_offset;
	if (*offset > td->file_size)
		return 1;

	return 0;
}

static unsigned int get_next_buflen(struct thread_data *td)
{
	unsigned int buflen;
	long r;

	if (td->min_bs == td->max_bs)
		buflen = td->min_bs;
	else {
		lrand48_r(&td->bsrange_state, &r);
		buflen = (1 + (double) (td->max_bs - 1) * r / (RAND_MAX + 1.0));
		/* round to a min_bs multiple (min_bs assumed a power of 2) */
		buflen = (buflen + td->min_bs - 1) & ~(td->min_bs - 1);
	}

	if (buflen > td->io_size - td->this_io_bytes)
		buflen = td->io_size - td->this_io_bytes;

	return buflen;
}
static inline void add_stat_sample(struct thread_data *td, struct io_stat *is,
				   unsigned long val)
{
	if (val > is->max_val)
		is->max_val = val;
	if (val < is->min_val)
		is->min_val = val;

	is->val += val;
	is->val_sq += val * val;
	is->samples++;
}

static void add_log_sample(struct thread_data *td, struct io_log *log,
			   unsigned long val)
{
	if (log->nr_samples == log->max_samples) {
		int new_size = sizeof(struct io_sample) * log->max_samples * 2;

		log->log = realloc(log->log, new_size);
		log->max_samples <<= 1;
	}

	log->log[log->nr_samples].val = val;
	log->log[log->nr_samples].time = mtime_since_now(&td->start);
	log->nr_samples++;
}

static void add_clat_sample(struct thread_data *td, unsigned long msec)
{
	add_stat_sample(td, &td->clat_stat, msec);

	if (td->lat_log)
		add_log_sample(td, td->lat_log, msec);
}

static void add_slat_sample(struct thread_data *td, unsigned long msec)
{
	add_stat_sample(td, &td->slat_stat, msec);
}

static void add_bw_sample(struct thread_data *td)
{
	unsigned long spent = mtime_since_now(&td->stat_sample_time);
	unsigned long rate;

	if (spent < td->bw_avg_time)
		return;

	rate = (td->this_io_bytes - td->stat_io_bytes) / spent;
	add_stat_sample(td, &td->bw_stat, rate);

	if (td->bw_log)
		add_log_sample(td, td->bw_log, rate);

	gettimeofday(&td->stat_sample_time, NULL);
	td->stat_io_bytes = td->this_io_bytes;
}
/*
 * busy looping version for the last few usec
 */
static void __usec_sleep(int usec)
{
	struct timeval start;

	gettimeofday(&start, NULL);
	while (utime_since_now(&start) < usec)
		;
}

static void usec_sleep(int usec)
{
	struct timespec req = { .tv_sec = 0, .tv_nsec = usec * 1000 };
	struct timespec rem;

	do {
		rem.tv_sec = rem.tv_nsec = 0;
		if (nanosleep(&req, &rem) < 0 || !rem.tv_nsec)
			break;

		req.tv_nsec = rem.tv_nsec;
		usec = rem.tv_nsec / 1000;	/* nsec left -> usec */
	} while (usec > 0);
}
static void rate_throttle(struct thread_data *td, unsigned long time_spent,
			  unsigned int bytes)
{
	unsigned long usec_cycle;

	if (!td->rate)
		return;

	usec_cycle = td->rate_usec_cycle * (bytes / td->min_bs);

	if (time_spent < usec_cycle) {
		unsigned long s = usec_cycle - time_spent;

		td->rate_pending_usleep += s;
		if (td->rate_pending_usleep >= 100000) {
			usec_sleep(td->rate_pending_usleep);
			td->rate_pending_usleep = 0;
		}
	} else {
		long overtime = time_spent - usec_cycle;

		td->rate_pending_usleep -= overtime;
	}
}

static int check_min_rate(struct thread_data *td, struct timeval *now)
{
	unsigned long spent;
	unsigned long rate;

	/*
	 * allow a 2 second settle period in the beginning
	 */
	if (mtime_since(&td->start, now) < 2000)
		return 0;

	/*
	 * if rate_bytes is set, we've completed at least one sample period
	 */
	if (td->rate_bytes) {
		spent = mtime_since(&td->lastrate, now);
		if (spent < td->ratecycle)
			return 0;

		rate = (td->this_io_bytes - td->rate_bytes) / spent;
		if (rate < td->ratemin) {
			printf("Client%d: min rate %d not met, got %ldKiB/sec\n", td->thread_number, td->ratemin, rate);
			if (rate_quit)	/* assumed use of the rate_quit option */
				sig_handler(0);
			return 1;
		}
	}

	td->rate_bytes = td->this_io_bytes;
	memcpy(&td->lastrate, now, sizeof(*now));
	return 0;
}

static inline int runtime_exceeded(struct thread_data *td, struct timeval *t)
{
	if (!td->timeout)
		return 0;
	if (mtime_since(&td->start, t) >= td->timeout * 1000)
		return 1;

	return 0;
}
static void fill_random_bytes(struct thread_data *td,
			      unsigned char *p, unsigned int len)
{
	unsigned int todo;
	double r;

	while (len) {
		/*
		 * lrand48_r seems to be broken and only fills the bottom
		 * 32 bits, even on 64-bit archs with 64-bit longs, so use
		 * drand48_r and copy the raw double out instead
		 */
		drand48_r(&td->verify_state, &r);

		todo = sizeof(r);
		if (todo > len)
			todo = len;

		memcpy(p, &r, todo);
		p += todo;
		len -= todo;
	}
}

static void hexdump(void *buffer, int len)
{
	unsigned char *p = buffer;
	int i;

	for (i = 0; i < len; i++)
		printf("%02x", p[i]);
	printf("\n");
}

static int verify_io_u(struct io_u *io_u)
{
	struct verify_header *hdr = (struct verify_header *) io_u->buf;
	unsigned char *p = (unsigned char *) io_u->buf;
	struct md5_ctx md5_ctx;
	int ret;

	if (hdr->fio_magic != FIO_HDR_MAGIC)
		return 1;

	memset(&md5_ctx, 0, sizeof(md5_ctx));
	p += sizeof(*hdr);
	md5_update(&md5_ctx, p, hdr->len - sizeof(*hdr));

	ret = memcmp(hdr->md5_digest, md5_ctx.hash, sizeof(md5_ctx.hash));
	if (ret) {
		hexdump(hdr->md5_digest, sizeof(hdr->md5_digest));
		hexdump(md5_ctx.hash, sizeof(md5_ctx.hash));
		return 1;
	}

	return 0;
}
/*
 * fill body of io_u->buf with random data and add a header with the
 * md5 sum of that data
 */
static void populate_io_u(struct thread_data *td, struct io_u *io_u)
{
	struct md5_ctx md5_ctx;
	struct verify_header hdr;
	unsigned char *p = (unsigned char *) io_u->buf;

	hdr.fio_magic = FIO_HDR_MAGIC;
	hdr.len = io_u->buflen;
	p += sizeof(hdr);
	fill_random_bytes(td, p, io_u->buflen - sizeof(hdr));

	memset(&md5_ctx, 0, sizeof(md5_ctx));
	md5_update(&md5_ctx, p, io_u->buflen - sizeof(hdr));
	memcpy(hdr.md5_digest, md5_ctx.hash, sizeof(md5_ctx.hash));
	memcpy(io_u->buf, &hdr, sizeof(hdr));
}
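
/*
 * each verified block thus looks like:
 *
 *	+----------------------+--------------------------------------+
 *	| struct verify_header | random payload, md5-summed in header |
 *	+----------------------+--------------------------------------+
 *	|<--- sizeof(hdr) ---->|<------ buflen - sizeof(hdr) -------->|
 */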
static void put_io_u(struct thread_data *td, struct io_u *io_u)
{
	list_del(&io_u->list);
	list_add(&io_u->list, &td->io_u_freelist);
	td->cur_depth--;
}

#define queue_full(td)	(list_empty(&(td)->io_u_freelist))

static struct io_u *__get_io_u(struct thread_data *td)
{
	struct io_u *io_u;

	if (queue_full(td))
		return NULL;

	io_u = list_entry(td->io_u_freelist.next, struct io_u, list);
	list_del(&io_u->list);
	list_add(&io_u->list, &td->io_u_busylist);
	td->cur_depth++;
	return io_u;
}

static struct io_u *get_io_u(struct thread_data *td)
{
	struct io_u *io_u;

	io_u = __get_io_u(td);
	if (!io_u)
		return NULL;

	if (get_next_offset(td, &io_u->offset)) {
		put_io_u(td, io_u);
		return NULL;
	}

	io_u->buflen = get_next_buflen(td);
	if (!io_u->buflen) {
		put_io_u(td, io_u);
		return NULL;
	}

	if (io_u->buflen + io_u->offset > td->file_size)
		io_u->buflen = td->file_size - io_u->offset;

	if (!td->sequential)
		mark_random_map(td, io_u);

	td->last_bytes += io_u->buflen;

	if (td_write(td) && td->verify)
		populate_io_u(td, io_u);

	if (td->use_aio) {
		if (td_read(td))
			io_prep_pread(&io_u->iocb, td->fd, io_u->buf, io_u->buflen, io_u->offset);
		else
			io_prep_pwrite(&io_u->iocb, td->fd, io_u->buf, io_u->buflen, io_u->offset);
	}

	gettimeofday(&io_u->start_time, NULL);
	return io_u;
}
static inline void td_set_runstate(struct thread_data *td, int runstate)
{
	td->old_runstate = td->runstate;
	td->runstate = runstate;
}

static int get_next_verify(struct thread_data *td,
			   unsigned long long *offset, unsigned int *len)
{
	struct io_piece *ipo;

	if (list_empty(&td->io_hist_list))
		return 1;

	ipo = list_entry(td->io_hist_list.next, struct io_piece, list);
	list_del(&ipo->list);

	*offset = ipo->offset;
	*len = ipo->len;
	free(ipo);
	return 0;
}

static void prune_io_piece_log(struct thread_data *td)
{
	struct io_piece *ipo;

	while (!list_empty(&td->io_hist_list)) {
		ipo = list_entry(td->io_hist_list.next, struct io_piece, list);

		list_del(&ipo->list);
		free(ipo);
	}
}

/*
 * log a successful write, so we can unwind the log for verify
 */
static void log_io_piece(struct thread_data *td, struct io_u *io_u)
{
	struct io_piece *ipo = malloc(sizeof(struct io_piece));
	struct list_head *entry;

	INIT_LIST_HEAD(&ipo->list);
	ipo->offset = io_u->offset;
	ipo->len = io_u->buflen;

	/*
	 * for random io where the writes extend the file, it will typically
	 * be laid out with the blocks scattered as written. it's faster to
	 * read them in in that order again, so don't sort
	 */
	if (td->sequential || !td->overwrite) {
		list_add_tail(&ipo->list, &td->io_hist_list);
		return;
	}

	/*
	 * for random io, sort the list so verify will run faster
	 */
	entry = &td->io_hist_list;
	while ((entry = entry->prev) != &td->io_hist_list) {
		struct io_piece *__ipo = list_entry(entry, struct io_piece, list);

		if (__ipo->offset < ipo->offset)
			break;
	}

	list_add(&ipo->list, entry);
}
static void do_sync_verify(struct thread_data *td)
{
	struct timeval t;
	struct io_u *io_u = NULL;
	int ret;

	td_set_runstate(td, TD_VERIFYING);

	io_u = __get_io_u(td);

	if (fadvise(td->fd, td->file_offset, td->io_size, POSIX_FADV_DONTNEED) < 0) {
		td->error = errno;
		goto out;
	}

	do {
		if (td->terminate)
			break;

		gettimeofday(&t, NULL);
		if (runtime_exceeded(td, &t))
			break;

		if (get_next_verify(td, &io_u->offset, &io_u->buflen))
			break;

		if (td->cur_off != io_u->offset) {
			if (lseek(td->fd, io_u->offset, SEEK_SET) == -1) {
				td->error = errno;
				break;
			}
		}

		ret = read(td->fd, io_u->buf, io_u->buflen);
		if (ret < (int) io_u->buflen) {
			if (ret == -1)
				td->error = errno;
			break;
		}

		if (verify_io_u(io_u))
			break;

		td->cur_off = io_u->offset + io_u->buflen;
	} while (1);

out:
	put_io_u(td, io_u);
	td_set_runstate(td, TD_RUNNING);
}
static void do_sync_io(struct thread_data *td)
{
	unsigned long msec, usec;
	struct io_u *io_u = NULL;
	struct timeval e;

	while (td->this_io_bytes < td->io_size) {
		int ret;

		if (td->terminate)
			break;

		io_u = get_io_u(td);
		if (!io_u)
			break;

		if (td->cur_off != io_u->offset) {
			if (lseek(td->fd, io_u->offset, SEEK_SET) == -1) {
				td->error = errno;
				break;
			}
		}

		if (td_read(td))
			ret = read(td->fd, io_u->buf, io_u->buflen);
		else
			ret = write(td->fd, io_u->buf, io_u->buflen);

		if (ret < (int) io_u->buflen) {
			if (ret == -1)
				td->error = errno;
			break;
		}

		if (td_write(td) && td->verify)
			log_io_piece(td, io_u);

		td->io_blocks++;
		td->io_bytes += io_u->buflen;
		td->this_io_bytes += io_u->buflen;
		td->cur_off = io_u->offset + io_u->buflen;

		gettimeofday(&e, NULL);

		usec = utime_since(&io_u->start_time, &e);

		rate_throttle(td, usec, io_u->buflen);

		if (check_min_rate(td, &e)) {
			td->error = ENODATA;
			break;
		}

		msec = usec / 1000;
		add_clat_sample(td, msec);
		add_bw_sample(td);

		if (runtime_exceeded(td, &e))
			break;

		put_io_u(td, io_u);
		io_u = NULL;

		if (td->thinktime)
			usec_sleep(td->thinktime);

		if (should_fsync(td) && td->fsync_blocks &&
		    (td->io_blocks % td->fsync_blocks) == 0)
			fsync(td->fd);
	}

	if (io_u)
		put_io_u(td, io_u);

	if (should_fsync(td))
		fsync(td->fd);
}
static int io_u_getevents(struct thread_data *td, int min, int max,
			  struct timespec *t)
{
	int r;

	do {
		r = io_getevents(td->aio_ctx, min, max, td->aio_events, t);
		if (r != -EAGAIN && r != -EINTR)
			break;
	} while (1);

	return r;
}

static int io_u_queue(struct thread_data *td, struct io_u *io_u)
{
	struct iocb *iocb = &io_u->iocb;
	int ret;

	do {
		ret = io_submit(td->aio_ctx, 1, &iocb);
		if (ret == 1)
			return 0;
		else if (ret == -EAGAIN)
			usleep(100);	/* brief backoff; original delay elided */
		else if (ret == -EINTR)
			continue;
		else
			break;
	} while (1);

	return ret;
}

#define iocb_time(iocb)	((unsigned long) (iocb)->data)
#define ev_to_iou(ev)	(struct io_u *) ((unsigned long) (ev)->obj)
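
/*
 * the iocb is the first member of struct io_u and its address is what
 * got submitted, so the pointer in a completion event's obj field casts
 * straight back to the owning io_u
 */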
static int ios_completed(struct thread_data *td, int nr)
{
	unsigned long msec;
	struct io_u *io_u;
	struct timeval e;
	int i, bytes_done;

	gettimeofday(&e, NULL);

	for (i = 0, bytes_done = 0; i < nr; i++) {
		io_u = ev_to_iou(td->aio_events + i);

		td->io_blocks++;
		td->io_bytes += io_u->buflen;
		td->this_io_bytes += io_u->buflen;

		msec = mtime_since(&io_u->issue_time, &e);

		add_clat_sample(td, msec);
		add_bw_sample(td);

		if (td_write(td) && td->verify)
			log_io_piece(td, io_u);

		bytes_done += io_u->buflen;
		put_io_u(td, io_u);
	}

	return bytes_done;
}

static void cleanup_pending_aio(struct thread_data *td)
{
	struct timespec ts = { .tv_sec = 0, .tv_nsec = 0};
	struct list_head *entry, *n;
	struct io_u *io_u;
	int r;

	/*
	 * get immediately available events, if any
	 */
	r = io_u_getevents(td, 0, td->cur_depth, &ts);
	if (r > 0)
		ios_completed(td, r);

	/*
	 * now cancel remaining active events
	 */
	list_for_each_safe(entry, n, &td->io_u_busylist) {
		io_u = list_entry(entry, struct io_u, list);

		r = io_cancel(td->aio_ctx, &io_u->iocb, td->aio_events);
		if (!r)
			put_io_u(td, io_u);
	}

	if (td->cur_depth) {
		r = io_u_getevents(td, td->cur_depth, td->cur_depth, NULL);
		if (r > 0)
			ios_completed(td, r);
	}
}

static int async_do_verify(struct thread_data *td, struct io_u **io_u)
{
	struct io_u *v_io_u = *io_u;
	int ret = 0;

	if (v_io_u) {
		ret = verify_io_u(v_io_u);
		put_io_u(td, v_io_u);
		*io_u = NULL;
	}

	return ret;
}
static void do_async_verify(struct thread_data *td)
{
	struct timeval t;
	struct io_u *io_u, *v_io_u = NULL;
	int ret;

	td_set_runstate(td, TD_VERIFYING);

	do {
		if (td->terminate)
			break;

		gettimeofday(&t, NULL);
		if (runtime_exceeded(td, &t))
			break;

		io_u = __get_io_u(td);
		if (!io_u)
			break;

		if (get_next_verify(td, &io_u->offset, &io_u->buflen)) {
			put_io_u(td, io_u);
			break;
		}

		io_prep_pread(&io_u->iocb, td->fd, io_u->buf, io_u->buflen, io_u->offset);
		ret = io_u_queue(td, io_u);
		if (ret) {
			put_io_u(td, io_u);
			td->error = ret;
			break;
		}

		/*
		 * we have one pending to verify, do that while we are
		 * doing io on the next one
		 */
		if (async_do_verify(td, &v_io_u))
			break;

		ret = io_u_getevents(td, 1, 1, NULL);
		if (ret != 1) {
			if (ret < 0)
				td->error = ret;
			break;
		}

		v_io_u = ev_to_iou(td->aio_events);

		td->cur_off = v_io_u->offset + v_io_u->buflen;

		/*
		 * if we can't submit more io, we need to verify now
		 */
		if (queue_full(td) && async_do_verify(td, &v_io_u))
			break;

	} while (1);

	async_do_verify(td, &v_io_u);

	if (td->cur_depth)
		cleanup_pending_aio(td);

	td_set_runstate(td, TD_RUNNING);
}
static void do_async_io(struct thread_data *td)
{
	struct timeval s, e;
	unsigned long usec;

	while (td->this_io_bytes < td->io_size) {
		struct timespec ts = { .tv_sec = 0, .tv_nsec = 0};
		struct timespec *timeout;
		int ret, min_evts = 0;
		struct io_u *io_u;
		unsigned int bytes_done;

		if (td->terminate)
			break;

		io_u = get_io_u(td);
		if (!io_u)
			break;

		memcpy(&s, &io_u->start_time, sizeof(s));

		ret = io_u_queue(td, io_u);
		if (ret) {
			put_io_u(td, io_u);
			td->error = ret;
			break;
		}

		gettimeofday(&io_u->issue_time, NULL);
		add_slat_sample(td, mtime_since(&io_u->start_time, &io_u->issue_time));
		if (td->cur_depth < td->aio_depth) {
			timeout = &ts;
			min_evts = 0;
		} else {
			timeout = NULL;
			min_evts = 1;
		}

		ret = io_u_getevents(td, min_evts, td->cur_depth, timeout);
		if (ret < 0) {
			td->error = ret;
			break;
		} else if (!ret)
			continue;

		bytes_done = ios_completed(td, ret);

		/*
		 * the rate is batched for now, it should work for batches
		 * of completions except the very first one which may look
		 * a little bursty
		 */
		gettimeofday(&e, NULL);
		usec = utime_since(&s, &e);

		rate_throttle(td, usec, bytes_done);

		if (check_min_rate(td, &e)) {
			td->error = ENODATA;
			break;
		}

		if (runtime_exceeded(td, &e))
			break;

		if (td->thinktime)
			usec_sleep(td->thinktime);

		if (should_fsync(td) && td->fsync_blocks &&
		    (td->io_blocks % td->fsync_blocks) == 0)
			fsync(td->fd);
	}

	if (td->cur_depth)
		cleanup_pending_aio(td);

	if (should_fsync(td))
		fsync(td->fd);
}
static void cleanup_aio(struct thread_data *td)
{
	io_destroy(td->aio_ctx);

	if (td->aio_events)
		free(td->aio_events);
}

static int init_aio(struct thread_data *td)
{
	if (io_queue_init(td->aio_depth, &td->aio_ctx)) {
		td->error = errno;
		return 1;
	}

	td->aio_events = malloc(td->aio_depth * sizeof(struct io_event));
	return 0;
}

static void cleanup_io_u(struct thread_data *td)
{
	struct list_head *entry, *n;
	struct io_u *io_u;

	list_for_each_safe(entry, n, &td->io_u_freelist) {
		io_u = list_entry(entry, struct io_u, list);

		list_del(&io_u->list);
		free(io_u);
	}

	if (td->mem_type == MEM_MALLOC)
		free(td->orig_buffer);
	else if (td->mem_type == MEM_SHM) {
		struct shmid_ds sbuf;

		shmdt(td->orig_buffer);
		shmctl(td->shm_id, IPC_RMID, &sbuf);
	}
}
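
/*
 * all io_u buffers live in one orig_buffer allocation: max_units
 * buffers of max_bs bytes each, plus MASK bytes of slack so ALIGN()
 * can round the base up to an aligned boundary, which O_DIRECT io
 * requires
 */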
static int init_io_u(struct thread_data *td)
{
	struct io_u *io_u;
	int i, max_units, mem_size;
	char *p;

	if (!td->use_aio)
		max_units = 1;
	else
		max_units = td->aio_depth;

	mem_size = td->max_bs * max_units + MASK;

	if (td->mem_type == MEM_MALLOC)
		td->orig_buffer = malloc(mem_size);
	else if (td->mem_type == MEM_SHM) {
		td->shm_id = shmget(IPC_PRIVATE, mem_size, IPC_CREAT | 0600);
		if (td->shm_id < 0) {
			td->error = errno;
			perror("shmget");
			return 1;
		}

		td->orig_buffer = shmat(td->shm_id, NULL, 0);
		if (td->orig_buffer == (void *) -1) {
			td->error = errno;
			perror("shmat");
			return 1;
		}
	}

	INIT_LIST_HEAD(&td->io_u_freelist);
	INIT_LIST_HEAD(&td->io_u_busylist);
	INIT_LIST_HEAD(&td->io_hist_list);

	p = ALIGN(td->orig_buffer);
	for (i = 0; i < max_units; i++) {
		io_u = malloc(sizeof(*io_u));
		memset(io_u, 0, sizeof(*io_u));
		INIT_LIST_HEAD(&io_u->list);

		io_u->buf = p + td->max_bs * i;
		list_add(&io_u->list, &td->io_u_freelist);
	}

	return 0;
}
static void setup_log(struct io_log **log)
{
	struct io_log *l = malloc(sizeof(*l));

	l->nr_samples = 0;
	l->max_samples = 1024;
	l->log = malloc(l->max_samples * sizeof(struct io_sample));
	*log = l;
}

static void finish_log(struct thread_data *td, struct io_log *log, char *name)
{
	char file_name[128];
	unsigned int i;
	FILE *f;

	sprintf(file_name, "client%d_%s.log", td->thread_number, name);
	f = fopen(file_name, "w");
	if (!f) {
		perror("fopen log");
		return;
	}

	for (i = 0; i < log->nr_samples; i++)
		fprintf(f, "%lu, %lu\n", log->log[i].time, log->log[i].val);

	fclose(f);
	free(log->log);
	free(log);
}
static int create_file(struct thread_data *td)
{
	unsigned long long left;
	unsigned int bs;
	char *b;
	int r;

	/*
	 * unless specifically asked for overwrite, let normal io extend it
	 */
	if (td_write(td) && !td->overwrite)
		return 0;

	if (!td->file_size) {
		fprintf(stderr, "Need size for create\n");
		td->error = EINVAL;
		return 1;
	}

	printf("Client%d: Laying out IO file\n", td->thread_number);

	td->fd = open(td->file_name, O_WRONLY | O_CREAT | O_TRUNC, 0644);
	if (td->fd < 0 || ftruncate(td->fd, td->file_size) == -1) {
		td->error = errno;
		return 1;
	}

	td->io_size = td->file_size;
	b = malloc(td->max_bs);
	memset(b, 0, td->max_bs);

	left = td->file_size;
	while (left) {
		bs = left < td->max_bs ? left : td->max_bs;

		r = write(td->fd, b, bs);

		if (r == (int) bs)
			left -= bs;
		else {
			td->error = r < 0 ? errno : EIO;
			break;
		}
	}

	if (td->create_fsync)
		fsync(td->fd);

	free(b);
	close(td->fd);
	td->fd = -1;
	return 0;
}

static int file_exists(struct thread_data *td)
{
	struct stat st;

	if (stat(td->file_name, &st) != -1)
		return 1;

	return errno != ENOENT;
}
static int get_file_size(struct thread_data *td)
{
	unsigned long long bytes;
	struct stat st;

	if (fstat(td->fd, &st) == -1) {
		td->error = errno;
		return 1;
	}

	/*
	 * if this is a block device, get the size via the BLKGETSIZE64
	 * ioctl. try that for links as well, falling back to st.st_size
	 * if it fails
	 */
	if (S_ISBLK(st.st_mode) || S_ISLNK(st.st_mode)) {
		if (ioctl(td->fd, BLKGETSIZE64, &bytes)) {
			if (S_ISBLK(st.st_mode)) {
				td->error = errno;
				return 1;
			}
			bytes = st.st_size;
		}
	} else {
		bytes = st.st_size;
		if (td->file_size > bytes)
			bytes = td->file_size;
	}

	/*
	 * no size found anywhere: assumed fallback to a 1 GiB default
	 */
	if (!bytes) {
		if (!td->file_size)
			td->file_size = 1024 * 1024 * 1024;
		bytes = td->file_size;
	}

	if (td->file_offset > bytes) {
		fprintf(stderr, "Client%d: offset larger than length\n", td->thread_number);
		return 1;
	}

	td->io_size = bytes - td->file_offset;
	if (td->io_size == 0) {
		fprintf(stderr, "Client%d: no io blocks\n", td->thread_number);
		td->error = EINVAL;
		return 1;
	}

	return 0;
}
static int setup_file(struct thread_data *td)
{
	int flags = 0;

	if (!file_exists(td)) {
		if (!td->create_file) {
			td->error = ENOENT;
			return 1;
		}
		if (create_file(td))
			return 1;
	}

	if (td->odirect)
		flags |= O_DIRECT;

	if (td_read(td))
		td->fd = open(td->file_name, flags | O_RDONLY);
	else {
		if (!td->overwrite)
			flags |= O_TRUNC;
		flags |= O_RDWR;

		td->fd = open(td->file_name, flags | O_CREAT, 0600);
	}

	if (td->fd == -1) {
		td->error = errno;
		return 1;
	}

	if (get_file_size(td))
		return 1;

	if (td_write(td) && ftruncate(td->fd, td->file_size) == -1) {
		td->error = errno;
		return 1;
	}

	if (td->invalidate_cache) {
		if (fadvise(td->fd, td->file_offset, td->file_size, POSIX_FADV_DONTNEED) < 0) {
			td->error = errno;
			return 1;
		}
	}

	return 0;
}

static void clear_io_state(struct thread_data *td)
{
	if (!td->use_aio)
		lseek(td->fd, 0, SEEK_SET);

	td->cur_off = 0;
	td->last_bytes = 0;
	td->stat_io_bytes = 0;
	td->this_io_bytes = 0;

	if (td->file_map)
		memset(td->file_map, 0, td->num_maps * sizeof(long));
}
static void *thread_main(int shm_id, int offset, char *argv[])
{
	struct thread_data *td;
	void *data;
	int ret = 1;

	data = shmat(shm_id, NULL, 0);
	if (data == (void *) -1) {
		perror("shmat");
		return NULL;
	}

	td = data + offset * sizeof(struct thread_data);
	td->pid = getpid();

	if (init_io_u(td))
		goto err;

	if (sched_setaffinity(td->pid, sizeof(td->cpumask), &td->cpumask) == -1) {
		td->error = errno;
		goto err;
	}

	sprintf(argv[0], "fio%d", offset);

	if (td->use_aio && init_aio(td))
		goto err;

	if (td->ioprio) {
		if (ioprio_set(IOPRIO_WHO_PROCESS, 0, td->ioprio) == -1) {
			td->error = errno;
			goto err;
		}
	}

	sem_post(&startup_sem);
	sem_wait(&td->mutex);

	if (!td->create_serialize && setup_file(td))
		goto err;

	if (init_random_state(td))
		goto err;

	gettimeofday(&td->start, NULL);
	getrusage(RUSAGE_SELF, &td->ru_start);

	while (td->loops--) {
		gettimeofday(&td->stat_sample_time, NULL);

		if (td->ratemin)
			memcpy(&td->lastrate, &td->stat_sample_time, sizeof(td->lastrate));

		clear_io_state(td);
		prune_io_piece_log(td);

		if (!td->use_aio)
			do_sync_io(td);
		else
			do_async_io(td);

		if (td->error || td->terminate)
			break;

		if (!td->verify)
			continue;

		clear_io_state(td);

		if (!td->use_aio)
			do_sync_verify(td);
		else
			do_async_verify(td);

		if (td->error || td->terminate)
			break;
	}

	td->runtime = mtime_since_now(&td->start);
	getrusage(RUSAGE_SELF, &td->ru_end);
	ret = 0;

	if (td->bw_log)
		finish_log(td, td->bw_log, "bw");
	if (td->lat_log)
		finish_log(td, td->lat_log, "lat");

	if (exitall_on_terminate)
		sig_handler(0);

err:
	if (td->fd != -1) {
		close(td->fd);
		td->fd = -1;
	}
	if (td->use_aio)
		cleanup_aio(td);
	cleanup_io_u(td);

	/*
	 * if we never got to the normal startup handshake, release the
	 * parent now (error-path ordering assumed)
	 */
	if (ret) {
		sem_post(&startup_sem);
		sem_wait(&td->mutex);
	}

	td_set_runstate(td, TD_EXITED);
	shmdt(data);
	return NULL;
}
static void free_shm(void)
{
	struct shmid_ds sbuf;

	if (threads) {
		shmdt(threads);
		threads = NULL;
		shmctl(shm_id, IPC_RMID, &sbuf);
	}
}
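
/*
 * mean and stddev come from the running sum and sum of squares kept in
 * io_stat:
 *
 *	mean = S1 / n
 *	var  = (S2 - S1^2 / n) / (n - 1)
 *
 * where S1 = val (sum of samples) and S2 = val_sq (sum of squares)
 */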
static int calc_lat(struct io_stat *is, unsigned long *min, unsigned long *max,
		    double *mean, double *dev)
{
	double n;

	if (is->samples == 0)
		return 0;

	*min = is->min_val;
	*max = is->max_val;

	n = (double) is->samples;
	*mean = (double) is->val / n;
	*dev = sqrt(((double) is->val_sq - ((double) is->val * (double) is->val) / n) / (n - 1));
	return 1;
}
static void show_thread_status(struct thread_data *td)
{
	int prio, prio_class;
	unsigned long min, max, bw = 0, ctx;
	double mean, dev, usr_cpu, sys_cpu;

	if (!td->io_bytes && !td->error)
		return;

	if (td->runtime)
		bw = td->io_bytes / td->runtime;

	prio = td->ioprio & 0xff;
	prio_class = td->ioprio >> IOPRIO_CLASS_SHIFT;

	printf("Client%d: err=%2d, io=%6luMiB, bw=%6luKiB/s, runt=%6lumsec\n", td->thread_number, td->error, td->io_bytes >> 20, bw, td->runtime);

	if (calc_lat(&td->slat_stat, &min, &max, &mean, &dev))
		printf("  slat (msec): min=%5lu, max=%5lu, avg=%5.02f, dev=%5.02f\n", min, max, mean, dev);
	if (calc_lat(&td->clat_stat, &min, &max, &mean, &dev))
		printf("  clat (msec): min=%5lu, max=%5lu, avg=%5.02f, dev=%5.02f\n", min, max, mean, dev);
	if (calc_lat(&td->bw_stat, &min, &max, &mean, &dev))
		printf("  bw (KiB/s) : min=%5lu, max=%5lu, avg=%5.02f, dev=%5.02f\n", min, max, mean, dev);

	if (td->runtime) {
		unsigned long t;

		t = mtime_since(&td->ru_start.ru_utime, &td->ru_end.ru_utime);
		usr_cpu = (double) t * 100 / (double) td->runtime;

		t = mtime_since(&td->ru_start.ru_stime, &td->ru_end.ru_stime);
		sys_cpu = (double) t * 100 / (double) td->runtime;

		ctx = td->ru_end.ru_nvcsw + td->ru_end.ru_nivcsw - (td->ru_start.ru_nvcsw + td->ru_start.ru_nivcsw);

		printf("  cpu : usr=%3.2f%%, sys=%3.2f%%, ctx=%lu\n", usr_cpu, sys_cpu, ctx);
	}
}
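
/*
 * example: rate=8000 (KiB/sec) with min_bs=4096 gives 8000*1024/4096 =
 * 2000 ios/sec, so one io every 500 usec; rate_throttle() then sleeps
 * off whatever the device completes faster than that
 */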
static int setup_rate(struct thread_data *td)
{
	int nr_reads_per_sec;

	if (!td->rate)
		return 0;

	if (td->rate < td->ratemin) {
		fprintf(stderr, "min rate larger than nominal rate\n");
		return -1;
	}

	nr_reads_per_sec = (td->rate * 1024) / td->min_bs;
	td->rate_usec_cycle = 1000000 / nr_reads_per_sec;
	td->rate_pending_usleep = 0;
	return 0;
}
static struct thread_data *get_new_job(int global, struct thread_data *parent)
{
	struct thread_data *td;

	if (global)
		return &def_thread;
	if (thread_number >= max_jobs)
		return NULL;

	td = &threads[thread_number++];
	memset(td, 0, sizeof(*td));

	sprintf(td->directory, ".");

	td->fd = -1;
	td->thread_number = thread_number;

	td->ddir = parent->ddir;
	td->ioprio = parent->ioprio;
	td->sequential = parent->sequential;
	td->bs = parent->bs;
	td->min_bs = parent->min_bs;
	td->max_bs = parent->max_bs;
	td->odirect = parent->odirect;
	td->thinktime = parent->thinktime;
	td->fsync_blocks = parent->fsync_blocks;
	td->start_delay = parent->start_delay;
	td->timeout = parent->timeout;
	td->use_aio = parent->use_aio;
	td->create_file = parent->create_file;
	td->overwrite = parent->overwrite;
	td->invalidate_cache = parent->invalidate_cache;
	td->file_size = parent->file_size;
	td->file_offset = parent->file_offset;
	td->rate = parent->rate;
	td->ratemin = parent->ratemin;
	td->ratecycle = parent->ratecycle;
	td->aio_depth = parent->aio_depth;
	td->sync_io = parent->sync_io;
	td->mem_type = parent->mem_type;
	td->bw_avg_time = parent->bw_avg_time;
	td->create_serialize = parent->create_serialize;
	td->create_fsync = parent->create_fsync;
	td->loops = parent->loops;
	td->verify = parent->verify;
	td->stonewall = parent->stonewall;
	td->numjobs = parent->numjobs;
	memcpy(&td->cpumask, &parent->cpumask, sizeof(td->cpumask));

	return td;
}

static void put_job(struct thread_data *td)
{
	memset(&threads[td->thread_number - 1], 0, sizeof(*td));
	thread_number--;
}
static int add_job(struct thread_data *td, const char *jobname, int prioclass,
		   int prio)
{
	int numjobs;

	if (td == &def_thread)
		return 0;

	sprintf(td->file_name, "%s/%s.%d", td->directory, jobname, td->thread_number);
	sem_init(&td->mutex, 1, 0);
	td->ioprio = (prioclass << IOPRIO_CLASS_SHIFT) | prio;

	td->clat_stat.min_val = ULONG_MAX;
	td->slat_stat.min_val = ULONG_MAX;
	td->bw_stat.min_val = ULONG_MAX;

	run_str[td->thread_number - 1] = 'P';

	if (td->use_aio && !td->aio_depth)
		td->aio_depth = 1;

	if (td->min_bs == -1U)
		td->min_bs = td->bs;
	if (td->max_bs == -1U)
		td->max_bs = td->bs;

	if (setup_rate(td))
		return -1;

	if (write_lat_log)
		setup_log(&td->lat_log);
	if (write_bw_log)
		setup_log(&td->bw_log);

	printf("Client%d: rw=%d, prio=%d/%d, seq=%d, odir=%d, bs=%d-%d, rate=%d, aio=%d, aio_depth=%d\n", td->thread_number, td->ddir, prioclass, prio, td->sequential, td->odirect, td->min_bs, td->max_bs, td->rate, td->use_aio, td->aio_depth);

	/*
	 * recurse to add identical jobs; clear the numjobs and stonewall
	 * options, as they don't apply to sub-jobs
	 */
	numjobs = td->numjobs;
	while (--numjobs) {
		struct thread_data *td_new = get_new_job(0, td);

		if (!td_new)
			break;

		td_new->numjobs = 1;
		td_new->stonewall = 0;

		if (add_job(td_new, jobname, prioclass, prio))
			put_job(td_new);
	}

	return 0;
}
static void fill_cpu_mask(cpu_set_t *cpumask, int cpu)
{
	unsigned int i;

	CPU_ZERO(cpumask);

	for (i = 0; i < sizeof(int) * 8; i++) {
		if ((1 << i) & cpu)
			CPU_SET(i, cpumask);
	}
}

static unsigned long get_mult(char c)
{
	switch (c) {
	case 'k':
	case 'K':
		return 1024;
	case 'm':
	case 'M':
		return 1024 * 1024;
	case 'g':
	case 'G':
		return 1024 * 1024 * 1024;
	default:
		return 1;
	}
}
/*
 * convert string after '=' into decimal value, noting any size suffix
 */
static int str_cnv(char *p, unsigned long long *val)
{
	char *str;
	int len;

	str = strstr(p, "=");
	if (!str)
		return 1;

	str++;
	len = strlen(str);
	if (!len)
		return 1;

	*val = strtoul(str, NULL, 10);
	if (*val == ULONG_MAX && errno == ERANGE)
		return 1;

	/*
	 * the line still carries its trailing newline, so any size
	 * suffix sits at len - 2
	 */
	*val *= get_mult(str[len - 2]);
	return 0;
}
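
/*
 * example: a job file line of "size=128m" stores 128 * 1024 * 1024
 * in *val
 */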
static int check_strcnv(char *p, char *name, unsigned long long *val)
{
	if (!strstr(p, name))
		return 1;

	return str_cnv(p, val);
}

static int check_str(char *p, char *name, char *option)
{
	char *s = strstr(p, name);

	if (!s)
		return 1;

	s += strlen(name);
	if (strstr(s, option))
		return 0;

	return 1;
}

/*
 * copy the value after '=' into dest, trimming trailing whitespace;
 * reconstructed from how the directory option is parsed
 */
static int check_strstore(char *p, char *name, char *dest)
{
	char *s = strstr(p, name);

	if (!s)
		return 1;

	s = strstr(s, "=");
	if (!s)
		return 1;

	s++;
	while (isblank(*s))
		s++;

	strcpy(dest, s);

	s = dest + strlen(dest) - 1;
	while (s >= dest && (isspace(*s) || iscntrl(*s)))
		*s-- = '\0';

	return 0;
}
static int check_range(char *p, char *name, unsigned long *s, unsigned long *e)
{
	char str[128];
	char s1, s2;

	sprintf(str, "%s=%%lu%%c-%%lu%%c", name);
	if (sscanf(p, str, s, &s1, e, &s2) == 4) {
		*s *= get_mult(s1);
		*e *= get_mult(s2);
		return 0;
	}

	sprintf(str, "%s = %%lu%%c-%%lu%%c", name);
	if (sscanf(p, str, s, &s1, e, &s2) == 4) {
		*s *= get_mult(s1);
		*e *= get_mult(s2);
		return 0;
	}

	sprintf(str, "%s=%%lu-%%lu", name);
	if (sscanf(p, str, s, e) == 2)
		return 0;

	sprintf(str, "%s = %%lu-%%lu", name);
	if (sscanf(p, str, s, e) == 2)
		return 0;

	return 1;
}
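
/*
 * this accepts both "bsrange=4k-64k" and "bsrange = 4096-65536" style
 * lines, with or without size suffixes on either end of the range
 */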
static int check_int(char *p, char *name, unsigned int *val)
{
	char str[128];

	sprintf(str, "%s=%%d", name);
	if (sscanf(p, str, val) == 1)
		return 0;

	sprintf(str, "%s = %%d", name);
	if (sscanf(p, str, val) == 1)
		return 0;

	return 1;
}

static int is_empty_or_comment(char *line)
{
	unsigned int i;

	for (i = 0; i < strlen(line); i++) {
		if (line[i] == ';')
			return 1;
		if (!isspace(line[i]) && !iscntrl(line[i]))
			return 0;
	}

	return 1;
}
static int parse_jobs_ini(char *file)
{
	unsigned int prioclass, prio, cpu, global;
	unsigned long long ull;
	unsigned long ul1, ul2;
	struct thread_data *td;
	char *string, *name;
	fpos_t off;
	FILE *f;
	char *p;

	f = fopen(file, "r");
	if (!f) {
		perror("fopen job file");
		return 1;
	}

	string = malloc(4096);
	name = malloc(256);

	while ((p = fgets(string, 4096, f)) != NULL) {
		if (is_empty_or_comment(p))
			continue;
		if (sscanf(p, "[%s]", name) != 1)
			continue;

		global = !strncmp(name, "global", 6);

		/* %s swallowed the closing bracket, strip it */
		name[strlen(name) - 1] = '\0';

		td = get_new_job(global, &def_thread);
		if (!td)
			break;

		prioclass = 2;	/* assumed defaults: best-effort, mid level */
		prio = 4;

		fgetpos(f, &off);
		while ((p = fgets(string, 4096, f)) != NULL) {
			if (is_empty_or_comment(p))
				continue;
			if (strstr(p, "["))
				break;

			/*
			 * the plain integer options just store the value;
			 * chain the checks so any match records the file
			 * position and moves on to the next line
			 */
			if (!check_int(p, "rw", &td->ddir) ||
			    !check_int(p, "prio", &prio) ||
			    !check_int(p, "prioclass", &prioclass) ||
			    !check_int(p, "direct", &td->odirect) ||
			    !check_int(p, "rate", &td->rate) ||
			    !check_int(p, "ratemin", &td->ratemin) ||
			    !check_int(p, "ratecycle", &td->ratecycle) ||
			    !check_int(p, "thinktime", &td->thinktime) ||
			    !check_int(p, "fsync", &td->fsync_blocks) ||
			    !check_int(p, "startdelay", &td->start_delay) ||
			    !check_int(p, "timeout", &td->timeout) ||
			    !check_int(p, "invalidate", &td->invalidate_cache) ||
			    !check_int(p, "aio_depth", &td->aio_depth) ||
			    !check_int(p, "sync", &td->sync_io) ||
			    !check_int(p, "bwavgtime", &td->bw_avg_time) ||
			    !check_int(p, "create_serialize", &td->create_serialize) ||
			    !check_int(p, "create_fsync", &td->create_fsync) ||
			    !check_int(p, "loops", &td->loops) ||
			    !check_int(p, "verify", &td->verify) ||
			    !check_int(p, "numjobs", &td->numjobs)) {
				fgetpos(f, &off);
				continue;
			}
			if (!check_int(p, "cpumask", &cpu)) {
				fill_cpu_mask(&td->cpumask, cpu);
				fgetpos(f, &off);
				continue;
			}
			if (!check_range(p, "bsrange", &ul1, &ul2)) {
				if (ul1 & 511) {
					printf("bad min block size, must be a multiple of 512\n");
					ul1 = DEF_BS;
				}
				if (ul2 & 511) {
					printf("bad max block size, must be a multiple of 512\n");
					ul2 = DEF_BS;
				}
				td->min_bs = ul1;
				td->max_bs = ul2;
				fgetpos(f, &off);
				continue;
			}
			if (!check_strcnv(p, "bs", &ull)) {
				if (ull & 511) {
					printf("bad block size, must be a multiple of 512\n");
					ull = DEF_BS;
				}
				td->bs = ull;
				fgetpos(f, &off);
				continue;
			}
			if (!check_strcnv(p, "size", &td->file_size) ||
			    !check_strcnv(p, "offset", &td->file_offset) ||
			    !check_strstore(p, "directory", td->directory)) {
				fgetpos(f, &off);
				continue;
			}
			if (!check_str(p, "mem", "malloc")) {
				td->mem_type = MEM_MALLOC;
				fgetpos(f, &off);
				continue;
			}
			if (!check_str(p, "mem", "shm")) {
				td->mem_type = MEM_SHM;
				fgetpos(f, &off);
				continue;
			}
			if (!strncmp(p, "sequential", 10)) {
				td->sequential = 1;
				fgetpos(f, &off);
				continue;
			}
			if (!strncmp(p, "random", 6)) {
				td->sequential = 0;
				fgetpos(f, &off);
				continue;
			}
			if (!strncmp(p, "aio", 3)) {
				td->use_aio = 1;
				fgetpos(f, &off);
				continue;
			}
			if (!strncmp(p, "create", 6)) {
				td->create_file = 1;
				fgetpos(f, &off);
				continue;
			}
			if (!strncmp(p, "overwrite", 9)) {
				td->overwrite = 1;
				fgetpos(f, &off);
				continue;
			}
			if (!strncmp(p, "exitall", 7)) {
				exitall_on_terminate = 1;
				fgetpos(f, &off);
				continue;
			}
			if (!strncmp(p, "stonewall", 9)) {
				td->stonewall = 1;
				fgetpos(f, &off);
				continue;
			}
			printf("Client%d: bad option %s\n", td->thread_number, p);
		}
		fsetpos(f, &off);

		if (add_job(td, name, prioclass, prio))
			put_job(td);
	}

	free(string);
	free(name);
	fclose(f);
	return 0;
}
static int parse_options(int argc, char *argv[])
{
	int i;

	for (i = 1; i < argc; i++) {
		char *parm = argv[i];

		if (parm[0] != '-')
			break;

		parm++;

		/*
		 * only -f is visible in this excerpt; the other option
		 * letters below are assumed
		 */
		switch (*parm) {
		case 's':
			parm++;
			def_thread.sequential = !!atoi(parm);
			break;
		case 'b':
			parm++;
			def_thread.bs = atoi(parm);
			def_thread.bs <<= 10;
			if (!def_thread.bs) {
				printf("bad block size\n");
				def_thread.bs = DEF_BS;
			}
			break;
		case 't':
			parm++;
			def_thread.timeout = atoi(parm);
			break;
		case 'r':
			parm++;
			repeatable = !!atoi(parm);
			break;
		case 'R':
			parm++;
			rate_quit = !!atoi(parm);
			break;
		case 'o':
			parm++;
			def_thread.odirect = !!atoi(parm);
			break;
		case 'f':
			if (i + 1 >= argc) {
				printf("-f needs file as arg\n");
				break;
			}
			ini_file = strdup(argv[i + 1]);
			i++;
			break;
		case 'l':
			write_lat_log = 1;
			break;
		case 'w':
			write_bw_log = 1;
			break;
		default:
			printf("bad option %s\n", argv[i]);
			break;
		}
	}

	return i;
}
static void print_thread_status(struct thread_data *td, int nr_running,
				int t_rate, int m_rate)
{
	printf("Threads now running: %d", nr_running);
	if (m_rate || t_rate)
		printf(", commitrate %d/%dKiB/sec", t_rate, m_rate);
	printf(" : [%s]\r", run_str);
	fflush(stdout);
}

static void check_str_update(struct thread_data *td, int n, int t, int m)
{
	char c = run_str[td->thread_number - 1];

	if (td->runstate == td->old_runstate)
		return;

	/*
	 * per-state status letters; the mapping is assumed, only 'P' is
	 * visible elsewhere in this excerpt
	 */
	switch (td->runstate) {
	case TD_REAPED:
		c = '_';
		break;
	case TD_EXITED:
		c = 'E';
		break;
	case TD_RUNNING:
		c = td_read(td) ? 'R' : 'W';
		break;
	case TD_VERIFYING:
		c = 'V';
		break;
	case TD_CREATED:
		c = 'C';
		break;
	case TD_NOT_CREATED:
		c = 'P';
		break;
	default:
		printf("state %d\n", td->runstate);
	}

	run_str[td->thread_number - 1] = c;
	print_thread_status(td, n, t, m);
	td->old_runstate = td->runstate;
}

static void reap_threads(int *nr_running, int *t_rate, int *m_rate)
{
	int i;

	/*
	 * reap exited threads (TD_EXITED -> TD_REAPED)
	 */
	for (i = 0; i < thread_number; i++) {
		struct thread_data *td = &threads[i];

		check_str_update(td, *nr_running, *t_rate, *m_rate);

		if (td->runstate != TD_EXITED)
			continue;

		td_set_runstate(td, TD_REAPED);
		waitpid(td->pid, NULL, 0);
		(*nr_running)--;
		(*m_rate) -= td->ratemin;
		(*t_rate) -= td->rate;
		check_str_update(td, *nr_running, *t_rate, *m_rate);
	}
}
static void run_threads(char *argv[])
{
	struct timeval genesis;
	struct thread_data *td;
	unsigned long spent;
	int i, todo, nr_running, m_rate, t_rate, nr_started;

	printf("Starting %d threads\n", thread_number);
	fflush(stdout);

	signal(SIGINT, sig_handler);

	todo = thread_number;
	nr_running = 0;
	nr_started = 0;
	m_rate = t_rate = 0;

	for (i = 0; i < thread_number; i++) {
		td = &threads[i];

		if (!td->create_serialize)
			continue;

		/*
		 * do file setup here so it happens sequentially,
		 * we don't want X number of threads getting their
		 * client data interspersed on disk
		 */
		if (setup_file(td)) {
			td_set_runstate(td, TD_REAPED);
			todo--;
		}
	}

	gettimeofday(&genesis, NULL);

	while (todo) {
		/*
		 * create threads (TD_NOT_CREATED -> TD_CREATED)
		 */
		for (i = 0; i < thread_number; i++) {
			td = &threads[i];

			if (td->runstate != TD_NOT_CREATED)
				continue;

			/*
			 * never got a chance to start, killed by other
			 * thread for some reason
			 */
			if (td->terminate) {
				todo--;
				continue;
			}

			if (td->start_delay) {
				spent = mtime_since_now(&genesis);

				if (td->start_delay * 1000 > spent)
					continue;
			}

			if (td->stonewall && (nr_started || nr_running))
				break;

			td_set_runstate(td, TD_CREATED);
			check_str_update(td, nr_running, t_rate, m_rate);
			sem_init(&startup_sem, 1, 1);
			todo--;
			nr_started++;

			if (fork())
				sem_wait(&startup_sem);
			else {
				thread_main(shm_id, i, argv);
				exit(0);
			}
		}

		/*
		 * start created threads (TD_CREATED -> TD_RUNNING)
		 */
		for (i = 0; i < thread_number; i++) {
			struct thread_data *td = &threads[i];

			if (td->runstate != TD_CREATED)
				continue;

			td_set_runstate(td, TD_RUNNING);
			nr_running++;
			nr_started--;
			m_rate += td->ratemin;
			t_rate += td->rate;
			check_str_update(td, nr_running, t_rate, m_rate);
			sem_post(&td->mutex);
		}

		for (i = 0; i < thread_number; i++) {
			struct thread_data *td = &threads[i];

			if (td->runstate != TD_RUNNING &&
			    td->runstate != TD_VERIFYING)
				continue;

			check_str_update(td, nr_running, t_rate, m_rate);
		}

		reap_threads(&nr_running, &t_rate, &m_rate);

		if (todo)
			usleep(100000);
	}

	while (nr_running) {
		reap_threads(&nr_running, &t_rate, &m_rate);
		usleep(10000);
	}
}
int setup_thread_area(void)
{
	/*
	 * 1024 is too much on some machines, scale max_jobs if
	 * we get a failure that looks like too large a shm segment
	 */
	do {
		int s = max_jobs * sizeof(struct thread_data);

		shm_id = shmget(0, s, IPC_CREAT | 0600);
		if (shm_id != -1)
			break;
		if (errno != EINVAL) {
			perror("shmget");
			return 1;
		}

		max_jobs >>= 1;
	} while (max_jobs);

	if (shm_id == -1)
		return 1;

	threads = shmat(shm_id, NULL, 0);
	if (threads == (void *) -1) {
		perror("shmat");
		return 1;
	}

	atexit(free_shm);
	return 0;
}
int main(int argc, char *argv[])
{
	static unsigned long max_run[2], min_run[2];
	static unsigned long max_bw[2], min_bw[2];
	static unsigned long io_mb[2], agg[2];
	int i;

	if (setup_thread_area())
		return 1;

	if (sched_getaffinity(getpid(), sizeof(cpu_set_t), &def_thread.cpumask) == -1) {
		perror("sched_getaffinity");
		return 1;
	}

	/*
	 * fill in the default options
	 */
	def_thread.ddir = DDIR_READ;
	def_thread.bs = DEF_BS;
	def_thread.min_bs = -1;
	def_thread.max_bs = -1;
	def_thread.odirect = DEF_ODIRECT;
	def_thread.ratecycle = DEF_RATE_CYCLE;
	def_thread.sequential = DEF_SEQUENTIAL;
	def_thread.timeout = DEF_TIMEOUT;
	def_thread.create_file = DEF_CREATE;
	def_thread.overwrite = DEF_OVERWRITE;
	def_thread.invalidate_cache = DEF_INVALIDATE;
	def_thread.sync_io = DEF_SYNCIO;
	def_thread.mem_type = MEM_MALLOC;
	def_thread.bw_avg_time = DEF_BWAVGTIME;
	def_thread.create_serialize = DEF_CREATE_SER;
	def_thread.create_fsync = DEF_CREATE_FSYNC;
	def_thread.loops = DEF_LOOPS;
	def_thread.verify = DEF_VERIFY;
	def_thread.stonewall = DEF_STONEWALL;
	def_thread.numjobs = DEF_NUMJOBS;

	i = parse_options(argc, argv);

	if (!ini_file) {
		printf("Need job file\n");
		return 1;
	}

	if (parse_jobs_ini(ini_file))
		return 1;

	if (!thread_number) {
		printf("Nothing to do\n");
		return 1;
	}

	run_threads(argv);
	min_bw[0] = min_run[0] = ~0UL;
	min_bw[1] = min_run[1] = ~0UL;
	io_mb[0] = io_mb[1] = 0;
	agg[0] = agg[1] = 0;
	for (i = 0; i < thread_number; i++) {
		struct thread_data *td = &threads[i];
		unsigned long bw = 0;

		if (!td->error) {
			if (td->runtime < min_run[td->ddir])
				min_run[td->ddir] = td->runtime;
			if (td->runtime > max_run[td->ddir])
				max_run[td->ddir] = td->runtime;

			if (td->runtime)
				bw = td->io_bytes / td->runtime;
			if (bw < min_bw[td->ddir])
				min_bw[td->ddir] = bw;
			if (bw > max_bw[td->ddir])
				max_bw[td->ddir] = bw;

			io_mb[td->ddir] += td->io_bytes >> 20;
		}

		show_thread_status(td);
	}
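
	/*
	 * io_mb is in MiB and max_run in msec, so MiB * 1024 * 1000 / msec
	 * gives the aggregate bandwidth in KiB/sec
	 */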
	if (max_run[0])
		agg[0] = (io_mb[0] * 1024 * 1000) / max_run[0];
	if (max_run[1])
		agg[1] = (io_mb[1] * 1024 * 1000) / max_run[1];

	printf("\nRun status:\n");
	if (max_run[DDIR_READ])
		printf("   READ: io=%luMiB, aggrb=%lu, minb=%lu, maxb=%lu, mint=%lumsec, maxt=%lumsec\n", io_mb[0], agg[0], min_bw[0], max_bw[0], min_run[0], max_run[0]);
	if (max_run[DDIR_WRITE])
		printf("  WRITE: io=%luMiB, aggrb=%lu, minb=%lu, maxb=%lu, mint=%lumsec, maxt=%lumsec\n", io_mb[1], agg[1], min_bw[1], max_bw[1], min_run[1], max_run[1]);

	return 0;
}