2 * fio - the flexible io tester
4 * Copyright (C) 2005 Jens Axboe <axboe@suse.de>
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
36 #include <sys/types.h>
39 #include <semaphore.h>
42 #include <sys/ioctl.h>
43 #include <asm/unistd.h>
44 #include <asm/types.h>
45 #include <asm/bitops.h>
52 #define BLKGETSIZE64 _IOR(0x12,114,size_t)
55 #define MAX_JOBS (1024)
57 static int ioprio_set(int which, int who, int ioprio)
59 return syscall(__NR_ioprio_set, which, who, ioprio);
63 * we want fadvise64 really, but it's so tangled... later
65 static int fadvise(int fd, loff_t offset, size_t len, int advice)
68 return syscall(__NR_fadvise64, fd, offset, offset >> 32, len, advice);
70 return posix_fadvise(fd, (off_t) offset, len, advice);
75 IOPRIO_WHO_PROCESS = 1,
80 #define IOPRIO_CLASS_SHIFT 13
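/*
 * The kernel encodes io priorities as (class << IOPRIO_CLASS_SHIFT) | prio,
 * which is exactly how add_job() builds td->ioprio below and how
 * show_thread_status() splits it back apart.
 */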
85 #define DEF_TIMEOUT (0)
86 #define DEF_RATE_CYCLE (1000)
87 #define DEF_ODIRECT (1)
88 #define DEF_SEQUENTIAL (1)
89 #define DEF_RAND_REPEAT (1)
90 #define DEF_OVERWRITE (0)
91 #define DEF_CREATE (1)
92 #define DEF_INVALIDATE (1)
93 #define DEF_SYNCIO (0)
94 #define DEF_RANDSEED (0xb1899bedUL)
95 #define DEF_BWAVGTIME (500)
96 #define DEF_CREATE_SER (1)
97 #define DEF_CREATE_FSYNC (1)
99 #define DEF_VERIFY (0)
100 #define DEF_STONEWALL (0)
102 #define ALIGN(buf) (char *) (((unsigned long) (buf) + MASK) & ~(MASK))
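/*
 * ALIGN() rounds a buffer address up to the next (MASK + 1) byte boundary
 * (MASK itself is defined in a part of the file not shown here).
 * init_io_u() over-allocates orig_buffer by MASK bytes and hands out
 * aligned max_bs-sized slices, which keeps O_DIRECT transfers legal.
 */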
104 static int repeatable = DEF_RAND_REPEAT;
105 static int rate_quit = 1;
106 static int write_lat_log;
107 static int write_bw_log;
108 static int exitall_on_terminate;
110 static int thread_number;
111 static char *ini_file;
113 static int max_jobs = MAX_JOBS;
115 static char run_str[MAX_JOBS + 1];
146 struct timeval start_time;
147 struct timeval issue_time;
151 unsigned long long offset;
153 struct list_head list;
158 unsigned long val_sq;
159 unsigned long max_val;
160 unsigned long min_val;
161 unsigned long samples;
170 unsigned long nr_samples;
171 unsigned long max_samples;
172 struct io_sample *log;
176 struct list_head list;
177 unsigned long long offset;
181 #define FIO_HDR_MAGIC 0xf00baaef
183 struct verify_header {
184 unsigned int fio_magic;
186 char md5_digest[MD5_HASH_WORDS * 4];
189 #define td_read(td) ((td)->ddir == DDIR_READ)
190 #define td_write(td) ((td)->ddir == DDIR_WRITE)
191 #define should_fsync(td) (td_write(td) && !(td)->odirect)
193 #define BLOCKS_PER_MAP (8 * sizeof(long))
194 #define TO_MAP_BLOCK(td, b) ((b) - ((td)->file_offset / (td)->min_bs))
195 #define RAND_MAP_IDX(td, b) (TO_MAP_BLOCK(td, b) / BLOCKS_PER_MAP)
196 #define RAND_MAP_BIT(td, b) (TO_MAP_BLOCK(td, b) & (BLOCKS_PER_MAP - 1))
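/*
 * For random io, file_map is a bitmap with one bit per min_bs-sized block
 * of the io region. TO_MAP_BLOCK() rebases an absolute block number
 * against file_offset, RAND_MAP_IDX() picks the word in file_map[] and
 * RAND_MAP_BIT() the bit within that word.
 */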
205 volatile int terminate;
206 volatile int runstate;
207 volatile int old_runstate;
210 unsigned int sequential;
214 unsigned int odirect;
215 unsigned int thinktime;
216 unsigned int fsync_blocks;
217 unsigned int start_delay;
218 unsigned int timeout;
219 unsigned int use_aio;
220 unsigned int create_file;
221 unsigned int overwrite;
222 unsigned int invalidate_cache;
223 unsigned int bw_avg_time;
224 unsigned int create_serialize;
225 unsigned int create_fsync;
227 unsigned long long file_size;
228 unsigned long long file_offset;
229 unsigned int sync_io;
230 unsigned int mem_type;
232 unsigned int stonewall;
235 struct drand48_data bsrange_state;
236 struct drand48_data verify_state;
242 io_context_t aio_ctx;
243 unsigned int aio_depth;
244 struct io_event *aio_events;
246 unsigned int cur_depth;
247 struct list_head io_u_freelist;
248 struct list_head io_u_busylist;
251 unsigned int ratemin;
252 unsigned int ratecycle;
253 unsigned long rate_usec_cycle;
254 long rate_pending_usleep;
255 unsigned long rate_bytes;
256 struct timeval lastrate;
258 unsigned long runtime; /* sec */
259 unsigned long long io_size;
261 unsigned long io_blocks;
262 unsigned long io_bytes;
263 unsigned long this_io_bytes;
264 unsigned long last_bytes;
267 struct drand48_data random_state;
268 unsigned long *file_map;
269 unsigned int num_maps;
272 * bandwidth and latency stats
274 struct io_stat clat_stat; /* completion latency */
275 struct io_stat slat_stat; /* submission latency */
277 struct io_stat bw_stat; /* bandwidth stats */
278 unsigned long stat_io_bytes;
279 struct timeval stat_sample_time;
281 struct io_log *lat_log;
282 struct io_log *bw_log;
284 struct timeval start;
285 struct rusage ru_start;
286 struct rusage ru_end;
288 struct list_head io_hist_list;
291 static struct thread_data *threads;
292 static struct thread_data def_thread;
294 static sem_t startup_sem;
296 static void sig_handler(int sig)
300 for (i = 0; i < thread_number; i++) {
301 struct thread_data *td = &threads[i];
308 static int init_random_state(struct thread_data *td)
311 int fd, num_maps, blocks;
313 fd = open("/dev/random", O_RDONLY);
319 if (read(fd, &seed, sizeof(seed)) < (int) sizeof(seed)) {
327 srand48_r(seed, &td->bsrange_state);
328 srand48_r(seed, &td->verify_state);
336 blocks = (td->io_size + td->min_bs - 1) / td->min_bs;
337 num_maps = (blocks + BLOCKS_PER_MAP - 1) / BLOCKS_PER_MAP;
338 td->file_map = malloc(num_maps * sizeof(long));
339 td->num_maps = num_maps;
340 memset(td->file_map, 0, num_maps * sizeof(long));
342 srand48_r(seed, &td->random_state);
346 static unsigned long utime_since(struct timeval *s, struct timeval *e)
350 sec = e->tv_sec - s->tv_sec;
351 usec = e->tv_usec - s->tv_usec;
352 if (sec > 0 && usec < 0) {
357 sec *= (double) 1000000;
362 static unsigned long utime_since_now(struct timeval *s)
366 gettimeofday(&t, NULL);
367 return utime_since(s, &t);
370 static unsigned long mtime_since(struct timeval *s, struct timeval *e)
374 sec = e->tv_sec - s->tv_sec;
375 usec = e->tv_usec - s->tv_usec;
376 if (sec > 0 && usec < 0) {
381 sec *= (double) 1000;
382 usec /= (double) 1000;
387 static unsigned long mtime_since_now(struct timeval *s)
391 gettimeofday(&t, NULL);
392 return mtime_since(s, &t);
395 static inline unsigned long msec_now(struct timeval *s)
397 return s->tv_sec * 1000 + s->tv_usec / 1000;
400 static int random_map_free(struct thread_data *td, unsigned long long block)
402 unsigned int idx = RAND_MAP_IDX(td, block);
403 unsigned int bit = RAND_MAP_BIT(td, block);
405 return (td->file_map[idx] & (1UL << bit)) == 0;
408 static int get_next_free_block(struct thread_data *td, unsigned long long *b)
414 while ((*b) * td->min_bs < td->io_size) {
415 if (td->file_map[i] != -1UL) {
416 *b += ffz(td->file_map[i]);
420 *b += BLOCKS_PER_MAP;
427 static void mark_random_map(struct thread_data *td, struct io_u *io_u)
429 unsigned long block = io_u->offset / td->min_bs;
430 unsigned int blocks = 0;
432 while (blocks < (io_u->buflen / td->min_bs)) {
435 if (!random_map_free(td, block))
438 idx = RAND_MAP_IDX(td, block);
439 bit = RAND_MAP_BIT(td, block);
441 assert(idx < td->num_maps);
443 td->file_map[idx] |= (1UL << bit);
448 if ((blocks * td->min_bs) < io_u->buflen)
449 io_u->buflen = blocks * td->min_bs;
452 static int get_next_offset(struct thread_data *td, unsigned long long *offset)
454 unsigned long long b, rb;
457 if (!td->sequential) {
458 unsigned long max_blocks = td->io_size / td->min_bs;
462 lrand48_r(&td->random_state, &r);
463 b = ((max_blocks - 1) * r / (RAND_MAX+1.0));
464 rb = b + (td->file_offset / td->min_bs);
466 } while (!random_map_free(td, rb) && loops);
469 if (get_next_free_block(td, &b))
473 b = td->last_bytes / td->min_bs;
475 *offset = (b * td->min_bs) + td->file_offset;
476 if (*offset > td->file_size)
482 static unsigned int get_next_buflen(struct thread_data *td)
487 if (td->min_bs == td->max_bs)
490 lrand48_r(&td->bsrange_state, &r);
491 buflen = (1 + (double) (td->max_bs - 1) * r / (RAND_MAX + 1.0));
492 buflen = (buflen + td->min_bs - 1) & ~(td->min_bs - 1);
495 if (buflen > td->io_size - td->this_io_bytes)
496 buflen = td->io_size - td->this_io_bytes;
501 static inline void add_stat_sample(struct thread_data *td, struct io_stat *is,
504 if (val > is->max_val)
506 if (val < is->min_val)
510 is->val_sq += val * val;
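	/*
	 * val and val_sq accumulate the running sum and sum of squares of
	 * the samples; calc_lat() later turns them into mean and stddev.
	 */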
514 static void add_log_sample(struct thread_data *td, struct io_log *log,
517 if (log->nr_samples == log->max_samples) {
518 int new_size = sizeof(struct io_sample) * log->max_samples * 2;
520 log->log = realloc(log->log, new_size);
521 log->max_samples <<= 1;
524 log->log[log->nr_samples].val = val;
525 log->log[log->nr_samples].time = mtime_since_now(&td->start);
529 static void add_clat_sample(struct thread_data *td, unsigned long msec)
531 add_stat_sample(td, &td->clat_stat, msec);
534 add_log_sample(td, td->lat_log, msec);
537 static void add_slat_sample(struct thread_data *td, unsigned long msec)
539 add_stat_sample(td, &td->slat_stat, msec);
542 static void add_bw_sample(struct thread_data *td)
544 unsigned long spent = mtime_since_now(&td->stat_sample_time);
547 if (spent < td->bw_avg_time)
550 rate = (td->this_io_bytes - td->stat_io_bytes) / spent;
551 add_stat_sample(td, &td->bw_stat, rate);
554 add_log_sample(td, td->bw_log, rate);
556 gettimeofday(&td->stat_sample_time, NULL);
557 td->stat_io_bytes = td->this_io_bytes;
561 * busy looping version for the last few usec
563 static void __usec_sleep(int usec)
565 struct timeval start;
567 gettimeofday(&start, NULL);
568 while (utime_since_now(&start) < usec)
569 __asm__ __volatile__("rep;nop": : :"memory");
572 static void usec_sleep(int usec)
574 struct timespec req = { .tv_sec = 0, .tv_nsec = usec * 1000 };
582 rem.tv_sec = rem.tv_nsec = 0;
583 nanosleep(&req, &rem);
587 req.tv_nsec = rem.tv_nsec;
588 usec = rem.tv_nsec / 1000;
592 static void rate_throttle(struct thread_data *td, unsigned long time_spent,
595 unsigned long usec_cycle;
600 usec_cycle = td->rate_usec_cycle * (bytes / td->min_bs);
602 if (time_spent < usec_cycle) {
603 unsigned long s = usec_cycle - time_spent;
605 td->rate_pending_usleep += s;
606 if (td->rate_pending_usleep >= 100000) {
607 usec_sleep(td->rate_pending_usleep);
608 td->rate_pending_usleep = 0;
611 long overtime = time_spent - usec_cycle;
613 td->rate_pending_usleep -= overtime;
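/*
 * The throttle accounting above in short: rate_usec_cycle (set up in
 * setup_rate()) is how long one min_bs-sized block may take at the
 * requested rate. If the io finished faster than that, the surplus is
 * banked in rate_pending_usleep and slept off once it reaches 100 msec;
 * if it was slower, the overtime is subtracted from the bank instead.
 */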
617 static int check_min_rate(struct thread_data *td, struct timeval *now)
623 * allow a 2 second settle period in the beginning
625 if (mtime_since(&td->start, now) < 2000)
629 * if rate_bytes is set, a rate sample window is already running
631 if (td->rate_bytes) {
632 spent = mtime_since(&td->lastrate, now);
633 if (spent < td->ratecycle)
636 rate = (td->this_io_bytes - td->rate_bytes) / spent;
637 if (rate < td->ratemin) {
638 printf("Client%d: min rate %d not met, got %ldKiB/sec\n", td->thread_number, td->ratemin, rate);
645 td->rate_bytes = td->this_io_bytes;
646 memcpy(&td->lastrate, now, sizeof(*now));
650 static inline int runtime_exceeded(struct thread_data *td, struct timeval *t)
654 if (mtime_since(&td->start, t) >= td->timeout * 1000)
660 static void fill_random_bytes(struct thread_data *td,
661 unsigned char *p, unsigned int len)
667 drand48_r(&td->verify_state, &r);
670 * lrand48_r seems to be broken and only fills the bottom
671 * 32 bits, even on 64-bit archs with 64-bit longs
684 static void hexdump(void *buffer, int len)
686 unsigned char *p = buffer;
689 for (i = 0; i < len; i++)
690 printf("%02x", p[i]);
694 static int verify_io_u(struct io_u *io_u)
696 struct verify_header *hdr = (struct verify_header *) io_u->buf;
697 unsigned char *p = (unsigned char *) io_u->buf;
698 struct md5_ctx md5_ctx;
701 if (hdr->fio_magic != FIO_HDR_MAGIC)
704 memset(&md5_ctx, 0, sizeof(md5_ctx));
706 md5_update(&md5_ctx, p, hdr->len - sizeof(*hdr));
708 ret = memcmp(hdr->md5_digest, md5_ctx.hash, sizeof(md5_ctx.hash));
710 hexdump(hdr->md5_digest, sizeof(hdr->md5_digest));
711 hexdump(md5_ctx.hash, sizeof(md5_ctx.hash));
718 * fill the body of io_u->buf with random data and add a header with the
719 * md5 sum of that data.
721 static void populate_io_u(struct thread_data *td, struct io_u *io_u)
723 struct md5_ctx md5_ctx;
724 struct verify_header hdr;
725 unsigned char *p = (unsigned char *) io_u->buf;
727 hdr.fio_magic = FIO_HDR_MAGIC;
728 hdr.len = io_u->buflen;
730 fill_random_bytes(td, p, io_u->buflen - sizeof(hdr));
732 memset(&md5_ctx, 0, sizeof(md5_ctx));
733 md5_update(&md5_ctx, p, io_u->buflen - sizeof(hdr));
734 memcpy(hdr.md5_digest, md5_ctx.hash, sizeof(md5_ctx.hash));
735 memcpy(io_u->buf, &hdr, sizeof(hdr));
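/*
 * A verify-able block therefore looks like this on disk:
 *
 *	[ verify_header: fio_magic | len | md5_digest ][ random payload ]
 *
 * verify_io_u() checks the magic, recomputes the md5 over the payload
 * and compares it against the stored digest.
 */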
738 static void put_io_u(struct thread_data *td, struct io_u *io_u)
740 list_del(&io_u->list);
741 list_add(&io_u->list, &td->io_u_freelist);
745 #define queue_full(td) (list_empty(&(td)->io_u_freelist))
747 static struct io_u *__get_io_u(struct thread_data *td)
754 io_u = list_entry(td->io_u_freelist.next, struct io_u, list);
755 list_del(&io_u->list);
756 list_add(&io_u->list, &td->io_u_busylist);
761 static struct io_u *get_io_u(struct thread_data *td)
765 io_u = __get_io_u(td);
769 if (get_next_offset(td, &io_u->offset)) {
774 io_u->buflen = get_next_buflen(td);
780 if (io_u->buflen + io_u->offset > td->file_size)
781 io_u->buflen = td->file_size - io_u->offset;
784 mark_random_map(td, io_u);
786 td->last_bytes += io_u->buflen;
789 populate_io_u(td, io_u);
793 io_prep_pread(&io_u->iocb, td->fd, io_u->buf, io_u->buflen, io_u->offset);
795 io_prep_pwrite(&io_u->iocb, td->fd, io_u->buf, io_u->buflen, io_u->offset);
798 gettimeofday(&io_u->start_time, NULL);
802 static inline void td_set_runstate(struct thread_data *td, int runstate)
804 td->old_runstate = td->runstate;
805 td->runstate = runstate;
808 static int get_next_verify(struct thread_data *td,
809 unsigned long long *offset, unsigned int *len)
811 struct io_piece *ipo;
813 if (list_empty(&td->io_hist_list))
816 ipo = list_entry(td->io_hist_list.next, struct io_piece, list);
817 list_del(&ipo->list);
819 *offset = ipo->offset;
825 static void prune_io_piece_log(struct thread_data *td)
827 struct io_piece *ipo;
829 while (!list_empty(&td->io_hist_list)) {
830 ipo = list_entry(td->io_hist_list.next, struct io_piece, list);
832 list_del(&ipo->list);
838 * log a successful write, so we can unwind the log for verify
840 static void log_io_piece(struct thread_data *td, struct io_u *io_u)
842 struct io_piece *ipo = malloc(sizeof(struct io_piece));
843 struct list_head *entry;
845 INIT_LIST_HEAD(&ipo->list);
846 ipo->offset = io_u->offset;
847 ipo->len = io_u->buflen;
850 * for random io where the writes extend the file, it will typically
851 * be laid out with the blocks scattered as written. it's faster to
852 * read them back in that order again, so don't sort
854 if (td->sequential || !td->overwrite) {
855 list_add_tail(&ipo->list, &td->io_hist_list);
860 * for random io, sort the list so verify will run faster
862 entry = &td->io_hist_list;
863 while ((entry = entry->prev) != &td->io_hist_list) {
864 struct io_piece *__ipo = list_entry(entry, struct io_piece, list);
866 if (__ipo->offset < ipo->offset)
870 list_add(&ipo->list, entry);
873 static void do_sync_verify(struct thread_data *td)
876 struct io_u *io_u = NULL;
879 td_set_runstate(td, TD_VERIFYING);
881 io_u = __get_io_u(td);
884 if (fadvise(td->fd, td->file_offset, td->io_size, POSIX_FADV_DONTNEED) < 0) {
894 gettimeofday(&t, NULL);
895 if (runtime_exceeded(td, &t))
898 if (get_next_verify(td, &io_u->offset, &io_u->buflen))
901 if (td->cur_off != io_u->offset) {
902 if (lseek(td->fd, io_u->offset, SEEK_SET) == -1) {
908 ret = read(td->fd, io_u->buf, io_u->buflen);
909 if (ret < (int) io_u->buflen) {
919 if (verify_io_u(io_u))
922 td->cur_off = io_u->offset + io_u->buflen;
926 td_set_runstate(td, TD_RUNNING);
930 static void do_sync_io(struct thread_data *td)
932 unsigned long msec, usec;
933 struct io_u *io_u = NULL;
936 while (td->this_io_bytes < td->io_size) {
946 if (td->cur_off != io_u->offset) {
947 if (lseek(td->fd, io_u->offset, SEEK_SET) == -1) {
954 ret = read(td->fd, io_u->buf, io_u->buflen);
956 ret = write(td->fd, io_u->buf, io_u->buflen);
958 if (ret < (int) io_u->buflen) {
965 log_io_piece(td, io_u);
968 td->io_bytes += io_u->buflen;
969 td->this_io_bytes += io_u->buflen;
970 td->cur_off = io_u->offset + io_u->buflen;
972 gettimeofday(&e, NULL);
974 usec = utime_since(&io_u->start_time, &e);
976 rate_throttle(td, usec, io_u->buflen);
978 if (check_min_rate(td, &e)) {
984 add_clat_sample(td, msec);
987 if (runtime_exceeded(td, &e))
994 usec_sleep(td->thinktime);
996 if (should_fsync(td) && td->fsync_blocks &&
997 (td->io_blocks % td->fsync_blocks) == 0)
1004 if (should_fsync(td))
1008 static int io_u_getevents(struct thread_data *td, int min, int max,
1014 r = io_getevents(td->aio_ctx, min, max, td->aio_events, t);
1015 if (r != -EAGAIN && r != -EINTR)
1022 static int io_u_queue(struct thread_data *td, struct io_u *io_u)
1024 struct iocb *iocb = &io_u->iocb;
1028 ret = io_submit(td->aio_ctx, 1, &iocb);
1031 else if (ret == -EAGAIN)
1033 else if (ret == -EINTR)
1042 #define iocb_time(iocb) ((unsigned long) (iocb)->data)
1043 #define ev_to_iou(ev) (struct io_u *) ((unsigned long) (ev)->obj)
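/*
 * libaio returns the submitted iocb pointer in event->obj; assuming the
 * iocb is the first member of struct io_u (the struct definition is only
 * partly shown above), ev_to_iou() can cast straight back to the io_u
 * that owns it.
 */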
1045 static int ios_completed(struct thread_data *td, int nr)
1052 gettimeofday(&e, NULL);
1054 for (i = 0, bytes_done = 0; i < nr; i++) {
1055 io_u = ev_to_iou(td->aio_events + i);
1058 td->io_bytes += io_u->buflen;
1059 td->this_io_bytes += io_u->buflen;
1061 msec = mtime_since(&io_u->issue_time, &e);
1063 add_clat_sample(td, msec);
1067 log_io_piece(td, io_u);
1069 bytes_done += io_u->buflen;
1076 static void cleanup_pending_aio(struct thread_data *td)
1078 struct timespec ts = { .tv_sec = 0, .tv_nsec = 0};
1079 struct list_head *entry, *n;
1084 * get immediately available events, if any
1086 r = io_u_getevents(td, 0, td->cur_depth, &ts);
1088 ios_completed(td, r);
1091 * now cancel remaining active events
1093 list_for_each_safe(entry, n, &td->io_u_busylist) {
1094 io_u = list_entry(entry, struct io_u, list);
1096 r = io_cancel(td->aio_ctx, &io_u->iocb, td->aio_events);
1101 if (td->cur_depth) {
1102 r = io_u_getevents(td, td->cur_depth, td->cur_depth, NULL);
1104 ios_completed(td, r);
1108 static int async_do_verify(struct thread_data *td, struct io_u **io_u)
1110 struct io_u *v_io_u = *io_u;
1114 ret = verify_io_u(v_io_u);
1115 put_io_u(td, v_io_u);
1122 static void do_async_verify(struct thread_data *td)
1125 struct io_u *io_u, *v_io_u = NULL;
1128 td_set_runstate(td, TD_VERIFYING);
1134 gettimeofday(&t, NULL);
1135 if (runtime_exceeded(td, &t))
1138 io_u = __get_io_u(td);
1142 if (get_next_verify(td, &io_u->offset, &io_u->buflen)) {
1147 io_prep_pread(&io_u->iocb, td->fd, io_u->buf, io_u->buflen, io_u->offset);
1148 ret = io_u_queue(td, io_u);
1156 * we have one pending to verify, do that while we are
1157 * doing io on the next one
1159 if (async_do_verify(td, &v_io_u))
1162 ret = io_u_getevents(td, 1, 1, NULL);
1169 v_io_u = ev_to_iou(td->aio_events);
1171 td->cur_off = v_io_u->offset + v_io_u->buflen;
1174 * if we can't submit more io, we need to verify now
1176 if (queue_full(td) && async_do_verify(td, &v_io_u))
1181 async_do_verify(td, &v_io_u);
1184 cleanup_pending_aio(td);
1186 td_set_runstate(td, TD_RUNNING);
1189 static void do_async_io(struct thread_data *td)
1191 struct timeval s, e;
1194 while (td->this_io_bytes < td->io_size) {
1195 struct timespec ts = { .tv_sec = 0, .tv_nsec = 0};
1196 struct timespec *timeout;
1197 int ret, min_evts = 0;
1199 unsigned int bytes_done;
1204 io_u = get_io_u(td);
1208 memcpy(&s, &io_u->start_time, sizeof(s));
1210 ret = io_u_queue(td, io_u);
1217 gettimeofday(&io_u->issue_time, NULL);
1218 add_slat_sample(td, mtime_since(&io_u->start_time, &io_u->issue_time));
1219 if (td->cur_depth < td->aio_depth) {
1227 ret = io_u_getevents(td, min_evts, td->cur_depth, timeout);
1234 bytes_done = ios_completed(td, ret);
1237 * the rate is batched for now, it should work for batches
1238 * of completions except the very first one, which may look skewed
1241 gettimeofday(&e, NULL);
1242 usec = utime_since(&s, &e);
1244 rate_throttle(td, usec, bytes_done);
1246 if (check_min_rate(td, &e)) {
1247 td->error = ENODATA;
1251 if (runtime_exceeded(td, &e))
1255 usec_sleep(td->thinktime);
1257 if (should_fsync(td) && td->fsync_blocks &&
1258 (td->io_blocks % td->fsync_blocks) == 0)
1263 cleanup_pending_aio(td);
1265 if (should_fsync(td))
1269 static void cleanup_aio(struct thread_data *td)
1271 io_destroy(td->aio_ctx);
1274 free(td->aio_events);
1277 static int init_aio(struct thread_data *td)
1279 if (io_queue_init(td->aio_depth, &td->aio_ctx)) {
1284 td->aio_events = malloc(td->aio_depth * sizeof(struct io_event));
1288 static void cleanup_io_u(struct thread_data *td)
1290 struct list_head *entry, *n;
1293 list_for_each_safe(entry, n, &td->io_u_freelist) {
1294 io_u = list_entry(entry, struct io_u, list);
1296 list_del(&io_u->list);
1300 if (td->mem_type == MEM_MALLOC)
1301 free(td->orig_buffer);
1302 else if (td->mem_type == MEM_SHM) {
1303 struct shmid_ds sbuf;
1305 shmdt(td->orig_buffer);
1306 shmctl(td->shm_id, IPC_RMID, &sbuf);
1310 static int init_io_u(struct thread_data *td)
1313 int i, max_units, mem_size;
1319 max_units = td->aio_depth;
1321 mem_size = td->max_bs * max_units + MASK;
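	/*
	 * One contiguous buffer backs every io_u: max_bs bytes per unit
	 * plus MASK bytes of slack so the start can be aligned; each io_u
	 * is later pointed at its own max_bs-sized slice of it.
	 */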
1323 if (td->mem_type == MEM_MALLOC)
1324 td->orig_buffer = malloc(mem_size);
1325 else if (td->mem_type == MEM_SHM) {
1326 td->shm_id = shmget(IPC_PRIVATE, mem_size, IPC_CREAT | 0600);
1327 if (td->shm_id < 0) {
1333 td->orig_buffer = shmat(td->shm_id, NULL, 0);
1334 if (td->orig_buffer == (void *) -1) {
1341 INIT_LIST_HEAD(&td->io_u_freelist);
1342 INIT_LIST_HEAD(&td->io_u_busylist);
1343 INIT_LIST_HEAD(&td->io_hist_list);
1345 p = ALIGN(td->orig_buffer);
1346 for (i = 0; i < max_units; i++) {
1347 io_u = malloc(sizeof(*io_u));
1348 memset(io_u, 0, sizeof(*io_u));
1349 INIT_LIST_HEAD(&io_u->list);
1351 io_u->buf = p + td->max_bs * i;
1352 list_add(&io_u->list, &td->io_u_freelist);
1358 static void setup_log(struct io_log **log)
1360 struct io_log *l = malloc(sizeof(*l));
1363 l->max_samples = 1024;
1364 l->log = malloc(l->max_samples * sizeof(struct io_sample));
1368 static void finish_log(struct thread_data *td, struct io_log *log, char *name)
1370 char file_name[128];
1374 sprintf(file_name, "client%d_%s.log", td->thread_number, name);
1375 f = fopen(file_name, "w");
1377 perror("fopen log");
1381 for (i = 0; i < log->nr_samples; i++)
1382 fprintf(f, "%lu, %lu\n", log->log[i].time, log->log[i].val);
1389 static int create_file(struct thread_data *td)
1391 unsigned long long left;
1397 * unless overwrite was specifically asked for, let normal io extend the file
1399 if (td_write(td) && !td->overwrite)
1402 if (!td->file_size) {
1403 fprintf(stderr, "Need size for create\n");
1408 printf("Client%d: Laying out IO file\n", td->thread_number);
1410 td->fd = open(td->file_name, O_WRONLY | O_CREAT | O_TRUNC, 0644);
1416 if (ftruncate(td->fd, td->file_size) == -1) {
1421 td->io_size = td->file_size;
1422 b = malloc(td->max_bs);
1423 memset(b, 0, td->max_bs);
1425 left = td->file_size;
1431 r = write(td->fd, b, bs);
1433 if (r == (int) bs) {
1446 if (td->create_fsync)
1455 static int file_exists(struct thread_data *td)
1459 if (stat(td->file_name, &st) != -1)
1462 return errno != ENOENT;
1465 static int get_file_size(struct thread_data *td)
1470 if (fstat(td->fd, &st) == -1) {
1476 * if this is a block device, get the size via the BLKGETSIZE64 ioctl. try
1477 * that as well if this is a link, and fall back to st.st_size if it fails
1479 if (S_ISBLK(st.st_mode) || S_ISLNK(st.st_mode)) {
1480 if (ioctl(td->fd, BLKGETSIZE64, &bytes)) {
1481 if (S_ISBLK(st.st_mode)) {
1491 if (td->file_size > bytes)
1492 bytes = td->file_size;
1495 td->file_size = 1024 * 1024 * 1024;
1497 bytes = td->file_size;
1500 if (td->file_offset > bytes) {
1501 fprintf(stderr, "Client%d: offset larger than length\n", td->thread_number);
1505 td->io_size = bytes - td->file_offset;
1506 if (td->io_size == 0) {
1507 fprintf(stderr, "Client%d: no io blocks\n", td->thread_number);
1515 static int setup_file(struct thread_data *td)
1519 if (!file_exists(td)) {
1520 if (!td->create_file) {
1524 if (create_file(td))
1532 td->fd = open(td->file_name, flags | O_RDONLY);
1543 td->fd = open(td->file_name, flags | O_CREAT, 0600);
1551 if (get_file_size(td))
1554 if (td_write(td) && ftruncate(td->fd, td->file_size) == -1) {
1559 if (td->invalidate_cache) {
1560 if (fadvise(td->fd, td->file_offset, td->file_size, POSIX_FADV_DONTNEED) < 0) {
1569 static void clear_io_state(struct thread_data *td)
1572 lseek(td->fd, 0, SEEK_SET);
1576 td->stat_io_bytes = 0;
1577 td->this_io_bytes = 0;
1580 memset(td->file_map, 0, td->num_maps * sizeof(long));
1583 static void *thread_main(int shm_id, int offset, char *argv[])
1585 struct thread_data *td;
1591 data = shmat(shm_id, NULL, 0);
1592 if (data == (void *) -1) {
1597 td = data + offset * sizeof(struct thread_data);
1603 if (sched_setaffinity(td->pid, sizeof(td->cpumask), &td->cpumask) == -1) {
1608 sprintf(argv[0], "fio%d", offset);
1610 if (td->use_aio && init_aio(td))
1614 if (ioprio_set(IOPRIO_WHO_PROCESS, 0, td->ioprio) == -1) {
1620 sem_post(&startup_sem);
1621 sem_wait(&td->mutex);
1623 if (!td->create_serialize && setup_file(td))
1626 if (init_random_state(td))
1629 gettimeofday(&td->start, NULL);
1631 getrusage(RUSAGE_SELF, &td->ru_start);
1633 while (td->loops--) {
1634 gettimeofday(&td->stat_sample_time, NULL);
1637 memcpy(&td->lastrate, &td->stat_sample_time, sizeof(td->lastrate));
1640 prune_io_piece_log(td);
1658 do_async_verify(td);
1664 td->runtime = mtime_since_now(&td->start);
1665 getrusage(RUSAGE_SELF, &td->ru_end);
1669 finish_log(td, td->bw_log, "bw");
1671 finish_log(td, td->lat_log, "lat");
1673 if (exitall_on_terminate)
1685 sem_post(&startup_sem);
1686 sem_wait(&td->mutex);
1688 td_set_runstate(td, TD_EXITED);
1693 static void free_shm(void)
1695 struct shmid_ds sbuf;
1700 shmctl(shm_id, IPC_RMID, &sbuf);
1704 static int calc_lat(struct io_stat *is, unsigned long *min, unsigned long *max,
1705 double *mean, double *dev)
1709 if (is->samples == 0)
1715 n = (double) is->samples;
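	/*
	 * Standard sample statistics from the running sums:
	 * mean = sum / n, dev = sqrt((sum_sq - n * mean^2) / (n - 1))
	 */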
1716 *mean = (double) is->val / n;
1717 *dev = sqrt(((double) is->val_sq - n * (*mean * *mean)) / (n - 1));
1721 static void show_thread_status(struct thread_data *td)
1723 int prio, prio_class;
1724 unsigned long min, max, bw = 0, ctx;
1725 double mean, dev, usr_cpu, sys_cpu;
1727 if (!td->io_bytes && !td->error)
1731 bw = td->io_bytes / td->runtime;
1733 prio = td->ioprio & 0xff;
1734 prio_class = td->ioprio >> IOPRIO_CLASS_SHIFT;
1736 printf("Client%d: err=%2d, io=%6luMiB, bw=%6luKiB/s, runt=%6lumsec\n", td->thread_number, td->error, td->io_bytes >> 20, bw, td->runtime);
1738 if (calc_lat(&td->slat_stat, &min, &max, &mean, &dev))
1739 printf(" slat (msec): min=%5lu, max=%5lu, avg=%5.02f, dev=%5.02f\n", min, max, mean, dev);
1740 if (calc_lat(&td->clat_stat, &min, &max, &mean, &dev))
1741 printf(" clat (msec): min=%5lu, max=%5lu, avg=%5.02f, dev=%5.02f\n", min, max, mean, dev);
1742 if (calc_lat(&td->bw_stat, &min, &max, &mean, &dev))
1743 printf(" bw (KiB/s) : min=%5lu, max=%5lu, avg=%5.02f, dev=%5.02f\n", min, max, mean, dev);
1748 t = mtime_since(&td->ru_start.ru_utime, &td->ru_end.ru_utime);
1749 usr_cpu = (double) t * 100 / (double) td->runtime;
1751 t = mtime_since(&td->ru_start.ru_stime, &td->ru_end.ru_stime);
1752 sys_cpu = (double) t * 100 / (double) td->runtime;
1758 ctx = td->ru_end.ru_nvcsw + td->ru_end.ru_nivcsw - (td->ru_start.ru_nvcsw + td->ru_start.ru_nivcsw);
1760 printf(" cpu : usr=%3.2f%%, sys=%3.2f%%, ctx=%lu\n", usr_cpu, sys_cpu, ctx);
1763 static int setup_rate(struct thread_data *td)
1765 int nr_reads_per_sec;
1770 if (td->rate < td->ratemin) {
1771 fprintf(stderr, "min rate larger than nominal rate\n");
1775 nr_reads_per_sec = (td->rate * 1024) / td->min_bs;
1776 td->rate_usec_cycle = 1000000 / nr_reads_per_sec;
1777 td->rate_pending_usleep = 0;
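	/*
	 * Worked example (illustrative numbers only): rate=4096 KiB/s with
	 * min_bs=4096 gives nr_reads_per_sec = 4096 * 1024 / 4096 = 1024,
	 * so rate_usec_cycle = 1000000 / 1024, roughly 976 usec per block.
	 */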
1781 static struct thread_data *get_new_job(int global)
1783 struct thread_data *td;
1787 if (thread_number >= max_jobs)
1790 td = &threads[thread_number++];
1791 memset(td, 0, sizeof(*td));
1794 td->thread_number = thread_number;
1796 td->ddir = def_thread.ddir;
1797 td->ioprio = def_thread.ioprio;
1798 td->sequential = def_thread.sequential;
1799 td->bs = def_thread.bs;
1800 td->min_bs = def_thread.min_bs;
1801 td->max_bs = def_thread.max_bs;
1802 td->odirect = def_thread.odirect;
1803 td->thinktime = def_thread.thinktime;
1804 td->fsync_blocks = def_thread.fsync_blocks;
1805 td->start_delay = def_thread.start_delay;
1806 td->timeout = def_thread.timeout;
1807 td->use_aio = def_thread.use_aio;
1808 td->create_file = def_thread.create_file;
1809 td->overwrite = def_thread.overwrite;
1810 td->invalidate_cache = def_thread.invalidate_cache;
1811 td->file_size = def_thread.file_size;
1812 td->file_offset = def_thread.file_offset;
1813 td->rate = def_thread.rate;
1814 td->ratemin = def_thread.ratemin;
1815 td->ratecycle = def_thread.ratecycle;
1816 td->aio_depth = def_thread.aio_depth;
1817 td->sync_io = def_thread.sync_io;
1818 td->mem_type = def_thread.mem_type;
1819 td->bw_avg_time = def_thread.bw_avg_time;
1820 td->create_serialize = def_thread.create_serialize;
1821 td->create_fsync = def_thread.create_fsync;
1822 td->loops = def_thread.loops;
1823 td->verify = def_thread.verify;
1824 td->stonewall = def_thread.stonewall;
1825 memcpy(&td->cpumask, &def_thread.cpumask, sizeof(td->cpumask));
1830 static void put_job(struct thread_data *td)
1832 memset(&threads[td->thread_number - 1], 0, sizeof(*td));
1836 static int add_job(struct thread_data *td, const char *filename, int prioclass,
1839 if (td == &def_thread)
1842 strcpy(td->file_name, filename);
1843 sem_init(&td->mutex, 1, 0);
1844 td->ioprio = (prioclass << IOPRIO_CLASS_SHIFT) | prio;
1846 td->clat_stat.min_val = ULONG_MAX;
1847 td->slat_stat.min_val = ULONG_MAX;
1848 td->bw_stat.min_val = ULONG_MAX;
1850 run_str[td->thread_number - 1] = 'P';
1852 if (td->use_aio && !td->aio_depth)
1855 if (td->min_bs == -1U)
1856 td->min_bs = td->bs;
1857 if (td->max_bs == -1U)
1858 td->max_bs = td->bs;
1866 setup_log(&td->lat_log);
1868 setup_log(&td->bw_log);
1870 printf("Client%d: file=%s, rw=%d, prio=%d/%d, seq=%d, odir=%d, bs=%d-%d, rate=%d, aio=%d, aio_depth=%d\n", td->thread_number, filename, td->ddir, prioclass, prio, td->sequential, td->odirect, td->min_bs, td->max_bs, td->rate, td->use_aio, td->aio_depth);
1874 static void fill_cpu_mask(cpu_set_t *cpumask, int cpu)
1880 for (i = 0; i < sizeof(int) * 8; i++) {
1882 CPU_SET(i, cpumask);
1886 unsigned long get_mult(char c)
1897 return 1024 * 1024 * 1024;
1904 * convert string after '=' into decimal value, noting any size suffix
1906 static int str_cnv(char *p, unsigned long long *val)
1911 str = strstr(p, "=");
1918 *val = strtoul(str, NULL, 10);
1919 if (*val == ULONG_MAX && errno == ERANGE)
1922 *val *= get_mult(str[len - 2]);
1926 static int check_strcnv(char *p, char *name, unsigned long long *val)
1928 if (!strstr(p, name))
1931 return str_cnv(p, val);
1934 static int check_str(char *p, char *name, char *option)
1936 char *s = strstr(p, name);
1942 if (strstr(s, option))
1948 static int check_range(char *p, char *name, unsigned long *s, unsigned long *e)
1953 sprintf(str, "%s=%%lu%%c-%%lu%%c", name);
1954 if (sscanf(p, str, s, &s1, e, &s2) == 4) {
1960 sprintf(str, "%s = %%lu%%c-%%lu%%c", name);
1961 if (sscanf(p, str, s, &s1, e, &s2) == 4) {
1967 sprintf(str, "%s=%%lu-%%lu", name);
1968 if (sscanf(p, str, s, e) == 2)
1971 sprintf(str, "%s = %%lu-%%lu", name);
1972 if (sscanf(p, str, s, e) == 2)
1979 static int check_int(char *p, char *name, unsigned int *val)
1983 sprintf(str, "%s=%%d", name);
1984 if (sscanf(p, str, val) == 1)
1987 sprintf(str, "%s = %%d", name);
1988 if (sscanf(p, str, val) == 1)
1994 static int is_empty_or_comment(char *line)
1998 for (i = 0; i < strlen(line); i++) {
2001 if (!isspace(line[i]) && !iscntrl(line[i]))
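/*
 * For reference, a job file the parser below accepts looks roughly like
 * this (the section name doubles as the file to operate on, "global" sets
 * defaults for later sections; the values are made-up examples and the
 * size suffix handling is assumed from get_mult()):
 *
 *	[global]
 *	bs=4k
 *	direct=1
 *
 *	[/tmp/testfile]
 *	rw=0
 *	size=128m
 *	random
 *
 * Option names are taken from the check_int()/check_str()/check_strcnv()
 * calls in parse_jobs_ini() below.
 */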
2008 static int parse_jobs_ini(char *file)
2010 unsigned int prioclass, prio, cpu, global;
2011 unsigned long long ull;
2012 unsigned long ul1, ul2;
2013 struct thread_data *td;
2014 char *string, *name;
2019 f = fopen(file, "r");
2025 string = malloc(4096);
2028 while ((p = fgets(string, 4096, f)) != NULL) {
2029 if (is_empty_or_comment(p))
2031 if (sscanf(p, "[%s]", name) != 1)
2034 global = !strncmp(name, "global", 6);
2036 name[strlen(name) - 1] = '\0';
2038 td = get_new_job(global);
2046 while ((p = fgets(string, 4096, f)) != NULL) {
2047 if (is_empty_or_comment(p))
2051 if (!check_int(p, "rw", &td->ddir)) {
2055 if (!check_int(p, "prio", &prio)) {
2059 if (!check_int(p, "prioclass", &prioclass)) {
2063 if (!check_int(p, "direct", &td->odirect)) {
2067 if (!check_int(p, "rate", &td->rate)) {
2071 if (!check_int(p, "ratemin", &td->ratemin)) {
2075 if (!check_int(p, "ratecycle", &td->ratecycle)) {
2079 if (!check_int(p, "thinktime", &td->thinktime)) {
2083 if (!check_int(p, "cpumask", &cpu)) {
2084 fill_cpu_mask(&td->cpumask, cpu);
2088 if (!check_int(p, "fsync", &td->fsync_blocks)) {
2092 if (!check_int(p, "startdelay", &td->start_delay)) {
2096 if (!check_int(p, "timeout", &td->timeout)) {
2100 if (!check_int(p, "invalidate",&td->invalidate_cache)) {
2104 if (!check_int(p, "aio_depth", &td->aio_depth)) {
2108 if (!check_int(p, "sync", &td->sync_io)) {
2112 if (!check_int(p, "bwavgtime", &td->bw_avg_time)) {
2116 if (!check_int(p, "create_serialize", &td->create_serialize)) {
2120 if (!check_int(p, "create_fsync", &td->create_fsync)) {
2124 if (!check_int(p, "loops", &td->loops)) {
2128 if (!check_int(p, "verify", &td->verify)) {
2132 if (!check_range(p, "bsrange", &ul1, &ul2)) {
2134 printf("bad min block size, must be a multiple of 512\n");
2138 printf("bad max block size, must be a multiple of 512\n");
2144 if (!check_strcnv(p, "bs", &ull)) {
2146 printf("bad block size, must be a multiple of 512\n");
2152 if (!check_strcnv(p, "size", &td->file_size)) {
2156 if (!check_strcnv(p, "offset", &td->file_offset)) {
2160 if (!check_str(p, "mem", "malloc")) {
2161 td->mem_type = MEM_MALLOC;
2165 if (!check_str(p, "mem", "shm")) {
2166 td->mem_type = MEM_SHM;
2170 if (!strncmp(p, "sequential", 10)) {
2175 if (!strncmp(p, "random", 6)) {
2180 if (!strncmp(p, "aio", 3)) {
2185 if (!strncmp(p, "create", 6)) {
2186 td->create_file = 1;
2190 if (!strncmp(p, "overwrite", 9)) {
2195 if (!strncmp(p, "exitall", 7)) {
2196 exitall_on_terminate = 1;
2200 if (!strncmp(p, "stonewall", 9)) {
2205 printf("Client%d: bad option %s\n",td->thread_number,p);
2209 if (add_job(td, name, prioclass, prio))
2219 static int parse_options(int argc, char *argv[])
2223 for (i = 1; i < argc; i++) {
2224 char *parm = argv[i];
2233 def_thread.sequential = !!atoi(parm);
2237 def_thread.bs = atoi(parm);
2238 def_thread.bs <<= 10;
2239 if (!def_thread.bs) {
2240 printf("bad block size\n");
2241 def_thread.bs = DEF_BS;
2246 def_thread.timeout = atoi(parm);
2250 repeatable = !!atoi(parm);
2254 rate_quit = !!atoi(parm);
2258 def_thread.odirect = !!atoi(parm);
2261 if (i + 1 >= argc) {
2262 printf("-f needs file as arg\n");
2265 ini_file = strdup(argv[i+1]);
2275 printf("bad option %s\n", argv[i]);
2283 static void print_thread_status(struct thread_data *td, int nr_running,
2284 int t_rate, int m_rate)
2286 printf("Threads now running: %d", nr_running);
2287 if (m_rate || t_rate)
2288 printf(", commitrate %d/%dKiB/sec", t_rate, m_rate);
2289 printf(" : [%s]\r", run_str);
2293 static void check_str_update(struct thread_data *td, int n, int t, int m)
2295 char c = run_str[td->thread_number - 1];
2297 if (td->runstate == td->old_runstate)
2300 switch (td->runstate) {
2326 case TD_NOT_CREATED:
2330 printf("state %d\n", td->runstate);
2333 run_str[td->thread_number - 1] = c;
2334 print_thread_status(td, n, t, m);
2335 td->old_runstate = td->runstate;
2338 static void reap_threads(int *nr_running, int *t_rate, int *m_rate)
2343 * reap exited threads (TD_EXITED -> TD_REAPED)
2345 for (i = 0; i < thread_number; i++) {
2346 struct thread_data *td = &threads[i];
2348 check_str_update(td, *nr_running, *t_rate, *m_rate);
2350 if (td->runstate != TD_EXITED)
2353 td_set_runstate(td, TD_REAPED);
2354 waitpid(td->pid, NULL, 0);
2356 (*m_rate) -= td->ratemin;
2357 (*t_rate) -= td->rate;
2358 check_str_update(td, *nr_running, *t_rate, *m_rate);
2365 static void run_threads(char *argv[])
2367 struct timeval genesis;
2368 struct thread_data *td;
2369 unsigned long spent;
2370 int i, todo, nr_running, m_rate, t_rate, nr_started;
2372 printf("Starting %d threads\n", thread_number);
2375 signal(SIGINT, sig_handler);
2377 todo = thread_number;
2380 m_rate = t_rate = 0;
2382 for (i = 0; i < thread_number; i++) {
2385 if (!td->create_serialize)
2389 * do file setup here so it happens sequentially,
2390 * we don't want X number of threads getting their
2391 * client data interspersed on disk
2393 if (setup_file(td)) {
2394 td_set_runstate(td, TD_REAPED);
2399 gettimeofday(&genesis, NULL);
2403 * create threads (TD_NOT_CREATED -> TD_CREATED)
2405 for (i = 0; i < thread_number; i++) {
2408 if (td->runstate != TD_NOT_CREATED)
2412 * never got a chance to start, killed by other
2413 * thread for some reason
2415 if (td->terminate) {
2420 if (td->start_delay) {
2421 spent = mtime_since_now(&genesis);
2423 if (td->start_delay * 1000 > spent)
2427 if (td->stonewall && (nr_started || nr_running))
2430 td_set_runstate(td, TD_CREATED);
2431 check_str_update(td, nr_running, t_rate, m_rate);
2432 sem_init(&startup_sem, 1, 1);
2437 sem_wait(&startup_sem);
2439 thread_main(shm_id, i, argv);
2445 * start created threads (TD_CREATED -> TD_RUNNING)
2447 for (i = 0; i < thread_number; i++) {
2448 struct thread_data *td = &threads[i];
2450 if (td->runstate != TD_CREATED)
2453 td_set_runstate(td, TD_RUNNING);
2456 m_rate += td->ratemin;
2458 check_str_update(td, nr_running, t_rate, m_rate);
2459 sem_post(&td->mutex);
2462 for (i = 0; i < thread_number; i++) {
2463 struct thread_data *td = &threads[i];
2465 if (td->runstate == TD_RUNNING)
2466 run_str[td->thread_number - 1] = '+';
2467 else if (td->runstate == TD_VERIFYING)
2468 run_str[td->thread_number - 1] = 'V';
2472 check_str_update(td, nr_running, t_rate, m_rate);
2475 reap_threads(&nr_running, &t_rate, &m_rate);
2481 while (nr_running) {
2482 reap_threads(&nr_running, &t_rate, &m_rate);
2487 int setup_thread_area(void)
2490 * 1024 is too much on some machines, scale max_jobs if
2491 * we get a failure that looks like too large a shm segment
2494 int s = max_jobs * sizeof(struct thread_data);
2496 shm_id = shmget(0, s, IPC_CREAT | 0600);
2499 if (errno != EINVAL) {
2510 threads = shmat(shm_id, NULL, 0);
2511 if (threads == (void *) -1) {
2520 int main(int argc, char *argv[])
2522 static unsigned long max_run[2], min_run[2];
2523 static unsigned long max_bw[2], min_bw[2];
2524 static unsigned long io_mb[2], agg[2];
2527 if (setup_thread_area())
2530 if (sched_getaffinity(getpid(), sizeof(cpu_set_t), &def_thread.cpumask) == -1) {
2531 perror("sched_getaffinity");
2538 def_thread.ddir = DDIR_READ;
2539 def_thread.bs = DEF_BS;
2540 def_thread.min_bs = -1;
2541 def_thread.max_bs = -1;
2542 def_thread.odirect = DEF_ODIRECT;
2543 def_thread.ratecycle = DEF_RATE_CYCLE;
2544 def_thread.sequential = DEF_SEQUENTIAL;
2545 def_thread.timeout = DEF_TIMEOUT;
2546 def_thread.create_file = DEF_CREATE;
2547 def_thread.overwrite = DEF_OVERWRITE;
2548 def_thread.invalidate_cache = DEF_INVALIDATE;
2549 def_thread.sync_io = DEF_SYNCIO;
2550 def_thread.mem_type = MEM_MALLOC;
2551 def_thread.bw_avg_time = DEF_BWAVGTIME;
2552 def_thread.create_serialize = DEF_CREATE_SER;
2553 def_thread.create_fsync = DEF_CREATE_FSYNC;
2554 def_thread.loops = DEF_LOOPS;
2555 def_thread.verify = DEF_VERIFY;
2556 def_thread.stonewall = DEF_STONEWALL;
2558 i = parse_options(argc, argv);
2561 printf("Need job file\n");
2565 if (parse_jobs_ini(ini_file))
2568 if (!thread_number) {
2569 printf("Nothing to do\n");
2575 min_bw[0] = min_run[0] = ~0UL;
2576 min_bw[1] = min_run[1] = ~0UL;
2577 io_mb[0] = io_mb[1] = 0;
2578 agg[0] = agg[1] = 0;
2579 for (i = 0; i < thread_number; i++) {
2580 struct thread_data *td = &threads[i];
2581 unsigned long bw = 0;
2584 if (td->runtime < min_run[td->ddir])
2585 min_run[td->ddir] = td->runtime;
2586 if (td->runtime > max_run[td->ddir])
2587 max_run[td->ddir] = td->runtime;
2590 bw = td->io_bytes / td->runtime;
2591 if (bw < min_bw[td->ddir])
2592 min_bw[td->ddir] = bw;
2593 if (bw > max_bw[td->ddir])
2594 max_bw[td->ddir] = bw;
2596 io_mb[td->ddir] += td->io_bytes >> 20;
2599 show_thread_status(td);
2603 agg[0] = (io_mb[0] * 1024 * 1000) / max_run[0];
2605 agg[1] = (io_mb[1] * 1024 * 1000) / max_run[1];
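	/*
	 * io_mb[] is in MiB and max_run[] in msec, so MiB * 1024 * 1000 / msec
	 * gives the aggregate bandwidth in KiB/sec printed as "aggrb" below.
	 */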
2607 printf("\nRun status:\n");
2608 if (max_run[DDIR_READ])
2609 printf(" READ: io=%luMiB, aggrb=%lu, minb=%lu, maxb=%lu, mint=%lumsec, maxt=%lumsec\n", io_mb[0], agg[0], min_bw[0], max_bw[0], min_run[0], max_run[0]);
2610 if (max_run[DDIR_WRITE])
2611 printf(" WRITE: io=%luMiB, aggrb=%lu, minb=%lu, maxb=%lu, mint=%lumsec, maxt=%lumsec\n", io_mb[1], agg[1], min_bw[1], max_bw[1], min_run[1], max_run[1]);