/*
 * fio - the flexible io tester
 *
 * Copyright (C) 2005 Jens Axboe <axboe@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <sys/types.h>
#include <semaphore.h>
#include <asm/unistd.h>
#include <asm/types.h>
#define MAX_JOBS (1024)

/*
 * assume we don't have _get either, if _set isn't defined
 */
#ifndef __NR_ioprio_set
#define __NR_ioprio_set 289
#define __NR_ioprio_get 290
#elif defined(__powerpc__) || defined(__powerpc64__)
#define __NR_ioprio_set 273
#define __NR_ioprio_get 274
#elif defined(__x86_64__)
#define __NR_ioprio_set 251
#define __NR_ioprio_get 252
#elif defined(__ia64__)
#define __NR_ioprio_set 1274
#define __NR_ioprio_get 1275
#elif defined(__alpha__)
#define __NR_ioprio_set 442
#define __NR_ioprio_get 443
#elif defined(__s390x__) || defined(__s390__)
#define __NR_ioprio_set 282
#define __NR_ioprio_get 283
#else
#error "Unsupported arch"
#endif
#ifndef __NR_fadvise64
#define __NR_fadvise64 250
#elif defined(__powerpc__) || defined(__powerpc64__)
#define __NR_fadvise64 233
#elif defined(__x86_64__)
#define __NR_fadvise64 221
#elif defined(__ia64__)
#define __NR_fadvise64 1234
#elif defined(__alpha__)
#define __NR_fadvise64 413
#elif defined(__s390x__) || defined(__s390__)
#define __NR_fadvise64 253
#else
#error "Unsupported arch"
#endif
static int ioprio_set(int which, int who, int ioprio)

	return syscall(__NR_ioprio_set, which, who, ioprio);

/*
 * we want fadvise64 really, but it's so tangled... later
 */
static int fadvise(int fd, loff_t offset, size_t len, int advice)

	return syscall(__NR_fadvise64, fd, offset, offset >> 32, len, advice);

	return posix_fadvise(fd, (off_t) offset, len, advice);
	IOPRIO_WHO_PROCESS = 1,

#define IOPRIO_CLASS_SHIFT 13
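/*
 * Worked example of how a priority value is composed (see add_job() below,
 * which does (prioclass << IOPRIO_CLASS_SHIFT) | prio): the scheduling
 * class sits in the top bits, the per-class priority level in the low bits.
 * E.g. class 2 (best-effort), level 4:
 *
 *	(2 << 13) | 4 == 0x4004
 */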
#define DEF_BS (4096)
#define DEF_TIMEOUT (30)
#define DEF_RATE_CYCLE (1000)
#define DEF_ODIRECT (1)
#define DEF_SEQUENTIAL (1)
#define DEF_RAND_REPEAT (1)
#define DEF_OVERWRITE (0)
#define DEF_CREATE (1)
#define DEF_INVALIDATE (1)
#define DEF_SYNCIO (0)
#define DEF_RANDSEED (0xb1899bedUL)
#define DEF_BWAVGTIME (500)
#define DEF_CREATE_SER (1)
#define DEF_CREATE_FSYNC (1)
#define DEF_LOOPS (1)
#define DEF_VERIFY (0)

#define ALIGN(buf) (char *) (((unsigned long) (buf) + MASK) & ~(MASK))
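/*
 * ALIGN() rounds a buffer address up to the next (MASK + 1) boundary,
 * assuming MASK is one less than a power of two. E.g. with MASK == 4095,
 * 0x12345 becomes (0x12345 + 0xfff) & ~0xfff == 0x13000, i.e. page
 * aligned, which is what O_DIRECT io wants.
 */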
static int repeatable = DEF_RAND_REPEAT;
static int rate_quit = 1;
static int write_lat_log;
static int write_bw_log;
static int exitall_on_terminate;

static int thread_number;
static char *ini_file;

static int max_jobs = MAX_JOBS;

static char run_str[MAX_JOBS + 1];
	/* struct io_u */
	struct timeval start_time;
	struct timeval issue_time;

	unsigned long long offset;

	struct list_head list;

	/* struct io_stat */
	unsigned long val_sq;
	unsigned long max_val;
	unsigned long min_val;
	unsigned long samples;

	/* struct io_log */
	unsigned long nr_samples;
	unsigned long max_samples;
	struct io_sample *log;

	/* struct io_piece */
	struct list_head list;
	unsigned long long offset;

#define FIO_HDR_MAGIC 0xf00baaef
struct verify_header {
	unsigned int fio_magic;
	unsigned int len;
	char md5_digest[MD5_HASH_WORDS * 4];
};

#define td_read(td) ((td)->ddir == DDIR_READ)
#define should_fsync(td) (!td_read(td) && !(td)->odirect)
	/* struct thread_data */
	volatile int terminate;
	volatile int runstate;
	volatile int old_runstate;

	unsigned int sequential;

	unsigned int odirect;
	unsigned int thinktime;
	unsigned int fsync_blocks;
	unsigned int start_delay;
	unsigned int timeout;
	unsigned int use_aio;
	unsigned int create_file;
	unsigned int overwrite;
	unsigned int invalidate_cache;
	unsigned int bw_avg_time;
	unsigned int create_serialize;
	unsigned int create_fsync;

	unsigned long long file_size;
	unsigned long long file_offset;
	unsigned int sync_io;
	unsigned int mem_type;

	struct drand48_data bsrange_state;
	struct drand48_data verify_state;

	io_context_t aio_ctx;
	unsigned int aio_depth;
	struct io_event *aio_events;

	unsigned int cur_depth;
	struct list_head io_u_freelist;
	struct list_head io_u_busylist;

	unsigned int ratemin;
	unsigned int ratecycle;
	unsigned long rate_usec_cycle;
	long rate_pending_usleep;
	unsigned long rate_sectors;
	struct timeval lastrate;

	unsigned long runtime;		/* msec */
	unsigned long sectors;

	unsigned long io_blocks;
	unsigned long io_sectors;
	unsigned long this_io_sectors;
	unsigned long last_sectors;

	struct drand48_data random_state;

	/*
	 * bandwidth and latency stats
	 */
	struct io_stat clat_stat;	/* completion latency */
	struct io_stat slat_stat;	/* submission latency */

	struct io_stat bw_stat;		/* bandwidth stats */
	unsigned long stat_io_sectors;
	struct timeval stat_sample_time;

	struct io_log *lat_log;
	struct io_log *bw_log;

	struct timeval start;

	struct list_head io_hist_list;
static struct thread_data *threads;
static struct thread_data def_thread;

static sem_t startup_sem;
static void sig_handler(int sig)

	for (i = 0; i < thread_number; i++) {
		struct thread_data *td = &threads[i];

static int init_random_state(struct thread_data *td)

	fd = open("/dev/random", O_RDONLY);

	if (read(fd, &seed, sizeof(seed)) < (int) sizeof(seed)) {

	srand48_r(seed, &td->bsrange_state);
	srand48_r(seed, &td->verify_state);

	srand48_r(seed, &td->random_state);
static unsigned long utime_since(struct timeval *s, struct timeval *e)

	sec = e->tv_sec - s->tv_sec;
	usec = e->tv_usec - s->tv_usec;
	if (sec > 0 && usec < 0) {

	sec *= (double) 1000000;

static unsigned long mtime_since(struct timeval *s, struct timeval *e)

	sec = e->tv_sec - s->tv_sec;
	usec = e->tv_usec - s->tv_usec;
	if (sec > 0 && usec < 0) {

	sec *= (double) 1000;
	usec /= (double) 1000;

static unsigned long mtime_since_now(struct timeval *s)

	gettimeofday(&t, NULL);
	return mtime_since(s, &t);

static inline unsigned long msec_now(struct timeval *s)

	return s->tv_sec * 1000 + s->tv_usec / 1000;
static unsigned long long get_next_offset(struct thread_data *td)

	unsigned long long kb;

	if (!td->sequential) {
		int min_bs_kb = td->min_bs >> 10;
		unsigned long max_kb = td->sectors << 1;

		lrand48_r(&td->random_state, &r);
		kb = 1 + (double) (max_kb - 1) * r / (RAND_MAX + 1.0);
		kb = (kb + min_bs_kb - 1) & ~(min_bs_kb - 1);

	kb = td->last_sectors << 1;

	return (kb << 10) + td->file_offset;
static unsigned int get_next_buflen(struct thread_data *td)

	if (td->min_bs == td->max_bs)

	lrand48_r(&td->bsrange_state, &r);
	buflen = 1 + (double) (td->max_bs - 1) * r / (RAND_MAX + 1.0);
	buflen = (buflen + td->min_bs - 1) & ~(td->min_bs - 1);
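	/*
	 * The mask trick above rounds buflen up to a multiple of min_bs,
	 * provided min_bs is a power of two: e.g. min_bs == 4096 and a
	 * drawn buflen of 5000 gives (5000 + 4095) & ~4095 == 8192. The
	 * same rounding is applied to the random offset in
	 * get_next_offset().
	 */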
	if (buflen > ((td->sectors - td->this_io_sectors) << 9))
		buflen = (td->sectors - td->this_io_sectors) << 9;

	td->last_sectors += buflen >> 9;

static inline void add_stat_sample(struct thread_data *td, struct io_stat *is,
				   unsigned long val)

	if (val > is->max_val)
	if (val < is->min_val)

	is->val_sq += val * val;
static void add_log_sample(struct thread_data *td, struct io_log *log,
			   unsigned long val)

	if (log->nr_samples == log->max_samples) {
		int new_size = sizeof(struct io_sample) * log->max_samples * 2;

		log->log = realloc(log->log, new_size);
		log->max_samples <<= 1;

	log->log[log->nr_samples].val = val;
	log->log[log->nr_samples].time = mtime_since_now(&td->start);
static void add_clat_sample(struct thread_data *td, unsigned long msec)

	add_stat_sample(td, &td->clat_stat, msec);

		add_log_sample(td, td->lat_log, msec);

static void add_slat_sample(struct thread_data *td, unsigned long msec)

	add_stat_sample(td, &td->slat_stat, msec);

static void add_bw_sample(struct thread_data *td)

	unsigned long spent = mtime_since_now(&td->stat_sample_time);

	if (spent < td->bw_avg_time)

	rate = ((td->this_io_sectors - td->stat_io_sectors) << 9) / spent;
	add_stat_sample(td, &td->bw_stat, rate);

		add_log_sample(td, td->bw_log, rate);

	gettimeofday(&td->stat_sample_time, NULL);
	td->stat_io_sectors = td->this_io_sectors;
static void usec_sleep(int usec)

	struct timespec req = { .tv_sec = 0, .tv_nsec = usec * 1000 };

	rem.tv_sec = rem.tv_nsec = 0;
	nanosleep(&req, &rem);

	req.tv_nsec = rem.tv_nsec;
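	/*
	 * If nanosleep() is interrupted by a signal it reports the unslept
	 * time in rem; re-requesting that remainder (as above) makes the
	 * sleep cover the full interval regardless of signal delivery.
	 */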
static void rate_throttle(struct thread_data *td, unsigned long time_spent,
			  unsigned int bytes)

	unsigned long usec_cycle;

	usec_cycle = td->rate_usec_cycle * (bytes / td->min_bs);

	if (time_spent < usec_cycle) {
		unsigned long s = usec_cycle - time_spent;

		td->rate_pending_usleep += s;
		if (td->rate_pending_usleep >= 100000) {
			usec_sleep(td->rate_pending_usleep);
			td->rate_pending_usleep = 0;

		long overtime = time_spent - usec_cycle;

		td->rate_pending_usleep -= overtime;
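	/*
	 * Note the sleep batching above: per-io deficits accumulate in
	 * rate_pending_usleep and usec_sleep() is only called once at
	 * least 100 msec is owed, so a fast device isn't nanosleep()'ing
	 * after every single io. Ios that run over their slot pay the
	 * debt back by decrementing the pending sleep time.
	 */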
static int check_min_rate(struct thread_data *td, struct timeval *now)

	/*
	 * allow a 2 second settle period in the beginning
	 */
	if (mtime_since(&td->start, now) < 2000)

	/*
	 * if rate_sectors is set, a sample period is already running
	 */
	if (td->rate_sectors) {
		spent = mtime_since(&td->lastrate, now);
		if (spent < td->ratecycle)

		rate = ((td->this_io_sectors - td->rate_sectors) << 9) / spent;
		if (rate < td->ratemin) {
			printf("Client%d: min rate %u not met, got %luKiB/sec\n", td->thread_number, td->ratemin, rate);

	td->rate_sectors = td->this_io_sectors;
	memcpy(&td->lastrate, now, sizeof(*now));
static inline int runtime_exceeded(struct thread_data *td, struct timeval *t)

	if (mtime_since(&td->start, t) >= td->timeout * 1000)

static void fill_random_bytes(struct thread_data *td,
			      unsigned char *p, unsigned int len)

	drand48_r(&td->verify_state, &r);

	/*
	 * lrand48_r seems to be broken and only fills the bottom
	 * 32 bits, even on 64-bit archs with 64-bit longs
	 */
static void hexdump(void *buffer, int len)

	unsigned char *p = buffer;

	for (i = 0; i < len; i++)
		printf("%02x", p[i]);
static int verify_io_u(struct io_u *io_u)

	struct verify_header *hdr = (struct verify_header *) io_u->buf;
	unsigned char *p = (unsigned char *) io_u->buf;
	struct md5_ctx md5_ctx;

	if (hdr->fio_magic != FIO_HDR_MAGIC)

	memset(&md5_ctx, 0, sizeof(md5_ctx));

	md5_update(&md5_ctx, p, hdr->len - sizeof(*hdr));

	ret = memcmp(hdr->md5_digest, md5_ctx.hash, sizeof(md5_ctx.hash));

		hexdump(hdr->md5_digest, sizeof(hdr->md5_digest));
		hexdump(md5_ctx.hash, sizeof(md5_ctx.hash));
/*
 * fill body of io_u->buf with random data and add a header with the
 * md5sum of that data.
 */
static void populate_io_u(struct thread_data *td, struct io_u *io_u)

	struct md5_ctx md5_ctx;
	struct verify_header hdr;
	unsigned char *p = (unsigned char *) io_u->buf;

	hdr.fio_magic = FIO_HDR_MAGIC;
	hdr.len = io_u->buflen;

	fill_random_bytes(td, p, io_u->buflen - sizeof(hdr));

	memset(&md5_ctx, 0, sizeof(md5_ctx));
	md5_update(&md5_ctx, p, io_u->buflen - sizeof(hdr));
	memcpy(hdr.md5_digest, md5_ctx.hash, sizeof(md5_ctx.hash));
	memcpy(io_u->buf, &hdr, sizeof(hdr));
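/*
 * Each verify block on disk thus looks like:
 *
 *	+---------------------------------------+
 *	| verify_header: magic, len, md5 digest |
 *	+---------------------------------------+
 *	| buflen - sizeof(hdr) random bytes     |
 *	+---------------------------------------+
 *
 * The digest covers only the random payload; verify_io_u() checks the
 * magic, recomputes the md5 over the payload and memcmp()s the digests.
 */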
static void put_io_u(struct thread_data *td, struct io_u *io_u)

	list_del(&io_u->list);
	list_add(&io_u->list, &td->io_u_freelist);

#define queue_full(td) (list_empty(&(td)->io_u_freelist))

static struct io_u *__get_io_u(struct thread_data *td)

	io_u = list_entry(td->io_u_freelist.next, struct io_u, list);
	list_del(&io_u->list);
	list_add(&io_u->list, &td->io_u_busylist);
static struct io_u *get_io_u(struct thread_data *td)

	io_u = __get_io_u(td);

	io_u->offset = get_next_offset(td);
	io_u->buflen = get_next_buflen(td);

		populate_io_u(td, io_u);

		io_prep_pread(&io_u->iocb, td->fd, io_u->buf, io_u->buflen, io_u->offset);

		io_prep_pwrite(&io_u->iocb, td->fd, io_u->buf, io_u->buflen, io_u->offset);

	gettimeofday(&io_u->start_time, NULL);
static inline void td_set_runstate(struct thread_data *td, int runstate)

	td->old_runstate = td->runstate;
	td->runstate = runstate;

static int get_next_verify(struct thread_data *td,
			   unsigned long long *offset, unsigned int *len)

	struct io_piece *ipo;

	if (list_empty(&td->io_hist_list))

	ipo = list_entry(td->io_hist_list.next, struct io_piece, list);
	list_del(&ipo->list);

	*offset = ipo->offset;
static void prune_io_piece_log(struct thread_data *td)

	struct io_piece *ipo;

	while (!list_empty(&td->io_hist_list)) {
		ipo = list_entry(td->io_hist_list.next, struct io_piece, list);

		list_del(&ipo->list);

/*
 * if ipos overlap, kill the old ipo
 */
static int ipo_overlap(struct io_piece *old, struct io_piece *new)

	unsigned long long old_end = old->offset + old->len;
	unsigned long long new_end = new->offset + new->len;

	if ((new->offset > old->offset && new->offset < old_end) ||
	    (new_end > old->offset && new_end < old_end)) {
		list_add(&new->list, &old->list);
		list_del(&old->list);
/*
 * log a successful write, so we can unwind the log for verify
 */
static void log_io_piece(struct thread_data *td, struct io_u *io_u)

	struct io_piece *ipo = malloc(sizeof(*ipo));
	struct list_head *entry;

	INIT_LIST_HEAD(&ipo->list);
	ipo->offset = io_u->offset;
	ipo->len = io_u->buflen;

	if (td->sequential) {
		list_add_tail(&ipo->list, &td->io_hist_list);

	/*
	 * for random io, sort the list so verify will run faster
	 */
	entry = &td->io_hist_list;
	while ((entry = entry->prev) != &td->io_hist_list) {
		struct io_piece *__ipo = list_entry(entry, struct io_piece, list);

		if (ipo_overlap(__ipo, ipo))

		if (__ipo->offset < ipo->offset)

	list_add(&ipo->list, entry);
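	/*
	 * The backwards scan above terminates quickly when offsets mostly
	 * ascend, and ipo_overlap() dropping the older piece means a later
	 * overwrite of the same region is what verify checks against, so
	 * stale data never trips the md5 comparison.
	 */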
static void do_sync_verify(struct thread_data *td)

	struct io_u *io_u = NULL;

	td_set_runstate(td, TD_VERIFYING);

	io_u = __get_io_u(td);

		unsigned long size = td->sectors << 9;

		if (fadvise(td->fd, 0, size, POSIX_FADV_DONTNEED) < 0) {

		gettimeofday(&t, NULL);
		if (runtime_exceeded(td, &t))

		if (get_next_verify(td, &io_u->offset, &io_u->buflen))

		if (td->cur_off != io_u->offset) {
			if (lseek(td->fd, io_u->offset, SEEK_SET) == -1) {

		ret = read(td->fd, io_u->buf, io_u->buflen);
		if (ret < (int) io_u->buflen) {

		if (verify_io_u(io_u))

		td->cur_off = io_u->offset + io_u->buflen;

	td_set_runstate(td, TD_RUNNING);
static void do_sync_io(struct thread_data *td)

	unsigned long msec, usec;
	struct io_u *io_u = NULL;

	while (td->this_io_sectors < td->sectors) {

		if (td->cur_off != io_u->offset) {
			if (lseek(td->fd, io_u->offset, SEEK_SET) == -1) {

			ret = read(td->fd, io_u->buf, io_u->buflen);

			ret = write(td->fd, io_u->buf, io_u->buflen);

		if (ret < (int) io_u->buflen) {

			log_io_piece(td, io_u);

		td->io_sectors += io_u->buflen >> 9;
		td->this_io_sectors += io_u->buflen >> 9;
		td->cur_off = io_u->offset + io_u->buflen;

		gettimeofday(&e, NULL);

		usec = utime_since(&io_u->start_time, &e);

		rate_throttle(td, usec, io_u->buflen);

		if (check_min_rate(td, &e)) {

		add_clat_sample(td, msec);

		if (runtime_exceeded(td, &e))

			usec_sleep(td->thinktime);

		if (should_fsync(td) && td->fsync_blocks &&
		    (td->io_blocks % td->fsync_blocks) == 0)

	if (should_fsync(td))
static int io_u_queue(struct thread_data *td, struct io_u *io_u)

	struct iocb *iocb = &io_u->iocb;

	ret = io_submit(td->aio_ctx, 1, &iocb);

	else if (ret == -EAGAIN)

	else if (ret == -EINTR)

#define iocb_time(iocb) ((unsigned long) (iocb)->data)
#define ev_to_iou(ev) (struct io_u *) ((unsigned long) (ev)->obj)
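/*
 * ev_to_iou() relies on the iocb sitting at the start of struct io_u
 * (an assumption about the struct layout, not spelled out here): the
 * kernel hands the iocb pointer back in ev->obj, and casting it
 * recovers the containing io_u without a container_of().
 */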
static int ios_completed(struct thread_data *td, int nr)

	gettimeofday(&e, NULL);

	for (i = 0, bytes_done = 0; i < nr; i++) {
		io_u = ev_to_iou(td->aio_events + i);

		td->io_sectors += io_u->buflen >> 9;
		td->this_io_sectors += io_u->buflen >> 9;

		msec = mtime_since(&io_u->issue_time, &e);

		add_clat_sample(td, msec);

			log_io_piece(td, io_u);

		bytes_done += io_u->buflen;
static void cleanup_pending_aio(struct thread_data *td)

	struct timespec ts = { .tv_sec = 0, .tv_nsec = 0 };
	struct list_head *entry, *n;

	/*
	 * get immediately available events, if any
	 */
	r = io_getevents(td->aio_ctx, 0, td->cur_depth, td->aio_events, &ts);

		ios_completed(td, r);

	/*
	 * now cancel remaining active events
	 */
	list_for_each_safe(entry, n, &td->io_u_busylist) {
		io_u = list_entry(entry, struct io_u, list);

		r = io_cancel(td->aio_ctx, &io_u->iocb, td->aio_events);

	if (td->cur_depth) {
		r = io_getevents(td->aio_ctx, td->cur_depth, td->cur_depth, td->aio_events, NULL);

			ios_completed(td, r);
static int async_do_verify(struct thread_data *td, struct io_u **io_u)

	struct io_u *v_io_u = *io_u;

		ret = verify_io_u(v_io_u);
		put_io_u(td, v_io_u);

static void do_async_verify(struct thread_data *td)

	struct io_u *io_u, *v_io_u = NULL;

	td_set_runstate(td, TD_VERIFYING);

		gettimeofday(&t, NULL);
		if (runtime_exceeded(td, &t))

		io_u = __get_io_u(td);

		if (get_next_verify(td, &io_u->offset, &io_u->buflen)) {

		io_prep_pread(&io_u->iocb, td->fd, io_u->buf, io_u->buflen, io_u->offset);
		ret = io_u_queue(td, io_u);

		/*
		 * we have one pending to verify, do that while we are
		 * doing io on the next one
		 */
		if (async_do_verify(td, &v_io_u))

		ret = io_getevents(td->aio_ctx, 1, 1, td->aio_events, NULL);

		v_io_u = ev_to_iou(td->aio_events);

		td->cur_off = v_io_u->offset + v_io_u->buflen;

	/*
	 * if we can't submit more io, we need to verify now
	 */
	if (queue_full(td) && async_do_verify(td, &v_io_u))

	async_do_verify(td, &v_io_u);

	cleanup_pending_aio(td);

	td_set_runstate(td, TD_RUNNING);
static void do_async_io(struct thread_data *td)

	struct timeval s, e;

	while (td->this_io_sectors < td->sectors) {
		struct timespec ts = { .tv_sec = 0, .tv_nsec = 0 };
		struct timespec *timeout;
		int ret, min_evts = 0;

		unsigned int bytes_done;

		io_u = get_io_u(td);

		memcpy(&s, &io_u->start_time, sizeof(s));

		ret = io_u_queue(td, io_u);

		gettimeofday(&io_u->issue_time, NULL);
		add_slat_sample(td, mtime_since(&io_u->start_time, &io_u->issue_time));
		if (td->cur_depth < td->aio_depth) {

		ret = io_getevents(td->aio_ctx, min_evts, td->cur_depth, td->aio_events, timeout);

		bytes_done = ios_completed(td, ret);

		/*
		 * the rate is batched for now, it should work for batches
		 * of completions except the very first one, which may look
		 * a little worse
		 */
		gettimeofday(&e, NULL);
		usec = utime_since(&s, &e);

		rate_throttle(td, usec, bytes_done);

		if (check_min_rate(td, &e)) {
			td->error = ENODATA;

		if (runtime_exceeded(td, &e))

			usec_sleep(td->thinktime);

		if (should_fsync(td) && td->fsync_blocks &&
		    (td->io_blocks % td->fsync_blocks) == 0)

	cleanup_pending_aio(td);

	if (should_fsync(td))
static void cleanup_aio(struct thread_data *td)

	io_destroy(td->aio_ctx);

		free(td->aio_events);

static int init_aio(struct thread_data *td)

	if (io_queue_init(td->aio_depth, &td->aio_ctx)) {

	td->aio_events = malloc(td->aio_depth * sizeof(struct io_event));
static void cleanup_io_u(struct thread_data *td)

	struct list_head *entry, *n;

	list_for_each_safe(entry, n, &td->io_u_freelist) {
		io_u = list_entry(entry, struct io_u, list);

		list_del(&io_u->list);

	if (td->mem_type == MEM_MALLOC)
		free(td->orig_buffer);
	else if (td->mem_type == MEM_SHM) {
		struct shmid_ds sbuf;

		shmdt(td->orig_buffer);
		shmctl(td->shm_id, IPC_RMID, &sbuf);
static int init_io_u(struct thread_data *td)

	int i, max_units, mem_size;

		max_units = td->aio_depth;

	mem_size = td->max_bs * max_units + MASK;

	if (td->mem_type == MEM_MALLOC)
		td->orig_buffer = malloc(mem_size);
	else if (td->mem_type == MEM_SHM) {
		td->shm_id = shmget(IPC_PRIVATE, mem_size, IPC_CREAT | 0600);
		if (td->shm_id < 0) {

		td->orig_buffer = shmat(td->shm_id, NULL, 0);
		if (td->orig_buffer == (void *) -1) {

	INIT_LIST_HEAD(&td->io_u_freelist);
	INIT_LIST_HEAD(&td->io_u_busylist);
	INIT_LIST_HEAD(&td->io_hist_list);

	p = ALIGN(td->orig_buffer);
	for (i = 0; i < max_units; i++) {
		io_u = malloc(sizeof(*io_u));
		memset(io_u, 0, sizeof(*io_u));
		INIT_LIST_HEAD(&io_u->list);

		io_u->buf = p + td->max_bs * i;
		list_add(&io_u->list, &td->io_u_freelist);
static void setup_log(struct io_log **log)

	struct io_log *l = malloc(sizeof(*l));

	l->max_samples = 1024;
	l->log = malloc(l->max_samples * sizeof(struct io_sample));

static void finish_log(struct thread_data *td, struct io_log *log, char *name)

	char file_name[128];

	sprintf(file_name, "client%d_%s.log", td->thread_number, name);
	f = fopen(file_name, "w");

		perror("fopen log");

	for (i = 0; i < log->nr_samples; i++)
		fprintf(f, "%lu, %lu\n", log->log[i].time, log->log[i].val);
static int create_file(struct thread_data *td)

	unsigned long long left;

	/*
	 * unless specifically asked for overwrite, let normal io extend it
	 */
	if (!td_read(td) && !td->overwrite)

	if (!td->file_size) {
		fprintf(stderr, "Need size for create\n");

	printf("Client%d: Laying out IO file\n", td->thread_number);

	td->fd = open(td->file_name, O_WRONLY | O_CREAT | O_TRUNC, 0644);

	td->sectors = td->file_size >> 9;
	b = malloc(td->max_bs);
	memset(b, 0, td->max_bs);

	left = td->file_size;

		r = write(td->fd, b, bs);

		if (r == (int) bs) {

	if (td->create_fsync)
static int file_exists(struct thread_data *td)

	if (stat(td->file_name, &st) != -1)

	return errno != ENOENT;

static int setup_file(struct thread_data *td)

	if (!file_exists(td)) {
		if (!td->create_file) {

		if (create_file(td))

		td->fd = open(td->file_name, flags | O_RDONLY);

		td->fd = open(td->file_name, flags | O_CREAT, 0600);

	if (fstat(td->fd, &st) == -1) {

		if (td->file_size > st.st_size)
			st.st_size = td->file_size;

			td->file_size = 1024 * 1024 * 1024;

		st.st_size = td->file_size;

	td->sectors = (st.st_size - td->file_offset) >> 9;

		fprintf(stderr, "Client%d: no io blocks\n", td->thread_number);

	if (td->invalidate_cache) {
		if (fadvise(td->fd, 0, st.st_size, POSIX_FADV_DONTNEED) < 0) {
static void clear_io_state(struct thread_data *td)

	lseek(td->fd, 0, SEEK_SET);

	td->last_sectors = 0;
	td->stat_io_sectors = 0;
	td->this_io_sectors = 0;
static void *thread_main(int shm_id, int offset, char *argv[])

	struct thread_data *td;

	data = shmat(shm_id, NULL, 0);
	if (data == (void *) -1) {

	td = data + offset * sizeof(struct thread_data);
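	/*
	 * Pointer math on the shared segment: arithmetic on the void *
	 * returned by shmat() is byte-based (a GCC extension), so this is
	 * equivalent to &((struct thread_data *) data)[offset]. Keeping
	 * all thread_data in one shm segment is what lets the parent
	 * watch each child's runstate and stats while it runs.
	 */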
	if (sched_setaffinity(td->pid, sizeof(td->cpumask), &td->cpumask) == -1) {

	sprintf(argv[0], "fio%d", offset);

	if (td->use_aio && init_aio(td))

	if (init_random_state(td))

		if (ioprio_set(IOPRIO_WHO_PROCESS, 0, td->ioprio) == -1) {

	sem_post(&startup_sem);
	sem_wait(&td->mutex);

	if (!td->create_serialize && setup_file(td))

	gettimeofday(&td->start, NULL);

	while (td->loops--) {
		gettimeofday(&td->stat_sample_time, NULL);

		memcpy(&td->lastrate, &td->stat_sample_time, sizeof(td->lastrate));

		prune_io_piece_log(td);

			do_async_verify(td);

	td->runtime = mtime_since_now(&td->start);

		finish_log(td, td->bw_log, "bw");

		finish_log(td, td->lat_log, "lat");

	if (exitall_on_terminate)

	sem_post(&startup_sem);
	sem_wait(&td->mutex);

	td_set_runstate(td, TD_EXITED);
static void free_shm(void)

	struct shmid_ds sbuf;

		shmctl(shm_id, IPC_RMID, &sbuf);
static int calc_lat(struct io_stat *is, unsigned long *min, unsigned long *max,
		    double *mean, double *dev)

	if (is->samples == 0)

	n = (double) is->samples;
	*mean = (double) is->val / n;
	*dev = sqrt(((double) is->val_sq - n * (*mean) * (*mean)) / (n - 1));
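/*
 * One-pass stddev from the running sums kept by add_stat_sample():
 * only sum(x) (is->val) and sum(x^2) (is->val_sq) need storing, and
 * the sample variance falls out as (sum(x^2) - n * mean^2) / (n - 1).
 */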
static void show_thread_status(struct thread_data *td)

	int prio, prio_class;
	unsigned long min, max, bw = 0;

	if (!td->io_sectors && !td->error)

		bw = (td->io_sectors << 9) / td->runtime;

	prio = td->ioprio & 0xff;
	prio_class = td->ioprio >> IOPRIO_CLASS_SHIFT;

	printf("Client%d: err=%2d, io=%6luMiB, bw=%6luKiB/s, runt=%6lumsec\n", td->thread_number, td->error, td->io_sectors >> 11, bw, td->runtime);

	if (calc_lat(&td->slat_stat, &min, &max, &mean, &dev))
		printf(" slat (msec): min=%5lu, max=%5lu, avg=%5.02f, dev=%5.02f\n", min, max, mean, dev);
	if (calc_lat(&td->clat_stat, &min, &max, &mean, &dev))
		printf(" clat (msec): min=%5lu, max=%5lu, avg=%5.02f, dev=%5.02f\n", min, max, mean, dev);
	if (calc_lat(&td->bw_stat, &min, &max, &mean, &dev))
		printf(" bw (KiB/s) : min=%5lu, max=%5lu, avg=%5.02f, dev=%5.02f\n", min, max, mean, dev);
static int setup_rate(struct thread_data *td)

	int nr_reads_per_sec;

	if (td->rate < td->ratemin) {
		fprintf(stderr, "min rate larger than nominal rate\n");

	nr_reads_per_sec = td->rate * 1024 / td->min_bs;
	td->rate_usec_cycle = 1000000 / nr_reads_per_sec;
	td->rate_pending_usleep = 0;
static struct thread_data *get_new_job(int global)

	struct thread_data *td;

	if (thread_number >= max_jobs)

	td = &threads[thread_number++];
	memset(td, 0, sizeof(*td));

	td->thread_number = thread_number;

	td->ddir = def_thread.ddir;
	td->ioprio = def_thread.ioprio;
	td->sequential = def_thread.sequential;
	td->bs = def_thread.bs;
	td->min_bs = def_thread.min_bs;
	td->max_bs = def_thread.max_bs;
	td->odirect = def_thread.odirect;
	td->thinktime = def_thread.thinktime;
	td->fsync_blocks = def_thread.fsync_blocks;
	td->start_delay = def_thread.start_delay;
	td->timeout = def_thread.timeout;
	td->use_aio = def_thread.use_aio;
	td->create_file = def_thread.create_file;
	td->overwrite = def_thread.overwrite;
	td->invalidate_cache = def_thread.invalidate_cache;
	td->file_size = def_thread.file_size;
	td->file_offset = def_thread.file_offset;
	td->rate = def_thread.rate;
	td->ratemin = def_thread.ratemin;
	td->ratecycle = def_thread.ratecycle;
	td->aio_depth = def_thread.aio_depth;
	td->sync_io = def_thread.sync_io;
	td->mem_type = def_thread.mem_type;
	td->bw_avg_time = def_thread.bw_avg_time;
	td->create_serialize = def_thread.create_serialize;
	td->create_fsync = def_thread.create_fsync;
	td->loops = def_thread.loops;
	td->verify = def_thread.verify;
	memcpy(&td->cpumask, &def_thread.cpumask, sizeof(td->cpumask));

static void put_job(struct thread_data *td)

	memset(&threads[td->thread_number - 1], 0, sizeof(*td));
static int add_job(struct thread_data *td, const char *filename, int prioclass,
		   int prio)

	if (td == &def_thread)

	strcpy(td->file_name, filename);
	sem_init(&td->mutex, 1, 0);
	td->ioprio = (prioclass << IOPRIO_CLASS_SHIFT) | prio;

	td->clat_stat.min_val = ULONG_MAX;
	td->slat_stat.min_val = ULONG_MAX;
	td->bw_stat.min_val = ULONG_MAX;

	run_str[td->thread_number - 1] = 'P';

	if (td->use_aio && !td->aio_depth)

	if (td->min_bs == -1U)
		td->min_bs = td->bs;
	if (td->max_bs == -1U)
		td->max_bs = td->bs;

		setup_log(&td->lat_log);

		setup_log(&td->bw_log);

	printf("Client%d: file=%s, rw=%d, prio=%d/%d, seq=%d, odir=%d, bs=%d-%d, rate=%d, aio=%d, aio_depth=%d\n", td->thread_number, filename, td->ddir, prioclass, prio, td->sequential, td->odirect, td->min_bs, td->max_bs, td->rate, td->use_aio, td->aio_depth);
static void fill_cpu_mask(cpu_set_t *cpumask, int cpu)

	for (i = 0; i < sizeof(int) * 8; i++) {
			CPU_SET(i, cpumask);
unsigned long get_mult(char c)

		return 1024 * 1024 * 1024;

/*
 * convert string after '=' into decimal value, noting any size suffix
 */
static int str_cnv(char *p, unsigned long long *val)

	str = strstr(p, "=");

	*val = strtoul(str, NULL, 10);
	if (*val == ULONG_MAX && errno == ERANGE)

	*val *= get_mult(str[len - 2]);
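	/*
	 * A worked example, assuming get_mult() maps 'k'/'m'/'g' the way
	 * the visible gigabyte case suggests: "size=128m" parses as
	 * 128 * 1024 * 1024. str[len - 2] is the suffix character,
	 * presumably because the fgets() line still ends in a newline.
	 */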
static int check_strcnv(char *p, char *name, unsigned long long *val)

	if (!strstr(p, name))

	return str_cnv(p, val);

static int check_str(char *p, char *name, char *option)

	char *s = strstr(p, name);

	if (strstr(s, option))

static int check_range(char *p, char *name, unsigned long *s, unsigned long *e)

	sprintf(str, "%s=%%lu%%c-%%lu%%c", name);
	if (sscanf(p, str, s, &s1, e, &s2) == 4) {

	sprintf(str, "%s = %%lu%%c-%%lu%%c", name);
	if (sscanf(p, str, s, &s1, e, &s2) == 4) {

	sprintf(str, "%s=%%lu-%%lu", name);
	if (sscanf(p, str, s, e) == 2)

	sprintf(str, "%s = %%lu-%%lu", name);
	if (sscanf(p, str, s, e) == 2)

static int check_int(char *p, char *name, unsigned int *val)

	sprintf(str, "%s=%%d", name);
	if (sscanf(p, str, val) == 1)

	sprintf(str, "%s = %%d", name);
	if (sscanf(p, str, val) == 1)

static int is_empty_or_comment(char *line)

	for (i = 0; i < strlen(line); i++) {

		if (!isspace(line[i]) && !iscntrl(line[i]))
static int parse_jobs_ini(char *file)

	unsigned int prioclass, prio, cpu, global;
	unsigned long long ull;
	unsigned long ul1, ul2;
	struct thread_data *td;
	char *string, *name;

	f = fopen(file, "r");

	string = malloc(4096);

	while ((p = fgets(string, 4096, f)) != NULL) {
		if (is_empty_or_comment(p))
		if (sscanf(p, "[%s]", name) != 1)

		global = !strncmp(name, "global", 6);

		name[strlen(name) - 1] = '\0';

		td = get_new_job(global);
		while ((p = fgets(string, 4096, f)) != NULL) {
			if (is_empty_or_comment(p))

			if (!check_int(p, "rw", &td->ddir)) {

			if (!check_int(p, "prio", &prio)) {

			if (!check_int(p, "prioclass", &prioclass)) {

			if (!check_int(p, "direct", &td->odirect)) {

			if (!check_int(p, "rate", &td->rate)) {

			if (!check_int(p, "ratemin", &td->ratemin)) {

			if (!check_int(p, "ratecycle", &td->ratecycle)) {

			if (!check_int(p, "thinktime", &td->thinktime)) {

			if (!check_int(p, "cpumask", &cpu)) {
				fill_cpu_mask(&td->cpumask, cpu);

			if (!check_int(p, "fsync", &td->fsync_blocks)) {

			if (!check_int(p, "startdelay", &td->start_delay)) {

			if (!check_int(p, "timeout", &td->timeout)) {

			if (!check_int(p, "invalidate", &td->invalidate_cache)) {

			if (!check_int(p, "aio_depth", &td->aio_depth)) {

			if (!check_int(p, "sync", &td->sync_io)) {

			if (!check_int(p, "bwavgtime", &td->bw_avg_time)) {

			if (!check_int(p, "create_serialize", &td->create_serialize)) {

			if (!check_int(p, "create_fsync", &td->create_fsync)) {

			if (!check_int(p, "loops", &td->loops)) {

			if (!check_int(p, "verify", &td->verify)) {

			if (!check_range(p, "bsrange", &ul1, &ul2)) {

				printf("bad min block size, must be a multiple of 512\n");

				printf("bad max block size, must be a multiple of 512\n");

			if (!check_strcnv(p, "bs", &ull)) {

				printf("bad block size, must be a multiple of 512\n");

			if (!check_strcnv(p, "size", &td->file_size)) {

			if (!check_strcnv(p, "offset", &td->file_offset)) {

			if (!check_str(p, "mem", "malloc")) {
				td->mem_type = MEM_MALLOC;

			if (!check_str(p, "mem", "shm")) {
				td->mem_type = MEM_SHM;

			if (!strncmp(p, "sequential", 10)) {

			if (!strncmp(p, "random", 6)) {

			if (!strncmp(p, "aio", 3)) {

			if (!strncmp(p, "create", 6)) {
				td->create_file = 1;

			if (!strncmp(p, "overwrite", 9)) {

			if (!strncmp(p, "exitall", 7)) {
				exitall_on_terminate = 1;

			printf("Client%d: bad option %s\n", td->thread_number, p);

		if (add_job(td, name, prioclass, prio))
static int parse_options(int argc, char *argv[])

	for (i = 1; i < argc; i++) {
		char *parm = argv[i];

			def_thread.sequential = !!atoi(parm);

			def_thread.bs = atoi(parm);
			def_thread.bs <<= 10;
			if (!def_thread.bs) {
				printf("bad block size\n");
				def_thread.bs = DEF_BS;

			def_thread.timeout = atoi(parm);

			repeatable = !!atoi(parm);

			rate_quit = !!atoi(parm);

			def_thread.odirect = !!atoi(parm);

			if (i + 1 >= argc) {
				printf("-f needs file as arg\n");

			ini_file = strdup(argv[i + 1]);

			printf("bad option %s\n", argv[i]);
static void print_thread_status(struct thread_data *td, int nr_running,
				int t_rate, int m_rate)

	printf("Threads now running: %d", nr_running);
	if (m_rate || t_rate)
		printf(", commitrate %d/%dKiB/sec", t_rate, m_rate);
	printf(" : [%s]\r", run_str);

static void check_str_update(struct thread_data *td, int n, int t, int m)

	char c = run_str[td->thread_number - 1];

	if (td->runstate == td->old_runstate)

	switch (td->runstate) {

	case TD_NOT_CREATED:

		printf("state %d\n", td->runstate);

	run_str[td->thread_number - 1] = c;
	print_thread_status(td, n, t, m);
	td->old_runstate = td->runstate;
static void reap_threads(int *nr_running, int *t_rate, int *m_rate)

	/*
	 * reap exited threads (TD_EXITED -> TD_REAPED)
	 */
	for (i = 0; i < thread_number; i++) {
		struct thread_data *td = &threads[i];

		check_str_update(td, *nr_running, *t_rate, *m_rate);

		if (td->runstate != TD_EXITED)

		td_set_runstate(td, TD_REAPED);
		waitpid(td->pid, NULL, 0);

		(*m_rate) -= td->ratemin;
		(*t_rate) -= td->rate;
		check_str_update(td, *nr_running, *t_rate, *m_rate);
static void run_threads(char *argv[])

	struct timeval genesis;
	struct thread_data *td;
	unsigned long spent;
	int i, todo, nr_running, m_rate, t_rate;

	printf("Starting %d threads\n", thread_number);

	signal(SIGINT, sig_handler);

	todo = thread_number;

	m_rate = t_rate = 0;

	for (i = 0; i < thread_number; i++) {

		if (!td->create_serialize)

		/*
		 * do file setup here so it happens sequentially,
		 * we don't want X number of threads getting their
		 * client data interspersed on disk
		 */
		if (setup_file(td)) {
			td_set_runstate(td, TD_REAPED);

	gettimeofday(&genesis, NULL);

	/*
	 * create threads (TD_NOT_CREATED -> TD_CREATED)
	 */
	for (i = 0; i < thread_number; i++) {

		if (td->runstate != TD_NOT_CREATED)

		/*
		 * never got a chance to start, killed by other
		 * thread for some reason
		 */
		if (td->terminate) {

		if (td->start_delay) {
			spent = mtime_since_now(&genesis);

			if (td->start_delay * 1000 > spent)

		td_set_runstate(td, TD_CREATED);
		check_str_update(td, nr_running, t_rate, m_rate);
		sem_init(&startup_sem, 1, 1);

		sem_wait(&startup_sem);

			thread_main(shm_id, i, argv);

	/*
	 * start created threads (TD_CREATED -> TD_RUNNING)
	 */
	for (i = 0; i < thread_number; i++) {
		struct thread_data *td = &threads[i];

		if (td->runstate != TD_CREATED)

		td_set_runstate(td, TD_RUNNING);

		m_rate += td->ratemin;

		check_str_update(td, nr_running, t_rate, m_rate);
		sem_post(&td->mutex);

	for (i = 0; i < thread_number; i++) {
		struct thread_data *td = &threads[i];

		if (td->runstate == TD_RUNNING)
			run_str[td->thread_number - 1] = '+';
		else if (td->runstate == TD_VERIFYING)
			run_str[td->thread_number - 1] = 'V';

		check_str_update(td, nr_running, t_rate, m_rate);

	reap_threads(&nr_running, &t_rate, &m_rate);

	while (nr_running) {
		reap_threads(&nr_running, &t_rate, &m_rate);
int setup_thread_area(void)

	/*
	 * 1024 is too much on some machines, scale max_jobs if
	 * we get a failure that looks like too large a shm segment
	 */
	int s = max_jobs * sizeof(struct thread_data);

	shm_id = shmget(0, s, IPC_CREAT | 0600);

		if (errno != EINVAL) {

	threads = shmat(shm_id, NULL, 0);
	if (threads == (void *) -1) {
int main(int argc, char *argv[])

	static unsigned long max_run[2], min_run[2];
	static unsigned long max_bw[2], min_bw[2];
	static unsigned long io_mb[2], agg[2];

	if (setup_thread_area())

	if (sched_getaffinity(getpid(), sizeof(cpu_set_t), &def_thread.cpumask) == -1) {
		perror("sched_getaffinity");

	def_thread.ddir = DDIR_READ;
	def_thread.bs = DEF_BS;
	def_thread.min_bs = -1;
	def_thread.max_bs = -1;
	def_thread.odirect = DEF_ODIRECT;
	def_thread.ratecycle = DEF_RATE_CYCLE;
	def_thread.sequential = DEF_SEQUENTIAL;
	def_thread.timeout = DEF_TIMEOUT;
	def_thread.create_file = DEF_CREATE;
	def_thread.overwrite = DEF_OVERWRITE;
	def_thread.invalidate_cache = DEF_INVALIDATE;
	def_thread.sync_io = DEF_SYNCIO;
	def_thread.mem_type = MEM_MALLOC;
	def_thread.bw_avg_time = DEF_BWAVGTIME;
	def_thread.create_serialize = DEF_CREATE_SER;
	def_thread.create_fsync = DEF_CREATE_FSYNC;
	def_thread.loops = DEF_LOOPS;
	def_thread.verify = DEF_VERIFY;

	i = parse_options(argc, argv);

		printf("Need job file\n");

	if (parse_jobs_ini(ini_file))

	if (!thread_number) {
		printf("Nothing to do\n");
	min_bw[0] = min_run[0] = ~0UL;
	min_bw[1] = min_run[1] = ~0UL;
	io_mb[0] = io_mb[1] = 0;
	agg[0] = agg[1] = 0;
	for (i = 0; i < thread_number; i++) {
		struct thread_data *td = &threads[i];
		unsigned long bw = 0;

		if (td->runtime < min_run[td->ddir])
			min_run[td->ddir] = td->runtime;
		if (td->runtime > max_run[td->ddir])
			max_run[td->ddir] = td->runtime;

			bw = (td->io_sectors << 9) / td->runtime;
		if (bw < min_bw[td->ddir])
			min_bw[td->ddir] = bw;
		if (bw > max_bw[td->ddir])
			max_bw[td->ddir] = bw;

		io_mb[td->ddir] += td->io_sectors >> 11;

		show_thread_status(td);

		agg[0] = io_mb[0] * 1024 * 1000 / max_run[0];

		agg[1] = io_mb[1] * 1024 * 1000 / max_run[1];

	printf("\nRun status:\n");
	if (max_run[DDIR_READ])
		printf(" READ: io=%luMiB, aggrb=%lu, minb=%lu, maxb=%lu, mint=%lumsec, maxt=%lumsec\n", io_mb[0], agg[0], min_bw[0], max_bw[0], min_run[0], max_run[0]);
	if (max_run[DDIR_WRITE])
		printf(" WRITE: io=%luMiB, aggrb=%lu, minb=%lu, maxb=%lu, mint=%lumsec, maxt=%lumsec\n", io_mb[1], agg[1], min_bw[1], max_bw[1], min_run[1], max_run[1]);