/*
 * fio - the flexible io tester
 *
 * Copyright (C) 2005 Jens Axboe <axboe@suse.de>
 * Copyright (C) 2006 Jens Axboe <axboe@kernel.dk>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <sys/ioctl.h>

#define ALIGN(buf)	(char *) (((unsigned long) (buf) + MASK) & ~(MASK))

int thread_number = 0;
char *fio_inst_prefix = _INST_PREFIX;

#define should_fsync(td)	((td_write(td) || td_rw(td)) && (!(td)->odirect || (td)->override_sync))

static volatile int startup_sem;

#define TERMINATE_ALL		(-1)
#define JOB_START_TIMEOUT	(5 * 1000)
static void terminate_threads(int group_id)
	struct thread_data *td;

		if (group_id == TERMINATE_ALL || group_id == td->groupid) {
static void sig_handler(int sig)
		disk_util_timer_arm();
		print_thread_status();

		printf("\nfio: terminating on signal\n");
		terminate_threads(TERMINATE_ALL);
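/*
 * Note: this handler is registered for both SIGALRM and SIGINT in
 * run_threads(). The SIGALRM path re-arms the disk utilization timer and
 * refreshes the status line; the SIGINT path prints a message and
 * terminates every thread.
 */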
/*
 * The ->file_map[] contains a map of blocks we have or have not done io
 * to yet. Used to make sure we cover the entire range in a fair fashion.
 */
static int random_map_free(struct thread_data *td, struct fio_file *f,
			   unsigned long long block)
	unsigned int idx = RAND_MAP_IDX(td, f, block);
	unsigned int bit = RAND_MAP_BIT(td, f, block);

	return (f->file_map[idx] & (1UL << bit)) == 0;
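/*
 * Worked example (assuming the fio.h definitions where RAND_MAP_IDX is
 * block / BLOCKS_PER_MAP and RAND_MAP_BIT is block % BLOCKS_PER_MAP, with
 * BLOCKS_PER_MAP == 8 * sizeof(long)): with min_bs = 4096 on a 64-bit host,
 * an io at offset 1MiB is block 256, tracked in file_map[4], bit 0.
 */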
/*
 * Return the next free block in the map.
 */
static int get_next_free_block(struct thread_data *td, struct fio_file *f,
			       unsigned long long *b)
	while ((*b) * td->min_bs < f->file_size) {
		if (f->file_map[i] != -1UL) {
			*b += ffz(f->file_map[i]);

		*b += BLOCKS_PER_MAP;
/*
 * Mark a given offset as used in the map.
 */
static void mark_random_map(struct thread_data *td, struct fio_file *f,
	unsigned long long block = io_u->offset / (unsigned long long) td->min_bs;
	unsigned int blocks = 0;

	while (blocks < (io_u->buflen / td->min_bs)) {
		unsigned int idx, bit;

		if (!random_map_free(td, f, block))

		idx = RAND_MAP_IDX(td, f, block);
		bit = RAND_MAP_BIT(td, f, block);

		assert(idx < f->num_maps);

		f->file_map[idx] |= (1UL << bit);

	if ((blocks * td->min_bs) < io_u->buflen)
		io_u->buflen = blocks * td->min_bs;
/*
 * For random io, generate a random new block and see if it's used. Repeat
 * until we find a free one. For sequential io, just return the end of
 * the last io issued.
 */
static int get_next_offset(struct thread_data *td, struct fio_file *f,
			   unsigned long long *offset)
	unsigned long long b, rb;

	if (!td->sequential) {
		unsigned long long max_blocks = td->io_size / td->min_bs;

			r = os_random_long(&td->random_state);
			b = ((max_blocks - 1) * r / (unsigned long long) (RAND_MAX+1.0));
			rb = b + (f->file_offset / td->min_bs);
		} while (!random_map_free(td, f, rb) && loops);

			if (get_next_free_block(td, f, &b))

		b = f->last_pos / td->min_bs;

	*offset = (b * td->min_bs) + f->file_offset;
	if (*offset > f->file_size)
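/*
 * Example: for a random job with min_bs = 4096 and io_size = 1GiB, b is
 * drawn uniformly from [0, 262143]; a draw of b = 1000 with file_offset = 0
 * yields offset 4096000. If that block is already marked in ->file_map[],
 * the do/while above retries, and once the retry budget (loops) runs out,
 * get_next_free_block() scans for the first unused block instead.
 */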
static unsigned int get_next_buflen(struct thread_data *td)
	if (td->min_bs == td->max_bs)

		r = os_random_long(&td->bsrange_state);
		buflen = (1 + (double) (td->max_bs - 1) * r / (RAND_MAX + 1.0));
		buflen = (buflen + td->min_bs - 1) & ~(td->min_bs - 1);

	if (buflen > td->io_size - td->this_io_bytes[td->ddir]) {
		/*
		 * if using direct/raw io, we may not be able to
		 * shrink the size. so just fail it.
		 */
		if (td->io_ops->flags & FIO_RAWIO)

		buflen = td->io_size - td->this_io_bytes[td->ddir];
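/*
 * Example: with min_bs = 4096 and max_bs = 65536, a draw of 5000 bytes is
 * rounded up to 8192 by the mask above. The mask-based rounding is only
 * exact when min_bs is a power of two.
 */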
/*
 * Check if we are above the minimum rate given. Returns nonzero if we
 * have fallen below it.
 */
static int check_min_rate(struct thread_data *td, struct timeval *now)
	/*
	 * allow a 2 second settle period in the beginning
	 */
	if (mtime_since(&td->start, now) < 2000)

	/*
	 * if rate blocks is set, sample is running
	 */
	if (td->rate_bytes) {
		spent = mtime_since(&td->lastrate, now);
		if (spent < td->ratecycle)

		rate = (td->this_io_bytes[ddir] - td->rate_bytes) / spent;
		if (rate < td->ratemin) {
			fprintf(f_out, "%s: min rate %d not met, got %ldKiB/sec\n", td->name, td->ratemin, rate);

	td->rate_bytes = td->this_io_bytes[ddir];
	memcpy(&td->lastrate, now, sizeof(*now));
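/*
 * Units note: spent is in milliseconds, so rate is bytes per millisecond.
 * That is within about 2.4% of KiB/sec (1000 vs 1024 bytes), which is why
 * it is compared against ratemin and reported as KiB/sec above.
 */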
static inline int runtime_exceeded(struct thread_data *td, struct timeval *t)
	if (mtime_since(&td->epoch, t) >= td->timeout * 1000)
/*
 * Return the data direction for the next io_u. If the job is a
 * mixed read/write workload, check the rwmix cycle and switch if
 * necessary.
 */
static int get_rw_ddir(struct thread_data *td)
		unsigned long elapsed;

		gettimeofday(&now, NULL);
		elapsed = mtime_since_now(&td->rwmix_switch);

		/*
		 * Check if it's time to seed a new data direction.
		 */
		if (elapsed >= td->rwmixcycle) {

			r = os_random_long(&td->rwmix_state);
			v = 1 + (int) (100.0 * (r / (RAND_MAX + 1.0)));
			if (v < td->rwmixread)
				td->rwmix_ddir = DDIR_READ;
			else
				td->rwmix_ddir = DDIR_WRITE;
			memcpy(&td->rwmix_switch, &now, sizeof(now));

		return td->rwmix_ddir;
	} else if (td_read(td))
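/*
 * Example: with rwmixread=75 and rwmixcycle=500, v is drawn uniformly from
 * 1..100 once every 500 msec window; roughly three out of four windows pick
 * DDIR_READ, and the chosen direction is held for the whole window.
 */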
static int td_io_prep(struct thread_data *td, struct io_u *io_u)
	if (td->io_ops->prep && td->io_ops->prep(td, io_u))

void put_io_u(struct thread_data *td, struct io_u *io_u)
	list_del(&io_u->list);
	list_add(&io_u->list, &td->io_u_freelist);

static int fill_io_u(struct thread_data *td, struct fio_file *f,
	/*
	 * If using an iolog, grab next piece if any available.
	 */
		return read_iolog_get(td, io_u);

	/*
	 * No log, let the seq/rand engine retrieve the next position.
	 */
	if (!get_next_offset(td, f, &io_u->offset)) {
		io_u->buflen = get_next_buflen(td);

			io_u->ddir = get_rw_ddir(td);

			/*
			 * If using a write iolog, store this entry.
			 */
				write_iolog_put(td, io_u);
#define queue_full(td)	list_empty(&(td)->io_u_freelist)

struct io_u *__get_io_u(struct thread_data *td)
	struct io_u *io_u = NULL;

	if (!queue_full(td)) {
		io_u = list_entry(td->io_u_freelist.next, struct io_u, list);

		list_del(&io_u->list);
		list_add(&io_u->list, &td->io_u_busylist);
/*
 * Return an io_u to be processed. Gets a buflen and offset, sets direction,
 * etc. The returned io_u is fully ready to be prepped and submitted.
 */
static struct io_u *get_io_u(struct thread_data *td, struct fio_file *f)
	io_u = __get_io_u(td);

	if (td->zone_bytes >= td->zone_size) {
		f->last_pos += td->zone_skip;

	if (fill_io_u(td, f, io_u)) {

	if (io_u->buflen + io_u->offset > f->file_size) {
		if (td->io_ops->flags & FIO_RAWIO) {

		io_u->buflen = f->file_size - io_u->offset;

	if (!td->read_iolog && !td->sequential)
		mark_random_map(td, f, io_u);

	f->last_pos += io_u->buflen;

	if (td->verify != VERIFY_NONE)
		populate_verify_io_u(td, io_u);

	if (td_io_prep(td, io_u)) {

	gettimeofday(&io_u->start_time, NULL);
static inline void td_set_runstate(struct thread_data *td, int runstate)
	td->runstate = runstate;

static struct fio_file *get_next_file(struct thread_data *td)
	unsigned int old_next_file = td->next_file;

		f = &td->files[td->next_file];

		if (td->next_file >= td->nr_files)

	} while (td->next_file != old_next_file);
static int td_io_sync(struct thread_data *td, struct fio_file *f)
	if (td->io_ops->sync)
		return td->io_ops->sync(td, f);

static int td_io_getevents(struct thread_data *td, int min, int max,
	return td->io_ops->getevents(td, min, max, t);

static int td_io_queue(struct thread_data *td, struct io_u *io_u)
	gettimeofday(&io_u->issue_time, NULL);

	return td->io_ops->queue(td, io_u);
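/*
 * Timing note: ->start_time is stamped when the io_u is built in get_io_u()
 * and ->issue_time here at queue time. Submission latency (slat) is the gap
 * between the two, while completion latency (clat) in io_completed() runs
 * from ->issue_time to completion.
 */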
#define iocb_time(iocb)	((unsigned long) (iocb)->data)

static void io_completed(struct thread_data *td, struct io_u *io_u,
			 struct io_completion_data *icd)
	gettimeofday(&e, NULL);

		unsigned int bytes = io_u->buflen - io_u->resid;
		const int idx = io_u->ddir;

		td->io_blocks[idx]++;
		td->io_bytes[idx] += bytes;
		td->zone_bytes += bytes;
		td->this_io_bytes[idx] += bytes;

		msec = mtime_since(&io_u->issue_time, &e);

		add_clat_sample(td, idx, msec);
		add_bw_sample(td, idx);

		if ((td_rw(td) || td_write(td)) && idx == DDIR_WRITE)
			log_io_piece(td, io_u);

		icd->bytes_done[idx] += bytes;

	icd->error = io_u->error;
static void ios_completed(struct thread_data *td, struct io_completion_data *icd)
	icd->bytes_done[0] = icd->bytes_done[1] = 0;

	for (i = 0; i < icd->nr; i++) {
		io_u = td->io_ops->event(td, i);

		io_completed(td, io_u, icd);
/*
 * When job exits, we can cancel the in-flight IO if we are using async
 * io. Attempt to do so.
 */
static void cleanup_pending_aio(struct thread_data *td)
	struct timespec ts = { .tv_sec = 0, .tv_nsec = 0};
	struct list_head *entry, *n;
	struct io_completion_data icd;

	/*
	 * get immediately available events, if any
	 */
	r = td_io_getevents(td, 0, td->cur_depth, &ts);

		ios_completed(td, &icd);

	/*
	 * now cancel remaining active events
	 */
	if (td->io_ops->cancel) {
		list_for_each_safe(entry, n, &td->io_u_busylist) {
			io_u = list_entry(entry, struct io_u, list);

			r = td->io_ops->cancel(td, io_u);

	r = td_io_getevents(td, td->cur_depth, td->cur_depth, NULL);

		ios_completed(td, &icd);
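/*
 * The cleanup runs in three steps: a non-blocking reap with a zero timespec
 * to drain whatever already completed, a cancel pass over the busylist if
 * the io engine supports it, and finally a blocking getevents for the full
 * remaining depth so nothing is left in flight.
 */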
/*
 * The main verify engine. Runs over the writes we previously submitted,
 * reads the blocks back in, and checks the crc/md5 of the data.
 */
void do_verify(struct thread_data *td)
	struct io_u *io_u, *v_io_u = NULL;
	struct io_completion_data icd;

	/*
	 * sync io first and invalidate cache, to make sure we really
	 * read the data back from the device.
	 */
	for_each_file(td, f, i) {
		file_invalidate_cache(td, f);

	td_set_runstate(td, TD_VERIFYING);

		gettimeofday(&t, NULL);
		if (runtime_exceeded(td, &t))

		io_u = __get_io_u(td);

		if (get_next_verify(td, io_u)) {

		f = get_next_file(td);

		if (td_io_prep(td, io_u)) {

		ret = td_io_queue(td, io_u);

		/*
		 * we have one pending to verify, do that while
		 * we are doing io on the next one
		 */
		if (do_io_u_verify(td, &v_io_u))

		ret = td_io_getevents(td, 1, 1, NULL);

			v_io_u = td->io_ops->event(td, 0);

			io_completed(td, v_io_u, &icd);

				td_verror(td, icd.error);
				put_io_u(td, v_io_u);

	/*
	 * if we can't submit more io, we need to verify now
	 */
	if (queue_full(td) && do_io_u_verify(td, &v_io_u))

	do_io_u_verify(td, &v_io_u);

	cleanup_pending_aio(td);

	td_set_runstate(td, TD_RUNNING);
/*
 * Not really an io thread, all it does is burn CPU cycles in the specified
 * percentage.
 */
static void do_cpuio(struct thread_data *td)
	int split = 100 / td->cpuload;

	while (!td->terminate) {
		gettimeofday(&e, NULL);

		if (runtime_exceeded(td, &e))

			usec_sleep(td, 10000);
/*
 * Main IO worker function. It retrieves io_u's to process and queues
 * and reaps them, checking for rate and errors along the way.
 */
static void do_io(struct thread_data *td)
	struct io_completion_data icd;

	td_set_runstate(td, TD_RUNNING);

	while (td->this_io_bytes[td->ddir] < td->io_size) {
		struct timespec ts = { .tv_sec = 0, .tv_nsec = 0};
		struct timespec *timeout;

		f = get_next_file(td);

		io_u = get_io_u(td, f);

		memcpy(&s, &io_u->start_time, sizeof(s));

		ret = td_io_queue(td, io_u);

		add_slat_sample(td, io_u->ddir, mtime_since(&io_u->start_time, &io_u->issue_time));

		if (td->cur_depth < td->iodepth) {

		ret = td_io_getevents(td, min_evts, td->cur_depth, timeout);

		ios_completed(td, &icd);
			td_verror(td, icd.error);

		/*
		 * the rate is batched for now, it should work for batches
		 * of completions except the very first one which may look
		 */
		gettimeofday(&e, NULL);
		usec = utime_since(&s, &e);

		rate_throttle(td, usec, icd.bytes_done[td->ddir]);

		if (check_min_rate(td, &e)) {
			terminate_threads(td->groupid);
			td_verror(td, ENOMEM);

		if (runtime_exceeded(td, &e))

			usec_sleep(td, td->thinktime);

		if (should_fsync(td) && td->fsync_blocks &&
		    (td->io_blocks[DDIR_WRITE] % td->fsync_blocks) == 0)

	cleanup_pending_aio(td);

	if (should_fsync(td) && td->end_fsync) {
		td_set_runstate(td, TD_FSYNCING);
		for_each_file(td, f, i)
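/*
 * Per-iteration shape of the loop above: queue one io_u, log its submission
 * latency, reap completions (via the zero timespec, i.e. without blocking,
 * while the queue is still below iodepth), apply rate throttling and the
 * ratemin check, honor thinktime, and optionally fsync every fsync_blocks
 * writes. After the loop, leftover async io is reaped and, with end_fsync
 * set, every file is synced.
 */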
static int td_io_init(struct thread_data *td)
	if (td->io_ops->init)
		return td->io_ops->init(td);

static void cleanup_io_u(struct thread_data *td)
	struct list_head *entry, *n;

	list_for_each_safe(entry, n, &td->io_u_freelist) {
		io_u = list_entry(entry, struct io_u, list);

		list_del(&io_u->list);
static int init_io_u(struct thread_data *td)
	if (td->io_ops->flags & FIO_CPUIO)

	if (td->io_ops->flags & FIO_SYNCIO)

		max_units = td->iodepth;

	td->orig_buffer_size = td->max_bs * max_units + MASK;

	if (allocate_io_mem(td))

	p = ALIGN(td->orig_buffer);
	for (i = 0; i < max_units; i++) {
		io_u = malloc(sizeof(*io_u));
		memset(io_u, 0, sizeof(*io_u));
		INIT_LIST_HEAD(&io_u->list);

		io_u->buf = p + td->max_bs * i;

		list_add(&io_u->list, &td->io_u_freelist);
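/*
 * Buffer layout: one contiguous allocation of max_bs * max_units bytes
 * (plus MASK slack so ALIGN() can round the start up to an aligned
 * boundary), with io_u number i owning the max_bs-sized slice at
 * p + max_bs * i. MASK/ALIGN are assumed to provide the alignment that
 * O_DIRECT engines need.
 */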
static int switch_ioscheduler(struct thread_data *td)
	char tmp[256], tmp2[128];

	sprintf(tmp, "%s/queue/scheduler", td->sysfs_root);

	f = fopen(tmp, "r+");
		td_verror(td, errno);

	ret = fwrite(td->ioscheduler, strlen(td->ioscheduler), 1, f);
	if (ferror(f) || ret != 1) {
		td_verror(td, errno);

	/*
	 * Read back and check that the selected scheduler is now the default.
	 */
	ret = fread(tmp, 1, sizeof(tmp), f);
	if (ferror(f) || ret < 0) {
		td_verror(td, errno);

	sprintf(tmp2, "[%s]", td->ioscheduler);
	if (!strstr(tmp, tmp2)) {
		log_err("fio: io scheduler %s not found\n", td->ioscheduler);
		td_verror(td, EINVAL);
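/*
 * The sysfs scheduler file lists every available scheduler and marks the
 * active one with brackets, e.g. "noop deadline [cfq]", so writing "cfq"
 * and then finding "[cfq]" in the read-back confirms the switch took
 * effect.
 */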
static void clear_io_state(struct thread_data *td)
	td->stat_io_bytes[0] = td->stat_io_bytes[1] = 0;
	td->this_io_bytes[0] = td->this_io_bytes[1] = 0;

	for_each_file(td, f, i) {

		if (td->io_ops->flags & FIO_SYNCIO)
			lseek(f->fd, 0, SEEK_SET);

		memset(f->file_map, 0, f->num_maps * sizeof(long));
/*
 * Entry point for the thread based jobs. The process based jobs end up
 * here as well, after a little setup.
 */
static void *thread_main(void *data)
	struct thread_data *td = data;

	INIT_LIST_HEAD(&td->io_u_freelist);
	INIT_LIST_HEAD(&td->io_u_busylist);
	INIT_LIST_HEAD(&td->io_hist_list);
	INIT_LIST_HEAD(&td->io_log_list);

		if (fio_setaffinity(td) == -1) {
			td_verror(td, errno);

		if (ioprio_set(IOPRIO_WHO_PROCESS, 0, td->ioprio) == -1) {
			td_verror(td, errno);

	if (nice(td->nice) == -1) {
		td_verror(td, errno);

	if (init_random_state(td))

	if (td->ioscheduler && switch_ioscheduler(td))

	td_set_runstate(td, TD_INITIALIZED);
	fio_sem_up(&startup_sem);
	fio_sem_down(&td->mutex);
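	/*
	 * Startup handshake: raising startup_sem tells run_threads() this
	 * worker has reached TD_INITIALIZED; the worker then blocks on its
	 * own ->mutex until run_threads() marks it TD_RUNNING and releases
	 * the mutex, so all jobs in a wave begin io together.
	 */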
	if (!td->create_serialize && setup_files(td))

	gettimeofday(&td->epoch, NULL);

		system(td->exec_prerun);

	while (td->loops--) {
		getrusage(RUSAGE_SELF, &td->ru_start);
		gettimeofday(&td->start, NULL);
		memcpy(&td->stat_sample_time, &td->start, sizeof(td->start));

			memcpy(&td->lastrate, &td->stat_sample_time, sizeof(td->lastrate));

		prune_io_piece_log(td);

		if (td->io_ops->flags & FIO_CPUIO)

		td->runtime[td->ddir] += mtime_since_now(&td->start);
		if (td_rw(td) && td->io_bytes[td->ddir ^ 1])
			td->runtime[td->ddir ^ 1] = td->runtime[td->ddir];
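		/*
		 * For mixed read/write jobs both directions run over the same
		 * wall clock interval, so the elapsed time measured for ->ddir
		 * is copied to the opposite direction (ddir ^ 1) whenever io
		 * was actually done there.
		 */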
		update_rusage_stat(td);

		if (td->error || td->terminate)

		if (td->verify == VERIFY_NONE)

		gettimeofday(&td->start, NULL);

		td->runtime[DDIR_READ] += mtime_since_now(&td->start);

		if (td->error || td->terminate)

		finish_log(td, td->bw_log, "bw");
		finish_log(td, td->slat_log, "slat");
		finish_log(td, td->clat_log, "clat");
	if (td->write_iolog)
		write_iolog_close(td);
	if (td->exec_postrun)
		system(td->exec_postrun);

	if (exitall_on_terminate)
		terminate_threads(td->groupid);

	td_set_runstate(td, TD_EXITED);
/*
 * We cannot pass the td data into a forked process, so attach the td and
 * pass it to the thread worker.
 */
static void *fork_main(int shmid, int offset)
	struct thread_data *td;

	data = shmat(shmid, NULL, 0);
	if (data == (void *) -1) {

	td = data + offset * sizeof(struct thread_data);
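/*
 * The forked child re-attaches the shared memory segment that holds the
 * thread_data array and indexes into it by hand. The arithmetic on the
 * void pointer relies on GCC treating sizeof(void) as 1, which is why the
 * offset is scaled by sizeof(struct thread_data) explicitly.
 */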
/*
 * Run over the job map and reap the threads that have exited, if any.
 */
static void reap_threads(int *nr_running, int *t_rate, int *m_rate)
	struct thread_data *td;

	/*
	 * reap exited threads (TD_EXITED -> TD_REAPED)
	 */
	for_each_td(td, i) {
		/*
		 * ->io_ops is NULL for a thread that has closed its
		 */
		if (td->io_ops && td->io_ops->flags & FIO_CPUIO)

		if (td->runstate != TD_EXITED)

		td_set_runstate(td, TD_REAPED);

		if (td->use_thread) {

			if (pthread_join(td->thread, (void *) &ret))
				perror("thread_join");

			waitpid(td->pid, NULL, 0);

		(*m_rate) -= td->ratemin;
		(*t_rate) -= td->rate;

	if (*nr_running == cputhreads)
		terminate_threads(TERMINATE_ALL);
/*
 * Main function for kicking off and reaping jobs, as needed.
 */
static void run_threads(void)
	struct thread_data *td;
	unsigned long spent;
	int i, todo, nr_running, m_rate, t_rate, nr_started;

	if (fio_pin_memory())

	if (!terse_output) {
		printf("Starting %d thread%s\n", thread_number, thread_number > 1 ? "s" : "");

	signal(SIGINT, sig_handler);
	signal(SIGALRM, sig_handler);

	todo = thread_number;
	m_rate = t_rate = 0;

	for_each_td(td, i) {
		print_status_init(td->thread_number - 1);

		if (!td->create_serialize)

		/*
		 * do file setup here so it happens sequentially,
		 * we don't want X number of threads getting their
		 * client data interspersed on disk
		 */
		if (setup_files(td)) {
			td_set_runstate(td, TD_REAPED);
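	/*
	 * The loop below walks each job through its runstates:
	 * TD_NOT_CREATED -> TD_CREATED (thread or process spawned) ->
	 * TD_INITIALIZED (worker setup done, reported via startup_sem) ->
	 * TD_RUNNING (mutex released), with start_delay and stonewall gating
	 * when a job may be created. Exited jobs are moved to TD_REAPED by
	 * reap_threads().
	 */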
		struct thread_data *map[MAX_JOBS];
		struct timeval this_start;
		int this_jobs = 0, left;

		/*
		 * create threads (TD_NOT_CREATED -> TD_CREATED)
		 */
		for_each_td(td, i) {
			if (td->runstate != TD_NOT_CREATED)

			/*
			 * never got a chance to start, killed by other
			 * thread for some reason
			 */
			if (td->terminate) {

			if (td->start_delay) {
				spent = mtime_since_genesis();

				if (td->start_delay * 1000 > spent)

			if (td->stonewall && (nr_started || nr_running))

			/*
			 * Set state to created. Thread will transition
			 * to TD_INITIALIZED when it's done setting up.
			 */
			td_set_runstate(td, TD_CREATED);
			map[this_jobs++] = td;
			fio_sem_init(&startup_sem, 1);

			if (td->use_thread) {
				if (pthread_create(&td->thread, NULL, thread_main, td)) {
					perror("thread_create");

				fio_sem_down(&startup_sem);

					fork_main(shm_id, i);

		/*
		 * Wait for the started threads to transition to
		 * TD_INITIALIZED.
		 */
		gettimeofday(&this_start, NULL);

			if (mtime_since_now(&this_start) > JOB_START_TIMEOUT)

			for (i = 0; i < this_jobs; i++) {

				if (td->runstate == TD_INITIALIZED) {
				} else if (td->runstate >= TD_EXITED) {
					nr_running++; /* work-around... */

			log_err("fio: %d jobs failed to start\n", left);
			for (i = 0; i < this_jobs; i++) {

				kill(td->pid, SIGTERM);

		/*
		 * start created threads (TD_INITIALIZED -> TD_RUNNING).
		 */
		for_each_td(td, i) {
			if (td->runstate != TD_INITIALIZED)

			td_set_runstate(td, TD_RUNNING);
			m_rate += td->ratemin;

			fio_sem_up(&td->mutex);

		reap_threads(&nr_running, &t_rate, &m_rate);

	while (nr_running) {
		reap_threads(&nr_running, &t_rate, &m_rate);
int main(int argc, char *argv[])
	if (parse_options(argc, argv))

	if (!thread_number) {
		log_err("Nothing to do\n");

	disk_util_timer_arm();