- }
-
- ret = pthread_detach(disk_util_thread);
- if (ret) {
- log_err("Can't detatch disk util thread: %s\n", strerror(ret));
- return 1;
- }
-
- dprint(FD_MUTEX, "wait on startup_mutex\n");
- fio_mutex_down(startup_mutex);
- dprint(FD_MUTEX, "done waiting on startup_mutex\n");
- return 0;
-}
-
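-/*
- * Install the SIGINT/SIGTERM handlers. SA_RESTART makes interrupted
- * system calls restart instead of failing with EINTR.
- */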
-static void set_sig_handlers(void)
-{
- struct sigaction act;
-
- memset(&act, 0, sizeof(act));
- act.sa_handler = sig_int;
- act.sa_flags = SA_RESTART;
-
- /* the same handler and flags serve both signals */
- sigaction(SIGINT, &act, NULL);
- sigaction(SIGTERM, &act, NULL);
-}
-
-/*
- * Check if we are above the minimum rate given. Returns 1 (after
- * logging an error) if the minimum bandwidth or iops rate was not met.
- */
-static int __check_min_rate(struct thread_data *td, struct timeval *now,
- enum fio_ddir ddir)
-{
- unsigned long long bytes = 0;
- unsigned long iops = 0;
- unsigned long spent;
- unsigned long rate;
- unsigned int ratemin = 0;
- unsigned int rate_iops = 0;
- unsigned int rate_iops_min = 0;
-
- assert(ddir_rw(ddir));
-
- if (!td->o.ratemin[ddir] && !td->o.rate_iops_min[ddir])
- return 0;
-
- /*
- * allow a 2 second settle period at the beginning
- */
- if (mtime_since(&td->start, now) < 2000)
- return 0;
-
- iops += td->io_blocks[ddir];
- bytes += td->this_io_bytes[ddir];
- ratemin += td->o.ratemin[ddir];
- rate_iops += td->o.rate_iops[ddir];
- rate_iops_min += td->o.rate_iops_min[ddir];
-
- /*
- * if rate_bytes or rate_blocks is set, a previous sample exists and
- * we can check the rate over the elapsed window
- */
- if (td->rate_bytes[ddir] || td->rate_blocks[ddir]) {
- spent = mtime_since(&td->lastrate[ddir], now);
- if (spent < td->o.ratecycle)
- return 0;
-
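- /*
- * Rate math below: "spent" is in msec, so scaling the byte (or
- * block) delta by 1000 gives a per-second rate. E.g. a delta of
- * 8192 over 4000 msec works out to 2048 per second.
- */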
- if (td->o.rate[ddir]) {
- /*
- * check bandwidth specified rate
- */
- if (bytes < td->rate_bytes[ddir]) {
- log_err("%s: min rate %u not met\n", td->o.name,
- ratemin);
- return 1;
- } else {
- rate = ((bytes - td->rate_bytes[ddir]) * 1000) / spent;
- if (rate < ratemin ||
- bytes < td->rate_bytes[ddir]) {
- log_err("%s: min rate %u not met, got"
- " %luKB/sec\n", td->o.name,
- ratemin, rate);
- return 1;
- }
- }
- } else {
- /*
- * check iops specified rate
- */
- if (iops < rate_iops) {
- log_err("%s: min iops rate %u not met\n",
- td->o.name, rate_iops);
- return 1;
- } else {
- rate = ((iops - td->rate_blocks[ddir]) * 1000) / spent;
- if (rate < rate_iops_min ||
- iops < td->rate_blocks[ddir]) {
- log_err("%s: min iops rate %u not met,"
- " got %lu\n", td->o.name,
- rate_iops_min, rate);
- }
- }
- }
- }
-
- td->rate_bytes[ddir] = bytes;
- td->rate_blocks[ddir] = iops;
- memcpy(&td->lastrate[ddir], now, sizeof(*now));
- return 0;
-}
-
-static int check_min_rate(struct thread_data *td, struct timeval *now,
- unsigned long *bytes_done)
-{
- int ret = 0;
-
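- /* ddir 0 is DDIR_READ, 1 is DDIR_WRITE */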
- if (bytes_done[0])
- ret |= __check_min_rate(td, now, 0);
- if (bytes_done[1])
- ret |= __check_min_rate(td, now, 1);
-
- return ret;
-}
-
-static inline int runtime_exceeded(struct thread_data *td, struct timeval *t)
-{
- if (!td->o.timeout)
- return 0;
- if (mtime_since(&td->epoch, t) >= td->o.timeout * 1000)
- return 1;
-
- return 0;
-}
-
-/*
- * When the job exits, we can cancel the in-flight IO if we are using async
- * io. Attempt to do so.
- */
-static void cleanup_pending_aio(struct thread_data *td)
-{
- struct flist_head *entry, *n;
- struct io_u *io_u;
- int r;
-
- /*
- * get immediately available events, if any
- */
- r = io_u_queued_complete(td, 0, NULL);
- if (r < 0)
- return;
-
- /*
- * now cancel remaining active events
- */
- if (td->io_ops->cancel) {
- flist_for_each_safe(entry, n, &td->io_u_busylist) {
- io_u = flist_entry(entry, struct io_u, list);
-
- /*
- * if the io_u isn't in flight, then that generally
- * means someone leaked an io_u. complain but fix
- * it up, so we don't stall here.
- */
- if ((io_u->flags & IO_U_F_FLIGHT) == 0) {
- log_err("fio: non-busy IO on busy list\n");
- put_io_u(td, io_u);
- } else {
- r = td->io_ops->cancel(td, io_u);
- if (!r)
- put_io_u(td, io_u);
- }
- }
- }
-
- if (td->cur_depth)
- r = io_u_queued_complete(td, td->cur_depth, NULL);
-}
-
-/*
- * Helper to handle the final sync of a file. It works just like the
- * normal IO path, except everything is done synchronously.
- */
-static int fio_io_sync(struct thread_data *td, struct fio_file *f)
-{
- struct io_u *io_u = __get_io_u(td);
- int ret;
-
- if (!io_u)
- return 1;
-
- io_u->ddir = DDIR_SYNC;
- io_u->file = f;
-
- if (td_io_prep(td, io_u)) {
- put_io_u(td, io_u);
- return 1;
- }
-
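-/*
- * Queue the sync and handle the three possible outcomes: FIO_Q_QUEUED
- * means the engine took it and the completion must be reaped,
- * FIO_Q_COMPLETED means it finished inline, and FIO_Q_BUSY means the
- * engine wants a commit before accepting more IO.
- */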
-requeue:
- ret = td_io_queue(td, io_u);
- if (ret < 0) {
- td_verror(td, io_u->error, "td_io_queue");
- put_io_u(td, io_u);
- return 1;
- } else if (ret == FIO_Q_QUEUED) {
- if (io_u_queued_complete(td, 1, NULL) < 0)
- return 1;
- } else if (ret == FIO_Q_COMPLETED) {
- if (io_u->error) {
- td_verror(td, io_u->error, "td_io_queue");
- return 1;
- }
-
- if (io_u_sync_complete(td, io_u, NULL) < 0)
- return 1;
- } else if (ret == FIO_Q_BUSY) {
- if (td_io_commit(td))
- return 1;
- goto requeue;
- }
-
- return 0;
-}
-
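-/*
- * The cached time is only refreshed every (tv_cache_mask + 1) calls,
- * which amortizes the cost of fio_gettime() over many fast IOs: with
- * a mask of 15, for instance, the clock is re-read every 16th call.
- */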
-static inline void update_tv_cache(struct thread_data *td)
-{
- if ((++td->tv_cache_nr & td->tv_cache_mask) == td->tv_cache_mask)
- fio_gettime(&td->tv_cache, NULL);
-}
-
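-/*
- * Classify an IO error: non-fatal errors are counted and cleared when
- * continue_on_error is set, ENOSPC ends a fill_device job cleanly, and
- * anything else is fatal. Returns 1 if the IO loop should stop.
- */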
-static int break_on_this_error(struct thread_data *td, int *retptr)
-{
- int ret = *retptr;
-
- if (ret < 0 || td->error) {
- int err;
-
- if (!td->o.continue_on_error)
- return 1;
-
- if (ret < 0)
- err = -ret;
- else
- err = td->error;
-
- if (td_non_fatal_error(err)) {
- /*
- * Continue with the I/Os in case of
- * a non fatal error.
- */
- update_error_count(td, err);
- td_clear_error(td);
- *retptr = 0;
- return 0;
- } else if (td->o.fill_device && err == ENOSPC) {
- /*
- * We expect to hit this error if
- * fill_device option is set.
- */
- td_clear_error(td);
- td->terminate = 1;
- return 1;
- } else {
- /*
- * Stop the I/O in case of a fatal
- * error.
- */
- update_error_count(td, err);
- return 1;
- }
- }
-
- return 0;
-}
-
-/*
- * The main verify engine. Runs over the writes we previously submitted,
- * reads the blocks back in, and checks the crc/md5 of the data.
- */
-static void do_verify(struct thread_data *td)
-{
- struct fio_file *f;
- struct io_u *io_u;
- int ret, min_events;
- unsigned int i;
-
- dprint(FD_VERIFY, "starting loop\n");
-
- /*
- * sync io first and invalidate cache, to make sure we really
- * read from disk.
- */
- for_each_file(td, f, i) {
- if (!fio_file_open(f))
- continue;
- if (fio_io_sync(td, f))
- break;
- if (file_invalidate_cache(td, f))
- break;
- }
-
- if (td->error)
- return;
-
- td_set_runstate(td, TD_VERIFYING);
-
- io_u = NULL;
- while (!td->terminate) {
- int ret2, full;
-
- update_tv_cache(td);
-
- if (runtime_exceeded(td, &td->tv_cache)) {
- td->terminate = 1;
- break;
- }
-
- io_u = __get_io_u(td);
- if (!io_u)
- break;
-
- if (get_next_verify(td, io_u)) {
- put_io_u(td, io_u);
- break;
- }
-
- if (td_io_prep(td, io_u)) {
- put_io_u(td, io_u);
- break;
- }
-
- if (td->o.verify_async)
- io_u->end_io = verify_io_u_async;
- else
- io_u->end_io = verify_io_u;
-
- ret = td_io_queue(td, io_u);
- switch (ret) {
- case FIO_Q_COMPLETED:
- if (io_u->error) {
- ret = -io_u->error;
- clear_io_u(td, io_u);
- } else if (io_u->resid) {
- int bytes = io_u->xfer_buflen - io_u->resid;
-
- /*
- * zero read, fail
- */
- if (!bytes) {
- td_verror(td, EIO, "full resid");
- put_io_u(td, io_u);
- break;
- }
-
- io_u->xfer_buflen = io_u->resid;
- io_u->xfer_buf += bytes;
- io_u->offset += bytes;
-
- if (ddir_rw(io_u->ddir))
- td->ts.short_io_u[io_u->ddir]++;
-
- f = io_u->file;
- if (io_u->offset == f->real_file_size)
- goto sync_done;
-
- requeue_io_u(td, &io_u);
- } else {
-sync_done:
- ret = io_u_sync_complete(td, io_u, NULL);
- if (ret < 0)
- break;
- }
- continue;
- case FIO_Q_QUEUED:
- break;
- case FIO_Q_BUSY:
- requeue_io_u(td, &io_u);
- ret2 = td_io_commit(td);
- if (ret2 < 0)
- ret = ret2;
- break;
- default:
- assert(ret < 0);
- td_verror(td, -ret, "td_io_queue");
- break;
- }
-
- if (break_on_this_error(td, &ret))
- break;
-
- /*
- * If we can queue more, do so. But first reap completed io_u's:
- * if the queue is full (or the engine returned FIO_Q_BUSY), we
- * must wait for at least one completion before queueing again.
- */
- full = queue_full(td) || ret == FIO_Q_BUSY;
- if (full || !td->o.iodepth_batch_complete) {
- min_events = min(td->o.iodepth_batch_complete,
- td->cur_depth);
- if (full && !min_events)
- min_events = 1;
-
- do {
- /*
- * Reap required number of io units, if any,
- * and do the verification on them through
- * the callback handler
- */
- if (io_u_queued_complete(td, min_events, NULL) < 0) {
- ret = -1;
- break;
- }
- } while (full && (td->cur_depth > td->o.iodepth_low));
- }
- if (ret < 0)
- break;
- }
-
- if (!td->error) {
- min_events = td->cur_depth;
-
- if (min_events)
- ret = io_u_queued_complete(td, min_events, NULL);
- } else
- cleanup_pending_aio(td);
-
- td_set_runstate(td, TD_RUNNING);
-
- dprint(FD_VERIFY, "exiting loop\n");
-}
-
-/*
- * Main IO worker function. It retrieves io_u's, queues and reaps
- * them, checking for rate limits and errors along the way.
- */
-static void do_io(struct thread_data *td)
-{
- unsigned int i;
- int ret = 0;
-
- if (in_ramp_time(td))
- td_set_runstate(td, TD_RAMP);
- else
- td_set_runstate(td, TD_RUNNING);
-
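- /*
- * Keep going while the iolog still has entries to replay, trims
- * are pending, or we have not yet transferred the requested size.
- */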
- while ((td->o.read_iolog_file && !flist_empty(&td->io_log_list)) ||
- (!flist_empty(&td->trim_list)) ||
- ((td->this_io_bytes[0] + td->this_io_bytes[1]) < td->o.size)) {
- struct timeval comp_time;
- unsigned long bytes_done[2] = { 0, 0 }; /* [0] = read, [1] = write */
- int min_evts = 0;
- struct io_u *io_u;
- int ret2, full;
-
- if (td->terminate)
- break;
-
- update_tv_cache(td);
-
- if (runtime_exceeded(td, &td->tv_cache)) {
- td->terminate = 1;
- break;
- }
-
- io_u = get_io_u(td);
- if (!io_u)
- break;
-
- /*
- * Add verification end_io handler, if asked to verify
- * a previously written file.
- */
- if (td->o.verify != VERIFY_NONE && io_u->ddir == DDIR_READ &&
- !td_rw(td)) {
- if (td->o.verify_async)
- io_u->end_io = verify_io_u_async;
- else
- io_u->end_io = verify_io_u;
- td_set_runstate(td, TD_VERIFYING);
- } else if (in_ramp_time(td))
- td_set_runstate(td, TD_RAMP);
- else
- td_set_runstate(td, TD_RUNNING);
-
- ret = td_io_queue(td, io_u);
- switch (ret) {
- case FIO_Q_COMPLETED:
- if (io_u->error) {
- ret = -io_u->error;
- clear_io_u(td, io_u);
- } else if (io_u->resid) {
- int bytes = io_u->xfer_buflen - io_u->resid;
- struct fio_file *f = io_u->file;
-
- /*
- * zero read, fail
- */
- if (!bytes) {
- td_verror(td, EIO, "full resid");
- put_io_u(td, io_u);
- break;
- }
-
- io_u->xfer_buflen = io_u->resid;
- io_u->xfer_buf += bytes;
- io_u->offset += bytes;
-
- if (ddir_rw(io_u->ddir))
- td->ts.short_io_u[io_u->ddir]++;
-
- if (io_u->offset == f->real_file_size)
- goto sync_done;
-
- requeue_io_u(td, &io_u);
- } else {
-sync_done:
- if (__should_check_rate(td, 0) ||
- __should_check_rate(td, 1))
- fio_gettime(&comp_time, NULL);
-
- ret = io_u_sync_complete(td, io_u, bytes_done);
- if (ret < 0)
- break;
- }
- break;
- case FIO_Q_QUEUED:
- /*
- * if the engine doesn't have a commit hook,
- * the io_u is really queued. if it does have such
- * a hook, it has to call io_u_queued() itself.
- */
- if (td->io_ops->commit == NULL)
- io_u_queued(td, io_u);
- break;
- case FIO_Q_BUSY:
- requeue_io_u(td, &io_u);
- ret2 = td_io_commit(td);
- if (ret2 < 0)
- ret = ret2;
- break;
- default:
- assert(ret < 0);
- put_io_u(td, io_u);
- break;
- }
-
- if (break_on_this_error(td, &ret))
- break;
-
- /*
- * See if we need to complete some commands
- */
- full = queue_full(td) || ret == FIO_Q_BUSY;
- if (full || !td->o.iodepth_batch_complete) {
- min_evts = min(td->o.iodepth_batch_complete,
- td->cur_depth);
- if (full && !min_evts)
- min_evts = 1;
-
- if (__should_check_rate(td, 0) ||
- __should_check_rate(td, 1))
- fio_gettime(&comp_time, NULL);
-
- do {
- ret = io_u_queued_complete(td, min_evts, bytes_done);
- if (ret < 0)
- break;
-
- } while (full && (td->cur_depth > td->o.iodepth_low));
- }
-
- if (ret < 0)
- break;
- if (!(bytes_done[0] + bytes_done[1]))
- continue;
-
- if (!in_ramp_time(td) && should_check_rate(td, bytes_done)) {
- if (check_min_rate(td, &comp_time, bytes_done)) {
- if (exitall_on_terminate)
- terminate_threads(td->groupid);
- td_verror(td, EIO, "check_min_rate");
- break;
- }
- }
-
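- /*
- * Thinktime pauses between blocks: busy-spin for the first
- * thinktime_spin usec, then sleep the remainder. E.g. with
- * thinktime=1000 and thinktime_spin=200, we spin for 200 usec
- * and sleep for 800.
- */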
- if (td->o.thinktime) {
- unsigned long long b;
-
- b = td->io_blocks[0] + td->io_blocks[1];
- if (!(b % td->o.thinktime_blocks)) {
- int left;
-
- if (td->o.thinktime_spin)
- usec_spin(td->o.thinktime_spin);
-
- left = td->o.thinktime - td->o.thinktime_spin;
- if (left)
- usec_sleep(td, left);
- }
- }
- }
-
- if (td->trim_entries)
- log_err("fio: %lu trim entries leaked?\n", td->trim_entries);
-
- if (td->o.fill_device && td->error == ENOSPC) {
- td->error = 0;
- td->terminate = 1;
- }
- if (!td->error) {
- struct fio_file *f;
-
- i = td->cur_depth;
- if (i) {
- ret = io_u_queued_complete(td, i, NULL);
- if (td->o.fill_device && td->error == ENOSPC)
- td->error = 0;
- }
-
- if (should_fsync(td) && td->o.end_fsync) {
- td_set_runstate(td, TD_FSYNCING);
-
- for_each_file(td, f, i) {
- if (!fio_file_open(f))
- continue;
- fio_io_sync(td, f);
- }
- }
- } else
- cleanup_pending_aio(td);
-
- /*
- * stop the job if we failed to do any IO
- */
- if ((td->this_io_bytes[0] + td->this_io_bytes[1]) == 0)
- td->done = 1;
-}
-
-static void cleanup_io_u(struct thread_data *td)
-{
- struct flist_head *entry, *n;
- struct io_u *io_u;
-
- flist_for_each_safe(entry, n, &td->io_u_freelist) {
- io_u = flist_entry(entry, struct io_u, list);
-
- flist_del(&io_u->list);
- fio_memfree(io_u, sizeof(*io_u));
- }
-
- free_io_mem(td);
-}
-
-static int init_io_u(struct thread_data *td)
-{
- struct io_u *io_u;
- unsigned int max_bs;
- int cl_align, i, max_units;
- char *p;
-
- max_units = td->o.iodepth;
- max_bs = max(td->o.max_bs[DDIR_READ], td->o.max_bs[DDIR_WRITE]);
- td->orig_buffer_size = (unsigned long long) max_bs
- * (unsigned long long) max_units;
-
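- /*
- * For huge page backed memory, round the buffer size up to a
- * hugepage multiple. The mask trick below assumes hugepage_size
- * is a power of two: e.g. 3MB rounds up to 4MB with 2MB pages.
- */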
- if (td->o.mem_type == MEM_SHMHUGE || td->o.mem_type == MEM_MMAPHUGE) {
- unsigned long bs;
-
- bs = td->orig_buffer_size + td->o.hugepage_size - 1;
- td->orig_buffer_size = bs & ~(td->o.hugepage_size - 1);
- }
-
- if (td->orig_buffer_size != (size_t) td->orig_buffer_size) {
- log_err("fio: IO memory too large. Reduce max_bs or iodepth\n");