td->runstate = runstate;
}
-static void terminate_threads(int group_id, int forced_kill)
+static void terminate_threads(int group_id)
{
struct thread_data *td;
int i;
for_each_td(td, i) {
if (group_id == TERMINATE_ALL || group_id == td->groupid) {
+ /*
+ * if the thread is running, just let it exit;
+ * if it hasn't reached TD_RUNNING yet, kick it with
+ * SIGQUIT so it doesn't sit in its startup delay
+ */
+ if (td->runstate < TD_RUNNING)
+ kill(td->pid, SIGQUIT);
td->terminate = 1;
td->start_delay = 0;
- if (forced_kill)
- td_set_runstate(td, TD_EXITED);
}
}
}
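+ /*
+ * (TERMINATE_ALL is presumably a sentinel group id that no real job
+ * uses, so the first test above matches every thread; threads at or
+ * past TD_RUNNING notice ->terminate on their next loop and exit
+ * cleanly, so only not-yet-running threads need the SIGQUIT)
+ */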
default:
printf("\nfio: terminating on signal %d\n", sig);
fflush(stdout);
- terminate_threads(TERMINATE_ALL, 0);
+ terminate_threads(TERMINATE_ALL);
break;
}
}
*/
static int check_min_rate(struct thread_data *td, struct timeval *now)
{
+ unsigned long long bytes = 0;
unsigned long spent;
unsigned long rate;
- int ddir = td->ddir;
+
+ /*
+ * No minimum rate set, always ok
+ */
+ if (!td->ratemin)
+ return 0;
/*
* allow a 2 second settle period in the beginning
*/
if (mtime_since(&td->start, now) < 2000)
return 0;
+ if (td_read(td))
+ bytes += td->this_io_bytes[DDIR_READ];
+ if (td_write(td))
+ bytes += td->this_io_bytes[DDIR_WRITE];
+
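+ /*
+ * note that for mixed workloads the minimum rate is now judged
+ * against the sum of both data directions; the old code only
+ * sampled td->ddir
+ */
+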
/*
* if rate_bytes is set, a sample period is already running
*/
if (spent < td->ratecycle)
return 0;
- rate = (td->this_io_bytes[ddir] - td->rate_bytes) / spent;
- if (rate < td->ratemin) {
- fprintf(f_out, "%s: min rate %u not met, got %luKiB/sec\n", td->name, td->ratemin, rate);
+ if (bytes < td->rate_bytes) {
+ fprintf(f_out, "%s: min rate %u not met\n", td->name, td->ratemin);
return 1;
+ } else {
+ rate = (bytes - td->rate_bytes) / spent;
+ if (rate < td->ratemin) {
+ fprintf(f_out, "%s: min rate %u not met, got %luKiB/sec\n", td->name, td->ratemin, rate);
+ return 1;
+ }
}
}
- td->rate_bytes = td->this_io_bytes[ddir];
+ td->rate_bytes = bytes;
memcpy(&td->lastrate, now, sizeof(*now));
return 0;
}
return 0;
}
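+ /*
+ * Rough worked example of the check above, assuming (as elsewhere in
+ * fio) that ratemin is KiB/sec and spent, from mtime_since(), is
+ * msecs, so bytes/msec approximates KiB/sec: with ratemin=2048 and
+ * spent=1000, roughly 2048 * 1000 bytes (~2MB) must have completed
+ * since the last sample, else the job fails with "min rate not met".
+ */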
-static struct fio_file *get_next_file(struct thread_data *td)
-{
- unsigned int old_next_file = td->next_file;
- struct fio_file *f;
-
- do {
- f = &td->files[td->next_file];
-
- td->next_file++;
- if (td->next_file >= td->nr_files)
- td->next_file = 0;
-
- if (f->fd != -1)
- break;
-
- f = NULL;
- } while (td->next_file != old_next_file);
-
- return f;
-}
-
/*
* When job exits, we can cancel the in-flight IO if we are using async
* io. Attempt to do so.
*/
static void cleanup_pending_aio(struct thread_data *td)
{
- struct timespec ts = { .tv_sec = 0, .tv_nsec = 0};
struct list_head *entry, *n;
- struct io_completion_data icd;
struct io_u *io_u;
int r;
/*
* get immediately available events, if any
*/
- r = td_io_getevents(td, 0, td->cur_depth, &ts);
- if (r > 0) {
- init_icd(&icd, NULL, r);
- ios_completed(td, &icd);
- }
+ r = io_u_queued_complete(td, 0);
+ if (r < 0)
+ return;
/*
* now cancel remaining active events
*/
if (td->io_ops->cancel) {
list_for_each_safe(entry, n, &td->io_u_busylist) {
io_u = list_entry(entry, struct io_u, list);
- r = td->io_ops->cancel(td, io_u);
- if (!r)
+ /*
+ * if the io_u isn't in flight, then that generally
+ * means someone leaked an io_u. complain but fix
+ * it up, so we don't stall here.
+ */
+ if ((io_u->flags & IO_U_F_FLIGHT) == 0) {
+ log_err("fio: non-busy IO on busy list\n");
put_io_u(td, io_u);
+ } else {
+ r = td->io_ops->cancel(td, io_u);
+ if (!r)
+ put_io_u(td, io_u);
+ }
}
}
- if (td->cur_depth) {
- r = td_io_getevents(td, td->cur_depth, td->cur_depth, NULL);
- if (r > 0) {
- init_icd(&icd, NULL, r);
- ios_completed(td, &icd);
- }
- }
+ if (td->cur_depth)
+ r = io_u_queued_complete(td, td->cur_depth);
}
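+ /*
+ * (the io_u_queued_complete(td, 0) call above is presumably a pure
+ * poll: a minimum of 0 events reaps whatever has already completed
+ * without blocking, much like the zeroed timespec it replaces)
+ */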
/*
static int fio_io_sync(struct thread_data *td, struct fio_file *f)
{
struct io_u *io_u = __get_io_u(td);
- struct io_completion_data icd;
int ret;
if (!io_u)
return 1;
}
+requeue:
ret = td_io_queue(td, io_u);
if (ret < 0) {
- td_verror(td, io_u->error);
+ td_verror(td, io_u->error, "td_io_queue");
put_io_u(td, io_u);
return 1;
} else if (ret == FIO_Q_QUEUED) {
- ret = td_io_getevents(td, 1, td->cur_depth, NULL);
- if (ret < 0) {
- td_verror(td, -ret);
- return 1;
- }
-
- init_icd(&icd, NULL, ret);
- ios_completed(td, &icd);
- if (icd.error) {
- td_verror(td, icd.error);
+ if (io_u_queued_complete(td, 1) < 0)
return 1;
- }
} else if (ret == FIO_Q_COMPLETED) {
if (io_u->error) {
- td_verror(td, io_u->error);
+ td_verror(td, io_u->error, "td_io_queue");
return 1;
}
- init_icd(&icd, NULL, 1);
- io_completed(td, io_u, &icd);
- put_io_u(td, io_u);
+ if (io_u_sync_complete(td, io_u) < 0)
+ return 1;
+ } else if (ret == FIO_Q_BUSY) {
+ if (td_io_commit(td))
+ return 1;
+ goto requeue;
}
return 0;
}
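+ /*
+ * The queueing protocol used above, and again in do_verify() and
+ * do_io() below: td_io_queue() returns FIO_Q_COMPLETED when the
+ * engine finished the io_u inline (reaped via io_u_sync_complete()),
+ * FIO_Q_QUEUED when it is in flight and later reaped through
+ * io_u_queued_complete(), and FIO_Q_BUSY when the engine could not
+ * accept it, in which case td_io_commit() flushes queued io and the
+ * io_u is retried.
+ */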
/*
- * The main verify engine. Runs over the writes we previusly submitted,
+ * The main verify engine. Runs over the writes we previously submitted,
* reads the blocks back in, and checks the crc/md5 of the data.
*/
static void do_verify(struct thread_data *td)
* read from disk.
*/
for_each_file(td, f, i) {
- fio_io_sync(td, f);
- file_invalidate_cache(td, f);
+ if (fio_io_sync(td, f))
+ break;
+ if (file_invalidate_cache(td, f))
+ break;
}
+ if (td->error)
+ return;
+
td_set_runstate(td, TD_VERIFYING);
io_u = NULL;
while (!td->terminate) {
- struct io_completion_data icd;
- struct timespec *timeout;
+ int ret2;
io_u = __get_io_u(td);
if (!io_u)
break;
- if (runtime_exceeded(td, &io_u->start_time))
+ if (runtime_exceeded(td, &io_u->start_time)) {
+ put_io_u(td, io_u);
break;
+ }
- if (get_next_verify(td, io_u))
+ if (get_next_verify(td, io_u)) {
+ put_io_u(td, io_u);
break;
+ }
- if (td_io_prep(td, io_u))
+ if (td_io_prep(td, io_u)) {
+ put_io_u(td, io_u);
break;
+ }
-requeue:
- ret = td_io_queue(td, io_u);
+ io_u->end_io = verify_io_u;
+ ret = td_io_queue(td, io_u);
switch (ret) {
case FIO_Q_COMPLETED:
if (io_u->error)
ret = -io_u->error;
- if (io_u->xfer_buflen != io_u->resid && io_u->resid) {
+ else if (io_u->xfer_buflen != io_u->resid && io_u->resid) {
int bytes = io_u->xfer_buflen - io_u->resid;
io_u->xfer_buflen = io_u->resid;
io_u->xfer_buf += bytes;
- goto requeue;
- }
- init_icd(&icd, verify_io_u, 1);
- io_completed(td, io_u, &icd);
- if (icd.error) {
- ret = icd.error;
- break;
+ requeue_io_u(td, &io_u);
+ } else {
+ ret = io_u_sync_complete(td, io_u);
+ if (ret < 0)
+ break;
}
- put_io_u(td, io_u);
continue;
case FIO_Q_QUEUED:
break;
+ case FIO_Q_BUSY:
+ requeue_io_u(td, &io_u);
+ ret2 = td_io_commit(td);
+ if (ret2 < 0)
+ ret = ret2;
+ break;
default:
assert(ret < 0);
- td_verror(td, -ret);
+ td_verror(td, -ret, "td_io_queue");
break;
}
- if (ret < 0)
+ if (ret < 0 || td->error)
break;
/*
* if we can queue more, do so. but check if there are
* completed io_u's first.
*/
- if (queue_full(td)) {
- timeout = NULL;
+ min_events = 0;
+ if (queue_full(td) || ret == FIO_Q_BUSY) {
min_events = 1;
- } else {
- struct timespec ts;
- ts.tv_sec = 0;
- ts.tv_nsec = 0;
- timeout = &ts;
- min_events = 0;
+ if (td->cur_depth > td->iodepth_low)
+ min_events = td->cur_depth - td->iodepth_low;
}
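+ /*
+ * iodepth_low seems to act as a low watermark here: e.g. with
+ * cur_depth=16 and iodepth_low=4, at least 12 completions are
+ * reaped, draining the queue to the low mark before more io is
+ * submitted
+ */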
/*
* Reap required number of io units, if any, and do the
* verification on them through the callback handler
*/
- ret = td_io_getevents(td, min_events, td->cur_depth, timeout);
- if (ret < 0) {
- td_verror(td, -ret);
- break;
- } else if (!ret)
- continue;
-
- init_icd(&icd, verify_io_u, ret);
- ios_completed(td, &icd);
-
- if (icd.error) {
- td_verror(td, icd.error);
+ if (io_u_queued_complete(td, min_events) < 0)
break;
- }
}
- if (io_u)
- put_io_u(td, io_u);
+ if (!td->error) {
+ min_events = td->cur_depth;
- if (td->cur_depth)
+ if (min_events)
+ ret = io_u_queued_complete(td, min_events);
+ } else
cleanup_pending_aio(td);
td_set_runstate(td, TD_RUNNING);
*/
static void do_io(struct thread_data *td)
{
- struct io_completion_data icd;
struct timeval s;
unsigned long usec;
- struct fio_file *f;
int i, ret = 0;
td_set_runstate(td, TD_RUNNING);
while ((td->this_io_bytes[0] + td->this_io_bytes[1]) < td->io_size) {
- struct timespec *timeout;
+ struct timeval comp_time;
+ long bytes_done = 0;
int min_evts = 0;
struct io_u *io_u;
+ int ret2;
if (td->terminate)
break;
- f = get_next_file(td);
- if (!f)
- break;
-
- io_u = get_io_u(td, f);
+ io_u = get_io_u(td);
if (!io_u)
break;
memcpy(&s, &io_u->start_time, sizeof(s));
-requeue:
- ret = td_io_queue(td, io_u);
+ if (runtime_exceeded(td, &s)) {
+ put_io_u(td, io_u);
+ break;
+ }
+
+ ret = td_io_queue(td, io_u);
switch (ret) {
case FIO_Q_COMPLETED:
- if (io_u->error) {
- ret = io_u->error;
- break;
- }
- if (io_u->xfer_buflen != io_u->resid && io_u->resid) {
+ if (io_u->error)
+ ret = -io_u->error;
+ else if (io_u->xfer_buflen != io_u->resid && io_u->resid) {
int bytes = io_u->xfer_buflen - io_u->resid;
io_u->xfer_buflen = io_u->resid;
io_u->xfer_buf += bytes;
- goto requeue;
+ requeue_io_u(td, &io_u);
+ } else {
+ fio_gettime(&comp_time, NULL);
+ bytes_done = io_u_sync_complete(td, io_u);
+ if (bytes_done < 0)
+ ret = bytes_done;
}
- init_icd(&icd, NULL, 1);
- io_completed(td, io_u, &icd);
- put_io_u(td, io_u);
break;
case FIO_Q_QUEUED:
+ /*
+ * if the engine doesn't have a commit hook,
+ * the io_u is really queued. if it does have such
+ * a hook, it has to call io_u_queued() itself.
+ */
+ if (td->io_ops->commit == NULL)
+ io_u_queued(td, io_u);
+ break;
+ case FIO_Q_BUSY:
+ requeue_io_u(td, &io_u);
+ ret2 = td_io_commit(td);
+ if (ret2 < 0)
+ ret = ret2;
break;
default:
assert(ret < 0);
break;
}
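+ /*
+ * on a short transfer above (resid set), xfer_buf/xfer_buflen are
+ * advanced past the bytes already done and the io_u is requeued
+ * for the remainder
+ */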
- if (ret < 0)
+ if (ret < 0 || td->error)
break;
- add_slat_sample(td, io_u->ddir, mtime_since(&io_u->start_time, &io_u->issue_time));
-
- if (ret == FIO_Q_QUEUED) {
- if (td->cur_depth < td->iodepth) {
- struct timespec ts;
-
- ts.tv_sec = 0;
- ts.tv_nsec = 0;
- timeout = &ts;
- min_evts = 0;
- } else {
- timeout = NULL;
+ /*
+ * See if we need to complete some commands
+ */
+ if (ret == FIO_Q_QUEUED || ret == FIO_Q_BUSY) {
+ min_evts = 0;
+ if (queue_full(td) || ret == FIO_Q_BUSY) {
min_evts = 1;
- }
- ret = td_io_getevents(td, min_evts, td->cur_depth, timeout);
- if (ret < 0) {
- td_verror(td, -ret);
- break;
- } else if (!ret)
- continue;
+ if (td->cur_depth > td->iodepth_low)
+ min_evts = td->cur_depth - td->iodepth_low;
+ }
- init_icd(&icd, NULL, ret);
- ios_completed(td, &icd);
- if (icd.error) {
- td_verror(td, icd.error);
+ fio_gettime(&comp_time, NULL);
+ bytes_done = io_u_queued_complete(td, min_evts);
+ if (bytes_done < 0)
break;
- }
}
+ if (!bytes_done)
+ continue;
+
/*
* the rate is batched for now: it works for batches of
* completions, except the very first one, which may look
* a little bursty
*/
- usec = utime_since(&s, &icd.time);
+ usec = utime_since(&s, &comp_time);
- rate_throttle(td, usec, icd.bytes_done[td->ddir], td->ddir);
+ rate_throttle(td, usec, bytes_done);
- if (check_min_rate(td, &icd.time)) {
+ if (check_min_rate(td, &comp_time)) {
if (exitall_on_terminate)
- terminate_threads(td->groupid, 0);
- td_verror(td, ENODATA);
+ terminate_threads(td->groupid);
+ td_verror(td, ENODATA, "check_min_rate");
break;
}
- if (runtime_exceeded(td, &icd.time))
- break;
-
if (td->thinktime) {
unsigned long long b;
}
if (!td->error) {
- if (td->cur_depth)
- cleanup_pending_aio(td);
+ struct fio_file *f;
+
+ i = td->cur_depth;
+ if (i)
+ ret = io_u_queued_complete(td, i);
if (should_fsync(td) && td->end_fsync) {
td_set_runstate(td, TD_FSYNCING);
for_each_file(td, f, i)
fio_io_sync(td, f);
}
- }
+ } else
+ cleanup_pending_aio(td);
}
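+ /*
+ * (note the asymmetry above: on a clean exit we wait for all queued
+ * io to complete, while on error whatever is left in flight gets
+ * cancelled through cleanup_pending_aio() instead)
+ */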
static void cleanup_io_u(struct thread_data *td)
fill_rand_buf(io_u, max_bs);
io_u->index = i;
+ io_u->flags = IO_U_F_FREE;
list_add(&io_u->list, &td->io_u_freelist);
}
+ io_u_init_timeout();
+
return 0;
}
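+ /*
+ * (io_u_init_timeout() presumably arms the io_u watchdog state that
+ * pairs with the td->timeout_end stamp taken in thread_main() below)
+ */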
f = fopen(tmp, "r+");
if (!f) {
- td_verror(td, errno);
+ td_verror(td, errno, "fopen");
return 1;
}
*/
ret = fwrite(td->ioscheduler, strlen(td->ioscheduler), 1, f);
if (ferror(f) || ret != 1) {
- td_verror(td, errno);
+ td_verror(td, errno, "fwrite");
fclose(f);
return 1;
}
*/
ret = fread(tmp, 1, sizeof(tmp), f);
if (ferror(f) || ret < 0) {
- td_verror(td, errno);
+ td_verror(td, errno, "fread");
fclose(f);
return 1;
}
sprintf(tmp2, "[%s]", td->ioscheduler);
if (!strstr(tmp, tmp2)) {
log_err("fio: io scheduler %s not found\n", td->ioscheduler);
- td_verror(td, EINVAL);
+ td_verror(td, EINVAL, "iosched_switch");
fclose(f);
return 1;
}
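+ /*
+ * the strstr() check above relies on the sysfs scheduler file
+ * listing every scheduler with the active one bracketed, e.g.
+ * "noop anticipatory deadline [cfq]"
+ */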
struct fio_file *f;
int i;
- td->stat_io_bytes[0] = td->stat_io_bytes[1] = 0;
+ td->ts.stat_io_bytes[0] = td->ts.stat_io_bytes[1] = 0;
td->this_io_bytes[0] = td->this_io_bytes[1] = 0;
td->zone_bytes = 0;
+ td->last_was_sync = 0;
+
for_each_file(td, f, i) {
+ f->last_completed_pos = 0;
+
f->last_pos = 0;
if (td->io_ops->flags & FIO_SYNCIO)
lseek(f->fd, 0, SEEK_SET);
INIT_LIST_HEAD(&td->io_u_freelist);
INIT_LIST_HEAD(&td->io_u_busylist);
+ INIT_LIST_HEAD(&td->io_u_requeues);
INIT_LIST_HEAD(&td->io_hist_list);
INIT_LIST_HEAD(&td->io_log_list);
goto err;
if (fio_setaffinity(td) == -1) {
- td_verror(td, errno);
+ td_verror(td, errno, "cpu_set_affinity");
goto err;
}
if (td->ioprio) {
if (ioprio_set(IOPRIO_WHO_PROCESS, 0, td->ioprio) == -1) {
- td_verror(td, errno);
+ td_verror(td, errno, "ioprio_set");
goto err;
}
}
if (nice(td->nice) == -1) {
- td_verror(td, errno);
+ td_verror(td, errno, "nice");
goto err;
}
if (!td->create_serialize && setup_files(td))
goto err;
- if (open_files(td))
- goto err;
- /*
- * Do this late, as some IO engines would like to have the
- * files setup prior to initializing structures.
- */
if (td_io_init(td))
goto err;
+ if (open_files(td))
+ goto err;
+
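+ /*
+ * ordering note: the io engine is now initialized before files are
+ * opened, presumably so engines that need their ->init() data at
+ * file open time have it available
+ */
+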
if (td->exec_prerun) {
if (system(td->exec_prerun) < 0)
goto err;
}
fio_gettime(&td->epoch, NULL);
- getrusage(RUSAGE_SELF, &td->ru_start);
+ memcpy(&td->timeout_end, &td->epoch, sizeof(td->epoch));
+ getrusage(RUSAGE_SELF, &td->ts.ru_start);
runtime[0] = runtime[1] = 0;
while (td->loops--) {
fio_gettime(&td->start, NULL);
- memcpy(&td->stat_sample_time, &td->start, sizeof(td->start));
+ memcpy(&td->ts.stat_sample_time, &td->start, sizeof(td->start));
if (td->ratemin)
- memcpy(&td->lastrate, &td->stat_sample_time, sizeof(td->lastrate));
+ memcpy(&td->lastrate, &td->ts.stat_sample_time, sizeof(td->lastrate));
clear_io_state(td);
prune_io_piece_log(td);
else
do_io(td);
- runtime[td->ddir] += utime_since_now(&td->start);
- if (td_rw(td) && td->io_bytes[td->ddir ^ 1])
- runtime[td->ddir ^ 1] = runtime[td->ddir];
-
+ if (td_read(td) && td->io_bytes[DDIR_READ])
+ runtime[DDIR_READ] += utime_since_now(&td->start);
+ if (td_write(td) && td->io_bytes[DDIR_WRITE])
+ runtime[DDIR_WRITE] += utime_since_now(&td->start);
+
if (td->error || td->terminate)
break;
}
update_rusage_stat(td);
- fio_gettime(&td->end_time, NULL);
- td->runtime[0] = runtime[0] / 1000;
- td->runtime[1] = runtime[1] / 1000;
-
- if (td->bw_log)
- finish_log(td, td->bw_log, "bw");
- if (td->slat_log)
- finish_log(td, td->slat_log, "slat");
- if (td->clat_log)
- finish_log(td, td->clat_log, "clat");
+ td->ts.runtime[0] = runtime[0] / 1000;
+ td->ts.runtime[1] = runtime[1] / 1000;
+ td->ts.total_run_time = mtime_since_now(&td->epoch);
+ td->ts.io_bytes[0] = td->io_bytes[0];
+ td->ts.io_bytes[1] = td->io_bytes[1];
+
+ if (td->ts.bw_log)
+ finish_log(td, td->ts.bw_log, "bw");
+ if (td->ts.slat_log)
+ finish_log(td, td->ts.slat_log, "slat");
+ if (td->ts.clat_log)
+ finish_log(td, td->ts.clat_log, "clat");
if (td->write_iolog_file)
write_iolog_close(td);
if (td->exec_postrun) {
}
if (exitall_on_terminate)
- terminate_threads(td->groupid, 0);
+ terminate_threads(td->groupid);
err:
if (td->error)
close_ioengine(td);
cleanup_io_u(td);
td_set_runstate(td, TD_EXITED);
- return (void *) td->error;
+ return (void *) (unsigned long) td->error;
}
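+ /*
+ * (the intermediate unsigned long casts here and in fork_main() below
+ * avoid int/pointer size-mismatch warnings on 64-bit builds, where
+ * sizeof(int) != sizeof(void *))
+ */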
/*
td = data + offset * sizeof(struct thread_data);
ret = thread_main(td);
shmdt(data);
- return (int) ret;
+ return (int) (unsigned long) ret;
}
/*
*/
pending = cputhreads = 0;
for_each_td(td, i) {
+ int flags = 0;
+
/*
* ->io_ops is NULL for a thread that has closed its
* io engine
*/
if (td->io_ops && td->io_ops->flags & FIO_CPUIO)
cputhreads++;
- if (td->runstate < TD_EXITED) {
- /*
- * check if someone quit or got killed in an unusual way
- */
- ret = waitpid(td->pid, &status, WNOHANG);
- if (ret < 0)
- perror("waitpid");
- else if ((ret == td->pid) && WIFSIGNALED(status)) {
- int sig = WTERMSIG(status);
-
- log_err("fio: pid=%d, got signal=%d\n", td->pid, sig);
+ if (!td->pid || td->runstate == TD_REAPED)
+ continue;
+ if (td->use_thread) {
+ if (td->runstate == TD_EXITED) {
td_set_runstate(td, TD_REAPED);
goto reaped;
}
+ continue;
}
- if (td->runstate != TD_EXITED) {
- if (td->runstate < TD_RUNNING)
- pending++;
+ flags = WNOHANG;
+ if (td->runstate == TD_EXITED)
+ flags = 0;
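+ /*
+ * once TD_EXITED is flagged the process exit is imminent, so it is
+ * presumably safe to block in waitpid (flags == 0); otherwise we
+ * just peek with WNOHANG
+ */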
- continue;
- }
+ /*
+ * check if someone quit or got killed in an unusual way
+ */
+ ret = waitpid(td->pid, &status, flags);
+ if (ret < 0) {
+ if (errno == ECHILD) {
+ log_err("fio: pid=%d disappeared %d\n", td->pid, td->runstate);
+ td_set_runstate(td, TD_REAPED);
+ goto reaped;
+ }
+ perror("waitpid");
+ } else if (ret == td->pid) {
+ if (WIFSIGNALED(status)) {
+ int sig = WTERMSIG(status);
- if (td->error)
- exit_value++;
+ if (sig != SIGQUIT)
+ log_err("fio: pid=%d, got signal=%d\n", td->pid, sig);
+ td_set_runstate(td, TD_REAPED);
+ goto reaped;
+ }
+ if (WIFEXITED(status)) {
+ if (WEXITSTATUS(status) && !td->error)
+ td->error = WEXITSTATUS(status);
- td_set_runstate(td, TD_REAPED);
+ td_set_runstate(td, TD_REAPED);
+ goto reaped;
+ }
+ }
+ /*
+ * thread is not dead, continue
+ */
+ continue;
+reaped:
if (td->use_thread) {
long ret;
if (pthread_join(td->thread, (void *) &ret))
- perror("thread_join");
- } else {
- int status;
-
- ret = waitpid(td->pid, &status, 0);
- if (ret < 0)
- perror("waitpid");
- else if (WIFEXITED(status) && WEXITSTATUS(status)) {
- if (!exit_value)
- exit_value++;
- }
+ perror("pthread_join");
}
-reaped:
(*nr_running)--;
(*m_rate) -= td->ratemin;
(*t_rate) -= td->rate;
+
+ if (td->error)
+ exit_value++;
}
if (*nr_running == cputhreads && !pending)
- terminate_threads(TERMINATE_ALL, 0);
+ terminate_threads(TERMINATE_ALL);
}
/*
init_disk_util(td);
}
+ set_genesis_time();
+
while (todo) {
struct thread_data *map[MAX_JOBS];
struct timeval this_start;
setup_log(&agg_io_log[DDIR_WRITE]);
}
+ set_genesis_time();
+
disk_util_timer_arm();
run_threads();