td->runstate = runstate;
}
-static void terminate_threads(int group_id, int forced_kill)
+static void terminate_threads(int group_id)
{
struct thread_data *td;
int i;
for_each_td(td, i) {
-		if (group_id == TERMINATE_ALL || groupid == td->groupid) {
+		if (group_id == TERMINATE_ALL || group_id == td->groupid) {
+			/*
+			 * a thread that is already running will notice
+			 * ->terminate and exit on its own; one that hasn't
+			 * reached TD_RUNNING yet must be signaled explicitly
+			 */
+ if (td->runstate < TD_RUNNING)
+ kill(td->pid, SIGQUIT);
td->terminate = 1;
td->start_delay = 0;
- if (forced_kill)
- td_set_runstate(td, TD_EXITED);
}
}
}
default:
printf("\nfio: terminating on signal %d\n", sig);
fflush(stdout);
- terminate_threads(TERMINATE_ALL, 0);
+ terminate_threads(TERMINATE_ALL);
break;
}
}
unsigned long spent;
unsigned long rate;
+ /*
+ * No minimum rate set, always ok
+ */
+ if (!td->ratemin)
+ return 0;
+
/*
* allow a 2 second settle period in the beginning
*/
/*
* get immediately available events, if any
*/
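+	/*
+	 * completion also runs each io_u's private ->end_io
+	 * callback, if one was set when the io_u was queued
+	 */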
- r = io_u_queued_complete(td, 0, NULL);
+ r = io_u_queued_complete(td, 0);
if (r < 0)
return;
}
if (td->cur_depth)
- r = io_u_queued_complete(td, td->cur_depth, NULL);
+ r = io_u_queued_complete(td, td->cur_depth);
}
/*
put_io_u(td, io_u);
return 1;
} else if (ret == FIO_Q_QUEUED) {
- if (io_u_queued_complete(td, 1, NULL) < 0)
+ if (io_u_queued_complete(td, 1) < 0)
return 1;
} else if (ret == FIO_Q_COMPLETED) {
if (io_u->error) {
return 1;
}
- if (io_u_sync_complete(td, io_u, NULL) < 0)
+ if (io_u_sync_complete(td, io_u) < 0)
return 1;
} else if (ret == FIO_Q_BUSY) {
if (td_io_commit(td))
}
/*
- * The main verify engine. Runs over the writes we previusly submitted,
+ * The main verify engine. Runs over the writes we previously submitted,
* reads the blocks back in, and checks the crc/md5 of the data.
*/
static void do_verify(struct thread_data *td)
io_u = NULL;
while (!td->terminate) {
+ int ret2;
+
io_u = __get_io_u(td);
if (!io_u)
break;
put_io_u(td, io_u);
break;
}
-requeue:
- ret = td_io_queue(td, io_u);
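+		/*
+		 * verification runs as this io_u's private end-io
+		 * callback, invoked when the completion is reaped
+		 */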
+ io_u->end_io = verify_io_u;
+
+ ret = td_io_queue(td, io_u);
switch (ret) {
case FIO_Q_COMPLETED:
if (io_u->error)
ret = -io_u->error;
- if (io_u->xfer_buflen != io_u->resid && io_u->resid) {
+ else if (io_u->xfer_buflen != io_u->resid && io_u->resid) {
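+				/*
+				 * short completion: trim the io_u to the
+				 * residual bytes and requeue it for the rest
+				 */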
int bytes = io_u->xfer_buflen - io_u->resid;
io_u->xfer_buflen = io_u->resid;
io_u->xfer_buf += bytes;
- goto requeue;
+ requeue_io_u(td, &io_u);
+ } else {
+ ret = io_u_sync_complete(td, io_u);
+ if (ret < 0)
+ break;
}
- ret = io_u_sync_complete(td, io_u, verify_io_u);
- if (ret < 0)
- break;
continue;
case FIO_Q_QUEUED:
break;
case FIO_Q_BUSY:
requeue_io_u(td, &io_u);
- ret = td_io_commit(td);
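+			/*
+			 * commit through a second variable, so a commit
+			 * error reaches the exit check below without
+			 * clobbering ret on success
+			 */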
+ ret2 = td_io_commit(td);
+ if (ret2 < 0)
+ ret = ret2;
break;
default:
assert(ret < 0);
* Reap required number of io units, if any, and do the
* verification on them through the callback handler
*/
- if (io_u_queued_complete(td, min_events, verify_io_u) < 0)
+ if (io_u_queued_complete(td, min_events) < 0)
break;
}
- if (td->cur_depth)
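+	/*
+	 * on clean exit, reap anything still in flight so the verify
+	 * callbacks get to run; on error, just cancel the pending io
+	 */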
+ if (!td->error) {
+ min_events = td->cur_depth;
+
+ if (min_events)
+ ret = io_u_queued_complete(td, min_events);
+ } else
cleanup_pending_aio(td);
td_set_runstate(td, TD_RUNNING);
long bytes_done = 0;
int min_evts = 0;
struct io_u *io_u;
+ int ret2;
if (td->terminate)
break;
put_io_u(td, io_u);
break;
}
-requeue:
- ret = td_io_queue(td, io_u);
+ ret = td_io_queue(td, io_u);
switch (ret) {
case FIO_Q_COMPLETED:
- if (io_u->error) {
- ret = io_u->error;
- break;
- }
- if (io_u->xfer_buflen != io_u->resid && io_u->resid) {
+ if (io_u->error)
+ ret = -io_u->error;
+ else if (io_u->xfer_buflen != io_u->resid && io_u->resid) {
int bytes = io_u->xfer_buflen - io_u->resid;
io_u->xfer_buflen = io_u->resid;
io_u->xfer_buf += bytes;
- goto requeue;
+ requeue_io_u(td, &io_u);
+ } else {
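+				/*
+				 * full completion: timestamp it for the
+				 * rate check, then account the bytes done
+				 */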
+ fio_gettime(&comp_time, NULL);
+ bytes_done = io_u_sync_complete(td, io_u);
+ if (bytes_done < 0)
+ ret = bytes_done;
}
- fio_gettime(&comp_time, NULL);
- bytes_done = io_u_sync_complete(td, io_u, NULL);
- if (bytes_done < 0)
- ret = bytes_done;
break;
case FIO_Q_QUEUED:
/*
break;
case FIO_Q_BUSY:
requeue_io_u(td, &io_u);
- ret = td_io_commit(td);
+ ret2 = td_io_commit(td);
+ if (ret2 < 0)
+ ret = ret2;
break;
default:
assert(ret < 0);
}
fio_gettime(&comp_time, NULL);
- bytes_done = io_u_queued_complete(td, min_evts, NULL);
+ bytes_done = io_u_queued_complete(td, min_evts);
if (bytes_done < 0)
break;
}
if (check_min_rate(td, &comp_time)) {
if (exitall_on_terminate)
- terminate_threads(td->groupid, 0);
+ terminate_threads(td->groupid);
td_verror(td, ENODATA, "check_min_rate");
break;
}
if (!td->error) {
struct fio_file *f;
- if (td->cur_depth)
- cleanup_pending_aio(td);
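+		/*
+		 * reap whatever is still in flight before the optional
+		 * final fsync of the files
+		 */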
+ i = td->cur_depth;
+ if (i)
+ ret = io_u_queued_complete(td, i);
if (should_fsync(td) && td->end_fsync) {
td_set_runstate(td, TD_FSYNCING);
for_each_file(td, f, i)
fio_io_sync(td, f);
}
- }
+ } else
+ cleanup_pending_aio(td);
}
static void cleanup_io_u(struct thread_data *td)
if (!td->create_serialize && setup_files(td))
goto err;
- if (open_files(td))
- goto err;
- /*
- * Do this late, as some IO engines would like to have the
- * files setup prior to initializing structures.
- */
if (td_io_init(td))
goto err;
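+	/*
+	 * the io engine is initialized before any files are opened,
+	 * so opens can presumably rely on engine state being set up
+	 */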
+ if (open_files(td))
+ goto err;
+
if (td->exec_prerun) {
if (system(td->exec_prerun) < 0)
goto err;
}
update_rusage_stat(td);
- fio_gettime(&td->end_time, NULL);
- td->runtime[0] = runtime[0] / 1000;
- td->runtime[1] = runtime[1] / 1000;
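+	/*
+	 * mirror the final runtime and byte counters into the
+	 * thread_stat, which is what the results reporting reads
+	 */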
+ td->ts.runtime[0] = runtime[0] / 1000;
+ td->ts.runtime[1] = runtime[1] / 1000;
+ td->ts.total_run_time = mtime_since_now(&td->epoch);
+ td->ts.io_bytes[0] = td->io_bytes[0];
+ td->ts.io_bytes[1] = td->io_bytes[1];
if (td->ts.bw_log)
finish_log(td, td->ts.bw_log, "bw");
}
if (exitall_on_terminate)
- terminate_threads(td->groupid, 0);
+ terminate_threads(td->groupid);
err:
if (td->error)
if (WIFSIGNALED(status)) {
int sig = WTERMSIG(status);
- log_err("fio: pid=%d, got signal=%d\n", td->pid, sig);
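+			/*
+			 * SIGQUIT is what terminate_threads() sends to
+			 * threads that haven't started running, so it
+			 * isn't worth logging
+			 */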
+ if (sig != SIGQUIT)
+ log_err("fio: pid=%d, got signal=%d\n", td->pid, sig);
td_set_runstate(td, TD_REAPED);
goto reaped;
}
}
if (*nr_running == cputhreads && !pending)
- terminate_threads(TERMINATE_ALL, 0);
+ terminate_threads(TERMINATE_ALL);
}
/*