#ifdef WIN32
static void sig_break(int sig)
{
- struct thread_data *td;
- int i;
-
sig_int(sig);
/**
* Windows terminates all job processes on SIGBREAK after the handler
* returns, so give them time to wrap-up and give stats
*/
- for_each_td(td, i) {
+ for_each_td(td) {
while (td->runstate < TD_EXITED)
sleep(1);
- }
+ } end_for_each();
}
#endif
if (!from_verify)
unlog_io_piece(td, io_u);
td_verror(td, EIO, "full resid");
- put_io_u(td, io_u);
+ clear_io_u(td, io_u);
break;
}
if (td->error)
return;
- /*
- * verify_state needs to be reset before verification
- * proceeds so that expected random seeds match actual
- * random seeds in headers. The main loop will reset
- * all random number generators if randrepeat is set.
- */
- if (!td->o.rand_repeatable)
- td_fill_verify_state_seed(td);
-
td_set_runstate(td, TD_VERIFYING);
io_u = NULL;
struct timespec *time)
{
unsigned long long b;
+ unsigned long long runtime_left;
uint64_t total;
int left;
struct timespec now;
if (td->o.thinktime_iotime) {
fio_gettime(&now, NULL);
if (utime_since(&td->last_thinktime, &now)
- >= td->o.thinktime_iotime + td->o.thinktime) {
+ >= td->o.thinktime_iotime) {
stall = true;
} else if (!fio_option_is_set(&td->o, thinktime_blocks)) {
/*
io_u_quiesce(td);
+ left = td->o.thinktime_spin;
+ if (td->o.timeout) {
+ runtime_left = td->o.timeout - utime_since_now(&td->epoch);
+ if (runtime_left < (unsigned long long)left)
+ left = runtime_left;
+ }
+
total = 0;
- if (td->o.thinktime_spin)
- total = usec_spin(td->o.thinktime_spin);
+ if (left)
+ total = usec_spin(left);
+
+ /*
+ * usec_spin() might run for slightly longer than intended in a VM
+ * where the vCPU could get descheduled or the hypervisor could steal
+ * CPU time. Ensure "left" doesn't become negative.
+ */
+ if (total < td->o.thinktime)
+ left = td->o.thinktime - total;
+ else
+ left = 0;
+
+ if (td->o.timeout) {
+ runtime_left = td->o.timeout - utime_since_now(&td->epoch);
+ if (runtime_left < (unsigned long long)left)
+ left = runtime_left;
+ }
- left = td->o.thinktime - total;
if (left)
total += usec_sleep(td, left);
fio_gettime(time, NULL);
td->last_thinktime_blocks = b;
- if (td->o.thinktime_iotime)
+ if (td->o.thinktime_iotime) {
+ fio_gettime(&now, NULL);
td->last_thinktime = now;
+ }
}
/*
}
if (io_u->ddir == DDIR_WRITE && td->flags & TD_F_DO_VERIFY) {
- io_u->numberio = td->io_issues[io_u->ddir];
- populate_verify_io_u(td, io_u);
+ if (!(io_u->flags & IO_U_F_PATTERN_DONE)) {
+ io_u_set(td, io_u, IO_U_F_PATTERN_DONE);
+ io_u->numberio = td->io_issues[io_u->ddir];
+ populate_verify_io_u(td, io_u);
+ }
}
ddir = io_u->ddir;
uint64_t bytes_done[DDIR_RWDIR_CNT];
int deadlock_loop_cnt;
bool clear_state;
- int res, ret;
+ int ret;
sk_out_assign(sk_out);
free(fd);
/* ioprio_set() has to be done before td_io_init() */
if (fio_option_is_set(o, ioprio) ||
- fio_option_is_set(o, ioprio_class)) {
- ret = ioprio_set(IOPRIO_WHO_PROCESS, 0, o->ioprio_class, o->ioprio);
+ fio_option_is_set(o, ioprio_class) ||
+ fio_option_is_set(o, ioprio_hint)) {
+ ret = ioprio_set(IOPRIO_WHO_PROCESS, 0, o->ioprio_class,
+ o->ioprio, o->ioprio_hint);
if (ret == -1) {
td_verror(td, errno, "ioprio_set");
goto err;
}
- td->ioprio = ioprio_value(o->ioprio_class, o->ioprio);
+ td->ioprio = ioprio_value(o->ioprio_class, o->ioprio,
+ o->ioprio_hint);
td->ts.ioprio = td->ioprio;
}
if (rate_submit_init(td, sk_out))
goto err;
- set_epoch_time(td, o->log_unix_epoch | o->log_alternate_epoch, o->log_alternate_epoch_clock_id);
+ set_epoch_time(td, o->log_alternate_epoch_clock_id, o->job_start_clock_id);
fio_getrusage(&td->ru_start);
memcpy(&td->bw_sample_time, &td->epoch, sizeof(td->epoch));
memcpy(&td->iops_sample_time, &td->epoch, sizeof(td->epoch));
if (td->o.verify_only && td_write(td))
verify_bytes = do_dry_run(td);
else {
+ if (!td->o.rand_repeatable)
+ /* save verify rand state to replay hdr seeds later at verify */
+ frand_copy(&td->verify_state_last_do_io, &td->verify_state);
do_io(td, bytes_done);
-
+ if (!td->o.rand_repeatable)
+ frand_copy(&td->verify_state, &td->verify_state_last_do_io);
if (!ddir_rw_sum(bytes_done)) {
fio_mark_td_terminate(td);
verify_bytes = 0;
* another thread is checking its io_u's for overlap
*/
if (td_offload_overlap(td)) {
- int res = pthread_mutex_lock(&overlap_check);
- assert(res == 0);
+ int res;
+
+ res = pthread_mutex_lock(&overlap_check);
+ if (res) {
+ td->error = res;
+ goto err;
+ }
}
td_set_runstate(td, TD_FINISHING);
if (td_offload_overlap(td)) {
+ int res;
+
res = pthread_mutex_unlock(&overlap_check);
- assert(res == 0);
+ if (res) {
+ td->error = res;
+ goto err;
+ }
}
update_rusage_stat(td);
static void reap_threads(unsigned int *nr_running, uint64_t *t_rate,
uint64_t *m_rate)
{
- struct thread_data *td;
unsigned int cputhreads, realthreads, pending;
- int i, status, ret;
+ int status, ret;
/*
* reap exited threads (TD_EXITED -> TD_REAPED)
*/
realthreads = pending = cputhreads = 0;
- for_each_td(td, i) {
+ for_each_td(td) {
int flags = 0;
if (!strcmp(td->o.ioengine, "cpuio"))
done_secs += mtime_since_now(&td->epoch) / 1000;
profile_td_exit(td);
flow_exit_job(td);
- }
+ } end_for_each();
if (*nr_running == cputhreads && !pending && realthreads)
fio_terminate_threads(TERMINATE_ALL, TERMINATE_ALL);
{
const char *waitee = me->o.wait_for;
const char *self = me->o.name;
- struct thread_data *td;
- int i;
if (!waitee)
return false;
- for_each_td(td, i) {
+ for_each_td(td) {
if (!strcmp(td->o.name, self) || strcmp(td->o.name, waitee))
continue;
runstate_to_name(td->runstate));
return true;
}
- }
+ } end_for_each();
dprint(FD_PROCESS, "%s: %s completed, can run\n", self, waitee);
return false;
set_sig_handlers();
nr_thread = nr_process = 0;
- for_each_td(td, i) {
+ for_each_td(td) {
if (check_mount_writes(td))
return;
if (td->o.use_thread)
nr_thread++;
else
nr_process++;
- }
+ } end_for_each();
if (output_format & FIO_OUTPUT_NORMAL) {
struct buf_output out;
nr_started = 0;
m_rate = t_rate = 0;
- for_each_td(td, i) {
+ for_each_td(td) {
print_status_init(td->thread_number - 1);
if (!td->o.create_serialize)
td_io_close_file(td, f);
}
}
- }
+ } end_for_each();
/* start idle threads before io threads start to run */
fio_idle_prof_start();
/*
* create threads (TD_NOT_CREATED -> TD_CREATED)
*/
- for_each_td(td, i) {
+ for_each_td(td) {
if (td->runstate != TD_NOT_CREATED)
continue;
ret = (int)(uintptr_t)thread_main(fd);
_exit(ret);
- } else if (i == fio_debug_jobno)
+ } else if (__td_index == fio_debug_jobno)
*fio_debug_jobp = pid;
free(eo);
free(fd);
break;
}
dprint(FD_MUTEX, "done waiting on startup_sem\n");
- }
+ } end_for_each();
/*
* Wait for the started threads to transition to
/*
* start created threads (TD_INITIALIZED -> TD_RUNNING).
*/
- for_each_td(td, i) {
+ for_each_td(td) {
if (td->runstate != TD_INITIALIZED)
continue;
t_rate += ddir_rw_sum(td->o.rate);
todo--;
fio_sem_up(td->sem);
- }
+ } end_for_each();
reap_threads(&nr_running, &t_rate, &m_rate);
int fio_backend(struct sk_out *sk_out)
{
- struct thread_data *td;
int i;
-
if (exec_profile) {
if (load_profile(exec_profile))
return 1;
}
}
- for_each_td(td, i) {
+ for_each_td(td) {
struct thread_stat *ts = &td->ts;
free_clat_prio_stats(ts);
}
fio_sem_remove(td->sem);
td->sem = NULL;
- }
+ } end_for_each();
free_disk_util();
if (cgroup_list) {