}
}
#ifdef WIN32
/*
 * SIGBREAK handler (Windows only).
 *
 * Runs the regular interrupt handler first, then blocks until every job
 * thread has reached TD_EXITED. Windows terminates all job processes as
 * soon as the SIGBREAK handler returns, so returning early would kill
 * jobs before they can wrap up and emit their final stats.
 */
static void sig_break(int sig)
{
	struct thread_data *td;
	int i;

	/* request normal termination, same path as SIGINT */
	sig_int(sig);

	/**
	 * Windows terminates all job processes on SIGBREAK after the handler
	 * returns, so give them time to wrap-up and give stats
	 */
	for_each_td(td, i) {
		/* poll with a 1s sleep to avoid busy-waiting while jobs drain */
		while (td->runstate < TD_EXITED)
			sleep(1);
	}
}
#endif
+
void sig_show_status(int sig)
{
show_running_run_stats();
/* Windows uses SIGBREAK as a quit signal from other applications */
#ifdef WIN32
memset(&act, 0, sizeof(act));
- act.sa_handler = sig_int;
+ act.sa_handler = sig_break;
act.sa_flags = SA_RESTART;
sigaction(SIGBREAK, &act, NULL);
#endif
/*
 * Check whether this thread has fallen below its configured minimum
 * bandwidth (rate_min) or minimum IOPS (rate_iops_min) for data
 * direction @ddir since the last rate check.
 *
 * Returns true if a minimum-rate constraint was violated (the caller is
 * expected to abort the job), false otherwise. Also snapshots the current
 * byte/block counters and timestamp so the next call measures a fresh
 * interval.
 */
static bool __check_min_rate(struct thread_data *td, struct timespec *now,
			     enum fio_ddir ddir)
{
	unsigned long long current_rate_check_bytes = td->this_io_bytes[ddir];
	unsigned long current_rate_check_blocks = td->this_io_blocks[ddir];
	unsigned long long option_rate_bytes_min = td->o.ratemin[ddir];
	unsigned int option_rate_iops_min = td->o.rate_iops_min[ddir];

	assert(ddir_rw(ddir));

	/* give the job a 2 second warm-up before enforcing any rate minimum */
	if (mtime_since(&td->start, now) < 2000)
		return false;

	/*
	 * if last_rate_check_blocks or last_rate_check_bytes is set,
	 * we can compute a rate per ratecycle
	 */
	if (td->last_rate_check_bytes[ddir] || td->last_rate_check_blocks[ddir]) {
		unsigned long spent = mtime_since(&td->last_rate_check_time[ddir], now);

		/*
		 * too early to judge, or zero elapsed time (which would
		 * divide by zero below) -- wait for the next cycle
		 */
		if (spent < td->o.ratecycle || spent==0)
			return false;

		if (td->o.ratemin[ddir]) {
			/*
			 * check bandwidth specified rate
			 */
			unsigned long long current_rate_bytes =
				((current_rate_check_bytes - td->last_rate_check_bytes[ddir]) * 1000) / spent;

			if (current_rate_bytes < option_rate_bytes_min) {
				log_err("%s: rate_min=%lluB/s not met, got %lluB/s\n",
					td->o.name, option_rate_bytes_min, current_rate_bytes);
				return true;
			}
		} else {
			/*
			 * checks iops specified rate
			 */
			unsigned long long current_rate_iops =
				((current_rate_check_blocks - td->last_rate_check_blocks[ddir]) * 1000) / spent;

			if (current_rate_iops < option_rate_iops_min) {
				log_err("%s: rate_iops_min=%u not met, got %llu IOPS\n",
					td->o.name, option_rate_iops_min, current_rate_iops);
				return true;
			}
		}
	}

	/* start a new measurement window from the current counters/time */
	td->last_rate_check_bytes[ddir] = current_rate_check_bytes;
	td->last_rate_check_blocks[ddir] = current_rate_check_blocks;
	memcpy(&td->last_rate_check_time[ddir], now, sizeof(*now));
	return false;
}
if (td->error)
return;
- /*
- * verify_state needs to be reset before verification
- * proceeds so that expected random seeds match actual
- * random seeds in headers. The main loop will reset
- * all random number generators if randrepeat is set.
- */
- if (!td->o.rand_repeatable)
- td_fill_verify_state_seed(td);
-
td_set_runstate(td, TD_VERIFYING);
io_u = NULL;
break;
}
} else {
- if (ddir_rw_sum(td->bytes_done) + td->o.rw_min_bs > verify_bytes)
+ if (td->bytes_verified + td->o.rw_min_bs > verify_bytes)
break;
while ((io_u = get_io_u(td)) != NULL) {
break;
} else if (io_u->ddir == DDIR_WRITE) {
io_u->ddir = DDIR_READ;
+ io_u->numberio = td->verify_read_issues;
+ td->verify_read_issues++;
populate_verify_io_u(td, io_u);
break;
} else {
struct timespec *time)
{
unsigned long long b;
+ unsigned long long runtime_left;
uint64_t total;
int left;
struct timespec now;
if (td->o.thinktime_iotime) {
fio_gettime(&now, NULL);
if (utime_since(&td->last_thinktime, &now)
- >= td->o.thinktime_iotime + td->o.thinktime) {
+ >= td->o.thinktime_iotime) {
stall = true;
} else if (!fio_option_is_set(&td->o, thinktime_blocks)) {
/*
io_u_quiesce(td);
+ left = td->o.thinktime_spin;
+ if (td->o.timeout) {
+ runtime_left = td->o.timeout - utime_since_now(&td->epoch);
+ if (runtime_left < (unsigned long long)left)
+ left = runtime_left;
+ }
+
total = 0;
- if (td->o.thinktime_spin)
- total = usec_spin(td->o.thinktime_spin);
+ if (left)
+ total = usec_spin(left);
left = td->o.thinktime - total;
+ if (td->o.timeout) {
+ runtime_left = td->o.timeout - utime_since_now(&td->epoch);
+ if (runtime_left < (unsigned long long)left)
+ left = runtime_left;
+ }
+
if (left)
total += usec_sleep(td, left);
fio_gettime(time, NULL);
td->last_thinktime_blocks = b;
- if (td->o.thinktime_iotime)
+ if (td->o.thinktime_iotime) {
+ fio_gettime(&now, NULL);
td->last_thinktime = now;
+ }
}
/*
total_bytes += td->o.size;
/* In trimwrite mode, each byte is trimmed and then written, so
- * allow total_bytes to be twice as big */
- if (td_trimwrite(td))
+ * allow total_bytes or number of ios to be twice as big */
+ if (td_trimwrite(td)) {
total_bytes += td->total_io_size;
+ td->o.number_ios *= 2;
+ }
while ((td->o.read_iolog_file && !flist_empty(&td->io_log_list)) ||
(!flist_empty(&td->trim_list)) || !io_issue_bytes_exceeded(td) ||
break;
}
- if (io_u->ddir == DDIR_WRITE && td->flags & TD_F_DO_VERIFY)
- populate_verify_io_u(td, io_u);
+ if (io_u->ddir == DDIR_WRITE && td->flags & TD_F_DO_VERIFY) {
+ if (!(io_u->flags & IO_U_F_PATTERN_DONE)) {
+ io_u_set(td, io_u, IO_U_F_PATTERN_DONE);
+ io_u->numberio = td->io_issues[io_u->ddir];
+ populate_verify_io_u(td, io_u);
+ }
+ }
ddir = io_u->ddir;
td->rate_io_issue_bytes[__ddir] += blen;
}
- if (should_check_rate(td))
+ if (should_check_rate(td)) {
td->rate_next_io_time[__ddir] = usec_for_io(td, __ddir);
+ fio_gettime(&comp_time, NULL);
+ }
} else {
ret = io_u_submit(td, io_u);
f->file_name);
}
}
- } else
+ } else {
+ if (td->o.io_submit_mode == IO_MODE_OFFLOAD)
+ workqueue_flush(&td->io_wq);
cleanup_pending_aio(td);
+ }
/*
* stop job if we failed doing any IO
}
}
- init_io_u_buffers(td);
+ if (init_io_u_buffers(td))
+ return 1;
if (init_file_completion_logging(td, max_units))
return 1;
* overflow later. this adjustment may be too much if we get
* lucky and the allocator gives us an aligned address.
*/
- if (td->o.odirect || td->o.mem_align || td->o.oatomic ||
+ if (td->o.odirect || td->o.mem_align ||
td_ioengine_flagged(td, FIO_RAWIO))
td->orig_buffer_size += page_mask + td->o.mem_align;
if (data_xfer && allocate_io_mem(td))
return 1;
- if (td->o.odirect || td->o.mem_align || td->o.oatomic ||
+ if (td->o.odirect || td->o.mem_align ||
td_ioengine_flagged(td, FIO_RAWIO))
p = PTR_ALIGN(td->orig_buffer, page_mask) + td->o.mem_align;
else
if (!init_iolog(td))
goto err;
+ /* ioprio_set() has to be done before td_io_init() */
+ if (fio_option_is_set(o, ioprio) ||
+ fio_option_is_set(o, ioprio_class)) {
+ ret = ioprio_set(IOPRIO_WHO_PROCESS, 0, o->ioprio_class, o->ioprio);
+ if (ret == -1) {
+ td_verror(td, errno, "ioprio_set");
+ goto err;
+ }
+ td->ioprio = ioprio_value(o->ioprio_class, o->ioprio);
+ td->ts.ioprio = td->ioprio;
+ }
+
if (td_io_init(td))
goto err;
+ if (td_ioengine_flagged(td, FIO_SYNCIO) && td->o.iodepth > 1 && td->o.io_submit_mode != IO_MODE_OFFLOAD) {
+ log_info("note: both iodepth >= 1 and synchronous I/O engine "
+ "are selected, queue depth will be capped at 1\n");
+ }
+
if (init_io_u(td))
goto err;
if (o->verify_async && verify_async_init(td))
goto err;
- if (fio_option_is_set(o, ioprio) ||
- fio_option_is_set(o, ioprio_class)) {
- ret = ioprio_set(IOPRIO_WHO_PROCESS, 0, o->ioprio_class, o->ioprio);
- if (ret == -1) {
- td_verror(td, errno, "ioprio_set");
- goto err;
- }
- td->ioprio = ioprio_value(o->ioprio_class, o->ioprio);
- }
-
if (o->cgroup && cgroup_setup(td, cgroup_list, &cgroup_mnt))
goto err;
if (o->ratemin[DDIR_READ] || o->ratemin[DDIR_WRITE] ||
o->ratemin[DDIR_TRIM]) {
- memcpy(&td->lastrate[DDIR_READ], &td->bw_sample_time,
+ memcpy(&td->last_rate_check_time[DDIR_READ], &td->bw_sample_time,
sizeof(td->bw_sample_time));
- memcpy(&td->lastrate[DDIR_WRITE], &td->bw_sample_time,
+ memcpy(&td->last_rate_check_time[DDIR_WRITE], &td->bw_sample_time,
sizeof(td->bw_sample_time));
- memcpy(&td->lastrate[DDIR_TRIM], &td->bw_sample_time,
+ memcpy(&td->last_rate_check_time[DDIR_TRIM], &td->bw_sample_time,
sizeof(td->bw_sample_time));
}
if (td->o.verify_only && td_write(td))
verify_bytes = do_dry_run(td);
else {
+ if (!td->o.rand_repeatable)
+ /* save verify rand state to replay hdr seeds later at verify */
+ frand_copy(&td->verify_state_last_do_io, &td->verify_state);
do_io(td, bytes_done);
-
+ if (!td->o.rand_repeatable)
+ frand_copy(&td->verify_state, &td->verify_state_last_do_io);
if (!ddir_rw_sum(bytes_done)) {
fio_mark_td_terminate(td);
verify_bytes = 0;
}
} while (1);
- if (td_read(td) && td->io_bytes[DDIR_READ])
+ if (td->io_bytes[DDIR_READ] && (td_read(td) ||
+ ((td->flags & TD_F_VER_BACKLOG) && td_write(td))))
update_runtime(td, elapsed_us, DDIR_READ);
if (td_write(td) && td->io_bytes[DDIR_WRITE])
update_runtime(td, elapsed_us, DDIR_WRITE);
for_each_td(td, i) {
int flags = 0;
- if (!strcmp(td->o.ioengine, "cpuio"))
+ if (!strcmp(td->o.ioengine, "cpuio"))
cputhreads++;
else
realthreads++;
strerror(ret));
} else {
pid_t pid;
+ void *eo;
dprint(FD_PROCESS, "will fork\n");
+ eo = td->eo;
+ read_barrier();
pid = fork();
if (!pid) {
int ret;
_exit(ret);
} else if (i == fio_debug_jobno)
*fio_debug_jobp = pid;
+ free(eo);
+ free(fd);
+ fd = NULL;
}
dprint(FD_MUTEX, "wait on startup_sem\n");
if (fio_sem_down_timeout(startup_sem, 10000)) {
setup_log(&agg_io_log[DDIR_TRIM], &p, "agg-trim_bw.log");
}
+ if (init_global_dedupe_working_set_seeds()) {
+ log_err("fio: failed to initialize global dedupe working set\n");
+ return 1;
+ }
+
startup_sem = fio_sem_init(FIO_SEM_LOCKED);
if (!sk_out)
is_local_backend = true;
}
for_each_td(td, i) {
+ struct thread_stat *ts = &td->ts;
+
+ free_clat_prio_stats(ts);
steadystate_free(td);
fio_options_free(td);
fio_dump_options_free(td);