#include <sys/stat.h>
#include <sys/wait.h>
#include <math.h>
+#include <pthread.h>
#include "fio.h"
#include "smalloc.h"
#include "rate-submit.h"
#include "helper_thread.h"
#include "pshared.h"
+#include "zone-dist.h"
+#include "fio_time.h"
static struct fio_sem *startup_sem;
static struct flist_head *cgroup_list;
-static char *cgroup_mnt;
+static struct cgroup_mnt *cgroup_mnt;
static int exit_value;
-static volatile int fio_abort;
+static volatile bool fio_abort;
static unsigned int nr_process = 0;
static unsigned int nr_thread = 0;
int groupid = 0;
unsigned int thread_number = 0;
+unsigned int nr_segments = 0;
+unsigned int cur_segment = 0;
unsigned int stat_number = 0;
-int shm_id = 0;
int temp_stall_ts;
unsigned long done_secs = 0;
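+/*
+ * Serializes overlap checking in offload mode. Prefer an error-checking
+ * mutex where available so that invalid usage (relocking, or unlocking
+ * from a thread that does not hold the lock) fails with an error instead
+ * of deadlocking.
+ */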
+#ifdef PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP
+pthread_mutex_t overlap_check = PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP;
+#else
+pthread_mutex_t overlap_check = PTHREAD_MUTEX_INITIALIZER;
+#endif
#define JOB_START_TIMEOUT (5 * 1000)
static void sig_int(int sig)
{
- if (threads) {
+ if (nr_segments) {
if (is_backend)
fio_server_got_signal(sig);
else {
exit_value = 128;
}
- fio_terminate_threads(TERMINATE_ALL);
+ fio_terminate_threads(TERMINATE_ALL, TERMINATE_ALL);
}
}
+#ifdef WIN32
+static void sig_break(int sig)
+{
+ sig_int(sig);
+
+ /*
+ * Windows terminates all job processes on SIGBREAK after the handler
+ * returns, so give them time to wrap up and report their stats
+ */
+ for_each_td(td) {
+ while (td->runstate < TD_EXITED)
+ sleep(1);
+ } end_for_each();
+}
+#endif
+
void sig_show_status(int sig)
{
show_running_run_stats();
/* Windows uses SIGBREAK as a quit signal from other applications */
#ifdef WIN32
memset(&act, 0, sizeof(act));
- act.sa_handler = sig_int;
+ act.sa_handler = sig_break;
act.sa_flags = SA_RESTART;
sigaction(SIGBREAK, &act, NULL);
#endif
static bool __check_min_rate(struct thread_data *td, struct timespec *now,
enum fio_ddir ddir)
{
- unsigned long long bytes = 0;
- unsigned long iops = 0;
- unsigned long spent;
- unsigned long rate;
- unsigned int ratemin = 0;
- unsigned int rate_iops = 0;
- unsigned int rate_iops_min = 0;
+ unsigned long long current_rate_check_bytes = td->this_io_bytes[ddir];
+ unsigned long current_rate_check_blocks = td->this_io_blocks[ddir];
+ unsigned long long option_rate_bytes_min = td->o.ratemin[ddir];
+ unsigned int option_rate_iops_min = td->o.rate_iops_min[ddir];
assert(ddir_rw(ddir));
if (mtime_since(&td->start, now) < 2000)
return false;
- iops += td->this_io_blocks[ddir];
- bytes += td->this_io_bytes[ddir];
- ratemin += td->o.ratemin[ddir];
- rate_iops += td->o.rate_iops[ddir];
- rate_iops_min += td->o.rate_iops_min[ddir];
-
/*
- * if rate blocks is set, sample is running
+ * if last_rate_check_blocks or last_rate_check_bytes is set,
+ * we can compute a rate per ratecycle
*/
- if (td->rate_bytes[ddir] || td->rate_blocks[ddir]) {
- spent = mtime_since(&td->lastrate[ddir], now);
- if (spent < td->o.ratecycle)
+ if (td->last_rate_check_bytes[ddir] || td->last_rate_check_blocks[ddir]) {
+ unsigned long spent = mtime_since(&td->last_rate_check_time[ddir], now);
+ if (spent < td->o.ratecycle || spent == 0)
return false;
- if (td->o.rate[ddir] || td->o.ratemin[ddir]) {
+ if (td->o.ratemin[ddir]) {
/*
* check bandwidth specified rate
*/
- if (bytes < td->rate_bytes[ddir]) {
- log_err("%s: rate_min=%uB/s not met, only transferred %lluB\n",
- td->o.name, ratemin, bytes);
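+ /*
+ * spent is in milliseconds (mtime_since()), so scale the
+ * byte delta by 1000 to get bytes per second.
+ */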
+ unsigned long long current_rate_bytes =
+ ((current_rate_check_bytes - td->last_rate_check_bytes[ddir]) * 1000) / spent;
+ if (current_rate_bytes < option_rate_bytes_min) {
+ log_err("%s: rate_min=%lluB/s not met, got %lluB/s\n",
+ td->o.name, option_rate_bytes_min, current_rate_bytes);
return true;
- } else {
- if (spent)
- rate = ((bytes - td->rate_bytes[ddir]) * 1000) / spent;
- else
- rate = 0;
-
- if (rate < ratemin ||
- bytes < td->rate_bytes[ddir]) {
- log_err("%s: rate_min=%uB/s not met, got %luB/s\n",
- td->o.name, ratemin, rate);
- return true;
- }
}
} else {
/*
* check iops specified rate
*/
- if (iops < rate_iops) {
- log_err("%s: rate_iops_min=%u not met, only performed %lu IOs\n",
- td->o.name, rate_iops, iops);
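+ /*
+ * Same scaling as the bandwidth check: block delta * 1000
+ * divided by spent milliseconds yields IOPS.
+ */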
+ unsigned long long current_rate_iops =
+ ((current_rate_check_blocks - td->last_rate_check_blocks[ddir]) * 1000) / spent;
+
+ if (current_rate_iops < option_rate_iops_min) {
+ log_err("%s: rate_iops_min=%u not met, got %llu IOPS\n",
+ td->o.name, option_rate_iops_min, current_rate_iops);
return true;
- } else {
- if (spent)
- rate = ((iops - td->rate_blocks[ddir]) * 1000) / spent;
- else
- rate = 0;
-
- if (rate < rate_iops_min ||
- iops < td->rate_blocks[ddir]) {
- log_err("%s: rate_iops_min=%u not met, got %lu IOPS\n",
- td->o.name, rate_iops_min, rate);
- return true;
- }
}
}
}
- td->rate_bytes[ddir] = bytes;
- td->rate_blocks[ddir] = iops;
- memcpy(&td->lastrate[ddir], now, sizeof(*now));
+ td->last_rate_check_bytes[ddir] = current_rate_check_bytes;
+ td->last_rate_check_blocks[ddir] = current_rate_check_blocks;
+ memcpy(&td->last_rate_check_time[ddir], now, sizeof(*now));
return false;
}
{
bool ret = false;
- if (td->bytes_done[DDIR_READ])
- ret |= __check_min_rate(td, now, DDIR_READ);
- if (td->bytes_done[DDIR_WRITE])
- ret |= __check_min_rate(td, now, DDIR_WRITE);
- if (td->bytes_done[DDIR_TRIM])
- ret |= __check_min_rate(td, now, DDIR_TRIM);
+ for_each_rw_ddir(ddir) {
+ if (td->bytes_done[ddir])
+ ret |= __check_min_rate(td, now, ddir);
+ }
return ret;
}
* get immediately available events, if any
*/
r = io_u_queued_complete(td, 0);
- if (r < 0)
- return;
/*
* now cancel remaining active events
static bool fio_io_sync(struct thread_data *td, struct fio_file *f)
{
struct io_u *io_u = __get_io_u(td);
- int ret;
+ enum fio_q_status ret;
if (!io_u)
return true;
io_u->ddir = DDIR_SYNC;
io_u->file = f;
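+ /*
+ * The caller owns the file reference here; don't drop it when
+ * this io_u is put.
+ */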
+ io_u_set(td, io_u, IO_U_F_NO_FILE_PUT);
if (td_io_prep(td, io_u)) {
put_io_u(td, io_u);
requeue:
ret = td_io_queue(td, io_u);
- if (ret < 0) {
- td_verror(td, io_u->error, "td_io_queue");
- put_io_u(td, io_u);
- return true;
- } else if (ret == FIO_Q_QUEUED) {
+ switch (ret) {
+ case FIO_Q_QUEUED:
td_io_commit(td);
if (io_u_queued_complete(td, 1) < 0)
return true;
- } else if (ret == FIO_Q_COMPLETED) {
+ break;
+ case FIO_Q_COMPLETED:
if (io_u->error) {
td_verror(td, io_u->error, "td_io_queue");
return true;
if (io_u_sync_complete(td, io_u) < 0)
return true;
- } else if (ret == FIO_Q_BUSY) {
+ break;
+ case FIO_Q_BUSY:
td_io_commit(td);
goto requeue;
}
static int fio_file_fsync(struct thread_data *td, struct fio_file *f)
{
- int ret;
+ int ret, ret2;
if (fio_file_open(f))
return fio_io_sync(td, f);
return 1;
ret = fio_io_sync(td, f);
- td_io_close_file(td, f);
- return ret;
+ ret2 = 0;
+ if (fio_file_open(f))
+ ret2 = td_io_close_file(td, f);
+ return (ret || ret2);
}
static inline void __update_ts_cache(struct thread_data *td)
td_clear_error(td);
*retptr = 0;
return false;
- } else if (td->o.fill_device && err == ENOSPC) {
+ } else if (td->o.fill_device && (err == ENOSPC || err == EDQUOT)) {
/*
* We expect to hit this error if
* fill_device option is set.
if ((full && !min_evts) || !td->o.iodepth_batch_complete_min)
min_evts = 1;
- if (time && (__should_check_rate(td, DDIR_READ) ||
- __should_check_rate(td, DDIR_WRITE) ||
- __should_check_rate(td, DDIR_TRIM)))
+ if (time && should_check_rate(td))
fio_gettime(time, NULL);
do {
*ret = -io_u->error;
clear_io_u(td, io_u);
} else if (io_u->resid) {
- int bytes = io_u->xfer_buflen - io_u->resid;
+ long long bytes = io_u->xfer_buflen - io_u->resid;
struct fio_file *f = io_u->file;
if (bytes_issued)
*bytes_issued += bytes;
if (!from_verify)
- trim_io_piece(td, io_u);
+ trim_io_piece(io_u);
/*
* zero read, fail
if (!from_verify)
unlog_io_piece(td, io_u);
td_verror(td, EIO, "full resid");
- put_io_u(td, io_u);
+ clear_io_u(td, io_u);
break;
}
requeue_io_u(td, &io_u);
} else {
sync_done:
- if (comp_time && (__should_check_rate(td, DDIR_READ) ||
- __should_check_rate(td, DDIR_WRITE) ||
- __should_check_rate(td, DDIR_TRIM)))
+ if (comp_time && should_check_rate(td))
fio_gettime(comp_time, NULL);
*ret = io_u_sync_complete(td, io_u);
/*
* Check if io_u will overlap an in-flight IO in the queue
*/
-static bool in_flight_overlap(struct io_u_queue *q, struct io_u *io_u)
+bool in_flight_overlap(struct io_u_queue *q, struct io_u *io_u)
{
bool overlap;
struct io_u *check_io_u;
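+ /*
+ * Two half-open byte ranges [x1, x2) and [y1, y2) intersect
+ * iff x1 < y2 && y1 < x2.
+ */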
if (x1 < y2 && y1 < x2) {
overlap = true;
- dprint(FD_IO, "in-flight overlap: %llu/%lu, %llu/%lu\n",
+ dprint(FD_IO, "in-flight overlap: %llu/%llu, %llu/%llu\n",
x1, io_u->buflen,
y1, check_io_u->buflen);
break;
return overlap;
}
-static int io_u_submit(struct thread_data *td, struct io_u *io_u)
+static enum fio_q_status io_u_submit(struct thread_data *td, struct io_u *io_u)
{
/*
* Check for overlap if the user asked us to, and we have
if (td->error)
return;
- /*
- * verify_state needs to be reset before verification
- * proceeds so that expected random seeds match actual
- * random seeds in headers. The main loop will reset
- * all random number generators if randrepeat is set.
- */
- if (!td->o.rand_repeatable)
- td_fill_verify_state_seed(td);
-
td_set_runstate(td, TD_VERIFYING);
io_u = NULL;
break;
}
} else {
- if (ddir_rw_sum(td->bytes_done) + td->o.rw_min_bs > verify_bytes)
+ if (td->bytes_verified + td->o.rw_min_bs > verify_bytes)
break;
while ((io_u = get_io_u(td)) != NULL) {
break;
} else if (io_u->ddir == DDIR_WRITE) {
io_u->ddir = DDIR_READ;
+ io_u->numberio = td->verify_read_issues;
+ td->verify_read_issues++;
populate_verify_io_u(td, io_u);
break;
} else {
if (td->o.rate_process == RATE_PROCESS_POISSON) {
uint64_t val, iops;
- iops = bps / td->o.bs[ddir];
+ iops = bps / td->o.min_bs[ddir];
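+ /*
+ * Sample an exponential inter-arrival gap: with lambda = iops,
+ * -ln(U) / lambda (U uniform in (0, 1]) gives the usecs until
+ * the next issue, matching a Poisson arrival process.
+ */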
val = (int64_t) (1000000 / iops) *
-logf(__rand_0_1(&td->poisson_state[ddir]));
if (val) {
return 0;
}
-static void handle_thinktime(struct thread_data *td, enum fio_ddir ddir)
+static void init_thinktime(struct thread_data *td)
+{
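+ /*
+ * Pick the counters that pace thinktime: completed blocks when
+ * thinktime_blocks_type=complete, issued I/Os otherwise.
+ */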
+ if (td->o.thinktime_blocks_type == THINKTIME_BLOCKS_TYPE_COMPLETE)
+ td->thinktime_blocks_counter = td->io_blocks;
+ else
+ td->thinktime_blocks_counter = td->io_issues;
+ td->last_thinktime = td->epoch;
+ td->last_thinktime_blocks = 0;
+}
+
+static void handle_thinktime(struct thread_data *td, enum fio_ddir ddir,
+ struct timespec *time)
{
unsigned long long b;
+ unsigned long long runtime_left;
uint64_t total;
int left;
+ struct timespec now;
+ bool stall = false;
+
+ if (td->o.thinktime_iotime) {
+ fio_gettime(&now, NULL);
+ if (utime_since(&td->last_thinktime, &now)
+ >= td->o.thinktime_iotime) {
+ stall = true;
+ } else if (!fio_option_is_set(&td->o, thinktime_blocks)) {
+ /*
+ * When thinktime_iotime is set and thinktime_blocks is
+ * not set, skip the thinktime_blocks check, since the
+ * default thinktime_blocks value of 1 does not work
+ * together with thinktime_iotime.
+ */
+ return;
+ }
- b = ddir_rw_sum(td->io_blocks);
- if (b % td->o.thinktime_blocks)
+ }
+
+ b = ddir_rw_sum(td->thinktime_blocks_counter);
+ if (b >= td->last_thinktime_blocks + td->o.thinktime_blocks)
+ stall = true;
+
+ if (!stall)
return;
io_u_quiesce(td);
+ left = td->o.thinktime_spin;
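+ /*
+ * Cap the spin (and, below, the sleep) so a think period never
+ * runs the job past its configured timeout.
+ */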
+ if (td->o.timeout) {
+ runtime_left = td->o.timeout - utime_since_now(&td->epoch);
+ if (runtime_left < (unsigned long long)left)
+ left = runtime_left;
+ }
+
total = 0;
- if (td->o.thinktime_spin)
- total = usec_spin(td->o.thinktime_spin);
+ if (left)
+ total = usec_spin(left);
+
+ /*
+ * usec_spin() might run for slightly longer than intended in a VM
+ * where the vCPU could get descheduled or the hypervisor could steal
+ * CPU time. Ensure "left" doesn't become negative.
+ */
+ if (total < td->o.thinktime)
+ left = td->o.thinktime - total;
+ else
+ left = 0;
+
+ if (td->o.timeout) {
+ runtime_left = td->o.timeout - utime_since_now(&td->epoch);
+ if (runtime_left < (unsigned long long)left)
+ left = runtime_left;
+ }
- left = td->o.thinktime - total;
if (left)
total += usec_sleep(td, left);
over = (usperop - total) / usperop * -bs;
td->rate_io_issue_bytes[ddir] += (missed - over);
+ /* adjust for rate_process=poisson */
+ td->last_usec[ddir] += total;
+ }
+
+ if (time && should_check_rate(td))
+ fio_gettime(time, NULL);
+
+ td->last_thinktime_blocks = b;
+ if (td->o.thinktime_iotime) {
+ fio_gettime(&now, NULL);
+ td->last_thinktime = now;
}
}
*/
if (td_write(td) && td_random(td) && td->o.norandommap)
total_bytes = max(total_bytes, (uint64_t) td->o.io_size);
+
+ /* Don't break too early if io_size > size */
+ if (td_rw(td) && !td_random(td))
+ total_bytes = max(total_bytes, (uint64_t)td->o.io_size);
+
/*
* If verify_backlog is enabled, we'll run the verify in this
* handler as well. For that case, we may need up to twice the
total_bytes += td->o.size;
/* In trimwrite mode, each byte is trimmed and then written, so
- * allow total_bytes to be twice as big */
- if (td_trimwrite(td))
+ * allow total_bytes or number of ios to be twice as big */
+ if (td_trimwrite(td)) {
total_bytes += td->total_io_size;
+ td->o.number_ios *= 2;
+ }
while ((td->o.read_iolog_file && !flist_empty(&td->io_log_list)) ||
(!flist_empty(&td->trim_list)) || !io_issue_bytes_exceeded(td) ||
* Break if we exceeded the bytes. The exception is time
* based runs, but we still need to break out of the loop
* for those to run verification, if enabled.
+ * Jobs that read from an iolog do not use this stop condition.
*/
if (bytes_issued >= total_bytes &&
+ !td->o.read_iolog_file &&
(!td->o.time_based ||
(td->o.time_based && td->o.verify != VERIFY_NONE)))
break;
break;
}
- if (io_u->ddir == DDIR_WRITE && td->flags & TD_F_DO_VERIFY)
- populate_verify_io_u(td, io_u);
+ if (io_u->ddir == DDIR_WRITE && td->flags & TD_F_DO_VERIFY) {
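+ /*
+ * Fill the verify pattern and assign numberio only once per
+ * io_u, so a requeue does not regenerate them.
+ */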
+ if (!(io_u->flags & IO_U_F_PATTERN_DONE)) {
+ io_u_set(td, io_u, IO_U_F_PATTERN_DONE);
+ io_u->numberio = td->io_issues[io_u->ddir];
+ populate_verify_io_u(td, io_u);
+ }
+ }
ddir = io_u->ddir;
if (td->o.verify != VERIFY_NONE && io_u->ddir == DDIR_READ &&
((io_u->flags & IO_U_F_VER_LIST) || !td_rw(td))) {
- if (!td->o.verify_pattern_bytes) {
- io_u->rand_seed = __rand(&td->verify_state);
- if (sizeof(int) != sizeof(long *))
- io_u->rand_seed *= __rand(&td->verify_state);
- }
-
if (verify_state_should_stop(td, io_u)) {
put_io_u(td, io_u);
break;
log_io_piece(td, io_u);
if (td->o.io_submit_mode == IO_MODE_OFFLOAD) {
- const unsigned long blen = io_u->xfer_buflen;
- const enum fio_ddir ddir = acct_ddir(io_u);
+ const unsigned long long blen = io_u->xfer_buflen;
+ const enum fio_ddir __ddir = acct_ddir(io_u);
if (td->error)
break;
workqueue_enqueue(&td->io_wq, &io_u->work);
ret = FIO_Q_QUEUED;
- if (ddir_rw(ddir)) {
- td->io_issues[ddir]++;
- td->io_issue_bytes[ddir] += blen;
- td->rate_io_issue_bytes[ddir] += blen;
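+ /*
+ * The submit worker performs the actual I/O, so account the
+ * issue here at enqueue time.
+ */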
+ if (ddir_rw(__ddir)) {
+ td->io_issues[__ddir]++;
+ td->io_issue_bytes[__ddir] += blen;
+ td->rate_io_issue_bytes[__ddir] += blen;
}
- if (should_check_rate(td))
- td->rate_next_io_time[ddir] = usec_for_io(td, ddir);
+ if (should_check_rate(td)) {
+ td->rate_next_io_time[__ddir] = usec_for_io(td, __ddir);
+ fio_gettime(&comp_time, NULL);
+ }
} else {
ret = io_u_submit(td, io_u);
}
if (ret < 0)
break;
+
+ if (ddir_rw(ddir) && td->o.thinkcycles)
+ cycles_spin(td->o.thinkcycles);
+
+ if (ddir_rw(ddir) && td->o.thinktime)
+ handle_thinktime(td, ddir, &comp_time);
+
if (!ddir_rw_sum(td->bytes_done) &&
!td_ioengine_flagged(td, FIO_NOIO))
continue;
if (!in_ramp_time(td) && should_check_rate(td)) {
if (check_min_rate(td, &comp_time)) {
if (exitall_on_terminate || td->o.exitall_error)
- fio_terminate_threads(td->groupid);
+ fio_terminate_threads(td->groupid, td->o.exit_what);
td_verror(td, EIO, "check_min_rate");
break;
}
}
if (!in_ramp_time(td) && td->o.latency_target)
lat_target_check(td);
-
- if (ddir_rw(ddir) && td->o.thinktime)
- handle_thinktime(td, ddir);
}
check_update_rusage(td);
if (td->trim_entries)
log_err("fio: %lu trim entries leaked?\n", td->trim_entries);
- if (td->o.fill_device && td->error == ENOSPC) {
+ if (td->o.fill_device && (td->error == ENOSPC || td->error == EDQUOT)) {
td->error = 0;
fio_mark_td_terminate(td);
}
if (i) {
ret = io_u_queued_complete(td, i);
- if (td->o.fill_device && td->error == ENOSPC)
+ if (td->o.fill_device &&
+ (td->error == ENOSPC || td->error == EDQUOT))
td->error = 0;
}
- if (should_fsync(td) && td->o.end_fsync) {
+ if (should_fsync(td) && (td->o.end_fsync || td->o.fsync_on_close)) {
td_set_runstate(td, TD_FSYNCING);
for_each_file(td, f, i) {
f->file_name);
}
}
- } else
+ } else {
+ if (td->o.io_submit_mode == IO_MODE_OFFLOAD)
+ workqueue_flush(&td->io_wq);
cleanup_pending_aio(td);
+ }
/*
* stop job if we failed doing any IO
if (td->io_ops->io_u_free)
td->io_ops->io_u_free(td, io_u);
- fio_memfree(io_u, sizeof(*io_u));
+ fio_memfree(io_u, sizeof(*io_u), td_offload_overlap(td));
}
free_io_mem(td);
io_u_rexit(&td->io_u_requeues);
- io_u_qexit(&td->io_u_freelist);
- io_u_qexit(&td->io_u_all);
+ io_u_qexit(&td->io_u_freelist, false);
+ io_u_qexit(&td->io_u_all, td_offload_overlap(td));
free_file_completion_logging(td);
}
static int init_io_u(struct thread_data *td)
{
struct io_u *io_u;
- unsigned int max_bs, min_write;
int cl_align, i, max_units;
- int data_xfer = 1, err;
- char *p;
+ int err;
max_units = td->o.iodepth;
- max_bs = td_max_bs(td);
- min_write = td->o.min_bs[DDIR_WRITE];
- td->orig_buffer_size = (unsigned long long) max_bs
- * (unsigned long long) max_units;
-
- if (td_ioengine_flagged(td, FIO_NOIO) || !(td_read(td) || td_write(td)))
- data_xfer = 0;
err = 0;
err += !io_u_rinit(&td->io_u_requeues, td->o.iodepth);
- err += !io_u_qinit(&td->io_u_freelist, td->o.iodepth);
- err += !io_u_qinit(&td->io_u_all, td->o.iodepth);
+ err += !io_u_qinit(&td->io_u_freelist, td->o.iodepth, false);
+ err += !io_u_qinit(&td->io_u_all, td->o.iodepth, td_offload_overlap(td));
if (err) {
log_err("fio: failed setting up IO queues\n");
return 1;
}
+ cl_align = os_cache_line_size();
+
+ for (i = 0; i < max_units; i++) {
+ void *ptr;
+
+ if (td->terminate)
+ return 1;
+
+ ptr = fio_memalign(cl_align, sizeof(*io_u), td_offload_overlap(td));
+ if (!ptr) {
+ log_err("fio: unable to allocate aligned memory\n");
+ return 1;
+ }
+
+ io_u = ptr;
+ memset(io_u, 0, sizeof(*io_u));
+ INIT_FLIST_HEAD(&io_u->verify_list);
+ dprint(FD_MEM, "io_u alloc %p, index %u\n", io_u, i);
+
+ io_u->index = i;
+ io_u->flags = IO_U_F_FREE;
+ io_u_qpush(&td->io_u_freelist, io_u);
+
+ /*
+ * io_u never leaves this stack, used for iteration of all
+ * io_u buffers.
+ */
+ io_u_qpush(&td->io_u_all, io_u);
+
+ if (td->io_ops->io_u_init) {
+ int ret = td->io_ops->io_u_init(td, io_u);
+
+ if (ret) {
+ log_err("fio: failed to init engine data: %d\n", ret);
+ return 1;
+ }
+ }
+ }
+
+ if (init_io_u_buffers(td))
+ return 1;
+
+ if (init_file_completion_logging(td, max_units))
+ return 1;
+
+ return 0;
+}
+
+int init_io_u_buffers(struct thread_data *td)
+{
+ struct io_u *io_u;
+ unsigned long long max_bs, min_write, trim_bs = 0;
+ int i, max_units;
+ int data_xfer = 1;
+ char *p;
+
+ max_units = td->o.iodepth;
+ max_bs = td_max_bs(td);
+ min_write = td->o.min_bs[DDIR_WRITE];
+ td->orig_buffer_size = (unsigned long long) max_bs
+ * (unsigned long long) max_units;
+
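+ /*
+ * A multi-range trim io_u carries an array of struct trim_range
+ * entries instead of payload data, so size the buffer for
+ * num_range entries rather than max_bs.
+ */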
+ if (td_trim(td) && td->o.num_range > 1) {
+ trim_bs = td->o.num_range * sizeof(struct trim_range);
+ td->orig_buffer_size = trim_bs
+ * (unsigned long long) max_units;
+ }
+
+ /*
+ * For reads, writes, and multi-range trim operations we need a
+ * data buffer
+ */
+ if (td_ioengine_flagged(td, FIO_NOIO) ||
+ !(td_read(td) || td_write(td) || (td_trim(td) && td->o.num_range > 1)))
+ data_xfer = 0;
+
/*
* if we may later need to do address alignment, then add any
* possible adjustment here so that we don't cause a buffer
* overflow later. this adjustment may be too much if we get
* lucky and the allocator gives us an aligned address.
*/
- if (td->o.odirect || td->o.mem_align || td->o.oatomic ||
+ if (td->o.odirect || td->o.mem_align ||
td_ioengine_flagged(td, FIO_RAWIO))
td->orig_buffer_size += page_mask + td->o.mem_align;
if (td->o.mem_type == MEM_SHMHUGE || td->o.mem_type == MEM_MMAPHUGE) {
- unsigned long bs;
+ unsigned long long bs;
bs = td->orig_buffer_size + td->o.hugepage_size - 1;
td->orig_buffer_size = bs & ~(td->o.hugepage_size - 1);
if (data_xfer && allocate_io_mem(td))
return 1;
- if (td->o.odirect || td->o.mem_align || td->o.oatomic ||
+ if (td->o.odirect || td->o.mem_align ||
td_ioengine_flagged(td, FIO_RAWIO))
p = PTR_ALIGN(td->orig_buffer, page_mask) + td->o.mem_align;
else
p = td->orig_buffer;
- cl_align = os_cache_line_size();
-
for (i = 0; i < max_units; i++) {
- void *ptr;
-
- if (td->terminate)
- return 1;
-
- ptr = fio_memalign(cl_align, sizeof(*io_u));
- if (!ptr) {
- log_err("fio: unable to allocate aligned memory\n");
- break;
- }
-
- io_u = ptr;
- memset(io_u, 0, sizeof(*io_u));
- INIT_FLIST_HEAD(&io_u->verify_list);
+ io_u = td->io_u_all.io_us[i];
dprint(FD_MEM, "io_u alloc %p, index %u\n", io_u, i);
if (data_xfer) {
fill_verify_pattern(td, io_u->buf, max_bs, io_u, 0, 0);
}
}
-
- io_u->index = i;
- io_u->flags = IO_U_F_FREE;
- io_u_qpush(&td->io_u_freelist, io_u);
-
- /*
- * io_u never leaves this stack, used for iteration of all
- * io_u buffers.
- */
- io_u_qpush(&td->io_u_all, io_u);
-
- if (td->io_ops->io_u_init) {
- int ret = td->io_ops->io_u_init(td, io_u);
-
- if (ret) {
- log_err("fio: failed to init engine data: %d\n", ret);
- return 1;
- }
- }
-
- p += max_bs;
+ if (td_trim(td) && td->o.num_range > 1)
+ p += trim_bs;
+ else
+ p += max_bs;
}
- if (init_file_completion_logging(td, max_units))
- return 1;
-
return 0;
}
+#ifdef FIO_HAVE_IOSCHED_SWITCH
/*
- * This function is Linux specific.
+ * These functions are Linux specific.
* FIO_HAVE_IOSCHED_SWITCH enabled currently means it's Linux.
*/
-static int switch_ioscheduler(struct thread_data *td)
+static int set_ioscheduler(struct thread_data *td, struct fio_file *file)
{
-#ifdef FIO_HAVE_IOSCHED_SWITCH
char tmp[256], tmp2[128], *p;
FILE *f;
int ret;
- if (td_ioengine_flagged(td, FIO_DISKLESSIO))
- return 0;
-
- assert(td->files && td->files[0]);
- sprintf(tmp, "%s/queue/scheduler", td->files[0]->du->sysfs_root);
+ assert(file->du && file->du->sysfs_root);
+ sprintf(tmp, "%s/queue/scheduler", file->du->sysfs_root);
f = fopen(tmp, "r+");
if (!f) {
sprintf(tmp2, "[%s]", td->o.ioscheduler);
if (!strstr(tmp, tmp2)) {
- log_err("fio: io scheduler %s not found\n", td->o.ioscheduler);
+ log_err("fio: unable to set io scheduler to %s\n", td->o.ioscheduler);
td_verror(td, EINVAL, "iosched_switch");
fclose(f);
return 1;
fclose(f);
return 0;
+}
+
+static int switch_ioscheduler(struct thread_data *td)
+{
+ struct fio_file *f;
+ unsigned int i;
+ int ret = 0;
+
+ if (td_ioengine_flagged(td, FIO_DISKLESSIO))
+ return 0;
+
+ assert(td->files && td->files[0]);
+
+ for_each_file(td, f, i) {
+
+ /* Only consider regular files and block device files */
+ switch (f->filetype) {
+ case FIO_TYPE_FILE:
+ case FIO_TYPE_BLOCK:
+ /*
+ * Make sure that the device hosting the file could
+ * be determined.
+ */
+ if (!f->du)
+ continue;
+ break;
+ case FIO_TYPE_CHAR:
+ case FIO_TYPE_PIPE:
+ default:
+ continue;
+ }
+
+ ret = set_ioscheduler(td, f);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
#else
+
+static int switch_ioscheduler(struct thread_data *td)
+{
return 0;
-#endif
}
+#endif /* FIO_HAVE_IOSCHED_SWITCH */
+
static bool keep_running(struct thread_data *td)
{
unsigned long long limit;
return false;
}
-static int exec_string(struct thread_options *o, const char *string, const char *mode)
+static int exec_string(struct thread_options *o, const char *string,
+ const char *mode)
{
- size_t newlen = strlen(string) + strlen(o->name) + strlen(mode) + 9 + 1;
int ret;
char *str;
- str = malloc(newlen);
- sprintf(str, "%s &> %s.%s.txt", string, o->name, mode);
+ if (asprintf(&str, "%s > %s.%s.txt 2>&1", string, o->name, mode) < 0)
+ return -1;
- log_info("%s : Saving output of %s in %s.%s.txt\n",o->name, mode, o->name, mode);
+ log_info("%s : Saving output of %s in %s.%s.txt\n", o->name, mode,
+ o->name, mode);
ret = system(str);
if (ret == -1)
log_err("fio: exec of cmd <%s> failed\n", str);
struct sk_out *sk_out = fd->sk_out;
uint64_t bytes_done[DDIR_RWDIR_CNT];
int deadlock_loop_cnt;
- bool clear_state, did_some_io;
+ bool clear_state;
int ret;
sk_out_assign(sk_out);
} else
td->pid = gettid();
- fio_local_clock_init(o->use_thread);
+ fio_local_clock_init();
dprint(FD_PROCESS, "jobs pid=%d started\n", (int) td->pid);
goto err;
}
+ td_zone_gen_index(td);
+
/*
* Do this early, we don't want the compress threads to be limited
* to the same CPUs as the IO workers. So do this before we set
* May alter parameters that init_io_u() will use, so we need to
* do this first.
*/
- if (init_iolog(td))
- goto err;
-
- if (init_io_u(td))
- goto err;
-
- if (o->verify_async && verify_async_init(td))
+ if (!init_iolog(td))
goto err;
+ /* ioprio_set() has to be done before td_io_init() */
if (fio_option_is_set(o, ioprio) ||
- fio_option_is_set(o, ioprio_class)) {
- ret = ioprio_set(IOPRIO_WHO_PROCESS, 0, o->ioprio_class, o->ioprio);
+ fio_option_is_set(o, ioprio_class) ||
+ fio_option_is_set(o, ioprio_hint)) {
+ ret = ioprio_set(IOPRIO_WHO_PROCESS, 0, o->ioprio_class,
+ o->ioprio, o->ioprio_hint);
if (ret == -1) {
td_verror(td, errno, "ioprio_set");
goto err;
}
+ td->ioprio = ioprio_value(o->ioprio_class, o->ioprio,
+ o->ioprio_hint);
+ td->ts.ioprio = td->ioprio;
+ }
+
+ if (td_io_init(td))
+ goto err;
+
+ if (td_ioengine_flagged(td, FIO_SYNCIO) && td->o.iodepth > 1 &&
+ td->o.io_submit_mode != IO_MODE_OFFLOAD) {
+ log_info("note: both iodepth > 1 and a synchronous I/O engine "
+ "are selected, queue depth will be capped at 1\n");
+ }
+ if (init_io_u(td))
+ goto err;
+
+ if (td->io_ops->post_init && td->io_ops->post_init(td))
+ goto err;
+
+ if (o->verify_async && verify_async_init(td))
+ goto err;
+
if (o->cgroup && cgroup_setup(td, cgroup_list, &cgroup_mnt))
goto err;
if (!o->create_serialize && setup_files(td))
goto err;
- if (td_io_init(td))
- goto err;
-
if (!init_random_map(td))
goto err;
- if (o->exec_prerun && exec_string(o, o->exec_prerun, (const char *)"prerun"))
+ if (o->exec_prerun && exec_string(o, o->exec_prerun, "prerun"))
goto err;
if (o->pre_read && !pre_read_files(td))
if (rate_submit_init(td, sk_out))
goto err;
- set_epoch_time(td, o->log_unix_epoch);
+ set_epoch_time(td, o->log_alternate_epoch_clock_id, o->job_start_clock_id);
fio_getrusage(&td->ru_start);
memcpy(&td->bw_sample_time, &td->epoch, sizeof(td->epoch));
memcpy(&td->iops_sample_time, &td->epoch, sizeof(td->epoch));
memcpy(&td->ss.prev_time, &td->epoch, sizeof(td->epoch));
+ init_thinktime(td);
+
if (o->ratemin[DDIR_READ] || o->ratemin[DDIR_WRITE] ||
o->ratemin[DDIR_TRIM]) {
- memcpy(&td->lastrate[DDIR_READ], &td->bw_sample_time,
+ memcpy(&td->last_rate_check_time[DDIR_READ], &td->bw_sample_time,
sizeof(td->bw_sample_time));
- memcpy(&td->lastrate[DDIR_WRITE], &td->bw_sample_time,
+ memcpy(&td->last_rate_check_time[DDIR_WRITE], &td->bw_sample_time,
sizeof(td->bw_sample_time));
- memcpy(&td->lastrate[DDIR_TRIM], &td->bw_sample_time,
+ memcpy(&td->last_rate_check_time[DDIR_TRIM], &td->bw_sample_time,
sizeof(td->bw_sample_time));
}
memset(bytes_done, 0, sizeof(bytes_done));
clear_state = false;
- did_some_io = false;
while (keep_running(td)) {
uint64_t verify_bytes;
if (td->o.verify_only && td_write(td))
verify_bytes = do_dry_run(td);
else {
+ if (!td->o.rand_repeatable)
+ /* save verify rand state so header seeds can be replayed at verify time */
+ frand_copy(&td->verify_state_last_do_io, &td->verify_state);
do_io(td, bytes_done);
-
+ if (!td->o.rand_repeatable)
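+ /* restore the state so verify sees the same header seed sequence */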
+ frand_copy(&td->verify_state, &td->verify_state_last_do_io);
if (!ddir_rw_sum(bytes_done)) {
fio_mark_td_terminate(td);
verify_bytes = 0;
}
} while (1);
- if (td_read(td) && td->io_bytes[DDIR_READ])
+ if (td->io_bytes[DDIR_READ] && (td_read(td) ||
+ ((td->flags & TD_F_VER_BACKLOG) && td_write(td))))
update_runtime(td, elapsed_us, DDIR_READ);
if (td_write(td) && td->io_bytes[DDIR_WRITE])
update_runtime(td, elapsed_us, DDIR_WRITE);
td_ioengine_flagged(td, FIO_UNIDIR))
continue;
- if (ddir_rw_sum(bytes_done))
- did_some_io = true;
-
clear_io_state(td, 0);
fio_gettime(&td->start, NULL);
}
/*
- * If td ended up with no I/O when it should have had,
- * then something went wrong unless FIO_NOIO or FIO_DISKLESSIO.
- * (Are we not missing other flags that can be ignored ?)
+ * Acquire this lock if we were doing overlap checking in
+ * offload mode so that we don't clean up this job while
+ * another thread is checking its io_u's for overlap
*/
- if ((td->o.size || td->o.io_size) && !ddir_rw_sum(bytes_done) &&
- !did_some_io && !td->o.create_only &&
- !(td_ioengine_flagged(td, FIO_NOIO) ||
- td_ioengine_flagged(td, FIO_DISKLESSIO)))
- log_err("%s: No I/O performed by %s, "
- "perhaps try --debug=io option for details?\n",
- td->o.name, td->io_ops->name);
+ if (td_offload_overlap(td)) {
+ int res;
+
+ res = pthread_mutex_lock(&overlap_check);
+ if (res) {
+ td->error = res;
+ goto err;
+ }
+ }
td_set_runstate(td, TD_FINISHING);
+ if (td_offload_overlap(td)) {
+ int res;
+
+ res = pthread_mutex_unlock(&overlap_check);
+ if (res) {
+ td->error = res;
+ goto err;
+ }
+ }
update_rusage_stat(td);
td->ts.total_run_time = mtime_since_now(&td->epoch);
- td->ts.io_bytes[DDIR_READ] = td->io_bytes[DDIR_READ];
- td->ts.io_bytes[DDIR_WRITE] = td->io_bytes[DDIR_WRITE];
- td->ts.io_bytes[DDIR_TRIM] = td->io_bytes[DDIR_TRIM];
+ for_each_rw_ddir(ddir) {
+ td->ts.io_bytes[ddir] = td->io_bytes[ddir];
+ }
if (td->o.verify_state_save && !(td->flags & TD_F_VSTATE_SAVED) &&
(td->o.verify != VERIFY_NONE && td_write(td)))
rate_submit_exit(td);
if (o->exec_postrun)
- exec_string(o, o->exec_postrun, (const char *)"postrun");
+ exec_string(o, o->exec_postrun, "postrun");
if (exitall_on_terminate || (o->exitall_error && td->error))
- fio_terminate_threads(td->groupid);
+ fio_terminate_threads(td->groupid, td->o.exit_what);
err:
if (td->error)
close_and_free_files(td);
cleanup_io_u(td);
close_ioengine(td);
- cgroup_shutdown(td, &cgroup_mnt);
+ cgroup_shutdown(td, cgroup_mnt);
verify_free_state(td);
-
- if (td->zone_state_index) {
- int i;
-
- for (i = 0; i < DDIR_RWDIR_CNT; i++)
- free(td->zone_state_index[i]);
- free(td->zone_state_index);
- td->zone_state_index = NULL;
- }
+ td_zone_free_index(td);
if (fio_option_is_set(o, cpumask)) {
ret = fio_cpuset_exit(&o->cpumask);
*/
if (o->write_iolog_file)
write_iolog_close(td);
+ if (td->io_log_rfile)
+ fclose(td->io_log_rfile);
td_set_runstate(td, TD_EXITED);
static void reap_threads(unsigned int *nr_running, uint64_t *t_rate,
uint64_t *m_rate)
{
- struct thread_data *td;
unsigned int cputhreads, realthreads, pending;
- int i, status, ret;
+ int ret;
/*
* reap exited threads (TD_EXITED -> TD_REAPED)
*/
realthreads = pending = cputhreads = 0;
- for_each_td(td, i) {
- int flags = 0;
+ for_each_td(td) {
+ int flags = 0, status;
if (!strcmp(td->o.ioengine, "cpuio"))
cputhreads++;
else
realthreads++;
done_secs += mtime_since_now(&td->epoch) / 1000;
profile_td_exit(td);
- }
+ flow_exit_job(td);
+ } end_for_each();
if (*nr_running == cputhreads && !pending && realthreads)
- fio_terminate_threads(TERMINATE_ALL);
+ fio_terminate_threads(TERMINATE_ALL, TERMINATE_ALL);
}
static bool __check_trigger_file(void)
fio_clients_send_trigger(trigger_remote_cmd);
else {
verify_save_state(IO_LIST_ALL);
- fio_terminate_threads(TERMINATE_ALL);
+ fio_terminate_threads(TERMINATE_ALL, TERMINATE_ALL);
exec_trigger(trigger_cmd);
}
}
td->thread_number - 1, &data);
if (!ret)
verify_assign_state(td, data);
- } else
- ret = verify_load_state(td, "local");
+ } else {
+ char prefix[PATH_MAX];
+
+ if (aux_path)
+ sprintf(prefix, "%s%clocal", aux_path,
+ FIO_OS_PATH_SEPARATOR);
+ else
+ strcpy(prefix, "local");
+ ret = verify_load_state(td, prefix);
+ }
return ret;
}
{
const char *waitee = me->o.wait_for;
const char *self = me->o.name;
- struct thread_data *td;
- int i;
if (!waitee)
return false;
- for_each_td(td, i) {
+ for_each_td(td) {
if (!strcmp(td->o.name, self) || strcmp(td->o.name, waitee))
continue;
runstate_to_name(td->runstate));
return true;
}
- }
+ } end_for_each();
dprint(FD_PROCESS, "%s: %s completed, can run\n", self, waitee);
return false;
set_sig_handlers();
nr_thread = nr_process = 0;
- for_each_td(td, i) {
+ for_each_td(td) {
if (check_mount_writes(td))
return;
if (td->o.use_thread)
nr_thread++;
else
nr_process++;
- }
+ } end_for_each();
if (output_format & FIO_OUTPUT_NORMAL) {
- log_info("Starting ");
+ struct buf_output out;
+
+ buf_output_init(&out);
+ __log_buf(&out, "Starting ");
if (nr_thread)
- log_info("%d thread%s", nr_thread,
+ __log_buf(&out, "%d thread%s", nr_thread,
nr_thread > 1 ? "s" : "");
if (nr_process) {
if (nr_thread)
- log_info(" and ");
- log_info("%d process%s", nr_process,
+ __log_buf(&out, " and ");
+ __log_buf(&out, "%d process%s", nr_process,
nr_process > 1 ? "es" : "");
}
- log_info("\n");
- log_info_flush();
+ __log_buf(&out, "\n");
+ log_info_buf(out.buf, out.buflen);
+ buf_output_free(&out);
}
todo = thread_number;
nr_started = 0;
m_rate = t_rate = 0;
- for_each_td(td, i) {
+ for_each_td(td) {
print_status_init(td->thread_number - 1);
if (!td->o.create_serialize)
td_io_close_file(td, f);
}
}
- }
+ } end_for_each();
/* start idle threads before io threads start to run */
fio_idle_prof_start();
/*
* create threads (TD_NOT_CREATED -> TD_CREATED)
*/
- for_each_td(td, i) {
+ for_each_td(td) {
if (td->runstate != TD_NOT_CREATED)
continue;
strerror(ret));
} else {
pid_t pid;
+ void *eo;
dprint(FD_PROCESS, "will fork\n");
+ eo = td->eo;
+ read_barrier();
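+ /*
+ * Snapshot the engine-option pointer before fork(); the child
+ * gets its own copy of everything, so the parent can free eo
+ * and fd below without affecting it.
+ */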
pid = fork();
if (!pid) {
int ret;
ret = (int)(uintptr_t)thread_main(fd);
_exit(ret);
- } else if (i == fio_debug_jobno)
+ } else if (__td_index == fio_debug_jobno)
*fio_debug_jobp = pid;
+ free(eo);
+ free(fd);
+ fd = NULL;
}
dprint(FD_MUTEX, "wait on startup_sem\n");
if (fio_sem_down_timeout(startup_sem, 10000)) {
log_err("fio: job startup hung? exiting.\n");
- fio_terminate_threads(TERMINATE_ALL);
- fio_abort = 1;
+ fio_terminate_threads(TERMINATE_ALL, TERMINATE_ALL);
+ fio_abort = true;
nr_started--;
free(fd);
break;
}
dprint(FD_MUTEX, "done waiting on startup_sem\n");
- }
+ } end_for_each();
/*
* Wait for the started threads to transition to
/*
* start created threads (TD_INITIALIZED -> TD_RUNNING).
*/
- for_each_td(td, i) {
+ for_each_td(td) {
if (td->runstate != TD_INITIALIZED)
continue;
t_rate += ddir_rw_sum(td->o.rate);
todo--;
fio_sem_up(td->sem);
- }
+ } end_for_each();
reap_threads(&nr_running, &t_rate, &m_rate);
int fio_backend(struct sk_out *sk_out)
{
- struct thread_data *td;
int i;
-
if (exec_profile) {
if (load_profile(exec_profile))
return 1;
setup_log(&agg_io_log[DDIR_TRIM], &p, "agg-trim_bw.log");
}
+ if (init_global_dedupe_working_set_seeds()) {
+ log_err("fio: failed to initialize global dedupe working set\n");
+ return 1;
+ }
+
startup_sem = fio_sem_init(FIO_SEM_LOCKED);
+ if (!sk_out)
+ is_local_backend = true;
if (startup_sem == NULL)
return 1;
set_genesis_time();
stat_init();
- helper_thread_create(startup_sem, sk_out);
+ if (helper_thread_create(startup_sem, sk_out))
+ log_err("fio: failed to create helper thread\n");
cgroup_list = smalloc(sizeof(*cgroup_list));
if (cgroup_list)
}
}
- for_each_td(td, i) {
+ for_each_td(td) {
+ struct thread_stat *ts = &td->ts;
+
+ free_clat_prio_stats(ts);
steadystate_free(td);
fio_options_free(td);
+ fio_dump_options_free(td);
if (td->rusage_sem) {
fio_sem_remove(td->rusage_sem);
td->rusage_sem = NULL;
}
fio_sem_remove(td->sem);
td->sem = NULL;
- }
+ } end_for_each();
free_disk_util();
if (cgroup_list) {
cgroup_kill(cgroup_list);
sfree(cgroup_list);
}
- sfree(cgroup_mnt);
fio_sem_remove(startup_sem);
stat_exit();