#include "server.h"
#include "lib/getrusage.h"
#include "idletime.h"
+#include "err.h"
static pthread_t disk_util_thread;
static struct fio_mutex *disk_thread_mutex;
static struct fio_mutex *startup_mutex;
-static struct fio_mutex *writeout_mutex;
static struct flist_head *cgroup_list;
static char *cgroup_mnt;
static int exit_value;
return 0;
if (!td->o.timeout)
return 0;
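+	/* o.timeout is kept in usec here, matching utime_since() */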
- if (mtime_since(&td->epoch, t) >= td->o.timeout * 1000)
+ if (utime_since(&td->epoch, t) >= td->o.timeout)
return 1;
return 0;
break;
while ((io_u = get_io_u(td)) != NULL) {
+ if (IS_ERR(io_u)) {
+ io_u = NULL;
+ ret = FIO_Q_BUSY;
+ goto reap;
+ }
+
/*
 * If we can queue more, do so. But check if there are
 * completed io_u's first. Note that we can get BUSY even
 * without IO queued, if the system is resource starved.
 */
+reap:
full = queue_full(td) || (ret == FIO_Q_BUSY && td->cur_depth);
if (full || !td->o.iodepth_batch_complete) {
min_events = min(td->o.iodepth_batch_complete,
dprint(FD_VERIFY, "exiting loop\n");
}
+static unsigned int exceeds_number_ios(struct thread_data *td)
+{
+ unsigned long long number_ios;
+
+ if (!td->o.number_ios)
+ return 0;
+
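+	/*
+	 * Count completed blocks plus anything queued or in flight, so
+	 * we stop issuing before completions can push us past the limit:
+	 * with number_ios=100, 95 blocks done and 5 io_u's in flight,
+	 * the limit is already considered hit.
+	 */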
+ number_ios = ddir_rw_sum(td->this_io_blocks);
+ number_ios += td->io_u_queued + td->io_u_in_flight;
+
+ return number_ios >= td->o.number_ios;
+}
+
static int io_bytes_exceeded(struct thread_data *td)
{
unsigned long long bytes;
else
bytes = td->this_io_bytes[DDIR_TRIM];
- return bytes >= td->o.size;
+ return bytes >= td->o.size || exceeds_number_ios(td);
}
/*
uint64_t bytes_done[DDIR_RWDIR_CNT] = { 0, 0, 0 };
unsigned int i;
int ret = 0;
- uint64_t bytes_issued = 0;
+ uint64_t total_bytes, bytes_issued = 0;
if (in_ramp_time(td))
td_set_runstate(td, TD_RAMP);
lat_target_init(td);
+ /*
+ * If verify_backlog is enabled, we'll run the verify in this
+ * handler as well. For that case, we may need up to twice the
+ * amount of bytes.
+ */
+ total_bytes = td->o.size;
+ if (td->o.verify != VERIFY_NONE &&
+ (td_write(td) && td->o.verify_backlog))
+ total_bytes += td->o.size;
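+	/*
+	 * Example: a write job with size=1G and verify_backlog set gets
+	 * a 2G budget here, 1G of writes plus up to 1G of verify reads.
+	 */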
+
while ((td->o.read_iolog_file && !flist_empty(&td->io_log_list)) ||
(!flist_empty(&td->trim_list)) || !io_bytes_exceeded(td) ||
td->o.time_based) {
if (flow_threshold_exceeded(td))
continue;
- if (bytes_issued >= (uint64_t) td->o.size)
+ if (bytes_issued >= total_bytes)
break;
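+	/*
+	 * get_io_u() can now return an ERR_PTR-style error instead of
+	 * just NULL, letting a starved system (-EBUSY) fall through to
+	 * reaping completions instead of ending the job.
+	 */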
io_u = get_io_u(td);
- if (!io_u) {
+ if (IS_ERR_OR_NULL(io_u)) {
+ int err = PTR_ERR(io_u);
+
+ io_u = NULL;
+ if (err == -EBUSY) {
+ ret = FIO_Q_BUSY;
+ goto reap;
+ }
if (td->o.latency_target)
goto reap;
break;
*/
if (td->o.verify != VERIFY_NONE && io_u->ddir == DDIR_READ &&
((io_u->flags & IO_U_F_VER_LIST) || !td_rw(td))) {
+
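+		/*
+		 * No fixed verify pattern, so draw a fresh seed that the
+		 * verify path can use to regenerate the expected contents.
+		 * On 64-bit, where rand_seed is wider than one 32-bit
+		 * random, fold in a second random via the multiply below.
+		 */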
+ if (!td->o.verify_pattern_bytes) {
+ io_u->rand_seed = __rand(&td->__verify_state);
+ if (sizeof(int) != sizeof(long *))
+ io_u->rand_seed *= __rand(&td->__verify_state);
+ }
+
if (td->o.verify_async)
io_u->end_io = verify_io_u_async;
else
else
td_set_runstate(td, TD_RUNNING);
+	/*
+	 * Always log the IO before it is issued, so we know the exact
+	 * order it was submitted in. The logged unit will track when the
+	 * IO has completed.
+	 */
+ if (td_write(td) && io_u->ddir == DDIR_WRITE &&
+ td->o.do_verify &&
+ td->o.verify != VERIFY_NONE &&
+ !td->o.experimental_verify)
+ log_io_piece(td, io_u);
+
ret = td_io_queue(td, io_u);
switch (ret) {
case FIO_Q_COMPLETED:
* Fill the buffer with the pattern if we are
* going to be doing writes.
*/
- fill_pattern(td, io_u->buf, max_bs, io_u, 0, 0);
+ fill_verify_pattern(td, io_u->buf, max_bs, io_u, 0, 0);
}
}
td->o.loops--;
return 1;
}
+ if (exceeds_number_ios(td))
+ return 0;
if (td->o.size != -1ULL && ddir_rw_sum(td->io_bytes) < td->o.size) {
uint64_t diff;
if (diff < td_max_bs(td))
return 0;
+ if (fio_files_done(td))
+ return 0;
+
return 1;
}
return ret;
}
+/*
+ * Dry run to compute the correct state of numberio for verification.
+ */
+static uint64_t do_dry_run(struct thread_data *td)
+{
+ uint64_t bytes_done[DDIR_RWDIR_CNT] = { 0, 0, 0 };
+
+ td_set_runstate(td, TD_RUNNING);
+
+ while ((td->o.read_iolog_file && !flist_empty(&td->io_log_list)) ||
+ (!flist_empty(&td->trim_list)) || !io_bytes_exceeded(td)) {
+ struct io_u *io_u;
+ int ret;
+
+ if (td->terminate || td->done)
+ break;
+
+ io_u = get_io_u(td);
+		if (IS_ERR_OR_NULL(io_u))
+ break;
+
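+		/*
+		 * Pretend the io_u was issued and completed successfully:
+		 * mark it in flight with no error or residual, and do the
+		 * same accounting the real issue path would have done.
+		 */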
+ io_u->flags |= IO_U_F_FLIGHT;
+ io_u->error = 0;
+ io_u->resid = 0;
+ if (ddir_rw(acct_ddir(io_u)))
+ td->io_issues[acct_ddir(io_u)]++;
+ if (ddir_rw(io_u->ddir)) {
+ io_u_mark_depth(td, 1);
+ td->ts.total_io_u[io_u->ddir]++;
+ }
+
+ if (td_write(td) && io_u->ddir == DDIR_WRITE &&
+ td->o.do_verify &&
+ td->o.verify != VERIFY_NONE &&
+ !td->o.experimental_verify)
+ log_io_piece(td, io_u);
+
+ ret = io_u_sync_complete(td, io_u, bytes_done);
+ (void) ret;
+ }
+
+ return bytes_done[DDIR_WRITE] + bytes_done[DDIR_TRIM];
+}
+
+static int write_this_log(struct thread_data *td, struct io_log *log,
+ const char *log_file, const char *name, int try)
+{
+ int ret;
+
+ if (!log)
+ return 0;
+
+ if (log_file)
+ ret = finish_log_named(td, log, log_file, name, try);
+ else
+ ret = finish_log(td, log, name, try);
+
+ return ret;
+}
+
+static int write_iops_log(struct thread_data *td, struct thread_options *o,
+ int try)
+{
+ return write_this_log(td, td->iops_log, o->iops_log_file, "iops", try);
+}
+
+static int write_slat_log(struct thread_data *td, struct thread_options *o,
+ int try)
+{
+ return write_this_log(td, td->slat_log, o->lat_log_file, "slat", try);
+}
+
+static int write_clat_log(struct thread_data *td, struct thread_options *o,
+ int try)
+{
+	return write_this_log(td, td->clat_log, o->lat_log_file, "clat", try);
+}
+
+static int write_lat_log(struct thread_data *td, struct thread_options *o,
+ int try)
+{
+ return write_this_log(td, td->lat_log, o->lat_log_file, "lat", try);
+}
+
+static int write_bandw_log(struct thread_data *td, struct thread_options *o,
+ int try)
+{
+ return write_this_log(td, td->bw_log, o->bw_log_file, "bw", try);
+}
+
+enum {
+ BW_LOG_MASK = 1,
+ LAT_LOG_MASK = 2,
+ SLAT_LOG_MASK = 4,
+ CLAT_LOG_MASK = 8,
+ IOPS_LOG_MASK = 16,
+
+ ALL_LOG_MASK = 31,
+ ALL_LOG_NR = 5,
+};
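+
+/*
+ * writeout_logs() retries until every log has been flushed, clearing
+ * one mask bit per finished log; ALL_LOG_NR must match the number of
+ * mask bits above.
+ */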
+
+static void writeout_logs(struct thread_data *td)
+{
+ struct thread_options *o = &td->o;
+ unsigned int log_mask = ALL_LOG_MASK;
+ unsigned int log_left = ALL_LOG_NR;
+ int old_state;
+
+ old_state = td_bump_runstate(td, TD_FINISHING);
+
+ finalize_logs(td);
+
+ while (log_left) {
+ int ret, prev_log_left = log_left;
+
+ if (log_mask & BW_LOG_MASK) {
+ ret = write_bandw_log(td, o, log_left != 1);
+ if (!ret) {
+ log_left--;
+ log_mask &= ~BW_LOG_MASK;
+ }
+ }
+ if (log_mask & LAT_LOG_MASK) {
+ ret = write_lat_log(td, o, log_left != 1);
+ if (!ret) {
+ log_left--;
+ log_mask &= ~LAT_LOG_MASK;
+ }
+ }
+ if (log_mask & SLAT_LOG_MASK) {
+ ret = write_slat_log(td, o, log_left != 1);
+ if (!ret) {
+ log_left--;
+ log_mask &= ~SLAT_LOG_MASK;
+ }
+ }
+ if (log_mask & CLAT_LOG_MASK) {
+ ret = write_clat_log(td, o, log_left != 1);
+ if (!ret) {
+ log_left--;
+ log_mask &= ~CLAT_LOG_MASK;
+ }
+ }
+ if (log_mask & IOPS_LOG_MASK) {
+ ret = write_iops_log(td, o, log_left != 1);
+ if (!ret) {
+ log_left--;
+ log_mask &= ~IOPS_LOG_MASK;
+ }
+ }
+
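+		/*
+		 * If a full pass over the pending logs made no progress,
+		 * back off for 5ms instead of busy-looping.
+		 */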
+ if (prev_log_left == log_left)
+ usleep(5000);
+ }
+
+ td_restore_runstate(td, old_state);
+}
+
/*
* Entry point for the thread based jobs. The process based jobs end up
* here as well, after a little setup.
fio_mutex_down(td->mutex);
dprint(FD_MUTEX, "done waiting on td->mutex\n");
- /*
- * the ->mutex mutex is now no longer used, close it to avoid
- * eating a file descriptor
- */
- fio_mutex_remove(td->mutex);
- td->mutex = NULL;
-
/*
* A new gid requires privilege, so we need to do this before setting
* the uid.
* allocations.
*/
if (o->cpumask_set) {
+ if (o->cpus_allowed_policy == FIO_CPUS_SPLIT) {
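+			/*
+			 * cpus_allowed_policy=split gives each job its own
+			 * CPU carved out of the allowed mask, rather than
+			 * all jobs sharing the full set.
+			 */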
+ ret = fio_cpus_split(&o->cpumask, td->thread_number - 1);
+ if (!ret) {
+ log_err("fio: no CPUs set\n");
+ log_err("fio: Try increasing number of available CPUs\n");
+ td_verror(td, EINVAL, "cpus_split");
+ goto err;
+ }
+ }
ret = fio_setaffinity(td->pid, o->cpumask);
if (ret == -1) {
td_verror(td, errno, "cpu_set_affinity");
prune_io_piece_log(td);
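+	/*
+	 * With verify_only we skip the real IO and only replay the
+	 * accounting, so the verify pass sees the numberio state the
+	 * actual writes would have produced.
+	 */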
- verify_bytes = do_io(td);
+ if (td->o.verify_only && (td_write(td) || td_rw(td)))
+ verify_bytes = do_dry_run(td);
+ else
+ verify_bytes = do_io(td);
clear_state = 1;
fio_unpin_memory(td);
- fio_mutex_down(writeout_mutex);
- if (td->bw_log) {
- if (o->bw_log_file) {
- finish_log_named(td, td->bw_log,
- o->bw_log_file, "bw");
- } else
- finish_log(td, td->bw_log, "bw");
- }
- if (td->lat_log) {
- if (o->lat_log_file) {
- finish_log_named(td, td->lat_log,
- o->lat_log_file, "lat");
- } else
- finish_log(td, td->lat_log, "lat");
- }
- if (td->slat_log) {
- if (o->lat_log_file) {
- finish_log_named(td, td->slat_log,
- o->lat_log_file, "slat");
- } else
- finish_log(td, td->slat_log, "slat");
- }
- if (td->clat_log) {
- if (o->lat_log_file) {
- finish_log_named(td, td->clat_log,
- o->lat_log_file, "clat");
- } else
- finish_log(td, td->clat_log, "clat");
- }
- if (td->iops_log) {
- if (o->iops_log_file) {
- finish_log_named(td, td->iops_log,
- o->iops_log_file, "iops");
- } else
- finish_log(td, td->iops_log, "iops");
- }
+ writeout_logs(td);
- fio_mutex_up(writeout_mutex);
if (o->exec_postrun)
exec_string(o, o->exec_postrun, (const char *)"postrun");
fio_mutex_remove(td->rusage_sem);
td->rusage_sem = NULL;
+ fio_mutex_remove(td->mutex);
+ td->mutex = NULL;
+
td_set_runstate(td, TD_EXITED);
return (void *) (uintptr_t) td->error;
}
static void run_threads(void)
{
struct thread_data *td;
- unsigned long spent;
unsigned int i, todo, nr_running, m_rate, t_rate, nr_started;
+ uint64_t spent;
if (fio_gtod_offload && fio_start_gtod_thread())
return;
}
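+		/*
+		 * start_delay is stored in usec now, hence the direct
+		 * comparison against utime_since_genesis() below.
+		 */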
if (td->o.start_delay) {
- spent = mtime_since_genesis();
+ spent = utime_since_genesis();
- if (td->o.start_delay * 1000 > spent)
+ if (td->o.start_delay > spent)
continue;
}
startup_mutex = fio_mutex_init(FIO_MUTEX_LOCKED);
if (startup_mutex == NULL)
return 1;
- writeout_mutex = fio_mutex_init(FIO_MUTEX_UNLOCKED);
- if (writeout_mutex == NULL)
- return 1;
set_genesis_time();
stat_init();
sfree(cgroup_mnt);
fio_mutex_remove(startup_mutex);
- fio_mutex_remove(writeout_mutex);
fio_mutex_remove(disk_thread_mutex);
stat_exit();
return exit_value;