 	}
 }
+static int wait_for_completions(struct thread_data *td, struct timeval *time,
+				uint64_t *bytes_done)
+{
+	const int full = queue_full(td);
+	int min_evts = 0;
+	int ret;
+
+	/*
+	 * if the queue is full, we MUST reap at least 1 event
+	 */
+	min_evts = min(td->o.iodepth_batch_complete, td->cur_depth);
+	if (full && !min_evts)
+		min_evts = 1;
+
+	if (time && (__should_check_rate(td, DDIR_READ) ||
+	    __should_check_rate(td, DDIR_WRITE) ||
+	    __should_check_rate(td, DDIR_TRIM)))
+		fio_gettime(time, NULL);
+
+	do {
+		ret = io_u_queued_complete(td, min_evts, bytes_done);
+		if (ret < 0)
+			break;
+	} while (full && (td->cur_depth > td->o.iodepth_low));
+
+	return ret;
+}
+
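The helper consolidates the reap logic that was previously duplicated in do_verify() and do_io(). For readers new to fio's reaping policy, here is a standalone sketch (names and values invented, not part of the patch) of the batching rule it encodes: reap up to iodepth_batch_complete events per pass, but if the queue is full, reap at least one so the loop can make progress:

    #include <stdio.h>

    /* invented stand-ins for td->o.iodepth_batch_complete and the queue size */
    static unsigned int batch_complete = 4;
    static unsigned int max_depth = 16;

    static unsigned int reap_min(unsigned int cur_depth)
    {
    	unsigned int min_evts = batch_complete < cur_depth ?
    					batch_complete : cur_depth;

    	/* a full queue must make progress: reap at least one event */
    	if (cur_depth >= max_depth && !min_evts)
    		min_evts = 1;
    	return min_evts;
    }

    int main(void)
    {
    	batch_complete = 0;	/* "reap as they come in" mode */
    	printf("full queue, batch=0 -> reap at least %u\n",
    	       reap_min(max_depth));
    	return 0;
    }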
/*
* The main verify engine. Runs over the writes we previously submitted,
* reads the blocks back in, and checks the crc/md5 of the data.
 		io_u->end_io = verify_io_u;
 		ddir = io_u->ddir;
+		if (!td->o.disable_slat)
+			fio_gettime(&io_u->start_time, NULL);
 		ret = td_io_queue(td, io_u);
 		switch (ret) {
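Stamping io_u->start_time before td_io_queue() is what gives the verify path submission-latency (slat) accounting; disable_slat skips the clock read. A minimal sketch of the kind of delta such a stamp feeds (helper name invented):

    #include <sys/time.h>

    /* illustration only: microseconds between a start and end timeval */
    static unsigned long tv_delta_usec(const struct timeval *s,
    				   const struct timeval *e)
    {
    	return (e->tv_sec - s->tv_sec) * 1000000UL +
    	       (e->tv_usec - s->tv_usec);
    }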
 		 */
 reap:
 		full = queue_full(td) || (ret == FIO_Q_BUSY && td->cur_depth);
-		if (full || !td->o.iodepth_batch_complete) {
-			min_events = min(td->o.iodepth_batch_complete,
-					 td->cur_depth);
-			/*
-			 * if the queue is full, we MUST reap at least 1 event
-			 */
-			if (full && !min_events)
-				min_events = 1;
+		if (full || !td->o.iodepth_batch_complete)
+			ret = wait_for_completions(td, NULL, bytes_done);
-			do {
-				/*
-				 * Reap required number of io units, if any,
-				 * and do the verification on them through
-				 * the callback handler
-				 */
-				if (io_u_queued_complete(td, min_events, bytes_done) < 0) {
-					ret = -1;
-					break;
-				}
-			} while (full && (td->cur_depth > td->o.iodepth_low));
-		}
 		if (ret < 0)
 			break;
 	}
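Note the error handling also changes shape here: the old block forced ret to -1, while wait_for_completions() propagates the return of io_u_queued_complete() directly. For context, a simplified sketch of when this reap guard fires (the enum stand-ins are invented; fio defines the real FIO_Q_* values):

    enum { Q_COMPLETED, Q_QUEUED, Q_BUSY };	/* invented stand-ins for FIO_Q_* */

    /* illustration only: when the submit loop must stop and reap */
    static int need_reap(int queue_is_full, int queue_ret, unsigned int cur_depth)
    {
    	/* Q_BUSY means the engine refused the io_u; if anything is in
    	 * flight, completions must be reaped before the retry. */
    	return queue_is_full || (queue_ret == Q_BUSY && cur_depth);
    }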
 	if (!td->o.number_ios)
 		return 0;
-	number_ios = ddir_rw_sum(td->this_io_blocks);
+	number_ios = ddir_rw_sum(td->io_blocks);
 	number_ios += td->io_u_queued + td->io_u_in_flight;
-	return number_ios >= td->o.number_ios;
+	return number_ios >= (td->o.number_ios * td->loops);
+}
+
+static int io_issue_bytes_exceeded(struct thread_data *td)
+{
+	unsigned long long bytes, limit;
+
+	if (td_rw(td))
+		bytes = td->io_issue_bytes[DDIR_READ] + td->io_issue_bytes[DDIR_WRITE];
+	else if (td_write(td))
+		bytes = td->io_issue_bytes[DDIR_WRITE];
+	else if (td_read(td))
+		bytes = td->io_issue_bytes[DDIR_READ];
+	else
+		bytes = td->io_issue_bytes[DDIR_TRIM];
+
+	if (td->o.io_limit)
+		limit = td->o.io_limit;
+	else
+		limit = td->o.size;
+
+	limit *= td->loops;
+	return bytes >= limit || exceeds_number_ios(td);
 }
-static int io_bytes_exceeded(struct thread_data *td)
+static int io_complete_bytes_exceeded(struct thread_data *td)
 {
 	unsigned long long bytes, limit;
 	else
 		limit = td->o.size;
+	limit *= td->loops;
 	return bytes >= limit || exceeds_number_ios(td);
 }
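The motivation for splitting the old io_bytes_exceeded() in two is that issued bytes run ahead of completed bytes while I/O is in flight, so the submit loop and the drain/verify loop need different cutoffs. A hedged sketch with simplified fields:

    /* illustration only: why issue- and complete-based checks both exist */
    struct accounting {
    	unsigned long long issued;	/* bytes handed to the I/O engine */
    	unsigned long long completed;	/* bytes whose completions were reaped */
    };

    /* the submit loop stops issuing once issued bytes hit the scaled limit */
    static int issue_exceeded(const struct accounting *a,
    			  unsigned long long limit, unsigned int loops)
    {
    	return a->issued >= limit * loops;
    }

    /* the verify/drain loop keeps going until completions catch up too */
    static int complete_exceeded(const struct accounting *a,
    			     unsigned long long limit, unsigned int loops)
    {
    	return a->completed >= limit * loops;
    }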
 	lat_target_init(td);
+	total_bytes = td->o.size;
+	/*
+	 * Allow random overwrite workloads to write up to io_limit
+	 * before starting verification phase as 'size' doesn't apply.
+	 */
+	if (td_write(td) && td_random(td) && td->o.norandommap)
+		total_bytes = max(total_bytes, (uint64_t) td->o.io_limit);
 	/*
 	 * If verify_backlog is enabled, we'll run the verify in this
 	 * handler as well. For that case, we may need up to twice the
 	 * amount of bytes.
 	 */
-	total_bytes = td->o.size;
 	if (td->o.verify != VERIFY_NONE &&
 	   (td_write(td) && td->o.verify_backlog))
 		total_bytes += td->o.size;
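A worked example of the budget arithmetic, using invented job parameters (size=1g, io_limit=4g, a random write with norandommap, verify_backlog enabled):

    #include <stdio.h>

    int main(void)
    {
    	/* invented numbers, illustration only */
    	unsigned long long size = 1ULL << 30;		/* size=1g */
    	unsigned long long io_limit = 4ULL << 30;	/* io_limit=4g */
    	unsigned long long total_bytes = size;

    	/* random overwrite with norandommap: budget may grow to io_limit */
    	if (total_bytes < io_limit)
    		total_bytes = io_limit;			/* now 4g */

    	/* verify_backlog on a write job adds another 'size' on top */
    	total_bytes += size;				/* now 5g */

    	printf("total_bytes = %llug\n", total_bytes >> 30);
    	return 0;
    }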
 	while ((td->o.read_iolog_file && !flist_empty(&td->io_log_list)) ||
-		(!flist_empty(&td->trim_list)) || !io_bytes_exceeded(td) ||
+		(!flist_empty(&td->trim_list)) || !io_issue_bytes_exceeded(td) ||
 		td->o.time_based) {
 		struct timeval comp_time;
-		int min_evts = 0;
 		struct io_u *io_u;
 		int ret2, full;
 		enum fio_ddir ddir;
 		 */
 reap:
 		full = queue_full(td) || (ret == FIO_Q_BUSY && td->cur_depth);
-		if (full || !td->o.iodepth_batch_complete) {
-			min_evts = min(td->o.iodepth_batch_complete,
-					td->cur_depth);
-			/*
-			 * if the queue is full, we MUST reap at least 1 event
-			 */
-			if (full && !min_evts)
-				min_evts = 1;
-
-			if (__should_check_rate(td, DDIR_READ) ||
-			    __should_check_rate(td, DDIR_WRITE) ||
-			    __should_check_rate(td, DDIR_TRIM))
-				fio_gettime(&comp_time, NULL);
-
-			do {
-				ret = io_u_queued_complete(td, min_evts, bytes_done);
-				if (ret < 0)
-					break;
-
-			} while (full && (td->cur_depth > td->o.iodepth_low));
-		}
-
+		if (full || !td->o.iodepth_batch_complete)
+			ret = wait_for_completions(td, &comp_time, bytes_done);
 		if (ret < 0)
 			break;
 		if (!ddir_rw_sum(bytes_done) && !(td->io_ops->flags & FIO_NOIO))
 			td_set_runstate(td, TD_RUNNING);
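Passing &comp_time here preserves the old behavior of timestamping the batch once when any data direction has a rate cap. A simplified sketch of the sort of check such a stamp feeds (names and signature invented):

    #include <sys/time.h>

    /* illustration only: has the achieved rate exceeded a bytes/sec cap? */
    static int over_rate(unsigned long long bytes, const struct timeval *start,
    		     const struct timeval *now, unsigned long long bps_cap)
    {
    	long long usec = (long long)(now->tv_sec - start->tv_sec) * 1000000LL +
    			 (now->tv_usec - start->tv_usec);

    	if (usec <= 0)
    		return 0;
    	return bytes * 1000000ULL / (unsigned long long)usec > bps_cap;
    }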
 	while ((td->o.read_iolog_file && !flist_empty(&td->io_log_list)) ||
-		(!flist_empty(&td->trim_list)) || !io_bytes_exceeded(td)) {
+		(!flist_empty(&td->trim_list)) || !io_complete_bytes_exceeded(td)) {
 		struct io_u *io_u;
 		int ret;
 	 * Set affinity first, in case it has an impact on the memory
 	 * allocations.
 	 */
-	if (o->cpumask_set) {
+	if (fio_option_is_set(o, cpumask)) {
 		if (o->cpus_allowed_policy == FIO_CPUS_SPLIT) {
 			ret = fio_cpus_split(&o->cpumask, td->thread_number - 1);
 			if (!ret) {
 #ifdef CONFIG_LIBNUMA
 	/* numa node setup */
-	if (o->numa_cpumask_set || o->numa_memmask_set) {
+	if (fio_option_is_set(o, numa_cpunodes) ||
+	    fio_option_is_set(o, numa_memnodes)) {
 		struct bitmask *mask;
 		if (numa_available() < 0) {
 			goto err;
 		}
-		if (o->numa_cpumask_set) {
+		if (fio_option_is_set(o, numa_cpunodes)) {
 			mask = numa_parse_nodestring(o->numa_cpunodes);
 			ret = numa_run_on_node_mask(mask);
 			numa_free_nodemask(mask);
 			}
 		}
-		if (o->numa_memmask_set) {
-
+		if (fio_option_is_set(o, numa_memnodes)) {
 			mask = NULL;
 			if (o->numa_memnodes)
 				mask = numa_parse_nodestring(o->numa_memnodes);
 	if (o->verify_async && verify_async_init(td))
 		goto err;
-	if (o->ioprio) {
+	if (fio_option_is_set(o, ioprio) ||
+	    fio_option_is_set(o, ioprio_class)) {
 		ret = ioprio_set(IOPRIO_WHO_PROCESS, 0, o->ioprio_class, o->ioprio);
 		if (ret == -1) {
 			td_verror(td, errno, "ioprio_set");
 	cgroup_shutdown(td, &cgroup_mnt);
 	verify_free_state(td);
-	if (o->cpumask_set) {
+	if (fio_option_is_set(o, cpumask)) {
 		ret = fio_cpuset_exit(&o->cpumask);
 		if (ret)
 			td_verror(td, ret, "fio_cpuset_exit");
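The pattern in all of these conversions is the same: testing the option's value (o->ioprio, o->cpumask_set, ...) cannot distinguish an explicit 0 from "never set", whereas fio_option_is_set() records whether the user actually supplied the option. A minimal sketch of the idea with invented names:

    #include <stdbool.h>

    /* invented names, illustration only: track "was set" apart from the value */
    struct opts {
    	unsigned int ioprio;	/* 0 is a perfectly valid explicit setting */
    	bool ioprio_set;	/* flipped by the option parser */
    };

    static bool opt_is_set(const struct opts *o)
    {
    	/* testing o->ioprio != 0 would miss an explicit ioprio=0 */
    	return o->ioprio_set;
    }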
 void check_trigger_file(void)
 {
 	if (__check_trigger_file() || trigger_timedout()) {
-		if (nr_clients) {
-			if (trigger_remote_cmd)
-				fio_clients_send_trigger(trigger_remote_cmd);
-		} else {
+		if (nr_clients)
+			fio_clients_send_trigger(trigger_remote_cmd);
+		else {
 			verify_save_state();
 			fio_terminate_threads(TERMINATE_ALL);
 			exec_trigger(trigger_cmd);
 	gettimeofday(&tv, NULL);
 	ts.tv_sec = tv.tv_sec + sec;
 	ts.tv_nsec = (tv.tv_usec * 1000) + nsec;
-	if (ts.tv_nsec > 1000000000ULL) {
+
+	if (ts.tv_nsec >= 1000000000ULL) {
 		ts.tv_nsec -= 1000000000ULL;
 		ts.tv_sec++;
 	}
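The comparison change from '>' to '>=' matters because tv_nsec can land on exactly 1000000000, which is out of range for a struct timespec and makes consumers such as pthread_cond_timedwait() fail with EINVAL. A worked example with invented inputs:

    #include <stdio.h>

    int main(void)
    {
    	/* invented inputs, illustration only */
    	long tv_usec = 999000;			/* from gettimeofday() */
    	long nsec = 1000000;			/* caller asked for +1ms */
    	long tv_nsec = tv_usec * 1000 + nsec;	/* exactly 1000000000 */

    	/* the old '>' test skips the carry and leaves an invalid timespec;
    	 * '>=' normalizes it to tv_nsec = 0 and bumps tv_sec instead */
    	printf("tv_nsec = %ld\n", tv_nsec);
    	return 0;
    }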