}
}
-static void sig_show_status(int sig)
+void sig_show_status(int sig)
{
show_running_run_stats();
}
if (spent < td->o.ratecycle)
return 0;
- if (td->o.rate[ddir]) {
+ if (td->o.rate[ddir] || td->o.ratemin[ddir]) {
/*
* check bandwidth specified rate
*/
log_err("%s: min iops rate %u not met,"
" got %lu\n", td->o.name,
rate_iops_min, rate);
+ return 1;
}
}
}
/*
* if the queue is full, we MUST reap at least 1 event
*/
- min_evts = min(td->o.iodepth_batch_complete, td->cur_depth);
- if ((full && !min_evts) || !td->o.iodepth_batch_complete)
+ min_evts = min(td->o.iodepth_batch_complete_min, td->cur_depth);
+ if ((full && !min_evts) || !td->o.iodepth_batch_complete_min)
min_evts = 1;
if (time && (__should_check_rate(td, DDIR_READ) ||
*ret = ret2;
break;
default:
- assert(ret < 0);
+ assert(*ret < 0);
td_verror(td, -(*ret), "td_io_queue");
break;
}
return 0;
}
+/*
+ * Returns non-zero when neither iodepth_batch_complete_min nor
+ * iodepth_batch_complete_max is set (both zero), i.e. no batch-completion
+ * bounds are configured.  Callers use this to decide whether to wait for
+ * completions on every pass through the reap path (see the
+ * "full || io_in_polling(td)" checks).
+ */
+static inline int io_in_polling(struct thread_data *td)
+{
+	return !td->o.iodepth_batch_complete_min &&
+		!td->o.iodepth_batch_complete_max;
+}
+
/*
* The main verify engine. Runs over the writes we previously submitted,
* reads the blocks back in, and checks the crc/md5 of the data.
*/
reap:
full = queue_full(td) || (ret == FIO_Q_BUSY && td->cur_depth);
- if (full || !td->o.iodepth_batch_complete)
+ if (full || io_in_polling(td))
ret = wait_for_completions(td, NULL);
if (ret < 0)
return bytes >= limit || exceeds_number_ios(td);
}
+/*
+ * Rate control helper: convert the bytes issued so far in direction
+ * @ddir into the number of microseconds that the configured byte rate
+ * allows for them, i.e. the earliest usec timestamp at which the next
+ * I/O may be issued.  Returns 0 when no byte rate (rate_bps) is set
+ * for this direction.
+ */
+static long long usec_for_io(struct thread_data *td, enum fio_ddir ddir)
+{
+	uint64_t secs, remainder, bps, bytes;
+
+	/* rate bookkeeping lives on the parent td, never a worker child */
+	assert(!(td->flags & TD_F_CHILD));
+	bytes = td->rate_io_issue_bytes[ddir];
+	bps = td->rate_bps[ddir];
+	if (bps) {
+		/*
+		 * Split into whole seconds plus remainder so the
+		 * remainder * 1000000 product cannot overflow the way a
+		 * direct bytes * 1000000 would for large byte counts.
+		 */
+		secs = bytes / bps;
+		remainder = bytes % bps;
+		return remainder * 1000000 / bps + secs * 1000000;
+	} else
+		return 0;
+}
+
/*
* Main IO worker function. It retrieves io_u's to process and queues
* and reaps them, checking for rate and errors along the way.
if (td->error)
break;
ret = workqueue_enqueue(&td->io_wq, io_u);
+
+ if (should_check_rate(td))
+ td->rate_next_io_time[ddir] = usec_for_io(td, ddir);
+
} else {
ret = td_io_queue(td, io_u);
- if (io_queue_event(td, io_u, &ret, ddir, &bytes_issued, 1, &comp_time))
+ if (should_check_rate(td))
+ td->rate_next_io_time[ddir] = usec_for_io(td, ddir);
+
+ if (io_queue_event(td, io_u, &ret, ddir, &bytes_issued, 0, &comp_time))
break;
/*
reap:
full = queue_full(td) ||
(ret == FIO_Q_BUSY && td->cur_depth);
- if (full || !td->o.iodepth_batch_complete)
+ if (full || io_in_polling(td))
ret = wait_for_completions(td, &comp_time);
}
if (ret < 0)
static int exec_string(struct thread_options *o, const char *string, const char *mode)
{
size_t newlen = strlen(string) + strlen(o->name) + strlen(mode) + 9 + 1;
- int ret; /* newlen = strlen(string) + strlen(o->name) + strlen(mode) + 9 + 1; */
+ int ret;
char *str;
str = malloc(newlen);
td->ts.io_bytes[DDIR_TRIM] = td->io_bytes[DDIR_TRIM];
if (td->o.verify_state_save && !(td->flags & TD_F_VSTATE_SAVED) &&
- (td->o.verify != VERIFY_NONE && td_write(td))) {
- struct all_io_list *state;
- size_t sz;
-
- state = get_all_io_list(td->thread_number, &sz);
- if (state) {
- __verify_save_state(state, "local");
- free(state);
- }
- }
+ (td->o.verify != VERIFY_NONE && td_write(td)))
+ verify_save_state(td->thread_number);
fio_unpin_memory(td);
if (nr_clients)
fio_clients_send_trigger(trigger_remote_cmd);
else {
- verify_save_state();
+ verify_save_state(IO_LIST_ALL);
fio_terminate_threads(TERMINATE_ALL);
exec_trigger(trigger_cmd);
}
nr_process++;
}
- if (output_format == FIO_OUTPUT_NORMAL) {
+ if (output_format & FIO_OUTPUT_NORMAL) {
log_info("Starting ");
if (nr_thread)
log_info("%d thread%s", nr_thread,