if (full || !td->o.iodepth_batch_complete) {
        min_events = min(td->o.iodepth_batch_complete,
                        td->cur_depth);
-       if (full && !min_events && td->o.iodepth_batch_complete != 0)
+       /*
+        * if the queue is full, we MUST reap at least 1 event
+        */
+       if (full && !min_events)
                min_events = 1;

        do {
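This hunk drops the trailing "&& td->o.iodepth_batch_complete != 0" test: with iodepth_batch_complete=0 and the queue full, min_events computed to 0 and that test then kept it at 0, so the completion path could be entered asking for zero events and make no forward progress. A minimal standalone sketch of the fixed calculation; the reap_count()/main() harness is illustrative, only the min()/full logic mirrors the hunk:

#include <stdio.h>

#define min(a, b)       ((a) < (b) ? (a) : (b))

static unsigned int reap_count(unsigned int batch_complete,
                               unsigned int cur_depth, int full)
{
        unsigned int min_events = min(batch_complete, cur_depth);

        /*
         * The removed "&& batch_complete != 0" test meant that with
         * iodepth_batch_complete=0 this bump never happened, so a
         * full queue could be polled for a minimum of zero events.
         */
        if (full && !min_events)
                min_events = 1;     /* MUST reap at least 1 event */

        return min_events;
}

int main(void)
{
        /* iodepth_batch_complete=0, 16 I/Os in flight, queue full */
        printf("%u\n", reap_count(0, 16, 1));   /* prints 1, not 0 */
        return 0;
}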
td_set_runstate(td, TD_RUNNING);

while ((td->o.read_iolog_file && !flist_empty(&td->io_log_list)) ||
-       (!flist_empty(&td->trim_list)) || !io_bytes_exceeded(td)) {
+       (!flist_empty(&td->trim_list)) || !io_bytes_exceeded(td) ||
+       td->o.time_based) {
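The new td->o.time_based term keeps a time-driven job inside the I/O loop even after its byte budget is exhausted, since such a job wraps and keeps issuing I/O until its runtime expires rather than until a transfer size is met. A restatement of the loop guard as a standalone predicate; the struct below is an illustrative stand-in, not fio's struct thread_data:

#include <stdio.h>

/* illustrative stand-ins for the fields the loop guard reads */
struct job {
        int read_iolog_file;    /* replaying a logged workload? */
        int iolog_has_entries;  /* io_log_list non-empty */
        int trims_pending;      /* trim_list non-empty */
        int bytes_exceeded;     /* io_bytes_exceeded(td) returned true */
        int time_based;         /* run until runtime expires, not size */
};

static int keep_running(const struct job *j)
{
        return (j->read_iolog_file && j->iolog_has_entries) ||
                j->trims_pending || !j->bytes_exceeded || j->time_based;
}

int main(void)
{
        /* byte budget exhausted, but the job is time based: keep going */
        struct job j = { 0, 0, 0, 1, 1 };

        printf("%d\n", keep_running(&j));       /* prints 1 */
        return 0;
}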
struct timeval comp_time;
unsigned long bytes_done[2] = { 0, 0 };
int min_evts = 0;

if (full || !td->o.iodepth_batch_complete) {
        min_evts = min(td->o.iodepth_batch_complete,
                        td->cur_depth);
-       if (full && !min_evts && td->o.iodepth_batch_complete != 0)
+       /*
+        * if the queue is full, we MUST reap at least 1 event
+        */
+       if (full && !min_evts)
                min_evts = 1;
if (__should_check_rate(td, 0) ||
    __should_check_rate(td, 1))
        fio_gettime(&comp_time, NULL);
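The context line above was truncated in this excerpt; the completed statement (reconstructed from fio's do_io()) samples comp_time only when a read or write rate limit is configured, the usual motive being to skip the per-batch timestamp cost when nothing consumes it. A small sketch of that pattern; should_check_rate() and maybe_stamp() are hypothetical stand-ins:

#include <stdio.h>
#include <sys/time.h>

/* hypothetical stand-in for __should_check_rate(td, ddir) */
static int should_check_rate(int rate_set_for_ddir)
{
        return rate_set_for_ddir;
}

static void maybe_stamp(struct timeval *comp_time, int rd_rate, int wr_rate)
{
        /*
         * Timestamps are comparatively expensive, so only take one
         * when a read or write rate limit will actually consume it.
         */
        if (should_check_rate(rd_rate) || should_check_rate(wr_rate))
                gettimeofday(comp_time, NULL);
}

int main(void)
{
        struct timeval tv = { 0, 0 };

        maybe_stamp(&tv, 1, 0); /* read rate set: tv gets stamped */
        printf("%ld\n", (long) tv.tv_sec);
        return 0;
}

The lines that follow are from fio's main(): the stray "return 0;" fragment in the excerpt is the tail of its "if (!thread_number)" bail-out (reconstructed below), after which the aggregate bandwidth logs are set up.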
if (!thread_number)
        return 0;
if (write_bw_log) {
-       setup_log(&agg_io_log[DDIR_READ], 0);
-       setup_log(&agg_io_log[DDIR_WRITE], 0);
+       setup_log(&agg_io_log[DDIR_READ], 0, IO_LOG_TYPE_BW);
+       setup_log(&agg_io_log[DDIR_WRITE], 0, IO_LOG_TYPE_BW);
}

startup_mutex = fio_mutex_init(0);
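setup_log() now takes an explicit log type, so a log created for aggregate bandwidth is tagged as IO_LOG_TYPE_BW at creation instead of the type being implied by the call site. A minimal sketch of what a three-argument setup_log() might look like; the enum values and struct fields are assumptions for illustration, not fio's exact definitions:

#include <stdlib.h>

/* assumed log-type tags; fio's real values may differ */
enum {
        IO_LOG_TYPE_LAT = 1,
        IO_LOG_TYPE_CLAT,
        IO_LOG_TYPE_SLAT,
        IO_LOG_TYPE_BW,
};

/* illustrative io_log; fio's real struct carries the samples too */
struct io_log {
        unsigned long avg_msec; /* averaging window, 0 = every sample */
        int log_type;           /* one of the IO_LOG_TYPE_* tags */
};

static void setup_log(struct io_log **log, unsigned long avg_msec,
                      int log_type)
{
        struct io_log *l = calloc(1, sizeof(*l));

        l->avg_msec = avg_msec;
        l->log_type = log_type; /* the new third argument */
        *log = l;
}

int main(void)
{
        struct io_log *agg_io_log[2];

        /* mirrors the hunk: per-direction aggregate bandwidth logs */
        setup_log(&agg_io_log[0], 0, IO_LOG_TYPE_BW);
        setup_log(&agg_io_log[1], 0, IO_LOG_TYPE_BW);

        free(agg_io_log[0]);
        free(agg_io_log[1]);
        return 0;
}

Tagging at creation presumably lets the log writer pick the right units and output format when the log is flushed, without re-deriving the type from context.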