*/
static int check_min_rate(struct thread_data *td, struct timeval *now)
{
+ unsigned long long bytes = 0;
unsigned long spent;
unsigned long rate;
+ /*
+ * No minimum rate set, always ok
+ */
+ if (!td->ratemin)
+ return 0;
+
/*
* allow a 2 second settle period in the beginning
*/
if (mtime_since(&td->start, now) < 2000)
return 0;
+ if (td_read(td))
+ bytes += td->this_io_bytes[DDIR_READ];
+ if (td_write(td))
+ bytes += td->this_io_bytes[DDIR_WRITE];
+
/*
* if rate blocks is set, sample is running
*/
if (td->rate_bytes) {
- unsigned long long bytes = 0;
-
spent = mtime_since(&td->lastrate, now);
if (spent < td->ratecycle)
return 0;
- if (td_read(td))
- bytes += td->this_io_bytes[DDIR_READ];
- if (td_write(td))
- bytes += td->this_io_bytes[DDIR_WRITE];
-
if (bytes < td->rate_bytes) {
fprintf(f_out, "%s: min rate %u not met\n", td->name, td->ratemin);
return 1;
}
}
- td->rate_bytes = bytes;
}
+ td->rate_bytes = bytes;
memcpy(&td->lastrate, now, sizeof(*now));
return 0;
}
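
[Note: the hunk above elides the branch taken when the byte count does pass the check; in fio that branch derives the achieved rate from the delta since the last sample. A rough sketch of that logic, not part of this patch, assuming bytes is a byte count and spent is in msec (so bytes/spent approximates KiB/sec):]

	rate = (bytes - td->rate_bytes) / spent;	/* bytes per msec ~= KiB/sec */
	if (rate < td->ratemin) {
		fprintf(f_out, "%s: min rate %u not met, got %luKiB/sec\n",
				td->name, td->ratemin, rate);
		return 1;
	}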
/*
* get immediately available events, if any
*/
- r = io_u_queued_complete(td, 0, NULL);
+ r = io_u_queued_complete(td, 0);
if (r < 0)
return;
}
if (td->cur_depth)
- r = io_u_queued_complete(td, td->cur_depth, NULL);
+ r = io_u_queued_complete(td, td->cur_depth);
}
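
[Note: the repeated two-argument calls above are the heart of this patch: io_u_sync_complete() and io_u_queued_complete() no longer take an end_io callback, because the callback now travels on the io_u itself (see the io_u->end_io = verify_io_u assignment in do_verify below). A minimal sketch of how the completion path might dispatch it; the io_completion_data (icd) bookkeeping is assumed/elided:]

	/*
	 * Sketch only: invoke the per-io_u verifier, if one was attached
	 * at setup time, instead of a callback passed in by the reaper.
	 */
	if (io_u->end_io) {
		int ret = io_u->end_io(io_u);	/* e.g. verify_io_u */

		if (ret && !icd->error)
			icd->error = ret;
	}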
/*
put_io_u(td, io_u);
return 1;
} else if (ret == FIO_Q_QUEUED) {
- if (io_u_queued_complete(td, 1, NULL) < 0)
+ if (io_u_queued_complete(td, 1) < 0)
return 1;
} else if (ret == FIO_Q_COMPLETED) {
if (io_u->error) {
return 1;
}
- if (io_u_sync_complete(td, io_u, NULL) < 0)
+ if (io_u_sync_complete(td, io_u) < 0)
return 1;
} else if (ret == FIO_Q_BUSY) {
		if (td_io_commit(td))
			return 1;
		goto requeue;
	}
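
[Note: all of these hunks branch on the same td_io_queue() return contract; summarized as a comment sketch (phrasing mine, not from the patch):]

	/*
	 * FIO_Q_COMPLETED: the io_u finished inline; reap it now with
	 *                  io_u_sync_complete().
	 * FIO_Q_QUEUED:    the io_u is in flight; reap it later with
	 *                  io_u_queued_complete().
	 * FIO_Q_BUSY:      nothing could be queued; flush pending io_us
	 *                  with td_io_commit() and requeue this one.
	 */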
/*
- * The main verify engine. Runs over the writes we previusly submitted,
+ * The main verify engine. Runs over the writes we previously submitted,
* reads the blocks back in, and checks the crc/md5 of the data.
*/
static void do_verify(struct thread_data *td)
put_io_u(td, io_u);
break;
}
+
+ io_u->end_io = verify_io_u;
requeue:
ret = td_io_queue(td, io_u);
io_u->xfer_buf += bytes;
goto requeue;
}
- ret = io_u_sync_complete(td, io_u, verify_io_u);
+ ret = io_u_sync_complete(td, io_u);
if (ret < 0)
break;
continue;
* Reap required number of io units, if any, and do the
* verification on them through the callback handler
*/
- if (io_u_queued_complete(td, min_events, verify_io_u) < 0)
+ if (io_u_queued_complete(td, min_events) < 0)
break;
}
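
[Note: the comment above still says "through the callback handler"; with this patch the handler is the end_io hook attached when the io_u was set up, so the before/after reads:]

	/* before: verifier passed at reap time */
	ret = io_u_queued_complete(td, min_events, verify_io_u);

	/* after: verifier attached at setup time, reap is generic */
	io_u->end_io = verify_io_u;	/* done once, in do_verify */
	ret = io_u_queued_complete(td, min_events);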
- if (td->cur_depth)
+ if (!td->error) {
+ min_events = td->cur_depth;
+
+ if (min_events)
+ ret = io_u_queued_complete(td, min_events);
+ } else
cleanup_pending_aio(td);
td_set_runstate(td, TD_RUNNING);
goto requeue;
}
fio_gettime(&comp_time, NULL);
- bytes_done = io_u_sync_complete(td, io_u, NULL);
+ bytes_done = io_u_sync_complete(td, io_u);
if (bytes_done < 0)
ret = bytes_done;
break;
}
fio_gettime(&comp_time, NULL);
- bytes_done = io_u_queued_complete(td, min_evts, NULL);
+ bytes_done = io_u_queued_complete(td, min_evts);
if (bytes_done < 0)
break;
}
if (!td->error) {
struct fio_file *f;
- if (td->cur_depth)
- cleanup_pending_aio(td);
+ i = td->cur_depth;
+ if (i)
+ ret = io_u_queued_complete(td, i);
if (should_fsync(td) && td->end_fsync) {
td_set_runstate(td, TD_FSYNCING);
for_each_file(td, f, i)
fio_io_sync(td, f);
}
- }
+ } else
+ cleanup_pending_aio(td);
}
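
[Note: do_verify and do_io now share one shutdown shape: a clean run drains the remaining depth through the normal completion path, so accounting and end_io hooks still fire, while an errored run just cancels what is pending. Condensed, with names as in the hunks above:]

	if (!td->error) {
		if (td->cur_depth)
			ret = io_u_queued_complete(td, td->cur_depth);

		if (should_fsync(td) && td->end_fsync) {
			td_set_runstate(td, TD_FSYNCING);
			for_each_file(td, f, i)
				fio_io_sync(td, f);
		}
	} else
		cleanup_pending_aio(td);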
static void cleanup_io_u(struct thread_data *td)