Rate limiting logic was using thread_data->cur_depth to decide the
min_evts number to ask for during its "flush" prior to sleeping.
td->cur_depth, however, does not properly track in-flight IOs submitted
to the async engines. A field has been added to the thread_data
structure and is now used instead to track the IOs currently in flight.
Signed-off-by: Ryan Marchand <rmarchan@amazon.com>
Signed-off-by: Steven Noonan <snoonan@amazon.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
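
To illustrate the accounting this patch introduces, here is a minimal
standalone sketch (hypothetical struct and function names, not fio code)
of how an in-flight counter is advanced when queued IOs are committed and
retired when completions are reaped, and why it is the right quantity to
flush before a rate-limiting sleep:

	#include <stdio.h>

	/* Hypothetical stand-in for the relevant thread_data counters. */
	struct td_counters {
		unsigned int io_u_queued;	/* prepared, not yet handed to the engine */
		unsigned int io_u_in_flight;	/* handed to the async engine, not yet reaped */
	};

	/* Hand all queued io_u's to the (imaginary) async engine. */
	static void commit_queued(struct td_counters *td)
	{
		td->io_u_in_flight += td->io_u_queued;
		td->io_u_queued = 0;
	}

	/* Reap up to 'nr' completions from the (imaginary) engine. */
	static void reap_events(struct td_counters *td, unsigned int nr)
	{
		if (nr > td->io_u_in_flight)
			nr = td->io_u_in_flight;
		td->io_u_in_flight -= nr;
	}

	int main(void)
	{
		struct td_counters td = { .io_u_queued = 4, .io_u_in_flight = 0 };

		commit_queued(&td);	/* 4 IOs now owned by the engine */
		reap_events(&td, 1);	/* 3 still outstanding */

		/*
		 * Before sleeping for rate limiting, wait for exactly the IOs
		 * the engine still owns; a "might do" depth would over- or
		 * under-wait and skew latency numbers.
		 */
		printf("flush %u in-flight IOs before sleeping\n", td.io_u_in_flight);
		return 0;
	}

The key point is that the in-flight counter only covers requests the
engine actually owns, so waiting for exactly that many completions drains
the device without waiting on IOs that were never submitted.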
 	struct ioengine_ops *io_ops;
 
 	/*
-	 * Current IO depth and list of free and busy io_u's.
+	 * Queue depth of io_u's that fio MIGHT do
 	 */
 	unsigned int cur_depth;
+
+	/*
+	 * io_u's about to be committed
+	 */
 	unsigned int io_u_queued;
+
+	/*
+	 * io_u's submitted but not completed yet
+	 */
+	unsigned int io_u_in_flight;
+
+	/*
+	 * List of free and busy io_u's
+	 */
 	struct flist_head io_u_freelist;
 	struct flist_head io_u_busylist;
 	struct flist_head io_u_requeues;
 	/*
 	 * We are going to sleep, ensure that we flush anything pending as
-	 * not to skew our latency numbers
+	 * not to skew our latency numbers.
+	 *
+	 * Changed to only monitor 'in flight' requests here instead of the
+	 * td->cur_depth, b/c td->cur_depth does not accurately represent
+	 * io's that have been actually submitted to an async engine,
+	 * and cur_depth is meaningless for sync engines.
 	 */
-	if (td->cur_depth) {
+	if (td->io_u_in_flight) {
-		ret = io_u_queued_complete(td, td->cur_depth, NULL);
+		ret = io_u_queued_complete(td, td->io_u_in_flight, NULL);
 	if (max && td->io_ops->getevents)
 		r = td->io_ops->getevents(td, min, max, t);
 out:
-	if (r >= 0)
+	if (r >= 0) {
+		/*
+		 * Reflect that our submitted requests were retrieved with
+		 * whatever OS async calls are in the underlying engine.
+		 */
+		td->io_u_in_flight -= r;
 		io_u_mark_complete(td, r);
-	else
+	} else
 		td_verror(td, r, "get_events");
 
 	dprint(FD_IO, "getevents: %d\n", r);
 	if (!td->cur_depth || !td->io_u_queued)
 		return 0;
 
-	io_u_mark_depth(td, td->io_u_queued);
-	td->io_u_queued = 0;
+	io_u_mark_depth(td, td->io_u_queued);
 
 	if (td->io_ops->commit) {
 		ret = td->io_ops->commit(td);
 		if (ret)
 			td_verror(td, -ret, "io commit");
 	}
+
+	/*
+	 * Reflect that events were submitted as async IO requests.
+	 */
+	td->io_u_in_flight += td->io_u_queued;
+	td->io_u_queued = 0;