For rate limiting or thinktime handling, ensure that we have
no busy IO when we do spin or sleep. Otherwise we potentially
skew the latencies a lot, since events could have been reaped.
This was already working for rate iops; this change just abstracts it
out and ensures that we do the same for thinktime. Only a problem for
certain workloads, those with queue depth > 1.
Signed-off-by: Jens Axboe <axboe@kernel.dk>
if (!(b % td->o.thinktime_blocks)) {
int left;
if (!(b % td->o.thinktime_blocks)) {
int left;
if (td->o.thinktime_spin)
usec_spin(td->o.thinktime_spin);
if (td->o.thinktime_spin)
usec_spin(td->o.thinktime_spin);
+/*
+ * Drain all in-flight io_us before the thread spins or sleeps (rate
+ * limiting or thinktime), so that events reaped after the pause do not
+ * skew the recorded latencies. NOTE(review): assumes io_u_queued_complete()
+ * decrements td->io_u_in_flight as completions are reaped — verify in io_u.c.
+ */
+void io_u_quiesce(struct thread_data *td)
+{
+	/*
+	 * We are going to sleep, ensure that we flush anything pending as
+	 * not to skew our latency numbers.
+	 *
+	 * Changed to only monitor 'in flight' requests here instead of the
+	 * td->cur_depth, b/c td->cur_depth does not accurately represent
+	 * io's that have been actually submitted to an async engine,
+	 * and cur_depth is meaningless for sync engines.
+	 */
+	while (td->io_u_in_flight) {
+		/* return value deliberately unused; we only care about draining */
+		int fio_unused ret;
+
+		ret = io_u_queued_complete(td, 1, NULL);
+	}
+}
+
static enum fio_ddir rate_ddir(struct thread_data *td, enum fio_ddir ddir)
{
enum fio_ddir odir = ddir ^ 1;
static enum fio_ddir rate_ddir(struct thread_data *td, enum fio_ddir ddir)
{
enum fio_ddir odir = ddir ^ 1;
} else
usec = td->rate_pending_usleep[ddir];
} else
usec = td->rate_pending_usleep[ddir];
- /*
- * We are going to sleep, ensure that we flush anything pending as
- * not to skew our latency numbers.
- *
- * Changed to only monitor 'in flight' requests here instead of the
- * td->cur_depth, b/c td->cur_depth does not accurately represent
- * io's that have been actually submitted to an async engine,
- * and cur_depth is meaningless for sync engines.
- */
- while (td->io_u_in_flight) {
- int fio_unused ret;
-
- ret = io_u_queued_complete(td, 1, NULL);
- }
fio_gettime(&t, NULL);
usec_sleep(td, usec);
fio_gettime(&t, NULL);
usec_sleep(td, usec);
extern int __must_check io_u_sync_complete(struct thread_data *, struct io_u *, uint64_t *);
extern int __must_check io_u_queued_complete(struct thread_data *, int, uint64_t *);
extern void io_u_queued(struct thread_data *, struct io_u *);
extern int __must_check io_u_sync_complete(struct thread_data *, struct io_u *, uint64_t *);
extern int __must_check io_u_queued_complete(struct thread_data *, int, uint64_t *);
extern void io_u_queued(struct thread_data *, struct io_u *);
+extern void io_u_quiesce(struct thread_data *);
extern void io_u_log_error(struct thread_data *, struct io_u *);
extern void io_u_mark_depth(struct thread_data *, unsigned int);
extern void fill_io_buffer(struct thread_data *, void *, unsigned int, unsigned int);
extern void io_u_log_error(struct thread_data *, struct io_u *);
extern void io_u_mark_depth(struct thread_data *, unsigned int);
extern void fill_io_buffer(struct thread_data *, void *, unsigned int, unsigned int);