switch (msec) {
default:
index++;
- case 1024 ... 2047:
+ case 1000 ... 1999:
index++;
- case 512 ... 1023:
+ case 750 ... 999:
index++;
- case 256 ... 511:
+ case 500 ... 749:
index++;
- case 128 ... 255:
+ case 250 ... 499:
index++;
- case 64 ... 127:
+ case 100 ... 249:
index++;
- case 32 ... 63:
+ case 50 ... 99:
index++;
- case 16 ... 31:
+ case 20 ... 49:
index++;
- case 8 ... 15:
+ case 10 ... 19:
index++;
- case 4 ... 7:
+ case 4 ... 9:
index++;
case 2 ... 3:
index++;
* from a requeue, io_u already setup
*/
if (io_u->file)
- return io_u;
+ goto out;
f = get_next_file(td);
if (!f) {
/*
* Set io data pointers.
*/
+out:
io_u->xfer_buf = io_u->buf;
io_u->xfer_buflen = io_u->buflen;
}
}
+/*
+ * Complete a single io_u for the sync engines.
+ *
+ * NOTE(review): in this diff fragment the visible body is only
+ * "return -1;". The real completion logic appears to have been
+ * elided along with the hunk headers — confirm against the full
+ * io_u.c before treating this as the actual implementation.
+ */
long io_u_sync_complete(struct thread_data *td, struct io_u *io_u,
		endio_handler *handler)
{
	return -1;
}
+/*
+ * Called to complete min_events number of io for the async engines.
+ */
long io_u_queued_complete(struct thread_data *td, int min_events,
endio_handler *handler)
{
- struct timespec ts = { .tv_sec = 0, .tv_nsec = 0, };
- struct timespec *tsp = NULL;
struct io_completion_data icd;
+ struct timespec *tvp = NULL;
int ret;
if (min_events > 0) {
- tsp = &ts;
ret = td_io_commit(td);
if (ret < 0) {
td_verror(td, -ret);
return ret;
}
+ } else {
+ struct timespec ts = { .tv_sec = 0, .tv_nsec = 0, };
+
+ tvp = &ts;
}
- ret = td_io_getevents(td, min_events, td->cur_depth, tsp);
+ ret = td_io_getevents(td, min_events, td->cur_depth, tvp);
if (ret < 0) {
td_verror(td, -ret);
return ret;
return -1;
}
+
+/*
+ * Call when io_u is really queued, to update the submission latency.
+ *
+ * Measures the interval from io_u->start_time to io_u->issue_time
+ * with mtime_since() (presumably milliseconds, per the helper's name
+ * — confirm its units) and records it as a submission-latency sample
+ * for this io_u's data direction (io_u->ddir).
+ */
+void io_u_queued(struct thread_data *td, struct io_u *io_u)
+{
+	unsigned long slat_time;
+
+	slat_time = mtime_since(&io_u->start_time, &io_u->issue_time);
+	add_slat_sample(td, io_u->ddir, slat_time);
+}