return sec + usec;
}
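+/*
+ * Convert a timeval to milliseconds, used for coarse per-io
+ * latency tracking below.
+ */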
+static inline unsigned long msec_now(struct timeval *s)
+{
+ return s->tv_sec * 1000 + s->tv_usec / 1000;
+}
+
static unsigned long get_next_offset(struct thread_data *td)
{
unsigned long b;
td->aio_iocbs_status[offset] = 0;
}
-static struct iocb *aio_get_iocb(struct thread_data *td, char *buffer)
+static struct iocb *aio_get_iocb(struct thread_data *td, char *buffer,
+ struct timeval *t)
{
struct iocb *iocb = NULL;
int i;
io_prep_pread(iocb, td->fd, p, td->bs, off);
else
io_prep_pwrite(iocb, td->fd, p, td->bs, off);
+
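+ /*
+ * Stash the issue time in the iocb: io_set_callback() just
+ * stores its argument in iocb->data, which iocb_time() reads
+ * back when the completion is reaped.
+ */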
+ io_set_callback(iocb, (io_callback_t) msec_now(t));
}
return iocb;
}
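+/*
+ * Retrieve the issue time that aio_get_iocb() stashed in ->data.
+ */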
+#define iocb_time(iocb) ((unsigned long) (iocb)->data)
+
static void do_async_io(struct thread_data *td)
{
struct timeval s, e;
gettimeofday(&s, NULL);
- iocb = aio_get_iocb(td, buf);
+ iocb = aio_get_iocb(td, buf, &s);
ret = io_submit(*td->aio_ctx, 1, &iocb);
if (ret < 0) {
} else if (!ret)
continue;
+ gettimeofday(&e, NULL);
+
for (i = 0; i < ret; i++) {
struct io_event *ev = td->aio_events + i;
td->aio_cur_depth--;
iocb = ev->obj;
+
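+ /*
+ * Latency for this particular completion, not the whole
+ * submitted batch.
+ */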
+ msec = msec_now(&e) - iocb_time(iocb);
+ add_stat_sample(td, msec);
+
+ if (msec < td->min_latency)
+ td->min_latency = msec;
+ if (msec > td->max_latency)
+ td->max_latency = msec;
+
aio_put_iocb(td, iocb);
}
- gettimeofday(&e, NULL);
-
+ /*
+ * the rate throttle is batched for now; it should work for
+ * batches of completions, except the very first one which may
+ * look a little bursty
+ */
usec = utime_since(&s, &e);
rate_throttle(td, usec);
td->error = ENODATA;
break;
}
-
- msec = usec / 1000;
- add_stat_sample(td, msec);
-
- if (msec < td->min_latency)
- td->min_latency = msec;
- if (msec > td->max_latency)
- td->max_latency = msec;
}
gettimeofday(&e, NULL);