char *buf;
off_t off;
- *back = 0;
+ if (back)
+ *back = 0;
left = io_u->buflen;
buf = io_u->buf;
i.buflen = hdr->len;
if (hdr->len > left) {
- *back = left;
+ if (back)
+ *back = left;
return 0;
}
if (verify_io_u(td, &i)) {
printf("failed verify at offset %lu\n", (unsigned long) off);
+ td->error = EBADMSG;
return 1;
}
td->cur_depth--;
}
+#define queue_full(td) (list_empty(&(td)->io_u_freelist))
+
static struct io_u *__get_io_u(struct thread_data *td)
{
struct io_u *io_u;
- if (list_empty(&td->io_u_freelist))
+ if (queue_full(td))
return NULL;
io_u = list_entry(td->io_u_freelist.next, struct io_u, list);
#define iocb_time(iocb) ((unsigned long) (iocb)->data)
#define ev_to_iou(ev) (struct io_u *) ((unsigned long) (ev)->obj)
-static int ios_completed(struct thread_data *td, int nr, int stats)
+static int ios_completed(struct thread_data *td, int nr)
{
unsigned long msec;
struct io_u *io_u;
struct timeval e;
int i, bytes_done;
- if (stats)
- gettimeofday(&e, NULL);
+ gettimeofday(&e, NULL);
for (i = 0, bytes_done = 0; i < nr; i++) {
io_u = ev_to_iou(td->aio_events + i);
- if (stats) {
- td->io_blocks++;
- td->io_kb += io_u->buflen >> 10;
- td->this_io_kb += io_u->buflen >> 10;
+ td->io_blocks++;
+ td->io_kb += io_u->buflen >> 10;
+ td->this_io_kb += io_u->buflen >> 10;
- msec = mtime_since(&io_u->issue_time, &e);
+ msec = mtime_since(&io_u->issue_time, &e);
- add_clat_sample(td, msec);
- add_bw_sample(td, msec);
- }
+ add_clat_sample(td, msec);
+ add_bw_sample(td, msec);
bytes_done += io_u->buflen;
put_io_u(td, io_u);
*/
r = io_getevents(td->aio_ctx, 0, td->cur_depth, td->aio_events, &ts);
if (r > 0)
- ios_completed(td, r, 1);
+ ios_completed(td, r);
/*
* now cancel remaining active events
if (td->cur_depth) {
r = io_getevents(td->aio_ctx, td->cur_depth, td->cur_depth, td->aio_events, NULL);
if (r > 0)
- ios_completed(td, r, 1);
+ ios_completed(td, r);
}
}
static int do_async_verify(struct thread_data *td)
{
struct timeval t;
- struct io_u *io_u;
+ struct io_u *io_u, *v_io_u;
+ struct verify_header *hdr;
int ret, back;
+ char *p;
td_set_runstate(td, TD_VERIFYING);
td->cur_off = 0;
td->last_kb = 0;
+ v_io_u = NULL;
do {
if (td->terminate)
break;
}
- ret = io_getevents(td->aio_ctx, 1, td->cur_depth, td->aio_events, NULL);
- if (ret < 0) {
- td->error = errno;
+ /*
+ * we have one io_u pending verify; verify it now, while
+ * the io on the next one is already in flight
+ */
+ if (v_io_u) {
+ ret = verify_io_us(td, v_io_u, NULL);
+ put_io_u(td, v_io_u);
+ v_io_u = NULL;
+ if (ret)
+ break;
+ }
+
+ ret = io_getevents(td->aio_ctx, 1, 1, td->aio_events, NULL);
+ if (ret != 1) {
+ if (ret < 0)
+ td->error = ret;
break;
}
- td->cur_off += ios_completed(td, ret, 0);
+ /*
+ * got our io_u to verify, find back offset so we can
+ * submit the next one before verifying this one
+ */
+ v_io_u = ev_to_iou(td->aio_events);
+ p = v_io_u->buf;
+ back = v_io_u->buflen;
+ do {
+ hdr = (struct verify_header *) p;
+
+ if (hdr->len > back)
+ break;
- if (verify_io_us(td, io_u, &back))
- break;
+ back -= hdr->len;
+ p += hdr->len;
+ } while (back);
- td->cur_off -= back;
+ td->cur_off += (v_io_u->buflen - back);
+
+ /*
+ * if max depth is 1, we need to verify now
+ */
+ if (queue_full(td)) {
+ ret = verify_io_us(td, v_io_u, NULL);
+ put_io_u(td, v_io_u);
+ v_io_u = NULL;
+ if (ret)
+ break;
+ }
} while (1);
+ if (v_io_u) {
+ verify_io_us(td, v_io_u, NULL);
+ put_io_u(td, v_io_u);
+ }
+
if (td->cur_depth)
cleanup_pending_aio(td);
} else if (!ret)
continue;
- bytes_done = ios_completed(td, ret, 1);
+ bytes_done = ios_completed(td, ret);
/*
* the rate is batched for now, it should work for batches