 	io_u = list_entry(td->io_u_freelist.next, struct io_u, list);
 	list_del(&io_u->list);
 	list_add(&io_u->list, &td->io_u_busylist);
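+	/* the io_u is busy from here on, so account the queue depth now */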
+	td->cur_depth++;
 	return io_u;
 }
 	}
 	gettimeofday(&io_u->start_time, NULL);
-	td->cur_depth++;
 	return io_u;
 }
 #define iocb_time(iocb)	((unsigned long) (iocb)->data)
 #define ev_to_iou(ev)	(struct io_u *) ((unsigned long) (ev)->obj)
-static int ios_completed(struct thread_data *td, int nr)
+static int ios_completed(struct thread_data *td, int nr, int stats)
 {
 	unsigned long msec;
 	struct io_u *io_u;
 	struct timeval e;
 	int i, bytes_done;
-	gettimeofday(&e, NULL);
+	if (stats)
+		gettimeofday(&e, NULL);
 	for (i = 0, bytes_done = 0; i < nr; i++) {
 		io_u = ev_to_iou(td->aio_events + i);
-		td->io_blocks++;
-		td->io_kb += io_u->buflen >> 10;
-		td->this_io_kb += io_u->buflen >> 10;
-		bytes_done += io_u->buflen;
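+		/* only do block/bandwidth/latency accounting when wanted */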
+		if (stats) {
+			td->io_blocks++;
+			td->io_kb += io_u->buflen >> 10;
+			td->this_io_kb += io_u->buflen >> 10;
-		msec = mtime_since(&io_u->issue_time, &e);
+			msec = mtime_since(&io_u->issue_time, &e);
-		add_clat_sample(td, msec);
-		add_bw_sample(td, msec);
+			add_clat_sample(td, msec);
+			add_bw_sample(td, msec);
+		}
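+		/* the byte count is returned to the caller either way */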
+		bytes_done += io_u->buflen;
 		put_io_u(td, io_u);
 	}
 	 */
 	r = io_getevents(td->aio_ctx, 0, td->cur_depth, td->aio_events, &ts);
 	if (r > 0)
-		ios_completed(td, r);
+		ios_completed(td, r, 1);
 	/*
 	 * now cancel remaining active events
 	if (td->cur_depth) {
 		r = io_getevents(td->aio_ctx, td->cur_depth, td->cur_depth, td->aio_events, NULL);
 		if (r > 0)
-			ios_completed(td, r);
+			ios_completed(td, r, 1);
 	}
 }
 static int do_async_verify(struct thread_data *td)
 {
-	/*
-	 * need to implement the async version
-	 */
-	return do_sync_verify(td);
+	struct timeval t;
+	struct io_u *io_u;
+	int ret, back;
+
+	td_set_runstate(td, TD_VERIFYING);
+
+	td->cur_off = 0;
+	td->last_kb = 0;
+
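+	/*
+	 * read the file back sequentially in max_bs sized chunks and
+	 * verify the contents as the reads complete
+	 */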
+	do {
+		if (td->terminate)
+			break;
+
+		gettimeofday(&t, NULL);
+		if (runtime_exceeded(td, &t))
+			break;
+
+		io_u = __get_io_u(td);
+		if (!io_u)
+			break;
+
+		io_u->offset = td->cur_off;
+		io_u->buflen = td->max_bs;
+
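+		/*
+		 * clip the last read so it doesn't extend past end of file
+		 */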
+		if (io_u->offset + io_u->buflen > (td->kb << 10)) {
+			io_u->buflen = (td->kb << 10) - io_u->offset;
+			if (!io_u->buflen) {
+				put_io_u(td, io_u);
+				break;
+			}
+		}
+
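+		/*
+		 * queue an async read of this block
+		 */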
+		io_prep_pread(&io_u->iocb, td->fd, io_u->buf, io_u->buflen, io_u->offset);
+		ret = io_u_queue(td, io_u);
+		if (ret) {
+			put_io_u(td, io_u);
+			td->error = ret;
+			break;
+		}
+
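+		/*
+		 * wait for at least one event, reaping as many as are done
+		 */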
+		ret = io_getevents(td->aio_ctx, 1, td->cur_depth, td->aio_events, NULL);
+		if (ret < 0) {
+			td->error = errno;
+			break;
+		}
+
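+		/*
+		 * stats are off, so verify reads don't skew the job
+		 * accounting; ios_completed() returns the bytes read
+		 */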
+		td->cur_off += ios_completed(td, ret, 0);
+
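+		/*
+		 * verify the completed reads; 'back' is how far to rewind
+		 * the offset for data that still needs rereading
+		 */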
+		if (verify_io_us(td, io_u, &back))
+			break;
+
+		td->cur_off -= back;
+	} while (1);
+
+	if (td->cur_depth)
+		cleanup_pending_aio(td);
+
+	td_set_runstate(td, TD_RUNNING);
+	return td->error == 0;
 }
static void do_async_io(struct thread_data *td)
 		} else if (!ret)
 			continue;
-		bytes_done = ios_completed(td, ret);
+		bytes_done = ios_completed(td, ret, 1);
 		/*
 		 * the rate is batched for now, it should work for batches
 		case TD_REAPED:
 			c = '_';
 			break;
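+		/* exited, but not yet reaped */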
+		case TD_EXITED:
+			c = 'E';
+			break;
 		case TD_RUNNING:
 			c = '+';
 			break;