+
+/*
+ * Synchronously complete a single previously submitted io_u.
+ * The handler is carried in the icd and applied by the completion
+ * path; the io_u is returned to the free pool regardless of outcome.
+ * Returns the sum of both bytes_done direction counters on success,
+ * or -1 after recording the completion error via td_verror().
+ */
+long io_u_sync_complete(struct thread_data *td, struct io_u *io_u,
+ endio_handler *handler)
+{
+ struct io_completion_data icd;
+
+ init_icd(&icd, handler, 1); /* exactly one event to account for */
+ io_completed(td, io_u, &icd);
+ put_io_u(td, io_u); /* recycle io_u even if it completed in error */
+
+ if (!icd.error)
+ return icd.bytes_done[0] + icd.bytes_done[1]; /* both directions */
+
+ td_verror(td, icd.error);
+ return -1;
+}
+
+/*
+ * Reap and complete queued io_u events from the io engine.
+ * Asks td_io_getevents() for between min_events and td->cur_depth
+ * completions and runs them through the completion path (the handler
+ * is carried in the icd).  Returns the sum of both bytes_done
+ * direction counters on success, 0 if nothing was reaped, the
+ * negative engine return on getevents failure, or -1 if a reaped
+ * event carried an error; both failure cases record the error via
+ * td_verror().
+ */
+long io_u_queued_complete(struct thread_data *td, int min_events,
+ endio_handler *handler)
+
+{
+ struct timespec ts = { .tv_sec = 0, .tv_nsec = 0, };
+ struct timespec *tsp = NULL;
+ struct io_completion_data icd;
+ int ret;
+
+ /*
+ * NOTE(review): the zeroed timespec (a non-blocking poll) is
+ * installed precisely when min_events > 0, i.e. when the caller
+ * asked to wait for completions, while min_events == 0 passes a
+ * NULL timeout.  This looks as if it may be inverted -- confirm
+ * how td_io_getevents() interprets NULL vs. a zero timespec.
+ */
+ if (min_events > 0)
+ tsp = &ts;
+
+ ret = td_io_getevents(td, min_events, td->cur_depth, tsp);
+ if (ret < 0) {
+ td_verror(td, -ret); /* engine reports a negative error value */
+ return ret;
+ } else if (!ret)
+ return ret; /* nothing reaped, nothing to account */
+
+ init_icd(&icd, handler, ret);
+ ios_completed(td, &icd);
+ if (!icd.error)
+ return icd.bytes_done[0] + icd.bytes_done[1]; /* both directions */
+
+ td_verror(td, icd.error);
+ return -1;
+}