td->cur_depth--;
}
+/*
+ * Move a previously issued io_u back onto the requeue list so it can be
+ * reissued later. Takes io_u by double pointer so the caller's reference
+ * is cleared, and drops the current queue depth to match.
+ */
+void requeue_io_u(struct thread_data *td, struct io_u **io_u)
+{
+ struct io_u *__io_u = *io_u;
+
+ /* unlink from the busy list and park on the requeue list */
+ list_del(&__io_u->list);
+ list_add_tail(&__io_u->list, &td->io_u_requeues);
+ td->cur_depth--;
+ *io_u = NULL;
+}
+
static int fill_io_u(struct thread_data *td, struct fio_file *f,
struct io_u *io_u)
{
/*
* see if it's time to sync
*/
- if (td->fsync_blocks && !(td->io_blocks[DDIR_WRITE] % td->fsync_blocks)
- && should_fsync(td)) {
+ if (td->fsync_blocks && !(td->io_issues[DDIR_WRITE] % td->fsync_blocks)
+ && td->io_issues[DDIR_WRITE] && should_fsync(td)) {
io_u->ddir = DDIR_SYNC;
io_u->file = f;
return 0;
td->io_u_lat[index]++;
}
+/*
+ * Round-robin selection of the next open file, starting at td->next_file
+ * and wrapping at td->nr_files. Returns NULL if a full cycle through
+ * td->files finds no file with a valid descriptor.
+ */
+static struct fio_file *get_next_file(struct thread_data *td)
+{
+ unsigned int old_next_file = td->next_file;
+ struct fio_file *f;
+
+ do {
+ f = &td->files[td->next_file];
+
+ td->next_file++;
+ if (td->next_file >= td->nr_files)
+ td->next_file = 0;
+
+ /* fd == -1 means this file is not open; keep scanning */
+ if (f->fd != -1)
+ break;
+
+ f = NULL;
+ } while (td->next_file != old_next_file);
+
+ return f;
+}
+
struct io_u *__get_io_u(struct thread_data *td)
{
struct io_u *io_u = NULL;
- if (!queue_full(td)) {
+ if (!list_empty(&td->io_u_requeues))
+ io_u = list_entry(td->io_u_requeues.next, struct io_u, list);
+ else if (!queue_full(td)) {
io_u = list_entry(td->io_u_freelist.next, struct io_u, list);
io_u->buflen = 0;
- io_u->error = 0;
io_u->resid = 0;
+ io_u->file = NULL;
+ }
+
+ if (io_u) {
+ io_u->error = 0;
list_del(&io_u->list);
list_add(&io_u->list, &td->io_u_busylist);
td->cur_depth++;
* Return an io_u to be processed. Gets a buflen and offset, sets direction,
* etc. The returned io_u is fully ready to be prepped and submitted.
*/
-struct io_u *get_io_u(struct thread_data *td, struct fio_file *f)
+struct io_u *get_io_u(struct thread_data *td)
{
+ struct fio_file *f;
struct io_u *io_u;
io_u = __get_io_u(td);
if (!io_u)
return NULL;
+ /*
+ * from a requeue, io_u already setup
+ */
+ if (io_u->file)
+ goto out;
+
+ f = get_next_file(td);
+ if (!f) {
+ put_io_u(td, io_u);
+ return NULL;
+ }
+
+ io_u->file = f;
+
if (td->zone_bytes >= td->zone_size) {
td->zone_bytes = 0;
f->last_pos += td->zone_skip;
/*
* Set io data pointers.
*/
+out:
io_u->xfer_buf = io_u->buf;
io_u->xfer_buflen = io_u->buflen;
if (!icd.error)
return icd.bytes_done[0] + icd.bytes_done[1];
- td_verror(td, icd.error);
return -1;
}
endio_handler *handler)
{
- struct timespec ts = { .tv_sec = 0, .tv_nsec = 0, };
- struct timespec *tsp = NULL;
struct io_completion_data icd;
int ret;
- if (min_events > 0)
- tsp = &ts;
+ if (min_events > 0) {
+ ret = td_io_commit(td);
+ if (ret < 0) {
+ td_verror(td, -ret);
+ return ret;
+ }
+ }
- ret = td_io_getevents(td, min_events, td->cur_depth, tsp);
+ ret = td_io_getevents(td, min_events, td->cur_depth, NULL);
if (ret < 0) {
td_verror(td, -ret);
return ret;
if (!icd.error)
return icd.bytes_done[0] + icd.bytes_done[1];
- td_verror(td, icd.error);
return -1;
}