td->cur_depth--;
}
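+/*
+ * Pull an in-flight io_u off the busy list and park it on the requeue
+ * list, so a later get_io_u() can resubmit it with its file, offset and
+ * buffer intact. Clears the caller's pointer to prevent reuse.
+ */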
+void requeue_io_u(struct thread_data *td, struct io_u **io_u)
+{
+ struct io_u *__io_u = *io_u;
+
+ list_del(&__io_u->list);
+ list_add_tail(&__io_u->list, &td->io_u_requeues);
+ td->cur_depth--;
+ *io_u = NULL;
+}
+
static int fill_io_u(struct thread_data *td, struct fio_file *f,
struct io_u *io_u)
{
/*
* see if it's time to sync
*/
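+	/*
+	 * This keys off issued writes, and the io_issues[DDIR_WRITE] check
+	 * matters because 0 % fsync_blocks is 0, which would otherwise
+	 * turn the very first io_u into a sync.
+	 */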
- if (td->fsync_blocks && !(td->io_blocks[DDIR_WRITE] % td->fsync_blocks)
- && should_fsync(td)) {
+ if (td->fsync_blocks && !(td->io_issues[DDIR_WRITE] % td->fsync_blocks)
+ && td->io_issues[DDIR_WRITE] && should_fsync(td)) {
io_u->ddir = DDIR_SYNC;
io_u->file = f;
return 0;
switch (msec) {
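+	/*
+	 * Buckets are msec ranges; every case falls through, so index ends
+	 * up counting how many bucket boundaries msec crossed: 0 for 0-1
+	 * msec, up through 11 for 2000 msec and beyond.
+	 */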
default:
index++;
- case 1024 ... 2047:
+ case 1000 ... 1999:
index++;
- case 512 ... 1023:
+ case 750 ... 999:
index++;
- case 256 ... 511:
+ case 500 ... 749:
index++;
- case 128 ... 255:
+ case 250 ... 499:
index++;
- case 64 ... 127:
+ case 100 ... 249:
index++;
- case 32 ... 63:
+ case 50 ... 99:
index++;
- case 16 ... 31:
+ case 20 ... 49:
index++;
- case 8 ... 15:
+ case 10 ... 19:
index++;
- case 4 ... 7:
+ case 4 ... 9:
index++;
case 2 ... 3:
index++;
td->io_u_lat[index]++;
}
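+/*
+ * Round robin to the next open file, wrapping at nr_files. Returns NULL
+ * only if every file has been closed (fd == -1).
+ */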
+static struct fio_file *get_next_file(struct thread_data *td)
+{
+ unsigned int old_next_file = td->next_file;
+ struct fio_file *f;
+
+ do {
+ f = &td->files[td->next_file];
+
+ td->next_file++;
+ if (td->next_file >= td->nr_files)
+ td->next_file = 0;
+
+ if (f->fd != -1)
+ break;
+
+ f = NULL;
+ } while (td->next_file != old_next_file);
+
+ return f;
+}
+
struct io_u *__get_io_u(struct thread_data *td)
{
struct io_u *io_u = NULL;
- if (!queue_full(td)) {
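+	/*
+	 * Prefer a requeued io_u: it still carries the file, offset and
+	 * buffer from its original submission, so it skips the re-init
+	 * that a freelist io_u gets.
+	 */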
+ if (!list_empty(&td->io_u_requeues))
+ io_u = list_entry(td->io_u_requeues.next, struct io_u, list);
+ else if (!queue_full(td)) {
io_u = list_entry(td->io_u_freelist.next, struct io_u, list);
io_u->buflen = 0;
- io_u->error = 0;
io_u->resid = 0;
+ io_u->file = NULL;
+ }
+
+ if (io_u) {
+ io_u->error = 0;
list_del(&io_u->list);
list_add(&io_u->list, &td->io_u_busylist);
td->cur_depth++;
* Return an io_u to be processed. Gets a buflen and offset, sets direction,
* etc. The returned io_u is fully ready to be prepped and submitted.
*/
-struct io_u *get_io_u(struct thread_data *td, struct fio_file *f)
+struct io_u *get_io_u(struct thread_data *td)
{
+ struct fio_file *f;
struct io_u *io_u;
io_u = __get_io_u(td);
if (!io_u)
return NULL;
+ /*
+	 * io_u came off the requeue list: it is already set up
+ */
+ if (io_u->file)
+ goto out;
+
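+	/*
+	 * No file attached yet, pick the next open one. If they have all
+	 * been closed, there is no io left to generate.
+	 */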
+ f = get_next_file(td);
+ if (!f) {
+ put_io_u(td, io_u);
+ return NULL;
+ }
+
+ io_u->file = f;
+
if (td->zone_bytes >= td->zone_size) {
td->zone_bytes = 0;
f->last_pos += td->zone_skip;
/*
* Set io data pointers.
*/
+out:
io_u->xfer_buf = io_u->buf;
io_u->xfer_buflen = io_u->buflen;
}
}
+/*
+ * Complete a single io_u for the sync engines.
+ */
long io_u_sync_complete(struct thread_data *td, struct io_u *io_u,
endio_handler *handler)
{
if (!icd.error)
return icd.bytes_done[0] + icd.bytes_done[1];
- td_verror(td, icd.error);
return -1;
}
+/*
+ * Called to complete at least min_events ios for the async engines.
+ */
long io_u_queued_complete(struct thread_data *td, int min_events,
endio_handler *handler)
{
- struct timespec ts = { .tv_sec = 0, .tv_nsec = 0, };
- struct timespec *tsp = NULL;
struct io_completion_data icd;
+ struct timespec *tvp = NULL;
int ret;
- if (min_events > 0)
- tsp = &ts;
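+	/*
+	 * If we have to wait for completions, make sure everything queued
+	 * has actually been committed to the io engine first. Otherwise
+	 * just poll, passing a zeroed timespec so getevents returns at
+	 * once.
+	 */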
+ if (min_events > 0) {
+ ret = td_io_commit(td);
+ if (ret < 0) {
+ td_verror(td, -ret);
+ return ret;
+ }
+ } else {
+ struct timespec ts = { .tv_sec = 0, .tv_nsec = 0, };
+
+ tvp = &ts;
+ }
- ret = td_io_getevents(td, min_events, td->cur_depth, tsp);
+ ret = td_io_getevents(td, min_events, td->cur_depth, tvp);
if (ret < 0) {
td_verror(td, -ret);
return ret;
if (!icd.error)
return icd.bytes_done[0] + icd.bytes_done[1];
- td_verror(td, icd.error);
return -1;
}
+
+/*
+ * Called when io_u is really queued, to update the submission latency.
+ */
+void io_u_queued(struct thread_data *td, struct io_u *io_u)
+{
+ unsigned long slat_time;
+
+ slat_time = mtime_since(&io_u->start_time, &io_u->issue_time);
+ add_slat_sample(td, io_u->ddir, slat_time);
+}