X-Git-Url: https://git.kernel.dk/?p=fio.git;a=blobdiff_plain;f=io_u.c;h=23a64b046861bcf13e68bf316fd5111593468ca4;hp=b0e91e7a1bcc2b52d479df079e671629b8cf3ac8;hb=069c29183fb55f467ba841959ba6731f3e4f2132;hpb=97601024a53586d77a368763f08be9ee483fdc9c

diff --git a/io_u.c b/io_u.c
index b0e91e7a..23a64b04 100644
--- a/io_u.c
+++ b/io_u.c
@@ -199,6 +199,16 @@ void put_io_u(struct thread_data *td, struct io_u *io_u)
 	td->cur_depth--;
 }
 
+void requeue_io_u(struct thread_data *td, struct io_u **io_u)
+{
+	struct io_u *__io_u = *io_u;
+
+	list_del(&__io_u->list);
+	list_add_tail(&__io_u->list, &td->io_u_requeues);
+	td->cur_depth--;
+	*io_u = NULL;
+}
+
 static int fill_io_u(struct thread_data *td, struct fio_file *f,
 		     struct io_u *io_u)
 {
@@ -211,8 +221,8 @@ static int fill_io_u(struct thread_data *td, struct fio_file *f,
 	/*
 	 * see if it's time to sync
 	 */
-	if (td->fsync_blocks && !(td->io_blocks[DDIR_WRITE] % td->fsync_blocks)
-	    && should_fsync(td)) {
+	if (td->fsync_blocks && !(td->io_issues[DDIR_WRITE] % td->fsync_blocks)
+	    && td->io_issues[DDIR_WRITE] && should_fsync(td)) {
 		io_u->ddir = DDIR_SYNC;
 		io_u->file = f;
 		return 0;
@@ -279,23 +289,23 @@ static void io_u_mark_latency(struct thread_data *td, unsigned long msec)
 	switch (msec) {
 	default:
 		index++;
-	case 1024 ... 2047:
+	case 1000 ... 1999:
 		index++;
-	case 512 ... 1023:
+	case 750 ... 999:
 		index++;
-	case 256 ... 511:
+	case 500 ... 749:
 		index++;
-	case 128 ... 255:
+	case 250 ... 499:
 		index++;
-	case 64 ... 127:
+	case 100 ... 249:
 		index++;
-	case 32 ... 63:
+	case 50 ... 99:
 		index++;
-	case 16 ... 31:
+	case 20 ... 49:
 		index++;
-	case 8 ... 15:
+	case 10 ... 19:
 		index++;
-	case 4 ... 7:
+	case 4 ... 9:
 		index++;
 	case 2 ... 3:
 		index++;
@@ -306,16 +316,43 @@ static void io_u_mark_latency(struct thread_data *td, unsigned long msec)
 	td->io_u_lat[index]++;
 }
 
+static struct fio_file *get_next_file(struct thread_data *td)
+{
+	unsigned int old_next_file = td->next_file;
+	struct fio_file *f;
+
+	do {
+		f = &td->files[td->next_file];
+
+		td->next_file++;
+		if (td->next_file >= td->nr_files)
+			td->next_file = 0;
+
+		if (f->fd != -1)
+			break;
+
+		f = NULL;
+	} while (td->next_file != old_next_file);
+
+	return f;
+}
+
 struct io_u *__get_io_u(struct thread_data *td)
 {
 	struct io_u *io_u = NULL;
 
-	if (!queue_full(td)) {
+	if (!list_empty(&td->io_u_requeues))
+		io_u = list_entry(td->io_u_requeues.next, struct io_u, list);
+	else if (!queue_full(td)) {
 		io_u = list_entry(td->io_u_freelist.next, struct io_u, list);
 
 		io_u->buflen = 0;
-		io_u->error = 0;
 		io_u->resid = 0;
+		io_u->file = NULL;
+	}
+
+	if (io_u) {
+		io_u->error = 0;
 		list_del(&io_u->list);
 		list_add(&io_u->list, &td->io_u_busylist);
 		td->cur_depth++;
@@ -329,14 +366,29 @@ struct io_u *__get_io_u(struct thread_data *td)
  * Return an io_u to be processed. Gets a buflen and offset, sets direction,
  * etc. The returned io_u is fully ready to be prepped and submitted.
  */
-struct io_u *get_io_u(struct thread_data *td, struct fio_file *f)
+struct io_u *get_io_u(struct thread_data *td)
 {
+	struct fio_file *f;
 	struct io_u *io_u;
 
 	io_u = __get_io_u(td);
 	if (!io_u)
 		return NULL;
 
+	/*
+	 * from a requeue, io_u already setup
+	 */
+	if (io_u->file)
+		goto out;
+
+	f = get_next_file(td);
+	if (!f) {
+		put_io_u(td, io_u);
+		return NULL;
+	}
+
+	io_u->file = f;
+
 	if (td->zone_bytes >= td->zone_size) {
 		td->zone_bytes = 0;
 		f->last_pos += td->zone_skip;
@@ -371,6 +423,7 @@ struct io_u *get_io_u(struct thread_data *td, struct fio_file *f)
 	/*
 	 * Set io data pointers.
 	 */
+out:
 	io_u->xfer_buf = io_u->buf;
 	io_u->xfer_buflen = io_u->buflen;
 
@@ -453,6 +506,9 @@ static void ios_completed(struct thread_data *td,
 	}
 }
 
+/*
+ * Complete a single io_u for the sync engines.
+ */
 long io_u_sync_complete(struct thread_data *td, struct io_u *io_u,
 			endio_handler *handler)
 {
@@ -465,23 +521,33 @@ long io_u_sync_complete(struct thread_data *td, struct io_u *io_u,
 	if (!icd.error)
 		return icd.bytes_done[0] + icd.bytes_done[1];
 
-	td_verror(td, icd.error);
 	return -1;
 }
 
+/*
+ * Called to complete min_events number of io for the async engines.
+ */
 long io_u_queued_complete(struct thread_data *td, int min_events,
 			  endio_handler *handler)
 
 {
-	struct timespec ts = { .tv_sec = 0, .tv_nsec = 0, };
-	struct timespec *tsp = NULL;
 	struct io_completion_data icd;
+	struct timespec *tvp = NULL;
 	int ret;
 
-	if (min_events > 0)
-		tsp = &ts;
+	if (min_events > 0) {
+		ret = td_io_commit(td);
+		if (ret < 0) {
+			td_verror(td, -ret);
+			return ret;
+		}
+	} else {
+		struct timespec ts = { .tv_sec = 0, .tv_nsec = 0, };
+
+		tvp = &ts;
+	}
 
-	ret = td_io_getevents(td, min_events, td->cur_depth, tsp);
+	ret = td_io_getevents(td, min_events, td->cur_depth, tvp);
 	if (ret < 0) {
 		td_verror(td, -ret);
 		return ret;
@@ -493,6 +559,16 @@ long io_u_queued_complete(struct thread_data *td, int min_events,
 	if (!icd.error)
 		return icd.bytes_done[0] + icd.bytes_done[1];
 
-	td_verror(td, icd.error);
 	return -1;
 }
+
+/*
+ * Call when io_u is really queued, to update the submission latency.
+ */
+void io_u_queued(struct thread_data *td, struct io_u *io_u)
+{
+	unsigned long slat_time;
+
+	slat_time = mtime_since(&io_u->start_time, &io_u->issue_time);
+	add_slat_sample(td, io_u->ddir, slat_time);
+}
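
For reference, below is a minimal sketch of how a backend loop might drive the
reworked interface. It is not part of this commit: it assumes fio's internal
headers, a td_io_queue() submission step with FIO_Q_COMPLETED/FIO_Q_QUEUED/
FIO_Q_BUSY return codes from the same queuing rework (not shown in this diff),
and do_one_io() is a hypothetical helper name.

/*
 * Illustrative sketch only -- not part of this commit.  Assumes fio's
 * internal types (struct thread_data, struct io_u) and the
 * td_io_queue()/FIO_Q_* submission convention; do_one_io() is a
 * hypothetical helper name.
 */
static int do_one_io(struct thread_data *td)
{
	struct io_u *io_u;
	int ret;

	/*
	 * get_io_u() no longer takes a file argument: it either reuses a
	 * requeued io_u (io_u->file already set) from td->io_u_requeues,
	 * or picks the next open file itself via get_next_file().
	 */
	io_u = get_io_u(td);
	if (!io_u)
		return 1;

	ret = td_io_queue(td, io_u);
	switch (ret) {
	case FIO_Q_COMPLETED:
		/* sync engines complete inline */
		if (io_u_sync_complete(td, io_u, NULL) < 0)
			return 1;
		break;
	case FIO_Q_QUEUED:
		/* really queued: record the submission latency */
		io_u_queued(td, io_u);
		break;
	case FIO_Q_BUSY:
		/*
		 * Engine is momentarily full: park the io_u on
		 * td->io_u_requeues so the next get_io_u() retries it with
		 * file/offset/buflen intact, then reap at least one
		 * completion to make room.
		 */
		requeue_io_u(td, &io_u);
		if (io_u_queued_complete(td, 1, NULL) < 0)
			return 1;
		break;
	}

	return 0;
}

Note that with min_events > 0, io_u_queued_complete() now calls td_io_commit()
before td_io_getevents(), so the busy/requeue path above implicitly flushes
any batched-but-uncommitted io_us before waiting for a completion.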