diff --git a/io_u.c b/io_u.c
index 69edd70e..ea0d46c5 100644
--- a/io_u.c
+++ b/io_u.c
@@ -8,6 +8,7 @@
 #include "fio.h"
 #include "hash.h"
 #include "verify.h"
+#include "trim.h"
 #include "lib/rand.h"
 
 struct io_completion_data {
@@ -41,10 +42,12 @@ static void mark_random_map(struct thread_data *td, struct io_u *io_u)
 	struct fio_file *f = io_u->file;
 	unsigned long long block;
 	unsigned int blocks, nr_blocks;
+	int busy_check;
 
 	block = (io_u->offset - f->file_offset) / (unsigned long long) min_bs;
 	nr_blocks = (io_u->buflen + min_bs - 1) / min_bs;
 	blocks = 0;
+	busy_check = !(io_u->flags & IO_U_F_BUSY_OK);
 
 	while (nr_blocks) {
 		unsigned int this_blocks, mask;
@@ -54,7 +57,11 @@ static void mark_random_map(struct thread_data *td, struct io_u *io_u)
 		 * If we have a mixed random workload, we may
 		 * encounter blocks we already did IO to.
 		 */
-		if ((td->o.ddir_nr == 1) && !random_map_free(f, block))
+		if (!busy_check) {
+			blocks = nr_blocks;
+			break;
+		}
+		if ((td->o.ddir_seq_nr == 1) && !random_map_free(f, block))
 			break;
 
 		idx = RAND_MAP_IDX(f, block);
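A note on the mark_random_map() hunk above: when an io_u carries IO_U_F_BUSY_OK (set in get_next_block() further down, whenever overlapping previously used blocks is acceptable), the per-block busy check is skipped and the whole extent is claimed in one step. The following is a minimal standalone sketch of that control flow; the flag bit and random_map_free_stub() are illustrative stand-ins, not fio's real definitions.

	#include <stdio.h>

	#define IO_U_F_BUSY_OK	(1 << 0)	/* stand-in flag bit */

	/* Pretend "is this block still unused?" check: odd blocks are taken. */
	static int random_map_free_stub(unsigned long long block)
	{
		return (block & 1) == 0;
	}

	static unsigned int mark_blocks(unsigned int flags, unsigned long long block,
					unsigned int nr_blocks)
	{
		int busy_check = !(flags & IO_U_F_BUSY_OK);
		unsigned int blocks = 0;

		while (nr_blocks) {
			if (!busy_check) {
				/* Overlap allowed: claim the whole extent at once. */
				blocks = nr_blocks;
				break;
			}
			if (!random_map_free_stub(block))
				break;		/* block already done, stop */
			blocks++;
			nr_blocks--;
			block++;
		}

		return blocks;
	}

	int main(void)
	{
		/* Strict check stops at the first busy block; BUSY_OK never does. */
		printf("strict:  %u blocks\n", mark_blocks(0, 0, 8));
		printf("busy-ok: %u blocks\n", mark_blocks(IO_U_F_BUSY_OK, 0, 8));
		return 0;
	}

With the stub above, the strict variant marks a single block before hitting a busy one, while the BUSY_OK variant marks all eight, which is exactly the shortcut the new early-exit buys mixed sequential/random workloads.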
@@ -97,6 +104,8 @@ static unsigned long long last_block(struct thread_data *td, struct fio_file *f,
 	unsigned long long max_blocks;
 	unsigned long long max_size;
 
+	assert(ddir_rw(ddir));
+
 	/*
 	 * Hmm, should we make sure that ->io_size <= ->real_file_size?
 	 */
@@ -190,6 +199,66 @@ static int get_next_rand_offset(struct thread_data *td, struct fio_file *f,
 	return get_next_free_block(td, f, ddir, b);
 }
 
+static int get_next_rand_block(struct thread_data *td, struct fio_file *f,
+			       enum fio_ddir ddir, unsigned long long *b)
+{
+	if (get_next_rand_offset(td, f, ddir, b)) {
+		dprint(FD_IO, "%s: rand offset failed, last=%llu, size=%llu\n",
+				f->file_name, f->last_pos, f->real_file_size);
+		return 1;
+	}
+
+	return 0;
+}
+
+static int get_next_seq_block(struct thread_data *td, struct fio_file *f,
+			      enum fio_ddir ddir, unsigned long long *b)
+{
+	assert(ddir_rw(ddir));
+
+	if (f->last_pos < f->real_file_size) {
+		*b = (f->last_pos - f->file_offset) / td->o.min_bs[ddir];
+		return 0;
+	}
+
+	return 1;
+}
+
+static int get_next_block(struct thread_data *td, struct io_u *io_u,
+			  enum fio_ddir ddir, int rw_seq, unsigned long long *b)
+{
+	struct fio_file *f = io_u->file;
+	int ret;
+
+	assert(ddir_rw(ddir));
+
+	if (rw_seq) {
+		if (td_random(td))
+			ret = get_next_rand_block(td, f, ddir, b);
+		else
+			ret = get_next_seq_block(td, f, ddir, b);
+	} else {
+		io_u->flags |= IO_U_F_BUSY_OK;
+
+		if (td->o.rw_seq == RW_SEQ_SEQ) {
+			ret = get_next_seq_block(td, f, ddir, b);
+			if (ret)
+				ret = get_next_rand_block(td, f, ddir, b);
+		} else if (td->o.rw_seq == RW_SEQ_IDENT) {
+			if (f->last_start != -1ULL)
+				*b = (f->last_start - f->file_offset) / td->o.min_bs[ddir];
+			else
+				*b = 0;
+			ret = 0;
+		} else {
+			log_err("fio: unknown rw_seq=%d\n", td->o.rw_seq);
+			ret = 1;
+		}
+	}
+
+	return ret;
+}
+
 /*
  * For random io, generate a random new block and see if it's used. Repeat
  * until we find a free one. For sequential io, just return the end of
@@ -200,26 +269,18 @@ static int __get_next_offset(struct thread_data *td, struct io_u *io_u)
 	struct fio_file *f = io_u->file;
 	unsigned long long b;
 	enum fio_ddir ddir = io_u->ddir;
+	int rw_seq_hit = 0;
 
-	if (td_random(td) && (td->o.ddir_nr && !--td->ddir_nr)) {
-		td->ddir_nr = td->o.ddir_nr;
+	assert(ddir_rw(ddir));
 
-		if (get_next_rand_offset(td, f, ddir, &b)) {
-			dprint(FD_IO, "%s: getting rand offset failed\n",
-				f->file_name);
-			return 1;
-		}
-	} else {
-		if (f->last_pos >= f->real_file_size) {
-			if (!td_random(td) ||
-			     get_next_rand_offset(td, f, ddir, &b)) {
-				dprint(FD_IO, "%s: pos %llu > size %llu\n",
-					f->file_name, f->last_pos,
-					f->real_file_size);
-				return 1;
-			}
-		} else
-			b = (f->last_pos - f->file_offset) / td->o.min_bs[ddir];
+	if (td->o.ddir_seq_nr && !--td->ddir_seq_nr) {
+		rw_seq_hit = 1;
+		td->ddir_seq_nr = td->o.ddir_seq_nr;
+	}
+
+	if (get_next_block(td, io_u, ddir, rw_seq_hit, &b)) {
+		dprint(FD_IO, "%s: failed to get next block\n", f->file_name);
+		return 1;
 	}
 
 	io_u->offset = b * td->o.ba[ddir];
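The get_next_block() helpers above centralize offset selection. On a ddir_seq_nr sequence boundary (a "rw_seq hit") the job's configured random or sequential mode is honored; between boundaries, rw_seq picks the policy: RW_SEQ_SEQ continues sequentially and falls back to a random block at end of file, while RW_SEQ_IDENT re-issues the previous start offset. Below is a compressed sketch of just that decision tree; the enum, the stubs and the flattened signature are hypothetical, since fio's real code works on struct thread_data and struct fio_file.

	#include <stdio.h>

	enum rw_seq_mode { RW_SEQ_SEQ, RW_SEQ_IDENT };

	static int next_rand(unsigned long long *b) { *b = 42; return 0; }	/* stub */
	static int next_seq(unsigned long long *b) { *b = 7; return 0; }	/* stub */

	static int next_block(int rw_seq_hit, int is_random, enum rw_seq_mode mode,
			      unsigned long long last, unsigned long long *b)
	{
		if (rw_seq_hit) {
			/* Sequence boundary: honor the configured workload mode. */
			return is_random ? next_rand(b) : next_seq(b);
		}

		/* Between boundaries the io_u may overlap old blocks (BUSY_OK). */
		switch (mode) {
		case RW_SEQ_SEQ:
			if (next_seq(b))
				return next_rand(b);	/* EOF: fall back to random */
			return 0;
		case RW_SEQ_IDENT:
			*b = last;			/* re-issue the previous block */
			return 0;
		}

		return 1;	/* unknown mode */
	}

	int main(void)
	{
		unsigned long long b;

		if (!next_block(0, 1, RW_SEQ_IDENT, 13, &b))
			printf("ident repeats block %llu\n", b);
		return 0;
	}

Splitting the random and sequential getters apart is what lets the sequential path fall back to the random one at EOF without duplicating the error handling in __get_next_offset().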
@@ -256,6 +317,8 @@ static unsigned int __get_next_buflen(struct thread_data *td, struct io_u *io_u)
 	unsigned int minbs, maxbs;
 	long r;
 
+	assert(ddir_rw(ddir));
+
 	minbs = td->o.min_bs[ddir];
 	maxbs = td->o.max_bs[ddir];
 
@@ -336,6 +399,8 @@ static enum fio_ddir rate_ddir(struct thread_data *td, enum fio_ddir ddir)
 	struct timeval t;
 	long usec;
 
+	assert(ddir_rw(ddir));
+
 	if (td->rate_pending_usleep[ddir] <= 0)
 		return ddir;
 
@@ -479,7 +544,7 @@ void requeue_io_u(struct thread_data *td, struct io_u **io_u)
 	td_io_u_lock(td);
 
 	__io_u->flags |= IO_U_F_FREE;
-	if ((__io_u->flags & IO_U_F_FLIGHT) && !ddir_sync(__io_u->ddir))
+	if ((__io_u->flags & IO_U_F_FLIGHT) && ddir_rw(__io_u->ddir))
 		td->io_issues[__io_u->ddir]--;
 
 	__io_u->flags &= ~IO_U_F_FLIGHT;
@@ -499,9 +564,9 @@ static int fill_io_u(struct thread_data *td, struct io_u *io_u)
 		io_u->ddir = get_rw_ddir(td);
 
 	/*
-	 * fsync() or fdatasync(), we are done
+	 * fsync() or fdatasync() or trim etc, we are done
 	 */
-	if (ddir_sync(io_u->ddir))
+	if (!ddir_rw(io_u->ddir))
 		goto out;
 
 	/*
@@ -918,21 +983,31 @@ again:
 	return io_u;
 }
 
-/*
- * Return an io_u to be processed. Gets a buflen and offset, sets direction,
- * etc. The returned io_u is fully ready to be prepped and submitted.
- */
-struct io_u *get_io_u(struct thread_data *td)
+static int check_get_trim(struct thread_data *td, struct io_u *io_u)
 {
-	struct fio_file *f;
-	struct io_u *io_u;
+	if (td->o.trim_backlog && td->trim_entries) {
+		int get_trim = 0;
 
-	io_u = __get_io_u(td);
-	if (!io_u) {
-		dprint(FD_IO, "__get_io_u failed\n");
-		return NULL;
+		if (td->trim_batch) {
+			td->trim_batch--;
+			get_trim = 1;
+		} else if (!(td->io_hist_len % td->o.trim_backlog) &&
+			   td->last_ddir != DDIR_READ) {
+			td->trim_batch = td->o.trim_batch;
+			if (!td->trim_batch)
+				td->trim_batch = td->o.trim_backlog;
+			get_trim = 1;
+		}
+
+		if (get_trim && !get_next_trim(td, io_u))
+			return 1;
 	}
 
+	return 0;
+}
+
+static int check_get_verify(struct thread_data *td, struct io_u *io_u)
+{
 	if (td->o.verify_backlog && td->io_hist_len) {
 		int get_verify = 0;
 
@@ -948,9 +1023,32 @@ struct io_u *get_io_u(struct thread_data *td)
 		}
 
 		if (get_verify && !get_next_verify(td, io_u))
-			goto out;
+			return 1;
+	}
+
+	return 0;
+}
+
+/*
+ * Return an io_u to be processed. Gets a buflen and offset, sets direction,
+ * etc. The returned io_u is fully ready to be prepped and submitted.
+ */
+struct io_u *get_io_u(struct thread_data *td)
+{
+	struct fio_file *f;
+	struct io_u *io_u;
+
+	io_u = __get_io_u(td);
+	if (!io_u) {
+		dprint(FD_IO, "__get_io_u failed\n");
+		return NULL;
 	}
 
+	if (check_get_verify(td, io_u))
+		goto out;
+	if (check_get_trim(td, io_u))
+		goto out;
+
 	/*
 	 * from a requeue, io_u already setup
 	 */
@@ -971,18 +1069,26 @@ struct io_u *get_io_u(struct thread_data *td)
 	f = io_u->file;
 	assert(fio_file_open(f));
 
-	if (!ddir_sync(io_u->ddir)) {
+	if (ddir_rw(io_u->ddir)) {
 		if (!io_u->buflen && !(td->io_ops->flags & FIO_NOIO)) {
 			dprint(FD_IO, "get_io_u: zero buflen on %p\n", io_u);
 			goto err_put;
 		}
 
+		f->last_start = io_u->offset;
 		f->last_pos = io_u->offset + io_u->buflen;
 
 		if (td->o.verify != VERIFY_NONE && io_u->ddir == DDIR_WRITE)
			populate_verify_io_u(td, io_u);
 		else if (td->o.refill_buffers && io_u->ddir == DDIR_WRITE)
 			io_u_fill_buffer(td, io_u, io_u->xfer_buflen);
+		else if (io_u->ddir == DDIR_READ) {
+			/*
+			 * Reset the buf_filled parameters so next time if the
+			 * buffer is used for writes it is refilled.
+			 */
+			io_u->buf_filled_len = 0;
+		}
 	}
 
 	/*
@@ -992,6 +1098,7 @@ struct io_u *get_io_u(struct thread_data *td)
 	io_u->xfer_buflen = io_u->buflen;
 
 out:
+	assert(io_u->file);
 	if (!td_io_prep(td, io_u)) {
 		if (!td->o.disable_slat)
 			fio_gettime(&io_u->start_time, NULL);
@@ -1005,7 +1112,10 @@ err_put:
 
 void io_u_log_error(struct thread_data *td, struct io_u *io_u)
 {
-	const char *msg[] = { "read", "write", "sync" };
+	const char *msg[] = { "read", "write", "sync", "datasync",
+				"sync_file_range", "wait", "trim" };
+
+
 
 	log_err("fio: io_u error");
 
@@ -1035,7 +1145,7 @@ static void io_completed(struct thread_data *td, struct io_u *io_u,
 
 	td_io_u_lock(td);
 	assert(io_u->flags & IO_U_F_FLIGHT);
-	io_u->flags &= ~IO_U_F_FLIGHT;
+	io_u->flags &= ~(IO_U_F_FLIGHT | IO_U_F_BUSY_OK);
 	td_io_u_unlock(td);
 
 	if (ddir_sync(io_u->ddir)) {
@@ -1051,7 +1161,7 @@ static void io_completed(struct thread_data *td, struct io_u *io_u,
 	td->last_was_sync = 0;
 	td->last_ddir = io_u->ddir;
 
-	if (!io_u->error) {
+	if (!io_u->error && ddir_rw(io_u->ddir)) {
 		unsigned int bytes = io_u->buflen - io_u->resid;
 		const enum fio_ddir idx = io_u->ddir;
 		const enum fio_ddir odx = io_u->ddir ^ 1;
@@ -1079,7 +1189,13 @@ static void io_completed(struct thread_data *td, struct io_u *io_u,
 
 		if (!td->o.disable_clat || !td->o.disable_bw)
 			lusec = utime_since(&io_u->issue_time, &icd->time);
 
+		if (!td->o.disable_lat) {
+			unsigned long tusec;
+			tusec = utime_since(&io_u->start_time,
+						&icd->time);
+			add_lat_sample(td, idx, tusec, bytes);
+		}
 		if (!td->o.disable_clat) {
 			add_clat_sample(td, idx, lusec, bytes);
 			io_u_mark_latency(td, lusec);
@@ -1111,7 +1227,7 @@ static void io_completed(struct thread_data *td, struct io_u *io_u,
 			if (ret && !icd->error)
 				icd->error = ret;
 		}
-	} else {
+	} else if (io_u->error) {
 		icd->error = io_u->error;
 		io_u_log_error(td, io_u);
 	}
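check_get_trim() and check_get_verify() above share one batching pattern: every trim_backlog (or verify_backlog) completed I/Os, a batch of trim_batch entries (defaulting to the whole backlog) is drained from the history before fresh I/O is generated. The following is a simplified sketch of that counter logic, omitting the last_ddir != DDIR_READ guard; the struct and its field names are hypothetical, not fio's thread_data fields.

	#include <stdio.h>

	struct backlog_state {
		unsigned int backlog;	/* drain every this-many completed I/Os */
		unsigned int batch;	/* entries drained per trigger (0 = backlog) */
		unsigned int pending;	/* countdown within the current batch */
		unsigned long hist_len;	/* completed I/Os so far */
	};

	/* Returns 1 if the caller should service a backlog entry now. */
	static int should_drain(struct backlog_state *s)
	{
		if (!s->backlog)
			return 0;

		if (s->pending) {
			s->pending--;
			return 1;
		}
		if (!(s->hist_len % s->backlog)) {
			s->pending = s->batch ? s->batch : s->backlog;
			s->pending--;	/* this call consumes one slot */
			return 1;
		}

		return 0;
	}

	int main(void)
	{
		struct backlog_state s = { .backlog = 4, .batch = 2 };

		/* Drains two entries after every 4th completion: I/Os 4, 5, 8, ... */
		for (s.hist_len = 1; s.hist_len <= 8; s.hist_len++)
			printf("io %lu -> drain=%d\n", s.hist_len, should_drain(&s));
		return 0;
	}

Keeping the two checks as separate helpers lets get_io_u() stay a short pipeline: try a queued verify, then a queued trim, and only then generate new I/O.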