X-Git-Url: https://git.kernel.dk/?p=fio.git;a=blobdiff_plain;f=io_u.c;h=f68b2139ef8de06d6946b8e3fbed208dc0691a55;hp=0ff66f9dc4036402e0030c06f1aa17cebdc7c110;hb=c4b6117;hpb=1b8dbf256b974fac2864052931f3041ff3ead157 diff --git a/io_u.c b/io_u.c index 0ff66f9d..f68b2139 100644 --- a/io_u.c +++ b/io_u.c @@ -10,28 +10,23 @@ #include "verify.h" #include "trim.h" #include "lib/rand.h" +#include "lib/axmap.h" struct io_completion_data { int nr; /* input */ - int account; /* input */ int error; /* output */ - unsigned long bytes_done[2]; /* output */ + uint64_t bytes_done[DDIR_RWDIR_CNT]; /* output */ struct timeval time; /* output */ }; /* - * The ->file_map[] contains a map of blocks we have or have not done io + * The ->io_axmap contains a map of blocks we have or have not done io * to yet. Used to make sure we cover the entire range in a fair fashion. */ -static int random_map_free(struct fio_file *f, const unsigned long long block) +static int random_map_free(struct fio_file *f, const uint64_t block) { - unsigned int idx = RAND_MAP_IDX(f, block); - unsigned int bit = RAND_MAP_BIT(f, block); - - dprint(FD_RANDOM, "free: b=%llu, idx=%u, bit=%u\n", block, idx, bit); - - return (f->file_map[idx] & (1UL << bit)) == 0; + return !axmap_isset(f->io_axmap, block); } /* @@ -41,69 +36,24 @@ static void mark_random_map(struct thread_data *td, struct io_u *io_u) { unsigned int min_bs = td->o.rw_min_bs; struct fio_file *f = io_u->file; - unsigned long long block; - unsigned int blocks, nr_blocks; - int busy_check; + unsigned int nr_blocks; + uint64_t block; - block = (io_u->offset - f->file_offset) / (unsigned long long) min_bs; + block = (io_u->offset - f->file_offset) / (uint64_t) min_bs; nr_blocks = (io_u->buflen + min_bs - 1) / min_bs; - blocks = 0; - busy_check = !(io_u->flags & IO_U_F_BUSY_OK); - - while (nr_blocks) { - unsigned int idx, bit; - unsigned long mask, this_blocks; - - /* - * If we have a mixed random workload, we may - * encounter blocks we already did IO to. 
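
The patch retires the open-coded ->file_map[] word-and-bit arithmetic in favor of lib/axmap. A flat-bitmap sketch of the three axmap entry points the patch relies on (axmap_isset(), axmap_set_nr(), axmap_next_free()) follows; it mirrors only the observable contract, since the real axmap stacks 64-way summary levels so the linear scans below become logarithmic. Note how mark_random_map() trims io_u->buflen using the count that the set_nr call returns:

#include <stdint.h>

static int bitmap_isset(const uint64_t *map, uint64_t bit)
{
        return (map[bit / 64] >> (bit % 64)) & 1;
}

/* Set up to nr bits from 'bit', stopping at the first already-set
 * bit; the caller trims the IO length with the returned count. */
static unsigned int bitmap_set_nr(uint64_t *map, uint64_t bit,
                                  unsigned int nr)
{
        unsigned int done = 0;

        while (done < nr && !bitmap_isset(map, bit + done)) {
                map[(bit + done) / 64] |= 1ULL << ((bit + done) % 64);
                done++;
        }
        return done;
}

/* First clear bit at or after 'bit', wrapping once; -1ULL if the map
 * is full, which is the failure value checked after axmap_next_free(). */
static uint64_t bitmap_next_free(const uint64_t *map, uint64_t nr_bits,
                                 uint64_t bit)
{
        uint64_t i, b;

        for (i = 0; i < nr_bits; i++) {
                b = (bit + i) % nr_bits;
                if (!bitmap_isset(map, b))
                        return b;
        }
        return (uint64_t) -1ULL;
}

int main(void)
{
        uint64_t map[4] = { 0 };        /* 256 blocks */

        bitmap_set_nr(map, 10, 4);      /* blocks 10..13 now "done" */
        return bitmap_next_free(map, 256, 10) == 14 ? 0 : 1;
}
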
- */ - if (!busy_check) { - blocks = nr_blocks; - break; - } - if ((td->o.ddir_seq_nr == 1) && !random_map_free(f, block)) - break; - idx = RAND_MAP_IDX(f, block); - bit = RAND_MAP_BIT(f, block); + if (!(io_u->flags & IO_U_F_BUSY_OK)) + nr_blocks = axmap_set_nr(f->io_axmap, block, nr_blocks); - fio_assert(td, idx < f->num_maps); - - this_blocks = nr_blocks; - if (this_blocks + bit > BLOCKS_PER_MAP) - this_blocks = BLOCKS_PER_MAP - bit; - - do { - if (this_blocks == BLOCKS_PER_MAP) - mask = -1UL; - else - mask = ((1UL << this_blocks) - 1) << bit; - - if (!(f->file_map[idx] & mask)) - break; - - this_blocks--; - } while (this_blocks); - - if (!this_blocks) - break; - - f->file_map[idx] |= mask; - nr_blocks -= this_blocks; - blocks += this_blocks; - block += this_blocks; - } - - if ((blocks * min_bs) < io_u->buflen) - io_u->buflen = blocks * min_bs; + if ((nr_blocks * min_bs) < io_u->buflen) + io_u->buflen = nr_blocks * min_bs; } -static unsigned long long last_block(struct thread_data *td, struct fio_file *f, - enum fio_ddir ddir) +static uint64_t last_block(struct thread_data *td, struct fio_file *f, + enum fio_ddir ddir) { - unsigned long long max_blocks; - unsigned long long max_size; + uint64_t max_blocks; + uint64_t max_size; assert(ddir_rw(ddir)); @@ -114,143 +64,219 @@ static unsigned long long last_block(struct thread_data *td, struct fio_file *f, if (max_size > f->real_file_size) max_size = f->real_file_size; - max_blocks = max_size / (unsigned long long) td->o.ba[ddir]; + if (td->o.zone_range) + max_size = td->o.zone_range; + + max_blocks = max_size / (uint64_t) td->o.ba[ddir]; if (!max_blocks) return 0; return max_blocks; } -/* - * Return the next free block in the map. - */ -static int get_next_free_block(struct thread_data *td, struct fio_file *f, - enum fio_ddir ddir, unsigned long long *b) +struct rand_off { + struct flist_head list; + uint64_t off; +}; + +static int __get_next_rand_offset(struct thread_data *td, struct fio_file *f, + enum fio_ddir ddir, uint64_t *b) { - unsigned long long block, min_bs = td->o.rw_min_bs, lastb; - int i; + uint64_t r, lastb; lastb = last_block(td, f, ddir); if (!lastb) return 1; - i = f->last_free_lookup; - block = i * BLOCKS_PER_MAP; - while (block * min_bs < f->real_file_size && - block * min_bs < f->io_size) { - if (f->file_map[i] != -1UL) { - block += ffz(f->file_map[i]); - if (block > lastb) - break; - f->last_free_lookup = i; - *b = block; - return 0; + if (td->o.random_generator == FIO_RAND_GEN_TAUSWORTHE) { + uint64_t rmax; + + rmax = td->o.use_os_rand ? OS_RAND_MAX : FRAND_MAX; + + if (td->o.use_os_rand) { + rmax = OS_RAND_MAX; + r = os_random_long(&td->random_state); + } else { + rmax = FRAND_MAX; + r = __rand(&td->__random_state); } - block += BLOCKS_PER_MAP; - i++; + dprint(FD_RANDOM, "off rand %llu\n", (unsigned long long) r); + + *b = (lastb - 1) * (r / ((uint64_t) rmax + 1.0)); + } else { + uint64_t off = 0; + + if (lfsr_next(&f->lfsr, &off, lastb)) + return 1; + + *b = off; } - dprint(FD_IO, "failed finding a free block\n"); - return 1; -} + /* + * if we are not maintaining a random map, we are done. 
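
Both the os_random_long() and __rand() paths above produce a raw word r in [0, rmax] that the dprint then reports, and the expression (lastb - 1) * (r / (rmax + 1.0)) scales it onto the block range, keeping the result strictly inside [0, lastb - 1). A standalone illustration of that scaling, with libc rand() standing in for fio's Tausworthe state:

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

/* Map r in [0, rmax] to a block index; truncation of the double
 * product keeps the result below lastb - 1. */
static uint64_t scale_to_block(uint64_t r, uint64_t rmax, uint64_t lastb)
{
        return (uint64_t) ((lastb - 1) * (r / ((double) rmax + 1.0)));
}

int main(void)
{
        uint64_t lastb = 1000;  /* io_size / block alignment */
        int i;

        srand(42);
        for (i = 0; i < 5; i++)
                printf("%llu\n", (unsigned long long)
                        scale_to_block(rand(), RAND_MAX, lastb));
        return 0;
}
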
+ */ + if (!file_randommap(td, f)) + goto ret; -static int get_next_rand_offset(struct thread_data *td, struct fio_file *f, - enum fio_ddir ddir, unsigned long long *b) -{ - unsigned long long rmax, r, lastb; - int loops = 5; + /* + * calculate map offset and check if it's free + */ + if (random_map_free(f, *b)) + goto ret; - lastb = last_block(td, f, ddir); - if (!lastb) + dprint(FD_RANDOM, "get_next_rand_offset: offset %llu busy\n", + (unsigned long long) *b); + + *b = axmap_next_free(f->io_axmap, *b); + if (*b == (uint64_t) -1ULL) return 1; +ret: + return 0; +} - if (f->failed_rands >= 200) - goto ffz; +static int __get_next_rand_offset_zipf(struct thread_data *td, + struct fio_file *f, enum fio_ddir ddir, + uint64_t *b) +{ + *b = zipf_next(&f->zipf); + return 0; +} - rmax = td->o.use_os_rand ? OS_RAND_MAX : FRAND_MAX; - do { - if (td->o.use_os_rand) - r = os_random_long(&td->random_state); - else - r = __rand(&td->__random_state); +static int __get_next_rand_offset_pareto(struct thread_data *td, + struct fio_file *f, enum fio_ddir ddir, + uint64_t *b) +{ + *b = pareto_next(&f->zipf); + return 0; +} - *b = (lastb - 1) * (r / ((unsigned long long) rmax + 1.0)); +static int flist_cmp(void *data, struct flist_head *a, struct flist_head *b) +{ + struct rand_off *r1 = flist_entry(a, struct rand_off, list); + struct rand_off *r2 = flist_entry(b, struct rand_off, list); - dprint(FD_RANDOM, "off rand %llu\n", r); + return r1->off - r2->off; +} +static int get_off_from_method(struct thread_data *td, struct fio_file *f, + enum fio_ddir ddir, uint64_t *b) +{ + if (td->o.random_distribution == FIO_RAND_DIST_RANDOM) + return __get_next_rand_offset(td, f, ddir, b); + else if (td->o.random_distribution == FIO_RAND_DIST_ZIPF) + return __get_next_rand_offset_zipf(td, f, ddir, b); + else if (td->o.random_distribution == FIO_RAND_DIST_PARETO) + return __get_next_rand_offset_pareto(td, f, ddir, b); + + log_err("fio: unknown random distribution: %d\n", td->o.random_distribution); + return 1; +} - /* - * if we are not maintaining a random map, we are done. - */ - if (!file_randommap(td, f)) - goto ret_good; +/* + * Sort the reads for a verify phase in batches of verifysort_nr, if + * specified. + */ +static inline int should_sort_io(struct thread_data *td) +{ + if (!td->o.verifysort_nr || !td->o.do_verify) + return 0; + if (!td_random(td)) + return 0; + if (td->runstate != TD_VERIFYING) + return 0; + if (td->o.random_generator == FIO_RAND_GEN_TAUSWORTHE) + return 0; - /* - * calculate map offset and check if it's free - */ - if (random_map_free(f, *b)) - goto ret_good; + return 1; +} - dprint(FD_RANDOM, "get_next_rand_offset: offset %llu busy\n", - *b); - } while (--loops); +static int should_do_random(struct thread_data *td, enum fio_ddir ddir) +{ + unsigned int v; + unsigned long r; - if (!f->failed_rands++) - f->last_free_lookup = 0; + if (td->o.perc_rand[ddir] == 100) + return 1; - /* - * we get here, if we didn't suceed in looking up a block. generate - * a random start offset into the filemap, and find the first free - * block from there. 
- */ - loops = 10; - do { - f->last_free_lookup = (f->num_maps - 1) * - (r / ((unsigned long long) rmax + 1.0)); - if (!get_next_free_block(td, f, ddir, b)) - goto ret; + if (td->o.use_os_rand) { + r = os_random_long(&td->seq_rand_state[ddir]); + v = 1 + (int) (100.0 * (r / (OS_RAND_MAX + 1.0))); + } else { + r = __rand(&td->__seq_rand_state[ddir]); + v = 1 + (int) (100.0 * (r / (FRAND_MAX + 1.0))); + } - if (td->o.use_os_rand) - r = os_random_long(&td->random_state); - else - r = __rand(&td->__random_state); - } while (--loops); + return v <= td->o.perc_rand[ddir]; +} - /* - * that didn't work either, try exhaustive search from the start - */ - f->last_free_lookup = 0; -ffz: - if (!get_next_free_block(td, f, ddir, b)) +static int get_next_rand_offset(struct thread_data *td, struct fio_file *f, + enum fio_ddir ddir, uint64_t *b) +{ + struct rand_off *r; + int i, ret = 1; + + if (!should_sort_io(td)) + return get_off_from_method(td, f, ddir, b); + + if (!flist_empty(&td->next_rand_list)) { + struct rand_off *r; +fetch: + r = flist_entry(td->next_rand_list.next, struct rand_off, list); + flist_del(&r->list); + *b = r->off; + free(r); return 0; - f->last_free_lookup = 0; - return get_next_free_block(td, f, ddir, b); -ret_good: - f->failed_rands = 0; -ret: - return 0; + } + + for (i = 0; i < td->o.verifysort_nr; i++) { + r = malloc(sizeof(*r)); + + ret = get_off_from_method(td, f, ddir, &r->off); + if (ret) { + free(r); + break; + } + + flist_add(&r->list, &td->next_rand_list); + } + + if (ret && !i) + return ret; + + assert(!flist_empty(&td->next_rand_list)); + flist_sort(NULL, &td->next_rand_list, flist_cmp); + goto fetch; } static int get_next_rand_block(struct thread_data *td, struct fio_file *f, - enum fio_ddir ddir, unsigned long long *b) + enum fio_ddir ddir, uint64_t *b) { - if (get_next_rand_offset(td, f, ddir, b)) { - dprint(FD_IO, "%s: rand offset failed, last=%llu, size=%llu\n", - f->file_name, f->last_pos, f->real_file_size); - return 1; + if (!get_next_rand_offset(td, f, ddir, b)) + return 0; + + if (td->o.time_based) { + fio_file_reset(td, f); + if (!get_next_rand_offset(td, f, ddir, b)) + return 0; } - return 0; + dprint(FD_IO, "%s: rand offset failed, last=%llu, size=%llu\n", + f->file_name, (unsigned long long) f->last_pos, + (unsigned long long) f->real_file_size); + return 1; } -static int get_next_seq_block(struct thread_data *td, struct fio_file *f, - enum fio_ddir ddir, unsigned long long *b) +static int get_next_seq_offset(struct thread_data *td, struct fio_file *f, + enum fio_ddir ddir, uint64_t *offset) { assert(ddir_rw(ddir)); + if (f->last_pos >= f->io_size + get_start_offset(td) && td->o.time_based) + f->last_pos = f->last_pos - f->io_size; + if (f->last_pos < f->real_file_size) { - unsigned long long pos; + uint64_t pos; if (f->last_pos == f->file_offset && td->o.ddir_seq_add < 0) f->last_pos = f->real_file_size; @@ -259,7 +285,7 @@ static int get_next_seq_block(struct thread_data *td, struct fio_file *f, if (pos) pos += td->o.ddir_seq_add; - *b = pos / td->o.min_bs[ddir]; + *offset = pos; return 0; } @@ -267,38 +293,66 @@ static int get_next_seq_block(struct thread_data *td, struct fio_file *f, } static int get_next_block(struct thread_data *td, struct io_u *io_u, - enum fio_ddir ddir, int rw_seq, unsigned long long *b) + enum fio_ddir ddir, int rw_seq, + unsigned int *is_random) { struct fio_file *f = io_u->file; + uint64_t b, offset; int ret; assert(ddir_rw(ddir)); + b = offset = -1ULL; + if (rw_seq) { - if (td_random(td)) - ret = get_next_rand_block(td, f, 
ddir, b); - else - ret = get_next_seq_block(td, f, ddir, b); + if (td_random(td)) { + if (should_do_random(td, ddir)) { + ret = get_next_rand_block(td, f, ddir, &b); + *is_random = 1; + } else { + *is_random = 0; + io_u->flags |= IO_U_F_BUSY_OK; + ret = get_next_seq_offset(td, f, ddir, &offset); + if (ret) + ret = get_next_rand_block(td, f, ddir, &b); + } + } else { + *is_random = 0; + ret = get_next_seq_offset(td, f, ddir, &offset); + } } else { io_u->flags |= IO_U_F_BUSY_OK; + *is_random = 0; if (td->o.rw_seq == RW_SEQ_SEQ) { - ret = get_next_seq_block(td, f, ddir, b); - if (ret) - ret = get_next_rand_block(td, f, ddir, b); + ret = get_next_seq_offset(td, f, ddir, &offset); + if (ret) { + ret = get_next_rand_block(td, f, ddir, &b); + *is_random = 0; + } } else if (td->o.rw_seq == RW_SEQ_IDENT) { if (f->last_start != -1ULL) - *b = (f->last_start - f->file_offset) - / td->o.min_bs[ddir]; + offset = f->last_start - f->file_offset; else - *b = 0; + offset = 0; ret = 0; } else { log_err("fio: unknown rw_seq=%d\n", td->o.rw_seq); ret = 1; } } - + + if (!ret) { + if (offset != -1ULL) + io_u->offset = offset; + else if (b != -1ULL) + io_u->offset = b * td->o.ba[ddir]; + else { + log_err("fio: bug in offset generation: offset=%llu, b=%llu\n", (unsigned long long) offset, (unsigned long long) b); + ret = 1; + } + } + return ret; } @@ -307,10 +361,10 @@ static int get_next_block(struct thread_data *td, struct io_u *io_u, * until we find a free one. For sequential io, just return the end of * the last io issued. */ -static int __get_next_offset(struct thread_data *td, struct io_u *io_u) +static int __get_next_offset(struct thread_data *td, struct io_u *io_u, + unsigned int *is_random) { struct fio_file *f = io_u->file; - unsigned long long b; enum fio_ddir ddir = io_u->ddir; int rw_seq_hit = 0; @@ -321,34 +375,38 @@ static int __get_next_offset(struct thread_data *td, struct io_u *io_u) td->ddir_seq_nr = td->o.ddir_seq_nr; } - if (get_next_block(td, io_u, ddir, rw_seq_hit, &b)) + if (get_next_block(td, io_u, ddir, rw_seq_hit, is_random)) return 1; - io_u->offset = b * td->o.ba[ddir]; if (io_u->offset >= f->io_size) { dprint(FD_IO, "get_next_offset: offset %llu >= io_size %llu\n", - io_u->offset, f->io_size); + (unsigned long long) io_u->offset, + (unsigned long long) f->io_size); return 1; } io_u->offset += f->file_offset; if (io_u->offset >= f->real_file_size) { dprint(FD_IO, "get_next_offset: offset %llu >= size %llu\n", - io_u->offset, f->real_file_size); + (unsigned long long) io_u->offset, + (unsigned long long) f->real_file_size); return 1; } return 0; } -static int get_next_offset(struct thread_data *td, struct io_u *io_u) +static int get_next_offset(struct thread_data *td, struct io_u *io_u, + unsigned int *is_random) { - struct prof_io_ops *ops = &td->prof_io_ops; + if (td->flags & TD_F_PROFILE_OPS) { + struct prof_io_ops *ops = &td->prof_io_ops; - if (ops->fill_io_u_off) - return ops->fill_io_u_off(td, io_u); + if (ops->fill_io_u_off) + return ops->fill_io_u_off(td, io_u, is_random); + } - return __get_next_offset(td, io_u); + return __get_next_offset(td, io_u, is_random); } static inline int io_u_fits(struct thread_data *td, struct io_u *io_u, @@ -356,17 +414,23 @@ static inline int io_u_fits(struct thread_data *td, struct io_u *io_u, { struct fio_file *f = io_u->file; - return io_u->offset + buflen <= f->io_size + td->o.start_offset; + return io_u->offset + buflen <= f->io_size + get_start_offset(td); } -static unsigned int __get_next_buflen(struct thread_data *td, struct io_u *io_u) 
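
The reworked get_next_block() above carries two candidates, both initialized to -1ULL: sequential paths fill a byte offset directly, while random paths fill a block index that is multiplied out by the ba[ddir] alignment at the end, with a log_err() if neither was set. That resolution step in isolation, using a hypothetical resolve_offset() helper:

#include <stdio.h>
#include <stdint.h>

static int resolve_offset(uint64_t b, uint64_t offset, uint64_t ba,
                          uint64_t *out)
{
        if (offset != (uint64_t) -1ULL)
                *out = offset;          /* sequential: already in bytes */
        else if (b != (uint64_t) -1ULL)
                *out = b * ba;          /* random: block index * alignment */
        else
                return 1;               /* bug: neither path set a value */
        return 0;
}

int main(void)
{
        uint64_t off;

        if (!resolve_offset(10, (uint64_t) -1ULL, 4096, &off))
                printf("block 10 -> offset %llu\n",
                       (unsigned long long) off);
        return 0;
}
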
+static unsigned int __get_next_buflen(struct thread_data *td, struct io_u *io_u, + unsigned int is_random) { - const int ddir = io_u->ddir; - unsigned int uninitialized_var(buflen); + int ddir = io_u->ddir; + unsigned int buflen = 0; unsigned int minbs, maxbs; unsigned long r, rand_max; - assert(ddir_rw(ddir)); + assert(ddir_rw(io_u->ddir)); + + if (td->o.bs_is_seq_rand) + ddir = is_random ? DDIR_WRITE: DDIR_READ; + else + ddir = io_u->ddir; minbs = td->o.min_bs[ddir]; maxbs = td->o.max_bs[ddir]; @@ -374,6 +438,12 @@ static unsigned int __get_next_buflen(struct thread_data *td, struct io_u *io_u) if (minbs == maxbs) return minbs; + /* + * If we can't satisfy the min block size from here, then fail + */ + if (!io_u_fits(td, io_u, minbs)) + return 0; + if (td->o.use_os_rand) rand_max = OS_RAND_MAX; else @@ -405,6 +475,10 @@ static unsigned int __get_next_buflen(struct thread_data *td, struct io_u *io_u) } } + if (td->o.do_verify && td->o.verify != VERIFY_NONE) + buflen = (buflen + td->o.verify_interval - 1) & + ~(td->o.verify_interval - 1); + if (!td->o.bs_unaligned && is_power_of_2(minbs)) buflen = (buflen + minbs - 1) & ~(minbs - 1); @@ -413,14 +487,17 @@ static unsigned int __get_next_buflen(struct thread_data *td, struct io_u *io_u) return buflen; } -static unsigned int get_next_buflen(struct thread_data *td, struct io_u *io_u) +static unsigned int get_next_buflen(struct thread_data *td, struct io_u *io_u, + unsigned int is_random) { - struct prof_io_ops *ops = &td->prof_io_ops; + if (td->flags & TD_F_PROFILE_OPS) { + struct prof_io_ops *ops = &td->prof_io_ops; - if (ops->fill_io_u_size) - return ops->fill_io_u_size(td, io_u); + if (ops->fill_io_u_size) + return ops->fill_io_u_size(td, io_u, is_random); + } - return __get_next_buflen(td, io_u); + return __get_next_buflen(td, io_u, is_random); } static void set_rwmix_bytes(struct thread_data *td) @@ -455,6 +532,24 @@ static inline enum fio_ddir get_rand_ddir(struct thread_data *td) return DDIR_WRITE; } +void io_u_quiesce(struct thread_data *td) +{ + /* + * We are going to sleep, ensure that we flush anything pending as + * not to skew our latency numbers. + * + * Changed to only monitor 'in flight' requests here instead of the + * td->cur_depth, b/c td->cur_depth does not accurately represent + * io's that have been actually submitted to an async engine, + * and cur_depth is meaningless for sync engines. + */ + while (td->io_u_in_flight) { + int fio_unused ret; + + ret = io_u_queued_complete(td, 1, NULL); + } +} + static enum fio_ddir rate_ddir(struct thread_data *td, enum fio_ddir ddir) { enum fio_ddir odir = ddir ^ 1; @@ -470,7 +565,7 @@ static enum fio_ddir rate_ddir(struct thread_data *td, enum fio_ddir ddir) * We have too much pending sleep in this direction. See if we * should switch. 
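
rate_pending_usleep[ddir] tracks how far ahead of the configured rate the job is: io_completed() later in the patch sets it to the time budget for the bytes done minus the elapsed time, so a positive balance here means we must sleep or switch direction. The budget arithmetic matches usec_for_io() further down; a standalone version, assuming a 4 MiB/s cap:

#include <stdio.h>
#include <stdint.h>

/* How long 'bytes' should have taken at 'bps', in usecs, split into
 * whole seconds plus remainder to avoid overflowing the multiply. */
static long long rate_budget_usec(uint64_t bytes, uint64_t bps)
{
        uint64_t secs = bytes / bps;
        uint64_t remainder = bytes - secs * bps;

        return remainder * 1000000 / bps + secs * 1000000;
}

int main(void)
{
        /* 10 MiB done against a 4 MiB/s cap: 2.5 s budget */
        printf("%lld usec\n",
               rate_budget_usec(10ULL << 20, 4ULL << 20));
        return 0;
}

Sleeping for (budget - elapsed) is what holds the observed rate at the cap.
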
*/ - if (td_rw(td)) { + if (td_rw(td) && td->o.rwmix[odir]) { /* * Other direction does not have too much pending, switch */ @@ -491,15 +586,7 @@ static enum fio_ddir rate_ddir(struct thread_data *td, enum fio_ddir ddir) } else usec = td->rate_pending_usleep[ddir]; - /* - * We are going to sleep, ensure that we flush anything pending as - * not to skew our latency numbers - */ - if (td->cur_depth) { - int fio_unused ret; - - ret = io_u_queued_complete(td, td->cur_depth, NULL); - } + io_u_quiesce(td); fio_gettime(&t, NULL); usec_sleep(td, usec); @@ -511,6 +598,9 @@ static enum fio_ddir rate_ddir(struct thread_data *td, enum fio_ddir ddir) if (td_rw(td) && __should_check_rate(td, odir)) td->rate_pending_usleep[odir] -= usec; + if (ddir_trim(ddir)) + return ddir; + return ddir; } @@ -567,8 +657,10 @@ static enum fio_ddir get_rw_ddir(struct thread_data *td) ddir = td->rwmix_ddir; } else if (td_read(td)) ddir = DDIR_READ; - else + else if (td_write(td)) ddir = DDIR_WRITE; + else + ddir = DDIR_TRIM; td->rwmix_ddir = rate_ddir(td, ddir); return td->rwmix_ddir; @@ -576,7 +668,7 @@ static enum fio_ddir get_rw_ddir(struct thread_data *td) static void set_rw_ddir(struct thread_data *td, struct io_u *io_u) { - io_u->ddir = get_rw_ddir(td); + io_u->ddir = io_u->acct_ddir = get_rw_ddir(td); if (io_u->ddir == DDIR_WRITE && (td->io_ops->flags & FIO_BARRIER) && td->o.barrier_blocks && @@ -605,8 +697,7 @@ void put_io_u(struct thread_data *td, struct io_u *io_u) if (io_u->flags & IO_U_F_IN_CUR_DEPTH) td->cur_depth--; - flist_del_init(&io_u->list); - flist_add(&io_u->list, &td->io_u_freelist); + io_u_qpush(&td->io_u_freelist, io_u); td_io_u_unlock(td); td_io_u_free_notify(td); } @@ -620,26 +711,29 @@ void clear_io_u(struct thread_data *td, struct io_u *io_u) void requeue_io_u(struct thread_data *td, struct io_u **io_u) { struct io_u *__io_u = *io_u; + enum fio_ddir ddir = acct_ddir(__io_u); dprint(FD_IO, "requeue %p\n", __io_u); td_io_u_lock(td); __io_u->flags |= IO_U_F_FREE; - if ((__io_u->flags & IO_U_F_FLIGHT) && ddir_rw(__io_u->ddir)) - td->io_issues[__io_u->ddir]--; + if ((__io_u->flags & IO_U_F_FLIGHT) && ddir_rw(ddir)) + td->io_issues[ddir]--; __io_u->flags &= ~IO_U_F_FLIGHT; if (__io_u->flags & IO_U_F_IN_CUR_DEPTH) td->cur_depth--; - flist_del(&__io_u->list); - flist_add_tail(&__io_u->list, &td->io_u_requeues); + + io_u_rpush(&td->io_u_requeues, __io_u); td_io_u_unlock(td); *io_u = NULL; } static int fill_io_u(struct thread_data *td, struct io_u *io_u) { + unsigned int is_random; + if (td->io_ops->flags & FIO_NOIO) goto out; @@ -654,9 +748,10 @@ static int fill_io_u(struct thread_data *td, struct io_u *io_u) /* * See if it's time to switch to a new zone */ - if (td->zone_bytes >= td->o.zone_size) { + if (td->zone_bytes >= td->o.zone_size && td->o.zone_skip) { td->zone_bytes = 0; - io_u->file->last_pos += td->o.zone_skip; + io_u->file->file_offset += td->o.zone_range + td->o.zone_skip; + io_u->file->last_pos = io_u->file->file_offset; td->io_skip_bytes += td->o.zone_skip; } @@ -664,12 +759,12 @@ static int fill_io_u(struct thread_data *td, struct io_u *io_u) * No log, let the seq/rand engine retrieve the next buflen and * position. 
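
With the new zone handling above, crossing zone_size bytes now advances f->file_offset past the current zone_range plus the zone_skip gap and rewinds last_pos to the zone start, instead of only bumping last_pos by zone_skip. The resulting stride, assuming hypothetical 1 MiB range and skip values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        /* zone_range: bytes covered per zone; zone_skip: gap jumped
         * over between zones. 1 MiB / 1 MiB chosen for illustration. */
        uint64_t zone_range = 1ULL << 20, zone_skip = 1ULL << 20;
        uint64_t file_offset = 0;
        int zone;

        for (zone = 0; zone < 3; zone++) {
                printf("zone %d: [%llu, %llu)\n", zone,
                       (unsigned long long) file_offset,
                       (unsigned long long) (file_offset + zone_range));
                /* what the patched fill_io_u() does once zone_bytes
                 * reaches zone_size: jump past range + skip */
                file_offset += zone_range + zone_skip;
        }
        return 0;
}
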
*/ - if (get_next_offset(td, io_u)) { + if (get_next_offset(td, io_u, &is_random)) { dprint(FD_IO, "io_u %p, failed getting offset\n", io_u); return 1; } - io_u->buflen = get_next_buflen(td, io_u); + io_u->buflen = get_next_buflen(td, io_u, is_random); if (!io_u->buflen) { dprint(FD_IO, "io_u %p, failed getting buflen\n", io_u); return 1; @@ -677,8 +772,9 @@ static int fill_io_u(struct thread_data *td, struct io_u *io_u) if (io_u->offset + io_u->buflen > io_u->file->real_file_size) { dprint(FD_IO, "io_u %p, offset too large\n", io_u); - dprint(FD_IO, " off=%llu/%lu > %llu\n", io_u->offset, - io_u->buflen, io_u->file->real_file_size); + dprint(FD_IO, " off=%llu/%lu > %llu\n", + (unsigned long long) io_u->offset, io_u->buflen, + (unsigned long long) io_u->file->real_file_size); return 1; } @@ -688,13 +784,9 @@ static int fill_io_u(struct thread_data *td, struct io_u *io_u) if (td_random(td) && file_randommap(td, io_u->file)) mark_random_map(td, io_u); - /* - * If using a write iolog, store this entry. - */ out: dprint_io_u(io_u, "fill_io_u"); td->zone_bytes += io_u->buflen; - log_io_u(td, io_u); return 0; } @@ -997,10 +1089,12 @@ out: static struct fio_file *get_next_file(struct thread_data *td) { - struct prof_io_ops *ops = &td->prof_io_ops; + if (!(td->flags & TD_F_PROFILE_OPS)) { + struct prof_io_ops *ops = &td->prof_io_ops; - if (ops->get_next_file) - return ops->get_next_file(td); + if (ops->get_next_file) + return ops->get_next_file(td); + } return __get_next_file(td); } @@ -1032,18 +1126,148 @@ static int set_io_u_file(struct thread_data *td, struct io_u *io_u) return 0; } +static void lat_fatal(struct thread_data *td, struct io_completion_data *icd, + unsigned long tusec, unsigned long max_usec) +{ + if (!td->error) + log_err("fio: latency of %lu usec exceeds specified max (%lu usec)\n", tusec, max_usec); + td_verror(td, ETIMEDOUT, "max latency exceeded"); + icd->error = ETIMEDOUT; +} + +static void lat_new_cycle(struct thread_data *td) +{ + fio_gettime(&td->latency_ts, NULL); + td->latency_ios = ddir_rw_sum(td->io_blocks); + td->latency_failed = 0; +} + +/* + * We had an IO outside the latency target. Reduce the queue depth. If we + * are at QD=1, then it's time to give up. + */ +static int __lat_target_failed(struct thread_data *td) +{ + if (td->latency_qd == 1) + return 1; + + td->latency_qd_high = td->latency_qd; + td->latency_qd = (td->latency_qd + td->latency_qd_low) / 2; + + dprint(FD_RATE, "Ramped down: %d %d %d\n", td->latency_qd_low, td->latency_qd, td->latency_qd_high); + + /* + * When we ramp QD down, quiesce existing IO to prevent + * a storm of ramp downs due to pending higher depth. + */ + io_u_quiesce(td); + lat_new_cycle(td); + return 0; +} + +static int lat_target_failed(struct thread_data *td) +{ + if (td->o.latency_percentile.u.f == 100.0) + return __lat_target_failed(td); + + td->latency_failed++; + return 0; +} + +void lat_target_init(struct thread_data *td) +{ + if (td->o.latency_target) { + dprint(FD_RATE, "Latency target=%llu\n", td->o.latency_target); + fio_gettime(&td->latency_ts, NULL); + td->latency_qd = 1; + td->latency_qd_high = td->o.iodepth; + td->latency_qd_low = 1; + td->latency_ios = ddir_rw_sum(td->io_blocks); + } else + td->latency_qd = td->o.iodepth; +} + +static void lat_target_success(struct thread_data *td) +{ + const unsigned int qd = td->latency_qd; + + td->latency_qd_low = td->latency_qd; + + /* + * If we haven't failed yet, we double up to a failing value instead + * of bisecting from highest possible queue depth. 
If we have set + * a limit other than td->o.iodepth, bisect between that. + */ + if (td->latency_qd_high != td->o.iodepth) + td->latency_qd = (td->latency_qd + td->latency_qd_high) / 2; + else + td->latency_qd *= 2; + + if (td->latency_qd > td->o.iodepth) + td->latency_qd = td->o.iodepth; + + dprint(FD_RATE, "Ramped up: %d %d %d\n", td->latency_qd_low, td->latency_qd, td->latency_qd_high); + /* + * Same as last one, we are done + */ + if (td->latency_qd == qd) + td->done = 1; + + lat_new_cycle(td); +} + +/* + * Check if we can bump the queue depth + */ +void lat_target_check(struct thread_data *td) +{ + uint64_t usec_window; + uint64_t ios; + double success_ios; + + usec_window = utime_since_now(&td->latency_ts); + if (usec_window < td->o.latency_window) + return; + + ios = ddir_rw_sum(td->io_blocks) - td->latency_ios; + success_ios = (double) (ios - td->latency_failed) / (double) ios; + success_ios *= 100.0; + + dprint(FD_RATE, "Success rate: %.2f%% (target %.2f%%)\n", success_ios, td->o.latency_percentile.u.f); + + if (success_ios >= td->o.latency_percentile.u.f) + lat_target_success(td); + else + __lat_target_failed(td); +} + +/* + * If latency target is enabled, we might be ramping up or down and not + * using the full queue depth available. + */ +int queue_full(struct thread_data *td) +{ + const int qempty = io_u_qempty(&td->io_u_freelist); + + if (qempty) + return 1; + if (!td->o.latency_target) + return 0; + + return td->cur_depth >= td->latency_qd; +} struct io_u *__get_io_u(struct thread_data *td) { - struct io_u *io_u = NULL; + struct io_u *io_u; td_io_u_lock(td); again: - if (!flist_empty(&td->io_u_requeues)) - io_u = flist_entry(td->io_u_requeues.next, struct io_u, list); + if (!io_u_rempty(&td->io_u_requeues)) + io_u = io_u_rpop(&td->io_u_requeues); else if (!queue_full(td)) { - io_u = flist_entry(td->io_u_freelist.next, struct io_u, list); + io_u = io_u_qpop(&td->io_u_freelist); io_u->buflen = 0; io_u->resid = 0; @@ -1055,10 +1279,10 @@ again: assert(io_u->flags & IO_U_F_FREE); io_u->flags &= ~(IO_U_F_FREE | IO_U_F_FREE_DEF); io_u->flags &= ~(IO_U_F_TRIMMED | IO_U_F_BARRIER); + io_u->flags &= ~IO_U_F_VER_LIST; io_u->error = 0; - flist_del(&io_u->list); - flist_add(&io_u->list, &td->io_u_busylist); + io_u->acct_ddir = -1; td->cur_depth++; io_u->flags |= IO_U_F_IN_CUR_DEPTH; } else if (td->o.verify_async) { @@ -1076,7 +1300,10 @@ again: static int check_get_trim(struct thread_data *td, struct io_u *io_u) { - if (td->o.trim_backlog && td->trim_entries) { + if (!(td->flags & TD_F_TRIM_BACKLOG)) + return 0; + + if (td->trim_entries) { int get_trim = 0; if (td->trim_batch) { @@ -1099,13 +1326,15 @@ static int check_get_trim(struct thread_data *td, struct io_u *io_u) static int check_get_verify(struct thread_data *td, struct io_u *io_u) { - if (td->o.verify_backlog && td->io_hist_len) { + if (!(td->flags & TD_F_VER_BACKLOG)) + return 0; + + if (td->io_hist_len) { int get_verify = 0; - if (td->verify_batch) { - td->verify_batch--; + if (td->verify_batch) get_verify = 1; - } else if (!(td->io_hist_len % td->o.verify_backlog) && + else if (!(td->io_hist_len % td->o.verify_backlog) && td->last_ddir != DDIR_READ) { td->verify_batch = td->o.verify_batch; if (!td->verify_batch) @@ -1113,8 +1342,10 @@ static int check_get_verify(struct thread_data *td, struct io_u *io_u) get_verify = 1; } - if (get_verify && !get_next_verify(td, io_u)) + if (get_verify && !get_next_verify(td, io_u)) { + td->verify_batch--; return 1; + } } return 0; @@ -1129,7 +1360,7 @@ static int check_get_verify(struct 
thread_data *td, struct io_u *io_u) static void small_content_scramble(struct io_u *io_u) { unsigned int i, nr_blocks = io_u->buflen / 512; - unsigned long long boffset; + uint64_t boffset; unsigned int offset; void *p, *end; @@ -1138,6 +1369,7 @@ static void small_content_scramble(struct io_u *io_u) p = io_u->xfer_buf; boffset = io_u->offset; + io_u->buf_filled_len = 0; for (i = 0; i < nr_blocks; i++) { /* @@ -1146,9 +1378,9 @@ static void small_content_scramble(struct io_u *io_u) * and the actual offset. */ offset = (io_u->start_time.tv_usec ^ boffset) & 511; - offset &= ~(sizeof(unsigned long long) - 1); - if (offset >= 512 - sizeof(unsigned long long)) - offset -= sizeof(unsigned long long); + offset &= ~(sizeof(uint64_t) - 1); + if (offset >= 512 - sizeof(uint64_t)) + offset -= sizeof(uint64_t); memcpy(p + offset, &boffset, sizeof(boffset)); end = p + 512 - sizeof(io_u->start_time); @@ -1188,7 +1420,7 @@ struct io_u *get_io_u(struct thread_data *td) /* * If using an iolog, grab next piece if any available. */ - if (td->o.read_iolog_file) { + if (td->flags & TD_F_READ_IOLOG) { if (read_iolog_get(td, io_u)) goto err_put; } else if (set_io_u_file(td, io_u)) { @@ -1209,12 +1441,15 @@ struct io_u *get_io_u(struct thread_data *td) f->last_pos = io_u->offset + io_u->buflen; if (io_u->ddir == DDIR_WRITE) { - if (td->o.verify != VERIFY_NONE) - populate_verify_io_u(td, io_u); - else if (td->o.refill_buffers) - io_u_fill_buffer(td, io_u, io_u->xfer_buflen); - else if (td->o.scramble_buffers) + if (td->flags & TD_F_REFILL_BUFFERS) { + io_u_fill_buffer(td, io_u, + io_u->xfer_buflen, io_u->xfer_buflen); + } else if (td->flags & TD_F_SCRAMBLE_BUFFERS) do_scramble = 1; + if (td->flags & TD_F_VER_NONE) { + populate_verify_io_u(td, io_u); + do_scramble = 0; + } } else if (io_u->ddir == DDIR_READ) { /* * Reset the buf_filled parameters so next time if the @@ -1247,10 +1482,12 @@ err_put: void io_u_log_error(struct thread_data *td, struct io_u *io_u) { + enum error_type_bit eb = td_error_type(io_u->ddir, io_u->error); const char *msg[] = { "read", "write", "sync", "datasync", "sync_file_range", "wait", "trim" }; - + if (td_non_fatal_error(td, eb, io_u->error) && !td->o.error_dump) + return; log_err("fio: io_u error"); @@ -1270,10 +1507,7 @@ static void account_io_completion(struct thread_data *td, struct io_u *io_u, struct io_completion_data *icd, const enum fio_ddir idx, unsigned int bytes) { - unsigned long uninitialized_var(lusec); - - if (!icd->account) - return; + unsigned long lusec = 0; if (!td->o.disable_clat || !td->o.disable_bw) lusec = utime_since(&io_u->issue_time, &icd->time); @@ -1283,6 +1517,20 @@ static void account_io_completion(struct thread_data *td, struct io_u *io_u, tusec = utime_since(&io_u->start_time, &icd->time); add_lat_sample(td, idx, tusec, bytes); + + if (td->flags & TD_F_PROFILE_OPS) { + struct prof_io_ops *ops = &td->prof_io_ops; + + if (ops->io_u_lat) + icd->error = ops->io_u_lat(td, tusec); + } + + if (td->o.max_latency && tusec > td->o.max_latency) + lat_fatal(td, icd, tusec, td->o.max_latency); + if (td->o.latency_target && tusec > td->o.latency_target) { + if (lat_target_failed(td)) + lat_fatal(td, icd, tusec, td->o.latency_target); + } } if (!td->o.disable_clat) { @@ -1293,12 +1541,16 @@ static void account_io_completion(struct thread_data *td, struct io_u *io_u, if (!td->o.disable_bw) add_bw_sample(td, idx, bytes, &icd->time); - add_iops_sample(td, idx, &icd->time); + add_iops_sample(td, idx, bytes, &icd->time); + + if (td->o.number_ios && !--td->o.number_ios) + 
td->done = 1; } static long long usec_for_io(struct thread_data *td, enum fio_ddir ddir) { - unsigned long long secs, remainder, bps, bytes; + uint64_t secs, remainder, bps, bytes; + bytes = td->this_io_bytes[ddir]; bps = td->rate_bps[ddir]; secs = bytes / bps; @@ -1309,11 +1561,6 @@ static long long usec_for_io(struct thread_data *td, enum fio_ddir ddir) static void io_completed(struct thread_data *td, struct io_u *io_u, struct io_completion_data *icd) { - /* - * Older gcc's are too dumb to realize that usec is always used - * initialized, silence that warning. - */ - unsigned long uninitialized_var(usec); struct fio_file *f; dprint_io_u(io_u, "io complete"); @@ -1345,7 +1592,9 @@ static void io_completed(struct thread_data *td, struct io_u *io_u, td->io_blocks[idx]++; td->this_io_blocks[idx]++; td->io_bytes[idx] += bytes; - td->this_io_bytes[idx] += bytes; + + if (!(io_u->flags & IO_U_F_VER_LIST)) + td->this_io_bytes[idx] += bytes; if (idx == DDIR_WRITE) { f = io_u->file; @@ -1359,7 +1608,8 @@ static void io_completed(struct thread_data *td, struct io_u *io_u, } } - if (ramp_time_over(td) && td->runstate == TD_RUNNING) { + if (ramp_time_over(td) && (td->runstate == TD_RUNNING || + td->runstate == TD_VERIFYING)) { account_io_completion(td, io_u, icd, idx, bytes); if (__should_check_rate(td, idx)) { @@ -1367,17 +1617,12 @@ static void io_completed(struct thread_data *td, struct io_u *io_u, (usec_for_io(td, idx) - utime_since_now(&td->start)); } - if (__should_check_rate(td, odx)) + if (idx != DDIR_TRIM && __should_check_rate(td, odx)) td->rate_pending_usleep[odx] = (usec_for_io(td, odx) - utime_since_now(&td->start)); } - if (td_write(td) && idx == DDIR_WRITE && - td->o.do_verify && - td->o.verify != VERIFY_NONE) - log_io_piece(td, io_u); - icd->bytes_done[idx] += bytes; if (io_u->end_io) { @@ -1389,8 +1634,10 @@ static void io_completed(struct thread_data *td, struct io_u *io_u, icd->error = io_u->error; io_u_log_error(td, io_u); } - if (td->o.continue_on_error && icd->error && - td_non_fatal_error(icd->error)) { + if (icd->error) { + enum error_type_bit eb = td_error_type(io_u->ddir, icd->error); + if (!td_non_fatal_error(td, eb, icd->error)) + return; /* * If there is a non_fatal error, then add to the error count * and clear all the errors. @@ -1405,14 +1652,15 @@ static void io_completed(struct thread_data *td, struct io_u *io_u, static void init_icd(struct thread_data *td, struct io_completion_data *icd, int nr) { + int ddir; if (!td->o.disable_clat || !td->o.disable_bw) fio_gettime(&icd->time, NULL); icd->nr = nr; - icd->account = 1; icd->error = 0; - icd->bytes_done[0] = icd->bytes_done[1] = 0; + for (ddir = DDIR_READ; ddir < DDIR_RWDIR_CNT; ddir++) + icd->bytes_done[ddir] = 0; } static void ios_completed(struct thread_data *td, @@ -1428,8 +1676,6 @@ static void ios_completed(struct thread_data *td, if (!(io_u->flags & IO_U_F_FREE_DEF)) put_io_u(td, io_u); - - icd->account = 0; } } @@ -1437,7 +1683,7 @@ static void ios_completed(struct thread_data *td, * Complete a single io_u for the sync engines. 
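
The latency_target machinery added earlier in the patch (lat_target_init(), lat_target_success(), __lat_target_failed()) amounts to a bounded search over queue depth: double up while no ceiling is known, then bisect between the last good and bad depths, giving up when QD=1 still misses the target. Condensed into a standalone state machine (the struct and function names are this sketch's, not fio's):

#include <stdio.h>

struct lat_state {
        unsigned int qd, qd_low, qd_high, iodepth;
};

/* Window missed the target: pin the ceiling and bisect down.
 * Returns 1 when QD=1 still misses, i.e. time to give up. */
static int ramp_down(struct lat_state *s)
{
        if (s->qd == 1)
                return 1;
        s->qd_high = s->qd;
        s->qd = (s->qd + s->qd_low) / 2;
        return 0;
}

/* Window met the target: raise the floor, double until a failure has
 * pinned qd_high below iodepth, then bisect upward, clamped. */
static void ramp_up(struct lat_state *s)
{
        s->qd_low = s->qd;
        if (s->qd_high != s->iodepth)
                s->qd = (s->qd + s->qd_high) / 2;
        else
                s->qd *= 2;
        if (s->qd > s->iodepth)
                s->qd = s->iodepth;
}

int main(void)
{
        struct lat_state s = { .qd = 1, .qd_low = 1, .qd_high = 64,
                               .iodepth = 64 };

        ramp_up(&s);    /* met target: 1 -> 2 */
        ramp_up(&s);    /* met target: 2 -> 4 */
        ramp_down(&s);  /* missed: ceiling 4, bisect to (4 + 2) / 2 = 3 */
        printf("qd=%u low=%u high=%u\n", s.qd, s.qd_low, s.qd_high);
        return 0;
}
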
*/ int io_u_sync_complete(struct thread_data *td, struct io_u *io_u, - unsigned long *bytes) + uint64_t *bytes) { struct io_completion_data icd; @@ -1453,8 +1699,10 @@ int io_u_sync_complete(struct thread_data *td, struct io_u *io_u, } if (bytes) { - bytes[0] += icd.bytes_done[0]; - bytes[1] += icd.bytes_done[1]; + int ddir; + + for (ddir = DDIR_READ; ddir < DDIR_RWDIR_CNT; ddir++) + bytes[ddir] += icd.bytes_done[ddir]; } return 0; @@ -1464,7 +1712,7 @@ int io_u_sync_complete(struct thread_data *td, struct io_u *io_u, * Called to complete min_events number of io for the async engines. */ int io_u_queued_complete(struct thread_data *td, int min_evts, - unsigned long *bytes) + uint64_t *bytes) { struct io_completion_data icd; struct timespec *tvp = NULL; @@ -1491,8 +1739,10 @@ int io_u_queued_complete(struct thread_data *td, int min_evts, } if (bytes) { - bytes[0] += icd.bytes_done[0]; - bytes[1] += icd.bytes_done[1]; + int ddir; + + for (ddir = DDIR_READ; ddir < DDIR_RWDIR_CNT; ddir++) + bytes[ddir] += icd.bytes_done[ddir]; } return 0; @@ -1511,16 +1761,35 @@ void io_u_queued(struct thread_data *td, struct io_u *io_u) } } +void fill_io_buffer(struct thread_data *td, void *buf, unsigned int min_write, + unsigned int max_bs) +{ + if (td->o.buffer_pattern_bytes) + fill_buffer_pattern(td, buf, max_bs); + else if (!td->o.zero_buffers) { + unsigned int perc = td->o.compress_percentage; + + if (perc) { + unsigned int seg = min_write; + + seg = min(min_write, td->o.compress_chunk); + if (!seg) + seg = min_write; + + fill_random_buf_percentage(&td->buf_state, buf, + perc, seg, max_bs); + } else + fill_random_buf(&td->buf_state, buf, max_bs); + } else + memset(buf, 0, max_bs); +} + /* * "randomly" fill the buffer contents */ void io_u_fill_buffer(struct thread_data *td, struct io_u *io_u, - unsigned int max_bs) + unsigned int min_write, unsigned int max_bs) { io_u->buf_filled_len = 0; - - if (!td->o.zero_buffers) - fill_random_buf(&td->buf_state, io_u->buf, max_bs); - else - memset(io_u->buf, 0, max_bs); + fill_io_buffer(td, io_u->buf, min_write, max_bs); }
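
fill_io_buffer() above makes write buffers compressible on demand: with compress_percentage set, fill_random_buf_percentage() randomizes only part of each compress_chunk-sized segment. A sketch of that idea, randomizing the leading bytes of each segment and zeroing the tail; fio's actual byte placement inside fill_random_buf_percentage() may differ:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Randomize the first perc% of every seg-byte segment and zero the
 * rest, so the buffer compresses away roughly (100 - perc)% of its
 * size. fill_percentage() is this sketch's name, not fio's. */
static void fill_percentage(unsigned char *buf, size_t len,
                            unsigned int perc, size_t seg)
{
        size_t off;

        for (off = 0; off < len; off += seg) {
                size_t this_seg = len - off < seg ? len - off : seg;
                size_t rnd = this_seg * perc / 100;
                size_t i;

                for (i = 0; i < rnd; i++)
                        buf[off + i] = rand() & 0xff;
                memset(buf + off + rnd, 0, this_seg - rnd);
        }
}

int main(void)
{
        unsigned char buf[8192];

        srand(1);
        fill_percentage(buf, sizeof(buf), 30, 512);
        printf("%d random bytes per 512-byte segment\n", 512 * 30 / 100);
        return 0;
}
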