X-Git-Url: https://git.kernel.dk/?p=fio.git;a=blobdiff_plain;f=io_u.c;h=46d97319fc174512fc4ca353486144c57097db31;hp=7cbdb915a19629fcbd3b500b9255c918bfbd6dd1;hb=79591fa9555024aa9564656bbbb9d4c828a3e189;hpb=709c8313cf171e63c92740c6b2f38041cb11c722 diff --git a/io_u.c b/io_u.c index 7cbdb915..46d97319 100644 --- a/io_u.c +++ b/io_u.c @@ -12,6 +12,8 @@ #include "lib/rand.h" #include "lib/axmap.h" #include "err.h" +#include "lib/pow2.h" +#include "minmax.h" struct io_completion_data { int nr; /* input */ @@ -25,7 +27,7 @@ struct io_completion_data { * The ->io_axmap contains a map of blocks we have or have not done io * to yet. Used to make sure we cover the entire range in a fair fashion. */ -static int random_map_free(struct fio_file *f, const uint64_t block) +static bool random_map_free(struct fio_file *f, const uint64_t block) { return !axmap_isset(f->io_axmap, block); } @@ -60,6 +62,7 @@ static uint64_t last_block(struct thread_data *td, struct fio_file *f, /* * Hmm, should we make sure that ->io_size <= ->real_file_size? + * -> not for now since there is code assuming it could go either. */ max_size = f->io_size; if (max_size > f->real_file_size) @@ -68,6 +71,9 @@ static uint64_t last_block(struct thread_data *td, struct fio_file *f, if (td->o.zone_range) max_size = td->o.zone_range; + if (td->o.min_bs[ddir] > td->o.ba[ddir]) + max_size -= td->o.min_bs[ddir] - td->o.ba[ddir]; + max_blocks = max_size / (uint64_t) td->o.ba[ddir]; if (!max_blocks) return 0; @@ -81,34 +87,25 @@ struct rand_off { }; static int __get_next_rand_offset(struct thread_data *td, struct fio_file *f, - enum fio_ddir ddir, uint64_t *b) + enum fio_ddir ddir, uint64_t *b, + uint64_t lastb) { - uint64_t r, lastb; + uint64_t r; - lastb = last_block(td, f, ddir); - if (!lastb) - return 1; + if (td->o.random_generator == FIO_RAND_GEN_TAUSWORTHE || + td->o.random_generator == FIO_RAND_GEN_TAUSWORTHE64) { - if (td->o.random_generator == FIO_RAND_GEN_TAUSWORTHE) { - uint64_t rmax; - - rmax = td->o.use_os_rand ? 
OS_RAND_MAX : FRAND_MAX;
-
-		if (td->o.use_os_rand) {
-			rmax = OS_RAND_MAX;
-			r = os_random_long(&td->random_state);
-		} else {
-			rmax = FRAND_MAX;
-			r = __rand(&td->__random_state);
-		}
+		r = __rand(&td->random_state);
 
 		dprint(FD_RANDOM, "off rand %llu\n", (unsigned long long) r);
 
-		*b = lastb * (r / ((uint64_t) rmax + 1.0));
+		*b = lastb * (r / (rand_max(&td->random_state) + 1.0));
 	} else {
 		uint64_t off = 0;
 
-		if (lfsr_next(&f->lfsr, &off, lastb))
+		assert(fio_file_lfsr(f));
+
+		if (lfsr_next(&f->lfsr, &off))
 			return 1;
 
 		*b = off;
@@ -152,6 +149,79 @@ static int __get_next_rand_offset_pareto(struct thread_data *td,
 	return 0;
 }
 
+static int __get_next_rand_offset_gauss(struct thread_data *td,
+					struct fio_file *f, enum fio_ddir ddir,
+					uint64_t *b)
+{
+	*b = gauss_next(&f->gauss);
+	return 0;
+}
+
+static int __get_next_rand_offset_zoned(struct thread_data *td,
+					struct fio_file *f, enum fio_ddir ddir,
+					uint64_t *b)
+{
+	unsigned int v, send, stotal;
+	uint64_t offset, lastb;
+	static int warned;
+	struct zone_split_index *zsi;
+
+	lastb = last_block(td, f, ddir);
+	if (!lastb)
+		return 1;
+
+	if (!td->o.zone_split_nr[ddir]) {
+bail:
+		return __get_next_rand_offset(td, f, ddir, b, lastb);
+	}
+
+	/*
+	 * Generate a value, v, between 1 and 100, both inclusive
+	 */
+	v = rand32_between(&td->zone_state, 1, 100);
+
+	zsi = &td->zone_state_index[ddir][v - 1];
+	stotal = zsi->size_perc_prev;
+	send = zsi->size_perc;
+
+	/*
+	 * Should never happen
+	 */
+	if (send == -1U) {
+		if (!warned) {
+			log_err("fio: bug in zoned generation\n");
+			warned = 1;
+		}
+		goto bail;
+	}
+
+	/*
+	 * 'send' is some percentage below or equal to 100 that
+	 * marks the end of the current IO range. 'stotal' marks
+	 * the start, in percent.
+	 */
+	if (stotal)
+		offset = stotal * lastb / 100ULL;
+	else
+		offset = 0;
+
+	lastb = lastb * (send - stotal) / 100ULL;
+
+	/*
+	 * Generate an index from 0..(send - stotal)% of lastb
+	 */
+	if (__get_next_rand_offset(td, f, ddir, b, lastb) == 1)
+		return 1;
+
+	/*
+	 * Add our start offset, if any
+	 */
+	if (offset)
+		*b += offset;
+
+	return 0;
+}
+
 static int flist_cmp(void *data, struct flist_head *a, struct flist_head *b)
 {
 	struct rand_off *r1 = flist_entry(a, struct rand_off, list);
@@ -163,12 +233,22 @@ static int flist_cmp(void *data, struct flist_head *a, struct flist_head *b)
 static int get_off_from_method(struct thread_data *td, struct fio_file *f,
 			       enum fio_ddir ddir, uint64_t *b)
 {
-	if (td->o.random_distribution == FIO_RAND_DIST_RANDOM)
-		return __get_next_rand_offset(td, f, ddir, b);
-	else if (td->o.random_distribution == FIO_RAND_DIST_ZIPF)
+	if (td->o.random_distribution == FIO_RAND_DIST_RANDOM) {
+		uint64_t lastb;
+
+		lastb = last_block(td, f, ddir);
+		if (!lastb)
+			return 1;
+
+		return __get_next_rand_offset(td, f, ddir, b, lastb);
+	} else if (td->o.random_distribution == FIO_RAND_DIST_ZIPF)
 		return __get_next_rand_offset_zipf(td, f, ddir, b);
 	else if (td->o.random_distribution == FIO_RAND_DIST_PARETO)
 		return __get_next_rand_offset_pareto(td, f, ddir, b);
+	else if (td->o.random_distribution == FIO_RAND_DIST_GAUSS)
+		return __get_next_rand_offset_gauss(td, f, ddir, b);
+	else if (td->o.random_distribution == FIO_RAND_DIST_ZONED)
+		return __get_next_rand_offset_zoned(td, f, ddir, b);
 
 	log_err("fio: unknown random distribution: %d\n", td->o.random_distribution);
 	return 1;
@@ -178,35 +258,29 @@ static int get_off_from_method(struct thread_data *td, struct fio_file *f,
  * Sort the reads for a verify phase in batches of verifysort_nr, if
  * specified. 
*/ -static inline int should_sort_io(struct thread_data *td) +static inline bool should_sort_io(struct thread_data *td) { if (!td->o.verifysort_nr || !td->o.do_verify) - return 0; + return false; if (!td_random(td)) - return 0; + return false; if (td->runstate != TD_VERIFYING) - return 0; - if (td->o.random_generator == FIO_RAND_GEN_TAUSWORTHE) - return 0; + return false; + if (td->o.random_generator == FIO_RAND_GEN_TAUSWORTHE || + td->o.random_generator == FIO_RAND_GEN_TAUSWORTHE64) + return false; - return 1; + return true; } -static int should_do_random(struct thread_data *td, enum fio_ddir ddir) +static bool should_do_random(struct thread_data *td, enum fio_ddir ddir) { unsigned int v; - unsigned long r; if (td->o.perc_rand[ddir] == 100) - return 1; + return true; - if (td->o.use_os_rand) { - r = os_random_long(&td->seq_rand_state[ddir]); - v = 1 + (int) (100.0 * (r / (OS_RAND_MAX + 1.0))); - } else { - r = __rand(&td->__seq_rand_state[ddir]); - v = 1 + (int) (100.0 * (r / (FRAND_MAX + 1.0))); - } + v = rand32_between(&td->seq_rand_state[ddir], 1, 100); return v <= td->o.perc_rand[ddir]; } @@ -221,7 +295,6 @@ static int get_next_rand_offset(struct thread_data *td, struct fio_file *f, return get_off_from_method(td, f, ddir, b); if (!flist_empty(&td->next_rand_list)) { - struct rand_off *r; fetch: r = flist_first_entry(&td->next_rand_list, struct rand_off, list); flist_del(&r->list); @@ -256,14 +329,15 @@ static int get_next_rand_block(struct thread_data *td, struct fio_file *f, if (!get_next_rand_offset(td, f, ddir, b)) return 0; - if (td->o.time_based) { + if (td->o.time_based || + (td->o.file_service_type & __FIO_FSERVICE_NONUNIFORM)) { fio_file_reset(td, f); if (!get_next_rand_offset(td, f, ddir, b)) return 0; } dprint(FD_IO, "%s: rand offset failed, last=%llu, size=%llu\n", - f->file_name, (unsigned long long) f->last_pos, + f->file_name, (unsigned long long) f->last_pos[ddir], (unsigned long long) f->real_file_size); return 1; } @@ -271,20 +345,54 @@ static int get_next_rand_block(struct thread_data *td, struct fio_file *f, static int get_next_seq_offset(struct thread_data *td, struct fio_file *f, enum fio_ddir ddir, uint64_t *offset) { + struct thread_options *o = &td->o; + assert(ddir_rw(ddir)); - if (f->last_pos >= f->io_size + get_start_offset(td, f) && td->o.time_based) - f->last_pos = f->last_pos - f->io_size; + if (f->last_pos[ddir] >= f->io_size + get_start_offset(td, f) && + o->time_based) { + struct thread_options *o = &td->o; + uint64_t io_size = f->io_size + (f->io_size % o->min_bs[ddir]); + + if (io_size > f->last_pos[ddir]) + f->last_pos[ddir] = 0; + else + f->last_pos[ddir] = f->last_pos[ddir] - io_size; + } - if (f->last_pos < f->real_file_size) { + if (f->last_pos[ddir] < f->real_file_size) { uint64_t pos; - if (f->last_pos == f->file_offset && td->o.ddir_seq_add < 0) - f->last_pos = f->real_file_size; + if (f->last_pos[ddir] == f->file_offset && o->ddir_seq_add < 0) { + if (f->real_file_size > f->io_size) + f->last_pos[ddir] = f->io_size; + else + f->last_pos[ddir] = f->real_file_size; + } - pos = f->last_pos - f->file_offset; - if (pos) - pos += td->o.ddir_seq_add; + pos = f->last_pos[ddir] - f->file_offset; + if (pos && o->ddir_seq_add) { + pos += o->ddir_seq_add; + + /* + * If we reach beyond the end of the file + * with holed IO, wrap around to the + * beginning again. If we're doing backwards IO, + * wrap to the end. 
+ */ + if (pos >= f->real_file_size) { + if (o->ddir_seq_add > 0) + pos = f->file_offset; + else { + if (f->real_file_size > f->io_size) + pos = f->io_size; + else + pos = f->real_file_size; + + pos += o->ddir_seq_add; + } + } + } *offset = pos; return 0; @@ -312,7 +420,7 @@ static int get_next_block(struct thread_data *td, struct io_u *io_u, *is_random = 1; } else { *is_random = 0; - io_u->flags |= IO_U_F_BUSY_OK; + io_u_set(td, io_u, IO_U_F_BUSY_OK); ret = get_next_seq_offset(td, f, ddir, &offset); if (ret) ret = get_next_rand_block(td, f, ddir, &b); @@ -322,7 +430,7 @@ static int get_next_block(struct thread_data *td, struct io_u *io_u, ret = get_next_seq_offset(td, f, ddir, &offset); } } else { - io_u->flags |= IO_U_F_BUSY_OK; + io_u_set(td, io_u, IO_U_F_BUSY_OK); *is_random = 0; if (td->o.rw_seq == RW_SEQ_SEQ) { @@ -332,8 +440,8 @@ static int get_next_block(struct thread_data *td, struct io_u *io_u, *is_random = 0; } } else if (td->o.rw_seq == RW_SEQ_IDENT) { - if (f->last_start != -1ULL) - offset = f->last_start - f->file_offset; + if (f->last_start[ddir] != -1ULL) + offset = f->last_start[ddir] - f->file_offset; else offset = 0; ret = 0; @@ -410,8 +518,8 @@ static int get_next_offset(struct thread_data *td, struct io_u *io_u, return __get_next_offset(td, io_u, is_random); } -static inline int io_u_fits(struct thread_data *td, struct io_u *io_u, - unsigned int buflen) +static inline bool io_u_fits(struct thread_data *td, struct io_u *io_u, + unsigned int buflen) { struct fio_file *f = io_u->file; @@ -424,7 +532,7 @@ static unsigned int __get_next_buflen(struct thread_data *td, struct io_u *io_u, int ddir = io_u->ddir; unsigned int buflen = 0; unsigned int minbs, maxbs; - unsigned long r, rand_max; + uint64_t frand_max, r; assert(ddir_rw(ddir)); @@ -443,24 +551,17 @@ static unsigned int __get_next_buflen(struct thread_data *td, struct io_u *io_u, if (!io_u_fits(td, io_u, minbs)) return 0; - if (td->o.use_os_rand) - rand_max = OS_RAND_MAX; - else - rand_max = FRAND_MAX; - + frand_max = rand_max(&td->bsrange_state); do { - if (td->o.use_os_rand) - r = os_random_long(&td->bsrange_state); - else - r = __rand(&td->__bsrange_state); + r = __rand(&td->bsrange_state); if (!td->o.bssplit_nr[ddir]) { buflen = 1 + (unsigned int) ((double) maxbs * - (r / (rand_max + 1.0))); + (r / (frand_max + 1.0))); if (buflen < minbs) buflen = minbs; } else { - long perc = 0; + long long perc = 0; unsigned int i; for (i = 0; i < td->o.bssplit_nr[ddir]; i++) { @@ -468,18 +569,16 @@ static unsigned int __get_next_buflen(struct thread_data *td, struct io_u *io_u, buflen = bsp->bs; perc += bsp->perc; - if ((r <= ((rand_max / 100L) * perc)) && + if (!perc) + break; + if ((r / perc <= frand_max / 100ULL) && io_u_fits(td, io_u, buflen)) break; } } - if (td->o.do_verify && td->o.verify != VERIFY_NONE) - buflen = (buflen + td->o.verify_interval - 1) & - ~(td->o.verify_interval - 1); - if (!td->o.bs_unaligned && is_power_of_2(minbs)) - buflen = (buflen + minbs - 1) & ~(minbs - 1); + buflen &= ~(minbs - 1); } while (!io_u_fits(td, io_u, buflen)); @@ -515,15 +614,8 @@ static void set_rwmix_bytes(struct thread_data *td) static inline enum fio_ddir get_rand_ddir(struct thread_data *td) { unsigned int v; - unsigned long r; - if (td->o.use_os_rand) { - r = os_random_long(&td->rwmix_state); - v = 1 + (int) (100.0 * (r / (OS_RAND_MAX + 1.0))); - } else { - r = __rand(&td->__rwmix_state); - v = 1 + (int) (100.0 * (r / (FRAND_MAX + 1.0))); - } + v = rand32_between(&td->rwmix_state, 1, 100); if (v <= td->o.rwmix[DDIR_READ]) return 
DDIR_READ;
@@ -531,8 +623,10 @@ static inline enum fio_ddir get_rand_ddir(struct thread_data *td)
 	return DDIR_WRITE;
 }
 
-void io_u_quiesce(struct thread_data *td)
+int io_u_quiesce(struct thread_data *td)
 {
+	int completed = 0;
+
 	/*
 	 * We are going to sleep, ensure that we flush anything pending so as
 	 * not to skew our latency numbers.
@@ -542,63 +636,70 @@ void io_u_quiesce(struct thread_data *td)
 	 * io's that have been actually submitted to an async engine,
 	 * and cur_depth is meaningless for sync engines.
 	 */
+	if (td->io_u_queued || td->cur_depth) {
+		int fio_unused ret;
+
+		ret = td_io_commit(td);
+	}
+
 	while (td->io_u_in_flight) {
 		int fio_unused ret;
 
-		ret = io_u_queued_complete(td, 1, NULL);
+		ret = io_u_queued_complete(td, 1);
+		if (ret > 0)
+			completed += ret;
 	}
+
+	if (td->flags & TD_F_REGROW_LOGS)
+		regrow_logs(td);
+
+	return completed;
 }
 
 static enum fio_ddir rate_ddir(struct thread_data *td, enum fio_ddir ddir)
 {
 	enum fio_ddir odir = ddir ^ 1;
-	struct timeval t;
 	long usec;
+	uint64_t now;
 
 	assert(ddir_rw(ddir));
+	now = utime_since_now(&td->start);
 
-	if (td->rate_pending_usleep[ddir] <= 0)
+	/*
+	 * If rate_next_io_time is in the past, we need to catch up to rate
+	 */
+	if (td->rate_next_io_time[ddir] <= now)
 		return ddir;
 
 	/*
-	 * We have too much pending sleep in this direction. See if we
+	 * We are ahead of rate in this direction. See if we
	 * should switch.
	 */
 	if (td_rw(td) && td->o.rwmix[odir]) {
 		/*
-		 * Other direction does not have too much pending, switch
+		 * Other direction is behind rate, switch
 		 */
-		if (td->rate_pending_usleep[odir] < 100000)
+		if (td->rate_next_io_time[odir] <= now)
 			return odir;
 
 		/*
-		 * Both directions have pending sleep. Sleep the minimum time
-		 * and deduct from both.
+		 * Both directions are ahead of rate. Sleep the min,
+		 * switch if necessary.
 		 */
-		if (td->rate_pending_usleep[ddir] <=
-			td->rate_pending_usleep[odir]) {
-			usec = td->rate_pending_usleep[ddir];
+		if (td->rate_next_io_time[ddir] <=
+		    td->rate_next_io_time[odir]) {
+			usec = td->rate_next_io_time[ddir] - now;
 		} else {
-			usec = td->rate_pending_usleep[odir];
+			usec = td->rate_next_io_time[odir] - now;
 			ddir = odir;
 		}
 	} else
-		usec = td->rate_pending_usleep[ddir];
-
-	io_u_quiesce(td);
-
-	fio_gettime(&t, NULL);
-	usec_sleep(td, usec);
-	usec = utime_since_now(&t);
+		usec = td->rate_next_io_time[ddir] - now;
 
-	td->rate_pending_usleep[ddir] -= usec;
+	if (td->o.io_submit_mode == IO_MODE_INLINE)
+		io_u_quiesce(td);
 
-	odir = ddir ^ 1;
-	if (td_rw(td) && __should_check_rate(td, odir))
-		td->rate_pending_usleep[odir] -= usec;
-
-	if (ddir_trim(ddir))
-		return ddir;
+	usec = usec_sleep(td, usec);
 
 	return ddir;
 }
@@ -658,8 +759,10 @@ static enum fio_ddir get_rw_ddir(struct thread_data *td)
 		ddir = DDIR_READ;
 	else if (td_write(td))
 		ddir = DDIR_WRITE;
-	else
+	else if (td_trim(td))
 		ddir = DDIR_TRIM;
+	else
+		ddir = DDIR_INVAL;
 
 	td->rwmix_ddir = rate_ddir(td, ddir);
 	return td->rwmix_ddir;
@@ -667,13 +770,23 @@ static enum fio_ddir get_rw_ddir(struct thread_data *td)
 
 static void set_rw_ddir(struct thread_data *td, struct io_u *io_u)
 {
-	io_u->ddir = io_u->acct_ddir = get_rw_ddir(td);
+	enum fio_ddir ddir = get_rw_ddir(td);
+
+	if (td_trimwrite(td)) {
+		struct fio_file *f = io_u->file;
+		if (f->last_pos[DDIR_WRITE] == f->last_pos[DDIR_TRIM])
+			ddir = DDIR_TRIM;
+		else
+			ddir = DDIR_WRITE;
+	}
+
+	io_u->ddir = io_u->acct_ddir = ddir;
 
-	if (io_u->ddir == DDIR_WRITE && (td->io_ops->flags & FIO_BARRIER) &&
+	if (io_u->ddir == DDIR_WRITE && td_ioengine_flagged(td, FIO_BARRIER) &&
 	    td->o.barrier_blocks && 
!(td->io_issues[DDIR_WRITE] % td->o.barrier_blocks) && td->io_issues[DDIR_WRITE]) - io_u->flags |= IO_U_F_BARRIER; + io_u_set(td, io_u, IO_U_F_BARRIER); } void put_file_log(struct thread_data *td, struct fio_file *f) @@ -686,16 +799,21 @@ void put_file_log(struct thread_data *td, struct fio_file *f) void put_io_u(struct thread_data *td, struct io_u *io_u) { + if (td->parent) + td = td->parent; + td_io_u_lock(td); if (io_u->file && !(io_u->flags & IO_U_F_NO_FILE_PUT)) put_file_log(td, io_u->file); io_u->file = NULL; - io_u->flags |= IO_U_F_FREE; + io_u_set(td, io_u, IO_U_F_FREE); - if (io_u->flags & IO_U_F_IN_CUR_DEPTH) + if (io_u->flags & IO_U_F_IN_CUR_DEPTH) { td->cur_depth--; + assert(!(td->flags & TD_F_CHILD)); + } io_u_qpush(&td->io_u_freelist, io_u); td_io_u_unlock(td); td_io_u_free_notify(td); @@ -703,7 +821,7 @@ void put_io_u(struct thread_data *td, struct io_u *io_u) void clear_io_u(struct thread_data *td, struct io_u *io_u) { - io_u->flags &= ~IO_U_F_FLIGHT; + io_u_clear(td, io_u, IO_U_F_FLIGHT); put_io_u(td, io_u); } @@ -714,18 +832,24 @@ void requeue_io_u(struct thread_data *td, struct io_u **io_u) dprint(FD_IO, "requeue %p\n", __io_u); + if (td->parent) + td = td->parent; + td_io_u_lock(td); - __io_u->flags |= IO_U_F_FREE; + io_u_set(td, __io_u, IO_U_F_FREE); if ((__io_u->flags & IO_U_F_FLIGHT) && ddir_rw(ddir)) td->io_issues[ddir]--; - __io_u->flags &= ~IO_U_F_FLIGHT; - if (__io_u->flags & IO_U_F_IN_CUR_DEPTH) + io_u_clear(td, __io_u, IO_U_F_FLIGHT); + if (__io_u->flags & IO_U_F_IN_CUR_DEPTH) { td->cur_depth--; + assert(!(td->flags & TD_F_CHILD)); + } io_u_rpush(&td->io_u_requeues, __io_u); td_io_u_unlock(td); + td_io_u_free_notify(td); *io_u = NULL; } @@ -733,7 +857,7 @@ static int fill_io_u(struct thread_data *td, struct io_u *io_u) { unsigned int is_random; - if (td->io_ops->flags & FIO_NOIO) + if (td_ioengine_flagged(td, FIO_NOIO)) goto out; set_rw_ddir(td, io_u); @@ -748,9 +872,17 @@ static int fill_io_u(struct thread_data *td, struct io_u *io_u) * See if it's time to switch to a new zone */ if (td->zone_bytes >= td->o.zone_size && td->o.zone_skip) { + struct fio_file *f = io_u->file; + td->zone_bytes = 0; - io_u->file->file_offset += td->o.zone_range + td->o.zone_skip; - io_u->file->last_pos = io_u->file->file_offset; + f->file_offset += td->o.zone_range + td->o.zone_skip; + + /* + * Wrap from the beginning, if we exceed the file size + */ + if (f->file_offset >= f->real_file_size) + f->file_offset = f->real_file_size - f->file_offset; + f->last_pos[io_u->ddir] = f->file_offset; td->io_skip_bytes += td->o.zone_skip; } @@ -953,6 +1085,34 @@ static void io_u_mark_latency(struct thread_data *td, unsigned long usec) io_u_mark_lat_msec(td, usec / 1000); } +static unsigned int __get_next_fileno_rand(struct thread_data *td) +{ + unsigned long fileno; + + if (td->o.file_service_type == FIO_FSERVICE_RANDOM) { + uint64_t frand_max = rand_max(&td->next_file_state); + unsigned long r; + + r = __rand(&td->next_file_state); + return (unsigned int) ((double) td->o.nr_files + * (r / (frand_max + 1.0))); + } + + if (td->o.file_service_type == FIO_FSERVICE_ZIPF) + fileno = zipf_next(&td->next_file_zipf); + else if (td->o.file_service_type == FIO_FSERVICE_PARETO) + fileno = pareto_next(&td->next_file_zipf); + else if (td->o.file_service_type == FIO_FSERVICE_GAUSS) + fileno = gauss_next(&td->next_file_gauss); + else { + log_err("fio: bad file service type: %d\n", td->o.file_service_type); + assert(0); + return 0; + } + + return fileno >> FIO_FSERVICE_SHIFT; +} + /* * Get next file to 
service by choosing one at random */ @@ -965,17 +1125,8 @@ static struct fio_file *get_next_file_rand(struct thread_data *td, do { int opened = 0; - unsigned long r; - if (td->o.use_os_rand) { - r = os_random_long(&td->next_file_state); - fno = (unsigned int) ((double) td->o.nr_files - * (r / (OS_RAND_MAX + 1.0))); - } else { - r = __rand(&td->__next_file_state); - fno = (unsigned int) ((double) td->o.nr_files - * (r / (FRAND_MAX + 1.0))); - } + fno = __get_next_fileno_rand(td); f = td->files[fno]; if (fio_file_done(f)) @@ -1128,10 +1279,14 @@ static long set_io_u_file(struct thread_data *td, struct io_u *io_u) put_file_log(td, f); td_io_close_file(td, f); io_u->file = NULL; - fio_file_set_done(f); - td->nr_done_files++; - dprint(FD_FILE, "%s: is done (%d of %d)\n", f->file_name, + if (td->o.file_service_type & __FIO_FSERVICE_NONUNIFORM) + fio_file_reset(td, f); + else { + fio_file_set_done(f); + td->nr_done_files++; + dprint(FD_FILE, "%s: is done (%d of %d)\n", f->file_name, td->nr_done_files, td->o.nr_files); + } } while (1); return 0; @@ -1157,10 +1312,10 @@ static void lat_new_cycle(struct thread_data *td) * We had an IO outside the latency target. Reduce the queue depth. If we * are at QD=1, then it's time to give up. */ -static int __lat_target_failed(struct thread_data *td) +static bool __lat_target_failed(struct thread_data *td) { if (td->latency_qd == 1) - return 1; + return true; td->latency_qd_high = td->latency_qd; @@ -1177,16 +1332,16 @@ static int __lat_target_failed(struct thread_data *td) */ io_u_quiesce(td); lat_new_cycle(td); - return 0; + return false; } -static int lat_target_failed(struct thread_data *td) +static bool lat_target_failed(struct thread_data *td) { if (td->o.latency_percentile.u.f == 100.0) return __lat_target_failed(td); td->latency_failed++; - return 0; + return false; } void lat_target_init(struct thread_data *td) @@ -1281,14 +1436,14 @@ void lat_target_check(struct thread_data *td) * If latency target is enabled, we might be ramping up or down and not * using the full queue depth available. 
*/ -int queue_full(struct thread_data *td) +bool queue_full(const struct thread_data *td) { const int qempty = io_u_qempty(&td->io_u_freelist); if (qempty) - return 1; + return true; if (!td->o.latency_target) - return 0; + return false; return td->cur_depth >= td->latency_qd; } @@ -1297,6 +1452,9 @@ struct io_u *__get_io_u(struct thread_data *td) { struct io_u *io_u = NULL; + if (td->stop_io) + return NULL; + td_io_u_lock(td); again: @@ -1313,21 +1471,23 @@ again: if (io_u) { assert(io_u->flags & IO_U_F_FREE); - io_u->flags &= ~(IO_U_F_FREE | IO_U_F_NO_FILE_PUT | + io_u_clear(td, io_u, IO_U_F_FREE | IO_U_F_NO_FILE_PUT | IO_U_F_TRIMMED | IO_U_F_BARRIER | IO_U_F_VER_LIST); io_u->error = 0; io_u->acct_ddir = -1; td->cur_depth++; - io_u->flags |= IO_U_F_IN_CUR_DEPTH; + assert(!(td->flags & TD_F_CHILD)); + io_u_set(td, io_u, IO_U_F_IN_CUR_DEPTH); io_u->ipo = NULL; - } else if (td->o.verify_async) { + } else if (td_async_processing(td)) { /* * We ran out, wait for async verify threads to finish and * return one */ - pthread_cond_wait(&td->free_cond, &td->io_u_lock); + assert(!(td->flags & TD_F_CHILD)); + assert(!pthread_cond_wait(&td->free_cond, &td->io_u_lock)); goto again; } @@ -1335,10 +1495,10 @@ again: return io_u; } -static int check_get_trim(struct thread_data *td, struct io_u *io_u) +static bool check_get_trim(struct thread_data *td, struct io_u *io_u) { if (!(td->flags & TD_F_TRIM_BACKLOG)) - return 0; + return false; if (td->trim_entries) { int get_trim = 0; @@ -1354,17 +1514,17 @@ static int check_get_trim(struct thread_data *td, struct io_u *io_u) get_trim = 1; } - if (get_trim && !get_next_trim(td, io_u)) - return 1; + if (get_trim && get_next_trim(td, io_u)) + return true; } - return 0; + return false; } -static int check_get_verify(struct thread_data *td, struct io_u *io_u) +static bool check_get_verify(struct thread_data *td, struct io_u *io_u) { if (!(td->flags & TD_F_VER_BACKLOG)) - return 0; + return false; if (td->io_hist_len) { int get_verify = 0; @@ -1381,11 +1541,11 @@ static int check_get_verify(struct thread_data *td, struct io_u *io_u) if (get_verify && !get_next_verify(td, io_u)) { td->verify_batch--; - return 1; + return true; } } - return 0; + return false; } /* @@ -1476,18 +1636,19 @@ struct io_u *get_io_u(struct thread_data *td) assert(fio_file_open(f)); if (ddir_rw(io_u->ddir)) { - if (!io_u->buflen && !(td->io_ops->flags & FIO_NOIO)) { + if (!io_u->buflen && !td_ioengine_flagged(td, FIO_NOIO)) { dprint(FD_IO, "get_io_u: zero buflen on %p\n", io_u); goto err_put; } - f->last_start = io_u->offset; - f->last_pos = io_u->offset + io_u->buflen; + f->last_start[io_u->ddir] = io_u->offset; + f->last_pos[io_u->ddir] = io_u->offset + io_u->buflen; if (io_u->ddir == DDIR_WRITE) { if (td->flags & TD_F_REFILL_BUFFERS) { io_u_fill_buffer(td, io_u, - io_u->xfer_buflen, io_u->xfer_buflen); + td->o.min_bs[DDIR_WRITE], + io_u->buflen); } else if ((td->flags & TD_F_SCRAMBLE_BUFFERS) && !(td->flags & TD_F_COMPRESS)) do_scramble = 1; @@ -1513,7 +1674,7 @@ struct io_u *get_io_u(struct thread_data *td) out: assert(io_u->file); if (!td_io_prep(td, io_u)) { - if (!td->o.disable_slat) + if (!td->o.disable_lat) fio_gettime(&io_u->start_time, NULL); if (do_scramble) small_content_scramble(io_u); @@ -1525,7 +1686,7 @@ err_put: return ERR_PTR(ret); } -void io_u_log_error(struct thread_data *td, struct io_u *io_u) +static void __io_u_log_error(struct thread_data *td, struct io_u *io_u) { enum error_type_bit eb = td_error_type(io_u->ddir, io_u->error); @@ -1539,23 +1700,41 @@ void 
io_u_log_error(struct thread_data *td, struct io_u *io_u) io_ddir_name(io_u->ddir), io_u->offset, io_u->xfer_buflen); + if (td->io_ops->errdetails) { + char *err = td->io_ops->errdetails(io_u); + + log_err("fio: %s\n", err); + free(err); + } + if (!td->error) td_verror(td, io_u->error, "io_u error"); } -static inline int gtod_reduce(struct thread_data *td) +void io_u_log_error(struct thread_data *td, struct io_u *io_u) +{ + __io_u_log_error(td, io_u); + if (td->parent) + __io_u_log_error(td->parent, io_u); +} + +static inline bool gtod_reduce(struct thread_data *td) { - return td->o.disable_clat && td->o.disable_lat && td->o.disable_slat - && td->o.disable_bw; + return (td->o.disable_clat && td->o.disable_slat && td->o.disable_bw) + || td->o.gtod_reduce; } static void account_io_completion(struct thread_data *td, struct io_u *io_u, struct io_completion_data *icd, const enum fio_ddir idx, unsigned int bytes) { + const int no_reduce = !gtod_reduce(td); unsigned long lusec = 0; - if (!gtod_reduce(td)) + if (td->parent) + td = td->parent; + + if (no_reduce) lusec = utime_since(&io_u->issue_time, &icd->time); if (!td->o.disable_lat) { @@ -1579,27 +1758,53 @@ static void account_io_completion(struct thread_data *td, struct io_u *io_u, } } - if (!td->o.disable_clat) { - add_clat_sample(td, idx, lusec, bytes, io_u->offset); - io_u_mark_latency(td, lusec); - } + if (ddir_rw(idx)) { + if (!td->o.disable_clat) { + add_clat_sample(td, idx, lusec, bytes, io_u->offset); + io_u_mark_latency(td, lusec); + } - if (!td->o.disable_bw) - add_bw_sample(td, idx, bytes, &icd->time); + if (!td->o.disable_bw && per_unit_log(td->bw_log)) + add_bw_sample(td, io_u, bytes, lusec); - if (!gtod_reduce(td)) - add_iops_sample(td, idx, bytes, &icd->time); + if (no_reduce && per_unit_log(td->iops_log)) + add_iops_sample(td, io_u, bytes); + } + + if (td->ts.nr_block_infos && io_u->ddir == DDIR_TRIM) { + uint32_t *info = io_u_block_info(td, io_u); + if (BLOCK_INFO_STATE(*info) < BLOCK_STATE_TRIM_FAILURE) { + if (io_u->ddir == DDIR_TRIM) { + *info = BLOCK_INFO(BLOCK_STATE_TRIMMED, + BLOCK_INFO_TRIMS(*info) + 1); + } else if (io_u->ddir == DDIR_WRITE) { + *info = BLOCK_INFO_SET_STATE(BLOCK_STATE_WRITTEN, + *info); + } + } + } } -static long long usec_for_io(struct thread_data *td, enum fio_ddir ddir) +static void file_log_write_comp(const struct thread_data *td, struct fio_file *f, + uint64_t offset, unsigned int bytes) { - uint64_t secs, remainder, bps, bytes; + int idx; + + if (!f) + return; + + if (f->first_write == -1ULL || offset < f->first_write) + f->first_write = offset; + if (f->last_write == -1ULL || ((offset + bytes) > f->last_write)) + f->last_write = offset + bytes; + + if (!f->last_write_comp) + return; - bytes = td->this_io_bytes[ddir]; - bps = td->rate_bps[ddir]; - secs = bytes / bps; - remainder = bytes % bps; - return remainder * 1000000 / bps + secs * 1000000; + idx = f->last_write_idx++; + f->last_write_comp[idx] = offset; + if (f->last_write_idx == td->o.iodepth) + f->last_write_idx = 0; } static void io_completed(struct thread_data *td, struct io_u **io_u_ptr, @@ -1611,9 +1816,8 @@ static void io_completed(struct thread_data *td, struct io_u **io_u_ptr, dprint_io_u(io_u, "io complete"); - td_io_u_lock(td); assert(io_u->flags & IO_U_F_FLIGHT); - io_u->flags &= ~(IO_U_F_FLIGHT | IO_U_F_BUSY_OK); + io_u_clear(td, io_u, IO_U_F_FLIGHT | IO_U_F_BUSY_OK); /* * Mark IO ok to verify @@ -1630,8 +1834,6 @@ static void io_completed(struct thread_data *td, struct io_u **io_u_ptr, } } - td_io_u_unlock(td); - if 
(ddir_sync(ddir)) { td->last_was_sync = 1; if (f) { @@ -1646,7 +1848,6 @@ static void io_completed(struct thread_data *td, struct io_u **io_u_ptr, if (!io_u->error && ddir_rw(ddir)) { unsigned int bytes = io_u->buflen - io_u->resid; - const enum fio_ddir oddir = ddir ^ 1; int ret; td->io_blocks[ddir]++; @@ -1656,32 +1857,13 @@ static void io_completed(struct thread_data *td, struct io_u **io_u_ptr, if (!(io_u->flags & IO_U_F_VER_LIST)) td->this_io_bytes[ddir] += bytes; - if (ddir == DDIR_WRITE && f) { - if (f->first_write == -1ULL || - io_u->offset < f->first_write) - f->first_write = io_u->offset; - if (f->last_write == -1ULL || - ((io_u->offset + bytes) > f->last_write)) - f->last_write = io_u->offset + bytes; - } + if (ddir == DDIR_WRITE) + file_log_write_comp(td, f, io_u->offset, bytes); if (ramp_time_over(td) && (td->runstate == TD_RUNNING || - td->runstate == TD_VERIFYING)) { + td->runstate == TD_VERIFYING)) account_io_completion(td, io_u, icd, ddir, bytes); - if (__should_check_rate(td, ddir)) { - td->rate_pending_usleep[ddir] = - (usec_for_io(td, ddir) - - utime_since_now(&td->start)); - } - if (ddir != DDIR_TRIM && - __should_check_rate(td, oddir)) { - td->rate_pending_usleep[oddir] = - (usec_for_io(td, oddir) - - utime_since_now(&td->start)); - } - } - icd->bytes_done[ddir] += bytes; if (io_u->end_io) { @@ -1723,7 +1905,7 @@ static void init_icd(struct thread_data *td, struct io_completion_data *icd, icd->nr = nr; icd->error = 0; - for (ddir = DDIR_READ; ddir < DDIR_RWDIR_CNT; ddir++) + for (ddir = 0; ddir < DDIR_RWDIR_CNT; ddir++) icd->bytes_done[ddir] = 0; } @@ -1746,10 +1928,10 @@ static void ios_completed(struct thread_data *td, /* * Complete a single io_u for the sync engines. */ -int io_u_sync_complete(struct thread_data *td, struct io_u *io_u, - uint64_t *bytes) +int io_u_sync_complete(struct thread_data *td, struct io_u *io_u) { struct io_completion_data icd; + int ddir; init_icd(td, &icd, 1); io_completed(td, &io_u, &icd); @@ -1762,12 +1944,8 @@ int io_u_sync_complete(struct thread_data *td, struct io_u *io_u, return -1; } - if (bytes) { - int ddir; - - for (ddir = DDIR_READ; ddir < DDIR_RWDIR_CNT; ddir++) - bytes[ddir] += icd.bytes_done[ddir]; - } + for (ddir = 0; ddir < DDIR_RWDIR_CNT; ddir++) + td->bytes_done[ddir] += icd.bytes_done[ddir]; return 0; } @@ -1775,12 +1953,11 @@ int io_u_sync_complete(struct thread_data *td, struct io_u *io_u, /* * Called to complete min_events number of io for the async engines. 
*/ -int io_u_queued_complete(struct thread_data *td, int min_evts, - uint64_t *bytes) +int io_u_queued_complete(struct thread_data *td, int min_evts) { struct io_completion_data icd; struct timespec *tvp = NULL; - int ret; + int ret, ddir; struct timespec ts = { .tv_sec = 0, .tv_nsec = 0, }; dprint(FD_IO, "io_u_queued_completed: min=%d\n", min_evts); @@ -1790,7 +1967,9 @@ int io_u_queued_complete(struct thread_data *td, int min_evts, else if (min_evts > td->cur_depth) min_evts = td->cur_depth; - ret = td_io_getevents(td, min_evts, td->o.iodepth_batch_complete, tvp); + /* No worries, td_io_getevents fixes min and max if they are + * set incorrectly */ + ret = td_io_getevents(td, min_evts, td->o.iodepth_batch_complete_max, tvp); if (ret < 0) { td_verror(td, -ret, "td_io_getevents"); return ret; @@ -1804,14 +1983,10 @@ int io_u_queued_complete(struct thread_data *td, int min_evts, return -1; } - if (bytes) { - int ddir; + for (ddir = 0; ddir < DDIR_RWDIR_CNT; ddir++) + td->bytes_done[ddir] += icd.bytes_done[ddir]; - for (ddir = DDIR_READ; ddir < DDIR_RWDIR_CNT; ddir++) - bytes[ddir] += icd.bytes_done[ddir]; - } - - return 0; + return ret; } /* @@ -1823,32 +1998,84 @@ void io_u_queued(struct thread_data *td, struct io_u *io_u) unsigned long slat_time; slat_time = utime_since(&io_u->start_time, &io_u->issue_time); + + if (td->parent) + td = td->parent; + add_slat_sample(td, io_u->ddir, slat_time, io_u->xfer_buflen, io_u->offset); } } +/* + * See if we should reuse the last seed, if dedupe is enabled + */ +static struct frand_state *get_buf_state(struct thread_data *td) +{ + unsigned int v; + + if (!td->o.dedupe_percentage) + return &td->buf_state; + else if (td->o.dedupe_percentage == 100) { + frand_copy(&td->buf_state_prev, &td->buf_state); + return &td->buf_state; + } + + v = rand32_between(&td->dedupe_state, 1, 100); + + if (v <= td->o.dedupe_percentage) + return &td->buf_state_prev; + + return &td->buf_state; +} + +static void save_buf_state(struct thread_data *td, struct frand_state *rs) +{ + if (td->o.dedupe_percentage == 100) + frand_copy(rs, &td->buf_state_prev); + else if (rs == &td->buf_state) + frand_copy(&td->buf_state_prev, rs); +} + void fill_io_buffer(struct thread_data *td, void *buf, unsigned int min_write, unsigned int max_bs) { - if (td->o.buffer_pattern_bytes) - fill_buffer_pattern(td, buf, max_bs); - else if (!td->o.zero_buffers) { + struct thread_options *o = &td->o; + + if (o->compress_percentage || o->dedupe_percentage) { unsigned int perc = td->o.compress_percentage; + struct frand_state *rs; + unsigned int left = max_bs; + unsigned int this_write; - if (perc) { - unsigned int seg = min_write; + do { + rs = get_buf_state(td); - seg = min(min_write, td->o.compress_chunk); - if (!seg) - seg = min_write; + min_write = min(min_write, left); - fill_random_buf_percentage(&td->buf_state, buf, - perc, seg, max_bs); - } else - fill_random_buf(&td->buf_state, buf, max_bs); - } else + if (perc) { + this_write = min_not_zero(min_write, + td->o.compress_chunk); + + fill_random_buf_percentage(rs, buf, perc, + this_write, this_write, + o->buffer_pattern, + o->buffer_pattern_bytes); + } else { + fill_random_buf(rs, buf, min_write); + this_write = min_write; + } + + buf += this_write; + left -= this_write; + save_buf_state(td, rs); + } while (left); + } else if (o->buffer_pattern_bytes) + fill_buffer_pattern(td, buf, max_bs); + else if (o->zero_buffers) memset(buf, 0, max_bs); + else + fill_random_buf(get_buf_state(td), buf, max_bs); } /*
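
The zoned random distribution added above (__get_next_rand_offset_zoned())
boils down to: roll a uniform v in [1,100], look up which zone that
percentile falls in, shrink lastb to that zone's share of the blocks, draw
a uniform block index inside the zone, then add the zone's start offset.
Below is a minimal standalone sketch of that bucket-mapping idea; the
zone_split table, pick_block() and the libc rand() calls are simplified
stand-ins, not fio's zone_state_index table or its rand32_between() helper.

/*
 * Toy model of fio's zoned distribution: 80% of accesses land in the
 * first 20% of blocks, the remaining 20% in the rest.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

struct zone_split {
	unsigned int access_perc;	/* chance of picking this zone; sums to 100 */
	unsigned int size_perc_prev;	/* zone start, in percent of all blocks */
	unsigned int size_perc;		/* zone end, in percent of all blocks */
};

static const struct zone_split zones[] = {
	{ 80,  0,  20 },
	{ 20, 20, 100 },
};

static uint64_t pick_block(uint64_t lastb)
{
	unsigned int v = 1 + rand() % 100;	/* uniform in [1,100] */
	unsigned int sum = 0;
	size_t i;

	for (i = 0; i < sizeof(zones) / sizeof(zones[0]); i++) {
		sum += zones[i].access_perc;
		if (v > sum)
			continue;

		/* start block and span of the chosen zone */
		uint64_t offset = zones[i].size_perc_prev * lastb / 100ULL;
		uint64_t span = (zones[i].size_perc - zones[i].size_perc_prev) *
					lastb / 100ULL;

		/* uniform index inside the zone, plus the zone's start */
		return offset + (uint64_t) (span * (rand() / (RAND_MAX + 1.0)));
	}

	return 0;	/* unreachable while access_perc sums to 100 */
}

int main(void)
{
	int i;

	srand(1);
	for (i = 0; i < 8; i++)
		printf("block %llu\n", (unsigned long long) pick_block(1000000ULL));

	return 0;
}

fio itself avoids the linear scan by expanding the zone table into a
100-entry per-percentile index at setup (the zone_state_index referenced in
the diff), so the per-IO lookup is a single array access; the sketch trades
that for brevity.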