X-Git-Url: https://git.kernel.dk/?p=fio.git;a=blobdiff_plain;f=io_u.c;h=a3540d14714ef45f59f7fcf22744bae507f6b491;hp=ba3f7ca00fcd1db4f657f5dcf7321fc8c9f7ca0e;hb=fb1a0dcdc0869f0c92c49f13e640c15f4ab3b7e2;hpb=36870c47801ca85d1a98c7ec47950b0aa360f865

diff --git a/io_u.c b/io_u.c
index ba3f7ca0..a3540d14 100644
--- a/io_u.c
+++ b/io_u.c
@@ -1,31 +1,30 @@
 #include <unistd.h>
-#include <fcntl.h>
 #include <string.h>
-#include <signal.h>
-#include <time.h>
 #include <assert.h>
 
 #include "fio.h"
-#include "hash.h"
 #include "verify.h"
 #include "trim.h"
 #include "lib/rand.h"
 #include "lib/axmap.h"
 #include "err.h"
+#include "lib/pow2.h"
+#include "minmax.h"
+#include "zbd.h"
 
 struct io_completion_data {
 	int nr;				/* input */
 
 	int error;			/* output */
 	uint64_t bytes_done[DDIR_RWDIR_CNT];	/* output */
-	struct timeval time;		/* output */
+	struct timespec time;		/* output */
 };
 
 /*
  * The ->io_axmap contains a map of blocks we have or have not done io
  * to yet. Used to make sure we cover the entire range in a fair fashion.
  */
-static int random_map_free(struct fio_file *f, const uint64_t block)
+static bool random_map_free(struct fio_file *f, const uint64_t block)
 {
 	return !axmap_isset(f->io_axmap, block);
 }
@@ -33,21 +32,27 @@ static int random_map_free(struct fio_file *f, const uint64_t block)
 /*
  * Mark a given offset as used in the map.
  */
-static void mark_random_map(struct thread_data *td, struct io_u *io_u)
+static uint64_t mark_random_map(struct thread_data *td, struct io_u *io_u,
+				uint64_t offset, uint64_t buflen)
 {
-	unsigned int min_bs = td->o.rw_min_bs;
+	unsigned long long min_bs = td->o.min_bs[io_u->ddir];
 	struct fio_file *f = io_u->file;
-	unsigned int nr_blocks;
+	unsigned long long nr_blocks;
 	uint64_t block;
 
-	block = (io_u->offset - f->file_offset) / (uint64_t) min_bs;
-	nr_blocks = (io_u->buflen + min_bs - 1) / min_bs;
+	block = (offset - f->file_offset) / (uint64_t) min_bs;
+	nr_blocks = (buflen + min_bs - 1) / min_bs;
+	assert(nr_blocks > 0);
 
-	if (!(io_u->flags & IO_U_F_BUSY_OK))
+	if (!(io_u->flags & IO_U_F_BUSY_OK)) {
 		nr_blocks = axmap_set_nr(f->io_axmap, block, nr_blocks);
+		assert(nr_blocks > 0);
+	}
+
+	if ((nr_blocks * min_bs) < buflen)
+		buflen = nr_blocks * min_bs;
 
-	if ((nr_blocks * min_bs) < io_u->buflen)
-		io_u->buflen = nr_blocks * min_bs;
+	return buflen;
 }
 
 static uint64_t last_block(struct thread_data *td, struct fio_file *f,
@@ -60,12 +65,13 @@ static uint64_t last_block(struct thread_data *td, struct fio_file *f,
 
 	/*
 	 * Hmm, should we make sure that ->io_size <= ->real_file_size?
+	 * -> not for now since there is code assuming it could go either.
*/ max_size = f->io_size; if (max_size > f->real_file_size) max_size = f->real_file_size; - if (td->o.zone_range) + if (td->o.zone_mode == ZONE_MODE_STRIDED && td->o.zone_range) max_size = td->o.zone_range; if (td->o.min_bs[ddir] > td->o.ba[ddir]) @@ -78,28 +84,20 @@ static uint64_t last_block(struct thread_data *td, struct fio_file *f, return max_blocks; } -struct rand_off { - struct flist_head list; - uint64_t off; -}; - static int __get_next_rand_offset(struct thread_data *td, struct fio_file *f, - enum fio_ddir ddir, uint64_t *b) + enum fio_ddir ddir, uint64_t *b, + uint64_t lastb) { uint64_t r; - if (td->o.random_generator == FIO_RAND_GEN_TAUSWORTHE) { - uint64_t lastb; - - lastb = last_block(td, f, ddir); - if (!lastb) - return 1; + if (td->o.random_generator == FIO_RAND_GEN_TAUSWORTHE || + td->o.random_generator == FIO_RAND_GEN_TAUSWORTHE64) { r = __rand(&td->random_state); dprint(FD_RANDOM, "off rand %llu\n", (unsigned long long) r); - *b = lastb * (r / ((uint64_t) FRAND_MAX + 1.0)); + *b = lastb * (r / (rand_max(&td->random_state) + 1.0)); } else { uint64_t off = 0; @@ -157,99 +155,172 @@ static int __get_next_rand_offset_gauss(struct thread_data *td, return 0; } +static int __get_next_rand_offset_zoned_abs(struct thread_data *td, + struct fio_file *f, + enum fio_ddir ddir, uint64_t *b) +{ + struct zone_split_index *zsi; + uint64_t lastb, send, stotal; + unsigned int v; + + lastb = last_block(td, f, ddir); + if (!lastb) + return 1; + + if (!td->o.zone_split_nr[ddir]) { +bail: + return __get_next_rand_offset(td, f, ddir, b, lastb); + } + + /* + * Generate a value, v, between 1 and 100, both inclusive + */ + v = rand_between(&td->zone_state, 1, 100); + + /* + * Find our generated table. 'send' is the end block of this zone, + * 'stotal' is our start offset. + */ + zsi = &td->zone_state_index[ddir][v - 1]; + stotal = zsi->size_prev / td->o.ba[ddir]; + send = zsi->size / td->o.ba[ddir]; + + /* + * Should never happen + */ + if (send == -1U) { + if (!fio_did_warn(FIO_WARN_ZONED_BUG)) + log_err("fio: bug in zoned generation\n"); + goto bail; + } else if (send > lastb) { + /* + * This happens if the user specifies ranges that exceed + * the file/device size. We can't handle that gracefully, + * so error and exit. 
+ */ + log_err("fio: zoned_abs sizes exceed file size\n"); + return 1; + } + + /* + * Generate index from 0..send-stotal + */ + if (__get_next_rand_offset(td, f, ddir, b, send - stotal) == 1) + return 1; + + *b += stotal; + return 0; +} -static int flist_cmp(void *data, struct flist_head *a, struct flist_head *b) +static int __get_next_rand_offset_zoned(struct thread_data *td, + struct fio_file *f, enum fio_ddir ddir, + uint64_t *b) { - struct rand_off *r1 = flist_entry(a, struct rand_off, list); - struct rand_off *r2 = flist_entry(b, struct rand_off, list); + unsigned int v, send, stotal; + uint64_t offset, lastb; + struct zone_split_index *zsi; - return r1->off - r2->off; + lastb = last_block(td, f, ddir); + if (!lastb) + return 1; + + if (!td->o.zone_split_nr[ddir]) { +bail: + return __get_next_rand_offset(td, f, ddir, b, lastb); + } + + /* + * Generate a value, v, between 1 and 100, both inclusive + */ + v = rand_between(&td->zone_state, 1, 100); + + zsi = &td->zone_state_index[ddir][v - 1]; + stotal = zsi->size_perc_prev; + send = zsi->size_perc; + + /* + * Should never happen + */ + if (send == -1U) { + if (!fio_did_warn(FIO_WARN_ZONED_BUG)) + log_err("fio: bug in zoned generation\n"); + goto bail; + } + + /* + * 'send' is some percentage below or equal to 100 that + * marks the end of the current IO range. 'stotal' marks + * the start, in percent. + */ + if (stotal) + offset = stotal * lastb / 100ULL; + else + offset = 0; + + lastb = lastb * (send - stotal) / 100ULL; + + /* + * Generate index from 0..send-of-lastb + */ + if (__get_next_rand_offset(td, f, ddir, b, lastb) == 1) + return 1; + + /* + * Add our start offset, if any + */ + if (offset) + *b += offset; + + return 0; } -static int get_off_from_method(struct thread_data *td, struct fio_file *f, - enum fio_ddir ddir, uint64_t *b) +static int get_next_rand_offset(struct thread_data *td, struct fio_file *f, + enum fio_ddir ddir, uint64_t *b) { - if (td->o.random_distribution == FIO_RAND_DIST_RANDOM) - return __get_next_rand_offset(td, f, ddir, b); - else if (td->o.random_distribution == FIO_RAND_DIST_ZIPF) + if (td->o.random_distribution == FIO_RAND_DIST_RANDOM) { + uint64_t lastb; + + lastb = last_block(td, f, ddir); + if (!lastb) + return 1; + + return __get_next_rand_offset(td, f, ddir, b, lastb); + } else if (td->o.random_distribution == FIO_RAND_DIST_ZIPF) return __get_next_rand_offset_zipf(td, f, ddir, b); else if (td->o.random_distribution == FIO_RAND_DIST_PARETO) return __get_next_rand_offset_pareto(td, f, ddir, b); else if (td->o.random_distribution == FIO_RAND_DIST_GAUSS) return __get_next_rand_offset_gauss(td, f, ddir, b); + else if (td->o.random_distribution == FIO_RAND_DIST_ZONED) + return __get_next_rand_offset_zoned(td, f, ddir, b); + else if (td->o.random_distribution == FIO_RAND_DIST_ZONED_ABS) + return __get_next_rand_offset_zoned_abs(td, f, ddir, b); log_err("fio: unknown random distribution: %d\n", td->o.random_distribution); return 1; } -/* - * Sort the reads for a verify phase in batches of verifysort_nr, if - * specified. 
- */ -static inline int should_sort_io(struct thread_data *td) -{ - if (!td->o.verifysort_nr || !td->o.do_verify) - return 0; - if (!td_random(td)) - return 0; - if (td->runstate != TD_VERIFYING) - return 0; - if (td->o.random_generator == FIO_RAND_GEN_TAUSWORTHE) - return 0; - - return 1; -} - -static int should_do_random(struct thread_data *td, enum fio_ddir ddir) +static bool should_do_random(struct thread_data *td, enum fio_ddir ddir) { unsigned int v; - unsigned long r; if (td->o.perc_rand[ddir] == 100) - return 1; + return true; - r = __rand(&td->seq_rand_state[ddir]); - v = 1 + (int) (100.0 * (r / (FRAND_MAX + 1.0))); + v = rand_between(&td->seq_rand_state[ddir], 1, 100); return v <= td->o.perc_rand[ddir]; } -static int get_next_rand_offset(struct thread_data *td, struct fio_file *f, - enum fio_ddir ddir, uint64_t *b) +static void loop_cache_invalidate(struct thread_data *td, struct fio_file *f) { - struct rand_off *r; - int i, ret = 1; - - if (!should_sort_io(td)) - return get_off_from_method(td, f, ddir, b); - - if (!flist_empty(&td->next_rand_list)) { -fetch: - r = flist_first_entry(&td->next_rand_list, struct rand_off, list); - flist_del(&r->list); - *b = r->off; - free(r); - return 0; - } - - for (i = 0; i < td->o.verifysort_nr; i++) { - r = malloc(sizeof(*r)); + struct thread_options *o = &td->o; - ret = get_off_from_method(td, f, ddir, &r->off); - if (ret) { - free(r); - break; - } + if (o->invalidate_cache && !o->odirect) { + int fio_unused ret; - flist_add(&r->list, &td->next_rand_list); + ret = file_invalidate_cache(td, f); } - - if (ret && !i) - return ret; - - assert(!flist_empty(&td->next_rand_list)); - flist_sort(NULL, &td->next_rand_list, flist_cmp); - goto fetch; } static int get_next_rand_block(struct thread_data *td, struct fio_file *f, @@ -258,8 +329,10 @@ static int get_next_rand_block(struct thread_data *td, struct fio_file *f, if (!get_next_rand_offset(td, f, ddir, b)) return 0; - if (td->o.time_based) { + if (td->o.time_based || + (td->o.file_service_type & __FIO_FSERVICE_NONUNIFORM)) { fio_file_reset(td, f); + loop_cache_invalidate(td, f); if (!get_next_rand_offset(td, f, ddir, b)) return 0; } @@ -277,15 +350,29 @@ static int get_next_seq_offset(struct thread_data *td, struct fio_file *f, assert(ddir_rw(ddir)); + /* + * If we reach the end for a time based run, reset us back to 0 + * and invalidate the cache, if we need to. + */ if (f->last_pos[ddir] >= f->io_size + get_start_offset(td, f) && - o->time_based) - f->last_pos[ddir] = f->last_pos[ddir] - f->io_size; + o->time_based) { + f->last_pos[ddir] = f->file_offset; + loop_cache_invalidate(td, f); + } if (f->last_pos[ddir] < f->real_file_size) { uint64_t pos; - if (f->last_pos[ddir] == f->file_offset && o->ddir_seq_add < 0) - f->last_pos[ddir] = f->real_file_size; + /* + * Only rewind if we already hit the end + */ + if (f->last_pos[ddir] == f->file_offset && + f->file_offset && o->ddir_seq_add < 0) { + if (f->real_file_size > f->io_size) + f->last_pos[ddir] = f->io_size; + else + f->last_pos[ddir] = f->real_file_size; + } pos = f->last_pos[ddir] - f->file_offset; if (pos && o->ddir_seq_add) { @@ -294,10 +381,21 @@ static int get_next_seq_offset(struct thread_data *td, struct fio_file *f, /* * If we reach beyond the end of the file * with holed IO, wrap around to the - * beginning again. + * beginning again. If we're doing backwards IO, + * wrap to the end. 
*/ - if (pos >= f->real_file_size) - pos = f->file_offset; + if (pos >= f->real_file_size) { + if (o->ddir_seq_add > 0) + pos = f->file_offset; + else { + if (f->real_file_size > f->io_size) + pos = f->io_size; + else + pos = f->real_file_size; + + pos += o->ddir_seq_add; + } + } } *offset = pos; @@ -309,7 +407,7 @@ static int get_next_seq_offset(struct thread_data *td, struct fio_file *f, static int get_next_block(struct thread_data *td, struct io_u *io_u, enum fio_ddir ddir, int rw_seq, - unsigned int *is_random) + bool *is_random) { struct fio_file *f = io_u->file; uint64_t b, offset; @@ -323,27 +421,27 @@ static int get_next_block(struct thread_data *td, struct io_u *io_u, if (td_random(td)) { if (should_do_random(td, ddir)) { ret = get_next_rand_block(td, f, ddir, &b); - *is_random = 1; + *is_random = true; } else { - *is_random = 0; - io_u_set(io_u, IO_U_F_BUSY_OK); + *is_random = false; + io_u_set(td, io_u, IO_U_F_BUSY_OK); ret = get_next_seq_offset(td, f, ddir, &offset); if (ret) ret = get_next_rand_block(td, f, ddir, &b); } } else { - *is_random = 0; + *is_random = false; ret = get_next_seq_offset(td, f, ddir, &offset); } } else { - io_u_set(io_u, IO_U_F_BUSY_OK); - *is_random = 0; + io_u_set(td, io_u, IO_U_F_BUSY_OK); + *is_random = false; if (td->o.rw_seq == RW_SEQ_SEQ) { ret = get_next_seq_offset(td, f, ddir, &offset); if (ret) { ret = get_next_rand_block(td, f, ddir, &b); - *is_random = 0; + *is_random = false; } } else if (td->o.rw_seq == RW_SEQ_IDENT) { if (f->last_start[ddir] != -1ULL) @@ -376,8 +474,8 @@ static int get_next_block(struct thread_data *td, struct io_u *io_u, * until we find a free one. For sequential io, just return the end of * the last io issued. */ -static int __get_next_offset(struct thread_data *td, struct io_u *io_u, - unsigned int *is_random) +static int get_next_offset(struct thread_data *td, struct io_u *io_u, + bool *is_random) { struct fio_file *f = io_u->file; enum fio_ddir ddir = io_u->ddir; @@ -411,39 +509,27 @@ static int __get_next_offset(struct thread_data *td, struct io_u *io_u, return 0; } -static int get_next_offset(struct thread_data *td, struct io_u *io_u, - unsigned int *is_random) -{ - if (td->flags & TD_F_PROFILE_OPS) { - struct prof_io_ops *ops = &td->prof_io_ops; - - if (ops->fill_io_u_off) - return ops->fill_io_u_off(td, io_u, is_random); - } - - return __get_next_offset(td, io_u, is_random); -} - -static inline int io_u_fits(struct thread_data *td, struct io_u *io_u, - unsigned int buflen) +static inline bool io_u_fits(struct thread_data *td, struct io_u *io_u, + unsigned long long buflen) { struct fio_file *f = io_u->file; return io_u->offset + buflen <= f->io_size + get_start_offset(td, f); } -static unsigned int __get_next_buflen(struct thread_data *td, struct io_u *io_u, - unsigned int is_random) +static unsigned long long get_next_buflen(struct thread_data *td, struct io_u *io_u, + bool is_random) { int ddir = io_u->ddir; - unsigned int buflen = 0; - unsigned int minbs, maxbs; - unsigned long r; + unsigned long long buflen = 0; + unsigned long long minbs, maxbs; + uint64_t frand_max, r; + bool power_2; assert(ddir_rw(ddir)); if (td->o.bs_is_seq_rand) - ddir = is_random ? DDIR_WRITE: DDIR_READ; + ddir = is_random ? 
DDIR_WRITE : DDIR_READ; minbs = td->o.min_bs[ddir]; maxbs = td->o.max_bs[ddir]; @@ -457,16 +543,15 @@ static unsigned int __get_next_buflen(struct thread_data *td, struct io_u *io_u, if (!io_u_fits(td, io_u, minbs)) return 0; + frand_max = rand_max(&td->bsrange_state[ddir]); do { - r = __rand(&td->bsrange_state); + r = __rand(&td->bsrange_state[ddir]); if (!td->o.bssplit_nr[ddir]) { - buflen = 1 + (unsigned int) ((double) maxbs * - (r / (FRAND_MAX + 1.0))); - if (buflen < minbs) - buflen = minbs; + buflen = minbs + (unsigned long long) ((double) maxbs * + (r / (frand_max + 1.0))); } else { - long perc = 0; + long long perc = 0; unsigned int i; for (i = 0; i < td->o.bssplit_nr[ddir]; i++) { @@ -474,37 +559,24 @@ static unsigned int __get_next_buflen(struct thread_data *td, struct io_u *io_u, buflen = bsp->bs; perc += bsp->perc; - if ((r <= ((FRAND_MAX / 100L) * perc)) && + if (!perc) + break; + if ((r / perc <= frand_max / 100ULL) && io_u_fits(td, io_u, buflen)) break; } } - if (td->o.do_verify && td->o.verify != VERIFY_NONE) - buflen = (buflen + td->o.verify_interval - 1) & - ~(td->o.verify_interval - 1); - - if (!td->o.bs_unaligned && is_power_of_2(minbs)) - buflen = (buflen + minbs - 1) & ~(minbs - 1); - + power_2 = is_power_of_2(minbs); + if (!td->o.bs_unaligned && power_2) + buflen &= ~(minbs - 1); + else if (!td->o.bs_unaligned && !power_2) + buflen -= buflen % minbs; } while (!io_u_fits(td, io_u, buflen)); return buflen; } -static unsigned int get_next_buflen(struct thread_data *td, struct io_u *io_u, - unsigned int is_random) -{ - if (td->flags & TD_F_PROFILE_OPS) { - struct prof_io_ops *ops = &td->prof_io_ops; - - if (ops->fill_io_u_size) - return ops->fill_io_u_size(td, io_u, is_random); - } - - return __get_next_buflen(td, io_u, is_random); -} - static void set_rwmix_bytes(struct thread_data *td) { unsigned int diff; @@ -521,10 +593,8 @@ static void set_rwmix_bytes(struct thread_data *td) static inline enum fio_ddir get_rand_ddir(struct thread_data *td) { unsigned int v; - unsigned long r; - r = __rand(&td->rwmix_state); - v = 1 + (int) (100.0 * (r / (FRAND_MAX + 1.0))); + v = rand_between(&td->rwmix_state, 1, 100); if (v <= td->o.rwmix[DDIR_READ]) return DDIR_READ; @@ -532,8 +602,10 @@ static inline enum fio_ddir get_rand_ddir(struct thread_data *td) return DDIR_WRITE; } -void io_u_quiesce(struct thread_data *td) +int io_u_quiesce(struct thread_data *td) { + int completed = 0; + /* * We are going to sleep, ensure that we flush anything pending as * not to skew our latency numbers. @@ -543,65 +615,67 @@ void io_u_quiesce(struct thread_data *td) * io's that have been actually submitted to an async engine, * and cur_depth is meaningless for sync engines. */ - if (td->io_u_queued || td->cur_depth) { - int fio_unused ret; - - ret = td_io_commit(td); - } + if (td->io_u_queued || td->cur_depth) + td_io_commit(td); while (td->io_u_in_flight) { - int fio_unused ret; + int ret; ret = io_u_queued_complete(td, 1); + if (ret > 0) + completed += ret; } + + if (td->flags & TD_F_REGROW_LOGS) + regrow_logs(td); + + return completed; } static enum fio_ddir rate_ddir(struct thread_data *td, enum fio_ddir ddir) { enum fio_ddir odir = ddir ^ 1; - long usec; + uint64_t usec; + uint64_t now; assert(ddir_rw(ddir)); + now = utime_since_now(&td->start); - if (td->rate_pending_usleep[ddir] <= 0) + /* + * if rate_next_io_time is in the past, need to catch up to rate + */ + if (td->rate_next_io_time[ddir] <= now) return ddir; /* - * We have too much pending sleep in this direction. 
See if we + * We are ahead of rate in this direction. See if we * should switch. */ if (td_rw(td) && td->o.rwmix[odir]) { /* - * Other direction does not have too much pending, switch + * Other direction is behind rate, switch */ - if (td->rate_pending_usleep[odir] < 100000) + if (td->rate_next_io_time[odir] <= now) return odir; /* - * Both directions have pending sleep. Sleep the minimum time - * and deduct from both. + * Both directions are ahead of rate. sleep the min, + * switch if necessary */ - if (td->rate_pending_usleep[ddir] <= - td->rate_pending_usleep[odir]) { - usec = td->rate_pending_usleep[ddir]; + if (td->rate_next_io_time[ddir] <= + td->rate_next_io_time[odir]) { + usec = td->rate_next_io_time[ddir] - now; } else { - usec = td->rate_pending_usleep[odir]; + usec = td->rate_next_io_time[odir] - now; ddir = odir; } } else - usec = td->rate_pending_usleep[ddir]; + usec = td->rate_next_io_time[ddir] - now; if (td->o.io_submit_mode == IO_MODE_INLINE) io_u_quiesce(td); - usec = usec_sleep(td, usec); - - td->rate_pending_usleep[ddir] -= usec; - - odir = ddir ^ 1; - if (td_rw(td) && __should_check_rate(td, odir)) - td->rate_pending_usleep[odir] -= usec; - + usec_sleep(td, usec); return ddir; } @@ -615,28 +689,22 @@ static enum fio_ddir get_rw_ddir(struct thread_data *td) enum fio_ddir ddir; /* - * see if it's time to fsync - */ - if (td->o.fsync_blocks && - !(td->io_issues[DDIR_WRITE] % td->o.fsync_blocks) && - td->io_issues[DDIR_WRITE] && should_fsync(td)) - return DDIR_SYNC; - - /* - * see if it's time to fdatasync - */ - if (td->o.fdatasync_blocks && - !(td->io_issues[DDIR_WRITE] % td->o.fdatasync_blocks) && - td->io_issues[DDIR_WRITE] && should_fsync(td)) - return DDIR_DATASYNC; - - /* - * see if it's time to sync_file_range + * See if it's time to fsync/fdatasync/sync_file_range first, + * and if not then move on to check regular I/Os. 
*/ - if (td->sync_file_range_nr && - !(td->io_issues[DDIR_WRITE] % td->sync_file_range_nr) && - td->io_issues[DDIR_WRITE] && should_fsync(td)) - return DDIR_SYNC_FILE_RANGE; + if (should_fsync(td)) { + if (td->o.fsync_blocks && td->io_issues[DDIR_WRITE] && + !(td->io_issues[DDIR_WRITE] % td->o.fsync_blocks)) + return DDIR_SYNC; + + if (td->o.fdatasync_blocks && td->io_issues[DDIR_WRITE] && + !(td->io_issues[DDIR_WRITE] % td->o.fdatasync_blocks)) + return DDIR_DATASYNC; + + if (td->sync_file_range_nr && td->io_issues[DDIR_WRITE] && + !(td->io_issues[DDIR_WRITE] % td->sync_file_range_nr)) + return DDIR_SYNC_FILE_RANGE; + } if (td_rw(td)) { /* @@ -660,8 +728,10 @@ static enum fio_ddir get_rw_ddir(struct thread_data *td) ddir = DDIR_READ; else if (td_write(td)) ddir = DDIR_WRITE; - else + else if (td_trim(td)) ddir = DDIR_TRIM; + else + ddir = DDIR_INVAL; td->rwmix_ddir = rate_ddir(td, ddir); return td->rwmix_ddir; @@ -681,11 +751,11 @@ static void set_rw_ddir(struct thread_data *td, struct io_u *io_u) io_u->ddir = io_u->acct_ddir = ddir; - if (io_u->ddir == DDIR_WRITE && (td->io_ops->flags & FIO_BARRIER) && + if (io_u->ddir == DDIR_WRITE && td_ioengine_flagged(td, FIO_BARRIER) && td->o.barrier_blocks && !(td->io_issues[DDIR_WRITE] % td->o.barrier_blocks) && td->io_issues[DDIR_WRITE]) - io_u_set(io_u, IO_U_F_BARRIER); + io_u_set(td, io_u, IO_U_F_BARRIER); } void put_file_log(struct thread_data *td, struct fio_file *f) @@ -698,34 +768,45 @@ void put_file_log(struct thread_data *td, struct fio_file *f) void put_io_u(struct thread_data *td, struct io_u *io_u) { + const bool needs_lock = td_async_processing(td); + + if (io_u->post_submit) { + io_u->post_submit(io_u, io_u->error == 0); + io_u->post_submit = NULL; + } + if (td->parent) td = td->parent; - td_io_u_lock(td); + if (needs_lock) + __td_io_u_lock(td); if (io_u->file && !(io_u->flags & IO_U_F_NO_FILE_PUT)) put_file_log(td, io_u->file); io_u->file = NULL; - io_u_set(io_u, IO_U_F_FREE); + io_u_set(td, io_u, IO_U_F_FREE); if (io_u->flags & IO_U_F_IN_CUR_DEPTH) { td->cur_depth--; assert(!(td->flags & TD_F_CHILD)); } io_u_qpush(&td->io_u_freelist, io_u); - td_io_u_unlock(td); td_io_u_free_notify(td); + + if (needs_lock) + __td_io_u_unlock(td); } void clear_io_u(struct thread_data *td, struct io_u *io_u) { - io_u_clear(io_u, IO_U_F_FLIGHT); + io_u_clear(td, io_u, IO_U_F_FLIGHT); put_io_u(td, io_u); } void requeue_io_u(struct thread_data *td, struct io_u **io_u) { + const bool needs_lock = td_async_processing(td); struct io_u *__io_u = *io_u; enum fio_ddir ddir = acct_ddir(__io_u); @@ -734,45 +815,40 @@ void requeue_io_u(struct thread_data *td, struct io_u **io_u) if (td->parent) td = td->parent; - td_io_u_lock(td); + if (needs_lock) + __td_io_u_lock(td); - io_u_set(__io_u, IO_U_F_FREE); + io_u_set(td, __io_u, IO_U_F_FREE); if ((__io_u->flags & IO_U_F_FLIGHT) && ddir_rw(ddir)) td->io_issues[ddir]--; - io_u_clear(__io_u, IO_U_F_FLIGHT); + io_u_clear(td, __io_u, IO_U_F_FLIGHT); if (__io_u->flags & IO_U_F_IN_CUR_DEPTH) { td->cur_depth--; assert(!(td->flags & TD_F_CHILD)); } io_u_rpush(&td->io_u_requeues, __io_u); - td_io_u_unlock(td); td_io_u_free_notify(td); + + if (needs_lock) + __td_io_u_unlock(td); + *io_u = NULL; } -static int fill_io_u(struct thread_data *td, struct io_u *io_u) +static void setup_strided_zone_mode(struct thread_data *td, struct io_u *io_u) { - unsigned int is_random; - - if (td->io_ops->flags & FIO_NOIO) - goto out; + struct fio_file *f = io_u->file; - set_rw_ddir(td, io_u); - - /* - * fsync() or fdatasync() or trim etc, we are 
done - */ - if (!ddir_rw(io_u->ddir)) - goto out; + assert(td->o.zone_mode == ZONE_MODE_STRIDED); + assert(td->o.zone_size); + assert(td->o.zone_range); /* * See if it's time to switch to a new zone */ if (td->zone_bytes >= td->o.zone_size && td->o.zone_skip) { - struct fio_file *f = io_u->file; - td->zone_bytes = 0; f->file_offset += td->o.zone_range + td->o.zone_skip; @@ -780,11 +856,50 @@ static int fill_io_u(struct thread_data *td, struct io_u *io_u) * Wrap from the beginning, if we exceed the file size */ if (f->file_offset >= f->real_file_size) - f->file_offset = f->real_file_size - f->file_offset; + f->file_offset = get_start_offset(td, f); + f->last_pos[io_u->ddir] = f->file_offset; td->io_skip_bytes += td->o.zone_skip; } + /* + * If zone_size > zone_range, then maintain the same zone until + * zone_bytes >= zone_size. + */ + if (f->last_pos[io_u->ddir] >= (f->file_offset + td->o.zone_range)) { + dprint(FD_IO, "io_u maintain zone offset=%" PRIu64 "/last_pos=%" PRIu64 "\n", + f->file_offset, f->last_pos[io_u->ddir]); + f->last_pos[io_u->ddir] = f->file_offset; + } + + /* + * For random: if 'norandommap' is not set and zone_size > zone_range, + * map needs to be reset as it's done with zone_range everytime. + */ + if ((td->zone_bytes % td->o.zone_range) == 0) + fio_file_reset(td, f); +} + +static int fill_io_u(struct thread_data *td, struct io_u *io_u) +{ + bool is_random; + uint64_t offset; + enum io_u_action ret; + + if (td_ioengine_flagged(td, FIO_NOIO)) + goto out; + + set_rw_ddir(td, io_u); + + /* + * fsync() or fdatasync() or trim etc, we are done + */ + if (!ddir_rw(io_u->ddir)) + goto out; + + if (td->o.zone_mode == ZONE_MODE_STRIDED) + setup_strided_zone_mode(td, io_u); + /* * No log, let the seq/rand engine retrieve the next buflen and * position. @@ -800,9 +915,16 @@ static int fill_io_u(struct thread_data *td, struct io_u *io_u) return 1; } + offset = io_u->offset; + if (td->o.zone_mode == ZONE_MODE_ZBD) { + ret = zbd_adjust_block(td, io_u); + if (ret == io_u_eof) + return 1; + } + if (io_u->offset + io_u->buflen > io_u->file->real_file_size) { - dprint(FD_IO, "io_u %p, offset too large\n", io_u); - dprint(FD_IO, " off=%llu/%lu > %llu\n", + dprint(FD_IO, "io_u %p, off=0x%llx + len=0x%llx exceeds file size=0x%llx\n", + io_u, (unsigned long long) io_u->offset, io_u->buflen, (unsigned long long) io_u->file->real_file_size); return 1; @@ -812,15 +934,15 @@ static int fill_io_u(struct thread_data *td, struct io_u *io_u) * mark entry before potentially trimming io_u */ if (td_random(td) && file_randommap(td, io_u->file)) - mark_random_map(td, io_u); + io_u->buflen = mark_random_map(td, io_u, offset, io_u->buflen); out: - dprint_io_u(io_u, "fill_io_u"); + dprint_io_u(io_u, "fill"); td->zone_bytes += io_u->buflen; return 0; } -static void __io_u_mark_map(unsigned int *map, unsigned int nr) +static void __io_u_mark_map(uint64_t *map, unsigned int nr) { int idx = 0; @@ -890,11 +1012,52 @@ void io_u_mark_depth(struct thread_data *td, unsigned int nr) td->ts.io_u_map[idx] += nr; } -static void io_u_mark_lat_usec(struct thread_data *td, unsigned long usec) +static void io_u_mark_lat_nsec(struct thread_data *td, unsigned long long nsec) { int idx = 0; - assert(usec < 1000); + assert(nsec < 1000); + + switch (nsec) { + case 750 ... 999: + idx = 9; + break; + case 500 ... 749: + idx = 8; + break; + case 250 ... 499: + idx = 7; + break; + case 100 ... 249: + idx = 6; + break; + case 50 ... 99: + idx = 5; + break; + case 20 ... 49: + idx = 4; + break; + case 10 ... 
19: + idx = 3; + break; + case 4 ... 9: + idx = 2; + break; + case 2 ... 3: + idx = 1; + case 0 ... 1: + break; + } + + assert(idx < FIO_IO_U_LAT_N_NR); + td->ts.io_u_lat_n[idx]++; +} + +static void io_u_mark_lat_usec(struct thread_data *td, unsigned long long usec) +{ + int idx = 0; + + assert(usec < 1000 && usec >= 1); switch (usec) { case 750 ... 999: @@ -931,10 +1094,12 @@ static void io_u_mark_lat_usec(struct thread_data *td, unsigned long usec) td->ts.io_u_lat_u[idx]++; } -static void io_u_mark_lat_msec(struct thread_data *td, unsigned long msec) +static void io_u_mark_lat_msec(struct thread_data *td, unsigned long long msec) { int idx = 0; + assert(msec >= 1); + switch (msec) { default: idx = 11; @@ -976,12 +1141,42 @@ static void io_u_mark_lat_msec(struct thread_data *td, unsigned long msec) td->ts.io_u_lat_m[idx]++; } -static void io_u_mark_latency(struct thread_data *td, unsigned long usec) +static void io_u_mark_latency(struct thread_data *td, unsigned long long nsec) { - if (usec < 1000) - io_u_mark_lat_usec(td, usec); + if (nsec < 1000) + io_u_mark_lat_nsec(td, nsec); + else if (nsec < 1000000) + io_u_mark_lat_usec(td, nsec / 1000); else - io_u_mark_lat_msec(td, usec / 1000); + io_u_mark_lat_msec(td, nsec / 1000000); +} + +static unsigned int __get_next_fileno_rand(struct thread_data *td) +{ + unsigned long fileno; + + if (td->o.file_service_type == FIO_FSERVICE_RANDOM) { + uint64_t frand_max = rand_max(&td->next_file_state); + unsigned long r; + + r = __rand(&td->next_file_state); + return (unsigned int) ((double) td->o.nr_files + * (r / (frand_max + 1.0))); + } + + if (td->o.file_service_type == FIO_FSERVICE_ZIPF) + fileno = zipf_next(&td->next_file_zipf); + else if (td->o.file_service_type == FIO_FSERVICE_PARETO) + fileno = pareto_next(&td->next_file_zipf); + else if (td->o.file_service_type == FIO_FSERVICE_GAUSS) + fileno = gauss_next(&td->next_file_gauss); + else { + log_err("fio: bad file service type: %d\n", td->o.file_service_type); + assert(0); + return 0; + } + + return fileno >> FIO_FSERVICE_SHIFT; } /* @@ -996,11 +1191,8 @@ static struct fio_file *get_next_file_rand(struct thread_data *td, do { int opened = 0; - unsigned long r; - r = __rand(&td->next_file_state); - fno = (unsigned int) ((double) td->o.nr_files - * (r / (FRAND_MAX + 1.0))); + fno = __get_next_fileno_rand(td); f = td->files[fno]; if (fio_file_done(f)) @@ -1125,13 +1317,6 @@ out: static struct fio_file *get_next_file(struct thread_data *td) { - if (td->flags & TD_F_PROFILE_OPS) { - struct prof_io_ops *ops = &td->prof_io_ops; - - if (ops->get_next_file) - return ops->get_next_file(td); - } - return __get_next_file(td); } @@ -1150,23 +1335,32 @@ static long set_io_u_file(struct thread_data *td, struct io_u *io_u) if (!fill_io_u(td, io_u)) break; + if (io_u->post_submit) { + io_u->post_submit(io_u, false); + io_u->post_submit = NULL; + } + put_file_log(td, f); td_io_close_file(td, f); io_u->file = NULL; - fio_file_set_done(f); - td->nr_done_files++; - dprint(FD_FILE, "%s: is done (%d of %d)\n", f->file_name, + if (td->o.file_service_type & __FIO_FSERVICE_NONUNIFORM) + fio_file_reset(td, f); + else { + fio_file_set_done(f); + td->nr_done_files++; + dprint(FD_FILE, "%s: is done (%d of %d)\n", f->file_name, td->nr_done_files, td->o.nr_files); + } } while (1); return 0; } static void lat_fatal(struct thread_data *td, struct io_completion_data *icd, - unsigned long tusec, unsigned long max_usec) + unsigned long long tnsec, unsigned long long max_nsec) { if (!td->error) - log_err("fio: latency of %lu usec 
exceeds specified max (%lu usec)\n", tusec, max_usec); + log_err("fio: latency of %llu nsec exceeds specified max (%llu nsec)\n", tnsec, max_nsec); td_verror(td, ETIMEDOUT, "max latency exceeded"); icd->error = ETIMEDOUT; } @@ -1182,10 +1376,10 @@ static void lat_new_cycle(struct thread_data *td) * We had an IO outside the latency target. Reduce the queue depth. If we * are at QD=1, then it's time to give up. */ -static int __lat_target_failed(struct thread_data *td) +static bool __lat_target_failed(struct thread_data *td) { if (td->latency_qd == 1) - return 1; + return true; td->latency_qd_high = td->latency_qd; @@ -1202,16 +1396,16 @@ static int __lat_target_failed(struct thread_data *td) */ io_u_quiesce(td); lat_new_cycle(td); - return 0; + return false; } -static int lat_target_failed(struct thread_data *td) +static bool lat_target_failed(struct thread_data *td) { if (td->o.latency_percentile.u.f == 100.0) return __lat_target_failed(td); td->latency_failed++; - return 0; + return false; } void lat_target_init(struct thread_data *td) @@ -1306,26 +1500,29 @@ void lat_target_check(struct thread_data *td) * If latency target is enabled, we might be ramping up or down and not * using the full queue depth available. */ -int queue_full(const struct thread_data *td) +bool queue_full(const struct thread_data *td) { const int qempty = io_u_qempty(&td->io_u_freelist); if (qempty) - return 1; + return true; if (!td->o.latency_target) - return 0; + return false; return td->cur_depth >= td->latency_qd; } struct io_u *__get_io_u(struct thread_data *td) { + const bool needs_lock = td_async_processing(td); struct io_u *io_u = NULL; + int ret; if (td->stop_io) return NULL; - td_io_u_lock(td); + if (needs_lock) + __td_io_u_lock(td); again: if (!io_u_rempty(&td->io_u_requeues)) @@ -1341,7 +1538,7 @@ again: if (io_u) { assert(io_u->flags & IO_U_F_FREE); - io_u_clear(io_u, IO_U_F_FREE | IO_U_F_NO_FILE_PUT | + io_u_clear(td, io_u, IO_U_F_FREE | IO_U_F_NO_FILE_PUT | IO_U_F_TRIMMED | IO_U_F_BARRIER | IO_U_F_VER_LIST); @@ -1349,7 +1546,7 @@ again: io_u->acct_ddir = -1; td->cur_depth++; assert(!(td->flags & TD_F_CHILD)); - io_u_set(io_u, IO_U_F_IN_CUR_DEPTH); + io_u_set(td, io_u, IO_U_F_IN_CUR_DEPTH); io_u->ipo = NULL; } else if (td_async_processing(td)) { /* @@ -1357,44 +1554,44 @@ again: * return one */ assert(!(td->flags & TD_F_CHILD)); - assert(!pthread_cond_wait(&td->free_cond, &td->io_u_lock)); + ret = pthread_cond_wait(&td->free_cond, &td->io_u_lock); + assert(ret == 0); goto again; } - td_io_u_unlock(td); + if (needs_lock) + __td_io_u_unlock(td); + return io_u; } -static int check_get_trim(struct thread_data *td, struct io_u *io_u) +static bool check_get_trim(struct thread_data *td, struct io_u *io_u) { if (!(td->flags & TD_F_TRIM_BACKLOG)) - return 0; - - if (td->trim_entries) { - int get_trim = 0; - - if (td->trim_batch) { - td->trim_batch--; - get_trim = 1; - } else if (!(td->io_hist_len % td->o.trim_backlog) && - td->last_ddir != DDIR_READ) { - td->trim_batch = td->o.trim_batch; - if (!td->trim_batch) - td->trim_batch = td->o.trim_backlog; - get_trim = 1; - } - - if (get_trim && !get_next_trim(td, io_u)) - return 1; + return false; + if (!td->trim_entries) + return false; + + if (td->trim_batch) { + td->trim_batch--; + if (get_next_trim(td, io_u)) + return true; + } else if (!(td->io_hist_len % td->o.trim_backlog) && + td->last_ddir != DDIR_READ) { + td->trim_batch = td->o.trim_batch; + if (!td->trim_batch) + td->trim_batch = td->o.trim_backlog; + if (get_next_trim(td, io_u)) + return true; } - 
return 0; + return false; } -static int check_get_verify(struct thread_data *td, struct io_u *io_u) +static bool check_get_verify(struct thread_data *td, struct io_u *io_u) { if (!(td->flags & TD_F_VER_BACKLOG)) - return 0; + return false; if (td->io_hist_len) { int get_verify = 0; @@ -1411,11 +1608,11 @@ static int check_get_verify(struct thread_data *td, struct io_u *io_u) if (get_verify && !get_next_verify(td, io_u)) { td->verify_batch--; - return 1; + return true; } } - return 0; + return false; } /* @@ -1426,32 +1623,40 @@ static int check_get_verify(struct thread_data *td, struct io_u *io_u) */ static void small_content_scramble(struct io_u *io_u) { - unsigned int i, nr_blocks = io_u->buflen / 512; - uint64_t boffset; + unsigned long long i, nr_blocks = io_u->buflen >> 9; unsigned int offset; - void *p, *end; + uint64_t boffset, *iptr; + char *p; if (!nr_blocks) return; p = io_u->xfer_buf; boffset = io_u->offset; - io_u->buf_filled_len = 0; + + if (io_u->buf_filled_len) + io_u->buf_filled_len = 0; + + /* + * Generate random index between 0..7. We do chunks of 512b, if + * we assume a cacheline is 64 bytes, then we have 8 of those. + * Scramble content within the blocks in the same cacheline to + * speed things up. + */ + offset = (io_u->start_time.tv_nsec ^ boffset) & 7; for (i = 0; i < nr_blocks; i++) { /* - * Fill the byte offset into a "random" start offset of - * the buffer, given by the product of the usec time - * and the actual offset. + * Fill offset into start of cacheline, time into end + * of cacheline */ - offset = (io_u->start_time.tv_usec ^ boffset) & 511; - offset &= ~(sizeof(uint64_t) - 1); - if (offset >= 512 - sizeof(uint64_t)) - offset -= sizeof(uint64_t); - memcpy(p + offset, &boffset, sizeof(boffset)); - - end = p + 512 - sizeof(io_u->start_time); - memcpy(end, &io_u->start_time, sizeof(io_u->start_time)); + iptr = (void *) p + (offset << 6); + *iptr = boffset; + + iptr = (void *) p + 64 - 2 * sizeof(uint64_t); + iptr[0] = io_u->start_time.tv_sec; + iptr[1] = io_u->start_time.tv_nsec; + p += 512; boffset += 512; } @@ -1459,7 +1664,7 @@ static void small_content_scramble(struct io_u *io_u) /* * Return an io_u to be processed. Gets a buflen and offset, sets direction, - * etc. The returned io_u is fully ready to be prepped and submitted. + * etc. The returned io_u is fully ready to be prepped, populated and submitted. 
*/ struct io_u *get_io_u(struct thread_data *td) { @@ -1506,7 +1711,7 @@ struct io_u *get_io_u(struct thread_data *td) assert(fio_file_open(f)); if (ddir_rw(io_u->ddir)) { - if (!io_u->buflen && !(td->io_ops->flags & FIO_NOIO)) { + if (!io_u->buflen && !td_ioengine_flagged(td, FIO_NOIO)) { dprint(FD_IO, "get_io_u: zero buflen on %p\n", io_u); goto err_put; } @@ -1518,14 +1723,11 @@ struct io_u *get_io_u(struct thread_data *td) if (td->flags & TD_F_REFILL_BUFFERS) { io_u_fill_buffer(td, io_u, td->o.min_bs[DDIR_WRITE], - io_u->xfer_buflen); + io_u->buflen); } else if ((td->flags & TD_F_SCRAMBLE_BUFFERS) && - !(td->flags & TD_F_COMPRESS)) + !(td->flags & TD_F_COMPRESS) && + !(td->flags & TD_F_DO_VERIFY)) do_scramble = 1; - if (td->flags & TD_F_VER_NONE) { - populate_verify_io_u(td, io_u); - do_scramble = 0; - } } else if (io_u->ddir == DDIR_READ) { /* * Reset the buf_filled parameters so next time if the @@ -1544,10 +1746,12 @@ struct io_u *get_io_u(struct thread_data *td) out: assert(io_u->file); if (!td_io_prep(td, io_u)) { - if (!td->o.disable_slat) + if (!td->o.disable_lat) fio_gettime(&io_u->start_time, NULL); + if (do_scramble) small_content_scramble(io_u); + return io_u; } err_put: @@ -1563,13 +1767,20 @@ static void __io_u_log_error(struct thread_data *td, struct io_u *io_u) if (td_non_fatal_error(td, eb, io_u->error) && !td->o.error_dump) return; - log_err("fio: io_u error%s%s: %s: %s offset=%llu, buflen=%lu\n", + log_err("fio: io_u error%s%s: %s: %s offset=%llu, buflen=%llu\n", io_u->file ? " on file " : "", io_u->file ? io_u->file->file_name : "", strerror(io_u->error), io_ddir_name(io_u->ddir), io_u->offset, io_u->xfer_buflen); + if (td->io_ops->errdetails) { + char *err = td->io_ops->errdetails(io_u); + + log_err("fio: %s\n", err); + free(err); + } + if (!td->error) td_verror(td, io_u->error, "io_u error"); } @@ -1578,13 +1789,13 @@ void io_u_log_error(struct thread_data *td, struct io_u *io_u) { __io_u_log_error(td, io_u); if (td->parent) - __io_u_log_error(td, io_u); + __io_u_log_error(td->parent, io_u); } -static inline int gtod_reduce(struct thread_data *td) +static inline bool gtod_reduce(struct thread_data *td) { - return td->o.disable_clat && td->o.disable_lat && td->o.disable_slat - && td->o.disable_bw; + return (td->o.disable_clat && td->o.disable_slat && td->o.disable_bw) + || td->o.gtod_reduce; } static void account_io_completion(struct thread_data *td, struct io_u *io_u, @@ -1592,45 +1803,51 @@ static void account_io_completion(struct thread_data *td, struct io_u *io_u, const enum fio_ddir idx, unsigned int bytes) { const int no_reduce = !gtod_reduce(td); - unsigned long lusec = 0; + unsigned long long llnsec = 0; + + if (td->parent) + td = td->parent; + + if (!td->o.stats || td_ioengine_flagged(td, FIO_NOSTATS)) + return; if (no_reduce) - lusec = utime_since(&io_u->issue_time, &icd->time); + llnsec = ntime_since(&io_u->issue_time, &icd->time); if (!td->o.disable_lat) { - unsigned long tusec; + unsigned long long tnsec; - tusec = utime_since(&io_u->start_time, &icd->time); - add_lat_sample(td, idx, tusec, bytes, io_u->offset); + tnsec = ntime_since(&io_u->start_time, &icd->time); + add_lat_sample(td, idx, tnsec, bytes, io_u->offset); if (td->flags & TD_F_PROFILE_OPS) { struct prof_io_ops *ops = &td->prof_io_ops; if (ops->io_u_lat) - icd->error = ops->io_u_lat(td, tusec); + icd->error = ops->io_u_lat(td, tnsec); } - if (td->o.max_latency && tusec > td->o.max_latency) - lat_fatal(td, icd, tusec, td->o.max_latency); - if (td->o.latency_target && tusec > 
td->o.latency_target) { + if (td->o.max_latency && tnsec > td->o.max_latency) + lat_fatal(td, icd, tnsec, td->o.max_latency); + if (td->o.latency_target && tnsec > td->o.latency_target) { if (lat_target_failed(td)) - lat_fatal(td, icd, tusec, td->o.latency_target); + lat_fatal(td, icd, tnsec, td->o.latency_target); } } - if (!td->o.disable_clat) { - add_clat_sample(td, idx, lusec, bytes, io_u->offset); - io_u_mark_latency(td, lusec); - } - - if (td->parent) - td = td->parent; + if (ddir_rw(idx)) { + if (!td->o.disable_clat) { + add_clat_sample(td, idx, llnsec, bytes, io_u->offset); + io_u_mark_latency(td, llnsec); + } - if (!td->o.disable_bw) - add_bw_sample(td, idx, bytes, &icd->time); + if (!td->o.disable_bw && per_unit_log(td->bw_log)) + add_bw_sample(td, io_u, bytes, llnsec); - if (no_reduce) - add_iops_sample(td, idx, bytes, &icd->time); + if (no_reduce && per_unit_log(td->iops_log)) + add_iops_sample(td, io_u, bytes); + } else if (ddir_sync(idx) && !td->o.disable_clat) + add_sync_clat_sample(&td->ts, llnsec); if (td->ts.nr_block_infos && io_u->ddir == DDIR_TRIM) { uint32_t *info = io_u_block_info(td, io_u); @@ -1646,16 +1863,32 @@ static void account_io_completion(struct thread_data *td, struct io_u *io_u, } } -static long long usec_for_io(struct thread_data *td, enum fio_ddir ddir) +static void file_log_write_comp(const struct thread_data *td, struct fio_file *f, + uint64_t offset, unsigned int bytes) { - uint64_t secs, remainder, bps, bytes; + int idx; - assert(!(td->flags & TD_F_CHILD)); - bytes = td->this_io_bytes[ddir]; - bps = td->rate_bps[ddir]; - secs = bytes / bps; - remainder = bytes % bps; - return remainder * 1000000 / bps + secs * 1000000; + if (!f) + return; + + if (f->first_write == -1ULL || offset < f->first_write) + f->first_write = offset; + if (f->last_write == -1ULL || ((offset + bytes) > f->last_write)) + f->last_write = offset + bytes; + + if (!f->last_write_comp) + return; + + idx = f->last_write_idx++; + f->last_write_comp[idx] = offset; + if (f->last_write_idx == td->o.iodepth) + f->last_write_idx = 0; +} + +static bool should_account(struct thread_data *td) +{ + return ramp_time_over(td) && (td->runstate == TD_RUNNING || + td->runstate == TD_VERIFYING); } static void io_completed(struct thread_data *td, struct io_u **io_u_ptr, @@ -1665,10 +1898,10 @@ static void io_completed(struct thread_data *td, struct io_u **io_u_ptr, enum fio_ddir ddir = io_u->ddir; struct fio_file *f = io_u->file; - dprint_io_u(io_u, "io complete"); + dprint_io_u(io_u, "complete"); assert(io_u->flags & IO_U_F_FLIGHT); - io_u_clear(io_u, IO_U_F_FLIGHT | IO_U_F_BUSY_OK); + io_u_clear(td, io_u, IO_U_F_FLIGHT | IO_U_F_BUSY_OK); /* * Mark IO ok to verify @@ -1686,69 +1919,37 @@ static void io_completed(struct thread_data *td, struct io_u **io_u_ptr, } if (ddir_sync(ddir)) { - td->last_was_sync = 1; + td->last_was_sync = true; if (f) { f->first_write = -1ULL; f->last_write = -1ULL; } + if (should_account(td)) + account_io_completion(td, io_u, icd, ddir, io_u->buflen); return; } - td->last_was_sync = 0; + td->last_was_sync = false; td->last_ddir = ddir; if (!io_u->error && ddir_rw(ddir)) { - unsigned int bytes = io_u->buflen - io_u->resid; - const enum fio_ddir oddir = ddir ^ 1; + unsigned long long bytes = io_u->buflen - io_u->resid; int ret; td->io_blocks[ddir]++; - td->this_io_blocks[ddir]++; td->io_bytes[ddir] += bytes; - if (!(io_u->flags & IO_U_F_VER_LIST)) + if (!(io_u->flags & IO_U_F_VER_LIST)) { + td->this_io_blocks[ddir]++; td->this_io_bytes[ddir] += bytes; - - if (ddir == 
DDIR_WRITE) { - if (f) { - if (f->first_write == -1ULL || - io_u->offset < f->first_write) - f->first_write = io_u->offset; - if (f->last_write == -1ULL || - ((io_u->offset + bytes) > f->last_write)) - f->last_write = io_u->offset + bytes; - } - if (td->last_write_comp) { - int idx = td->last_write_idx++; - - td->last_write_comp[idx] = io_u->offset; - if (td->last_write_idx == td->o.iodepth) - td->last_write_idx = 0; - } } - if (ramp_time_over(td) && (td->runstate == TD_RUNNING || - td->runstate == TD_VERIFYING)) { - struct thread_data *__td = td; + if (ddir == DDIR_WRITE) + file_log_write_comp(td, f, io_u->offset, bytes); + if (should_account(td)) account_io_completion(td, io_u, icd, ddir, bytes); - if (td->parent) - __td = td->parent; - - if (__should_check_rate(__td, ddir)) { - __td->rate_pending_usleep[ddir] = - (usec_for_io(__td, ddir) - - utime_since_now(&__td->start)); - } - if (ddir != DDIR_TRIM && - __should_check_rate(__td, oddir)) { - __td->rate_pending_usleep[oddir] = - (usec_for_io(__td, oddir) - - utime_since_now(&__td->start)); - } - } - icd->bytes_done[ddir] += bytes; if (io_u->end_io) { @@ -1790,7 +1991,7 @@ static void init_icd(struct thread_data *td, struct io_completion_data *icd, icd->nr = nr; icd->error = 0; - for (ddir = DDIR_READ; ddir < DDIR_RWDIR_CNT; ddir++) + for (ddir = 0; ddir < DDIR_RWDIR_CNT; ddir++) icd->bytes_done[ddir] = 0; } @@ -1829,7 +2030,7 @@ int io_u_sync_complete(struct thread_data *td, struct io_u *io_u) return -1; } - for (ddir = DDIR_READ; ddir < DDIR_RWDIR_CNT; ddir++) + for (ddir = 0; ddir < DDIR_RWDIR_CNT; ddir++) td->bytes_done[ddir] += icd.bytes_done[ddir]; return 0; @@ -1845,14 +2046,16 @@ int io_u_queued_complete(struct thread_data *td, int min_evts) int ret, ddir; struct timespec ts = { .tv_sec = 0, .tv_nsec = 0, }; - dprint(FD_IO, "io_u_queued_completed: min=%d\n", min_evts); + dprint(FD_IO, "io_u_queued_complete: min=%d\n", min_evts); if (!min_evts) tvp = &ts; else if (min_evts > td->cur_depth) min_evts = td->cur_depth; - ret = td_io_getevents(td, min_evts, td->o.iodepth_batch_complete, tvp); + /* No worries, td_io_getevents fixes min and max if they are + * set incorrectly */ + ret = td_io_getevents(td, min_evts, td->o.iodepth_batch_complete_max, tvp); if (ret < 0) { td_verror(td, -ret, "td_io_getevents"); return ret; @@ -1866,10 +2069,10 @@ int io_u_queued_complete(struct thread_data *td, int min_evts) return -1; } - for (ddir = DDIR_READ; ddir < DDIR_RWDIR_CNT; ddir++) + for (ddir = 0; ddir < DDIR_RWDIR_CNT; ddir++) td->bytes_done[ddir] += icd.bytes_done[ddir]; - return 0; + return ret; } /* @@ -1877,10 +2080,14 @@ int io_u_queued_complete(struct thread_data *td, int min_evts) */ void io_u_queued(struct thread_data *td, struct io_u *io_u) { - if (!td->o.disable_slat) { + if (!td->o.disable_slat && ramp_time_over(td) && td->o.stats) { unsigned long slat_time; - slat_time = utime_since(&io_u->start_time, &io_u->issue_time); + slat_time = ntime_since(&io_u->start_time, &io_u->issue_time); + + if (td->parent) + td = td->parent; + add_slat_sample(td, io_u->ddir, slat_time, io_u->xfer_buflen, io_u->offset); } @@ -1892,15 +2099,15 @@ void io_u_queued(struct thread_data *td, struct io_u *io_u) static struct frand_state *get_buf_state(struct thread_data *td) { unsigned int v; - unsigned long r; if (!td->o.dedupe_percentage) return &td->buf_state; - else if (td->o.dedupe_percentage == 100) - return &td->buf_state_prev; + else if (td->o.dedupe_percentage == 100) { + frand_copy(&td->buf_state_prev, &td->buf_state); + return &td->buf_state; + } 
- r = __rand(&td->dedupe_state); - v = 1 + (int) (100.0 * (r / (FRAND_MAX + 1.0))); + v = rand_between(&td->dedupe_state, 1, 100); if (v <= td->o.dedupe_percentage) return &td->buf_state_prev; @@ -1910,19 +2117,25 @@ static struct frand_state *get_buf_state(struct thread_data *td) static void save_buf_state(struct thread_data *td, struct frand_state *rs) { - if (rs == &td->buf_state) + if (td->o.dedupe_percentage == 100) + frand_copy(rs, &td->buf_state_prev); + else if (rs == &td->buf_state) frand_copy(&td->buf_state_prev, rs); } -void fill_io_buffer(struct thread_data *td, void *buf, unsigned int min_write, - unsigned int max_bs) +void fill_io_buffer(struct thread_data *td, void *buf, unsigned long long min_write, + unsigned long long max_bs) { struct thread_options *o = &td->o; + if (o->mem_type == MEM_CUDA_MALLOC) + return; + if (o->compress_percentage || o->dedupe_percentage) { unsigned int perc = td->o.compress_percentage; struct frand_state *rs; - unsigned int left = max_bs; + unsigned long long left = max_bs; + unsigned long long this_write; do { rs = get_buf_state(td); @@ -1930,20 +2143,20 @@ void fill_io_buffer(struct thread_data *td, void *buf, unsigned int min_write, min_write = min(min_write, left); if (perc) { - unsigned int seg = min_write; - - seg = min(min_write, td->o.compress_chunk); - if (!seg) - seg = min_write; + this_write = min_not_zero(min_write, + (unsigned long long) td->o.compress_chunk); - fill_random_buf_percentage(rs, buf, perc, seg, - min_write, o->buffer_pattern, - o->buffer_pattern_bytes); - } else + fill_random_buf_percentage(rs, buf, perc, + this_write, this_write, + o->buffer_pattern, + o->buffer_pattern_bytes); + } else { fill_random_buf(rs, buf, min_write); + this_write = min_write; + } - buf += min_write; - left -= min_write; + buf += this_write; + left -= this_write; save_buf_state(td, rs); } while (left); } else if (o->buffer_pattern_bytes) @@ -1958,8 +2171,66 @@ void fill_io_buffer(struct thread_data *td, void *buf, unsigned int min_write, * "randomly" fill the buffer contents */ void io_u_fill_buffer(struct thread_data *td, struct io_u *io_u, - unsigned int min_write, unsigned int max_bs) + unsigned long long min_write, unsigned long long max_bs) { io_u->buf_filled_len = 0; fill_io_buffer(td, io_u->buf, min_write, max_bs); } + +static int do_sync_file_range(const struct thread_data *td, + struct fio_file *f) +{ + off64_t offset, nbytes; + + offset = f->first_write; + nbytes = f->last_write - f->first_write; + + if (!nbytes) + return 0; + + return sync_file_range(f->fd, offset, nbytes, td->o.sync_file_range); +} + +int do_io_u_sync(const struct thread_data *td, struct io_u *io_u) +{ + int ret; + + if (io_u->ddir == DDIR_SYNC) { + ret = fsync(io_u->file->fd); + } else if (io_u->ddir == DDIR_DATASYNC) { +#ifdef CONFIG_FDATASYNC + ret = fdatasync(io_u->file->fd); +#else + ret = io_u->xfer_buflen; + io_u->error = EINVAL; +#endif + } else if (io_u->ddir == DDIR_SYNC_FILE_RANGE) + ret = do_sync_file_range(td, io_u->file); + else { + ret = io_u->xfer_buflen; + io_u->error = EINVAL; + } + + if (ret < 0) + io_u->error = errno; + + return ret; +} + +int do_io_u_trim(const struct thread_data *td, struct io_u *io_u) +{ +#ifndef FIO_HAVE_TRIM + io_u->error = EINVAL; + return 0; +#else + struct fio_file *f = io_u->file; + int ret; + + ret = os_trim(f, io_u->offset, io_u->xfer_buflen); + if (!ret) + return io_u->xfer_buflen; + + io_u->error = ret; + return 0; +#endif +}
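
The notes and sketches below illustrate a few of the techniques this patch introduces. They are standalone reconstructions for reference only, not fio code, and every helper name in them is invented.

First, the random-offset arithmetic. The patch replaces open-coded expressions like 1 + (int) (100.0 * (r / (FRAND_MAX + 1.0))) with rand_between(), and maps a raw random value onto the block range with lastb * (r / (rand_max(...) + 1.0)). A minimal sketch of that bias-free scaling, with a simple xorshift stand-in for fio's Tausworthe generator:

#include <stdint.h>
#include <stdio.h>

/* stand-in PRNG; fio uses a Tausworthe generator behind __rand() */
static uint64_t rng_state = 88172645463325252ULL;

static uint64_t rng_next(void)
{
	rng_state ^= rng_state << 13;
	rng_state ^= rng_state >> 7;
	rng_state ^= rng_state << 17;
	return rng_state;
}

/*
 * Map r in [0, rand_max] onto [0, nr_blocks) by scaling with
 * r / (rand_max + 1.0); unlike r % nr_blocks, this has no modulo bias.
 */
static uint64_t scale_block(uint64_t r, double rand_max, uint64_t nr_blocks)
{
	return nr_blocks * (r / (rand_max + 1.0));
}

/* the same idea shifted into [low, high], like fio's rand_between() */
static unsigned int between(unsigned int low, unsigned int high)
{
	double r = rng_next() / ((double) UINT64_MAX + 1.0);

	return low + (unsigned int) ((high - low + 1) * r);
}

int main(void)
{
	int i;

	for (i = 0; i < 4; i++)
		printf("block=%llu v=%u\n",
		       (unsigned long long) scale_block(rng_next(),
					(double) UINT64_MAX, 1000),
		       between(1, 100));
	return 0;
}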
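
The new FIO_RAND_DIST_ZONED path draws a value v in 1..100 and uses it to select a slice of the block range from a table of cumulative access/size percentages, then draws a uniform block inside that slice. fio expands the zone_split option into a 100-entry per-percentile table (td->zone_state_index) and indexes it directly with v - 1; the linear scan below is a simplification, and the table contents are made up (an 80/20 skew):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct zone_split {
	unsigned int access_perc;	/* cumulative share of IOs */
	unsigned int size_perc;		/* cumulative share of blocks */
};

/* 80% of accesses hit the first 20% of blocks, the rest hit the rest */
static const struct zone_split table[] = {
	{ 80, 20 },
	{ 100, 100 },
};

static uint64_t pick_block(unsigned int v /* 1..100 */, uint64_t lastb)
{
	unsigned int start_perc = 0;
	size_t i;

	for (i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
		if (v <= table[i].access_perc) {
			uint64_t start = start_perc * lastb / 100ULL;
			uint64_t span = (table[i].size_perc - start_perc) *
					lastb / 100ULL;

			/* rand() stands in for fio's zone_state PRNG */
			return start + (uint64_t) (span *
					(rand() / (RAND_MAX + 1.0)));
		}
		start_perc = table[i].size_perc;
	}
	return 0;
}

int main(void)
{
	int i;

	for (i = 0; i < 8; i++)
		printf("%llu\n", (unsigned long long)
		       pick_block(1 + rand() % 100, 1000000));
	return 0;
}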
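
The rate code switches from accumulating a pending-sleep balance (rate_pending_usleep) to tracking an absolute per-direction deadline (rate_next_io_time): an IO may be issued once its direction's deadline has passed, otherwise rate_ddir() either switches to the other direction or sleeps until the earlier deadline. A sketch of that decision, with invented names and an assumed fixed cost per IO (fio actually derives the deadline from the configured rate and bytes done):

#include <stdint.h>
#include <stdio.h>

enum dir { DIR_READ = 0, DIR_WRITE = 1 };

struct pace {
	uint64_t next_io_time[2];	/* usec, like td->rate_next_io_time */
	uint64_t usec_per_io[2];	/* assumed fixed cost per IO */
};

/* pick a direction; *sleep_usec is how long to wait before issuing */
static enum dir pace_pick(struct pace *p, enum dir ddir, uint64_t now,
			  uint64_t *sleep_usec)
{
	enum dir odir = ddir ^ 1;

	*sleep_usec = 0;
	if (p->next_io_time[ddir] <= now)
		return ddir;			/* behind rate, go now */
	if (p->next_io_time[odir] <= now)
		return odir;			/* other direction is due */

	/* both ahead of rate: wait for the earlier deadline */
	if (p->next_io_time[odir] < p->next_io_time[ddir])
		ddir = odir;
	*sleep_usec = p->next_io_time[ddir] - now;
	return ddir;
}

/* push the deadline forward after issuing one IO */
static void pace_account(struct pace *p, enum dir ddir)
{
	p->next_io_time[ddir] += p->usec_per_io[ddir];
}

int main(void)
{
	struct pace p = {
		.next_io_time = { 0, 0 },
		.usec_per_io = { 100, 400 },	/* 10k reads/s, 2.5k writes/s */
	};
	uint64_t now = 0, sleep_usec;
	int i;

	for (i = 0; i < 6; i++) {
		enum dir d = pace_pick(&p, DIR_READ, now, &sleep_usec);

		now += sleep_usec;		/* pretend we slept */
		printf("t=%llu issue %s\n", (unsigned long long) now,
		       d == DIR_READ ? "read" : "write");
		pace_account(&p, d);
	}
	return 0;
}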
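
Finally, small_content_scramble() now works in cacheline-sized strides: for each 512-byte chunk it stores the block offset at the start of one of the eight cachelines (picked from the low bits of time xor offset) and the timestamp at the end of the first cacheline, keeping write buffers unique without the cost of a full refill. A sketch under the assumption that the buffer is 8-byte aligned and a multiple of 512 bytes, as fio's IO buffers are:

#include <stdint.h>
#include <time.h>

/*
 * Poke per-block "uniqueness" into an already filled buffer: the block
 * offset lands in one cacheline, the timestamp at the end of the first.
 */
static void scramble(char *buf, uint64_t file_offset, uint64_t len,
		     const struct timespec *now)
{
	uint64_t boffset = file_offset, nr_blocks = len >> 9, i;
	unsigned int cacheline = (now->tv_nsec ^ file_offset) & 7;
	char *p = buf;

	for (i = 0; i < nr_blocks; i++) {
		uint64_t *iptr;

		/* block offset into the start of the chosen cacheline */
		iptr = (uint64_t *) (p + (cacheline << 6));
		*iptr = boffset;

		/* timestamp into the end of the first cacheline */
		iptr = (uint64_t *) (p + 64 - 2 * sizeof(uint64_t));
		iptr[0] = (uint64_t) now->tv_sec;
		iptr[1] = (uint64_t) now->tv_nsec;

		p += 512;
		boffset += 512;
	}
}

int main(void)
{
	static uint64_t storage[512];	/* 4096 bytes, 8-byte aligned */
	struct timespec now = { .tv_sec = 1, .tv_nsec = 123456789 };

	scramble((char *) storage, 1ULL << 20, sizeof(storage), &now);
	return 0;
}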