X-Git-Url: https://git.kernel.dk/?p=fio.git;a=blobdiff_plain;f=io_u.c;h=3299f2907af47bb3e6d7b59d2c35aad004509df2;hp=9f10206b95e8b1e80d14504c35159284cf238802;hb=1ec4db15283551fc135b40dbd8abc07d4f67743e;hpb=50a8ce864e2c5bee7c44935b39b357aa8071615b

diff --git a/io_u.c b/io_u.c
index 9f10206b..3299f290 100644
--- a/io_u.c
+++ b/io_u.c
@@ -13,6 +13,7 @@
 #include "lib/axmap.h"
 #include "err.h"
 #include "lib/pow2.h"
+#include "minmax.h"
 
 struct io_completion_data {
 	int nr;				/* input */
@@ -26,7 +27,7 @@ struct io_completion_data {
  * The ->io_axmap contains a map of blocks we have or have not done io
  * to yet. Used to make sure we cover the entire range in a fair fashion.
  */
-static int random_map_free(struct fio_file *f, const uint64_t block)
+static bool random_map_free(struct fio_file *f, const uint64_t block)
 {
 	return !axmap_isset(f->io_axmap, block);
 }
@@ -85,24 +86,19 @@ struct rand_off {
 };
 
 static int __get_next_rand_offset(struct thread_data *td, struct fio_file *f,
-				  enum fio_ddir ddir, uint64_t *b)
+				  enum fio_ddir ddir, uint64_t *b,
+				  uint64_t lastb)
 {
 	uint64_t r;
 
 	if (td->o.random_generator == FIO_RAND_GEN_TAUSWORTHE ||
 	    td->o.random_generator == FIO_RAND_GEN_TAUSWORTHE64) {
-		uint64_t frand_max, lastb;
-		lastb = last_block(td, f, ddir);
-		if (!lastb)
-			return 1;
-
-		frand_max = rand_max(&td->random_state);
 		r = __rand(&td->random_state);
 
 		dprint(FD_RANDOM, "off rand %llu\n", (unsigned long long) r);
 
-		*b = lastb * (r / ((uint64_t) frand_max + 1.0));
+		*b = lastb * (r / (rand_max(&td->random_state) + 1.0));
 	} else {
 		uint64_t off = 0;
@@ -160,6 +156,70 @@ static int __get_next_rand_offset_gauss(struct thread_data *td,
 	return 0;
 }
 
+static int __get_next_rand_offset_zoned(struct thread_data *td,
+					struct fio_file *f, enum fio_ddir ddir,
+					uint64_t *b)
+{
+	unsigned int v, send, stotal;
+	uint64_t offset, lastb;
+	static int warned;
+	struct zone_split_index *zsi;
+
+	lastb = last_block(td, f, ddir);
+	if (!lastb)
+		return 1;
+
+	if (!td->o.zone_split_nr[ddir]) {
+bail:
+		return __get_next_rand_offset(td, f, ddir, b, lastb);
+	}
+
+	/*
+	 * Generate a value, v, between 1 and 100, both inclusive
+	 */
+	v = rand_between(&td->zone_state, 1, 100);
+
+	zsi = &td->zone_state_index[ddir][v - 1];
+	stotal = zsi->size_perc_prev;
+	send = zsi->size_perc;
+
+	/*
+	 * Should never happen
+	 */
+	if (send == -1U) {
+		if (!warned) {
+			log_err("fio: bug in zoned generation\n");
+			warned = 1;
+		}
+		goto bail;
+	}
+
+	/*
+	 * 'send' is some percentage below or equal to 100 that
+	 * marks the end of the current IO range. 'stotal' marks
+	 * the start, in percent.
+	 */
+	if (stotal)
+		offset = stotal * lastb / 100ULL;
+	else
+		offset = 0;
+
+	lastb = lastb * (send - stotal) / 100ULL;
+
+	/*
+	 * Generate index from 0..send-of-lastb
+	 */
+	if (__get_next_rand_offset(td, f, ddir, b, lastb) == 1)
+		return 1;
+
+	/*
+	 * Add our start offset, if any
+	 */
+	if (offset)
+		*b += offset;
+
+	return 0;
+}
 
 static int flist_cmp(void *data, struct flist_head *a, struct flist_head *b)
 {
@@ -172,14 +232,22 @@ static int flist_cmp(void *data, struct flist_head *a, struct flist_head *b)
 static int get_off_from_method(struct thread_data *td, struct fio_file *f,
 			       enum fio_ddir ddir, uint64_t *b)
 {
-	if (td->o.random_distribution == FIO_RAND_DIST_RANDOM)
-		return __get_next_rand_offset(td, f, ddir, b);
-	else if (td->o.random_distribution == FIO_RAND_DIST_ZIPF)
+	if (td->o.random_distribution == FIO_RAND_DIST_RANDOM) {
+		uint64_t lastb;
+
+		lastb = last_block(td, f, ddir);
+		if (!lastb)
+			return 1;
+
+		return __get_next_rand_offset(td, f, ddir, b, lastb);
+	} else if (td->o.random_distribution == FIO_RAND_DIST_ZIPF)
 		return __get_next_rand_offset_zipf(td, f, ddir, b);
 	else if (td->o.random_distribution == FIO_RAND_DIST_PARETO)
 		return __get_next_rand_offset_pareto(td, f, ddir, b);
 	else if (td->o.random_distribution == FIO_RAND_DIST_GAUSS)
 		return __get_next_rand_offset_gauss(td, f, ddir, b);
+	else if (td->o.random_distribution == FIO_RAND_DIST_ZONED)
+		return __get_next_rand_offset_zoned(td, f, ddir, b);
 
 	log_err("fio: unknown random distribution: %d\n",
 		td->o.random_distribution);
 	return 1;
@@ -189,33 +257,29 @@ static int get_off_from_method(struct thread_data *td, struct fio_file *f,
  * Sort the reads for a verify phase in batches of verifysort_nr, if
  * specified.
  */
-static inline int should_sort_io(struct thread_data *td)
+static inline bool should_sort_io(struct thread_data *td)
 {
 	if (!td->o.verifysort_nr || !td->o.do_verify)
-		return 0;
+		return false;
 	if (!td_random(td))
-		return 0;
+		return false;
 	if (td->runstate != TD_VERIFYING)
-		return 0;
+		return false;
 	if (td->o.random_generator == FIO_RAND_GEN_TAUSWORTHE ||
 	    td->o.random_generator == FIO_RAND_GEN_TAUSWORTHE64)
-		return 0;
+		return false;
 
-	return 1;
+	return true;
 }
 
-static int should_do_random(struct thread_data *td, enum fio_ddir ddir)
+static bool should_do_random(struct thread_data *td, enum fio_ddir ddir)
 {
-	uint64_t frand_max;
 	unsigned int v;
-	unsigned long r;
 
 	if (td->o.perc_rand[ddir] == 100)
-		return 1;
+		return true;
 
-	frand_max = rand_max(&td->seq_rand_state[ddir]);
-	r = __rand(&td->seq_rand_state[ddir]);
-	v = 1 + (int) (100.0 * (r / (frand_max + 1.0)));
+	v = rand_between(&td->seq_rand_state[ddir], 1, 100);
 
 	return v <= td->o.perc_rand[ddir];
 }
@@ -284,8 +348,15 @@ static int get_next_seq_offset(struct thread_data *td, struct fio_file *f,
 	assert(ddir_rw(ddir));
 
 	if (f->last_pos[ddir] >= f->io_size + get_start_offset(td, f) &&
-	    o->time_based)
-		f->last_pos[ddir] = f->last_pos[ddir] - f->io_size;
+	    o->time_based) {
+		struct thread_options *o = &td->o;
+		uint64_t io_size = f->io_size + (f->io_size % o->min_bs[ddir]);
+
+		if (io_size > f->last_pos[ddir])
+			f->last_pos[ddir] = 0;
+		else
+			f->last_pos[ddir] = f->last_pos[ddir] - io_size;
+	}
 
 	if (f->last_pos[ddir] < f->real_file_size) {
 		uint64_t pos;
@@ -430,8 +501,8 @@ static int get_next_offset(struct thread_data *td, struct io_u *io_u,
 	return __get_next_offset(td, io_u, is_random);
 }
 
-static inline int io_u_fits(struct thread_data *td, struct io_u *io_u,
-			    unsigned int buflen)
+static inline bool io_u_fits(struct thread_data *td, struct io_u *io_u,
+			     unsigned int buflen)
 {
 	struct fio_file *f = io_u->file;
 
@@ -482,13 +553,13 @@ static unsigned int __get_next_buflen(struct thread_data *td, struct io_u *io_u,
 				buflen = bsp->bs;
 				perc += bsp->perc;
-				if ((r <= ((frand_max / 100L) * perc)) &&
+				if ((r * 100UL <= frand_max * perc) &&
 				    io_u_fits(td, io_u, buflen))
 					break;
 			}
 		}
 
-	if (td->o.do_verify && td->o.verify != VERIFY_NONE)
+	if (td->o.verify != VERIFY_NONE)
 		buflen = (buflen + td->o.verify_interval - 1) &
 			~(td->o.verify_interval - 1);
 
@@ -528,12 +599,9 @@ static void set_rwmix_bytes(struct thread_data *td)
 
 static inline enum fio_ddir get_rand_ddir(struct thread_data *td)
 {
-	uint64_t frand_max = rand_max(&td->rwmix_state);
 	unsigned int v;
-	unsigned long r;
 
-	r = __rand(&td->rwmix_state);
-	v = 1 + (int) (100.0 * (r / (frand_max + 1.0)));
+	v = rand_between(&td->rwmix_state, 1, 100);
 
 	if (v <= td->o.rwmix[DDIR_READ])
 		return DDIR_READ;
@@ -541,8 +609,10 @@ static inline enum fio_ddir get_rand_ddir(struct thread_data *td)
 	return DDIR_WRITE;
 }
 
-void io_u_quiesce(struct thread_data *td)
+int io_u_quiesce(struct thread_data *td)
 {
+	int completed = 0;
+
 	/*
 	 * We are going to sleep, ensure that we flush anything pending as
 	 * not to skew our latency numbers.
@@ -562,7 +632,11 @@ void io_u_quiesce(struct thread_data *td)
 		int fio_unused ret;
 
 		ret = io_u_queued_complete(td, 1);
+		if (ret > 0)
+			completed += ret;
 	}
+
+	return completed;
 }
 
 static enum fio_ddir rate_ddir(struct thread_data *td, enum fio_ddir ddir)
@@ -1190,10 +1264,10 @@ static void lat_new_cycle(struct thread_data *td)
  * We had an IO outside the latency target. Reduce the queue depth. If we
  * are at QD=1, then it's time to give up.
  */
-static int __lat_target_failed(struct thread_data *td)
+static bool __lat_target_failed(struct thread_data *td)
 {
 	if (td->latency_qd == 1)
-		return 1;
+		return true;
 
 	td->latency_qd_high = td->latency_qd;
 
@@ -1210,16 +1284,16 @@
 	 */
 	io_u_quiesce(td);
 	lat_new_cycle(td);
-	return 0;
+	return false;
 }
 
-static int lat_target_failed(struct thread_data *td)
+static bool lat_target_failed(struct thread_data *td)
 {
 	if (td->o.latency_percentile.u.f == 100.0)
 		return __lat_target_failed(td);
 
 	td->latency_failed++;
-	return 0;
+	return false;
 }
 
 void lat_target_init(struct thread_data *td)
@@ -1314,14 +1388,14 @@ void lat_target_check(struct thread_data *td)
  * If latency target is enabled, we might be ramping up or down and not
  * using the full queue depth available.
  */
-int queue_full(const struct thread_data *td)
+bool queue_full(const struct thread_data *td)
 {
 	const int qempty = io_u_qempty(&td->io_u_freelist);
 
 	if (qempty)
-		return 1;
+		return true;
 	if (!td->o.latency_target)
-		return 0;
+		return false;
 
 	return td->cur_depth >= td->latency_qd;
 }
@@ -1373,10 +1447,10 @@ again:
 	return io_u;
 }
 
-static int check_get_trim(struct thread_data *td, struct io_u *io_u)
+static bool check_get_trim(struct thread_data *td, struct io_u *io_u)
 {
 	if (!(td->flags & TD_F_TRIM_BACKLOG))
-		return 0;
+		return false;
 
 	if (td->trim_entries) {
 		int get_trim = 0;
@@ -1393,16 +1467,16 @@ static int check_get_trim(struct thread_data *td, struct io_u *io_u)
 		}
 
 		if (get_trim && !get_next_trim(td, io_u))
-			return 1;
+			return true;
 	}
 
-	return 0;
+	return false;
 }
 
-static int check_get_verify(struct thread_data *td, struct io_u *io_u)
+static bool check_get_verify(struct thread_data *td, struct io_u *io_u)
 {
 	if (!(td->flags & TD_F_VER_BACKLOG))
-		return 0;
+		return false;
 
 	if (td->io_hist_len) {
 		int get_verify = 0;
@@ -1419,11 +1493,11 @@ static int check_get_verify(struct thread_data *td, struct io_u *io_u)
 		if (get_verify && !get_next_verify(td, io_u)) {
 			td->verify_batch--;
-			return 1;
+			return true;
 		}
 	}
 
-	return 0;
+	return false;
 }
 
 /*
@@ -1526,7 +1600,7 @@ struct io_u *get_io_u(struct thread_data *td)
 		if (td->flags & TD_F_REFILL_BUFFERS) {
 			io_u_fill_buffer(td, io_u, td->o.min_bs[DDIR_WRITE],
-							io_u->xfer_buflen);
+							io_u->buflen);
 		} else if ((td->flags & TD_F_SCRAMBLE_BUFFERS) &&
 			   !(td->flags & TD_F_COMPRESS))
 			do_scramble = 1;
@@ -1552,7 +1626,7 @@ struct io_u *get_io_u(struct thread_data *td)
 out:
 	assert(io_u->file);
 	if (!td_io_prep(td, io_u)) {
-		if (!td->o.disable_slat)
+		if (!td->o.disable_lat)
 			fio_gettime(&io_u->start_time, NULL);
 		if (do_scramble)
 			small_content_scramble(io_u);
@@ -1578,6 +1652,13 @@ static void __io_u_log_error(struct thread_data *td, struct io_u *io_u)
 			io_ddir_name(io_u->ddir),
 			io_u->offset, io_u->xfer_buflen);
 
+	if (td->io_ops->errdetails) {
+		char *err = td->io_ops->errdetails(io_u);
+
+		log_err("fio: %s\n", err);
+		free(err);
+	}
+
 	if (!td->error)
 		td_verror(td, io_u->error, "io_u error");
 }
@@ -1586,13 +1667,13 @@ void io_u_log_error(struct thread_data *td, struct io_u *io_u)
 {
 	__io_u_log_error(td, io_u);
 	if (td->parent)
-		__io_u_log_error(td, io_u);
+		__io_u_log_error(td->parent, io_u);
 }
 
-static inline int gtod_reduce(struct thread_data *td)
+static inline bool gtod_reduce(struct thread_data *td)
 {
-	return td->o.disable_clat && td->o.disable_lat && td->o.disable_slat
-		&& td->o.disable_bw;
+	return (td->o.disable_clat && td->o.disable_slat && td->o.disable_bw)
+		|| td->o.gtod_reduce;
 }
 
 static void account_io_completion(struct thread_data *td, struct io_u *io_u,
@@ -1829,7 +1910,9 @@ int io_u_queued_complete(struct thread_data *td, int min_evts)
 	else if (min_evts > td->cur_depth)
 		min_evts = td->cur_depth;
 
-	ret = td_io_getevents(td, min_evts, td->o.iodepth_batch_complete, tvp);
+	/* No worries, td_io_getevents fixes min and max if they are
+	 * set incorrectly */
+	ret = td_io_getevents(td, min_evts, td->o.iodepth_batch_complete_max, tvp);
 	if (ret < 0) {
 		td_verror(td, -ret, "td_io_getevents");
 		return ret;
@@ -1846,7 +1929,7 @@ int io_u_queued_complete(struct thread_data *td, int min_evts)
 	for (ddir = DDIR_READ; ddir < DDIR_RWDIR_CNT; ddir++)
 		td->bytes_done[ddir] += icd.bytes_done[ddir];
 
-	return 0;
+	return ret;
 }
 
 /*
@@ -1872,9 +1955,7 @@ void io_u_queued(struct thread_data *td, struct io_u *io_u)
  */
 static struct frand_state *get_buf_state(struct thread_data *td)
 {
-	uint64_t frand_max;
 	unsigned int v;
-	unsigned long r;
 
 	if (!td->o.dedupe_percentage)
 		return &td->buf_state;
@@ -1883,9 +1964,7 @@ static struct frand_state *get_buf_state(struct thread_data *td)
 		return &td->buf_state;
 	}
 
-	frand_max = rand_max(&td->dedupe_state);
-	r = __rand(&td->dedupe_state);
-	v = 1 + (int) (100.0 * (r / (frand_max + 1.0)));
+	v = rand_between(&td->dedupe_state, 1, 100);
 
 	if (v <= td->o.dedupe_percentage)
 		return &td->buf_state_prev;
@@ -1910,6 +1989,7 @@ void fill_io_buffer(struct thread_data *td, void *buf, unsigned int min_write,
 		unsigned int perc = td->o.compress_percentage;
 		struct frand_state *rs;
 		unsigned int left = max_bs;
+		unsigned int this_write;
 
 		do {
 			rs = get_buf_state(td);
@@ -1917,20 +1997,20 @@ void fill_io_buffer(struct thread_data *td, void *buf, unsigned int min_write,
 			min_write = min(min_write, left);
 
 			if (perc) {
-				unsigned int seg = min_write;
-
-				seg = min(min_write, td->o.compress_chunk);
-				if (!seg)
-					seg = min_write;
+				this_write = min_not_zero(min_write,
							td->o.compress_chunk);
 
-				fill_random_buf_percentage(rs, buf, perc, seg,
-					min_write, o->buffer_pattern,
-					o->buffer_pattern_bytes);
-			} else
+				fill_random_buf_percentage(rs, buf, perc,
+					this_write, this_write,
+					o->buffer_pattern,
+					o->buffer_pattern_bytes);
+			} else {
 				fill_random_buf(rs, buf, min_write);
+				this_write = min_write;
+			}
 
-			buf += min_write;
-			left -= min_write;
+			buf += this_write;
+			left -= this_write;
 			save_buf_state(td, rs);
 		} while (left);
 	} else if (o->buffer_pattern_bytes)
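
The standalone sketch below (not part of the patch) illustrates the arithmetic behind the new FIO_RAND_DIST_ZONED path added above: a 1-100 roll picks a zone, and the random block index is then scaled into that zone's slice of the block range. The two-entry zone table and the plain rand() calls are illustrative stand-ins for td->zone_state_index[] and fio's own rand_between()/__rand() helpers.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical zone entry: start/end of the zone as a percentage of all blocks */
struct zone { unsigned int start_perc, end_perc; };

/* Confine a uniform block pick to one zone, mirroring the offset/span math above */
static uint64_t zoned_block(uint64_t lastb, const struct zone *z)
{
	uint64_t offset = (uint64_t) z->start_perc * lastb / 100ULL;	/* first block of the zone */
	uint64_t span = lastb * (z->end_perc - z->start_perc) / 100ULL;	/* blocks in the zone */

	/* uniform index inside the zone; the patch uses fio's own PRNG here */
	return offset + (span ? (uint64_t) rand() % span : 0);
}

int main(void)
{
	/* illustrative split: most accesses land in the first 10% of blocks */
	struct zone hot = { 0, 10 }, cold = { 10, 100 };
	unsigned int v = 1 + rand() % 100;	/* like rand_between(&td->zone_state, 1, 100) */
	const struct zone *z = (v <= 80) ? &hot : &cold;

	printf("block %llu\n", (unsigned long long) zoned_block(1000000ULL, z));
	return 0;
}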