X-Git-Url: https://git.kernel.dk/?p=fio.git;a=blobdiff_plain;f=io_u.c;h=3b27d5ed7bbab2bdd0e6b49c0071d1709b4559dc;hp=69ca6fe06ff8abfa30be5694a24bbca810c3e5f8;hb=9e129577b8799a9578a6aa6021a58367981e320a;hpb=732eedd0666bbe555d97b04801998ea8c63e0236

diff --git a/io_u.c b/io_u.c
index 69ca6fe0..3b27d5ed 100644
--- a/io_u.c
+++ b/io_u.c
@@ -12,6 +12,8 @@
 #include "lib/rand.h"
 #include "lib/axmap.h"
 #include "err.h"
+#include "lib/pow2.h"
+#include "minmax.h"
 
 struct io_completion_data {
 	int nr;				/* input */
@@ -88,18 +90,20 @@ static int __get_next_rand_offset(struct thread_data *td, struct fio_file *f,
 {
 	uint64_t r;
 
-	if (td->o.random_generator == FIO_RAND_GEN_TAUSWORTHE) {
-		uint64_t lastb;
+	if (td->o.random_generator == FIO_RAND_GEN_TAUSWORTHE ||
+	    td->o.random_generator == FIO_RAND_GEN_TAUSWORTHE64) {
+		uint64_t frand_max, lastb;
 
 		lastb = last_block(td, f, ddir);
 		if (!lastb)
 			return 1;
 
+		frand_max = rand_max(&td->random_state);
 		r = __rand(&td->random_state);
 
 		dprint(FD_RANDOM, "off rand %llu\n", (unsigned long long) r);
 
-		*b = lastb * (r / ((uint64_t) FRAND_MAX + 1.0));
+		*b = lastb * (r / ((uint64_t) frand_max + 1.0));
 	} else {
 		uint64_t off = 0;
 
@@ -194,7 +198,8 @@ static inline int should_sort_io(struct thread_data *td)
 		return 0;
 	if (td->runstate != TD_VERIFYING)
 		return 0;
-	if (td->o.random_generator == FIO_RAND_GEN_TAUSWORTHE)
+	if (td->o.random_generator == FIO_RAND_GEN_TAUSWORTHE ||
+	    td->o.random_generator == FIO_RAND_GEN_TAUSWORTHE64)
 		return 0;
 
 	return 1;
@@ -202,14 +207,16 @@ static inline int should_sort_io(struct thread_data *td)
 
 static int should_do_random(struct thread_data *td, enum fio_ddir ddir)
 {
+	uint64_t frand_max;
 	unsigned int v;
 	unsigned long r;
 
 	if (td->o.perc_rand[ddir] == 100)
 		return 1;
 
+	frand_max = rand_max(&td->seq_rand_state[ddir]);
 	r = __rand(&td->seq_rand_state[ddir]);
-	v = 1 + (int) (100.0 * (r / (FRAND_MAX + 1.0)));
+	v = 1 + (int) (100.0 * (r / (frand_max + 1.0)));
 
 	return v <= td->o.perc_rand[ddir];
 }
@@ -438,6 +445,7 @@ static unsigned int __get_next_buflen(struct thread_data *td, struct io_u *io_u,
 	int ddir = io_u->ddir;
 	unsigned int buflen = 0;
 	unsigned int minbs, maxbs;
+	uint64_t frand_max;
 	unsigned long r;
 
 	assert(ddir_rw(ddir));
@@ -457,12 +465,13 @@ static unsigned int __get_next_buflen(struct thread_data *td, struct io_u *io_u,
 	if (!io_u_fits(td, io_u, minbs))
 		return 0;
 
+	frand_max = rand_max(&td->bsrange_state);
 	do {
 		r = __rand(&td->bsrange_state);
 
 		if (!td->o.bssplit_nr[ddir]) {
 			buflen = 1 + (unsigned int) ((double) maxbs *
-					(r / (FRAND_MAX + 1.0)));
+					(r / (frand_max + 1.0)));
 			if (buflen < minbs)
 				buflen = minbs;
 		} else {
@@ -474,13 +483,13 @@ static unsigned int __get_next_buflen(struct thread_data *td, struct io_u *io_u,
 
 				buflen = bsp->bs;
 				perc += bsp->perc;
-				if ((r <= ((FRAND_MAX / 100L) * perc)) &&
+				if ((r <= ((frand_max / 100L) * perc)) &&
 				    io_u_fits(td, io_u, buflen))
 					break;
 			}
 		}
 
-		if (td->o.do_verify && td->o.verify != VERIFY_NONE)
+		if (td->o.verify != VERIFY_NONE)
 			buflen = (buflen + td->o.verify_interval - 1) &
 				~(td->o.verify_interval - 1);
 
@@ -520,11 +529,12 @@ static void set_rwmix_bytes(struct thread_data *td)
 
 static inline enum fio_ddir get_rand_ddir(struct thread_data *td)
 {
+	uint64_t frand_max = rand_max(&td->rwmix_state);
 	unsigned int v;
 	unsigned long r;
 
 	r = __rand(&td->rwmix_state);
-	v = 1 + (int) (100.0 * (r / (FRAND_MAX + 1.0)));
+	v = 1 + (int) (100.0 * (r / (frand_max + 1.0)));
 
 	if (v <= td->o.rwmix[DDIR_READ])
 		return DDIR_READ;
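Every hunk so far makes the same substitution: the compile-time FRAND_MAX constant gives way to a per-state rand_max() query, so the arithmetic works unchanged for both the 32-bit tausworthe generator and the new TAUSWORTHE64 variant. The following stand-alone sketch is not fio code; it only illustrates the scaling idiom these hunks converge on, assuming that rand_max() reports the generator's inclusive maximum value.

/*
 * Sketch of the FRAND_MAX -> rand_max() scaling idiom (not fio code).
 * frand_max is whatever the active generator reports as its inclusive
 * maximum: 2^32 - 1 for 32-bit tausworthe, 2^64 - 1 for the 64-bit one.
 */
#include <stdint.h>

static uint64_t scale_rand(uint64_t r, uint64_t frand_max, uint64_t range)
{
	/*
	 * r / (frand_max + 1.0) lands in [0.0, 1.0) (up to double
	 * rounding at the 64-bit extreme), so the product is spread
	 * over [0, range) regardless of the generator's word width.
	 */
	return range * (r / ((double) frand_max + 1.0));
}

Because the divisor is queried from the state rather than hard-coded, the same expression serves offsets, block sizes, percentages, and file selection below.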
@@ -559,49 +569,47 @@ void io_u_quiesce(struct thread_data *td)
 
 static enum fio_ddir rate_ddir(struct thread_data *td, enum fio_ddir ddir)
 {
 	enum fio_ddir odir = ddir ^ 1;
-	long usec;
+	long usec, now;
 
 	assert(ddir_rw(ddir));
+	now = utime_since_now(&td->start);
 
-	if (td->rate_pending_usleep[ddir] <= 0)
+	/*
+	 * if rate_next_io_time is in the past, need to catch up to rate
+	 */
+	if (td->rate_next_io_time[ddir] <= now)
 		return ddir;
 
 	/*
-	 * We have too much pending sleep in this direction. See if we
+	 * We are ahead of rate in this direction. See if we
 	 * should switch.
 	 */
 	if (td_rw(td) && td->o.rwmix[odir]) {
 		/*
-		 * Other direction does not have too much pending, switch
+		 * Other direction is behind rate, switch
 		 */
-		if (td->rate_pending_usleep[odir] < 100000)
+		if (td->rate_next_io_time[odir] <= now)
			return odir;
 
 		/*
-		 * Both directions have pending sleep. Sleep the minimum time
-		 * and deduct from both.
+		 * Both directions are ahead of rate. Sleep the min
+		 * time and switch directions if necessary.
 		 */
-		if (td->rate_pending_usleep[ddir] <=
-			td->rate_pending_usleep[odir]) {
-			usec = td->rate_pending_usleep[ddir];
+		if (td->rate_next_io_time[ddir] <=
+			td->rate_next_io_time[odir]) {
+			usec = td->rate_next_io_time[ddir] - now;
 		} else {
-			usec = td->rate_pending_usleep[odir];
+			usec = td->rate_next_io_time[odir] - now;
 			ddir = odir;
 		}
 	} else
-		usec = td->rate_pending_usleep[ddir];
+		usec = td->rate_next_io_time[ddir] - now;
 
 	if (td->o.io_submit_mode == IO_MODE_INLINE)
 		io_u_quiesce(td);
 
 	usec = usec_sleep(td, usec);
 
-	td->rate_pending_usleep[ddir] -= usec;
-
-	odir = ddir ^ 1;
-	if (td_rw(td) && __should_check_rate(td, odir))
-		td->rate_pending_usleep[odir] -= usec;
-
 	return ddir;
 }
 
@@ -991,6 +999,7 @@ static struct fio_file *get_next_file_rand(struct thread_data *td,
 					   enum fio_file_flags goodf,
 					   enum fio_file_flags badf)
 {
+	uint64_t frand_max = rand_max(&td->next_file_state);
 	struct fio_file *f;
 	int fno;
 
@@ -1000,7 +1009,7 @@ static struct fio_file *get_next_file_rand(struct thread_data *td,
 
 		r = __rand(&td->next_file_state);
 		fno = (unsigned int) ((double) td->o.nr_files
-				* (r / (FRAND_MAX + 1.0)));
+				* (r / (frand_max + 1.0)));
 
 		f = td->files[fno];
 		if (fio_file_done(f))
@@ -1518,7 +1527,7 @@ struct io_u *get_io_u(struct thread_data *td)
 			if (td->flags & TD_F_REFILL_BUFFERS) {
 				io_u_fill_buffer(td, io_u,
 					td->o.min_bs[DDIR_WRITE],
-					io_u->xfer_buflen);
+					io_u->buflen);
 			} else if ((td->flags & TD_F_SCRAMBLE_BUFFERS) &&
 				   !(td->flags & TD_F_COMPRESS))
 				do_scramble = 1;
@@ -1570,6 +1579,13 @@ static void __io_u_log_error(struct thread_data *td, struct io_u *io_u)
 		io_ddir_name(io_u->ddir),
 		io_u->offset, io_u->xfer_buflen);
 
+	if (td->io_ops->errdetails) {
+		char *err = td->io_ops->errdetails(io_u);
+
+		log_err("fio: %s\n", err);
+		free(err);
+	}
+
 	if (!td->error)
 		td_verror(td, io_u->error, "io_u error");
 }
@@ -1594,6 +1610,9 @@ static void account_io_completion(struct thread_data *td, struct io_u *io_u,
 	const int no_reduce = !gtod_reduce(td);
 	unsigned long lusec = 0;
 
+	if (td->parent)
+		td = td->parent;
+
 	if (no_reduce)
 		lusec = utime_since(&io_u->issue_time, &icd->time);
 
@@ -1623,9 +1642,6 @@ static void account_io_completion(struct thread_data *td, struct io_u *io_u,
 		io_u_mark_latency(td, lusec);
 	}
 
-	if (td->parent)
-		td = td->parent;
-
 	if (!td->o.disable_bw)
 		add_bw_sample(td, idx, bytes, &icd->time);
 
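The rate_ddir() rewrite above swaps the accounting model: instead of every completion recomputing a signed "pending sleep" debt per direction, each direction carries one absolute timestamp, the earliest time its next I/O may issue. The toy model below is not fio's implementation; it only isolates the resulting decision, assuming "now" comes from the same usecs-since-thread-start clock as utime_since_now(&td->start).

/*
 * Toy model (not fio code) of the rate_next_io_time bookkeeping.
 * A timestamp in the past means this direction is behind its target
 * rate and may issue immediately; a timestamp in the future says
 * exactly how long to sleep, with no per-completion debt to maintain.
 */
struct toy_rate {
	long next_io_time[2];		/* usecs since start, per data direction */
};

static long toy_sleep_usecs(const struct toy_rate *rt, int ddir, long now)
{
	if (rt->next_io_time[ddir] <= now)
		return 0;			/* behind rate: catch up */
	return rt->next_io_time[ddir] - now;	/* ahead: sleep this long */
}

This is also why the hunk below can delete usec_for_io() outright: nothing needs to reconstruct elapsed-bytes-over-bps after the fact anymore.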
@@ -1646,18 +1662,6 @@ static void account_io_completion(struct thread_data *td, struct io_u *io_u,
 	}
 }
 
-static long long usec_for_io(struct thread_data *td, enum fio_ddir ddir)
-{
-	uint64_t secs, remainder, bps, bytes;
-
-	assert(!(td->flags & TD_F_CHILD));
-	bytes = td->this_io_bytes[ddir];
-	bps = td->rate_bps[ddir];
-	secs = bytes / bps;
-	remainder = bytes % bps;
-	return remainder * 1000000 / bps + secs * 1000000;
-}
-
 static void io_completed(struct thread_data *td, struct io_u **io_u_ptr,
 			 struct io_completion_data *icd)
 {
@@ -1699,7 +1703,6 @@ static void io_completed(struct thread_data *td, struct io_u **io_u_ptr,
 
 	if (!io_u->error && ddir_rw(ddir)) {
 		unsigned int bytes = io_u->buflen - io_u->resid;
-		const enum fio_ddir oddir = ddir ^ 1;
 		int ret;
 
 		td->io_blocks[ddir]++;
@@ -1728,27 +1731,9 @@ static void io_completed(struct thread_data *td, struct io_u **io_u_ptr,
 		}
 
 		if (ramp_time_over(td) && (td->runstate == TD_RUNNING ||
-					   td->runstate == TD_VERIFYING)) {
-			struct thread_data *__td = td;
-
+					   td->runstate == TD_VERIFYING))
 			account_io_completion(td, io_u, icd, ddir, bytes);
 
-			if (td->parent)
-				__td = td->parent;
-
-			if (__should_check_rate(__td, ddir)) {
-				__td->rate_pending_usleep[ddir] =
-					(usec_for_io(__td, ddir) -
-					 utime_since_now(&__td->start));
-			}
-			if (ddir != DDIR_TRIM &&
-			    __should_check_rate(__td, oddir)) {
-				__td->rate_pending_usleep[oddir] =
-					(usec_for_io(__td, oddir) -
-					 utime_since_now(&__td->start));
-			}
-		}
-
 		icd->bytes_done[ddir] += bytes;
 
 		if (io_u->end_io) {
@@ -1852,7 +1837,9 @@ int io_u_queued_complete(struct thread_data *td, int min_evts)
 	else if (min_evts > td->cur_depth)
 		min_evts = td->cur_depth;
 
-	ret = td_io_getevents(td, min_evts, td->o.iodepth_batch_complete, tvp);
+	/* No worries, td_io_getevents fixes min and max if they are
+	 * set incorrectly */
+	ret = td_io_getevents(td, min_evts, td->o.iodepth_batch_complete_max, tvp);
 	if (ret < 0) {
 		td_verror(td, -ret, "td_io_getevents");
 		return ret;
@@ -1881,6 +1868,10 @@ void io_u_queued(struct thread_data *td, struct io_u *io_u)
 		unsigned long slat_time;
 
 		slat_time = utime_since(&io_u->start_time, &io_u->issue_time);
+
+		if (td->parent)
+			td = td->parent;
+
 		add_slat_sample(td, io_u->ddir, slat_time, io_u->xfer_buflen,
 				io_u->offset);
 	}
@@ -1889,21 +1880,22 @@ void io_u_queued(struct thread_data *td, struct io_u *io_u)
 /*
  * See if we should reuse the last seed, if dedupe is enabled
  */
-static struct frand_state *get_buf_state(struct thread_data *td,
-					 struct frand_state *save)
+static struct frand_state *get_buf_state(struct thread_data *td)
 {
+	uint64_t frand_max;
 	unsigned int v;
 	unsigned long r;
 
 	if (!td->o.dedupe_percentage)
 		return &td->buf_state;
 	else if (td->o.dedupe_percentage == 100) {
-		frand_copy(save, &td->buf_state_prev);
-		return &td->buf_state_prev;
+		frand_copy(&td->buf_state_prev, &td->buf_state);
+		return &td->buf_state;
 	}
 
+	frand_max = rand_max(&td->dedupe_state);
 	r = __rand(&td->dedupe_state);
-	v = 1 + (int) (100.0 * (r / (FRAND_MAX + 1.0)));
+	v = 1 + (int) (100.0 * (r / (frand_max + 1.0)));
 
 	if (v <= td->o.dedupe_percentage)
 		return &td->buf_state_prev;
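The get_buf_state() rewrite above drops the caller-managed save slot: at 100% dedupe the current state is copied into buf_state_prev and then used directly, and otherwise a roll against dedupe_percentage picks between the previous state (which replays the last buffer, producing a duplicate) and the current one. A hypothetical stand-alone rendering of that selection follows; a plain integer stands in for struct frand_state and the 1..100 roll is passed in by the caller, so this is an illustration, not fio's code.

/*
 * Hypothetical illustration of the dedupe seed selection (not fio code).
 * Returning "prev" means the next buffer fill replays an earlier RNG
 * state and therefore produces a byte-identical, dedupe-able buffer.
 */
#include <stdint.h>

struct toy_state {
	uint64_t cur, prev;	/* stand-ins for frand_state */
};

static uint64_t *toy_get_buf_state(struct toy_state *s,
				   unsigned int dedupe_percentage,
				   unsigned int roll /* 1..100 */)
{
	if (!dedupe_percentage)
		return &s->cur;
	if (dedupe_percentage == 100) {
		s->prev = s->cur;	/* remember the state, then use it */
		return &s->cur;
	}
	return roll <= dedupe_percentage ? &s->prev : &s->cur;
}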
@@ -1911,54 +1903,53 @@ static struct frand_state *get_buf_state(struct thread_data *td,
 	return &td->buf_state;
 }
 
-static void save_buf_state(struct thread_data *td, struct frand_state *rs,
-			   struct frand_state *save)
+static void save_buf_state(struct thread_data *td, struct frand_state *rs)
 {
-	if (rs == &td->buf_state)
+	if (td->o.dedupe_percentage == 100)
+		frand_copy(rs, &td->buf_state_prev);
+	else if (rs == &td->buf_state)
 		frand_copy(&td->buf_state_prev, rs);
-	else if (rs == &td->buf_state_prev && td->o.dedupe_percentage == 100)
-		frand_copy(rs, save);
 }
 
 void fill_io_buffer(struct thread_data *td, void *buf, unsigned int min_write,
 		    unsigned int max_bs)
 {
 	struct thread_options *o = &td->o;
-	struct frand_state save = { 0, };
 
 	if (o->compress_percentage || o->dedupe_percentage) {
 		unsigned int perc = td->o.compress_percentage;
 		struct frand_state *rs;
 		unsigned int left = max_bs;
+		unsigned int this_write;
 
 		do {
-			rs = get_buf_state(td, &save);
+			rs = get_buf_state(td);
 
 			min_write = min(min_write, left);
 
 			if (perc) {
-				unsigned int seg = min_write;
+				this_write = min_not_zero(min_write,
							td->o.compress_chunk);
 
-				seg = min(min_write, td->o.compress_chunk);
-				if (!seg)
-					seg = min_write;
-
-				fill_random_buf_percentage(rs, buf, perc, seg,
-					min_write, o->buffer_pattern,
-					o->buffer_pattern_bytes);
-			} else
+				fill_random_buf_percentage(rs, buf, perc,
					this_write, this_write,
+					o->buffer_pattern,
+					o->buffer_pattern_bytes);
+			} else {
 				fill_random_buf(rs, buf, min_write);
+				this_write = min_write;
+			}
 
-			buf += min_write;
-			left -= min_write;
-			save_buf_state(td, rs, &save);
+			buf += this_write;
+			left -= this_write;
+			save_buf_state(td, rs);
 		} while (left);
 	} else if (o->buffer_pattern_bytes)
 		fill_buffer_pattern(td, buf, max_bs);
 	else if (o->zero_buffers)
 		memset(buf, 0, max_bs);
 	else
-		fill_random_buf(get_buf_state(td, NULL), buf, max_bs);
+		fill_random_buf(get_buf_state(td), buf, max_bs);
 }
 
 /*
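The fill_io_buffer() loop above now leans on min_not_zero() from the newly included minmax.h: the old seg dance (take the min with compress_chunk, fall back to min_write when the chunk size is zero) collapses into one expression, and the buffer pointer advances by whatever was actually written. A sketch of the assumed helper semantics, written as a function rather than fio's macro:

/*
 * Assumed semantics of min_not_zero() from "minmax.h": the smaller of
 * the two values, except that a zero argument does not participate.
 * Sketch only; fio implements this as a type-generic macro.
 */
static unsigned int toy_min_not_zero(unsigned int a, unsigned int b)
{
	if (!a)
		return b;
	if (!b)
		return a;
	return a < b ? a : b;
}

With compress_chunk left at zero this degenerates to the plain min_write stride, matching the removed "if (!seg) seg = min_write" fallback, while a configured chunk size caps each fill at the compressible-segment boundary.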