X-Git-Url: https://git.kernel.dk/?p=fio.git;a=blobdiff_plain;f=io_u.c;h=ea6c251859969b3e86775ea2ba4260de73d00194;hp=f0b6170535d2a77a07861a4a0760d72a4130412e;hb=04778baf2def8c1c5a3d7d60233c56d140831711;hpb=1f64bd7cbb8b16d64b30c65393adf9c028713e42

diff --git a/io_u.c b/io_u.c
index f0b61705..ea6c2518 100644
--- a/io_u.c
+++ b/io_u.c
@@ -191,23 +191,23 @@ static inline int should_sort_io(struct thread_data *td)
 	return 1;
 }
 
-static int should_do_random(struct thread_data *td)
+static int should_do_random(struct thread_data *td, enum fio_ddir ddir)
 {
 	unsigned int v;
 	unsigned long r;
 
-	if (td->o.perc_rand == 100)
+	if (td->o.perc_rand[ddir] == 100)
 		return 1;
 
 	if (td->o.use_os_rand) {
-		r = os_random_long(&td->seq_rand_state);
+		r = os_random_long(&td->seq_rand_state[ddir]);
 		v = 1 + (int) (100.0 * (r / (OS_RAND_MAX + 1.0)));
 	} else {
-		r = __rand(&td->__seq_rand_state);
+		r = __rand(&td->__seq_rand_state[ddir]);
 		v = 1 + (int) (100.0 * (r / (FRAND_MAX + 1.0)));
 	}
 
-	return v <= td->o.perc_rand;
+	return v <= td->o.perc_rand[ddir];
 }
 
 static int get_next_rand_offset(struct thread_data *td, struct fio_file *f,
@@ -293,7 +293,8 @@ static int get_next_seq_offset(struct thread_data *td, struct fio_file *f,
 }
 
 static int get_next_block(struct thread_data *td, struct io_u *io_u,
-			  enum fio_ddir ddir, int rw_seq)
+			  enum fio_ddir ddir, int rw_seq,
+			  unsigned int *is_random)
 {
 	struct fio_file *f = io_u->file;
 	uint64_t b, offset;
@@ -305,23 +306,30 @@ static int get_next_block(struct thread_data *td, struct io_u *io_u,
 
 	if (rw_seq) {
 		if (td_random(td)) {
-			if (should_do_random(td))
+			if (should_do_random(td, ddir)) {
 				ret = get_next_rand_block(td, f, ddir, &b);
-			else {
+				*is_random = 1;
+			} else {
+				*is_random = 0;
 				io_u->flags |= IO_U_F_BUSY_OK;
 				ret = get_next_seq_offset(td, f, ddir, &offset);
 				if (ret)
 					ret = get_next_rand_block(td, f, ddir, &b);
 			}
-		} else
+		} else {
+			*is_random = 0;
 			ret = get_next_seq_offset(td, f, ddir, &offset);
+		}
 	} else {
 		io_u->flags |= IO_U_F_BUSY_OK;
+		*is_random = 0;
 
 		if (td->o.rw_seq == RW_SEQ_SEQ) {
 			ret = get_next_seq_offset(td, f, ddir, &offset);
-			if (ret)
+			if (ret) {
 				ret = get_next_rand_block(td, f, ddir, &b);
+				*is_random = 0;
+			}
 		} else if (td->o.rw_seq == RW_SEQ_IDENT) {
 			if (f->last_start != -1ULL)
 				offset = f->last_start - f->file_offset;
@@ -353,7 +361,8 @@ static int get_next_block(struct thread_data *td, struct io_u *io_u,
  * until we find a free one. For sequential io, just return the end of
  * the last io issued.
  */
-static int __get_next_offset(struct thread_data *td, struct io_u *io_u)
+static int __get_next_offset(struct thread_data *td, struct io_u *io_u,
+			     unsigned int *is_random)
 {
 	struct fio_file *f = io_u->file;
 	enum fio_ddir ddir = io_u->ddir;
@@ -366,7 +375,7 @@ static int __get_next_offset(struct thread_data *td, struct io_u *io_u)
 		td->ddir_seq_nr = td->o.ddir_seq_nr;
 	}
 
-	if (get_next_block(td, io_u, ddir, rw_seq_hit))
+	if (get_next_block(td, io_u, ddir, rw_seq_hit, is_random))
 		return 1;
 
 	if (io_u->offset >= f->io_size) {
@@ -387,16 +396,17 @@ static int __get_next_offset(struct thread_data *td, struct io_u *io_u)
 	return 0;
 }
 
-static int get_next_offset(struct thread_data *td, struct io_u *io_u)
+static int get_next_offset(struct thread_data *td, struct io_u *io_u,
+			   unsigned int *is_random)
 {
 	if (td->flags & TD_F_PROFILE_OPS) {
 		struct prof_io_ops *ops = &td->prof_io_ops;
 
 		if (ops->fill_io_u_off)
-			return ops->fill_io_u_off(td, io_u);
+			return ops->fill_io_u_off(td, io_u, is_random);
 	}
 
-	return __get_next_offset(td, io_u);
+	return __get_next_offset(td, io_u, is_random);
 }
 
 static inline int io_u_fits(struct thread_data *td, struct io_u *io_u,
@@ -407,14 +417,20 @@ static inline int io_u_fits(struct thread_data *td, struct io_u *io_u,
 	return io_u->offset + buflen <= f->io_size + get_start_offset(td);
 }
 
-static unsigned int __get_next_buflen(struct thread_data *td, struct io_u *io_u)
+static unsigned int __get_next_buflen(struct thread_data *td, struct io_u *io_u,
+				      unsigned int is_random)
 {
-	const int ddir = io_u->ddir;
+	int ddir = io_u->ddir;
 	unsigned int buflen = 0;
 	unsigned int minbs, maxbs;
 	unsigned long r, rand_max;
 
-	assert(ddir_rw(ddir));
+	assert(ddir_rw(io_u->ddir));
+
+	if (td->o.bs_is_seq_rand)
+		ddir = is_random ? DDIR_WRITE: DDIR_READ;
+	else
+		ddir = io_u->ddir;
 
 	minbs = td->o.min_bs[ddir];
 	maxbs = td->o.max_bs[ddir];
@@ -459,6 +475,10 @@ static unsigned int __get_next_buflen(struct thread_data *td, struct io_u *io_u)
 		}
 	}
 
+	if (td->o.do_verify && td->o.verify != VERIFY_NONE)
+		buflen = (buflen + td->o.verify_interval - 1) &
+				~(td->o.verify_interval - 1);
+
 	if (!td->o.bs_unaligned && is_power_of_2(minbs))
 		buflen = (buflen + minbs - 1) & ~(minbs - 1);
 
@@ -467,16 +487,17 @@ static unsigned int __get_next_buflen(struct thread_data *td, struct io_u *io_u)
 	return buflen;
 }
 
-static unsigned int get_next_buflen(struct thread_data *td, struct io_u *io_u)
+static unsigned int get_next_buflen(struct thread_data *td, struct io_u *io_u,
+				    unsigned int is_random)
 {
 	if (td->flags & TD_F_PROFILE_OPS) {
 		struct prof_io_ops *ops = &td->prof_io_ops;
 
 		if (ops->fill_io_u_size)
-			return ops->fill_io_u_size(td, io_u);
+			return ops->fill_io_u_size(td, io_u, is_random);
 	}
 
-	return __get_next_buflen(td, io_u);
+	return __get_next_buflen(td, io_u, is_random);
 }
 
 static void set_rwmix_bytes(struct thread_data *td)
@@ -676,8 +697,7 @@ void put_io_u(struct thread_data *td, struct io_u *io_u)
 
 	if (io_u->flags & IO_U_F_IN_CUR_DEPTH)
 		td->cur_depth--;
-	flist_del_init(&io_u->list);
-	flist_add(&io_u->list, &td->io_u_freelist);
+	io_u_qpush(&td->io_u_freelist, io_u);
 	td_io_u_unlock(td);
 	td_io_u_free_notify(td);
 }
@@ -704,14 +724,16 @@ void requeue_io_u(struct thread_data *td, struct io_u **io_u)
 	__io_u->flags &= ~IO_U_F_FLIGHT;
 	if (__io_u->flags & IO_U_F_IN_CUR_DEPTH)
 		td->cur_depth--;
-	flist_del(&__io_u->list);
-	flist_add_tail(&__io_u->list, &td->io_u_requeues);
+
+	io_u_rpush(&td->io_u_requeues, __io_u);
 	td_io_u_unlock(td);
 	*io_u = NULL;
 }
 
 static int fill_io_u(struct thread_data *td, struct io_u *io_u)
 {
+	unsigned int is_random;
+
 	if (td->io_ops->flags & FIO_NOIO)
 		goto out;
 
@@ -737,12 +759,12 @@ static int fill_io_u(struct thread_data *td, struct io_u *io_u)
 	 * No log, let the seq/rand engine retrieve the next buflen and
 	 * position.
 	 */
-	if (get_next_offset(td, io_u)) {
+	if (get_next_offset(td, io_u, &is_random)) {
 		dprint(FD_IO, "io_u %p, failed getting offset\n", io_u);
 		return 1;
 	}
 
-	io_u->buflen = get_next_buflen(td, io_u);
+	io_u->buflen = get_next_buflen(td, io_u, is_random);
 	if (!io_u->buflen) {
 		dprint(FD_IO, "io_u %p, failed getting buflen\n", io_u);
 		return 1;
@@ -1104,18 +1126,148 @@ static int set_io_u_file(struct thread_data *td, struct io_u *io_u)
 
 	return 0;
 }
+
+static void lat_fatal(struct thread_data *td, struct io_completion_data *icd,
+		      unsigned long tusec, unsigned long max_usec)
+{
+	if (!td->error)
+		log_err("fio: latency of %lu usec exceeds specified max (%lu usec)\n", tusec, max_usec);
+	td_verror(td, ETIMEDOUT, "max latency exceeded");
+	icd->error = ETIMEDOUT;
+}
+
+static void lat_new_cycle(struct thread_data *td)
+{
+	fio_gettime(&td->latency_ts, NULL);
+	td->latency_ios = ddir_rw_sum(td->io_blocks);
+	td->latency_failed = 0;
+}
+
+/*
+ * We had an IO outside the latency target. Reduce the queue depth. If we
+ * are at QD=1, then it's time to give up.
+ */
+static int __lat_target_failed(struct thread_data *td)
+{
+	if (td->latency_qd == 1)
+		return 1;
+
+	td->latency_qd_high = td->latency_qd;
+	td->latency_qd = (td->latency_qd + td->latency_qd_low) / 2;
+
+	dprint(FD_RATE, "Ramped down: %d %d %d\n", td->latency_qd_low, td->latency_qd, td->latency_qd_high);
+	/*
+	 * When we ramp QD down, quiesce existing IO to prevent
+	 * a storm of ramp downs due to pending higher depth.
+	 */
+	io_u_quiesce(td);
+	lat_new_cycle(td);
+	return 0;
+}
+
+static int lat_target_failed(struct thread_data *td)
+{
+	if (td->o.latency_percentile.u.f == 100.0)
+		return __lat_target_failed(td);
+
+	td->latency_failed++;
+	return 0;
+}
+
+void lat_target_init(struct thread_data *td)
+{
+	if (td->o.latency_target) {
+		dprint(FD_RATE, "Latency target=%llu\n", td->o.latency_target);
+		fio_gettime(&td->latency_ts, NULL);
+		td->latency_qd = 1;
+		td->latency_qd_high = td->o.iodepth;
+		td->latency_qd_low = 1;
+		td->latency_ios = ddir_rw_sum(td->io_blocks);
+	} else
+		td->latency_qd = td->o.iodepth;
+}
+
+static void lat_target_success(struct thread_data *td)
+{
+	const unsigned int qd = td->latency_qd;
+
+	td->latency_qd_low = td->latency_qd;
+
+	/*
+	 * If we haven't failed yet, we double up to a failing value instead
+	 * of bisecting from highest possible queue depth. If we have set
+	 * a limit other than td->o.iodepth, bisect between that.
+	 */
+	if (td->latency_qd_high != td->o.iodepth)
+		td->latency_qd = (td->latency_qd + td->latency_qd_high) / 2;
+	else
+		td->latency_qd *= 2;
+
+	if (td->latency_qd > td->o.iodepth)
+		td->latency_qd = td->o.iodepth;
+
+	dprint(FD_RATE, "Ramped up: %d %d %d\n", td->latency_qd_low, td->latency_qd, td->latency_qd_high);
+	/*
+	 * Same as last one, we are done
+	 */
+	if (td->latency_qd == qd)
+		td->done = 1;
+
+	lat_new_cycle(td);
+}
+
+/*
+ * Check if we can bump the queue depth
+ */
+void lat_target_check(struct thread_data *td)
+{
+	uint64_t usec_window;
+	uint64_t ios;
+	double success_ios;
+
+	usec_window = utime_since_now(&td->latency_ts);
+	if (usec_window < td->o.latency_window)
+		return;
+
+	ios = ddir_rw_sum(td->io_blocks) - td->latency_ios;
+	success_ios = (double) (ios - td->latency_failed) / (double) ios;
+	success_ios *= 100.0;
+
+	dprint(FD_RATE, "Success rate: %.2f%% (target %.2f%%)\n", success_ios, td->o.latency_percentile.u.f);
+
+	if (success_ios >= td->o.latency_percentile.u.f)
+		lat_target_success(td);
+	else
+		__lat_target_failed(td);
+}
+
+/*
+ * If latency target is enabled, we might be ramping up or down and not
+ * using the full queue depth available.
+ */
+int queue_full(struct thread_data *td)
+{
+	const int qempty = io_u_qempty(&td->io_u_freelist);
+
+	if (qempty)
+		return 1;
+	if (!td->o.latency_target)
+		return 0;
+
+	return td->cur_depth >= td->latency_qd;
+}
 
 struct io_u *__get_io_u(struct thread_data *td)
 {
-	struct io_u *io_u = NULL;
+	struct io_u *io_u;
 
 	td_io_u_lock(td);
 
 again:
-	if (!flist_empty(&td->io_u_requeues))
-		io_u = flist_entry(td->io_u_requeues.next, struct io_u, list);
+	if (!io_u_rempty(&td->io_u_requeues))
+		io_u = io_u_rpop(&td->io_u_requeues);
 	else if (!queue_full(td)) {
-		io_u = flist_entry(td->io_u_freelist.next, struct io_u, list);
+		io_u = io_u_qpop(&td->io_u_freelist);
 
 		io_u->buflen = 0;
 		io_u->resid = 0;
@@ -1131,8 +1283,6 @@ again:
 
 		io_u->error = 0;
 		io_u->acct_ddir = -1;
-		flist_del(&io_u->list);
-		flist_add_tail(&io_u->list, &td->io_u_busylist);
 		td->cur_depth++;
 		io_u->flags |= IO_U_F_IN_CUR_DEPTH;
 	} else if (td->o.verify_async) {
@@ -1368,11 +1518,18 @@ static void account_io_completion(struct thread_data *td, struct io_u *io_u,
 		tusec = utime_since(&io_u->start_time, &icd->time);
 		add_lat_sample(td, idx, tusec, bytes);
 
-		if (td->o.max_latency && tusec > td->o.max_latency) {
-			if (!td->error)
-				log_err("fio: latency of %lu usec exceeds specified max (%u usec)\n", tusec, td->o.max_latency);
-			td_verror(td, ETIMEDOUT, "max latency exceeded");
-			icd->error = ETIMEDOUT;
+		if (td->flags & TD_F_PROFILE_OPS) {
+			struct prof_io_ops *ops = &td->prof_io_ops;
+
+			if (ops->io_u_lat)
+				icd->error = ops->io_u_lat(td, tusec);
+		}
+
+		if (td->o.max_latency && tusec > td->o.max_latency)
+			lat_fatal(td, icd, tusec, td->o.max_latency);
+		if (td->o.latency_target && tusec > td->o.latency_target) {
+			if (lat_target_failed(td))
+				lat_fatal(td, icd, tusec, td->o.latency_target);
 		}
 	}
 
@@ -1384,7 +1541,10 @@ static void account_io_completion(struct thread_data *td, struct io_u *io_u,
 	if (!td->o.disable_bw)
 		add_bw_sample(td, idx, bytes, &icd->time);
 
-	add_iops_sample(td, idx, &icd->time);
+	add_iops_sample(td, idx, bytes, &icd->time);
+
+	if (td->o.number_ios && !--td->o.number_ios)
+		td->done = 1;
 }
 
 static long long usec_for_io(struct thread_data *td, enum fio_ddir ddir)