X-Git-Url: https://git.kernel.dk/?p=fio.git;a=blobdiff_plain;f=io_u.c;h=428b312c9b34fea3cd1db6d38b375fd1a14376e6;hp=d1f66a949293310f166690089438e2a883f9feec;hb=422f9e4b57549ce1e163b9c1de71932d9ea24de4;hpb=fddc6604f91ebf76d9090741f9d4f5a4d33be0c6

diff --git a/io_u.c b/io_u.c
index d1f66a94..428b312c 100644
--- a/io_u.c
+++ b/io_u.c
@@ -114,6 +114,9 @@ static unsigned long long last_block(struct thread_data *td, struct fio_file *f,
 	if (max_size > f->real_file_size)
 		max_size = f->real_file_size;
 
+	if (td->o.zone_range)
+		max_size = td->o.zone_range;
+
 	max_blocks = max_size / (unsigned long long) td->o.ba[ddir];
 	if (!max_blocks)
 		return 0;
@@ -493,12 +496,17 @@ static enum fio_ddir rate_ddir(struct thread_data *td, enum fio_ddir ddir)
 
 	/*
 	 * We are going to sleep, ensure that we flush anything pending as
-	 * not to skew our latency numbers
+	 * not to skew our latency numbers.
+	 *
+	 * Changed to only monitor 'in flight' requests here instead of the
+	 * td->cur_depth, b/c td->cur_depth does not accurately represent
+	 * io's that have been actually submitted to an async engine,
+	 * and cur_depth is meaningless for sync engines.
 	 */
-	if (td->cur_depth) {
+	if (td->io_u_in_flight) {
 		int fio_unused ret;
 
-		ret = io_u_queued_complete(td, td->cur_depth, NULL);
+		ret = io_u_queued_complete(td, td->io_u_in_flight, NULL);
 	}
 
 	fio_gettime(&t, NULL);
@@ -597,13 +605,12 @@ void put_io_u(struct thread_data *td, struct io_u *io_u)
 {
 	td_io_u_lock(td);
 
-	io_u->flags |= IO_U_F_FREE;
-	io_u->flags &= ~IO_U_F_FREE_DEF;
-
-	if (io_u->file)
+	if (io_u->file && !(io_u->flags & IO_U_F_FREE_DEF))
 		put_file_log(td, io_u->file);
 
-	io_u->file = NULL;
+	io_u->flags &= ~IO_U_F_FREE_DEF;
+	io_u->flags |= IO_U_F_FREE;
+
 	if (io_u->flags & IO_U_F_IN_CUR_DEPTH)
 		td->cur_depth--;
 	flist_del_init(&io_u->list);
@@ -657,7 +664,8 @@ static int fill_io_u(struct thread_data *td, struct io_u *io_u)
 	 */
 	if (td->zone_bytes >= td->o.zone_size) {
 		td->zone_bytes = 0;
-		io_u->file->last_pos += td->o.zone_skip;
+		io_u->file->file_offset += td->o.zone_range + td->o.zone_skip;
+		io_u->file->last_pos = io_u->file->file_offset;
 		td->io_skip_bytes += td->o.zone_skip;
 	}
 
@@ -1297,6 +1305,16 @@ static void account_io_completion(struct thread_data *td, struct io_u *io_u,
 		add_iops_sample(td, idx, &icd->time);
 }
 
+static long long usec_for_io(struct thread_data *td, enum fio_ddir ddir)
+{
+	unsigned long long secs, remainder, bps, bytes;
+	bytes = td->this_io_bytes[ddir];
+	bps = td->rate_bps[ddir];
+	secs = bytes / bps;
+	remainder = bytes % bps;
+	return remainder * 1000000 / bps + secs * 1000000;
+}
+
 static void io_completed(struct thread_data *td, struct io_u *io_u,
 			 struct io_completion_data *icd)
 {
@@ -1355,14 +1373,12 @@ static void io_completed(struct thread_data *td, struct io_u *io_u,
 
 		if (__should_check_rate(td, idx)) {
 			td->rate_pending_usleep[idx] =
-				((td->this_io_bytes[idx] *
-				  td->rate_nsec_cycle[idx]) / 1000 -
+				(usec_for_io(td, idx) -
 				 utime_since_now(&td->start));
 		}
-		if (__should_check_rate(td, idx ^ 1))
+		if (__should_check_rate(td, odx))
 			td->rate_pending_usleep[odx] =
-				((td->this_io_bytes[odx] *
-				  td->rate_nsec_cycle[odx]) / 1000 -
+				(usec_for_io(td, odx) -
 				 utime_since_now(&td->start));
 	}
 
@@ -1382,8 +1398,8 @@ static void io_completed(struct thread_data *td, struct io_u *io_u,
 		icd->error = io_u->error;
 		io_u_log_error(td, io_u);
 	}
-	if (td->o.continue_on_error && icd->error &&
-	    td_non_fatal_error(icd->error)) {
+	if (icd->error && td_non_fatal_error(icd->error) &&
+	    (td->o.continue_on_error & td_error_type(io_u->ddir, icd->error))) {
 		/*
 		 * If there is a non_fatal error, then add to the error count
 		 * and clear all the errors.
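
Note (not part of the patch): the usec_for_io() helper added above converts the bytes completed so far for a data direction into the number of microseconds that transfer should have taken at the configured rate (td->rate_bps), splitting the division into whole seconds plus a remainder so that the microsecond multiplication does not overflow 64 bits. The rate_pending_usleep assignments then sleep for the difference between that expected time and the actual elapsed time. The following is a minimal standalone sketch of the same arithmetic; the names (usec_for_bytes, done, cap) are hypothetical and no fio internals are used.

/*
 * Standalone sketch (not fio code) of the pacing arithmetic used by the
 * usec_for_io() helper in the patch above.  Names are hypothetical.
 */
#include <stdio.h>

static long long usec_for_bytes(unsigned long long bytes,
				unsigned long long bps)
{
	unsigned long long secs, remainder;

	if (!bps)			/* no rate cap configured */
		return 0;

	secs = bytes / bps;		/* whole seconds at the target rate */
	remainder = bytes % bps;	/* bytes left over within the last second */

	/* remainder < bps, so the multiplication is safe for any realistic rate */
	return remainder * 1000000 / bps + secs * 1000000;
}

int main(void)
{
	/* 150 MB completed against a 100 MB/s cap should have taken 1.5s */
	unsigned long long done = 150ULL << 20;
	unsigned long long cap = 100ULL << 20;

	printf("expected duration: %lld usec\n", usec_for_bytes(done, cap));
	/* a caller would then sleep for (expected - elapsed) microseconds */
	return 0;
}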