X-Git-Url: https://git.kernel.dk/?p=fio.git;a=blobdiff_plain;f=io_u.c;h=161c2cb388d01218628da8468c1fdeba105e40ed;hp=4dcb1fce44dbbeb4478de8154e502af4eb9f7d32;hb=843ad23782b4e6061dcc0e14b0fbc5f4fecf55f9;hpb=15b87723551b424d0db4c53577b567e670c7d4d8

diff --git a/io_u.c b/io_u.c
index 4dcb1fce..161c2cb3 100644
--- a/io_u.c
+++ b/io_u.c
@@ -114,6 +114,9 @@ static unsigned long long last_block(struct thread_data *td, struct fio_file *f,
 	if (max_size > f->real_file_size)
 		max_size = f->real_file_size;
 
+	if (td->o.zone_range)
+		max_size = td->o.zone_range;
+
 	max_blocks = max_size / (unsigned long long) td->o.ba[ddir];
 	if (!max_blocks)
 		return 0;
@@ -374,6 +377,12 @@ static unsigned int __get_next_buflen(struct thread_data *td, struct io_u *io_u)
 	if (minbs == maxbs)
 		return minbs;
 
+	/*
+	 * If we can't satisfy the min block size from here, then fail
+	 */
+	if (!io_u_fits(td, io_u, minbs))
+		return 0;
+
 	if (td->o.use_os_rand)
 		rand_max = OS_RAND_MAX;
 	else
@@ -493,12 +502,17 @@ static enum fio_ddir rate_ddir(struct thread_data *td, enum fio_ddir ddir)
 
 	/*
 	 * We are going to sleep, ensure that we flush anything pending as
-	 * not to skew our latency numbers
+	 * not to skew our latency numbers.
+	 *
+	 * Changed to only monitor 'in flight' requests here instead of the
+	 * td->cur_depth, b/c td->cur_depth does not accurately represent
+	 * io's that have been actually submitted to an async engine,
+	 * and cur_depth is meaningless for sync engines.
 	 */
-	if (td->cur_depth) {
+	if (td->io_u_in_flight) {
 		int fio_unused ret;
 
-		ret = io_u_queued_complete(td, td->cur_depth, NULL);
+		ret = io_u_queued_complete(td, td->io_u_in_flight, NULL);
 	}
 
 	fio_gettime(&t, NULL);
@@ -597,13 +611,12 @@ void put_io_u(struct thread_data *td, struct io_u *io_u)
 {
 	td_io_u_lock(td);
 
-	io_u->flags |= IO_U_F_FREE;
-	io_u->flags &= ~IO_U_F_FREE_DEF;
-
-	if (io_u->file)
+	if (io_u->file && !(io_u->flags & IO_U_F_FREE_DEF))
 		put_file_log(td, io_u->file);
 
-	io_u->file = NULL;
+	io_u->flags &= ~IO_U_F_FREE_DEF;
+	io_u->flags |= IO_U_F_FREE;
+
 	if (io_u->flags & IO_U_F_IN_CUR_DEPTH)
 		td->cur_depth--;
 	flist_del_init(&io_u->list);
@@ -655,9 +668,10 @@ static int fill_io_u(struct thread_data *td, struct io_u *io_u)
 	/*
 	 * See if it's time to switch to a new zone
 	 */
-	if (td->zone_bytes >= td->o.zone_size) {
+	if (td->zone_bytes >= td->o.zone_size && td->o.zone_skip) {
 		td->zone_bytes = 0;
-		io_u->file->last_pos += td->o.zone_skip;
+		io_u->file->file_offset += td->o.zone_range + td->o.zone_skip;
+		io_u->file->last_pos = io_u->file->file_offset;
 		td->io_skip_bytes += td->o.zone_skip;
 	}
 
@@ -1139,6 +1153,7 @@ static void small_content_scramble(struct io_u *io_u)
 
 	p = io_u->xfer_buf;
 	boffset = io_u->offset;
+	io_u->buf_filled_len = 0;
 
 	for (i = 0; i < nr_blocks; i++) {
 		/*
@@ -1293,6 +1308,18 @@ static void account_io_completion(struct thread_data *td, struct io_u *io_u,
 
 	if (!td->o.disable_bw)
 		add_bw_sample(td, idx, bytes, &icd->time);
+
+	add_iops_sample(td, idx, &icd->time);
+}
+
+static long long usec_for_io(struct thread_data *td, enum fio_ddir ddir)
+{
+	unsigned long long secs, remainder, bps, bytes;
+	bytes = td->this_io_bytes[ddir];
+	bps = td->rate_bps[ddir];
+	secs = bytes / bps;
+	remainder = bytes % bps;
+	return remainder * 1000000 / bps + secs * 1000000;
 }
 
 static void io_completed(struct thread_data *td, struct io_u *io_u,
@@ -1332,6 +1359,7 @@ static void io_completed(struct thread_data *td, struct io_u *io_u,
 		int ret;
 
 		td->io_blocks[idx]++;
+		td->this_io_blocks[idx]++;
 		td->io_bytes[idx] += bytes;
 		td->this_io_bytes[idx] += bytes;
 
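Aside: the usec_for_io() helper added above converts the bytes moved so far
into the number of microseconds they should have taken at the configured
rate (td->rate_bps). Splitting the division into whole seconds plus a
remainder keeps the remainder * 1000000 product inside 64 bits, where a
direct bytes * 1000000 could overflow for large transfer counts. A
standalone sketch of the same arithmetic, with a hypothetical
usecs_at_rate() outside fio's structures (assumes bps != 0):

#include <stdio.h>

/*
 * Same arithmetic as usec_for_io() above: how many microseconds moving
 * `bytes` should take at `bps` bytes/sec.  Splitting out the whole
 * seconds first keeps remainder * 1000000 well inside 64 bits.
 * Hypothetical standalone version; assumes bps != 0.
 */
static unsigned long long usecs_at_rate(unsigned long long bytes,
					unsigned long long bps)
{
	unsigned long long secs = bytes / bps;
	unsigned long long remainder = bytes % bps;

	return secs * 1000000ULL + remainder * 1000000ULL / bps;
}

int main(void)
{
	/* 1 TB at 100 MB/s: expect 10000 seconds, i.e. 10000000000 usec. */
	printf("%llu\n", usecs_at_rate(1000000000000ULL, 100000000ULL));
	return 0;
}

The next hunk uses this helper: rate_pending_usleep becomes the ideal
elapsed time minus the actual elapsed time, i.e. how long the job should
sleep to stay on rate.
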
@@ -1347,19 +1375,18 @@ static void io_completed(struct thread_data *td, struct io_u *io_u,
 		}
 	}
 
-	if (ramp_time_over(td)) {
+	if (ramp_time_over(td) && (td->runstate == TD_RUNNING ||
+	    td->runstate == TD_VERIFYING)) {
 		account_io_completion(td, io_u, icd, idx, bytes);
 
 		if (__should_check_rate(td, idx)) {
 			td->rate_pending_usleep[idx] =
-				((td->this_io_bytes[idx] *
-				  td->rate_nsec_cycle[idx]) / 1000 -
+				(usec_for_io(td, idx) -
 				 utime_since_now(&td->start));
 		}
-		if (__should_check_rate(td, idx ^ 1))
+		if (__should_check_rate(td, odx))
 			td->rate_pending_usleep[odx] =
-				((td->this_io_bytes[odx] *
-				  td->rate_nsec_cycle[odx]) / 1000 -
+				(usec_for_io(td, odx) -
 				 utime_since_now(&td->start));
 	}
 
@@ -1379,8 +1406,8 @@ static void io_completed(struct thread_data *td, struct io_u *io_u,
 		icd->error = io_u->error;
 		io_u_log_error(td, io_u);
 	}
-	if (td->o.continue_on_error && icd->error &&
-	    td_non_fatal_error(icd->error)) {
+	if (icd->error && td_non_fatal_error(icd->error) &&
+	    (td->o.continue_on_error & td_error_type(io_u->ddir, icd->error))) {
 		/*
 		 * If there is a non_fatal error, then add to the error count
 		 * and clear all the errors.
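
The final hunk makes continue_on_error type-aware: rather than a plain
boolean gate, the option is treated as a bitmask that is ANDed with the
error category td_error_type() reports for the failed I/O's direction.
A minimal sketch of that masking pattern, using hypothetical ERR_TYPE_*
flags and error_type_of() in place of fio's real definitions:

#include <stdio.h>

/* Hypothetical per-class error bits; fio defines its own set. */
enum {
	ERR_TYPE_READ	= 1 << 0,
	ERR_TYPE_WRITE	= 1 << 1,
	ERR_TYPE_VERIFY	= 1 << 2,
};

/* Stand-in for td_error_type(): map an I/O direction to its class bit. */
static int error_type_of(int is_write)
{
	return is_write ? ERR_TYPE_WRITE : ERR_TYPE_READ;
}

int main(void)
{
	/* Job configured to press on only after read errors. */
	int continue_on_error = ERR_TYPE_READ;

	printf("read error continues:  %d\n",
	       !!(continue_on_error & error_type_of(0)));
	printf("write error continues: %d\n",
	       !!(continue_on_error & error_type_of(1)));
	return 0;
}

Keeping one bit per error class lets a job tolerate, say, read errors
while still aborting immediately on write errors.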