X-Git-Url: https://git.kernel.dk/?p=fio.git;a=blobdiff_plain;f=io_u.c;h=2deb5c7216aea5e02c66cf8237fb7648fc3a2dd2;hp=1aa418c7554b37babcf2682df1030f390438b428;hb=2bcbf556086f7021bb8bf7df7c4a4cd117c7ffb0;hpb=068420271828b3b2426ffc3ccf64404cb9d340fb

diff --git a/io_u.c b/io_u.c
index 1aa418c7..2deb5c72 100644
--- a/io_u.c
+++ b/io_u.c
@@ -114,6 +114,9 @@ static unsigned long long last_block(struct thread_data *td, struct fio_file *f,
 	if (max_size > f->real_file_size)
 		max_size = f->real_file_size;
 
+	if (td->o.zone_range)
+		max_size = td->o.zone_range;
+
 	max_blocks = max_size / (unsigned long long) td->o.ba[ddir];
 	if (!max_blocks)
 		return 0;
@@ -374,6 +377,12 @@ static unsigned int __get_next_buflen(struct thread_data *td, struct io_u *io_u)
 	if (minbs == maxbs)
 		return minbs;
 
+	/*
+	 * If we can't satisfy the min block size from here, then fail
+	 */
+	if (!io_u_fits(td, io_u, minbs))
+		return 0;
+
 	if (td->o.use_os_rand)
 		rand_max = OS_RAND_MAX;
 	else
@@ -493,12 +502,17 @@ static enum fio_ddir rate_ddir(struct thread_data *td, enum fio_ddir ddir)
 
 	/*
 	 * We are going to sleep, ensure that we flush anything pending as
-	 * not to skew our latency numbers
+	 * not to skew our latency numbers.
+	 *
+	 * Changed to only monitor 'in flight' requests here instead of the
+	 * td->cur_depth, because td->cur_depth does not accurately represent
+	 * IOs that have actually been submitted to an async engine,
+	 * and cur_depth is meaningless for sync engines.
 	 */
-	if (td->cur_depth) {
+	if (td->io_u_in_flight) {
 		int fio_unused ret;
 
-		ret = io_u_queued_complete(td, td->cur_depth, NULL);
+		ret = io_u_queued_complete(td, td->io_u_in_flight, NULL);
 	}
 
 	fio_gettime(&t, NULL);
@@ -654,9 +668,10 @@ static int fill_io_u(struct thread_data *td, struct io_u *io_u)
 	/*
 	 * See if it's time to switch to a new zone
 	 */
-	if (td->zone_bytes >= td->o.zone_size) {
+	if (td->zone_bytes >= td->o.zone_size && td->o.zone_skip) {
 		td->zone_bytes = 0;
-		io_u->file->last_pos += td->o.zone_skip;
+		io_u->file->file_offset += td->o.zone_range + td->o.zone_skip;
+		io_u->file->last_pos = io_u->file->file_offset;
 		td->io_skip_bytes += td->o.zone_skip;
 	}
 
@@ -1138,6 +1153,7 @@ static void small_content_scramble(struct io_u *io_u)
 
 	p = io_u->xfer_buf;
 	boffset = io_u->offset;
+	io_u->buf_filled_len = 0;
 
 	for (i = 0; i < nr_blocks; i++) {
 		/*
@@ -1211,9 +1227,10 @@ struct io_u *get_io_u(struct thread_data *td)
 		if (io_u->ddir == DDIR_WRITE) {
 			if (td->o.verify != VERIFY_NONE)
 				populate_verify_io_u(td, io_u);
-			else if (td->o.refill_buffers)
-				io_u_fill_buffer(td, io_u, io_u->xfer_buflen);
-			else if (td->o.scramble_buffers)
+			else if (td->o.refill_buffers) {
+				io_u_fill_buffer(td, io_u,
+					io_u->xfer_buflen, io_u->xfer_buflen);
+			} else if (td->o.scramble_buffers)
 				do_scramble = 1;
 		} else if (io_u->ddir == DDIR_READ) {
 			/*
@@ -1359,7 +1376,8 @@ static void io_completed(struct thread_data *td, struct io_u *io_u,
 		}
 	}
 
-	if (ramp_time_over(td) && td->runstate == TD_RUNNING) {
+	if (ramp_time_over(td) && (td->runstate == TD_RUNNING ||
+	    td->runstate == TD_VERIFYING)) {
 		account_io_completion(td, io_u, icd, idx, bytes);
 
 		if (__should_check_rate(td, idx)) {
@@ -1515,12 +1533,18 @@ void io_u_queued(struct thread_data *td, struct io_u *io_u)
  * "randomly" fill the buffer contents
  */
 void io_u_fill_buffer(struct thread_data *td, struct io_u *io_u,
-		      unsigned int max_bs)
+		      unsigned int min_write, unsigned int max_bs)
 {
 	io_u->buf_filled_len = 0;
 
-	if (!td->o.zero_buffers)
-		fill_random_buf(&td->buf_state, io_u->buf, max_bs);
-	else
+	if (!td->o.zero_buffers) {
+		unsigned int perc = td->o.compress_percentage;
+
+		if (perc) {
+			fill_random_buf_percentage(&td->buf_state, io_u->buf,
+						   perc, min_write, max_bs);
+		} else
+			fill_random_buf(&td->buf_state, io_u->buf, max_bs);
+	} else
 		memset(io_u->buf, 0, max_bs);
 }
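
A note on the io_u_fill_buffer() change above: when compress_percentage is set, the buffer is filled by fill_random_buf_percentage() so that roughly that percentage of each min_write-sized segment is trivially compressible while the remainder stays random; filling at min_write granularity means any write of at least that size sees the intended compressibility ratio. The sketch below only illustrates the idea; it is not fio's implementation (the real fill_random_buf_percentage() lives in lib/rand.c and uses fio's own RNG state). The sketch_fill_buf_percentage() name, the rand() source, and the zero-fill stand-in for "compressible" data are assumptions made for illustration.

/*
 * Standalone sketch of a percentage-based buffer fill, assuming a plain
 * rand() source and zero bytes as the "compressible" portion. Not fio's
 * fill_random_buf_percentage(); illustration only.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void sketch_fill_buf_percentage(char *buf, unsigned int perc,
				       unsigned int seg_len,
				       unsigned int max_bs)
{
	unsigned int filled = 0;

	/* Guard against a zero segment length; treat it as one segment */
	if (!seg_len)
		seg_len = max_bs;

	while (filled < max_bs) {
		unsigned int this_len = seg_len;
		unsigned int rand_len, i;

		if (this_len > max_bs - filled)
			this_len = max_bs - filled;

		/*
		 * Roughly 'perc' percent of each segment is left as
		 * zeroes (trivially compressible); the rest is random.
		 */
		rand_len = (this_len * (100 - perc)) / 100;
		for (i = 0; i < rand_len; i++)
			buf[filled + i] = rand() & 0xff;
		memset(buf + filled + rand_len, 0, this_len - rand_len);

		filled += this_len;
	}
}

int main(void)
{
	char buf[4096];

	/* ~70% compressible, filled in 512-byte segments (the min_write
	 * analogue), so any 512-byte-aligned write sees the same ratio */
	sketch_fill_buf_percentage(buf, 70, 512, sizeof(buf));
	printf("first byte: %02x\n", (unsigned char) buf[0]);
	return 0;
}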