X-Git-Url: https://git.kernel.dk/?a=blobdiff_plain;f=io_u.c;h=a45dd4021c4e161f945367a3aec59dde0ebd4c57;hb=29407f16b06cd03e43058f51dd5cca7aa0f0f321;hp=d815ad9a340a63aaee1c0de05c402adec129835e;hpb=d6b72507e72d3f2ed334fa5665880b0ab59dbfdd;p=fio.git

diff --git a/io_u.c b/io_u.c
index d815ad9a..a45dd402 100644
--- a/io_u.c
+++ b/io_u.c
@@ -68,6 +68,9 @@ static uint64_t last_block(struct thread_data *td, struct fio_file *f,
 	if (td->o.zone_range)
 		max_size = td->o.zone_range;
 
+	if (td->o.min_bs[ddir] > td->o.ba[ddir])
+		max_size -= td->o.min_bs[ddir] - td->o.ba[ddir];
+
 	max_blocks = max_size / (uint64_t) td->o.ba[ddir];
 	if (!max_blocks)
 		return 0;
@@ -83,13 +86,15 @@ struct rand_off {
 static int __get_next_rand_offset(struct thread_data *td, struct fio_file *f,
 				  enum fio_ddir ddir, uint64_t *b)
 {
-	uint64_t r, lastb;
-
-	lastb = last_block(td, f, ddir);
-	if (!lastb)
-		return 1;
+	uint64_t r;
 
 	if (td->o.random_generator == FIO_RAND_GEN_TAUSWORTHE) {
+		uint64_t lastb;
+
+		lastb = last_block(td, f, ddir);
+		if (!lastb)
+			return 1;
+
 		r = __rand(&td->random_state);
 
 		dprint(FD_RANDOM, "off rand %llu\n", (unsigned long long) r);
@@ -98,7 +103,9 @@ static int __get_next_rand_offset(struct thread_data *td, struct fio_file *f,
 	} else {
 		uint64_t off = 0;
 
-		if (lfsr_next(&f->lfsr, &off, lastb))
+		assert(fio_file_lfsr(f));
+
+		if (lfsr_next(&f->lfsr, &off))
 			return 1;
 
 		*b = off;
@@ -206,7 +213,6 @@ static int get_next_rand_offset(struct thread_data *td, struct fio_file *f,
 		return get_off_from_method(td, f, ddir, b);
 
 	if (!flist_empty(&td->next_rand_list)) {
-		struct rand_off *r;
 fetch:
 		r = flist_first_entry(&td->next_rand_list, struct rand_off, list);
 		flist_del(&r->list);
@@ -248,7 +254,7 @@ static int get_next_rand_block(struct thread_data *td, struct fio_file *f,
 	}
 
 	dprint(FD_IO, "%s: rand offset failed, last=%llu, size=%llu\n",
-			f->file_name, (unsigned long long) f->last_pos,
+			f->file_name, (unsigned long long) f->last_pos[ddir],
 			(unsigned long long) f->real_file_size);
 	return 1;
 }
@@ -260,17 +266,17 @@ static int get_next_seq_offset(struct thread_data *td, struct fio_file *f,
 
 	assert(ddir_rw(ddir));
 
-	if (f->last_pos >= f->io_size + get_start_offset(td, f) &&
+	if (f->last_pos[ddir] >= f->io_size + get_start_offset(td, f) &&
 	    o->time_based)
-		f->last_pos = f->last_pos - f->io_size;
+		f->last_pos[ddir] = f->last_pos[ddir] - f->io_size;
 
-	if (f->last_pos < f->real_file_size) {
+	if (f->last_pos[ddir] < f->real_file_size) {
 		uint64_t pos;
 
-		if (f->last_pos == f->file_offset && o->ddir_seq_add < 0)
-			f->last_pos = f->real_file_size;
+		if (f->last_pos[ddir] == f->file_offset && o->ddir_seq_add < 0)
+			f->last_pos[ddir] = f->real_file_size;
 
-		pos = f->last_pos - f->file_offset;
+		pos = f->last_pos[ddir] - f->file_offset;
 		if (pos && o->ddir_seq_add) {
 			pos += o->ddir_seq_add;
 
@@ -329,8 +335,8 @@ static int get_next_block(struct thread_data *td, struct io_u *io_u,
 			*is_random = 0;
 		}
 	} else if (td->o.rw_seq == RW_SEQ_IDENT) {
-		if (f->last_start != -1ULL)
-			offset = f->last_start - f->file_offset;
+		if (f->last_start[ddir] != -1ULL)
+			offset = f->last_start[ddir] - f->file_offset;
 		else
 			offset = 0;
 		ret = 0;
@@ -526,6 +532,12 @@ void io_u_quiesce(struct thread_data *td)
 	 * io's that have been actually submitted to an async engine,
 	 * and cur_depth is meaningless for sync engines.
 	 */
+	if (td->io_u_queued || td->cur_depth) {
+		int fio_unused ret;
+
+		ret = td_io_commit(td);
+	}
+
 	while (td->io_u_in_flight) {
 		int fio_unused ret;
 
@@ -536,7 +548,6 @@ void io_u_quiesce(struct thread_data *td)
 static enum fio_ddir rate_ddir(struct thread_data *td, enum fio_ddir ddir)
 {
 	enum fio_ddir odir = ddir ^ 1;
-	struct timeval t;
 	long usec;
 
 	assert(ddir_rw(ddir));
@@ -571,9 +582,7 @@ static enum fio_ddir rate_ddir(struct thread_data *td, enum fio_ddir ddir)
 
 	io_u_quiesce(td);
 
-	fio_gettime(&t, NULL);
-	usec_sleep(td, usec);
-	usec = utime_since_now(&t);
+	usec = usec_sleep(td, usec);
 
 	td->rate_pending_usleep[ddir] -= usec;
 
@@ -581,9 +590,6 @@ static enum fio_ddir rate_ddir(struct thread_data *td, enum fio_ddir ddir)
 	if (td_rw(td) && __should_check_rate(td, odir))
 		td->rate_pending_usleep[odir] -= usec;
 
-	if (ddir_trim(ddir))
-		return ddir;
-
 	return ddir;
 }
 
@@ -742,7 +748,7 @@ static int fill_io_u(struct thread_data *td, struct io_u *io_u)
 		 */
 		if (f->file_offset >= f->real_file_size)
 			f->file_offset = f->real_file_size - f->file_offset;
-		f->last_pos = f->file_offset;
+		f->last_pos[io_u->ddir] = f->file_offset;
 		td->io_skip_bytes += td->o.zone_skip;
 	}
 
@@ -1283,6 +1289,9 @@ struct io_u *__get_io_u(struct thread_data *td)
 {
 	struct io_u *io_u = NULL;
 
+	if (td->stop_io)
+		return NULL;
+
 	td_io_u_lock(td);
 
 again:
@@ -1467,15 +1476,16 @@ struct io_u *get_io_u(struct thread_data *td)
 			goto err_put;
 		}
 
-		f->last_start = io_u->offset;
-		f->last_pos = io_u->offset + io_u->buflen;
+		f->last_start[io_u->ddir] = io_u->offset;
+		f->last_pos[io_u->ddir] = io_u->offset + io_u->buflen;
 
 		if (io_u->ddir == DDIR_WRITE) {
 			if (td->flags & TD_F_REFILL_BUFFERS) {
 				io_u_fill_buffer(td, io_u,
 					td->o.min_bs[DDIR_WRITE],
 					io_u->xfer_buflen);
-			} else if ((td->flags & TD_F_SCRAMBLE_BUFFERS) &&
+			}
+			if ((td->flags & TD_F_SCRAMBLE_BUFFERS) &&
 				   !(td->flags & TD_F_COMPRESS))
 				do_scramble = 1;
 			if (td->flags & TD_F_VER_NONE) {
@@ -1643,13 +1653,22 @@ static void io_completed(struct thread_data *td, struct io_u **io_u_ptr,
 		if (!(io_u->flags & IO_U_F_VER_LIST))
 			td->this_io_bytes[ddir] += bytes;
 
-		if (ddir == DDIR_WRITE && f) {
-			if (f->first_write == -1ULL ||
-			    io_u->offset < f->first_write)
-				f->first_write = io_u->offset;
-			if (f->last_write == -1ULL ||
-			    ((io_u->offset + bytes) > f->last_write))
-				f->last_write = io_u->offset + bytes;
+		if (ddir == DDIR_WRITE) {
+			if (f) {
+				if (f->first_write == -1ULL ||
+				    io_u->offset < f->first_write)
+					f->first_write = io_u->offset;
+				if (f->last_write == -1ULL ||
+				    ((io_u->offset + bytes) > f->last_write))
+					f->last_write = io_u->offset + bytes;
+			}
+			if (td->last_write_comp) {
+				int idx = td->last_write_idx++;
+
+				td->last_write_comp[idx] = io_u->offset;
+				if (td->last_write_idx == td->o.iodepth)
+					td->last_write_idx = 0;
+			}
 		}
 
 		if (ramp_time_over(td) && (td->runstate == TD_RUNNING ||
@@ -1846,9 +1865,9 @@ static void save_buf_state(struct thread_data *td, struct frand_state *rs)
 void fill_io_buffer(struct thread_data *td, void *buf, unsigned int min_write,
 		    unsigned int max_bs)
 {
-	if (td->o.buffer_pattern_bytes)
-		fill_buffer_pattern(td, buf, max_bs);
-	else if (!td->o.zero_buffers) {
+	struct thread_options *o = &td->o;
+
+	if (o->compress_percentage || o->dedupe_percentage) {
 		unsigned int perc = td->o.compress_percentage;
 		struct frand_state *rs;
 		unsigned int left = max_bs;
@@ -1866,7 +1885,8 @@ void fill_io_buffer(struct thread_data *td, void *buf, unsigned int min_write,
 				seg = min_write;
 
 			fill_random_buf_percentage(rs, buf, perc, seg,
-						min_write);
+						min_write, o->buffer_pattern,
+						o->buffer_pattern_bytes);
 		} else
 			fill_random_buf(rs, buf, min_write);
 
@@ -1874,8 +1894,12 @@ void fill_io_buffer(struct thread_data *td, void *buf, unsigned int min_write,
 			left -= min_write;
 			save_buf_state(td, rs);
 		} while (left);
-	} else
+	} else if (o->buffer_pattern_bytes)
+		fill_buffer_pattern(td, buf, max_bs);
+	else if (o->zero_buffers)
 		memset(buf, 0, max_bs);
+	else
+		fill_random_buf(get_buf_state(td), buf, max_bs);
 }
 
 /*
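
Two functional changes above are easy to miss amid the f->last_pos[ddir] conversion. First, the io_completed() hunk starts recording each completed write offset into a fixed-size wrap-around log, one slot per queued I/O (td->last_write_comp indexed by td->last_write_idx, wrapping at td->o.iodepth). The following is a standalone sketch of that bookkeeping only, not fio code; the names write_comp_log and record_write_completion are invented for the example.

/*
 * Standalone sketch of the completed-write ring added in io_completed();
 * write_comp_log and record_write_completion are illustrative names,
 * not fio API.
 */
#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct write_comp_log {
	uint64_t *offsets;	/* one slot per queued I/O, like td->last_write_comp */
	unsigned int idx;	/* next slot to fill, like td->last_write_idx */
	unsigned int depth;	/* wrap point, like td->o.iodepth */
};

static void record_write_completion(struct write_comp_log *log, uint64_t offset)
{
	/* store the completed offset, then wrap the index at the queue depth */
	log->offsets[log->idx++] = offset;
	if (log->idx == log->depth)
		log->idx = 0;
}

int main(void)
{
	struct write_comp_log log = { .offsets = NULL, .idx = 0, .depth = 4 };
	uint64_t off;
	unsigned int i;

	log.offsets = calloc(log.depth, sizeof(uint64_t));
	assert(log.offsets);

	/* six completions against a depth of four: the two oldest are overwritten */
	for (off = 0; off < 6 * 4096; off += 4096)
		record_write_completion(&log, off);

	for (i = 0; i < log.depth; i++)
		printf("slot %u: %" PRIu64 "\n", i, log.offsets[i]);

	free(log.offsets);
	return 0;
}

Because the index wraps at the queue depth, the array always holds the offsets of the most recent write completions, with older entries overwritten in arrival order.

Second, the last two hunks reorder the buffer-fill policy in fill_io_buffer(): a compressible/dedupable fill now takes precedence (and threads the configured pattern through the extra fill_random_buf_percentage() arguments), then an explicit buffer pattern, then zero buffers, with a fully random fill as the default. A decision-only sketch of that precedence follows; the enum and helper are illustrative, not fio API.

/*
 * Decision-only sketch of the fill precedence established by the last two
 * hunks of fill_io_buffer(); fill_mode and pick_fill_mode are illustrative.
 */
enum fill_mode {
	FILL_COMPRESSIBLE,	/* compress_percentage or dedupe_percentage set */
	FILL_PATTERN,		/* buffer_pattern_bytes set */
	FILL_ZERO,		/* zero_buffers set */
	FILL_RANDOM,		/* default */
};

enum fill_mode pick_fill_mode(unsigned int compress_pct, unsigned int dedupe_pct,
			      unsigned int pattern_bytes, int zero_buffers)
{
	if (compress_pct || dedupe_pct)
		return FILL_COMPRESSIBLE;
	if (pattern_bytes)
		return FILL_PATTERN;
	if (zero_buffers)
		return FILL_ZERO;
	return FILL_RANDOM;
}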