X-Git-Url: https://git.kernel.dk/?p=fio.git;a=blobdiff_plain;f=io_u.c;h=612057d15762dfa13e7e48bc48e99658f9e8880a;hp=eac871bfe9d91890e70e9110bd575e9a725b1a4c;hb=a0ae50a6c23daa132fefde214d024d1932997259;hpb=c0b69b92fb155424946b19228da9be0924e9e96c

diff --git a/io_u.c b/io_u.c
index eac871bf..612057d1 100644
--- a/io_u.c
+++ b/io_u.c
@@ -271,20 +271,32 @@ static int get_next_rand_block(struct thread_data *td, struct fio_file *f,
 static int get_next_seq_offset(struct thread_data *td, struct fio_file *f,
 			       enum fio_ddir ddir, uint64_t *offset)
 {
+	struct thread_options *o = &td->o;
+
 	assert(ddir_rw(ddir));
 
-	if (f->last_pos >= f->io_size + get_start_offset(td, f) && td->o.time_based)
+	if (f->last_pos >= f->io_size + get_start_offset(td, f) &&
+	    o->time_based)
 		f->last_pos = f->last_pos - f->io_size;
 
 	if (f->last_pos < f->real_file_size) {
 		uint64_t pos;
 
-		if (f->last_pos == f->file_offset && td->o.ddir_seq_add < 0)
+		if (f->last_pos == f->file_offset && o->ddir_seq_add < 0)
 			f->last_pos = f->real_file_size;
 
 		pos = f->last_pos - f->file_offset;
-		if (pos)
-			pos += td->o.ddir_seq_add;
+		if (pos && o->ddir_seq_add) {
+			pos += o->ddir_seq_add;
+
+			/*
+			 * If we reach beyond the end of the file
+			 * with holed IO, wrap around to the
+			 * beginning again.
+			 */
+			if (pos >= f->real_file_size)
+				pos = f->file_offset;
+		}
 
 		*offset = pos;
 		return 0;
@@ -748,9 +760,17 @@ static int fill_io_u(struct thread_data *td, struct io_u *io_u)
 	 * See if it's time to switch to a new zone
 	 */
 	if (td->zone_bytes >= td->o.zone_size && td->o.zone_skip) {
+		struct fio_file *f = io_u->file;
+
 		td->zone_bytes = 0;
-		io_u->file->file_offset += td->o.zone_range + td->o.zone_skip;
-		io_u->file->last_pos = io_u->file->file_offset;
+		f->file_offset += td->o.zone_range + td->o.zone_skip;
+
+		/*
+		 * Wrap from the beginning, if we exceed the file size
+		 */
+		if (f->file_offset >= f->real_file_size)
+			f->file_offset = f->real_file_size - f->file_offset;
+		f->last_pos = f->file_offset;
 		td->io_skip_bytes += td->o.zone_skip;
 	}
 
@@ -1281,7 +1301,7 @@ void lat_target_check(struct thread_data *td)
  * If latency target is enabled, we might be ramping up or down and not
  * using the full queue depth available.
  */
-int queue_full(struct thread_data *td)
+int queue_full(const struct thread_data *td)
 {
 	const int qempty = io_u_qempty(&td->io_u_freelist);
 
@@ -1487,7 +1507,8 @@ struct io_u *get_io_u(struct thread_data *td)
 	if (io_u->ddir == DDIR_WRITE) {
 		if (td->flags & TD_F_REFILL_BUFFERS) {
 			io_u_fill_buffer(td, io_u,
-				io_u->xfer_buflen, io_u->xfer_buflen);
+				td->o.min_bs[DDIR_WRITE],
+				io_u->xfer_buflen);
 		} else if ((td->flags & TD_F_SCRAMBLE_BUFFERS) &&
 			   !(td->flags & TD_F_COMPRESS))
 			do_scramble = 1;
@@ -1864,22 +1885,29 @@ void fill_io_buffer(struct thread_data *td, void *buf, unsigned int min_write,
 	else if (!td->o.zero_buffers) {
 		unsigned int perc = td->o.compress_percentage;
 		struct frand_state *rs;
+		unsigned int left = max_bs;
 
-		rs = get_buf_state(td);
+		do {
+			rs = get_buf_state(td);
 
-		if (perc) {
-			unsigned int seg = min_write;
+			min_write = min(min_write, left);
 
-			seg = min(min_write, td->o.compress_chunk);
-			if (!seg)
-				seg = min_write;
+			if (perc) {
+				unsigned int seg = min_write;
 
-			fill_random_buf_percentage(rs, buf, perc, seg, max_bs);
-			save_buf_state(td, rs);
-		} else {
-			fill_random_buf(rs, buf, max_bs);
+				seg = min(min_write, td->o.compress_chunk);
+				if (!seg)
+					seg = min_write;
+
+				fill_random_buf_percentage(rs, buf, perc, seg,
+							   min_write);
+			} else
+				fill_random_buf(rs, buf, min_write);
+
+			buf += min_write;
+			left -= min_write;
 			save_buf_state(td, rs);
-		}
+		} while (left);
 	} else
 		memset(buf, 0, max_bs);
 }
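
The first hunk changes get_next_seq_offset() so that a job using a sequential
stride (a non-zero ddir_seq_add, i.e. holed IO) wraps back to the file's start
offset instead of generating positions past the end of the file, which matters
for time_based jobs that must keep producing offsets after the first pass. The
zone-skip hunk applies a similar wrap to f->file_offset. Below is a minimal,
stand-alone sketch of the holed-IO wrap for a forward stride; this is not fio
code, and next_holed_pos() and its parameters are illustrative names only.

#include <stdint.h>
#include <stdio.h>

/* Illustrative sketch of the wrap added to get_next_seq_offset() */
static uint64_t next_holed_pos(uint64_t pos, uint64_t seq_add,
			       uint64_t file_offset, uint64_t real_file_size)
{
	if (pos && seq_add) {
		pos += seq_add;

		/* Reached beyond the end of the file: wrap to the start */
		if (pos >= real_file_size)
			pos = file_offset;
	}
	return pos;
}

int main(void)
{
	/* 1 MiB file, 256 KiB stride, file starts at offset 0 */
	uint64_t pos = next_holed_pos(768 * 1024, 256 * 1024, 0, 1024 * 1024);

	printf("next position: %llu\n", (unsigned long long) pos); /* 0 */
	return 0;
}

Note that the stride is only applied when both pos and ddir_seq_add are
non-zero, so the very first IO of a pass still starts at the file offset.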
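The last hunk restructures fill_io_buffer() from a single fill over max_bs
bytes into a loop that fills min_write bytes at a time, clamping the final
chunk with min(min_write, left). Combined with the get_io_u() change, which
now passes td->o.min_bs[DDIR_WRITE] rather than io_u->xfer_buflen as the
minimum write size, the effect is that the random/compressible fill pattern
repeats per minimum-block-size chunk instead of spanning each IO's full
length. The sketch below shows just the loop shape; it is not fio code, and
fill_chunk() is a stand-in for fill_random_buf()/fill_random_buf_percentage().

#include <stdio.h>
#include <string.h>

#define min(a, b)	((a) < (b) ? (a) : (b))

/* Stand-in for fio's random-fill helpers */
static void fill_chunk(unsigned char *buf, unsigned int len)
{
	memset(buf, 0xa5, len);
}

/* Illustrative sketch of the new do/while loop in fill_io_buffer() */
static void fill_buffer_chunked(unsigned char *buf, unsigned int min_write,
				unsigned int max_bs)
{
	unsigned int left = max_bs;

	do {
		unsigned int this_write = min(min_write, left);

		fill_chunk(buf, this_write);
		buf += this_write;
		left -= this_write;
	} while (left);
}

int main(void)
{
	unsigned char buf[128 * 1024];

	/* 4 KiB chunks across a 128 KiB buffer: 32 fill calls */
	fill_buffer_chunked(buf, 4096, sizeof(buf));
	printf("first byte: 0x%x\n", buf[0]);
	return 0;
}

A side effect of the restructuring is that save_buf_state() now runs once per
chunk inside the loop, so the buffer random state advances per min_write chunk
rather than once per whole buffer fill.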