X-Git-Url: https://git.kernel.dk/?p=fio.git;a=blobdiff_plain;f=io_u.c;h=84d9315512c9d5b4c1b68924f341daa05fbb4d82;hp=8087bd6a119217e9df7954bfec52a4afaea1dcd7;hb=93b45bb2e4c511f2d9a9a7552d74e1d921b0bf76;hpb=fc9d5a6ae0c29ae26a8db15f553c7197a3ba9440

diff --git a/io_u.c b/io_u.c
index 8087bd6a..84d93155 100644
--- a/io_u.c
+++ b/io_u.c
@@ -163,7 +163,6 @@ static int __get_next_rand_offset_zoned_abs(struct thread_data *td,
 {
 	struct zone_split_index *zsi;
 	uint64_t lastb, send, stotal;
-	static int warned;
 	unsigned int v;

 	lastb = last_block(td, f, ddir);
@@ -192,10 +191,8 @@ bail:
 	 * Should never happen
 	 */
 	if (send == -1U) {
-		if (!warned) {
+		if (!fio_did_warn(FIO_WARN_ZONED_BUG))
 			log_err("fio: bug in zoned generation\n");
-			warned = 1;
-		}
 		goto bail;
 	} else if (send > lastb) {
 		/*
@@ -223,7 +220,6 @@ static int __get_next_rand_offset_zoned(struct thread_data *td,
 {
 	unsigned int v, send, stotal;
 	uint64_t offset, lastb;
-	static int warned;
 	struct zone_split_index *zsi;

 	lastb = last_block(td, f, ddir);
@@ -248,10 +244,8 @@ bail:
 	 * Should never happen
 	 */
 	if (send == -1U) {
-		if (!warned) {
+		if (!fio_did_warn(FIO_WARN_ZONED_BUG))
 			log_err("fio: bug in zoned generation\n");
-			warned = 1;
-		}
 		goto bail;
 	}

@@ -436,7 +430,11 @@ static int get_next_seq_offset(struct thread_data *td, struct fio_file *f,
 	if (f->last_pos[ddir] < f->real_file_size) {
 		uint64_t pos;

-		if (f->last_pos[ddir] == f->file_offset && o->ddir_seq_add < 0) {
+		/*
+		 * Only rewind if we already hit the end
+		 */
+		if (f->last_pos[ddir] == f->file_offset &&
+		    f->file_offset && o->ddir_seq_add < 0) {
 			if (f->real_file_size > f->io_size)
 				f->last_pos[ddir] = f->io_size;
 			else
@@ -476,7 +474,7 @@ static int get_next_seq_offset(struct thread_data *td, struct fio_file *f,

 static int get_next_block(struct thread_data *td, struct io_u *io_u,
 			  enum fio_ddir ddir, int rw_seq,
-			  unsigned int *is_random)
+			  bool *is_random)
 {
 	struct fio_file *f = io_u->file;
 	uint64_t b, offset;
@@ -490,27 +488,27 @@ static int get_next_block(struct thread_data *td, struct io_u *io_u,
 		if (td_random(td)) {
 			if (should_do_random(td, ddir)) {
 				ret = get_next_rand_block(td, f, ddir, &b);
-				*is_random = 1;
+				*is_random = true;
 			} else {
-				*is_random = 0;
+				*is_random = false;
 				io_u_set(td, io_u, IO_U_F_BUSY_OK);
 				ret = get_next_seq_offset(td, f, ddir, &offset);
 				if (ret)
 					ret = get_next_rand_block(td, f, ddir, &b);
 			}
 		} else {
-			*is_random = 0;
+			*is_random = false;
 			ret = get_next_seq_offset(td, f, ddir, &offset);
 		}
 	} else {
 		io_u_set(td, io_u, IO_U_F_BUSY_OK);
-		*is_random = 0;
+		*is_random = false;

 		if (td->o.rw_seq == RW_SEQ_SEQ) {
 			ret = get_next_seq_offset(td, f, ddir, &offset);
 			if (ret) {
 				ret = get_next_rand_block(td, f, ddir, &b);
-				*is_random = 0;
+				*is_random = false;
 			}
 		} else if (td->o.rw_seq == RW_SEQ_IDENT) {
 			if (f->last_start[ddir] != -1ULL)
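
Note: the first four hunks above replace per-function "static int warned" flags with fio's centralized warn-once helper, so each warning class fires at most once per run. As a rough sketch of how such a helper can work (the fio_did_warn()/FIO_WARN_ZONED_BUG names come from the patch itself; this particular implementation is an illustrative assumption, not fio's actual code):

#include <stdbool.h>

enum {
	FIO_WARN_ZONED_BUG = 1U << 0,	/* flag name taken from the patch */
};

static unsigned int fio_warned;		/* one bit per warning class */

static bool fio_did_warn(unsigned int warn)
{
	if (fio_warned & warn)
		return true;	/* already warned, caller stays silent */

	fio_warned |= warn;	/* first hit: record it, caller logs once */
	return false;
}

A shared bitmask keeps the once-only bookkeeping in one place instead of scattered function-local statics; leaving the update unsynchronized is a benign race here, since the worst case is one duplicated warning.
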
@@ -543,8 +541,8 @@ static int get_next_block(struct thread_data *td, struct io_u *io_u,
  * until we find a free one. For sequential io, just return the end of
  * the last io issued.
  */
-static int __get_next_offset(struct thread_data *td, struct io_u *io_u,
-			     unsigned int *is_random)
+static int get_next_offset(struct thread_data *td, struct io_u *io_u,
+			   bool *is_random)
 {
 	struct fio_file *f = io_u->file;
 	enum fio_ddir ddir = io_u->ddir;
@@ -578,19 +576,6 @@ static int __get_next_offset(struct thread_data *td, struct io_u *io_u,
 	return 0;
 }

-static int get_next_offset(struct thread_data *td, struct io_u *io_u,
-			   unsigned int *is_random)
-{
-	if (td->flags & TD_F_PROFILE_OPS) {
-		struct prof_io_ops *ops = &td->prof_io_ops;
-
-		if (ops->fill_io_u_off)
-			return ops->fill_io_u_off(td, io_u, is_random);
-	}
-
-	return __get_next_offset(td, io_u, is_random);
-}
-
 static inline bool io_u_fits(struct thread_data *td, struct io_u *io_u,
 			     unsigned int buflen)
 {
@@ -599,8 +584,8 @@ static inline bool io_u_fits(struct thread_data *td, struct io_u *io_u,
 	return io_u->offset + buflen <= f->io_size + get_start_offset(td, f);
 }

-static unsigned int __get_next_buflen(struct thread_data *td, struct io_u *io_u,
-				      unsigned int is_random)
+static unsigned int get_next_buflen(struct thread_data *td, struct io_u *io_u,
+				    bool is_random)
 {
 	int ddir = io_u->ddir;
 	unsigned int buflen = 0;
@@ -611,7 +596,7 @@ static unsigned int __get_next_buflen(struct thread_data *td, struct io_u *io_u,
 	assert(ddir_rw(ddir));

 	if (td->o.bs_is_seq_rand)
-		ddir = is_random ? DDIR_WRITE: DDIR_READ;
+		ddir = is_random ? DDIR_WRITE : DDIR_READ;

 	minbs = td->o.min_bs[ddir];
 	maxbs = td->o.max_bs[ddir];
@@ -661,19 +646,6 @@ static unsigned int __get_next_buflen(struct thread_data *td, struct io_u *io_u,
 	return buflen;
 }

-static unsigned int get_next_buflen(struct thread_data *td, struct io_u *io_u,
-				    unsigned int is_random)
-{
-	if (td->flags & TD_F_PROFILE_OPS) {
-		struct prof_io_ops *ops = &td->prof_io_ops;
-
-		if (ops->fill_io_u_size)
-			return ops->fill_io_u_size(td, io_u, is_random);
-	}
-
-	return __get_next_buflen(td, io_u, is_random);
-}
-
 static void set_rwmix_bytes(struct thread_data *td)
 {
 	unsigned int diff;
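
Note: with the TD_F_PROFILE_OPS indirection removed above, the former __get_next_offset()/__get_next_buflen() workers are now called directly under their public names. The heart of get_next_buflen() is a bounded random pick rounded to the minimum block size; a simplified, self-contained sketch of that idea (ignoring the bssplit weights, bs_unaligned, and the io_u_fits() retry loop of the real function):

#include <stdint.h>
#include <stdlib.h>

/* Pick a length between minbs and maxbs, rounded down to a minbs boundary. */
static unsigned int next_buflen_sketch(unsigned int minbs, unsigned int maxbs)
{
	uint64_t r = (uint64_t) rand();
	unsigned int buflen;

	if (minbs == maxbs)
		return minbs;

	/* scale r into the [minbs, maxbs) range, then align to minbs */
	buflen = minbs + (unsigned int) (((uint64_t) (maxbs - minbs) * r) /
					 ((uint64_t) RAND_MAX + 1));
	buflen -= buflen % minbs;
	return buflen;
}
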
@@ -759,11 +731,11 @@ static enum fio_ddir rate_ddir(struct thread_data *td, enum fio_ddir ddir)
 		return odir;

 	/*
-	 * Both directions are ahead of rate. sleep the min
-	 * switch if necissary
+	 * Both directions are ahead of rate. sleep the min,
+	 * switch if necessary
 	 */
 	if (td->rate_next_io_time[ddir] <=
-		td->rate_next_io_time[odir]) {
+	    td->rate_next_io_time[odir]) {
 		usec = td->rate_next_io_time[ddir] - now;
 	} else {
 		usec = td->rate_next_io_time[odir] - now;
@@ -775,8 +747,7 @@ static enum fio_ddir rate_ddir(struct thread_data *td, enum fio_ddir ddir)
 	if (td->o.io_submit_mode == IO_MODE_INLINE)
 		io_u_quiesce(td);

-	usec = usec_sleep(td, usec);
-
+	usec_sleep(td, usec);
 	return ddir;
 }

@@ -923,27 +894,14 @@ void requeue_io_u(struct thread_data *td, struct io_u **io_u)
 	*io_u = NULL;
 }

-static int fill_io_u(struct thread_data *td, struct io_u *io_u)
+static void __fill_io_u_zone(struct thread_data *td, struct io_u *io_u)
 {
-	unsigned int is_random;
-
-	if (td_ioengine_flagged(td, FIO_NOIO))
-		goto out;
-
-	set_rw_ddir(td, io_u);
-
-	/*
-	 * fsync() or fdatasync() or trim etc, we are done
-	 */
-	if (!ddir_rw(io_u->ddir))
-		goto out;
+	struct fio_file *f = io_u->file;

 	/*
 	 * See if it's time to switch to a new zone
 	 */
 	if (td->zone_bytes >= td->o.zone_size && td->o.zone_skip) {
-		struct fio_file *f = io_u->file;
-
 		td->zone_bytes = 0;
 		f->file_offset += td->o.zone_range + td->o.zone_skip;

@@ -956,6 +914,47 @@ static int fill_io_u(struct thread_data *td, struct io_u *io_u)

 		td->io_skip_bytes += td->o.zone_skip;
 	}
+
+	/*
+	 * If zone_size > zone_range, then maintain the same zone until
+	 * zone_bytes >= zone_size.
+	 */
+	if (f->last_pos[io_u->ddir] >= (f->file_offset + td->o.zone_range)) {
+		dprint(FD_IO, "io_u maintain zone offset=%" PRIu64 "/last_pos=%" PRIu64 "\n",
+		       f->file_offset, f->last_pos[io_u->ddir]);
+		f->last_pos[io_u->ddir] = f->file_offset;
+	}
+
+	/*
+	 * For random: if 'norandommap' is not set and zone_size > zone_range,
+	 * map needs to be reset as it's done with zone_range every time.
+	 */
+	if ((td->zone_bytes % td->o.zone_range) == 0) {
+		fio_file_reset(td, f);
+	}
+}
+
+static int fill_io_u(struct thread_data *td, struct io_u *io_u)
+{
+	bool is_random;
+
+	if (td_ioengine_flagged(td, FIO_NOIO))
+		goto out;
+
+	set_rw_ddir(td, io_u);
+
+	/*
+	 * fsync() or fdatasync() or trim etc, we are done
+	 */
+	if (!ddir_rw(io_u->ddir))
+		goto out;
+
+	/*
+	 * When file is zoned, zone_range is always positive
+	 */
+	if (td->o.zone_range) {
+		__fill_io_u_zone(td, io_u);
+	}

 	/*
 	 * No log, let the seq/rand engine retrieve the next buflen and
 	 * position.
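
Note: the split of fill_io_u() above moves all zone handling into __fill_io_u_zone(), which fill_io_u() now calls whenever zone_range is set. The zone stepping itself is easy to model in isolation: a window of zone_range bytes is exercised until zone_size bytes have been issued, then the window jumps forward by zone_range + zone_skip. A toy model under those assumptions (the struct and the simplistic wrap are invented for the example; the real code also clamps last_pos and resets the random map, as the hunk shows):

#include <stdint.h>

struct zone_model {
	uint64_t file_offset;	/* start of the current zone window */
	uint64_t zone_bytes;	/* bytes issued within the current zone */
};

static void zone_account(struct zone_model *z, uint64_t issued,
			 uint64_t zone_size, uint64_t zone_range,
			 uint64_t zone_skip, uint64_t file_size)
{
	z->zone_bytes += issued;

	/* time to switch to a new zone? */
	if (z->zone_bytes >= zone_size && zone_skip) {
		z->zone_bytes = 0;
		z->file_offset += zone_range + zone_skip;
		if (z->file_offset >= file_size)
			z->file_offset = 0;	/* simplistic wrap */
	}
}
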
@@ -972,9 +971,8 @@ static int fill_io_u(struct thread_data *td, struct io_u *io_u)
 	}

 	if (io_u->offset + io_u->buflen > io_u->file->real_file_size) {
-		dprint(FD_IO, "io_u %p, offset + buflen exceeds file size\n",
-			io_u);
-		dprint(FD_IO, "  offset=%llu/buflen=%lu > %llu\n",
+		dprint(FD_IO, "io_u %p, off=0x%llx + len=0x%lx exceeds file size=0x%llx\n",
+			io_u,
 			(unsigned long long) io_u->offset, io_u->buflen,
 			(unsigned long long) io_u->file->real_file_size);
 		return 1;
@@ -987,12 +985,12 @@ static int fill_io_u(struct thread_data *td, struct io_u *io_u)
 		mark_random_map(td, io_u);

 out:
-	dprint_io_u(io_u, "fill_io_u");
+	dprint_io_u(io_u, "fill");
 	td->zone_bytes += io_u->buflen;
 	return 0;
 }

-static void __io_u_mark_map(unsigned int *map, unsigned int nr)
+static void __io_u_mark_map(uint64_t *map, unsigned int nr)
 {
 	int idx = 0;

@@ -1367,13 +1365,6 @@ out:

 static struct fio_file *get_next_file(struct thread_data *td)
 {
-	if (td->flags & TD_F_PROFILE_OPS) {
-		struct prof_io_ops *ops = &td->prof_io_ops;
-
-		if (ops->get_next_file)
-			return ops->get_next_file(td);
-	}
-
 	return __get_next_file(td);
 }

@@ -1567,6 +1558,7 @@ bool queue_full(const struct thread_data *td)
 struct io_u *__get_io_u(struct thread_data *td)
 {
 	struct io_u *io_u = NULL;
+	int ret;

 	if (td->stop_io)
 		return NULL;
@@ -1603,7 +1595,8 @@ again:
 		 * return one
 		 */
 		assert(!(td->flags & TD_F_CHILD));
-		assert(!pthread_cond_wait(&td->free_cond, &td->io_u_lock));
+		ret = pthread_cond_wait(&td->free_cond, &td->io_u_lock);
+		assert(ret == 0);
 		goto again;
 	}

@@ -1615,22 +1608,19 @@ static bool check_get_trim(struct thread_data *td, struct io_u *io_u)
 {
 	if (!(td->flags & TD_F_TRIM_BACKLOG))
 		return false;
+	if (!td->trim_entries)
+		return false;

-	if (td->trim_entries) {
-		int get_trim = 0;
-
-		if (td->trim_batch) {
-			td->trim_batch--;
-			get_trim = 1;
-		} else if (!(td->io_hist_len % td->o.trim_backlog) &&
-			 td->last_ddir != DDIR_READ) {
-			td->trim_batch = td->o.trim_batch;
-			if (!td->trim_batch)
-				td->trim_batch = td->o.trim_backlog;
-			get_trim = 1;
-		}
-
-		if (get_trim && get_next_trim(td, io_u))
+	if (td->trim_batch) {
+		td->trim_batch--;
+		if (get_next_trim(td, io_u))
+			return true;
+	} else if (!(td->io_hist_len % td->o.trim_backlog) &&
+		   td->last_ddir != DDIR_READ) {
+		td->trim_batch = td->o.trim_batch;
+		if (!td->trim_batch)
+			td->trim_batch = td->o.trim_backlog;
+		if (get_next_trim(td, io_u))
 			return true;
 	}

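
Note: the pthread_cond_wait() change in __get_io_u() above is a correctness fix, not a style cleanup: assert() expands to nothing when NDEBUG is defined, so assert(!pthread_cond_wait(...)) would drop the wait entirely in a release build. Capturing the return value keeps the call unconditional. A standalone illustration of the pattern (hypothetical example, not fio code):

#include <assert.h>
#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int ready;

static void wait_for_ready(void)
{
	int ret;

	pthread_mutex_lock(&lock);
	while (!ready) {
		/*
		 * Wrong: assert(!pthread_cond_wait(&cond, &lock));
		 * under -DNDEBUG the whole call disappears and this
		 * loop busy-spins while still holding the mutex.
		 */
		ret = pthread_cond_wait(&cond, &lock);
		assert(ret == 0);
		(void) ret;	/* silence unused warning under NDEBUG */
	}
	pthread_mutex_unlock(&lock);
}
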
+ * Fill offset into start of cacheline, time into end + * of cacheline */ - offset = (io_u->start_time.tv_nsec ^ boffset) & 511; - offset &= ~(sizeof(uint64_t) - 1); - if (offset >= 512 - sizeof(uint64_t)) - offset -= sizeof(uint64_t); - memcpy(p + offset, &boffset, sizeof(boffset)); - - end = p + 512 - sizeof(io_u->start_time); - memcpy(end, &io_u->start_time, sizeof(io_u->start_time)); + iptr = (void *) p + (offset << 6); + *iptr = boffset; + + iptr = (void *) p + 64 - 2 * sizeof(uint64_t); + iptr[0] = io_u->start_time.tv_sec; + iptr[1] = io_u->start_time.tv_nsec; + p += 512; boffset += 512; } @@ -1890,7 +1888,8 @@ static void account_io_completion(struct thread_data *td, struct io_u *io_u, if (no_reduce && per_unit_log(td->iops_log)) add_iops_sample(td, io_u, bytes); - } + } else if (ddir_sync(idx) && !td->o.disable_clat) + add_sync_clat_sample(&td->ts, llnsec); if (td->ts.nr_block_infos && io_u->ddir == DDIR_TRIM) { uint32_t *info = io_u_block_info(td, io_u); @@ -1928,6 +1927,12 @@ static void file_log_write_comp(const struct thread_data *td, struct fio_file *f f->last_write_idx = 0; } +static bool should_account(struct thread_data *td) +{ + return ramp_time_over(td) && (td->runstate == TD_RUNNING || + td->runstate == TD_VERIFYING); +} + static void io_completed(struct thread_data *td, struct io_u **io_u_ptr, struct io_completion_data *icd) { @@ -1935,7 +1940,7 @@ static void io_completed(struct thread_data *td, struct io_u **io_u_ptr, enum fio_ddir ddir = io_u->ddir; struct fio_file *f = io_u->file; - dprint_io_u(io_u, "io complete"); + dprint_io_u(io_u, "complete"); assert(io_u->flags & IO_U_F_FLIGHT); io_u_clear(td, io_u, IO_U_F_FLIGHT | IO_U_F_BUSY_OK); @@ -1956,15 +1961,17 @@ static void io_completed(struct thread_data *td, struct io_u **io_u_ptr, } if (ddir_sync(ddir)) { - td->last_was_sync = 1; + td->last_was_sync = true; if (f) { f->first_write = -1ULL; f->last_write = -1ULL; } + if (should_account(td)) + account_io_completion(td, io_u, icd, ddir, io_u->buflen); return; } - td->last_was_sync = 0; + td->last_was_sync = false; td->last_ddir = ddir; if (!io_u->error && ddir_rw(ddir)) { @@ -1972,17 +1979,17 @@ static void io_completed(struct thread_data *td, struct io_u **io_u_ptr, int ret; td->io_blocks[ddir]++; - td->this_io_blocks[ddir]++; td->io_bytes[ddir] += bytes; - if (!(io_u->flags & IO_U_F_VER_LIST)) + if (!(io_u->flags & IO_U_F_VER_LIST)) { + td->this_io_blocks[ddir]++; td->this_io_bytes[ddir] += bytes; + } if (ddir == DDIR_WRITE) file_log_write_comp(td, f, io_u->offset, bytes); - if (ramp_time_over(td) && (td->runstate == TD_RUNNING || - td->runstate == TD_VERIFYING)) + if (should_account(td)) account_io_completion(td, io_u, icd, ddir, bytes); icd->bytes_done[ddir] += bytes;