X-Git-Url: https://git.kernel.dk/?p=fio.git;a=blobdiff_plain;f=io_u.c;h=e4671e42ac741557006ce7056dd863508b60280c;hp=17e188ad022ee05f3bb6dceedb153a24027b2fb3;hb=709c8313cf171e63c92740c6b2f38041cb11c722;hpb=8400d9b26a4f24f3b56cc9dad02be65791b58a83 diff --git a/io_u.c b/io_u.c index 17e188ad..7cbdb915 100644 --- a/io_u.c +++ b/io_u.c @@ -6,32 +6,28 @@ #include #include "fio.h" -#include "os.h" - -/* - * Change this define to play with the timeout handling - */ -#undef FIO_USE_TIMEOUT +#include "hash.h" +#include "verify.h" +#include "trim.h" +#include "lib/rand.h" +#include "lib/axmap.h" +#include "err.h" struct io_completion_data { int nr; /* input */ int error; /* output */ - unsigned long bytes_done[2]; /* output */ + uint64_t bytes_done[DDIR_RWDIR_CNT]; /* output */ struct timeval time; /* output */ }; /* - * The ->file_map[] contains a map of blocks we have or have not done io + * The ->io_axmap contains a map of blocks we have or have not done io * to yet. Used to make sure we cover the entire range in a fair fashion. */ -static int random_map_free(struct thread_data *td, struct fio_file *f, - unsigned long long block) +static int random_map_free(struct fio_file *f, const uint64_t block) { - unsigned int idx = RAND_MAP_IDX(td, f, block); - unsigned int bit = RAND_MAP_BIT(td, f, block); - - return (f->file_map[idx] & (1UL << bit)) == 0; + return !axmap_isset(f->io_axmap, block); } /* @@ -41,89 +37,324 @@ static void mark_random_map(struct thread_data *td, struct io_u *io_u) { unsigned int min_bs = td->o.rw_min_bs; struct fio_file *f = io_u->file; - unsigned long long block; - unsigned int blocks; unsigned int nr_blocks; + uint64_t block; - block = io_u->offset / (unsigned long long) min_bs; - blocks = 0; + block = (io_u->offset - f->file_offset) / (uint64_t) min_bs; nr_blocks = (io_u->buflen + min_bs - 1) / min_bs; - while (blocks < nr_blocks) { - unsigned int idx, bit; + if (!(io_u->flags & IO_U_F_BUSY_OK)) + nr_blocks = axmap_set_nr(f->io_axmap, block, nr_blocks); - /* - * If we have a mixed random workload, we may - * encounter blocks we already did IO to. - */ - if (!td->o.ddir_nr && !random_map_free(td, f, block)) - break; + if ((nr_blocks * min_bs) < io_u->buflen) + io_u->buflen = nr_blocks * min_bs; +} + +static uint64_t last_block(struct thread_data *td, struct fio_file *f, + enum fio_ddir ddir) +{ + uint64_t max_blocks; + uint64_t max_size; + + assert(ddir_rw(ddir)); + + /* + * Hmm, should we make sure that ->io_size <= ->real_file_size? + */ + max_size = f->io_size; + if (max_size > f->real_file_size) + max_size = f->real_file_size; + + if (td->o.zone_range) + max_size = td->o.zone_range; - idx = RAND_MAP_IDX(td, f, block); - bit = RAND_MAP_BIT(td, f, block); + max_blocks = max_size / (uint64_t) td->o.ba[ddir]; + if (!max_blocks) + return 0; + + return max_blocks; +} - fio_assert(td, idx < f->num_maps); +struct rand_off { + struct flist_head list; + uint64_t off; +}; + +static int __get_next_rand_offset(struct thread_data *td, struct fio_file *f, + enum fio_ddir ddir, uint64_t *b) +{ + uint64_t r, lastb; + + lastb = last_block(td, f, ddir); + if (!lastb) + return 1; + + if (td->o.random_generator == FIO_RAND_GEN_TAUSWORTHE) { + uint64_t rmax; + + rmax = td->o.use_os_rand ? 
OS_RAND_MAX : FRAND_MAX; + + if (td->o.use_os_rand) { + rmax = OS_RAND_MAX; + r = os_random_long(&td->random_state); + } else { + rmax = FRAND_MAX; + r = __rand(&td->__random_state); + } - f->file_map[idx] |= (1UL << bit); - block++; - blocks++; + dprint(FD_RANDOM, "off rand %llu\n", (unsigned long long) r); + + *b = lastb * (r / ((uint64_t) rmax + 1.0)); + } else { + uint64_t off = 0; + + if (lfsr_next(&f->lfsr, &off, lastb)) + return 1; + + *b = off; } - if ((blocks * min_bs) < io_u->buflen) - io_u->buflen = blocks * min_bs; + /* + * if we are not maintaining a random map, we are done. + */ + if (!file_randommap(td, f)) + goto ret; + + /* + * calculate map offset and check if it's free + */ + if (random_map_free(f, *b)) + goto ret; + + dprint(FD_RANDOM, "get_next_rand_offset: offset %llu busy\n", + (unsigned long long) *b); + + *b = axmap_next_free(f->io_axmap, *b); + if (*b == (uint64_t) -1ULL) + return 1; +ret: + return 0; +} + +static int __get_next_rand_offset_zipf(struct thread_data *td, + struct fio_file *f, enum fio_ddir ddir, + uint64_t *b) +{ + *b = zipf_next(&f->zipf); + return 0; +} + +static int __get_next_rand_offset_pareto(struct thread_data *td, + struct fio_file *f, enum fio_ddir ddir, + uint64_t *b) +{ + *b = pareto_next(&f->zipf); + return 0; +} + +static int flist_cmp(void *data, struct flist_head *a, struct flist_head *b) +{ + struct rand_off *r1 = flist_entry(a, struct rand_off, list); + struct rand_off *r2 = flist_entry(b, struct rand_off, list); + + return r1->off - r2->off; +} + +static int get_off_from_method(struct thread_data *td, struct fio_file *f, + enum fio_ddir ddir, uint64_t *b) +{ + if (td->o.random_distribution == FIO_RAND_DIST_RANDOM) + return __get_next_rand_offset(td, f, ddir, b); + else if (td->o.random_distribution == FIO_RAND_DIST_ZIPF) + return __get_next_rand_offset_zipf(td, f, ddir, b); + else if (td->o.random_distribution == FIO_RAND_DIST_PARETO) + return __get_next_rand_offset_pareto(td, f, ddir, b); + + log_err("fio: unknown random distribution: %d\n", td->o.random_distribution); + return 1; } /* - * Return the next free block in the map. + * Sort the reads for a verify phase in batches of verifysort_nr, if + * specified. 
*/ -static int get_next_free_block(struct thread_data *td, struct fio_file *f, - unsigned long long *b) +static inline int should_sort_io(struct thread_data *td) { - int i; + if (!td->o.verifysort_nr || !td->o.do_verify) + return 0; + if (!td_random(td)) + return 0; + if (td->runstate != TD_VERIFYING) + return 0; + if (td->o.random_generator == FIO_RAND_GEN_TAUSWORTHE) + return 0; - i = f->last_free_lookup; - *b = (i * BLOCKS_PER_MAP); - while ((*b) * td->o.rw_min_bs < f->real_file_size) { - if (f->file_map[i] != -1UL) { - *b += ffz(f->file_map[i]); - f->last_free_lookup = i; - return 0; - } + return 1; +} + +static int should_do_random(struct thread_data *td, enum fio_ddir ddir) +{ + unsigned int v; + unsigned long r; - *b += BLOCKS_PER_MAP; - i++; + if (td->o.perc_rand[ddir] == 100) + return 1; + + if (td->o.use_os_rand) { + r = os_random_long(&td->seq_rand_state[ddir]); + v = 1 + (int) (100.0 * (r / (OS_RAND_MAX + 1.0))); + } else { + r = __rand(&td->__seq_rand_state[ddir]); + v = 1 + (int) (100.0 * (r / (FRAND_MAX + 1.0))); } - return 1; + return v <= td->o.perc_rand[ddir]; } static int get_next_rand_offset(struct thread_data *td, struct fio_file *f, - int ddir, unsigned long long *b) + enum fio_ddir ddir, uint64_t *b) { - unsigned long long max_blocks = f->io_size / td->o.min_bs[ddir]; - unsigned long long r, rb; - int loops = 5; + struct rand_off *r; + int i, ret = 1; + + if (!should_sort_io(td)) + return get_off_from_method(td, f, ddir, b); + + if (!flist_empty(&td->next_rand_list)) { + struct rand_off *r; +fetch: + r = flist_first_entry(&td->next_rand_list, struct rand_off, list); + flist_del(&r->list); + *b = r->off; + free(r); + return 0; + } - do { - r = os_random_long(&td->random_state); - if (!max_blocks) - *b = 0; - else - *b = ((max_blocks - 1) * r / (unsigned long long) (RAND_MAX+1.0)); - if (td->o.norandommap) + for (i = 0; i < td->o.verifysort_nr; i++) { + r = malloc(sizeof(*r)); + + ret = get_off_from_method(td, f, ddir, &r->off); + if (ret) { + free(r); break; - rb = *b + (f->file_offset / td->o.min_bs[ddir]); - loops--; - } while (!random_map_free(td, f, rb) && loops); + } - /* - * if we failed to retrieve a truly random offset within - * the loops assigned, see if there are free ones left at all - */ - if (!loops && get_next_free_block(td, f, b)) - return 1; + flist_add(&r->list, &td->next_rand_list); + } - return 0; + if (ret && !i) + return ret; + + assert(!flist_empty(&td->next_rand_list)); + flist_sort(NULL, &td->next_rand_list, flist_cmp); + goto fetch; +} + +static int get_next_rand_block(struct thread_data *td, struct fio_file *f, + enum fio_ddir ddir, uint64_t *b) +{ + if (!get_next_rand_offset(td, f, ddir, b)) + return 0; + + if (td->o.time_based) { + fio_file_reset(td, f); + if (!get_next_rand_offset(td, f, ddir, b)) + return 0; + } + + dprint(FD_IO, "%s: rand offset failed, last=%llu, size=%llu\n", + f->file_name, (unsigned long long) f->last_pos, + (unsigned long long) f->real_file_size); + return 1; +} + +static int get_next_seq_offset(struct thread_data *td, struct fio_file *f, + enum fio_ddir ddir, uint64_t *offset) +{ + assert(ddir_rw(ddir)); + + if (f->last_pos >= f->io_size + get_start_offset(td, f) && td->o.time_based) + f->last_pos = f->last_pos - f->io_size; + + if (f->last_pos < f->real_file_size) { + uint64_t pos; + + if (f->last_pos == f->file_offset && td->o.ddir_seq_add < 0) + f->last_pos = f->real_file_size; + + pos = f->last_pos - f->file_offset; + if (pos) + pos += td->o.ddir_seq_add; + + *offset = pos; + return 0; + } + + return 1; +} + 
+static int get_next_block(struct thread_data *td, struct io_u *io_u, + enum fio_ddir ddir, int rw_seq, + unsigned int *is_random) +{ + struct fio_file *f = io_u->file; + uint64_t b, offset; + int ret; + + assert(ddir_rw(ddir)); + + b = offset = -1ULL; + + if (rw_seq) { + if (td_random(td)) { + if (should_do_random(td, ddir)) { + ret = get_next_rand_block(td, f, ddir, &b); + *is_random = 1; + } else { + *is_random = 0; + io_u->flags |= IO_U_F_BUSY_OK; + ret = get_next_seq_offset(td, f, ddir, &offset); + if (ret) + ret = get_next_rand_block(td, f, ddir, &b); + } + } else { + *is_random = 0; + ret = get_next_seq_offset(td, f, ddir, &offset); + } + } else { + io_u->flags |= IO_U_F_BUSY_OK; + *is_random = 0; + + if (td->o.rw_seq == RW_SEQ_SEQ) { + ret = get_next_seq_offset(td, f, ddir, &offset); + if (ret) { + ret = get_next_rand_block(td, f, ddir, &b); + *is_random = 0; + } + } else if (td->o.rw_seq == RW_SEQ_IDENT) { + if (f->last_start != -1ULL) + offset = f->last_start - f->file_offset; + else + offset = 0; + ret = 0; + } else { + log_err("fio: unknown rw_seq=%d\n", td->o.rw_seq); + ret = 1; + } + } + + if (!ret) { + if (offset != -1ULL) + io_u->offset = offset; + else if (b != -1ULL) + io_u->offset = b * td->o.ba[ddir]; + else { + log_err("fio: bug in offset generation: offset=%llu, b=%llu\n", (unsigned long long) offset, (unsigned long long) b); + ret = 1; + } + } + + return ret; } /* @@ -131,52 +362,145 @@ static int get_next_rand_offset(struct thread_data *td, struct fio_file *f, * until we find a free one. For sequential io, just return the end of * the last io issued. */ -static int get_next_offset(struct thread_data *td, struct io_u *io_u) +static int __get_next_offset(struct thread_data *td, struct io_u *io_u, + unsigned int *is_random) { struct fio_file *f = io_u->file; - const int ddir = io_u->ddir; - unsigned long long b; + enum fio_ddir ddir = io_u->ddir; + int rw_seq_hit = 0; - if (td_random(td) && (td->o.ddir_nr && !--td->ddir_nr)) { - td->ddir_nr = td->o.ddir_nr; + assert(ddir_rw(ddir)); - if (get_next_rand_offset(td, f, ddir, &b)) - return 1; - } else { - if (f->last_pos >= f->real_file_size) - return 1; + if (td->o.ddir_seq_nr && !--td->ddir_seq_nr) { + rw_seq_hit = 1; + td->ddir_seq_nr = td->o.ddir_seq_nr; + } - b = f->last_pos / td->o.min_bs[ddir]; + if (get_next_block(td, io_u, ddir, rw_seq_hit, is_random)) + return 1; + + if (io_u->offset >= f->io_size) { + dprint(FD_IO, "get_next_offset: offset %llu >= io_size %llu\n", + (unsigned long long) io_u->offset, + (unsigned long long) f->io_size); + return 1; } - io_u->offset = (b * td->o.min_bs[ddir]) + f->file_offset; - if (io_u->offset >= f->real_file_size) + io_u->offset += f->file_offset; + if (io_u->offset >= f->real_file_size) { + dprint(FD_IO, "get_next_offset: offset %llu >= size %llu\n", + (unsigned long long) io_u->offset, + (unsigned long long) f->real_file_size); return 1; + } return 0; } -static unsigned int get_next_buflen(struct thread_data *td, struct io_u *io_u) +static int get_next_offset(struct thread_data *td, struct io_u *io_u, + unsigned int *is_random) { - const int ddir = io_u->ddir; - unsigned int buflen; - long r; + if (td->flags & TD_F_PROFILE_OPS) { + struct prof_io_ops *ops = &td->prof_io_ops; - if (td->o.min_bs[ddir] == td->o.max_bs[ddir]) - buflen = td->o.min_bs[ddir]; - else { - r = os_random_long(&td->bsrange_state); - buflen = (unsigned int) (1 + (double) (td->o.max_bs[ddir] - 1) * r / (RAND_MAX + 1.0)); - if (!td->o.bs_unaligned) - buflen = (buflen + td->o.min_bs[ddir] - 1) & 
~(td->o.min_bs[ddir] - 1); + if (ops->fill_io_u_off) + return ops->fill_io_u_off(td, io_u, is_random); } + return __get_next_offset(td, io_u, is_random); +} + +static inline int io_u_fits(struct thread_data *td, struct io_u *io_u, + unsigned int buflen) +{ + struct fio_file *f = io_u->file; + + return io_u->offset + buflen <= f->io_size + get_start_offset(td, f); +} + +static unsigned int __get_next_buflen(struct thread_data *td, struct io_u *io_u, + unsigned int is_random) +{ + int ddir = io_u->ddir; + unsigned int buflen = 0; + unsigned int minbs, maxbs; + unsigned long r, rand_max; + + assert(ddir_rw(ddir)); + + if (td->o.bs_is_seq_rand) + ddir = is_random ? DDIR_WRITE: DDIR_READ; + + minbs = td->o.min_bs[ddir]; + maxbs = td->o.max_bs[ddir]; + + if (minbs == maxbs) + return minbs; + + /* + * If we can't satisfy the min block size from here, then fail + */ + if (!io_u_fits(td, io_u, minbs)) + return 0; + + if (td->o.use_os_rand) + rand_max = OS_RAND_MAX; + else + rand_max = FRAND_MAX; + + do { + if (td->o.use_os_rand) + r = os_random_long(&td->bsrange_state); + else + r = __rand(&td->__bsrange_state); + + if (!td->o.bssplit_nr[ddir]) { + buflen = 1 + (unsigned int) ((double) maxbs * + (r / (rand_max + 1.0))); + if (buflen < minbs) + buflen = minbs; + } else { + long perc = 0; + unsigned int i; + + for (i = 0; i < td->o.bssplit_nr[ddir]; i++) { + struct bssplit *bsp = &td->o.bssplit[ddir][i]; + + buflen = bsp->bs; + perc += bsp->perc; + if ((r <= ((rand_max / 100L) * perc)) && + io_u_fits(td, io_u, buflen)) + break; + } + } + + if (td->o.do_verify && td->o.verify != VERIFY_NONE) + buflen = (buflen + td->o.verify_interval - 1) & + ~(td->o.verify_interval - 1); + + if (!td->o.bs_unaligned && is_power_of_2(minbs)) + buflen = (buflen + minbs - 1) & ~(minbs - 1); + + } while (!io_u_fits(td, io_u, buflen)); + return buflen; } +static unsigned int get_next_buflen(struct thread_data *td, struct io_u *io_u, + unsigned int is_random) +{ + if (td->flags & TD_F_PROFILE_OPS) { + struct prof_io_ops *ops = &td->prof_io_ops; + + if (ops->fill_io_u_size) + return ops->fill_io_u_size(td, io_u, is_random); + } + + return __get_next_buflen(td, io_u, is_random); +} + static void set_rwmix_bytes(struct thread_data *td) { - unsigned long long rbytes; unsigned int diff; /* @@ -184,25 +508,101 @@ static void set_rwmix_bytes(struct thread_data *td) * buffered writes may issue a lot quicker than they complete, * whereas reads do not. */ - rbytes = td->io_bytes[td->rwmix_ddir] - td->rwmix_bytes; diff = td->o.rwmix[td->rwmix_ddir ^ 1]; - - td->rwmix_bytes = td->io_bytes[td->rwmix_ddir] + (rbytes * ((100 - diff)) / diff); + td->rwmix_issues = (td->io_issues[td->rwmix_ddir] * diff) / 100; } static inline enum fio_ddir get_rand_ddir(struct thread_data *td) { unsigned int v; - long r; + unsigned long r; - r = os_random_long(&td->rwmix_state); - v = 1 + (int) (100.0 * (r / (RAND_MAX + 1.0))); - if (v < td->o.rwmix[DDIR_READ]) + if (td->o.use_os_rand) { + r = os_random_long(&td->rwmix_state); + v = 1 + (int) (100.0 * (r / (OS_RAND_MAX + 1.0))); + } else { + r = __rand(&td->__rwmix_state); + v = 1 + (int) (100.0 * (r / (FRAND_MAX + 1.0))); + } + + if (v <= td->o.rwmix[DDIR_READ]) return DDIR_READ; return DDIR_WRITE; } +void io_u_quiesce(struct thread_data *td) +{ + /* + * We are going to sleep, ensure that we flush anything pending as + * not to skew our latency numbers. 
+ * + * Changed to only monitor 'in flight' requests here instead of the + * td->cur_depth, b/c td->cur_depth does not accurately represent + * io's that have been actually submitted to an async engine, + * and cur_depth is meaningless for sync engines. + */ + while (td->io_u_in_flight) { + int fio_unused ret; + + ret = io_u_queued_complete(td, 1, NULL); + } +} + +static enum fio_ddir rate_ddir(struct thread_data *td, enum fio_ddir ddir) +{ + enum fio_ddir odir = ddir ^ 1; + struct timeval t; + long usec; + + assert(ddir_rw(ddir)); + + if (td->rate_pending_usleep[ddir] <= 0) + return ddir; + + /* + * We have too much pending sleep in this direction. See if we + * should switch. + */ + if (td_rw(td) && td->o.rwmix[odir]) { + /* + * Other direction does not have too much pending, switch + */ + if (td->rate_pending_usleep[odir] < 100000) + return odir; + + /* + * Both directions have pending sleep. Sleep the minimum time + * and deduct from both. + */ + if (td->rate_pending_usleep[ddir] <= + td->rate_pending_usleep[odir]) { + usec = td->rate_pending_usleep[ddir]; + } else { + usec = td->rate_pending_usleep[odir]; + ddir = odir; + } + } else + usec = td->rate_pending_usleep[ddir]; + + io_u_quiesce(td); + + fio_gettime(&t, NULL); + usec_sleep(td, usec); + usec = utime_since_now(&t); + + td->rate_pending_usleep[ddir] -= usec; + + odir = ddir ^ 1; + if (td_rw(td) && __should_check_rate(td, odir)) + td->rate_pending_usleep[odir] -= usec; + + if (ddir_trim(ddir)) + return ddir; + + return ddir; +} + /* * Return the data direction for the next io_u. If the job is a * mixed read/write workload, check the rwmix cycle and switch if @@ -210,305 +610,823 @@ static inline enum fio_ddir get_rand_ddir(struct thread_data *td) */ static enum fio_ddir get_rw_ddir(struct thread_data *td) { - if (td_rw(td)) { - struct timeval now; - unsigned long elapsed; - unsigned int cycle; + enum fio_ddir ddir; - fio_gettime(&now, NULL); - elapsed = mtime_since_now(&td->rwmix_switch); + /* + * see if it's time to fsync + */ + if (td->o.fsync_blocks && + !(td->io_issues[DDIR_WRITE] % td->o.fsync_blocks) && + td->io_issues[DDIR_WRITE] && should_fsync(td)) + return DDIR_SYNC; - /* - * if this is the first cycle, make it shorter - */ - cycle = td->o.rwmixcycle; - if (!td->rwmix_bytes) - cycle /= 10; + /* + * see if it's time to fdatasync + */ + if (td->o.fdatasync_blocks && + !(td->io_issues[DDIR_WRITE] % td->o.fdatasync_blocks) && + td->io_issues[DDIR_WRITE] && should_fsync(td)) + return DDIR_DATASYNC; + + /* + * see if it's time to sync_file_range + */ + if (td->sync_file_range_nr && + !(td->io_issues[DDIR_WRITE] % td->sync_file_range_nr) && + td->io_issues[DDIR_WRITE] && should_fsync(td)) + return DDIR_SYNC_FILE_RANGE; + if (td_rw(td)) { /* * Check if it's time to seed a new data direction. 
*/ - if (elapsed >= cycle || - td->io_bytes[td->rwmix_ddir] >= td->rwmix_bytes) { - unsigned long long max_bytes; - enum fio_ddir ddir; - + if (td->io_issues[td->rwmix_ddir] >= td->rwmix_issues) { /* * Put a top limit on how many bytes we do for * one data direction, to avoid overflowing the * ranges too much */ ddir = get_rand_ddir(td); - max_bytes = td->this_io_bytes[ddir]; - if (max_bytes >= (td->o.size * td->o.rwmix[ddir] / 100)) { - if (!td->rw_end_set[ddir]) { - td->rw_end_set[ddir] = 1; - memcpy(&td->rw_end[ddir], &now, sizeof(now)); - } - ddir ^= 1; - } if (ddir != td->rwmix_ddir) set_rwmix_bytes(td); td->rwmix_ddir = ddir; - memcpy(&td->rwmix_switch, &now, sizeof(now)); } - return td->rwmix_ddir; + ddir = td->rwmix_ddir; } else if (td_read(td)) - return DDIR_READ; + ddir = DDIR_READ; + else if (td_write(td)) + ddir = DDIR_WRITE; else - return DDIR_WRITE; + ddir = DDIR_TRIM; + + td->rwmix_ddir = rate_ddir(td, ddir); + return td->rwmix_ddir; +} + +static void set_rw_ddir(struct thread_data *td, struct io_u *io_u) +{ + io_u->ddir = io_u->acct_ddir = get_rw_ddir(td); + + if (io_u->ddir == DDIR_WRITE && (td->io_ops->flags & FIO_BARRIER) && + td->o.barrier_blocks && + !(td->io_issues[DDIR_WRITE] % td->o.barrier_blocks) && + td->io_issues[DDIR_WRITE]) + io_u->flags |= IO_U_F_BARRIER; +} + +void put_file_log(struct thread_data *td, struct fio_file *f) +{ + unsigned int ret = put_file(td, f); + + if (ret) + td_verror(td, ret, "file close"); } void put_io_u(struct thread_data *td, struct io_u *io_u) { - assert((io_u->flags & IO_U_F_FREE) == 0); - io_u->flags |= IO_U_F_FREE; + td_io_u_lock(td); + + if (io_u->file && !(io_u->flags & IO_U_F_NO_FILE_PUT)) + put_file_log(td, io_u->file); io_u->file = NULL; - list_del(&io_u->list); - list_add(&io_u->list, &td->io_u_freelist); - td->cur_depth--; + io_u->flags |= IO_U_F_FREE; + + if (io_u->flags & IO_U_F_IN_CUR_DEPTH) + td->cur_depth--; + io_u_qpush(&td->io_u_freelist, io_u); + td_io_u_unlock(td); + td_io_u_free_notify(td); +} + +void clear_io_u(struct thread_data *td, struct io_u *io_u) +{ + io_u->flags &= ~IO_U_F_FLIGHT; + put_io_u(td, io_u); } void requeue_io_u(struct thread_data *td, struct io_u **io_u) { struct io_u *__io_u = *io_u; + enum fio_ddir ddir = acct_ddir(__io_u); + + dprint(FD_IO, "requeue %p\n", __io_u); + + td_io_u_lock(td); __io_u->flags |= IO_U_F_FREE; + if ((__io_u->flags & IO_U_F_FLIGHT) && ddir_rw(ddir)) + td->io_issues[ddir]--; + __io_u->flags &= ~IO_U_F_FLIGHT; + if (__io_u->flags & IO_U_F_IN_CUR_DEPTH) + td->cur_depth--; - list_del(&__io_u->list); - list_add_tail(&__io_u->list, &td->io_u_requeues); - td->cur_depth--; + io_u_rpush(&td->io_u_requeues, __io_u); + td_io_u_unlock(td); *io_u = NULL; } static int fill_io_u(struct thread_data *td, struct io_u *io_u) { + unsigned int is_random; + + if (td->io_ops->flags & FIO_NOIO) + goto out; + + set_rw_ddir(td, io_u); + /* - * If using an iolog, grab next piece if any available. 
+ * fsync() or fdatasync() or trim etc, we are done */ - if (td->o.read_iolog) - return read_iolog_get(td, io_u); + if (!ddir_rw(io_u->ddir)) + goto out; /* - * see if it's time to sync + * See if it's time to switch to a new zone */ - if (td->o.fsync_blocks && - !(td->io_issues[DDIR_WRITE] % td->o.fsync_blocks) && - td->io_issues[DDIR_WRITE] && should_fsync(td)) { - io_u->ddir = DDIR_SYNC; - goto out; + if (td->zone_bytes >= td->o.zone_size && td->o.zone_skip) { + td->zone_bytes = 0; + io_u->file->file_offset += td->o.zone_range + td->o.zone_skip; + io_u->file->last_pos = io_u->file->file_offset; + td->io_skip_bytes += td->o.zone_skip; } - io_u->ddir = get_rw_ddir(td); - /* * No log, let the seq/rand engine retrieve the next buflen and * position. */ - if (get_next_offset(td, io_u)) + if (get_next_offset(td, io_u, &is_random)) { + dprint(FD_IO, "io_u %p, failed getting offset\n", io_u); return 1; + } - io_u->buflen = get_next_buflen(td, io_u); - if (!io_u->buflen) + io_u->buflen = get_next_buflen(td, io_u, is_random); + if (!io_u->buflen) { + dprint(FD_IO, "io_u %p, failed getting buflen\n", io_u); return 1; + } + + if (io_u->offset + io_u->buflen > io_u->file->real_file_size) { + dprint(FD_IO, "io_u %p, offset too large\n", io_u); + dprint(FD_IO, " off=%llu/%lu > %llu\n", + (unsigned long long) io_u->offset, io_u->buflen, + (unsigned long long) io_u->file->real_file_size); + return 1; + } /* * mark entry before potentially trimming io_u */ - if (td_random(td) && !td->o.norandommap) + if (td_random(td) && file_randommap(td, io_u->file)) mark_random_map(td, io_u); - /* - * If using a write iolog, store this entry. - */ out: - if (td->o.write_iolog_file) - write_iolog_put(td, io_u); - + dprint_io_u(io_u, "fill_io_u"); + td->zone_bytes += io_u->buflen; return 0; } -void io_u_mark_depth(struct thread_data *td, struct io_u *io_u) +static void __io_u_mark_map(unsigned int *map, unsigned int nr) { - int index = 0; + int idx = 0; - if (io_u->ddir == DDIR_SYNC) - return; + switch (nr) { + default: + idx = 6; + break; + case 33 ... 64: + idx = 5; + break; + case 17 ... 32: + idx = 4; + break; + case 9 ... 16: + idx = 3; + break; + case 5 ... 8: + idx = 2; + break; + case 1 ... 4: + idx = 1; + case 0: + break; + } + + map[idx]++; +} + +void io_u_mark_submit(struct thread_data *td, unsigned int nr) +{ + __io_u_mark_map(td->ts.io_u_submit, nr); + td->ts.total_submit++; +} + +void io_u_mark_complete(struct thread_data *td, unsigned int nr) +{ + __io_u_mark_map(td->ts.io_u_complete, nr); + td->ts.total_complete++; +} + +void io_u_mark_depth(struct thread_data *td, unsigned int nr) +{ + int idx = 0; switch (td->cur_depth) { default: - index++; + idx = 6; + break; case 32 ... 63: - index++; + idx = 5; + break; case 16 ... 31: - index++; + idx = 4; + break; case 8 ... 15: - index++; + idx = 3; + break; case 4 ... 7: - index++; + idx = 2; + break; case 2 ... 3: - index++; + idx = 1; case 1: break; } - td->ts.io_u_map[index]++; - td->ts.total_io_u[io_u->ddir]++; + td->ts.io_u_map[idx] += nr; } -static void io_u_mark_latency(struct thread_data *td, unsigned long msec) +static void io_u_mark_lat_usec(struct thread_data *td, unsigned long usec) { - int index = 0; + int idx = 0; + + assert(usec < 1000); + + switch (usec) { + case 750 ... 999: + idx = 9; + break; + case 500 ... 749: + idx = 8; + break; + case 250 ... 499: + idx = 7; + break; + case 100 ... 249: + idx = 6; + break; + case 50 ... 99: + idx = 5; + break; + case 20 ... 49: + idx = 4; + break; + case 10 ... 19: + idx = 3; + break; + case 4 ... 
9: + idx = 2; + break; + case 2 ... 3: + idx = 1; + case 0 ... 1: + break; + } + + assert(idx < FIO_IO_U_LAT_U_NR); + td->ts.io_u_lat_u[idx]++; +} + +static void io_u_mark_lat_msec(struct thread_data *td, unsigned long msec) +{ + int idx = 0; switch (msec) { default: - index++; + idx = 11; + break; case 1000 ... 1999: - index++; + idx = 10; + break; case 750 ... 999: - index++; + idx = 9; + break; case 500 ... 749: - index++; + idx = 8; + break; case 250 ... 499: - index++; + idx = 7; + break; case 100 ... 249: - index++; + idx = 6; + break; case 50 ... 99: - index++; + idx = 5; + break; case 20 ... 49: - index++; + idx = 4; + break; case 10 ... 19: - index++; + idx = 3; + break; case 4 ... 9: - index++; + idx = 2; + break; case 2 ... 3: - index++; + idx = 1; case 0 ... 1: break; } - td->ts.io_u_lat[index]++; + assert(idx < FIO_IO_U_LAT_M_NR); + td->ts.io_u_lat_m[idx]++; +} + +static void io_u_mark_latency(struct thread_data *td, unsigned long usec) +{ + if (usec < 1000) + io_u_mark_lat_usec(td, usec); + else + io_u_mark_lat_msec(td, usec / 1000); +} + +/* + * Get next file to service by choosing one at random + */ +static struct fio_file *get_next_file_rand(struct thread_data *td, + enum fio_file_flags goodf, + enum fio_file_flags badf) +{ + struct fio_file *f; + int fno; + + do { + int opened = 0; + unsigned long r; + + if (td->o.use_os_rand) { + r = os_random_long(&td->next_file_state); + fno = (unsigned int) ((double) td->o.nr_files + * (r / (OS_RAND_MAX + 1.0))); + } else { + r = __rand(&td->__next_file_state); + fno = (unsigned int) ((double) td->o.nr_files + * (r / (FRAND_MAX + 1.0))); + } + + f = td->files[fno]; + if (fio_file_done(f)) + continue; + + if (!fio_file_open(f)) { + int err; + + if (td->nr_open_files >= td->o.open_files) + return ERR_PTR(-EBUSY); + + err = td_io_open_file(td, f); + if (err) + continue; + opened = 1; + } + + if ((!goodf || (f->flags & goodf)) && !(f->flags & badf)) { + dprint(FD_FILE, "get_next_file_rand: %p\n", f); + return f; + } + if (opened) + td_io_close_file(td, f); + } while (1); +} + +/* + * Get next file to service by doing round robin between all available ones + */ +static struct fio_file *get_next_file_rr(struct thread_data *td, int goodf, + int badf) +{ + unsigned int old_next_file = td->next_file; + struct fio_file *f; + + do { + int opened = 0; + + f = td->files[td->next_file]; + + td->next_file++; + if (td->next_file >= td->o.nr_files) + td->next_file = 0; + + dprint(FD_FILE, "trying file %s %x\n", f->file_name, f->flags); + if (fio_file_done(f)) { + f = NULL; + continue; + } + + if (!fio_file_open(f)) { + int err; + + if (td->nr_open_files >= td->o.open_files) + return ERR_PTR(-EBUSY); + + err = td_io_open_file(td, f); + if (err) { + dprint(FD_FILE, "error %d on open of %s\n", + err, f->file_name); + f = NULL; + continue; + } + opened = 1; + } + + dprint(FD_FILE, "goodf=%x, badf=%x, ff=%x\n", goodf, badf, + f->flags); + if ((!goodf || (f->flags & goodf)) && !(f->flags & badf)) + break; + + if (opened) + td_io_close_file(td, f); + + f = NULL; + } while (td->next_file != old_next_file); + + dprint(FD_FILE, "get_next_file_rr: %p\n", f); + return f; +} + +static struct fio_file *__get_next_file(struct thread_data *td) +{ + struct fio_file *f; + + assert(td->o.nr_files <= td->files_index); + + if (td->nr_done_files >= td->o.nr_files) { + dprint(FD_FILE, "get_next_file: nr_open=%d, nr_done=%d," + " nr_files=%d\n", td->nr_open_files, + td->nr_done_files, + td->o.nr_files); + return NULL; + } + + f = td->file_service_file; + if (f && 
fio_file_open(f) && !fio_file_closing(f)) { + if (td->o.file_service_type == FIO_FSERVICE_SEQ) + goto out; + if (td->file_service_left--) + goto out; + } + + if (td->o.file_service_type == FIO_FSERVICE_RR || + td->o.file_service_type == FIO_FSERVICE_SEQ) + f = get_next_file_rr(td, FIO_FILE_open, FIO_FILE_closing); + else + f = get_next_file_rand(td, FIO_FILE_open, FIO_FILE_closing); + + if (IS_ERR(f)) + return f; + + td->file_service_file = f; + td->file_service_left = td->file_service_nr - 1; +out: + if (f) + dprint(FD_FILE, "get_next_file: %p [%s]\n", f, f->file_name); + else + dprint(FD_FILE, "get_next_file: NULL\n"); + return f; +} + +static struct fio_file *get_next_file(struct thread_data *td) +{ + if (td->flags & TD_F_PROFILE_OPS) { + struct prof_io_ops *ops = &td->prof_io_ops; + + if (ops->get_next_file) + return ops->get_next_file(td); + } + + return __get_next_file(td); +} + +static long set_io_u_file(struct thread_data *td, struct io_u *io_u) +{ + struct fio_file *f; + + do { + f = get_next_file(td); + if (IS_ERR_OR_NULL(f)) + return PTR_ERR(f); + + io_u->file = f; + get_file(f); + + if (!fill_io_u(td, io_u)) + break; + + put_file_log(td, f); + td_io_close_file(td, f); + io_u->file = NULL; + fio_file_set_done(f); + td->nr_done_files++; + dprint(FD_FILE, "%s: is done (%d of %d)\n", f->file_name, + td->nr_done_files, td->o.nr_files); + } while (1); + + return 0; } -/* - * Get next file to service by choosing one at random - */ -static struct fio_file *get_next_file_rand(struct thread_data *td, int goodf, - int badf) +static void lat_fatal(struct thread_data *td, struct io_completion_data *icd, + unsigned long tusec, unsigned long max_usec) { - struct fio_file *f; - int fno; - - do { - long r = os_random_long(&td->next_file_state); - - fno = (unsigned int) ((double) td->o.nr_files * (r / (RAND_MAX + 1.0))); - f = &td->files[fno]; - if (f->flags & FIO_FILE_DONE) - continue; + if (!td->error) + log_err("fio: latency of %lu usec exceeds specified max (%lu usec)\n", tusec, max_usec); + td_verror(td, ETIMEDOUT, "max latency exceeded"); + icd->error = ETIMEDOUT; +} - if ((!goodf || (f->flags & goodf)) && !(f->flags & badf)) - return f; - } while (1); +static void lat_new_cycle(struct thread_data *td) +{ + fio_gettime(&td->latency_ts, NULL); + td->latency_ios = ddir_rw_sum(td->io_blocks); + td->latency_failed = 0; } /* - * Get next file to service by doing round robin between all available ones + * We had an IO outside the latency target. Reduce the queue depth. If we + * are at QD=1, then it's time to give up. */ -static struct fio_file *get_next_file_rr(struct thread_data *td, int goodf, - int badf) +static int __lat_target_failed(struct thread_data *td) { - unsigned int old_next_file = td->next_file; - struct fio_file *f; + if (td->latency_qd == 1) + return 1; - do { - f = &td->files[td->next_file]; + td->latency_qd_high = td->latency_qd; - td->next_file++; - if (td->next_file >= td->o.nr_files) - td->next_file = 0; + if (td->latency_qd == td->latency_qd_low) + td->latency_qd_low--; - if (f->flags & FIO_FILE_DONE) { - f = NULL; - continue; - } + td->latency_qd = (td->latency_qd + td->latency_qd_low) / 2; - if ((!goodf || (f->flags & goodf)) && !(f->flags & badf)) - break; + dprint(FD_RATE, "Ramped down: %d %d %d\n", td->latency_qd_low, td->latency_qd, td->latency_qd_high); - f = NULL; - } while (td->next_file != old_next_file); + /* + * When we ramp QD down, quiesce existing IO to prevent + * a storm of ramp downs due to pending higher depth. 
+ */ + io_u_quiesce(td); + lat_new_cycle(td); + return 0; +} - return f; +static int lat_target_failed(struct thread_data *td) +{ + if (td->o.latency_percentile.u.f == 100.0) + return __lat_target_failed(td); + + td->latency_failed++; + return 0; } -static struct fio_file *get_next_file(struct thread_data *td) +void lat_target_init(struct thread_data *td) { - struct fio_file *f; + td->latency_end_run = 0; + + if (td->o.latency_target) { + dprint(FD_RATE, "Latency target=%llu\n", td->o.latency_target); + fio_gettime(&td->latency_ts, NULL); + td->latency_qd = 1; + td->latency_qd_high = td->o.iodepth; + td->latency_qd_low = 1; + td->latency_ios = ddir_rw_sum(td->io_blocks); + } else + td->latency_qd = td->o.iodepth; +} - assert(td->o.nr_files <= td->files_index); +void lat_target_reset(struct thread_data *td) +{ + if (!td->latency_end_run) + lat_target_init(td); +} - if (!td->nr_open_files) - return NULL; +static void lat_target_success(struct thread_data *td) +{ + const unsigned int qd = td->latency_qd; + struct thread_options *o = &td->o; - f = td->file_service_file; - if (f && (f->flags & FIO_FILE_OPEN) && td->file_service_left--) - return f; + td->latency_qd_low = td->latency_qd; - if (td->o.file_service_type == FIO_FSERVICE_RR) - f = get_next_file_rr(td, FIO_FILE_OPEN, FIO_FILE_CLOSING); + /* + * If we haven't failed yet, we double up to a failing value instead + * of bisecting from highest possible queue depth. If we have set + * a limit other than td->o.iodepth, bisect between that. + */ + if (td->latency_qd_high != o->iodepth) + td->latency_qd = (td->latency_qd + td->latency_qd_high) / 2; else - f = get_next_file_rand(td, FIO_FILE_OPEN, FIO_FILE_CLOSING); + td->latency_qd *= 2; - td->file_service_file = f; - td->file_service_left = td->file_service_nr - 1; - return f; + if (td->latency_qd > o->iodepth) + td->latency_qd = o->iodepth; + + dprint(FD_RATE, "Ramped up: %d %d %d\n", td->latency_qd_low, td->latency_qd, td->latency_qd_high); + + /* + * Same as last one, we are done. Let it run a latency cycle, so + * we get only the results from the targeted depth. + */ + if (td->latency_qd == qd) { + if (td->latency_end_run) { + dprint(FD_RATE, "We are done\n"); + td->done = 1; + } else { + dprint(FD_RATE, "Quiesce and final run\n"); + io_u_quiesce(td); + td->latency_end_run = 1; + reset_all_stats(td); + reset_io_stats(td); + } + } + + lat_new_cycle(td); } -static struct fio_file *find_next_new_file(struct thread_data *td) +/* + * Check if we can bump the queue depth + */ +void lat_target_check(struct thread_data *td) { - struct fio_file *f; + uint64_t usec_window; + uint64_t ios; + double success_ios; + + usec_window = utime_since_now(&td->latency_ts); + if (usec_window < td->o.latency_window) + return; - if (td->o.file_service_type == FIO_FSERVICE_RR) - f = get_next_file_rr(td, 0, FIO_FILE_OPEN); + ios = ddir_rw_sum(td->io_blocks) - td->latency_ios; + success_ios = (double) (ios - td->latency_failed) / (double) ios; + success_ios *= 100.0; + + dprint(FD_RATE, "Success rate: %.2f%% (target %.2f%%)\n", success_ios, td->o.latency_percentile.u.f); + + if (success_ios >= td->o.latency_percentile.u.f) + lat_target_success(td); else - f = get_next_file_rand(td, 0, FIO_FILE_OPEN); + __lat_target_failed(td); +} - return f; +/* + * If latency target is enabled, we might be ramping up or down and not + * using the full queue depth available. 
+ */ +int queue_full(struct thread_data *td) +{ + const int qempty = io_u_qempty(&td->io_u_freelist); + + if (qempty) + return 1; + if (!td->o.latency_target) + return 0; + + return td->cur_depth >= td->latency_qd; } struct io_u *__get_io_u(struct thread_data *td) { struct io_u *io_u = NULL; - if (!list_empty(&td->io_u_requeues)) - io_u = list_entry(td->io_u_requeues.next, struct io_u, list); + td_io_u_lock(td); + +again: + if (!io_u_rempty(&td->io_u_requeues)) + io_u = io_u_rpop(&td->io_u_requeues); else if (!queue_full(td)) { - io_u = list_entry(td->io_u_freelist.next, struct io_u, list); + io_u = io_u_qpop(&td->io_u_freelist); + io_u->file = NULL; io_u->buflen = 0; io_u->resid = 0; - io_u->file = NULL; io_u->end_io = NULL; } if (io_u) { assert(io_u->flags & IO_U_F_FREE); - io_u->flags &= ~IO_U_F_FREE; + io_u->flags &= ~(IO_U_F_FREE | IO_U_F_NO_FILE_PUT | + IO_U_F_TRIMMED | IO_U_F_BARRIER | + IO_U_F_VER_LIST); io_u->error = 0; - list_del(&io_u->list); - list_add(&io_u->list, &td->io_u_busylist); + io_u->acct_ddir = -1; td->cur_depth++; + io_u->flags |= IO_U_F_IN_CUR_DEPTH; + io_u->ipo = NULL; + } else if (td->o.verify_async) { + /* + * We ran out, wait for async verify threads to finish and + * return one + */ + pthread_cond_wait(&td->free_cond, &td->io_u_lock); + goto again; } + td_io_u_unlock(td); return io_u; } +static int check_get_trim(struct thread_data *td, struct io_u *io_u) +{ + if (!(td->flags & TD_F_TRIM_BACKLOG)) + return 0; + + if (td->trim_entries) { + int get_trim = 0; + + if (td->trim_batch) { + td->trim_batch--; + get_trim = 1; + } else if (!(td->io_hist_len % td->o.trim_backlog) && + td->last_ddir != DDIR_READ) { + td->trim_batch = td->o.trim_batch; + if (!td->trim_batch) + td->trim_batch = td->o.trim_backlog; + get_trim = 1; + } + + if (get_trim && !get_next_trim(td, io_u)) + return 1; + } + + return 0; +} + +static int check_get_verify(struct thread_data *td, struct io_u *io_u) +{ + if (!(td->flags & TD_F_VER_BACKLOG)) + return 0; + + if (td->io_hist_len) { + int get_verify = 0; + + if (td->verify_batch) + get_verify = 1; + else if (!(td->io_hist_len % td->o.verify_backlog) && + td->last_ddir != DDIR_READ) { + td->verify_batch = td->o.verify_batch; + if (!td->verify_batch) + td->verify_batch = td->o.verify_backlog; + get_verify = 1; + } + + if (get_verify && !get_next_verify(td, io_u)) { + td->verify_batch--; + return 1; + } + } + + return 0; +} + +/* + * Fill offset and start time into the buffer content, to prevent too + * easy compressible data for simple de-dupe attempts. Do this for every + * 512b block in the range, since that should be the smallest block size + * we can expect from a device. + */ +static void small_content_scramble(struct io_u *io_u) +{ + unsigned int i, nr_blocks = io_u->buflen / 512; + uint64_t boffset; + unsigned int offset; + void *p, *end; + + if (!nr_blocks) + return; + + p = io_u->xfer_buf; + boffset = io_u->offset; + io_u->buf_filled_len = 0; + + for (i = 0; i < nr_blocks; i++) { + /* + * Fill the byte offset into a "random" start offset of + * the buffer, given by the product of the usec time + * and the actual offset. + */ + offset = (io_u->start_time.tv_usec ^ boffset) & 511; + offset &= ~(sizeof(uint64_t) - 1); + if (offset >= 512 - sizeof(uint64_t)) + offset -= sizeof(uint64_t); + memcpy(p + offset, &boffset, sizeof(boffset)); + + end = p + 512 - sizeof(io_u->start_time); + memcpy(end, &io_u->start_time, sizeof(io_u->start_time)); + p += 512; + boffset += 512; + } +} + /* * Return an io_u to be processed. 
Gets a buflen and offset, sets direction, * etc. The returned io_u is fully ready to be prepped and submitted. @@ -517,11 +1435,19 @@ struct io_u *get_io_u(struct thread_data *td) { struct fio_file *f; struct io_u *io_u; - int ret; + int do_scramble = 0; + long ret = 0; io_u = __get_io_u(td); - if (!io_u) + if (!io_u) { + dprint(FD_IO, "__get_io_u failed\n"); return NULL; + } + + if (check_get_verify(td, io_u)) + goto out; + if (check_get_trim(td, io_u)) + goto out; /* * from a requeue, io_u already setup @@ -529,152 +1455,276 @@ struct io_u *get_io_u(struct thread_data *td) if (io_u->file) goto out; - do { - f = get_next_file(td); - if (!f) { - put_io_u(td, io_u); - return NULL; - } - -set_file: - io_u->file = f; - - if (!fill_io_u(td, io_u)) - break; - - /* - * No more to do for this file, close it - */ - io_u->file = NULL; - td_io_close_file(td, f); - f->flags |= FIO_FILE_DONE; - - /* - * probably not the right place to do this, but see - * if we need to open a new file - */ - if (td->nr_open_files < td->o.open_files && - td->o.open_files != td->o.nr_files) { - f = find_next_new_file(td); - - if (!f || (ret = td_io_open_file(td, f))) { - put_io_u(td, io_u); - return NULL; - } - goto set_file; - } - } while (1); + /* + * If using an iolog, grab next piece if any available. + */ + if (td->flags & TD_F_READ_IOLOG) { + if (read_iolog_get(td, io_u)) + goto err_put; + } else if (set_io_u_file(td, io_u)) { + ret = -EBUSY; + dprint(FD_IO, "io_u %p, setting file failed\n", io_u); + goto err_put; + } - if (td->zone_bytes >= td->o.zone_size) { - td->zone_bytes = 0; - f->last_pos += td->o.zone_skip; + f = io_u->file; + if (!f) { + dprint(FD_IO, "io_u %p, setting file failed\n", io_u); + goto err_put; } - if (io_u->ddir != DDIR_SYNC) { - if (!io_u->buflen) { - put_io_u(td, io_u); - return NULL; + assert(fio_file_open(f)); + + if (ddir_rw(io_u->ddir)) { + if (!io_u->buflen && !(td->io_ops->flags & FIO_NOIO)) { + dprint(FD_IO, "get_io_u: zero buflen on %p\n", io_u); + goto err_put; } + f->last_start = io_u->offset; f->last_pos = io_u->offset + io_u->buflen; - if (td->o.verify != VERIFY_NONE) - populate_verify_io_u(td, io_u); + if (io_u->ddir == DDIR_WRITE) { + if (td->flags & TD_F_REFILL_BUFFERS) { + io_u_fill_buffer(td, io_u, + io_u->xfer_buflen, io_u->xfer_buflen); + } else if ((td->flags & TD_F_SCRAMBLE_BUFFERS) && + !(td->flags & TD_F_COMPRESS)) + do_scramble = 1; + if (td->flags & TD_F_VER_NONE) { + populate_verify_io_u(td, io_u); + do_scramble = 0; + } + } else if (io_u->ddir == DDIR_READ) { + /* + * Reset the buf_filled parameters so next time if the + * buffer is used for writes it is refilled. + */ + io_u->buf_filled_len = 0; + } } /* * Set io data pointers. 
*/ -out: io_u->xfer_buf = io_u->buf; io_u->xfer_buflen = io_u->buflen; - if (td_io_prep(td, io_u)) { - put_io_u(td, io_u); - return NULL; +out: + assert(io_u->file); + if (!td_io_prep(td, io_u)) { + if (!td->o.disable_slat) + fio_gettime(&io_u->start_time, NULL); + if (do_scramble) + small_content_scramble(io_u); + return io_u; } - - fio_gettime(&io_u->start_time, NULL); - return io_u; +err_put: + dprint(FD_IO, "get_io_u failed\n"); + put_io_u(td, io_u); + return ERR_PTR(ret); } void io_u_log_error(struct thread_data *td, struct io_u *io_u) { - const char *msg[] = { "read", "write", "sync" }; - - log_err("fio: io_u error"); - - if (io_u->file) - log_err(" on file %s", io_u->file->file_name); + enum error_type_bit eb = td_error_type(io_u->ddir, io_u->error); - log_err(": %s\n", strerror(io_u->error)); + if (td_non_fatal_error(td, eb, io_u->error) && !td->o.error_dump) + return; - log_err(" %s offset=%llu, buflen=%lu\n", msg[io_u->ddir], io_u->offset, io_u->xfer_buflen); + log_err("fio: io_u error%s%s: %s: %s offset=%llu, buflen=%lu\n", + io_u->file ? " on file " : "", + io_u->file ? io_u->file->file_name : "", + strerror(io_u->error), + io_ddir_name(io_u->ddir), + io_u->offset, io_u->xfer_buflen); if (!td->error) td_verror(td, io_u->error, "io_u error"); } -static void io_completed(struct thread_data *td, struct io_u *io_u, +static inline int gtod_reduce(struct thread_data *td) +{ + return td->o.disable_clat && td->o.disable_lat && td->o.disable_slat + && td->o.disable_bw; +} + +static void account_io_completion(struct thread_data *td, struct io_u *io_u, + struct io_completion_data *icd, + const enum fio_ddir idx, unsigned int bytes) +{ + unsigned long lusec = 0; + + if (!gtod_reduce(td)) + lusec = utime_since(&io_u->issue_time, &icd->time); + + if (!td->o.disable_lat) { + unsigned long tusec; + + tusec = utime_since(&io_u->start_time, &icd->time); + add_lat_sample(td, idx, tusec, bytes, io_u->offset); + + if (td->flags & TD_F_PROFILE_OPS) { + struct prof_io_ops *ops = &td->prof_io_ops; + + if (ops->io_u_lat) + icd->error = ops->io_u_lat(td, tusec); + } + + if (td->o.max_latency && tusec > td->o.max_latency) + lat_fatal(td, icd, tusec, td->o.max_latency); + if (td->o.latency_target && tusec > td->o.latency_target) { + if (lat_target_failed(td)) + lat_fatal(td, icd, tusec, td->o.latency_target); + } + } + + if (!td->o.disable_clat) { + add_clat_sample(td, idx, lusec, bytes, io_u->offset); + io_u_mark_latency(td, lusec); + } + + if (!td->o.disable_bw) + add_bw_sample(td, idx, bytes, &icd->time); + + if (!gtod_reduce(td)) + add_iops_sample(td, idx, bytes, &icd->time); +} + +static long long usec_for_io(struct thread_data *td, enum fio_ddir ddir) +{ + uint64_t secs, remainder, bps, bytes; + + bytes = td->this_io_bytes[ddir]; + bps = td->rate_bps[ddir]; + secs = bytes / bps; + remainder = bytes % bps; + return remainder * 1000000 / bps + secs * 1000000; +} + +static void io_completed(struct thread_data *td, struct io_u **io_u_ptr, struct io_completion_data *icd) { - unsigned long msec; + struct io_u *io_u = *io_u_ptr; + enum fio_ddir ddir = io_u->ddir; + struct fio_file *f = io_u->file; + dprint_io_u(io_u, "io complete"); + + td_io_u_lock(td); assert(io_u->flags & IO_U_F_FLIGHT); - io_u->flags &= ~IO_U_F_FLIGHT; + io_u->flags &= ~(IO_U_F_FLIGHT | IO_U_F_BUSY_OK); + + /* + * Mark IO ok to verify + */ + if (io_u->ipo) { + /* + * Remove errored entry from the verification list + */ + if (io_u->error) + unlog_io_piece(td, io_u); + else { + io_u->ipo->flags &= ~IP_F_IN_FLIGHT; + write_barrier(); + } 
+ } - put_file(td, io_u->file); + td_io_u_unlock(td); - if (io_u->ddir == DDIR_SYNC) { + if (ddir_sync(ddir)) { td->last_was_sync = 1; + if (f) { + f->first_write = -1ULL; + f->last_write = -1ULL; + } return; } td->last_was_sync = 0; + td->last_ddir = ddir; - if (!io_u->error) { + if (!io_u->error && ddir_rw(ddir)) { unsigned int bytes = io_u->buflen - io_u->resid; - const enum fio_ddir idx = io_u->ddir; + const enum fio_ddir oddir = ddir ^ 1; int ret; - td->io_blocks[idx]++; - td->io_bytes[idx] += bytes; - td->zone_bytes += bytes; - td->this_io_bytes[idx] += bytes; + td->io_blocks[ddir]++; + td->this_io_blocks[ddir]++; + td->io_bytes[ddir] += bytes; - io_u->file->last_completed_pos = io_u->offset + io_u->buflen; + if (!(io_u->flags & IO_U_F_VER_LIST)) + td->this_io_bytes[ddir] += bytes; - msec = mtime_since(&io_u->issue_time, &icd->time); + if (ddir == DDIR_WRITE && f) { + if (f->first_write == -1ULL || + io_u->offset < f->first_write) + f->first_write = io_u->offset; + if (f->last_write == -1ULL || + ((io_u->offset + bytes) > f->last_write)) + f->last_write = io_u->offset + bytes; + } - add_clat_sample(td, idx, msec); - add_bw_sample(td, idx, &icd->time); - io_u_mark_latency(td, msec); + if (ramp_time_over(td) && (td->runstate == TD_RUNNING || + td->runstate == TD_VERIFYING)) { + account_io_completion(td, io_u, icd, ddir, bytes); - if ((td_rw(td) || td_write(td)) && idx == DDIR_WRITE && - td->o.verify != VERIFY_NONE) - log_io_piece(td, io_u); + if (__should_check_rate(td, ddir)) { + td->rate_pending_usleep[ddir] = + (usec_for_io(td, ddir) - + utime_since_now(&td->start)); + } + if (ddir != DDIR_TRIM && + __should_check_rate(td, oddir)) { + td->rate_pending_usleep[oddir] = + (usec_for_io(td, oddir) - + utime_since_now(&td->start)); + } + } - icd->bytes_done[idx] += bytes; + icd->bytes_done[ddir] += bytes; if (io_u->end_io) { - ret = io_u->end_io(td, io_u); + ret = io_u->end_io(td, io_u_ptr); + io_u = *io_u_ptr; if (ret && !icd->error) icd->error = ret; } - } else { + } else if (io_u->error) { icd->error = io_u->error; io_u_log_error(td, io_u); } + if (icd->error) { + enum error_type_bit eb = td_error_type(ddir, icd->error); + + if (!td_non_fatal_error(td, eb, icd->error)) + return; + + /* + * If there is a non_fatal error, then add to the error count + * and clear all the errors. + */ + update_error_count(td, icd->error); + td_clear_error(td); + icd->error = 0; + if (io_u) + io_u->error = 0; + } } -static void init_icd(struct io_completion_data *icd, int nr) +static void init_icd(struct thread_data *td, struct io_completion_data *icd, + int nr) { - fio_gettime(&icd->time, NULL); + int ddir; + + if (!gtod_reduce(td)) + fio_gettime(&icd->time, NULL); icd->nr = nr; icd->error = 0; - icd->bytes_done[0] = icd->bytes_done[1] = 0; + for (ddir = DDIR_READ; ddir < DDIR_RWDIR_CNT; ddir++) + icd->bytes_done[ddir] = 0; } static void ios_completed(struct thread_data *td, @@ -686,56 +1736,82 @@ static void ios_completed(struct thread_data *td, for (i = 0; i < icd->nr; i++) { io_u = td->io_ops->event(td, i); - io_completed(td, io_u, icd); - put_io_u(td, io_u); + io_completed(td, &io_u, icd); + + if (io_u) + put_io_u(td, io_u); } } /* * Complete a single io_u for the sync engines. 
*/ -long io_u_sync_complete(struct thread_data *td, struct io_u *io_u) +int io_u_sync_complete(struct thread_data *td, struct io_u *io_u, + uint64_t *bytes) { struct io_completion_data icd; - init_icd(&icd, 1); - io_completed(td, io_u, &icd); - put_io_u(td, io_u); + init_icd(td, &icd, 1); + io_completed(td, &io_u, &icd); + + if (io_u) + put_io_u(td, io_u); + + if (icd.error) { + td_verror(td, icd.error, "io_u_sync_complete"); + return -1; + } + + if (bytes) { + int ddir; - if (!icd.error) - return icd.bytes_done[0] + icd.bytes_done[1]; + for (ddir = DDIR_READ; ddir < DDIR_RWDIR_CNT; ddir++) + bytes[ddir] += icd.bytes_done[ddir]; + } - td_verror(td, icd.error, "io_u_sync_complete"); - return -1; + return 0; } /* * Called to complete min_events number of io for the async engines. */ -long io_u_queued_complete(struct thread_data *td, int min_events) +int io_u_queued_complete(struct thread_data *td, int min_evts, + uint64_t *bytes) { struct io_completion_data icd; struct timespec *tvp = NULL; int ret; struct timespec ts = { .tv_sec = 0, .tv_nsec = 0, }; - if (!min_events) + dprint(FD_IO, "io_u_queued_completed: min=%d\n", min_evts); + + if (!min_evts) tvp = &ts; + else if (min_evts > td->cur_depth) + min_evts = td->cur_depth; - ret = td_io_getevents(td, min_events, td->cur_depth, tvp); + ret = td_io_getevents(td, min_evts, td->o.iodepth_batch_complete, tvp); if (ret < 0) { td_verror(td, -ret, "td_io_getevents"); return ret; } else if (!ret) return ret; - init_icd(&icd, ret); + init_icd(td, &icd, ret); ios_completed(td, &icd); - if (!icd.error) - return icd.bytes_done[0] + icd.bytes_done[1]; + if (icd.error) { + td_verror(td, icd.error, "io_u_queued_complete"); + return -1; + } - td_verror(td, icd.error, "io_u_queued_complete"); - return -1; + if (bytes) { + int ddir; + + for (ddir = DDIR_READ; ddir < DDIR_RWDIR_CNT; ddir++) + bytes[ddir] += icd.bytes_done[ddir]; + } + + return 0; } /* @@ -743,88 +1819,44 @@ long io_u_queued_complete(struct thread_data *td, int min_events) */ void io_u_queued(struct thread_data *td, struct io_u *io_u) { - unsigned long slat_time; + if (!td->o.disable_slat) { + unsigned long slat_time; - slat_time = mtime_since(&io_u->start_time, &io_u->issue_time); - add_slat_sample(td, io_u->ddir, slat_time); -} - -#ifdef FIO_USE_TIMEOUT -void io_u_set_timeout(struct thread_data *td) -{ - assert(td->cur_depth); - - td->timer.it_interval.tv_sec = 0; - td->timer.it_interval.tv_usec = 0; - td->timer.it_value.tv_sec = IO_U_TIMEOUT + IO_U_TIMEOUT_INC; - td->timer.it_value.tv_usec = 0; - setitimer(ITIMER_REAL, &td->timer, NULL); - fio_gettime(&td->timeout_end, NULL); -} - -static void io_u_dump(struct io_u *io_u) -{ - unsigned long t_start = mtime_since_now(&io_u->start_time); - unsigned long t_issue = mtime_since_now(&io_u->issue_time); - - log_err("io_u=%p, t_start=%lu, t_issue=%lu\n", io_u, t_start, t_issue); - log_err(" buf=%p/%p, len=%lu/%lu, offset=%llu\n", io_u->buf, io_u->xfer_buf, io_u->buflen, io_u->xfer_buflen, io_u->offset); - log_err(" ddir=%d, fname=%s\n", io_u->ddir, io_u->file->file_name); -} -#else -void io_u_set_timeout(struct thread_data fio_unused *td) -{ + slat_time = utime_since(&io_u->start_time, &io_u->issue_time); + add_slat_sample(td, io_u->ddir, slat_time, io_u->xfer_buflen, + io_u->offset); + } } -#endif -#ifdef FIO_USE_TIMEOUT -static void io_u_timeout_handler(int fio_unused sig) +void fill_io_buffer(struct thread_data *td, void *buf, unsigned int min_write, + unsigned int max_bs) { - struct thread_data *td, *__td; - pid_t pid = getpid(); - struct 
list_head *entry; - struct io_u *io_u; - int i; - - log_err("fio: io_u timeout\n"); - - /* - * TLS would be nice... - */ - td = NULL; - for_each_td(__td, i) { - if (__td->pid == pid) { - td = __td; - break; - } - } - - if (!td) { - log_err("fio: io_u timeout, can't find job\n"); - exit(1); - } - - if (!td->cur_depth) { - log_err("fio: timeout without pending work?\n"); - return; - } - - log_err("fio: io_u timeout: job=%s, pid=%d\n", td->o.name, td->pid); - - list_for_each(entry, &td->io_u_busylist) { - io_u = list_entry(entry, struct io_u, list); - - io_u_dump(io_u); - } - - td_verror(td, ETIMEDOUT, "io_u timeout"); - exit(1); + if (td->o.buffer_pattern_bytes) + fill_buffer_pattern(td, buf, max_bs); + else if (!td->o.zero_buffers) { + unsigned int perc = td->o.compress_percentage; + + if (perc) { + unsigned int seg = min_write; + + seg = min(min_write, td->o.compress_chunk); + if (!seg) + seg = min_write; + + fill_random_buf_percentage(&td->buf_state, buf, + perc, seg, max_bs); + } else + fill_random_buf(&td->buf_state, buf, max_bs); + } else + memset(buf, 0, max_bs); } -#endif -void io_u_init_timeout(void) +/* + * "randomly" fill the buffer contents + */ +void io_u_fill_buffer(struct thread_data *td, struct io_u *io_u, + unsigned int min_write, unsigned int max_bs) { -#ifdef FIO_USE_TIMEOUT - signal(SIGALRM, io_u_timeout_handler); -#endif + io_u->buf_filled_len = 0; + fill_io_buffer(td, io_u->buf, min_write, max_bs); }
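
Note on the random-offset path introduced above: the patch replaces fio's old bit-per-block file_map with the lib/axmap structure. __get_next_rand_offset() scales a Tausworthe (or legacy OS) random value onto the block range with lastb * (r / (rmax + 1.0)) and, when random_map_free() reports that block as already written, falls back to axmap_next_free() so the whole range still gets covered. The stand-alone C sketch below illustrates only that scale-then-skip idea; io_map, scale_to_block() and next_free_block() are simplified stand-ins invented for illustration (not fio's axmap API), and libc rand() stands in for fio's random generators.

/*
 * Minimal sketch (not fio's implementation): scale a PRNG value onto a
 * block range the way __get_next_rand_offset() does, and emulate the
 * "skip to the next free block" behaviour the patch delegates to
 * axmap_next_free().  A byte-per-block array stands in for the axmap.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

#define NR_BLOCKS 64ULL

static unsigned char io_map[NR_BLOCKS];	/* 1 = block already written */

/* Map a raw random value r in [0, rmax] onto [0, lastb). */
static uint64_t scale_to_block(uint64_t r, uint64_t rmax, uint64_t lastb)
{
	return (uint64_t) (lastb * (r / ((double) rmax + 1.0)));
}

/* Stand-in for axmap_next_free(): linear scan with wrap-around. */
static uint64_t next_free_block(uint64_t start)
{
	for (uint64_t i = 0; i < NR_BLOCKS; i++) {
		uint64_t b = (start + i) % NR_BLOCKS;

		if (!io_map[b])
			return b;
	}
	return (uint64_t) -1ULL;	/* map is full */
}

int main(void)
{
	srand(1);

	for (int i = 0; i < 80; i++) {
		uint64_t b = scale_to_block((uint64_t) rand(), RAND_MAX,
					    NR_BLOCKS);

		if (io_map[b])		/* busy: redirect to a free block */
			b = next_free_block(b);
		if (b == (uint64_t) -1ULL) {
			printf("all %llu blocks covered after %d draws\n",
			       (unsigned long long) NR_BLOCKS, i);
			break;
		}
		io_map[b] = 1;
	}
	return 0;
}

Each draw either lands on a free block or is redirected to the nearest free one, so every block ends up written exactly once; that is the fairness property described in the comment above random_map_free() in the patch, here demonstrated with a toy map rather than the real axmap.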