X-Git-Url: https://git.kernel.dk/?p=fio.git;a=blobdiff_plain;f=io_u.c;h=7ac31a0d297872fe82168e74cff63cfce1880dcb;hp=b0e91e7a1bcc2b52d479df079e671629b8cf3ac8;hb=0d29de831183dfd049c97a03008d425ce21e2fa4;hpb=97601024a53586d77a368763f08be9ee483fdc9c diff --git a/io_u.c b/io_u.c index b0e91e7a..ea0d46c5 100644 --- a/io_u.c +++ b/io_u.c @@ -3,13 +3,16 @@ #include #include #include +#include #include "fio.h" -#include "os.h" +#include "hash.h" +#include "verify.h" +#include "trim.h" +#include "lib/rand.h" struct io_completion_data { int nr; /* input */ - endio_handler *handler; /* input */ int error; /* output */ unsigned long bytes_done[2]; /* output */ @@ -20,63 +23,120 @@ struct io_completion_data { * The ->file_map[] contains a map of blocks we have or have not done io * to yet. Used to make sure we cover the entire range in a fair fashion. */ -static int random_map_free(struct thread_data *td, struct fio_file *f, - unsigned long long block) +static int random_map_free(struct fio_file *f, const unsigned long long block) { - unsigned int idx = RAND_MAP_IDX(td, f, block); - unsigned int bit = RAND_MAP_BIT(td, f, block); + unsigned int idx = RAND_MAP_IDX(f, block); + unsigned int bit = RAND_MAP_BIT(f, block); - return (f->file_map[idx] & (1UL << bit)) == 0; + dprint(FD_RANDOM, "free: b=%llu, idx=%u, bit=%u\n", block, idx, bit); + + return (f->file_map[idx] & (1 << bit)) == 0; } /* * Mark a given offset as used in the map. */ -static void mark_random_map(struct thread_data *td, struct fio_file *f, - struct io_u *io_u) +static void mark_random_map(struct thread_data *td, struct io_u *io_u) { - unsigned int min_bs = td->rw_min_bs; + unsigned int min_bs = td->o.rw_min_bs; + struct fio_file *f = io_u->file; unsigned long long block; - unsigned int blocks; - unsigned int nr_blocks; + unsigned int blocks, nr_blocks; + int busy_check; - block = io_u->offset / (unsigned long long) min_bs; - blocks = 0; + block = (io_u->offset - f->file_offset) / (unsigned long long) min_bs; nr_blocks = (io_u->buflen + min_bs - 1) / min_bs; + blocks = 0; + busy_check = !(io_u->flags & IO_U_F_BUSY_OK); - while (blocks < nr_blocks) { + while (nr_blocks) { + unsigned int this_blocks, mask; unsigned int idx, bit; - if (!random_map_free(td, f, block)) + /* + * If we have a mixed random workload, we may + * encounter blocks we already did IO to. + */ + if (!busy_check) { + blocks = nr_blocks; + break; + } + if ((td->o.ddir_seq_nr == 1) && !random_map_free(f, block)) break; - idx = RAND_MAP_IDX(td, f, block); - bit = RAND_MAP_BIT(td, f, block); + idx = RAND_MAP_IDX(f, block); + bit = RAND_MAP_BIT(f, block); fio_assert(td, idx < f->num_maps); - f->file_map[idx] |= (1UL << bit); - block++; - blocks++; + this_blocks = nr_blocks; + if (this_blocks + bit > BLOCKS_PER_MAP) + this_blocks = BLOCKS_PER_MAP - bit; + + do { + if (this_blocks == BLOCKS_PER_MAP) + mask = -1U; + else + mask = ((1U << this_blocks) - 1) << bit; + + if (!(f->file_map[idx] & mask)) + break; + + this_blocks--; + } while (this_blocks); + + if (!this_blocks) + break; + + f->file_map[idx] |= mask; + nr_blocks -= this_blocks; + blocks += this_blocks; + block += this_blocks; } if ((blocks * min_bs) < io_u->buflen) io_u->buflen = blocks * min_bs; } +static unsigned long long last_block(struct thread_data *td, struct fio_file *f, + enum fio_ddir ddir) +{ + unsigned long long max_blocks; + unsigned long long max_size; + + assert(ddir_rw(ddir)); + + /* + * Hmm, should we make sure that ->io_size <= ->real_file_size? 
+ */ + max_size = f->io_size; + if (max_size > f->real_file_size) + max_size = f->real_file_size; + + max_blocks = max_size / (unsigned long long) td->o.ba[ddir]; + if (!max_blocks) + return 0; + + return max_blocks; +} + /* * Return the next free block in the map. */ static int get_next_free_block(struct thread_data *td, struct fio_file *f, - unsigned long long *b) + enum fio_ddir ddir, unsigned long long *b) { + unsigned long long min_bs = td->o.rw_min_bs; int i; i = f->last_free_lookup; *b = (i * BLOCKS_PER_MAP); - while ((*b) * td->rw_min_bs < f->real_file_size) { - if (f->file_map[i] != -1UL) { + while ((*b) * min_bs < f->real_file_size && + (*b) * min_bs < f->io_size) { + if (f->file_map[i] != (unsigned int) -1) { *b += ffz(f->file_map[i]); + if (*b > last_block(td, f, ddir)) + break; f->last_free_lookup = i; return 0; } @@ -85,76 +145,303 @@ static int get_next_free_block(struct thread_data *td, struct fio_file *f, i++; } + dprint(FD_IO, "failed finding a free block\n"); + return 1; +} + +static int get_next_rand_offset(struct thread_data *td, struct fio_file *f, + enum fio_ddir ddir, unsigned long long *b) +{ + unsigned long long r; + int loops = 5; + + do { + r = os_random_long(&td->random_state); + dprint(FD_RANDOM, "off rand %llu\n", r); + *b = (last_block(td, f, ddir) - 1) + * (r / ((unsigned long long) OS_RAND_MAX + 1.0)); + + /* + * if we are not maintaining a random map, we are done. + */ + if (!file_randommap(td, f)) + return 0; + + /* + * calculate map offset and check if it's free + */ + if (random_map_free(f, *b)) + return 0; + + dprint(FD_RANDOM, "get_next_rand_offset: offset %llu busy\n", + *b); + } while (--loops); + + /* + * we get here, if we didn't suceed in looking up a block. generate + * a random start offset into the filemap, and find the first free + * block from there. 
+ */ + loops = 10; + do { + f->last_free_lookup = (f->num_maps - 1) * + (r / (OS_RAND_MAX + 1.0)); + if (!get_next_free_block(td, f, ddir, b)) + return 0; + + r = os_random_long(&td->random_state); + } while (--loops); + + /* + * that didn't work either, try exhaustive search from the start + */ + f->last_free_lookup = 0; + return get_next_free_block(td, f, ddir, b); +} + +static int get_next_rand_block(struct thread_data *td, struct fio_file *f, + enum fio_ddir ddir, unsigned long long *b) +{ + if (get_next_rand_offset(td, f, ddir, b)) { + dprint(FD_IO, "%s: rand offset failed, last=%llu, size=%llu\n", + f->file_name, f->last_pos, f->real_file_size); + return 1; + } + + return 0; +} + +static int get_next_seq_block(struct thread_data *td, struct fio_file *f, + enum fio_ddir ddir, unsigned long long *b) +{ + assert(ddir_rw(ddir)); + + if (f->last_pos < f->real_file_size) { + *b = (f->last_pos - f->file_offset) / td->o.min_bs[ddir]; + return 0; + } + return 1; } +static int get_next_block(struct thread_data *td, struct io_u *io_u, + enum fio_ddir ddir, int rw_seq, unsigned long long *b) +{ + struct fio_file *f = io_u->file; + int ret; + + assert(ddir_rw(ddir)); + + if (rw_seq) { + if (td_random(td)) + ret = get_next_rand_block(td, f, ddir, b); + else + ret = get_next_seq_block(td, f, ddir, b); + } else { + io_u->flags |= IO_U_F_BUSY_OK; + + if (td->o.rw_seq == RW_SEQ_SEQ) { + ret = get_next_seq_block(td, f, ddir, b); + if (ret) + ret = get_next_rand_block(td, f, ddir, b); + } else if (td->o.rw_seq == RW_SEQ_IDENT) { + if (f->last_start != -1ULL) + *b = (f->last_start - f->file_offset) / td->o.min_bs[ddir]; + else + *b = 0; + ret = 0; + } else { + log_err("fio: unknown rw_seq=%d\n", td->o.rw_seq); + ret = 1; + } + } + + return ret; +} + /* * For random io, generate a random new block and see if it's used. Repeat * until we find a free one. For sequential io, just return the end of * the last io issued. 
*/ -static int get_next_offset(struct thread_data *td, struct fio_file *f, - struct io_u *io_u) +static int __get_next_offset(struct thread_data *td, struct io_u *io_u) { - const int ddir = io_u->ddir; - unsigned long long b, rb; - long r; + struct fio_file *f = io_u->file; + unsigned long long b; + enum fio_ddir ddir = io_u->ddir; + int rw_seq_hit = 0; - if (!td->sequential) { - unsigned long long max_blocks = f->file_size / td->min_bs[ddir]; - int loops = 5; + assert(ddir_rw(ddir)); - do { - r = os_random_long(&td->random_state); - b = ((max_blocks - 1) * r / (unsigned long long) (RAND_MAX+1.0)); - if (td->norandommap) - break; - rb = b + (f->file_offset / td->min_bs[ddir]); - loops--; - } while (!random_map_free(td, f, rb) && loops); + if (td->o.ddir_seq_nr && !--td->ddir_seq_nr) { + rw_seq_hit = 1; + td->ddir_seq_nr = td->o.ddir_seq_nr; + } - /* - * if we failed to retrieve a truly random offset within - * the loops assigned, see if there are free ones left at all - */ - if (!loops && get_next_free_block(td, f, &b)) - return 1; - } else - b = f->last_pos / td->min_bs[ddir]; + if (get_next_block(td, io_u, ddir, rw_seq_hit, &b)) { + printf("fail\n"); + return 1; + } - io_u->offset = (b * td->min_bs[ddir]) + f->file_offset; - if (io_u->offset >= f->real_file_size) + io_u->offset = b * td->o.ba[ddir]; + if (io_u->offset >= f->io_size) { + dprint(FD_IO, "get_next_offset: offset %llu >= io_size %llu\n", + io_u->offset, f->io_size); return 1; + } + + io_u->offset += f->file_offset; + if (io_u->offset >= f->real_file_size) { + dprint(FD_IO, "get_next_offset: offset %llu >= size %llu\n", + io_u->offset, f->real_file_size); + return 1; + } return 0; } -static unsigned int get_next_buflen(struct thread_data *td, struct fio_file *f, - struct io_u *io_u) +static int get_next_offset(struct thread_data *td, struct io_u *io_u) +{ + struct prof_io_ops *ops = &td->prof_io_ops; + + if (ops->fill_io_u_off) + return ops->fill_io_u_off(td, io_u); + + return __get_next_offset(td, io_u); +} + +static unsigned int __get_next_buflen(struct thread_data *td, struct io_u *io_u) { const int ddir = io_u->ddir; - unsigned int buflen; + unsigned int uninitialized_var(buflen); + unsigned int minbs, maxbs; long r; - if (td->min_bs[ddir] == td->max_bs[ddir]) - buflen = td->min_bs[ddir]; + assert(ddir_rw(ddir)); + + minbs = td->o.min_bs[ddir]; + maxbs = td->o.max_bs[ddir]; + + if (minbs == maxbs) + buflen = minbs; else { r = os_random_long(&td->bsrange_state); - buflen = (unsigned int) (1 + (double) (td->max_bs[ddir] - 1) * r / (RAND_MAX + 1.0)); - if (!td->bs_unaligned) - buflen = (buflen + td->min_bs[ddir] - 1) & ~(td->min_bs[ddir] - 1); + if (!td->o.bssplit_nr[ddir]) { + buflen = 1 + (unsigned int) ((double) maxbs * + (r / (OS_RAND_MAX + 1.0))); + if (buflen < minbs) + buflen = minbs; + } else { + long perc = 0; + unsigned int i; + + for (i = 0; i < td->o.bssplit_nr[ddir]; i++) { + struct bssplit *bsp = &td->o.bssplit[ddir][i]; + + buflen = bsp->bs; + perc += bsp->perc; + if (r <= ((OS_RAND_MAX / 100L) * perc)) + break; + } + } + if (!td->o.bs_unaligned && is_power_of_2(minbs)) + buflen = (buflen + minbs - 1) & ~(minbs - 1); } - while (buflen + io_u->offset > f->real_file_size) { - if (buflen == td->min_bs[ddir]) - return 0; - - buflen = td->min_bs[ddir]; + if (io_u->offset + buflen > io_u->file->real_file_size) { + dprint(FD_IO, "lower buflen %u -> %u (ddir=%d)\n", buflen, + minbs, ddir); + buflen = minbs; } return buflen; } +static unsigned int get_next_buflen(struct thread_data *td, struct io_u *io_u) +{ + struct 
prof_io_ops *ops = &td->prof_io_ops; + + if (ops->fill_io_u_size) + return ops->fill_io_u_size(td, io_u); + + return __get_next_buflen(td, io_u); +} + +static void set_rwmix_bytes(struct thread_data *td) +{ + unsigned int diff; + + /* + * we do time or byte based switch. this is needed because + * buffered writes may issue a lot quicker than they complete, + * whereas reads do not. + */ + diff = td->o.rwmix[td->rwmix_ddir ^ 1]; + td->rwmix_issues = (td->io_issues[td->rwmix_ddir] * diff) / 100; +} + +static inline enum fio_ddir get_rand_ddir(struct thread_data *td) +{ + unsigned int v; + long r; + + r = os_random_long(&td->rwmix_state); + v = 1 + (int) (100.0 * (r / (OS_RAND_MAX + 1.0))); + if (v <= td->o.rwmix[DDIR_READ]) + return DDIR_READ; + + return DDIR_WRITE; +} + +static enum fio_ddir rate_ddir(struct thread_data *td, enum fio_ddir ddir) +{ + enum fio_ddir odir = ddir ^ 1; + struct timeval t; + long usec; + + assert(ddir_rw(ddir)); + + if (td->rate_pending_usleep[ddir] <= 0) + return ddir; + + /* + * We have too much pending sleep in this direction. See if we + * should switch. + */ + if (td_rw(td)) { + /* + * Other direction does not have too much pending, switch + */ + if (td->rate_pending_usleep[odir] < 100000) + return odir; + + /* + * Both directions have pending sleep. Sleep the minimum time + * and deduct from both. + */ + if (td->rate_pending_usleep[ddir] <= + td->rate_pending_usleep[odir]) { + usec = td->rate_pending_usleep[ddir]; + } else { + usec = td->rate_pending_usleep[odir]; + ddir = odir; + } + } else + usec = td->rate_pending_usleep[ddir]; + + fio_gettime(&t, NULL); + usec_sleep(td, usec); + usec = utime_since_now(&t); + + td->rate_pending_usleep[ddir] -= usec; + + odir = ddir ^ 1; + if (td_rw(td) && __should_check_rate(td, odir)) + td->rate_pending_usleep[odir] -= usec; + + return ddir; +} + /* * Return the data direction for the next io_u. If the job is a * mixed read/write workload, check the rwmix cycle and switch if @@ -162,210 +449,646 @@ static unsigned int get_next_buflen(struct thread_data *td, struct fio_file *f, */ static enum fio_ddir get_rw_ddir(struct thread_data *td) { - if (td_rw(td)) { - struct timeval now; - unsigned long elapsed; + enum fio_ddir ddir; - fio_gettime(&now, NULL); - elapsed = mtime_since_now(&td->rwmix_switch); + /* + * see if it's time to fsync + */ + if (td->o.fsync_blocks && + !(td->io_issues[DDIR_WRITE] % td->o.fsync_blocks) && + td->io_issues[DDIR_WRITE] && should_fsync(td)) + return DDIR_SYNC; + /* + * see if it's time to fdatasync + */ + if (td->o.fdatasync_blocks && + !(td->io_issues[DDIR_WRITE] % td->o.fdatasync_blocks) && + td->io_issues[DDIR_WRITE] && should_fsync(td)) + return DDIR_DATASYNC; + + /* + * see if it's time to sync_file_range + */ + if (td->sync_file_range_nr && + !(td->io_issues[DDIR_WRITE] % td->sync_file_range_nr) && + td->io_issues[DDIR_WRITE] && should_fsync(td)) + return DDIR_SYNC_FILE_RANGE; + + if (td_rw(td)) { /* * Check if it's time to seed a new data direction. 
*/ - if (elapsed >= td->rwmixcycle) { - unsigned int v; - long r; - - r = os_random_long(&td->rwmix_state); - v = 1 + (int) (100.0 * (r / (RAND_MAX + 1.0))); - if (v < td->rwmixread) - td->rwmix_ddir = DDIR_READ; - else - td->rwmix_ddir = DDIR_WRITE; - memcpy(&td->rwmix_switch, &now, sizeof(now)); + if (td->io_issues[td->rwmix_ddir] >= td->rwmix_issues) { + /* + * Put a top limit on how many bytes we do for + * one data direction, to avoid overflowing the + * ranges too much + */ + ddir = get_rand_ddir(td); + + if (ddir != td->rwmix_ddir) + set_rwmix_bytes(td); + + td->rwmix_ddir = ddir; } - return td->rwmix_ddir; + ddir = td->rwmix_ddir; } else if (td_read(td)) - return DDIR_READ; + ddir = DDIR_READ; else - return DDIR_WRITE; + ddir = DDIR_WRITE; + + td->rwmix_ddir = rate_ddir(td, ddir); + return td->rwmix_ddir; +} + +void put_file_log(struct thread_data *td, struct fio_file *f) +{ + int ret = put_file(td, f); + + if (ret) + td_verror(td, ret, "file close"); } void put_io_u(struct thread_data *td, struct io_u *io_u) { + td_io_u_lock(td); + + io_u->flags |= IO_U_F_FREE; + io_u->flags &= ~IO_U_F_FREE_DEF; + + if (io_u->file) + put_file_log(td, io_u->file); + io_u->file = NULL; - list_del(&io_u->list); - list_add(&io_u->list, &td->io_u_freelist); - td->cur_depth--; + if (io_u->flags & IO_U_F_IN_CUR_DEPTH) + td->cur_depth--; + flist_del_init(&io_u->list); + flist_add(&io_u->list, &td->io_u_freelist); + td_io_u_unlock(td); + td_io_u_free_notify(td); +} + +void clear_io_u(struct thread_data *td, struct io_u *io_u) +{ + io_u->flags &= ~IO_U_F_FLIGHT; + put_io_u(td, io_u); } -static int fill_io_u(struct thread_data *td, struct fio_file *f, - struct io_u *io_u) +void requeue_io_u(struct thread_data *td, struct io_u **io_u) { + struct io_u *__io_u = *io_u; + + dprint(FD_IO, "requeue %p\n", __io_u); + + td_io_u_lock(td); + + __io_u->flags |= IO_U_F_FREE; + if ((__io_u->flags & IO_U_F_FLIGHT) && ddir_rw(__io_u->ddir)) + td->io_issues[__io_u->ddir]--; + + __io_u->flags &= ~IO_U_F_FLIGHT; + if (__io_u->flags & IO_U_F_IN_CUR_DEPTH) + td->cur_depth--; + flist_del(&__io_u->list); + flist_add_tail(&__io_u->list, &td->io_u_requeues); + td_io_u_unlock(td); + *io_u = NULL; +} + +static int fill_io_u(struct thread_data *td, struct io_u *io_u) +{ + if (td->io_ops->flags & FIO_NOIO) + goto out; + + io_u->ddir = get_rw_ddir(td); + /* - * If using an iolog, grab next piece if any available. + * fsync() or fdatasync() or trim etc, we are done */ - if (td->read_iolog) - return read_iolog_get(td, io_u); + if (!ddir_rw(io_u->ddir)) + goto out; /* - * see if it's time to sync + * See if it's time to switch to a new zone */ - if (td->fsync_blocks && !(td->io_blocks[DDIR_WRITE] % td->fsync_blocks) - && should_fsync(td)) { - io_u->ddir = DDIR_SYNC; - io_u->file = f; - return 0; + if (td->zone_bytes >= td->o.zone_size) { + td->zone_bytes = 0; + io_u->file->last_pos += td->o.zone_skip; + td->io_skip_bytes += td->o.zone_skip; } - io_u->ddir = get_rw_ddir(td); - /* * No log, let the seq/rand engine retrieve the next buflen and * position. 
*/ - if (get_next_offset(td, f, io_u)) + if (get_next_offset(td, io_u)) { + dprint(FD_IO, "io_u %p, failed getting offset\n", io_u); + return 1; + } + + io_u->buflen = get_next_buflen(td, io_u); + if (!io_u->buflen) { + dprint(FD_IO, "io_u %p, failed getting buflen\n", io_u); return 1; + } - io_u->buflen = get_next_buflen(td, f, io_u); - if (!io_u->buflen) + if (io_u->offset + io_u->buflen > io_u->file->real_file_size) { + dprint(FD_IO, "io_u %p, offset too large\n", io_u); + dprint(FD_IO, " off=%llu/%lu > %llu\n", io_u->offset, + io_u->buflen, io_u->file->real_file_size); return 1; + } /* * mark entry before potentially trimming io_u */ - if (!td->read_iolog && !td->sequential && !td->norandommap) - mark_random_map(td, f, io_u); + if (td_random(td) && file_randommap(td, io_u->file)) + mark_random_map(td, io_u); /* * If using a write iolog, store this entry. */ - if (td->write_iolog_file) - write_iolog_put(td, io_u); - - io_u->file = f; +out: + dprint_io_u(io_u, "fill_io_u"); + td->zone_bytes += io_u->buflen; + log_io_u(td, io_u); return 0; } -static void io_u_mark_depth(struct thread_data *td) +static void __io_u_mark_map(unsigned int *map, unsigned int nr) +{ + int index = 0; + + switch (nr) { + default: + index = 6; + break; + case 33 ... 64: + index = 5; + break; + case 17 ... 32: + index = 4; + break; + case 9 ... 16: + index = 3; + break; + case 5 ... 8: + index = 2; + break; + case 1 ... 4: + index = 1; + case 0: + break; + } + + map[index]++; +} + +void io_u_mark_submit(struct thread_data *td, unsigned int nr) +{ + __io_u_mark_map(td->ts.io_u_submit, nr); + td->ts.total_submit++; +} + +void io_u_mark_complete(struct thread_data *td, unsigned int nr) +{ + __io_u_mark_map(td->ts.io_u_complete, nr); + td->ts.total_complete++; +} + +void io_u_mark_depth(struct thread_data *td, unsigned int nr) { int index = 0; switch (td->cur_depth) { default: - index++; + index = 6; + break; case 32 ... 63: - index++; + index = 5; + break; case 16 ... 31: - index++; + index = 4; + break; case 8 ... 15: - index++; + index = 3; + break; case 4 ... 7: - index++; + index = 2; + break; case 2 ... 3: - index++; + index = 1; case 1: break; } - td->io_u_map[index]++; - td->total_io_u++; + td->ts.io_u_map[index] += nr; } -static void io_u_mark_latency(struct thread_data *td, unsigned long msec) +static void io_u_mark_lat_usec(struct thread_data *td, unsigned long usec) +{ + int index = 0; + + assert(usec < 1000); + + switch (usec) { + case 750 ... 999: + index = 9; + break; + case 500 ... 749: + index = 8; + break; + case 250 ... 499: + index = 7; + break; + case 100 ... 249: + index = 6; + break; + case 50 ... 99: + index = 5; + break; + case 20 ... 49: + index = 4; + break; + case 10 ... 19: + index = 3; + break; + case 4 ... 9: + index = 2; + break; + case 2 ... 3: + index = 1; + case 0 ... 1: + break; + } + + assert(index < FIO_IO_U_LAT_U_NR); + td->ts.io_u_lat_u[index]++; +} + +static void io_u_mark_lat_msec(struct thread_data *td, unsigned long msec) { int index = 0; switch (msec) { default: - index++; - case 1024 ... 2047: - index++; - case 512 ... 1023: - index++; - case 256 ... 511: - index++; - case 128 ... 255: - index++; - case 64 ... 127: - index++; - case 32 ... 63: - index++; - case 16 ... 31: - index++; - case 8 ... 15: - index++; - case 4 ... 7: - index++; + index = 11; + break; + case 1000 ... 1999: + index = 10; + break; + case 750 ... 999: + index = 9; + break; + case 500 ... 749: + index = 8; + break; + case 250 ... 499: + index = 7; + break; + case 100 ... 
249: + index = 6; + break; + case 50 ... 99: + index = 5; + break; + case 20 ... 49: + index = 4; + break; + case 10 ... 19: + index = 3; + break; + case 4 ... 9: + index = 2; + break; case 2 ... 3: - index++; + index = 1; case 0 ... 1: break; } - td->io_u_lat[index]++; + assert(index < FIO_IO_U_LAT_M_NR); + td->ts.io_u_lat_m[index]++; +} + +static void io_u_mark_latency(struct thread_data *td, unsigned long usec) +{ + if (usec < 1000) + io_u_mark_lat_usec(td, usec); + else + io_u_mark_lat_msec(td, usec / 1000); +} + +/* + * Get next file to service by choosing one at random + */ +static struct fio_file *get_next_file_rand(struct thread_data *td, + enum fio_file_flags goodf, + enum fio_file_flags badf) +{ + struct fio_file *f; + int fno; + + do { + long r = os_random_long(&td->next_file_state); + int opened = 0; + + fno = (unsigned int) ((double) td->o.nr_files + * (r / (OS_RAND_MAX + 1.0))); + f = td->files[fno]; + if (fio_file_done(f)) + continue; + + if (!fio_file_open(f)) { + int err; + + err = td_io_open_file(td, f); + if (err) + continue; + opened = 1; + } + + if ((!goodf || (f->flags & goodf)) && !(f->flags & badf)) { + dprint(FD_FILE, "get_next_file_rand: %p\n", f); + return f; + } + if (opened) + td_io_close_file(td, f); + } while (1); +} + +/* + * Get next file to service by doing round robin between all available ones + */ +static struct fio_file *get_next_file_rr(struct thread_data *td, int goodf, + int badf) +{ + unsigned int old_next_file = td->next_file; + struct fio_file *f; + + do { + int opened = 0; + + f = td->files[td->next_file]; + + td->next_file++; + if (td->next_file >= td->o.nr_files) + td->next_file = 0; + + dprint(FD_FILE, "trying file %s %x\n", f->file_name, f->flags); + if (fio_file_done(f)) { + f = NULL; + continue; + } + + if (!fio_file_open(f)) { + int err; + + err = td_io_open_file(td, f); + if (err) { + dprint(FD_FILE, "error %d on open of %s\n", + err, f->file_name); + f = NULL; + continue; + } + opened = 1; + } + + dprint(FD_FILE, "goodf=%x, badf=%x, ff=%x\n", goodf, badf, + f->flags); + if ((!goodf || (f->flags & goodf)) && !(f->flags & badf)) + break; + + if (opened) + td_io_close_file(td, f); + + f = NULL; + } while (td->next_file != old_next_file); + + dprint(FD_FILE, "get_next_file_rr: %p\n", f); + return f; +} + +static struct fio_file *__get_next_file(struct thread_data *td) +{ + struct fio_file *f; + + assert(td->o.nr_files <= td->files_index); + + if (td->nr_done_files >= td->o.nr_files) { + dprint(FD_FILE, "get_next_file: nr_open=%d, nr_done=%d," + " nr_files=%d\n", td->nr_open_files, + td->nr_done_files, + td->o.nr_files); + return NULL; + } + + f = td->file_service_file; + if (f && fio_file_open(f) && !fio_file_closing(f)) { + if (td->o.file_service_type == FIO_FSERVICE_SEQ) + goto out; + if (td->file_service_left--) + goto out; + } + + if (td->o.file_service_type == FIO_FSERVICE_RR || + td->o.file_service_type == FIO_FSERVICE_SEQ) + f = get_next_file_rr(td, FIO_FILE_open, FIO_FILE_closing); + else + f = get_next_file_rand(td, FIO_FILE_open, FIO_FILE_closing); + + td->file_service_file = f; + td->file_service_left = td->file_service_nr - 1; +out: + dprint(FD_FILE, "get_next_file: %p [%s]\n", f, f->file_name); + return f; } +static struct fio_file *get_next_file(struct thread_data *td) +{ + struct prof_io_ops *ops = &td->prof_io_ops; + + if (ops->get_next_file) + return ops->get_next_file(td); + + return __get_next_file(td); +} + +static int set_io_u_file(struct thread_data *td, struct io_u *io_u) +{ + struct fio_file *f; + + do { + f = 
get_next_file(td); + if (!f) + return 1; + + io_u->file = f; + get_file(f); + + if (!fill_io_u(td, io_u)) + break; + + put_file_log(td, f); + td_io_close_file(td, f); + io_u->file = NULL; + fio_file_set_done(f); + td->nr_done_files++; + dprint(FD_FILE, "%s: is done (%d of %d)\n", f->file_name, + td->nr_done_files, td->o.nr_files); + } while (1); + + return 0; +} + + struct io_u *__get_io_u(struct thread_data *td) { struct io_u *io_u = NULL; - if (!queue_full(td)) { - io_u = list_entry(td->io_u_freelist.next, struct io_u, list); + td_io_u_lock(td); + +again: + if (!flist_empty(&td->io_u_requeues)) + io_u = flist_entry(td->io_u_requeues.next, struct io_u, list); + else if (!queue_full(td)) { + io_u = flist_entry(td->io_u_freelist.next, struct io_u, list); io_u->buflen = 0; - io_u->error = 0; io_u->resid = 0; - list_del(&io_u->list); - list_add(&io_u->list, &td->io_u_busylist); + io_u->file = NULL; + io_u->end_io = NULL; + } + + if (io_u) { + assert(io_u->flags & IO_U_F_FREE); + io_u->flags &= ~(IO_U_F_FREE | IO_U_F_FREE_DEF); + + io_u->error = 0; + flist_del(&io_u->list); + flist_add(&io_u->list, &td->io_u_busylist); td->cur_depth++; - io_u_mark_depth(td); + io_u->flags |= IO_U_F_IN_CUR_DEPTH; + } else if (td->o.verify_async) { + /* + * We ran out, wait for async verify threads to finish and + * return one + */ + pthread_cond_wait(&td->free_cond, &td->io_u_lock); + goto again; } + td_io_u_unlock(td); return io_u; } +static int check_get_trim(struct thread_data *td, struct io_u *io_u) +{ + if (td->o.trim_backlog && td->trim_entries) { + int get_trim = 0; + + if (td->trim_batch) { + td->trim_batch--; + get_trim = 1; + } else if (!(td->io_hist_len % td->o.trim_backlog) && + td->last_ddir != DDIR_READ) { + td->trim_batch = td->o.trim_batch; + if (!td->trim_batch) + td->trim_batch = td->o.trim_backlog; + get_trim = 1; + } + + if (get_trim && !get_next_trim(td, io_u)) + return 1; + } + + return 0; +} + +static int check_get_verify(struct thread_data *td, struct io_u *io_u) +{ + if (td->o.verify_backlog && td->io_hist_len) { + int get_verify = 0; + + if (td->verify_batch) { + td->verify_batch--; + get_verify = 1; + } else if (!(td->io_hist_len % td->o.verify_backlog) && + td->last_ddir != DDIR_READ) { + td->verify_batch = td->o.verify_batch; + if (!td->verify_batch) + td->verify_batch = td->o.verify_backlog; + get_verify = 1; + } + + if (get_verify && !get_next_verify(td, io_u)) + return 1; + } + + return 0; +} + /* * Return an io_u to be processed. Gets a buflen and offset, sets direction, * etc. The returned io_u is fully ready to be prepped and submitted. */ -struct io_u *get_io_u(struct thread_data *td, struct fio_file *f) +struct io_u *get_io_u(struct thread_data *td) { + struct fio_file *f; struct io_u *io_u; io_u = __get_io_u(td); - if (!io_u) + if (!io_u) { + dprint(FD_IO, "__get_io_u failed\n"); return NULL; - - if (td->zone_bytes >= td->zone_size) { - td->zone_bytes = 0; - f->last_pos += td->zone_skip; } - if (fill_io_u(td, f, io_u)) { - put_io_u(td, io_u); - return NULL; - } + if (check_get_verify(td, io_u)) + goto out; + if (check_get_trim(td, io_u)) + goto out; - if (io_u->buflen + io_u->offset > f->real_file_size) { - if (td->io_ops->flags & FIO_RAWIO) { - put_io_u(td, io_u); - return NULL; - } + /* + * from a requeue, io_u already setup + */ + if (io_u->file) + goto out; - io_u->buflen = f->real_file_size - io_u->offset; + /* + * If using an iolog, grab next piece if any available. 
+ */ + if (td->o.read_iolog_file) { + if (read_iolog_get(td, io_u)) + goto err_put; + } else if (set_io_u_file(td, io_u)) { + dprint(FD_IO, "io_u %p, setting file failed\n", io_u); + goto err_put; } - if (io_u->ddir != DDIR_SYNC) { - if (!io_u->buflen) { - put_io_u(td, io_u); - return NULL; + f = io_u->file; + assert(fio_file_open(f)); + + if (ddir_rw(io_u->ddir)) { + if (!io_u->buflen && !(td->io_ops->flags & FIO_NOIO)) { + dprint(FD_IO, "get_io_u: zero buflen on %p\n", io_u); + goto err_put; } + f->last_start = io_u->offset; f->last_pos = io_u->offset + io_u->buflen; - if (td->verify != VERIFY_NONE) + if (td->o.verify != VERIFY_NONE && io_u->ddir == DDIR_WRITE) populate_verify_io_u(td, io_u); + else if (td->o.refill_buffers && io_u->ddir == DDIR_WRITE) + io_u_fill_buffer(td, io_u, io_u->xfer_buflen); + else if (io_u->ddir == DDIR_READ) { + /* + * Reset the buf_filled parameters so next time if the + * buffer is used for writes it is refilled. + */ + io_u->buf_filled_len = 0; + } } /* @@ -374,65 +1097,159 @@ struct io_u *get_io_u(struct thread_data *td, struct fio_file *f) io_u->xfer_buf = io_u->buf; io_u->xfer_buflen = io_u->buflen; - if (td_io_prep(td, io_u)) { - put_io_u(td, io_u); - return NULL; +out: + assert(io_u->file); + if (!td_io_prep(td, io_u)) { + if (!td->o.disable_slat) + fio_gettime(&io_u->start_time, NULL); + return io_u; } +err_put: + dprint(FD_IO, "get_io_u failed\n"); + put_io_u(td, io_u); + return NULL; +} - fio_gettime(&io_u->start_time, NULL); - return io_u; +void io_u_log_error(struct thread_data *td, struct io_u *io_u) +{ + const char *msg[] = { "read", "write", "sync", "datasync", + "sync_file_range", "wait", "trim" }; + + + + log_err("fio: io_u error"); + + if (io_u->file) + log_err(" on file %s", io_u->file->file_name); + + log_err(": %s\n", strerror(io_u->error)); + + log_err(" %s offset=%llu, buflen=%lu\n", msg[io_u->ddir], + io_u->offset, io_u->xfer_buflen); + + if (!td->error) + td_verror(td, io_u->error, "io_u error"); } static void io_completed(struct thread_data *td, struct io_u *io_u, struct io_completion_data *icd) { - unsigned long msec; + /* + * Older gcc's are too dumb to realize that usec is always used + * initialized, silence that warning. 
+ */ + unsigned long uninitialized_var(usec); + struct fio_file *f; + + dprint_io_u(io_u, "io complete"); + + td_io_u_lock(td); + assert(io_u->flags & IO_U_F_FLIGHT); + io_u->flags &= ~(IO_U_F_FLIGHT | IO_U_F_BUSY_OK); + td_io_u_unlock(td); - if (io_u->ddir == DDIR_SYNC) { + if (ddir_sync(io_u->ddir)) { td->last_was_sync = 1; + f = io_u->file; + if (f) { + f->first_write = -1ULL; + f->last_write = -1ULL; + } return; } td->last_was_sync = 0; + td->last_ddir = io_u->ddir; - if (!io_u->error) { + if (!io_u->error && ddir_rw(io_u->ddir)) { unsigned int bytes = io_u->buflen - io_u->resid; const enum fio_ddir idx = io_u->ddir; + const enum fio_ddir odx = io_u->ddir ^ 1; int ret; td->io_blocks[idx]++; td->io_bytes[idx] += bytes; - td->zone_bytes += bytes; td->this_io_bytes[idx] += bytes; - io_u->file->last_completed_pos = io_u->offset + io_u->buflen; - - msec = mtime_since(&io_u->issue_time, &icd->time); + if (idx == DDIR_WRITE) { + f = io_u->file; + if (f) { + if (f->first_write == -1ULL || + io_u->offset < f->first_write) + f->first_write = io_u->offset; + if (f->last_write == -1ULL || + ((io_u->offset + bytes) > f->last_write)) + f->last_write = io_u->offset + bytes; + } + } - add_clat_sample(td, idx, msec); - add_bw_sample(td, idx, &icd->time); - io_u_mark_latency(td, msec); + if (ramp_time_over(td)) { + unsigned long uninitialized_var(lusec); + + if (!td->o.disable_clat || !td->o.disable_bw) + lusec = utime_since(&io_u->issue_time, + &icd->time); + if (!td->o.disable_lat) { + unsigned long tusec; + + tusec = utime_since(&io_u->start_time, + &icd->time); + add_lat_sample(td, idx, tusec, bytes); + } + if (!td->o.disable_clat) { + add_clat_sample(td, idx, lusec, bytes); + io_u_mark_latency(td, lusec); + } + if (!td->o.disable_bw) + add_bw_sample(td, idx, bytes, &icd->time); + if (__should_check_rate(td, idx)) { + td->rate_pending_usleep[idx] = + ((td->this_io_bytes[idx] * + td->rate_nsec_cycle[idx]) / 1000 - + utime_since_now(&td->start)); + } + if (__should_check_rate(td, idx ^ 1)) + td->rate_pending_usleep[odx] = + ((td->this_io_bytes[odx] * + td->rate_nsec_cycle[odx]) / 1000 - + utime_since_now(&td->start)); + } - if ((td_rw(td) || td_write(td)) && idx == DDIR_WRITE) + if (td_write(td) && idx == DDIR_WRITE && + td->o.do_verify && + td->o.verify != VERIFY_NONE) log_io_piece(td, io_u); icd->bytes_done[idx] += bytes; - if (icd->handler) { - ret = icd->handler(io_u); + if (io_u->end_io) { + ret = io_u->end_io(td, io_u); if (ret && !icd->error) icd->error = ret; } - } else + } else if (io_u->error) { icd->error = io_u->error; + io_u_log_error(td, io_u); + } + if (td->o.continue_on_error && icd->error && + td_non_fatal_error(icd->error)) { + /* + * If there is a non_fatal error, then add to the error count + * and clear all the errors. 
+ */ + update_error_count(td, icd->error); + td_clear_error(td); + icd->error = 0; + io_u->error = 0; + } } -static void init_icd(struct io_completion_data *icd, endio_handler *handler, +static void init_icd(struct thread_data *td, struct io_completion_data *icd, int nr) { - fio_gettime(&icd->time, NULL); + if (!td->o.disable_clat || !td->o.disable_bw) + fio_gettime(&icd->time, NULL); - icd->handler = handler; icd->nr = nr; icd->error = 0; @@ -449,50 +1266,98 @@ static void ios_completed(struct thread_data *td, io_u = td->io_ops->event(td, i); io_completed(td, io_u, icd); - put_io_u(td, io_u); + + if (!(io_u->flags & IO_U_F_FREE_DEF)) + put_io_u(td, io_u); } } -long io_u_sync_complete(struct thread_data *td, struct io_u *io_u, - endio_handler *handler) +/* + * Complete a single io_u for the sync engines. + */ +int io_u_sync_complete(struct thread_data *td, struct io_u *io_u, + unsigned long *bytes) { struct io_completion_data icd; - init_icd(&icd, handler, 1); + init_icd(td, &icd, 1); io_completed(td, io_u, &icd); - put_io_u(td, io_u); - if (!icd.error) - return icd.bytes_done[0] + icd.bytes_done[1]; + if (!(io_u->flags & IO_U_F_FREE_DEF)) + put_io_u(td, io_u); - td_verror(td, icd.error); - return -1; -} + if (icd.error) { + td_verror(td, icd.error, "io_u_sync_complete"); + return -1; + } -long io_u_queued_complete(struct thread_data *td, int min_events, - endio_handler *handler) + if (bytes) { + bytes[0] += icd.bytes_done[0]; + bytes[1] += icd.bytes_done[1]; + } + return 0; +} + +/* + * Called to complete min_events number of io for the async engines. + */ +int io_u_queued_complete(struct thread_data *td, int min_evts, + unsigned long *bytes) { - struct timespec ts = { .tv_sec = 0, .tv_nsec = 0, }; - struct timespec *tsp = NULL; struct io_completion_data icd; + struct timespec *tvp = NULL; int ret; + struct timespec ts = { .tv_sec = 0, .tv_nsec = 0, }; - if (min_events > 0) - tsp = &ts; + dprint(FD_IO, "io_u_queued_completed: min=%d\n", min_evts); - ret = td_io_getevents(td, min_events, td->cur_depth, tsp); + if (!min_evts) + tvp = &ts; + + ret = td_io_getevents(td, min_evts, td->o.iodepth_batch_complete, tvp); if (ret < 0) { - td_verror(td, -ret); + td_verror(td, -ret, "td_io_getevents"); return ret; } else if (!ret) return ret; - init_icd(&icd, handler, ret); + init_icd(td, &icd, ret); ios_completed(td, &icd); - if (!icd.error) - return icd.bytes_done[0] + icd.bytes_done[1]; + if (icd.error) { + td_verror(td, icd.error, "io_u_queued_complete"); + return -1; + } + + if (bytes) { + bytes[0] += icd.bytes_done[0]; + bytes[1] += icd.bytes_done[1]; + } + + return 0; +} + +/* + * Call when io_u is really queued, to update the submission latency. + */ +void io_u_queued(struct thread_data *td, struct io_u *io_u) +{ + if (!td->o.disable_slat) { + unsigned long slat_time; + + slat_time = utime_since(&io_u->start_time, &io_u->issue_time); + add_slat_sample(td, io_u->ddir, slat_time, io_u->xfer_buflen); + } +} - td_verror(td, icd.error); - return -1; +/* + * "randomly" fill the buffer contents + */ +void io_u_fill_buffer(struct thread_data *td, struct io_u *io_u, + unsigned int max_bs) +{ + if (!td->o.zero_buffers) + fill_random_buf(io_u->buf, max_bs); + else + memset(io_u->buf, 0, max_bs); }