X-Git-Url: https://git.kernel.dk/?a=blobdiff_plain;f=io_u.c;h=ae1438fd665673e3077cc41c8dae0e4ace1b01c4;hb=d87d9c6c611f74a5888c74568d545be200139498;hp=9e9dad5a79f15c6f405b26ad16736388149d257c;hpb=04bc85a130c5dbe25dafe5fa5084c5bdcf63844a;p=fio.git

diff --git a/io_u.c b/io_u.c
index 9e9dad5a..ae1438fd 100644
--- a/io_u.c
+++ b/io_u.c
@@ -10,6 +10,7 @@
 #include "err.h"
 #include "lib/pow2.h"
 #include "minmax.h"
+#include "zbd.h"
 
 struct io_completion_data {
 	int nr;				/* input */
@@ -31,21 +32,27 @@ static bool random_map_free(struct fio_file *f, const uint64_t block)
 /*
  * Mark a given offset as used in the map.
  */
-static void mark_random_map(struct thread_data *td, struct io_u *io_u)
+static uint64_t mark_random_map(struct thread_data *td, struct io_u *io_u,
+				uint64_t offset, uint64_t buflen)
 {
-	unsigned int min_bs = td->o.min_bs[io_u->ddir];
+	unsigned long long min_bs = td->o.min_bs[io_u->ddir];
 	struct fio_file *f = io_u->file;
-	unsigned int nr_blocks;
+	unsigned long long nr_blocks;
 	uint64_t block;
 
-	block = (io_u->offset - f->file_offset) / (uint64_t) min_bs;
-	nr_blocks = (io_u->buflen + min_bs - 1) / min_bs;
+	block = (offset - f->file_offset) / (uint64_t) min_bs;
+	nr_blocks = (buflen + min_bs - 1) / min_bs;
+	assert(nr_blocks > 0);
 
-	if (!(io_u->flags & IO_U_F_BUSY_OK))
+	if (!(io_u->flags & IO_U_F_BUSY_OK)) {
 		nr_blocks = axmap_set_nr(f->io_axmap, block, nr_blocks);
+		assert(nr_blocks > 0);
+	}
+
+	if ((nr_blocks * min_bs) < buflen)
+		buflen = nr_blocks * min_bs;
 
-	if ((nr_blocks * min_bs) < io_u->buflen)
-		io_u->buflen = nr_blocks * min_bs;
+	return buflen;
 }
 
 static uint64_t last_block(struct thread_data *td, struct fio_file *f,
@@ -64,7 +71,7 @@ static uint64_t last_block(struct thread_data *td, struct fio_file *f,
 	if (max_size > f->real_file_size)
 		max_size = f->real_file_size;
 
-	if (td->o.zone_range)
+	if (td->o.zone_mode == ZONE_MODE_STRIDED && td->o.zone_range)
 		max_size = td->o.zone_range;
 
 	if (td->o.min_bs[ddir] > td->o.ba[ddir])
@@ -503,19 +510,19 @@ static int get_next_offset(struct thread_data *td, struct io_u *io_u,
 }
 
 static inline bool io_u_fits(struct thread_data *td, struct io_u *io_u,
-			     unsigned int buflen)
+			     unsigned long long buflen)
 {
 	struct fio_file *f = io_u->file;
 
 	return io_u->offset + buflen <= f->io_size + get_start_offset(td, f);
 }
 
-static unsigned int get_next_buflen(struct thread_data *td, struct io_u *io_u,
+static unsigned long long get_next_buflen(struct thread_data *td, struct io_u *io_u,
 				    bool is_random)
 {
 	int ddir = io_u->ddir;
-	unsigned int buflen = 0;
-	unsigned int minbs, maxbs;
+	unsigned long long buflen = 0;
+	unsigned long long minbs, maxbs;
 	uint64_t frand_max, r;
 	bool power_2;
@@ -541,10 +548,8 @@ static unsigned int get_next_buflen(struct thread_data *td, struct io_u *io_u,
 		r = __rand(&td->bsrange_state[ddir]);
 
 		if (!td->o.bssplit_nr[ddir]) {
-			buflen = 1 + (unsigned int) ((double) maxbs *
+			buflen = minbs + (unsigned long long) ((double) maxbs *
 					(r / (frand_max + 1.0)));
-			if (buflen < minbs)
-				buflen = minbs;
 		} else {
 			long long perc = 0;
 			unsigned int i;
@@ -552,10 +557,10 @@ static unsigned int get_next_buflen(struct thread_data *td, struct io_u *io_u,
 			for (i = 0; i < td->o.bssplit_nr[ddir]; i++) {
 				struct bssplit *bsp = &td->o.bssplit[ddir][i];
 
+				if (!bsp->perc)
+					continue;
 				buflen = bsp->bs;
 				perc += bsp->perc;
-				if (!perc)
-					break;
 				if ((r / perc <= frand_max / 100ULL) &&
 				    io_u_fits(td, io_u, buflen))
 					break;
@@ -565,8 +570,10 @@ static unsigned int get_next_buflen(struct thread_data *td, struct io_u *io_u,
 		power_2 = is_power_of_2(minbs);
 		if (!td->o.bs_unaligned && power_2)
 			buflen &= ~(minbs - 1);
-		else if (!td->o.bs_unaligned && !power_2)
-			buflen -= buflen % minbs;
+		else if (!td->o.bs_unaligned && !power_2)
+			buflen -= buflen % minbs;
+		if (buflen > maxbs)
+			buflen = maxbs;
 	} while (!io_u_fits(td, io_u, buflen));
 
 	return buflen;
@@ -599,7 +606,7 @@ static inline enum fio_ddir get_rand_ddir(struct thread_data *td)
 
 int io_u_quiesce(struct thread_data *td)
 {
-	int completed = 0;
+	int ret = 0, completed = 0, err = 0;
 
 	/*
 	 * We are going to sleep, ensure that we flush anything pending as
@@ -614,17 +621,20 @@ int io_u_quiesce(struct thread_data *td)
 	td_io_commit(td);
 
 	while (td->io_u_in_flight) {
-		int ret;
-
 		ret = io_u_queued_complete(td, 1);
 		if (ret > 0)
 			completed += ret;
+		else if (ret < 0)
+			err = ret;
 	}
 
 	if (td->flags & TD_F_REGROW_LOGS)
 		regrow_logs(td);
 
-	return completed;
+	if (completed)
+		return completed;
+
+	return err;
 }
 
 static enum fio_ddir rate_ddir(struct thread_data *td, enum fio_ddir ddir)
@@ -634,7 +644,7 @@ static enum fio_ddir rate_ddir(struct thread_data *td, enum fio_ddir ddir)
 	uint64_t now;
 
 	assert(ddir_rw(ddir));
-	now = utime_since_now(&td->start);
+	now = utime_since_now(&td->epoch);
 
 	/*
 	 * if rate_next_io_time is in the past, need to catch up to rate
@@ -736,6 +746,9 @@ static void set_rw_ddir(struct thread_data *td, struct io_u *io_u)
 {
 	enum fio_ddir ddir = get_rw_ddir(td);
 
+	if (td->o.zone_mode == ZONE_MODE_ZBD)
+		ddir = zbd_adjust_ddir(td, io_u, ddir);
+
 	if (td_trimwrite(td)) {
 		struct fio_file *f = io_u->file;
 		if (f->last_pos[DDIR_WRITE] == f->last_pos[DDIR_TRIM])
@@ -763,10 +776,15 @@ void put_file_log(struct thread_data *td, struct fio_file *f)
 
 void put_io_u(struct thread_data *td, struct io_u *io_u)
 {
+	const bool needs_lock = td_async_processing(td);
+
+	zbd_put_io_u(io_u);
+
 	if (td->parent)
 		td = td->parent;
 
-	td_io_u_lock(td);
+	if (needs_lock)
+		__td_io_u_lock(td);
 
 	if (io_u->file && !(io_u->flags & IO_U_F_NO_FILE_PUT))
 		put_file_log(td, io_u->file);
@@ -780,7 +798,9 @@ void put_io_u(struct thread_data *td, struct io_u *io_u)
 	}
 	io_u_qpush(&td->io_u_freelist, io_u);
 	td_io_u_free_notify(td);
-	td_io_u_unlock(td);
+
+	if (needs_lock)
+		__td_io_u_unlock(td);
 }
 
 void clear_io_u(struct thread_data *td, struct io_u *io_u)
@@ -791,6 +811,7 @@ void clear_io_u(struct thread_data *td, struct io_u *io_u)
 
 void requeue_io_u(struct thread_data *td, struct io_u **io_u)
 {
+	const bool needs_lock = td_async_processing(td);
 	struct io_u *__io_u = *io_u;
 	enum fio_ddir ddir = acct_ddir(__io_u);
 
@@ -799,7 +820,8 @@ void requeue_io_u(struct thread_data *td, struct io_u **io_u)
 	if (td->parent)
 		td = td->parent;
 
-	td_io_u_lock(td);
+	if (needs_lock)
+		__td_io_u_lock(td);
 
 	io_u_set(td, __io_u, IO_U_F_FREE);
 	if ((__io_u->flags & IO_U_F_FLIGHT) && ddir_rw(ddir))
@@ -813,18 +835,25 @@ void requeue_io_u(struct thread_data *td, struct io_u **io_u)
 
 	io_u_rpush(&td->io_u_requeues, __io_u);
 	td_io_u_free_notify(td);
-	td_io_u_unlock(td);
+
+	if (needs_lock)
+		__td_io_u_unlock(td);
+
 	*io_u = NULL;
 }
 
-static void __fill_io_u_zone(struct thread_data *td, struct io_u *io_u)
+static void setup_strided_zone_mode(struct thread_data *td, struct io_u *io_u)
 {
 	struct fio_file *f = io_u->file;
 
+	assert(td->o.zone_mode == ZONE_MODE_STRIDED);
+	assert(td->o.zone_size);
+	assert(td->o.zone_range);
+
 	/*
 	 * See if it's time to switch to a new zone
 	 */
-	if (td->zone_bytes >= td->o.zone_size && td->o.zone_skip) {
+	if (td->zone_bytes >= td->o.zone_size) {
 		td->zone_bytes = 0;
 		f->file_offset += td->o.zone_range + td->o.zone_skip;
@@ -859,6 +888,8 @@ static void __fill_io_u_zone(struct thread_data *td, struct io_u *io_u)
 static int fill_io_u(struct thread_data *td, struct io_u *io_u)
 {
 	bool is_random;
+	uint64_t offset;
+	enum io_u_action ret;
 
 	if (td_ioengine_flagged(td, FIO_NOIO))
 		goto out;
@@ -871,11 +902,10 @@ static int fill_io_u(struct thread_data *td, struct io_u *io_u)
 	if (!ddir_rw(io_u->ddir))
 		goto out;
 
-	/*
-	 * When file is zoned zone_range is always positive
-	 */
-	if (td->o.zone_range)
-		__fill_io_u_zone(td, io_u);
+	if (td->o.zone_mode == ZONE_MODE_STRIDED)
+		setup_strided_zone_mode(td, io_u);
+	else if (td->o.zone_mode == ZONE_MODE_ZBD)
+		setup_zbd_zone_mode(td, io_u);
 
 	/*
 	 * No log, let the seq/rand engine retrieve the next buflen and
@@ -892,8 +922,15 @@ static int fill_io_u(struct thread_data *td, struct io_u *io_u)
 		return 1;
 	}
 
+	offset = io_u->offset;
+	if (td->o.zone_mode == ZONE_MODE_ZBD) {
+		ret = zbd_adjust_block(td, io_u);
+		if (ret == io_u_eof)
+			return 1;
+	}
+
 	if (io_u->offset + io_u->buflen > io_u->file->real_file_size) {
-		dprint(FD_IO, "io_u %p, off=0x%llx + len=0x%lx exceeds file size=0x%llx\n",
+		dprint(FD_IO, "io_u %p, off=0x%llx + len=0x%llx exceeds file size=0x%llx\n",
 			io_u,
 			(unsigned long long) io_u->offset, io_u->buflen,
 			(unsigned long long) io_u->file->real_file_size);
@@ -904,7 +941,7 @@ static int fill_io_u(struct thread_data *td, struct io_u *io_u)
 	 * mark entry before potentially trimming io_u
 	 */
 	if (td_random(td) && file_randommap(td, io_u->file))
-		mark_random_map(td, io_u);
+		io_u->buflen = mark_random_map(td, io_u, offset, io_u->buflen);
 
 out:
 	dprint_io_u(io_u, "fill");
@@ -1305,6 +1342,8 @@ static long set_io_u_file(struct thread_data *td, struct io_u *io_u)
 		if (!fill_io_u(td, io_u))
 			break;
 
+		zbd_put_io_u(io_u);
+
 		put_file_log(td, f);
 		td_io_close_file(td, f);
 		io_u->file = NULL;
@@ -1352,6 +1391,7 @@ static bool __lat_target_failed(struct thread_data *td)
 		td->latency_qd_low--;
 
 	td->latency_qd = (td->latency_qd + td->latency_qd_low) / 2;
+	td->latency_stable_count = 0;
 
 	dprint(FD_RATE, "Ramped down: %d %d %d\n", td->latency_qd_low,
 		td->latency_qd, td->latency_qd_high);
@@ -1401,6 +1441,21 @@ static void lat_target_success(struct thread_data *td)
 
 	td->latency_qd_low = td->latency_qd;
 
+	if (td->latency_qd + 1 == td->latency_qd_high) {
+		/*
+		 * latency_qd will not increase on lat_target_success(), so
+		 * it is considered stable. If we stick with this depth, the
+		 * final latency is likely lower than latency_target. Fix
+		 * this by increasing latency_qd_high slowly. Use a naive
+		 * heuristic here. If we get lat_target_success() 3 times
+		 * in a row, increase latency_qd_high by 1.
+		 */
+		if (++td->latency_stable_count >= 3) {
+			td->latency_qd_high++;
+			td->latency_stable_count = 0;
+		}
+	}
+
 	/*
 	 * If we haven't failed yet, we double up to a failing value instead
 	 * of bisecting from highest possible queue depth. If we have set
@@ -1420,7 +1475,7 @@ static void lat_target_success(struct thread_data *td)
 	 * Same as last one, we are done. Let it run a latency cycle, so
 	 * we get only the results from the targeted depth.
 	 */
-	if (td->latency_qd == qd) {
+	if (!o->latency_run && td->latency_qd == qd) {
 		if (td->latency_end_run) {
 			dprint(FD_RATE, "We are done\n");
 			td->done = 1;
@@ -1479,13 +1534,15 @@ bool queue_full(const struct thread_data *td)
 
 struct io_u *__get_io_u(struct thread_data *td)
 {
+	const bool needs_lock = td_async_processing(td);
 	struct io_u *io_u = NULL;
 	int ret;
 
 	if (td->stop_io)
 		return NULL;
 
-	td_io_u_lock(td);
+	if (needs_lock)
+		__td_io_u_lock(td);
 
 again:
 	if (!io_u_rempty(&td->io_u_requeues))
@@ -1503,7 +1560,7 @@ again:
 		assert(io_u->flags & IO_U_F_FREE);
 		io_u_clear(td, io_u, IO_U_F_FREE | IO_U_F_NO_FILE_PUT |
 				 IO_U_F_TRIMMED | IO_U_F_BARRIER |
-				 IO_U_F_VER_LIST);
+				 IO_U_F_VER_LIST | IO_U_F_PRIORITY);
 
 		io_u->error = 0;
 		io_u->acct_ddir = -1;
@@ -1519,10 +1576,13 @@ again:
 		assert(!(td->flags & TD_F_CHILD));
 		ret = pthread_cond_wait(&td->free_cond, &td->io_u_lock);
 		assert(ret == 0);
-		goto again;
+		if (!td->error)
+			goto again;
 	}
 
-	td_io_u_unlock(td);
+	if (needs_lock)
+		__td_io_u_unlock(td);
+
 	return io_u;
 }
@@ -1584,7 +1644,7 @@ static bool check_get_verify(struct thread_data *td, struct io_u *io_u)
  */
 static void small_content_scramble(struct io_u *io_u)
 {
-	unsigned int i, nr_blocks = io_u->buflen >> 9;
+	unsigned long long i, nr_blocks = io_u->buflen >> 9;
 	unsigned int offset;
 	uint64_t boffset, *iptr;
 	char *p;
@@ -1728,7 +1788,7 @@ static void __io_u_log_error(struct thread_data *td, struct io_u *io_u)
 	if (td_non_fatal_error(td, eb, io_u->error) && !td->o.error_dump)
 		return;
 
-	log_err("fio: io_u error%s%s: %s: %s offset=%llu, buflen=%lu\n",
+	log_err("fio: io_u error%s%s: %s: %s offset=%llu, buflen=%llu\n",
 		io_u->file ? " on file " : "",
 		io_u->file ? io_u->file->file_name : "",
 		strerror(io_u->error),
@@ -1759,6 +1819,16 @@ static inline bool gtod_reduce(struct thread_data *td)
 		|| td->o.gtod_reduce;
 }
 
+static void trim_block_info(struct thread_data *td, struct io_u *io_u)
+{
+	uint32_t *info = io_u_block_info(td, io_u);
+
+	if (BLOCK_INFO_STATE(*info) >= BLOCK_STATE_TRIM_FAILURE)
+		return;
+
+	*info = BLOCK_INFO(BLOCK_STATE_TRIMMED, BLOCK_INFO_TRIMS(*info) + 1);
+}
+
 static void account_io_completion(struct thread_data *td, struct io_u *io_u,
 				  struct io_completion_data *icd,
 				  const enum fio_ddir idx, unsigned int bytes)
@@ -1779,7 +1849,7 @@ static void account_io_completion(struct thread_data *td, struct io_u *io_u,
 		unsigned long long tnsec;
 
 		tnsec = ntime_since(&io_u->start_time, &icd->time);
-		add_lat_sample(td, idx, tnsec, bytes, io_u->offset);
+		add_lat_sample(td, idx, tnsec, bytes, io_u->offset, io_u_is_prio(io_u));
 
 		if (td->flags & TD_F_PROFILE_OPS) {
 			struct prof_io_ops *ops = &td->prof_io_ops;
@@ -1798,7 +1868,7 @@ static void account_io_completion(struct thread_data *td, struct io_u *io_u,
 
 	if (ddir_rw(idx)) {
 		if (!td->o.disable_clat) {
-			add_clat_sample(td, idx, llnsec, bytes, io_u->offset);
+			add_clat_sample(td, idx, llnsec, bytes, io_u->offset, io_u_is_prio(io_u));
 			io_u_mark_latency(td, llnsec);
 		}
@@ -1810,18 +1880,8 @@ static void account_io_completion(struct thread_data *td, struct io_u *io_u,
 	} else if (ddir_sync(idx) && !td->o.disable_clat)
 		add_sync_clat_sample(&td->ts, llnsec);
 
-	if (td->ts.nr_block_infos && io_u->ddir == DDIR_TRIM) {
-		uint32_t *info = io_u_block_info(td, io_u);
-		if (BLOCK_INFO_STATE(*info) < BLOCK_STATE_TRIM_FAILURE) {
-			if (io_u->ddir == DDIR_TRIM) {
-				*info = BLOCK_INFO(BLOCK_STATE_TRIMMED,
-						BLOCK_INFO_TRIMS(*info) + 1);
-			} else if (io_u->ddir == DDIR_WRITE) {
-				*info = BLOCK_INFO_SET_STATE(BLOCK_STATE_WRITTEN,
-						*info);
-			}
-		}
-	}
+	if (td->ts.nr_block_infos && io_u->ddir == DDIR_TRIM)
+		trim_block_info(td, io_u);
 }
 
 static void file_log_write_comp(const struct thread_data *td, struct fio_file *f,
@@ -1894,7 +1954,7 @@ static void io_completed(struct thread_data *td, struct io_u **io_u_ptr,
 	td->last_ddir = ddir;
 
 	if (!io_u->error && ddir_rw(ddir)) {
-		unsigned int bytes = io_u->buflen - io_u->resid;
+		unsigned long long bytes = io_u->buflen - io_u->resid;
 		int ret;
 
 		td->io_blocks[ddir]++;
@@ -2050,7 +2110,7 @@ void io_u_queued(struct thread_data *td, struct io_u *io_u)
 			td = td->parent;
 
 		add_slat_sample(td, io_u->ddir, slat_time, io_u->xfer_buflen,
-				io_u->offset);
+				io_u->offset, io_u_is_prio(io_u));
 	}
 }
@@ -2084,8 +2144,8 @@ static void save_buf_state(struct thread_data *td, struct frand_state *rs)
 	frand_copy(&td->buf_state_prev, rs);
 }
 
-void fill_io_buffer(struct thread_data *td, void *buf, unsigned int min_write,
-		    unsigned int max_bs)
+void fill_io_buffer(struct thread_data *td, void *buf, unsigned long long min_write,
+		    unsigned long long max_bs)
 {
 	struct thread_options *o = &td->o;
@@ -2095,8 +2155,8 @@ void fill_io_buffer(struct thread_data *td, void *buf, unsigned int min_write,
 	if (o->compress_percentage || o->dedupe_percentage) {
 		unsigned int perc = td->o.compress_percentage;
 		struct frand_state *rs;
-		unsigned int left = max_bs;
-		unsigned int this_write;
+		unsigned long long left = max_bs;
+		unsigned long long this_write;
 
 		do {
 			rs = get_buf_state(td);
@@ -2105,7 +2165,7 @@ void fill_io_buffer(struct thread_data *td, void *buf, unsigned int min_write,
 
 			if (perc) {
 				this_write = min_not_zero(min_write,
-						td->o.compress_chunk);
+						(unsigned long long) td->o.compress_chunk);
 
 				fill_random_buf_percentage(rs, buf, perc,
 					this_write, this_write,
@@ -2132,7 +2192,7 @@ void fill_io_buffer(struct thread_data *td, void *buf, unsigned int min_write,
  * "randomly" fill the buffer contents
  */
 void io_u_fill_buffer(struct thread_data *td, struct io_u *io_u,
-		      unsigned int min_write, unsigned int max_bs)
+		      unsigned long long min_write, unsigned long long max_bs)
 {
	io_u->buf_filled_len = 0;
	fill_io_buffer(td, io_u->buf, min_write, max_bs);
@@ -2141,7 +2201,7 @@ void io_u_fill_buffer(struct thread_data *td, struct io_u *io_u,
 
 static int do_sync_file_range(const struct thread_data *td, struct fio_file *f)
 {
-	off64_t offset, nbytes;
+	uint64_t offset, nbytes;
 
 	offset = f->first_write;
 	nbytes = f->last_write - f->first_write;
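
Notes on the patch follow. These are standalone sketches of the logic the
hunks above touch; any name or number not present in the diff is made up
for illustration and is not part of fio.

mark_random_map() now takes the offset and length explicitly and returns
the (possibly shortened) length instead of writing io_u->buflen directly,
which lets fill_io_u() keep the pre-ZBD offset for the map while adopting
the trimmed length. The sketch below replays the rounding arithmetic with
made-up numbers; axmap_set_nr() is stood in for by a hardcoded value,
since only the block math is of interest:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t min_bs = 4096;		/* stands in for td->o.min_bs[ddir] */
	uint64_t file_offset = 0;	/* stands in for f->file_offset */
	uint64_t offset = 12288;	/* io_u offset: 3 blocks in */
	uint64_t buflen = 10000;	/* deliberately not block-aligned */

	uint64_t block = (offset - file_offset) / min_bs;
	uint64_t nr_blocks = (buflen + min_bs - 1) / min_bs;	/* round up: 3 */

	assert(nr_blocks > 0);

	/* pretend axmap_set_nr() could only mark 2 of the 3 blocks
	 * before running into one that was already set */
	nr_blocks = 2;

	if (nr_blocks * min_bs < buflen)
		buflen = nr_blocks * min_bs;	/* io_u shrinks to 8192 */

	printf("block=%llu buflen=%llu\n",
	       (unsigned long long) block,
	       (unsigned long long) buflen);
	return 0;
}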
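In get_next_buflen(), the draw now starts at minbs instead of 1, so the raw
value can land above maxbs (up to minbs + maxbs - 1), which is what the
clamp added at the end of the loop catches. A minimal demonstration with
made-up minbs/maxbs and the worst-case random draw:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t frand_max = UINT32_MAX;	/* e.g. a 32-bit PRNG */
	unsigned long long minbs = 4096, maxbs = 65536;
	uint64_t r = frand_max;			/* worst-case draw */

	unsigned long long buflen = minbs +
		(unsigned long long) ((double) maxbs * (r / (frand_max + 1.0)));

	printf("raw buflen = %llu\n", buflen);	/* 69631: above maxbs */

	if (buflen > maxbs)
		buflen = maxbs;			/* the added clamp */
	printf("clamped    = %llu\n", buflen);
	return 0;
}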
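put_io_u(), requeue_io_u() and __get_io_u() all replace the unconditional
td_io_u_lock() with a needs_lock test sampled once at entry, presumably so
the io_u list lock is only paid for when async processing can actually
race, and so the lock/unlock decision cannot diverge within one call. A
generic sketch of the pattern with simplified stand-in types (struct td
and put_resource() are hypothetical, not fio's):

#include <pthread.h>
#include <stdbool.h>

struct td {
	bool async;		/* stands in for td_async_processing(td) */
	pthread_mutex_t lock;	/* stands in for td->io_u_lock */
	unsigned int free_cnt;
};

static void put_resource(struct td *td)
{
	const bool needs_lock = td->async;	/* sampled once, up front */

	if (needs_lock)
		pthread_mutex_lock(&td->lock);

	td->free_cnt++;		/* the actual list manipulation */

	if (needs_lock)
		pthread_mutex_unlock(&td->lock);
}

int main(void)
{
	struct td td = { .async = true, .lock = PTHREAD_MUTEX_INITIALIZER };

	put_resource(&td);	/* locked path */
	td.async = false;
	put_resource(&td);	/* lock-free path */
	return 0;
}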
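The latency_stable_count change is easiest to see in isolation. The toy
model below keeps only the fields and updates visible in these hunks; the
doubling ramp-up and the other branches of lat_target_success() and
__lat_target_failed() are elided or simplified, so the behavior is
illustrative only:

#include <stdio.h>

struct lat {
	unsigned int qd, qd_low, qd_high, stable;
};

/* simplified model of the ramp-down in __lat_target_failed() */
static void target_failed(struct lat *l)
{
	l->qd_high = l->qd;
	if (l->qd_low)
		l->qd_low--;
	l->qd = (l->qd + l->qd_low) / 2;
	l->stable = 0;		/* new: a failure breaks the streak */
}

/* simplified model of lat_target_success() with the new heuristic */
static void target_success(struct lat *l)
{
	l->qd_low = l->qd;
	if (l->qd + 1 == l->qd_high && ++l->stable >= 3) {
		l->qd_high++;	/* 3 successes in a row: probe one higher */
		l->stable = 0;
	}
	l->qd = (l->qd + l->qd_high) / 2;	/* bisect back up */
}

int main(void)
{
	struct lat l = { .qd = 16, .qd_low = 1, .qd_high = 32 };
	int i;

	target_failed(&l);	/* depth 32 missed target: qd drops to 8 */
	for (i = 0; i < 8; i++)
		target_success(&l);

	/* without the heuristic, qd would stay pinned at qd_high - 1;
	 * with it, qd_high creeps up after repeated successes */
	printf("qd=%u low=%u high=%u\n", l.qd, l.qd_low, l.qd_high);
	return 0;
}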