X-Git-Url: https://git.kernel.dk/?a=blobdiff_plain;f=io_u.c;h=ae1438fd665673e3077cc41c8dae0e4ace1b01c4;hb=d19c04d12b6996c4b9f6f4e27dd5a7570eea1ddc;hp=bee99c3798d8474afcfaaa71a0bc8d1a0c7de2d8;hpb=8135fe4dcb16776e4bfee14d33accfe2c915908d;p=fio.git

diff --git a/io_u.c b/io_u.c
index bee99c37..ae1438fd 100644
--- a/io_u.c
+++ b/io_u.c
@@ -557,10 +557,10 @@ static unsigned long long get_next_buflen(struct thread_data *td, struct io_u *i
 			for (i = 0; i < td->o.bssplit_nr[ddir]; i++) {
 				struct bssplit *bsp = &td->o.bssplit[ddir][i];
 
+				if (!bsp->perc)
+					continue;
 				buflen = bsp->bs;
 				perc += bsp->perc;
-				if (!perc)
-					break;
 				if ((r / perc <= frand_max / 100ULL) &&
 				    io_u_fits(td, io_u, buflen))
 					break;
@@ -606,7 +606,7 @@ static inline enum fio_ddir get_rand_ddir(struct thread_data *td)
 
 int io_u_quiesce(struct thread_data *td)
 {
-	int ret = 0, completed = 0;
+	int ret = 0, completed = 0, err = 0;
 
 	/*
 	 * We are going to sleep, ensure that we flush anything pending as
@@ -625,7 +625,7 @@ int io_u_quiesce(struct thread_data *td)
 		if (ret > 0)
 			completed += ret;
 		else if (ret < 0)
-			break;
+			err = ret;
 	}
 
 	if (td->flags & TD_F_REGROW_LOGS)
@@ -634,7 +634,7 @@ int io_u_quiesce(struct thread_data *td)
 	if (completed)
 		return completed;
 
-	return ret;
+	return err;
 }
 
 static enum fio_ddir rate_ddir(struct thread_data *td, enum fio_ddir ddir)
@@ -644,7 +644,7 @@ static enum fio_ddir rate_ddir(struct thread_data *td, enum fio_ddir ddir)
 	uint64_t now;
 
 	assert(ddir_rw(ddir));
-	now = utime_since_now(&td->start);
+	now = utime_since_now(&td->epoch);
 
 	/*
 	 * if rate_next_io_time is in the past, need to catch up to rate
@@ -746,6 +746,9 @@ static void set_rw_ddir(struct thread_data *td, struct io_u *io_u)
 {
 	enum fio_ddir ddir = get_rw_ddir(td);
 
+	if (td->o.zone_mode == ZONE_MODE_ZBD)
+		ddir = zbd_adjust_ddir(td, io_u, ddir);
+
 	if (td_trimwrite(td)) {
 		struct fio_file *f = io_u->file;
 		if (f->last_pos[DDIR_WRITE] == f->last_pos[DDIR_TRIM])
@@ -775,10 +778,7 @@ void put_io_u(struct thread_data *td, struct io_u *io_u)
 {
 	const bool needs_lock = td_async_processing(td);
 
-	if (io_u->post_submit) {
-		io_u->post_submit(io_u, io_u->error == 0);
-		io_u->post_submit = NULL;
-	}
+	zbd_put_io_u(io_u);
 
 	if (td->parent)
 		td = td->parent;
@@ -853,7 +853,7 @@ static void setup_strided_zone_mode(struct thread_data *td, struct io_u *io_u)
 	/*
 	 * See if it's time to switch to a new zone
 	 */
-	if (td->zone_bytes >= td->o.zone_size && td->o.zone_skip) {
+	if (td->zone_bytes >= td->o.zone_size) {
 		td->zone_bytes = 0;
 		f->file_offset += td->o.zone_range + td->o.zone_skip;
 
@@ -904,6 +904,8 @@ static int fill_io_u(struct thread_data *td, struct io_u *io_u)
 
 	if (td->o.zone_mode == ZONE_MODE_STRIDED)
 		setup_strided_zone_mode(td, io_u);
+	else if (td->o.zone_mode == ZONE_MODE_ZBD)
+		setup_zbd_zone_mode(td, io_u);
 
 	/*
 	 * No log, let the seq/rand engine retrieve the next buflen and
@@ -1340,10 +1342,7 @@ static long set_io_u_file(struct thread_data *td, struct io_u *io_u)
 		if (!fill_io_u(td, io_u))
 			break;
 
-		if (io_u->post_submit) {
-			io_u->post_submit(io_u, false);
-			io_u->post_submit = NULL;
-		}
+		zbd_put_io_u(io_u);
 
 		put_file_log(td, f);
 		td_io_close_file(td, f);
@@ -1392,6 +1391,7 @@ static bool __lat_target_failed(struct thread_data *td)
 		td->latency_qd_low--;
 
 	td->latency_qd = (td->latency_qd + td->latency_qd_low) / 2;
+	td->latency_stable_count = 0;
 
 	dprint(FD_RATE, "Ramped down: %d %d %d\n", td->latency_qd_low,
 			td->latency_qd, td->latency_qd_high);
@@ -1441,6 +1441,21 @@ static void lat_target_success(struct thread_data *td)
 
 	td->latency_qd_low = td->latency_qd;
 
+	if (td->latency_qd + 1 == td->latency_qd_high) {
+		/*
+		 * latency_qd will not increase on lat_target_success(), so
+		 * this state is called stable. If we stick with this queue
+		 * depth, the final latency is likely lower than the
+		 * latency_target. Fix this by increasing latency_qd_high
+		 * slowly, using a naive heuristic: if lat_target_success()
+		 * is hit 3 times in a row, increase latency_qd_high by 1.
+		 */
+		if (++td->latency_stable_count >= 3) {
+			td->latency_qd_high++;
+			td->latency_stable_count = 0;
+		}
+	}
+
 	/*
 	 * If we haven't failed yet, we double up to a failing value instead
 	 * of bisecting from highest possible queue depth. If we have set
@@ -1460,7 +1475,7 @@ static void lat_target_success(struct thread_data *td)
 	 * Same as last one, we are done. Let it run a latency cycle, so
 	 * we get only the results from the targeted depth.
 	 */
-	if (td->latency_qd == qd) {
+	if (!o->latency_run && td->latency_qd == qd) {
 		if (td->latency_end_run) {
 			dprint(FD_RATE, "We are done\n");
 			td->done = 1;
@@ -1545,7 +1560,7 @@ again:
 		assert(io_u->flags & IO_U_F_FREE);
 		io_u_clear(td, io_u, IO_U_F_FREE | IO_U_F_NO_FILE_PUT |
 				 IO_U_F_TRIMMED | IO_U_F_BARRIER |
-				 IO_U_F_VER_LIST);
+				 IO_U_F_VER_LIST | IO_U_F_PRIORITY);
 
 		io_u->error = 0;
 		io_u->acct_ddir = -1;
@@ -1834,7 +1849,7 @@ static void account_io_completion(struct thread_data *td, struct io_u *io_u,
 		unsigned long long tnsec;
 
 		tnsec = ntime_since(&io_u->start_time, &icd->time);
-		add_lat_sample(td, idx, tnsec, bytes, io_u->offset);
+		add_lat_sample(td, idx, tnsec, bytes, io_u->offset, io_u_is_prio(io_u));
 
 		if (td->flags & TD_F_PROFILE_OPS) {
 			struct prof_io_ops *ops = &td->prof_io_ops;
@@ -1853,7 +1868,7 @@ static void account_io_completion(struct thread_data *td, struct io_u *io_u,
 
 	if (ddir_rw(idx)) {
 		if (!td->o.disable_clat) {
-			add_clat_sample(td, idx, llnsec, bytes, io_u->offset);
+			add_clat_sample(td, idx, llnsec, bytes, io_u->offset, io_u_is_prio(io_u));
 			io_u_mark_latency(td, llnsec);
 		}
 
@@ -2095,7 +2110,7 @@ void io_u_queued(struct thread_data *td, struct io_u *io_u)
 			td = td->parent;
 
 		add_slat_sample(td, io_u->ddir, slat_time, io_u->xfer_buflen,
-				io_u->offset);
+				io_u->offset, io_u_is_prio(io_u));
 	}
 }
 
@@ -2186,7 +2201,7 @@ void io_u_fill_buffer(struct thread_data *td, struct io_u *io_u,
 static int do_sync_file_range(const struct thread_data *td,
 			      struct fio_file *f)
 {
-	off64_t offset, nbytes;
+	uint64_t offset, nbytes;
 
 	offset = f->first_write;
 	nbytes = f->last_write - f->first_write;
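
Editor's note on the get_next_buflen() hunk: the old code broke out of the
split loop while the cumulative percentage was still zero, which could leave
buflen set to a 0%-weight entry's block size (and risked a divide-by-zero in
the r / perc test); skipping zero-percent entries up front keeps the weighted
pick correct. Below is a minimal, self-contained sketch of the same
cumulative-percentage selection. pick_bs() and struct split are illustrative
stand-ins, not fio API, and the io_u_fits() capacity check is dropped for
brevity:

#include <stdint.h>

/* Illustrative stand-in for fio's struct bssplit table. */
struct split {
	unsigned long long bs;	/* block size in bytes */
	unsigned int perc;	/* weight in percent; 0 means "never pick" */
};

static unsigned long long pick_bs(const struct split *s, int nr,
				  uint64_t r, uint64_t frand_max)
{
	unsigned long long buflen = 0;
	unsigned int perc = 0;
	int i;

	for (i = 0; i < nr; i++) {
		if (!s[i].perc)
			continue;	/* 0% entries must not win the pick */
		buflen = s[i].bs;
		perc += s[i].perc;
		/*
		 * If r, scaled into [0, 100], lands inside the cumulative
		 * percentage covered so far, this entry is the pick.
		 */
		if (r / perc <= frand_max / 100ULL)
			break;
	}
	return buflen;
}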
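
Editor's note on the io_u_quiesce() hunks: a negative reap result no longer
aborts the drain loop. The error is remembered in err while the loop keeps
reaping, and a positive completion count still wins over the error at return
time. A hedged sketch of that shape follows; reap_one() is a made-up callback,
not td_io_getevents(), and the error path consuming one in-flight unit is a
simplification so the sketch is guaranteed to terminate:

/* reap_one() stand-in: returns nr of completions (>0) or -errno (<0). */
static int drain(int (*reap_one)(void *), void *ctx, int in_flight)
{
	int ret, completed = 0, err = 0;

	while (in_flight > 0) {
		ret = reap_one(ctx);
		if (ret > 0) {
			completed += ret;
			in_flight -= ret;
		} else if (ret < 0) {
			err = ret;	/* remember, but keep draining */
			in_flight--;	/* simplification, see note above */
		}
	}

	if (completed)
		return completed;	/* progress outranks a stale error */
	return err;
}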
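
Editor's note on the latency_stable_count hunks: lat_target_success() can pin
the queue depth at latency_qd_high - 1 indefinitely, so the upper bound is now
allowed to drift up by one after three consecutive on-target windows, and any
failed window resets the streak (see the __lat_target_failed() hunk). A toy
model of that ramp, using illustrative names rather than fio's thread_data
fields:

struct lat_state {
	unsigned int qd, qd_low, qd_high;
	unsigned int stable_count;
};

/* A latency window met the target at the current depth. */
static void on_success(struct lat_state *s)
{
	s->qd_low = s->qd;

	/*
	 * Depth is stuck just under the bound; after 3 on-target
	 * windows in a row, nudge the bound up by one.
	 */
	if (s->qd + 1 == s->qd_high && ++s->stable_count >= 3) {
		s->qd_high++;
		s->stable_count = 0;
	}

	s->qd = (s->qd + s->qd_high) / 2;	/* bisect upward */
}

/* A latency window missed the target. */
static void on_failure(struct lat_state *s)
{
	s->qd_high = s->qd;
	if (s->qd_low)
		s->qd_low--;
	s->qd = (s->qd + s->qd_low) / 2;	/* bisect downward */
	s->stable_count = 0;			/* streak broken */
}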