#include "err.h"
#include "lib/pow2.h"
#include "minmax.h"
+#include "zbd.h"
struct io_completion_data {
int nr; /* input */
/*
* Mark a given offset as used in the map.
*/
-static void mark_random_map(struct thread_data *td, struct io_u *io_u)
+static uint64_t mark_random_map(struct thread_data *td, struct io_u *io_u,
+ uint64_t offset, uint64_t buflen)
{
unsigned long long min_bs = td->o.min_bs[io_u->ddir];
struct fio_file *f = io_u->file;
unsigned long long nr_blocks;
uint64_t block;
- block = (io_u->offset - f->file_offset) / (uint64_t) min_bs;
- nr_blocks = (io_u->buflen + min_bs - 1) / min_bs;
+ block = (offset - f->file_offset) / (uint64_t) min_bs;
+ nr_blocks = (buflen + min_bs - 1) / min_bs;
+ assert(nr_blocks > 0);
- if (!(io_u->flags & IO_U_F_BUSY_OK))
+ if (!(io_u->flags & IO_U_F_BUSY_OK)) {
nr_blocks = axmap_set_nr(f->io_axmap, block, nr_blocks);
+ assert(nr_blocks > 0);
+ }
+
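+ /*
+ * axmap_set_nr() may have marked fewer blocks than requested if
+ * part of the range was already set; shrink the I/O so it only
+ * covers the blocks actually claimed.
+ */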
+ if ((nr_blocks * min_bs) < buflen)
+ buflen = nr_blocks * min_bs;
- if ((nr_blocks * min_bs) < io_u->buflen)
- io_u->buflen = nr_blocks * min_bs;
+ return buflen;
}
static uint64_t last_block(struct thread_data *td, struct fio_file *f,
log_err("fio: bug in offset generation: offset=%llu, b=%llu\n", (unsigned long long) offset, (unsigned long long) b);
ret = 1;
}
+ io_u->verify_offset = io_u->offset;
}
return ret;
return 1;
}
+ io_u->verify_offset = io_u->offset;
return 0;
}
for (i = 0; i < td->o.bssplit_nr[ddir]; i++) {
struct bssplit *bsp = &td->o.bssplit[ddir][i];
+ if (!bsp->perc)
+ continue;
buflen = bsp->bs;
perc += bsp->perc;
- if (!perc)
- break;
if ((r / perc <= frand_max / 100ULL) &&
io_u_fits(td, io_u, buflen))
break;
power_2 = is_power_of_2(minbs);
if (!td->o.bs_unaligned && power_2)
buflen &= ~(minbs - 1);
- else if (!td->o.bs_unaligned && !power_2)
- buflen -= buflen % minbs;
+ else if (!td->o.bs_unaligned && !power_2)
+ buflen -= buflen % minbs;
+ if (buflen > maxbs)
+ buflen = maxbs;
} while (!io_u_fits(td, io_u, buflen));
return buflen;
int io_u_quiesce(struct thread_data *td)
{
- int completed = 0;
+ int ret = 0, completed = 0, err = 0;
/*
* We are going to sleep, ensure that we flush anything pending as
td_io_commit(td);
while (td->io_u_in_flight) {
- int ret;
-
ret = io_u_queued_complete(td, 1);
if (ret > 0)
completed += ret;
+ else if (ret < 0)
+ err = ret;
}
if (td->flags & TD_F_REGROW_LOGS)
regrow_logs(td);
- return completed;
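+ /* Report progress if any I/O completed; otherwise surface the last reap error. */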
+ if (completed)
+ return completed;
+
+ return err;
}
static enum fio_ddir rate_ddir(struct thread_data *td, enum fio_ddir ddir)
uint64_t now;
assert(ddir_rw(ddir));
- now = utime_since_now(&td->start);
+ now = utime_since_now(&td->epoch);
/*
* if rate_next_io_time is in the past, need to catch up to rate
if (td->o.io_submit_mode == IO_MODE_INLINE)
io_u_quiesce(td);
+ if (td->o.timeout && ((usec + now) > td->o.timeout)) {
+ /*
+ * If the job timeout has already passed, stop issuing new I/O
+ * rather than computing a negative sleep time below.
+ */
+ if (now > td->o.timeout) {
+ ddir = DDIR_INVAL;
+ return ddir;
+ }
+ usec = td->o.timeout - now;
+ }
usec_sleep(td, usec);
+
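+ /*
+ * Re-check the clock after sleeping: if the job timeout has now
+ * expired, or termination was requested, return DDIR_INVAL so the
+ * caller stops issuing I/O.
+ */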
+ now = utime_since_now(&td->epoch);
+ if ((td->o.timeout && (now > td->o.timeout)) || td->terminate)
+ ddir = DDIR_INVAL;
+
return ddir;
}
{
enum fio_ddir ddir = get_rw_ddir(td);
+ if (td->o.zone_mode == ZONE_MODE_ZBD)
+ ddir = zbd_adjust_ddir(td, io_u, ddir);
+
if (td_trimwrite(td)) {
struct fio_file *f = io_u->file;
if (f->last_pos[DDIR_WRITE] == f->last_pos[DDIR_TRIM])
void put_io_u(struct thread_data *td, struct io_u *io_u)
{
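+ /*
+ * Only take the io_u lock when asynchronous processing (e.g. async
+ * verify threads) means another thread can touch the io_u lists.
+ */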
+ const bool needs_lock = td_async_processing(td);
+
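+ /* Let the zoned-device code release any zone it still holds for this io_u. */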
+ zbd_put_io_u(td, io_u);
+
if (td->parent)
td = td->parent;
- td_io_u_lock(td);
+ if (needs_lock)
+ __td_io_u_lock(td);
if (io_u->file && !(io_u->flags & IO_U_F_NO_FILE_PUT))
put_file_log(td, io_u->file);
}
io_u_qpush(&td->io_u_freelist, io_u);
td_io_u_free_notify(td);
- td_io_u_unlock(td);
+
+ if (needs_lock)
+ __td_io_u_unlock(td);
}
void clear_io_u(struct thread_data *td, struct io_u *io_u)
void requeue_io_u(struct thread_data *td, struct io_u **io_u)
{
+ const bool needs_lock = td_async_processing(td);
struct io_u *__io_u = *io_u;
enum fio_ddir ddir = acct_ddir(__io_u);
if (td->parent)
td = td->parent;
- td_io_u_lock(td);
+ if (needs_lock)
+ __td_io_u_lock(td);
io_u_set(td, __io_u, IO_U_F_FREE);
if ((__io_u->flags & IO_U_F_FLIGHT) && ddir_rw(ddir))
io_u_rpush(&td->io_u_requeues, __io_u);
td_io_u_free_notify(td);
- td_io_u_unlock(td);
+
+ if (needs_lock)
+ __td_io_u_unlock(td);
+
*io_u = NULL;
}
/*
* See if it's time to switch to a new zone
*/
- if (td->zone_bytes >= td->o.zone_size && td->o.zone_skip) {
+ if (td->zone_bytes >= td->o.zone_size) {
td->zone_bytes = 0;
f->file_offset += td->o.zone_range + td->o.zone_skip;
static int fill_io_u(struct thread_data *td, struct io_u *io_u)
{
bool is_random;
+ uint64_t offset;
+ enum io_u_action ret;
if (td_ioengine_flagged(td, FIO_NOIO))
goto out;
set_rw_ddir(td, io_u);
+ if (io_u->ddir == DDIR_INVAL) {
+ dprint(FD_IO, "invalid direction received ddir = %d", io_u->ddir);
+ return 1;
+ }
/*
* fsync() or fdatasync() or trim etc, we are done
*/
if (td->o.zone_mode == ZONE_MODE_STRIDED)
setup_strided_zone_mode(td, io_u);
+ else if (td->o.zone_mode == ZONE_MODE_ZBD)
+ setup_zbd_zone_mode(td, io_u);
/*
* No log, let the seq/rand engine retrieve the next buflen and
return 1;
}
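+ /*
+ * Remember the offset chosen above: zbd_adjust_block() may move
+ * io_u->offset (e.g. to a zone write pointer) or signal EOF, while
+ * the random map below is marked at the original location.
+ */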
+ offset = io_u->offset;
+ if (td->o.zone_mode == ZONE_MODE_ZBD) {
+ ret = zbd_adjust_block(td, io_u);
+ if (ret == io_u_eof)
+ return 1;
+ }
+
if (io_u->offset + io_u->buflen > io_u->file->real_file_size) {
dprint(FD_IO, "io_u %p, off=0x%llx + len=0x%llx exceeds file size=0x%llx\n",
io_u,
* mark entry before potentially trimming io_u
*/
if (td_random(td) && file_randommap(td, io_u->file))
- mark_random_map(td, io_u);
+ io_u->buflen = mark_random_map(td, io_u, offset, io_u->buflen);
out:
dprint_io_u(io_u, "fill");
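+ /* Snapshot the offset for verification; the IO engine may modify io_u->offset later. */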
+ io_u->verify_offset = io_u->offset;
td->zone_bytes += io_u->buflen;
return 0;
}
break;
case 1 ... 4:
idx = 1;
+ fallthrough;
case 0:
break;
}
break;
case 2 ... 3:
idx = 1;
+ fallthrough;
case 1:
break;
}
break;
case 2 ... 3:
idx = 1;
+ fallthrough;
case 0 ... 1:
break;
}
break;
case 2 ... 3:
idx = 1;
+ fallthrough;
case 0 ... 1:
break;
}
break;
case 2 ... 3:
idx = 1;
+ fallthrough;
case 0 ... 1:
break;
}
if (f && fio_file_open(f) && !fio_file_closing(f)) {
if (td->o.file_service_type == FIO_FSERVICE_SEQ)
goto out;
- if (td->file_service_left--)
- goto out;
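+ /* Only consume a service credit while one remains; the old unconditional post-decrement could wrap the counter. */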
+ if (td->file_service_left) {
+ td->file_service_left--;
+ goto out;
+ }
}
if (td->o.file_service_type == FIO_FSERVICE_RR ||
if (!fill_io_u(td, io_u))
break;
+ zbd_put_io_u(td, io_u);
+
put_file_log(td, f);
td_io_close_file(td, f);
io_u->file = NULL;
return 0;
}
-static void lat_fatal(struct thread_data *td, struct io_completion_data *icd,
+static void lat_fatal(struct thread_data *td, struct io_u *io_u, struct io_completion_data *icd,
unsigned long long tnsec, unsigned long long max_nsec)
{
- if (!td->error)
- log_err("fio: latency of %llu nsec exceeds specified max (%llu nsec)\n", tnsec, max_nsec);
+ if (!td->error) {
+ log_err("fio: latency of %llu nsec exceeds specified max (%llu nsec): %s %s %llu %llu\n",
+ tnsec, max_nsec,
+ io_u->file->file_name,
+ io_ddir_name(io_u->ddir),
+ io_u->offset, io_u->buflen);
+ }
td_verror(td, ETIMEDOUT, "max latency exceeded");
icd->error = ETIMEDOUT;
}
td->latency_qd_low--;
td->latency_qd = (td->latency_qd + td->latency_qd_low) / 2;
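+ /* Ramping down restarts the stable streak lat_target_success() uses to grow latency_qd_high. */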
+ td->latency_stable_count = 0;
dprint(FD_RATE, "Ramped down: %d %d %d\n", td->latency_qd_low, td->latency_qd, td->latency_qd_high);
td->latency_qd_low = td->latency_qd;
+ if (td->latency_qd + 1 == td->latency_qd_high) {
+ /*
+ * latency_qd will not increase on lat_target_success(), so it is
+ * considered stable. If we stick with this queue depth, the
+ * final latency is likely lower than latency_target. Fix
+ * this by increasing latency_qd_high slowly. Use a naive
+ * heuristic here. If we get lat_target_success() 3 times
+ * in a row, increase latency_qd_high by 1.
+ */
+ if (++td->latency_stable_count >= 3) {
+ td->latency_qd_high++;
+ td->latency_stable_count = 0;
+ }
+ }
+
/*
* If we haven't failed yet, we double up to a failing value instead
* of bisecting from highest possible queue depth. If we have set
* Same as last one, we are done. Let it run a latency cycle, so
* we get only the results from the targeted depth.
*/
- if (td->latency_qd == qd) {
+ if (!o->latency_run && td->latency_qd == qd) {
if (td->latency_end_run) {
dprint(FD_RATE, "We are done\n");
td->done = 1;
struct io_u *__get_io_u(struct thread_data *td)
{
+ const bool needs_lock = td_async_processing(td);
struct io_u *io_u = NULL;
int ret;
if (td->stop_io)
return NULL;
- td_io_u_lock(td);
+ if (needs_lock)
+ __td_io_u_lock(td);
again:
- if (!io_u_rempty(&td->io_u_requeues))
+ if (!io_u_rempty(&td->io_u_requeues)) {
io_u = io_u_rpop(&td->io_u_requeues);
- else if (!queue_full(td)) {
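+ /* A requeued io_u can still carry the resid from a short I/O; clear it before reuse. */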
+ io_u->resid = 0;
+ } else if (!queue_full(td)) {
io_u = io_u_qpop(&td->io_u_freelist);
io_u->file = NULL;
assert(io_u->flags & IO_U_F_FREE);
io_u_clear(td, io_u, IO_U_F_FREE | IO_U_F_NO_FILE_PUT |
IO_U_F_TRIMMED | IO_U_F_BARRIER |
- IO_U_F_VER_LIST);
+ IO_U_F_VER_LIST | IO_U_F_HIGH_PRIO);
io_u->error = 0;
io_u->acct_ddir = -1;
assert(!(td->flags & TD_F_CHILD));
ret = pthread_cond_wait(&td->free_cond, &td->io_u_lock);
assert(ret == 0);
- goto again;
+ if (!td->error)
+ goto again;
}
- td_io_u_unlock(td);
+ if (needs_lock)
+ __td_io_u_unlock(td);
+
return io_u;
}
io_u->xfer_buf = io_u->buf;
io_u->xfer_buflen = io_u->buflen;
+ /*
+ * Remember the issuing context priority. The IO engine may change this.
+ */
+ io_u->ioprio = td->ioprio;
out:
assert(io_u->file);
if (!td_io_prep(td, io_u)) {
|| td->o.gtod_reduce;
}
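+/*
+ * Record a completed trim in the block info map, unless an earlier trim
+ * of this block already failed.
+ */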
+static void trim_block_info(struct thread_data *td, struct io_u *io_u)
+{
+ uint32_t *info = io_u_block_info(td, io_u);
+
+ if (BLOCK_INFO_STATE(*info) >= BLOCK_STATE_TRIM_FAILURE)
+ return;
+
+ *info = BLOCK_INFO(BLOCK_STATE_TRIMMED, BLOCK_INFO_TRIMS(*info) + 1);
+}
+
static void account_io_completion(struct thread_data *td, struct io_u *io_u,
struct io_completion_data *icd,
const enum fio_ddir idx, unsigned int bytes)
unsigned long long tnsec;
tnsec = ntime_since(&io_u->start_time, &icd->time);
- add_lat_sample(td, idx, tnsec, bytes, io_u->offset);
+ add_lat_sample(td, idx, tnsec, bytes, io_u->offset,
+ io_u->ioprio, io_u_is_high_prio(io_u));
if (td->flags & TD_F_PROFILE_OPS) {
struct prof_io_ops *ops = &td->prof_io_ops;
icd->error = ops->io_u_lat(td, tnsec);
}
- if (td->o.max_latency && tnsec > td->o.max_latency)
- lat_fatal(td, icd, tnsec, td->o.max_latency);
- if (td->o.latency_target && tnsec > td->o.latency_target) {
- if (lat_target_failed(td))
- lat_fatal(td, icd, tnsec, td->o.latency_target);
+ if (ddir_rw(idx)) {
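+ /* max_latency is tracked per data direction, so only check it for rw directions. */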
+ if (td->o.max_latency[idx] && tnsec > td->o.max_latency[idx])
+ lat_fatal(td, io_u, icd, tnsec, td->o.max_latency[idx]);
+ if (td->o.latency_target && tnsec > td->o.latency_target) {
+ if (lat_target_failed(td))
+ lat_fatal(td, io_u, icd, tnsec, td->o.latency_target);
+ }
}
}
if (ddir_rw(idx)) {
if (!td->o.disable_clat) {
- add_clat_sample(td, idx, llnsec, bytes, io_u->offset);
+ add_clat_sample(td, idx, llnsec, bytes, io_u->offset,
+ io_u->ioprio, io_u_is_high_prio(io_u));
io_u_mark_latency(td, llnsec);
}
} else if (ddir_sync(idx) && !td->o.disable_clat)
add_sync_clat_sample(&td->ts, llnsec);
- if (td->ts.nr_block_infos && io_u->ddir == DDIR_TRIM) {
- uint32_t *info = io_u_block_info(td, io_u);
- if (BLOCK_INFO_STATE(*info) < BLOCK_STATE_TRIM_FAILURE) {
- if (io_u->ddir == DDIR_TRIM) {
- *info = BLOCK_INFO(BLOCK_STATE_TRIMMED,
- BLOCK_INFO_TRIMS(*info) + 1);
- } else if (io_u->ddir == DDIR_WRITE) {
- *info = BLOCK_INFO_SET_STATE(BLOCK_STATE_WRITTEN,
- *info);
- }
- }
- }
+ if (td->ts.nr_block_infos && io_u->ddir == DDIR_TRIM)
+ trim_block_info(td, io_u);
}
static void file_log_write_comp(const struct thread_data *td, struct fio_file *f,
if (io_u->error)
unlog_io_piece(td, io_u);
else {
- io_u->ipo->flags &= ~IP_F_IN_FLIGHT;
- write_barrier();
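+ /*
+ * A release store replaces the explicit write barrier: earlier
+ * updates to the io piece must be visible before the in-flight
+ * flag is observed as cleared.
+ */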
+ atomic_store_release(&io_u->ipo->flags,
+ io_u->ipo->flags & ~IP_F_IN_FLIGHT);
}
}
td->last_ddir = ddir;
if (!io_u->error && ddir_rw(ddir)) {
- unsigned long long bytes = io_u->buflen - io_u->resid;
+ unsigned long long bytes = io_u->xfer_buflen - io_u->resid;
int ret;
+ /*
+ * Make sure we notice short I/O here and requeue it
+ * appropriately!
+ */
+ if (bytes && io_u->resid) {
+ io_u->xfer_buflen = io_u->resid;
+ io_u->xfer_buf += bytes;
+ io_u->offset += bytes;
+ td->ts.short_io_u[io_u->ddir]++;
+ if (io_u->offset < io_u->file->real_file_size) {
+ requeue_io_u(td, io_u_ptr);
+ return;
+ }
+ }
+
td->io_blocks[ddir]++;
td->io_bytes[ddir] += bytes;
td = td->parent;
add_slat_sample(td, io_u->ddir, slat_time, io_u->xfer_buflen,
- io_u->offset);
+ io_u->offset, io_u->ioprio);
}
}
static struct frand_state *get_buf_state(struct thread_data *td)
{
unsigned int v;
+ unsigned long long i;
if (!td->o.dedupe_percentage)
return &td->buf_state;
v = rand_between(&td->dedupe_state, 1, 100);
if (v <= td->o.dedupe_percentage)
- return &td->buf_state_prev;
+ switch (td->o.dedupe_mode) {
+ case DEDUPE_MODE_REPEAT:
+ /*
+ * The caller advances the returned frand_state, so hand back a
+ * copy of buf_state_prev rather than the state itself; otherwise
+ * a later request for a deduped buffer could end up generating a
+ * unique one instead.
+ */
+ frand_copy(&td->buf_state_ret, &td->buf_state_prev);
+ return &td->buf_state_ret;
+ case DEDUPE_MODE_WORKING_SET:
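+ /*
+ * Pick one of the precomputed working-set seeds at random so
+ * deduped buffers are drawn only from that fixed set of pages.
+ */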
+ i = rand_between(&td->dedupe_working_set_index_state, 0, td->num_unique_pages - 1);
+ frand_copy(&td->buf_state_ret, &td->dedupe_working_set_states[i]);
+ return &td->buf_state_ret;
+ default:
+ log_err("unexpected dedupe mode %u\n", td->o.dedupe_mode);
+ assert(0);
+ }
return &td->buf_state;
}
static int do_sync_file_range(const struct thread_data *td,
struct fio_file *f)
{
- off64_t offset, nbytes;
+ uint64_t offset, nbytes;
offset = f->first_write;
nbytes = f->last_write - f->first_write;
struct fio_file *f = io_u->file;
int ret;
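+ /*
+ * For zonemode=zbd the trim may be handled entirely by the zoned
+ * block device code (io_u_completed); otherwise fall through to a
+ * regular os_trim(), or fail the io_u on error.
+ */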
+ if (td->o.zone_mode == ZONE_MODE_ZBD) {
+ ret = zbd_do_io_u_trim(td, io_u);
+ if (ret == io_u_completed)
+ return io_u->xfer_buflen;
+ if (ret)
+ goto err;
+ }
+
ret = os_trim(f, io_u->offset, io_u->xfer_buflen);
if (!ret)
return io_u->xfer_buflen;
+err:
io_u->error = ret;
return 0;
#endif