#include <unistd.h>
-#include <fcntl.h>
#include <string.h>
-#include <signal.h>
-#include <time.h>
#include <assert.h>
#include "fio.h"
-#include "hash.h"
#include "verify.h"
#include "trim.h"
#include "lib/rand.h"
#include "err.h"
#include "lib/pow2.h"
#include "minmax.h"
+#include "zbd.h"
struct io_completion_data {
int nr; /* input */
/*
* Mark a given offset as used in the map.
*/
-static void mark_random_map(struct thread_data *td, struct io_u *io_u)
+static uint64_t mark_random_map(struct thread_data *td, struct io_u *io_u,
+ uint64_t offset, uint64_t buflen)
{
- unsigned int min_bs = td->o.rw_min_bs;
+ unsigned long long min_bs = td->o.min_bs[io_u->ddir];
struct fio_file *f = io_u->file;
- unsigned int nr_blocks;
+ unsigned long long nr_blocks;
uint64_t block;
- block = (io_u->offset - f->file_offset) / (uint64_t) min_bs;
- nr_blocks = (io_u->buflen + min_bs - 1) / min_bs;
+ block = (offset - f->file_offset) / (uint64_t) min_bs;
+ nr_blocks = (buflen + min_bs - 1) / min_bs;
+ assert(nr_blocks > 0);
- if (!(io_u->flags & IO_U_F_BUSY_OK))
+ if (!(io_u->flags & IO_U_F_BUSY_OK)) {
nr_blocks = axmap_set_nr(f->io_axmap, block, nr_blocks);
+ assert(nr_blocks > 0);
+ }
+
-	if ((nr_blocks * min_bs) < io_u->buflen)
-		io_u->buflen = nr_blocks * min_bs;
+	if ((nr_blocks * min_bs) < buflen)
+		buflen = nr_blocks * min_bs;
+
+	return buflen;
}
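/*
 * Editor's note: an illustrative, standalone model of the arithmetic
 * above, not part of the patch. The hypothetical marked_blocks value
 * stands in for what axmap_set_nr() reports; the point is that buflen
 * is rounded up to whole min_bs blocks, then shrunk again if only part
 * of the range could be marked as used.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t model_mark_map(uint64_t offset, uint64_t buflen,
			       uint64_t file_offset, uint64_t min_bs,
			       uint64_t marked_blocks)
{
	uint64_t block = (offset - file_offset) / min_bs;
	uint64_t nr_blocks = (buflen + min_bs - 1) / min_bs; /* round up */

	(void) block;	/* the real code hands this to the axmap */
	if (marked_blocks < nr_blocks)
		nr_blocks = marked_blocks;
	if (nr_blocks * min_bs < buflen)
		buflen = nr_blocks * min_bs;
	return buflen;
}

int main(void)
{
	/* 10240 bytes with 4k blocks needs 3 blocks; if only 2 could be
	 * marked, the I/O is trimmed to 8192 bytes */
	printf("%llu\n", (unsigned long long)
	       model_mark_map(0, 10240, 0, 4096, 2));
	return 0;
}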
static uint64_t last_block(struct thread_data *td, struct fio_file *f,
if (max_size > f->real_file_size)
max_size = f->real_file_size;
- if (td->o.zone_range)
+ if (td->o.zone_mode == ZONE_MODE_STRIDED && td->o.zone_range)
max_size = td->o.zone_range;
if (td->o.min_bs[ddir] > td->o.ba[ddir])
return max_blocks;
}
-struct rand_off {
- struct flist_head list;
- uint64_t off;
-};
-
static int __get_next_rand_offset(struct thread_data *td, struct fio_file *f,
enum fio_ddir ddir, uint64_t *b,
uint64_t lastb)
return 0;
}
+static int __get_next_rand_offset_zoned_abs(struct thread_data *td,
+ struct fio_file *f,
+ enum fio_ddir ddir, uint64_t *b)
+{
+ struct zone_split_index *zsi;
+ uint64_t lastb, send, stotal;
+ unsigned int v;
+
+ lastb = last_block(td, f, ddir);
+ if (!lastb)
+ return 1;
+
+ if (!td->o.zone_split_nr[ddir]) {
+bail:
+ return __get_next_rand_offset(td, f, ddir, b, lastb);
+ }
+
+ /*
+ * Generate a value, v, between 1 and 100, both inclusive
+ */
+ v = rand_between(&td->zone_state, 1, 100);
+
+ /*
+ * Find our generated table. 'send' is the end block of this zone,
+ * 'stotal' is our start offset.
+ */
+ zsi = &td->zone_state_index[ddir][v - 1];
+ stotal = zsi->size_prev / td->o.ba[ddir];
+ send = zsi->size / td->o.ba[ddir];
+
+ /*
+ * Should never happen
+ */
+ if (send == -1U) {
+ if (!fio_did_warn(FIO_WARN_ZONED_BUG))
+ log_err("fio: bug in zoned generation\n");
+ goto bail;
+ } else if (send > lastb) {
+ /*
+ * This happens if the user specifies ranges that exceed
+ * the file/device size. We can't handle that gracefully,
+ * so error and exit.
+ */
+ log_err("fio: zoned_abs sizes exceed file size\n");
+ return 1;
+ }
+
+ /*
+ * Generate index from 0..send-stotal
+ */
+ if (__get_next_rand_offset(td, f, ddir, b, send - stotal) == 1)
+ return 1;
+
+ *b += stotal;
+ return 0;
+}
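/*
 * Editor's note: an illustrative sketch of the zoned_abs lookup, not
 * part of the patch. A 100-entry table maps a 1..100 draw onto a
 * cumulative block range, skewing access toward parts of the device;
 * rand() stands in for fio's rand_between()/__get_next_rand_offset(),
 * and the 80/20 split below is made up.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct range { uint64_t start, end; };	/* [start, end) in blocks */

int main(void)
{
	static struct range table[100];
	int i;

	/* 80% of accesses hit blocks [0, 100), 20% hit [100, 1000) */
	for (i = 0; i < 80; i++)
		table[i] = (struct range){ 0, 100 };
	for (; i < 100; i++)
		table[i] = (struct range){ 100, 1000 };

	unsigned int v = 1 + rand() % 100;	/* 1..100, both inclusive */
	struct range *r = &table[v - 1];
	uint64_t b = r->start + rand() % (r->end - r->start);

	printf("v=%u -> block %llu\n", v, (unsigned long long) b);
	return 0;
}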
+
static int __get_next_rand_offset_zoned(struct thread_data *td,
struct fio_file *f, enum fio_ddir ddir,
uint64_t *b)
{
unsigned int v, send, stotal;
uint64_t offset, lastb;
- static int warned;
struct zone_split_index *zsi;
lastb = last_block(td, f, ddir);
/*
* Generate a value, v, between 1 and 100, both inclusive
*/
- v = rand32_between(&td->zone_state, 1, 100);
+ v = rand_between(&td->zone_state, 1, 100);
zsi = &td->zone_state_index[ddir][v - 1];
stotal = zsi->size_perc_prev;
* Should never happen
*/
if (send == -1U) {
- if (!warned) {
+ if (!fio_did_warn(FIO_WARN_ZONED_BUG))
log_err("fio: bug in zoned generation\n");
- warned = 1;
- }
goto bail;
}
return 0;
}
-static int flist_cmp(void *data, struct flist_head *a, struct flist_head *b)
-{
- struct rand_off *r1 = flist_entry(a, struct rand_off, list);
- struct rand_off *r2 = flist_entry(b, struct rand_off, list);
-
- return r1->off - r2->off;
-}
-
-static int get_off_from_method(struct thread_data *td, struct fio_file *f,
- enum fio_ddir ddir, uint64_t *b)
+static int get_next_rand_offset(struct thread_data *td, struct fio_file *f,
+ enum fio_ddir ddir, uint64_t *b)
{
if (td->o.random_distribution == FIO_RAND_DIST_RANDOM) {
uint64_t lastb;
return __get_next_rand_offset_gauss(td, f, ddir, b);
else if (td->o.random_distribution == FIO_RAND_DIST_ZONED)
return __get_next_rand_offset_zoned(td, f, ddir, b);
+ else if (td->o.random_distribution == FIO_RAND_DIST_ZONED_ABS)
+ return __get_next_rand_offset_zoned_abs(td, f, ddir, b);
log_err("fio: unknown random distribution: %d\n", td->o.random_distribution);
return 1;
}
-/*
- * Sort the reads for a verify phase in batches of verifysort_nr, if
- * specified.
- */
-static inline bool should_sort_io(struct thread_data *td)
-{
- if (!td->o.verifysort_nr || !td->o.do_verify)
- return false;
- if (!td_random(td))
- return false;
- if (td->runstate != TD_VERIFYING)
- return false;
- if (td->o.random_generator == FIO_RAND_GEN_TAUSWORTHE ||
- td->o.random_generator == FIO_RAND_GEN_TAUSWORTHE64)
- return false;
-
- return true;
-}
-
static bool should_do_random(struct thread_data *td, enum fio_ddir ddir)
{
unsigned int v;
if (td->o.perc_rand[ddir] == 100)
return true;
- v = rand32_between(&td->seq_rand_state[ddir], 1, 100);
+ v = rand_between(&td->seq_rand_state[ddir], 1, 100);
return v <= td->o.perc_rand[ddir];
}
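/*
 * Editor's note: a minimal model of the percentage check above, not
 * part of the patch; rand() stands in for fio's seeded rand_between().
 * A draw in [1, 100] compared with <= perc succeeds perc% of the time.
 */
#include <stdbool.h>
#include <stdlib.h>

static bool chance(unsigned int perc)
{
	if (perc == 100)
		return true;
	return (unsigned int) (1 + rand() % 100) <= perc;
}

int main(void)
{
	return chance(30) ? 0 : 1;	/* exits 0 about 30% of the time */
}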
-static int get_next_rand_offset(struct thread_data *td, struct fio_file *f,
- enum fio_ddir ddir, uint64_t *b)
+static void loop_cache_invalidate(struct thread_data *td, struct fio_file *f)
{
- struct rand_off *r;
- int i, ret = 1;
-
- if (!should_sort_io(td))
- return get_off_from_method(td, f, ddir, b);
-
- if (!flist_empty(&td->next_rand_list)) {
-fetch:
- r = flist_first_entry(&td->next_rand_list, struct rand_off, list);
- flist_del(&r->list);
- *b = r->off;
- free(r);
- return 0;
- }
-
- for (i = 0; i < td->o.verifysort_nr; i++) {
- r = malloc(sizeof(*r));
+ struct thread_options *o = &td->o;
- ret = get_off_from_method(td, f, ddir, &r->off);
- if (ret) {
- free(r);
- break;
- }
+ if (o->invalidate_cache && !o->odirect) {
+ int fio_unused ret;
- flist_add(&r->list, &td->next_rand_list);
+ ret = file_invalidate_cache(td, f);
}
-
- if (ret && !i)
- return ret;
-
- assert(!flist_empty(&td->next_rand_list));
- flist_sort(NULL, &td->next_rand_list, flist_cmp);
- goto fetch;
}
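/*
 * Editor's note: a hedged sketch of what the cache invalidation above
 * amounts to for regular files, not part of the patch. fio's
 * file_invalidate_cache() handles more cases (mmap, block devices);
 * the portable core for a plain file is POSIX_FADV_DONTNEED.
 */
#include <fcntl.h>

static int drop_page_cache(int fd, off_t off, off_t len)
{
	/* hint the kernel that cached pages in [off, off+len) can go */
	return posix_fadvise(fd, off, len, POSIX_FADV_DONTNEED);
}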
static int get_next_rand_block(struct thread_data *td, struct fio_file *f,
if (td->o.time_based ||
(td->o.file_service_type & __FIO_FSERVICE_NONUNIFORM)) {
fio_file_reset(td, f);
+ loop_cache_invalidate(td, f);
if (!get_next_rand_offset(td, f, ddir, b))
return 0;
}
assert(ddir_rw(ddir));
+ /*
+	 * If we reach the end for a time-based run, reset back to the
+	 * start offset and invalidate the cache, if we need to.
+ */
if (f->last_pos[ddir] >= f->io_size + get_start_offset(td, f) &&
o->time_based) {
- struct thread_options *o = &td->o;
- uint64_t io_size = f->io_size + (f->io_size % o->min_bs[ddir]);
-
- if (io_size > f->last_pos[ddir])
- f->last_pos[ddir] = 0;
- else
- f->last_pos[ddir] = f->last_pos[ddir] - io_size;
+ f->last_pos[ddir] = f->file_offset;
+ loop_cache_invalidate(td, f);
}
if (f->last_pos[ddir] < f->real_file_size) {
uint64_t pos;
- if (f->last_pos[ddir] == f->file_offset && o->ddir_seq_add < 0) {
+ /*
+ * Only rewind if we already hit the end
+ */
+ if (f->last_pos[ddir] == f->file_offset &&
+ f->file_offset && o->ddir_seq_add < 0) {
if (f->real_file_size > f->io_size)
f->last_pos[ddir] = f->io_size;
else
static int get_next_block(struct thread_data *td, struct io_u *io_u,
enum fio_ddir ddir, int rw_seq,
- unsigned int *is_random)
+ bool *is_random)
{
struct fio_file *f = io_u->file;
uint64_t b, offset;
if (td_random(td)) {
if (should_do_random(td, ddir)) {
ret = get_next_rand_block(td, f, ddir, &b);
- *is_random = 1;
+ *is_random = true;
} else {
- *is_random = 0;
+ *is_random = false;
io_u_set(td, io_u, IO_U_F_BUSY_OK);
ret = get_next_seq_offset(td, f, ddir, &offset);
if (ret)
ret = get_next_rand_block(td, f, ddir, &b);
}
} else {
- *is_random = 0;
+ *is_random = false;
ret = get_next_seq_offset(td, f, ddir, &offset);
}
} else {
io_u_set(td, io_u, IO_U_F_BUSY_OK);
- *is_random = 0;
+ *is_random = false;
if (td->o.rw_seq == RW_SEQ_SEQ) {
ret = get_next_seq_offset(td, f, ddir, &offset);
if (ret) {
ret = get_next_rand_block(td, f, ddir, &b);
- *is_random = 0;
+ *is_random = false;
}
} else if (td->o.rw_seq == RW_SEQ_IDENT) {
if (f->last_start[ddir] != -1ULL)
* until we find a free one. For sequential io, just return the end of
* the last io issued.
*/
-static int __get_next_offset(struct thread_data *td, struct io_u *io_u,
- unsigned int *is_random)
+static int get_next_offset(struct thread_data *td, struct io_u *io_u,
+ bool *is_random)
{
struct fio_file *f = io_u->file;
enum fio_ddir ddir = io_u->ddir;
return 0;
}
-static int get_next_offset(struct thread_data *td, struct io_u *io_u,
- unsigned int *is_random)
-{
- if (td->flags & TD_F_PROFILE_OPS) {
- struct prof_io_ops *ops = &td->prof_io_ops;
-
- if (ops->fill_io_u_off)
- return ops->fill_io_u_off(td, io_u, is_random);
- }
-
- return __get_next_offset(td, io_u, is_random);
-}
-
static inline bool io_u_fits(struct thread_data *td, struct io_u *io_u,
- unsigned int buflen)
+ unsigned long long buflen)
{
struct fio_file *f = io_u->file;
return io_u->offset + buflen <= f->io_size + get_start_offset(td, f);
}
-static unsigned int __get_next_buflen(struct thread_data *td, struct io_u *io_u,
- unsigned int is_random)
+static unsigned long long get_next_buflen(struct thread_data *td, struct io_u *io_u,
+ bool is_random)
{
int ddir = io_u->ddir;
- unsigned int buflen = 0;
- unsigned int minbs, maxbs;
+ unsigned long long buflen = 0;
+ unsigned long long minbs, maxbs;
uint64_t frand_max, r;
bool power_2;
assert(ddir_rw(ddir));
if (td->o.bs_is_seq_rand)
- ddir = is_random ? DDIR_WRITE: DDIR_READ;
+ ddir = is_random ? DDIR_WRITE : DDIR_READ;
minbs = td->o.min_bs[ddir];
maxbs = td->o.max_bs[ddir];
if (!io_u_fits(td, io_u, minbs))
return 0;
- frand_max = rand_max(&td->bsrange_state);
+ frand_max = rand_max(&td->bsrange_state[ddir]);
do {
- r = __rand(&td->bsrange_state);
+ r = __rand(&td->bsrange_state[ddir]);
if (!td->o.bssplit_nr[ddir]) {
- buflen = 1 + (unsigned int) ((double) maxbs *
+ buflen = minbs + (unsigned long long) ((double) maxbs *
(r / (frand_max + 1.0)));
- if (buflen < minbs)
- buflen = minbs;
} else {
long long perc = 0;
unsigned int i;
for (i = 0; i < td->o.bssplit_nr[ddir]; i++) {
struct bssplit *bsp = &td->o.bssplit[ddir][i];
+ if (!bsp->perc)
+ continue;
buflen = bsp->bs;
perc += bsp->perc;
- if (!perc)
- break;
if ((r / perc <= frand_max / 100ULL) &&
io_u_fits(td, io_u, buflen))
break;
power_2 = is_power_of_2(minbs);
if (!td->o.bs_unaligned && power_2)
buflen &= ~(minbs - 1);
- else if (!td->o.bs_unaligned && !power_2)
- buflen -= buflen % minbs;
+ else if (!td->o.bs_unaligned && !power_2)
+ buflen -= buflen % minbs;
+ if (buflen > maxbs)
+ buflen = maxbs;
} while (!io_u_fits(td, io_u, buflen));
return buflen;
}
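/*
 * Editor's note: an illustrative model of the uniform (non-bssplit)
 * branch above, not part of the patch. drand48() stands in for
 * r / (frand_max + 1.0); the draw lands in [minbs, minbs + maxbs), is
 * aligned down to minbs, and is clamped to maxbs as the new code now
 * does explicitly.
 */
#include <stdlib.h>

static unsigned long long pick_buflen(unsigned long long minbs,
				      unsigned long long maxbs)
{
	unsigned long long buflen;

	buflen = minbs + (unsigned long long) ((double) maxbs * drand48());
	buflen -= buflen % minbs;	/* bs_unaligned not set */
	if (buflen > maxbs)
		buflen = maxbs;
	return buflen;
}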
-static unsigned int get_next_buflen(struct thread_data *td, struct io_u *io_u,
- unsigned int is_random)
-{
- if (td->flags & TD_F_PROFILE_OPS) {
- struct prof_io_ops *ops = &td->prof_io_ops;
-
- if (ops->fill_io_u_size)
- return ops->fill_io_u_size(td, io_u, is_random);
- }
-
- return __get_next_buflen(td, io_u, is_random);
-}
-
static void set_rwmix_bytes(struct thread_data *td)
{
unsigned int diff;
{
unsigned int v;
- v = rand32_between(&td->rwmix_state, 1, 100);
+ v = rand_between(&td->rwmix_state, 1, 100);
if (v <= td->o.rwmix[DDIR_READ])
return DDIR_READ;
int io_u_quiesce(struct thread_data *td)
{
- int completed = 0;
+ int ret = 0, completed = 0;
/*
* We are going to sleep, ensure that we flush anything pending as
* io's that have been actually submitted to an async engine,
* and cur_depth is meaningless for sync engines.
*/
- if (td->io_u_queued || td->cur_depth) {
- int fio_unused ret;
-
- ret = td_io_commit(td);
- }
+ if (td->io_u_queued || td->cur_depth)
+ td_io_commit(td);
while (td->io_u_in_flight) {
- int ret;
-
ret = io_u_queued_complete(td, 1);
if (ret > 0)
completed += ret;
+ else if (ret < 0)
+ break;
}
if (td->flags & TD_F_REGROW_LOGS)
regrow_logs(td);
- return completed;
+ if (completed)
+ return completed;
+
+ return ret;
}
static enum fio_ddir rate_ddir(struct thread_data *td, enum fio_ddir ddir)
{
enum fio_ddir odir = ddir ^ 1;
- long usec;
+ uint64_t usec;
uint64_t now;
assert(ddir_rw(ddir));
return odir;
/*
- * Both directions are ahead of rate. sleep the min
- * switch if necissary
+ * Both directions are ahead of rate. sleep the min,
+ * switch if necessary
*/
if (td->rate_next_io_time[ddir] <=
- td->rate_next_io_time[odir]) {
+ td->rate_next_io_time[odir]) {
usec = td->rate_next_io_time[ddir] - now;
} else {
usec = td->rate_next_io_time[odir] - now;
if (td->o.io_submit_mode == IO_MODE_INLINE)
io_u_quiesce(td);
- usec = usec_sleep(td, usec);
-
+ usec_sleep(td, usec);
return ddir;
}
void put_io_u(struct thread_data *td, struct io_u *io_u)
{
+ const bool needs_lock = td_async_processing(td);
+
+ zbd_put_io_u(io_u);
+
if (td->parent)
td = td->parent;
- td_io_u_lock(td);
+ if (needs_lock)
+ __td_io_u_lock(td);
if (io_u->file && !(io_u->flags & IO_U_F_NO_FILE_PUT))
put_file_log(td, io_u->file);
assert(!(td->flags & TD_F_CHILD));
}
io_u_qpush(&td->io_u_freelist, io_u);
- td_io_u_unlock(td);
td_io_u_free_notify(td);
+
+ if (needs_lock)
+ __td_io_u_unlock(td);
}
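/*
 * Editor's note: a sketch of the conditional-locking pattern the hunks
 * above introduce, not part of the patch. The free list only needs the
 * lock when another thread may touch it (async processing), so the
 * decision is cached in a const bool and the bare __lock/__unlock
 * helpers are used. All names below are hypothetical.
 */
#include <pthread.h>
#include <stdbool.h>

struct freelist {
	pthread_mutex_t lock;
	bool shared;	/* set when an async helper thread exists */
};

static void freelist_put(struct freelist *fl /* , struct item *it */)
{
	const bool needs_lock = fl->shared;

	if (needs_lock)
		pthread_mutex_lock(&fl->lock);
	/* ... push item onto the free list, wake any waiter ... */
	if (needs_lock)
		pthread_mutex_unlock(&fl->lock);
}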
void clear_io_u(struct thread_data *td, struct io_u *io_u)
void requeue_io_u(struct thread_data *td, struct io_u **io_u)
{
+ const bool needs_lock = td_async_processing(td);
struct io_u *__io_u = *io_u;
enum fio_ddir ddir = acct_ddir(__io_u);
if (td->parent)
td = td->parent;
- td_io_u_lock(td);
+ if (needs_lock)
+ __td_io_u_lock(td);
io_u_set(td, __io_u, IO_U_F_FREE);
if ((__io_u->flags & IO_U_F_FLIGHT) && ddir_rw(ddir))
}
io_u_rpush(&td->io_u_requeues, __io_u);
- td_io_u_unlock(td);
td_io_u_free_notify(td);
+
+ if (needs_lock)
+ __td_io_u_unlock(td);
+
*io_u = NULL;
}
-static int fill_io_u(struct thread_data *td, struct io_u *io_u)
+static void setup_strided_zone_mode(struct thread_data *td, struct io_u *io_u)
{
- unsigned int is_random;
-
- if (td_ioengine_flagged(td, FIO_NOIO))
- goto out;
+ struct fio_file *f = io_u->file;
- set_rw_ddir(td, io_u);
-
- /*
- * fsync() or fdatasync() or trim etc, we are done
- */
- if (!ddir_rw(io_u->ddir))
- goto out;
+ assert(td->o.zone_mode == ZONE_MODE_STRIDED);
+ assert(td->o.zone_size);
+ assert(td->o.zone_range);
/*
* See if it's time to switch to a new zone
*/
if (td->zone_bytes >= td->o.zone_size && td->o.zone_skip) {
- struct fio_file *f = io_u->file;
-
td->zone_bytes = 0;
f->file_offset += td->o.zone_range + td->o.zone_skip;
* Wrap from the beginning, if we exceed the file size
*/
if (f->file_offset >= f->real_file_size)
- f->file_offset = f->real_file_size - f->file_offset;
+ f->file_offset = get_start_offset(td, f);
+
f->last_pos[io_u->ddir] = f->file_offset;
td->io_skip_bytes += td->o.zone_skip;
}
+ /*
+ * If zone_size > zone_range, then maintain the same zone until
+ * zone_bytes >= zone_size.
+ */
+ if (f->last_pos[io_u->ddir] >= (f->file_offset + td->o.zone_range)) {
+ dprint(FD_IO, "io_u maintain zone offset=%" PRIu64 "/last_pos=%" PRIu64 "\n",
+ f->file_offset, f->last_pos[io_u->ddir]);
+ f->last_pos[io_u->ddir] = f->file_offset;
+ }
+
+ /*
+ * For random: if 'norandommap' is not set and zone_size > zone_range,
+	 * the map needs to be reset, as it's done with zone_range every time.
+ */
+ if ((td->zone_bytes % td->o.zone_range) == 0)
+ fio_file_reset(td, f);
+}
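/*
 * Editor's note: a standalone model of the strided zone stepping above,
 * not part of the patch. After zone_size bytes of I/O within a
 * zone_range window, the offset advances by zone_range + zone_skip and
 * wraps to the job's start offset at end of device, matching the
 * corrected wrap handling in the hunk above.
 */
#include <stdint.h>

struct strided {
	uint64_t start, file_size;	/* job start offset, device size */
	uint64_t range, size, skip;	/* zone_range/zone_size/zone_skip */
	uint64_t off, zone_bytes;
};

static uint64_t strided_next_offset(struct strided *s)
{
	if (s->zone_bytes >= s->size && s->skip) {
		s->zone_bytes = 0;
		s->off += s->range + s->skip;
		if (s->off >= s->file_size)
			s->off = s->start;	/* wrap, don't go negative */
	}
	return s->off;
}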
+
+static int fill_io_u(struct thread_data *td, struct io_u *io_u)
+{
+ bool is_random;
+ uint64_t offset;
+ enum io_u_action ret;
+
+ if (td_ioengine_flagged(td, FIO_NOIO))
+ goto out;
+
+ set_rw_ddir(td, io_u);
+
+ /*
+ * fsync() or fdatasync() or trim etc, we are done
+ */
+ if (!ddir_rw(io_u->ddir))
+ goto out;
+
+ if (td->o.zone_mode == ZONE_MODE_STRIDED)
+ setup_strided_zone_mode(td, io_u);
+ else if (td->o.zone_mode == ZONE_MODE_ZBD)
+ setup_zbd_zone_mode(td, io_u);
+
/*
* No log, let the seq/rand engine retrieve the next buflen and
* position.
return 1;
}
+ offset = io_u->offset;
+ if (td->o.zone_mode == ZONE_MODE_ZBD) {
+ ret = zbd_adjust_block(td, io_u);
+ if (ret == io_u_eof)
+ return 1;
+ }
+
if (io_u->offset + io_u->buflen > io_u->file->real_file_size) {
- dprint(FD_IO, "io_u %p, offset + buflen exceeds file size\n",
- io_u);
- dprint(FD_IO, " offset=%llu/buflen=%lu > %llu\n",
+ dprint(FD_IO, "io_u %p, off=0x%llx + len=0x%llx exceeds file size=0x%llx\n",
+ io_u,
(unsigned long long) io_u->offset, io_u->buflen,
(unsigned long long) io_u->file->real_file_size);
return 1;
* mark entry before potentially trimming io_u
*/
if (td_random(td) && file_randommap(td, io_u->file))
- mark_random_map(td, io_u);
+ io_u->buflen = mark_random_map(td, io_u, offset, io_u->buflen);
out:
- dprint_io_u(io_u, "fill_io_u");
+ dprint_io_u(io_u, "fill");
td->zone_bytes += io_u->buflen;
return 0;
}
-static void __io_u_mark_map(unsigned int *map, unsigned int nr)
+static void __io_u_mark_map(uint64_t *map, unsigned int nr)
{
int idx = 0;
static struct fio_file *get_next_file(struct thread_data *td)
{
- if (td->flags & TD_F_PROFILE_OPS) {
- struct prof_io_ops *ops = &td->prof_io_ops;
-
- if (ops->get_next_file)
- return ops->get_next_file(td);
- }
-
return __get_next_file(td);
}
if (!fill_io_u(td, io_u))
break;
+ zbd_put_io_u(io_u);
+
put_file_log(td, f);
td_io_close_file(td, f);
io_u->file = NULL;
}
static void lat_fatal(struct thread_data *td, struct io_completion_data *icd,
- unsigned long tusec, unsigned long max_usec)
+ unsigned long long tnsec, unsigned long long max_nsec)
{
if (!td->error)
- log_err("fio: latency of %lu usec exceeds specified max (%lu usec)\n", tusec, max_usec);
+ log_err("fio: latency of %llu nsec exceeds specified max (%llu nsec)\n", tnsec, max_nsec);
td_verror(td, ETIMEDOUT, "max latency exceeded");
icd->error = ETIMEDOUT;
}
struct io_u *__get_io_u(struct thread_data *td)
{
+ const bool needs_lock = td_async_processing(td);
struct io_u *io_u = NULL;
+ int ret;
if (td->stop_io)
return NULL;
- td_io_u_lock(td);
+ if (needs_lock)
+ __td_io_u_lock(td);
again:
if (!io_u_rempty(&td->io_u_requeues))
* return one
*/
assert(!(td->flags & TD_F_CHILD));
- assert(!pthread_cond_wait(&td->free_cond, &td->io_u_lock));
- goto again;
+ ret = pthread_cond_wait(&td->free_cond, &td->io_u_lock);
+ assert(ret == 0);
+ if (!td->error)
+ goto again;
}
- td_io_u_unlock(td);
+ if (needs_lock)
+ __td_io_u_unlock(td);
+
return io_u;
}
{
if (!(td->flags & TD_F_TRIM_BACKLOG))
return false;
+ if (!td->trim_entries)
+ return false;
- if (td->trim_entries) {
- int get_trim = 0;
-
- if (td->trim_batch) {
- td->trim_batch--;
- get_trim = 1;
- } else if (!(td->io_hist_len % td->o.trim_backlog) &&
- td->last_ddir != DDIR_READ) {
- td->trim_batch = td->o.trim_batch;
- if (!td->trim_batch)
- td->trim_batch = td->o.trim_backlog;
- get_trim = 1;
- }
-
- if (get_trim && get_next_trim(td, io_u))
+ if (td->trim_batch) {
+ td->trim_batch--;
+ if (get_next_trim(td, io_u))
+ return true;
+ } else if (!(td->io_hist_len % td->o.trim_backlog) &&
+ td->last_ddir != DDIR_READ) {
+ td->trim_batch = td->o.trim_batch;
+ if (!td->trim_batch)
+ td->trim_batch = td->o.trim_backlog;
+ if (get_next_trim(td, io_u))
return true;
}
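/*
 * Editor's note: a compact model of the trim-backlog pacing above, not
 * part of the patch. Once per trim_backlog completed writes, a batch of
 * trim_batch trims is started (defaulting to the whole backlog); one
 * trim is issued per call while the batch counter runs down.
 */
#include <stdbool.h>

struct trim_state {
	unsigned int hist_len;		/* completed writes so far */
	unsigned int backlog, batch;	/* the two options */
	unsigned int batch_left;	/* trims remaining in this batch */
};

static bool trim_due(struct trim_state *t)
{
	if (t->batch_left) {
		t->batch_left--;
		return true;
	}
	if (t->backlog && !(t->hist_len % t->backlog)) {
		t->batch_left = t->batch ? t->batch : t->backlog;
		return true;
	}
	return false;
}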
*/
static void small_content_scramble(struct io_u *io_u)
{
- unsigned int i, nr_blocks = io_u->buflen / 512;
- uint64_t boffset;
+ unsigned long long i, nr_blocks = io_u->buflen >> 9;
unsigned int offset;
- void *p, *end;
+ uint64_t boffset, *iptr;
+ char *p;
if (!nr_blocks)
return;
p = io_u->xfer_buf;
boffset = io_u->offset;
- io_u->buf_filled_len = 0;
+
+ if (io_u->buf_filled_len)
+ io_u->buf_filled_len = 0;
+
+ /*
+	 * Generate a random index between 0..7. We do chunks of 512b; if
+	 * we assume a cacheline is 64 bytes, then we have 8 of those.
+ * Scramble content within the blocks in the same cacheline to
+ * speed things up.
+ */
+ offset = (io_u->start_time.tv_nsec ^ boffset) & 7;
for (i = 0; i < nr_blocks; i++) {
/*
- * Fill the byte offset into a "random" start offset of
- * the buffer, given by the product of the usec time
- * and the actual offset.
+ * Fill offset into start of cacheline, time into end
+ * of cacheline
*/
- offset = ((io_u->start_time.tv_nsec/1000) ^ boffset) & 511;
- offset &= ~(sizeof(uint64_t) - 1);
- if (offset >= 512 - sizeof(uint64_t))
- offset -= sizeof(uint64_t);
- memcpy(p + offset, &boffset, sizeof(boffset));
-
- end = p + 512 - sizeof(io_u->start_time);
- memcpy(end, &io_u->start_time, sizeof(io_u->start_time));
+ iptr = (void *) p + (offset << 6);
+ *iptr = boffset;
+
+ iptr = (void *) p + 64 - 2 * sizeof(uint64_t);
+ iptr[0] = io_u->start_time.tv_sec;
+ iptr[1] = io_u->start_time.tv_nsec;
+
p += 512;
boffset += 512;
}
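/*
 * Editor's note: a standalone version of the scramble loop above, not
 * part of the patch. Each 512b chunk gets the block offset written into
 * one of its eight 64-byte cachelines (index chosen once per buffer)
 * and the submission time into the tail of its first cacheline, so
 * otherwise identical buffers become distinct cheaply.
 */
#include <stdint.h>
#include <time.h>

static void scramble(char *buf, uint64_t len, uint64_t file_off,
		     const struct timespec *now)
{
	unsigned int cl = (unsigned int) (now->tv_nsec ^ file_off) & 7;
	uint64_t i, *iptr;

	for (i = 0; i + 512 <= len; i += 512, file_off += 512) {
		iptr = (uint64_t *) (buf + i + (cl << 6));
		*iptr = file_off;			/* offset marker */
		iptr = (uint64_t *) (buf + i + 64 - 2 * sizeof(uint64_t));
		iptr[0] = (uint64_t) now->tv_sec;	/* time marker */
		iptr[1] = (uint64_t) now->tv_nsec;
	}
}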
/*
* Return an io_u to be processed. Gets a buflen and offset, sets direction,
- * etc. The returned io_u is fully ready to be prepped and submitted.
+ * etc. The returned io_u is fully ready to be prepped, populated and submitted.
*/
struct io_u *get_io_u(struct thread_data *td)
{
td->o.min_bs[DDIR_WRITE],
io_u->buflen);
} else if ((td->flags & TD_F_SCRAMBLE_BUFFERS) &&
- !(td->flags & TD_F_COMPRESS))
+ !(td->flags & TD_F_COMPRESS) &&
+ !(td->flags & TD_F_DO_VERIFY))
do_scramble = 1;
- if (td->flags & TD_F_VER_NONE) {
- populate_verify_io_u(td, io_u);
- do_scramble = 0;
- }
} else if (io_u->ddir == DDIR_READ) {
/*
* Reset the buf_filled parameters so next time if the
if (td_non_fatal_error(td, eb, io_u->error) && !td->o.error_dump)
return;
- log_err("fio: io_u error%s%s: %s: %s offset=%llu, buflen=%lu\n",
+ log_err("fio: io_u error%s%s: %s: %s offset=%llu, buflen=%llu\n",
io_u->file ? " on file " : "",
io_u->file ? io_u->file->file_name : "",
strerror(io_u->error),
|| td->o.gtod_reduce;
}
+static void trim_block_info(struct thread_data *td, struct io_u *io_u)
+{
+ uint32_t *info = io_u_block_info(td, io_u);
+
+ if (BLOCK_INFO_STATE(*info) >= BLOCK_STATE_TRIM_FAILURE)
+ return;
+
+ *info = BLOCK_INFO(BLOCK_STATE_TRIMMED, BLOCK_INFO_TRIMS(*info) + 1);
+}
+
static void account_io_completion(struct thread_data *td, struct io_u *io_u,
struct io_completion_data *icd,
const enum fio_ddir idx, unsigned int bytes)
if (td->parent)
td = td->parent;
- if (!td->o.stats)
+ if (!td->o.stats || td_ioengine_flagged(td, FIO_NOSTATS))
return;
if (no_reduce)
struct prof_io_ops *ops = &td->prof_io_ops;
if (ops->io_u_lat)
- icd->error = ops->io_u_lat(td, tnsec/1000);
+ icd->error = ops->io_u_lat(td, tnsec);
}
- if (td->o.max_latency && tnsec/1000 > td->o.max_latency)
- lat_fatal(td, icd, tnsec/1000, td->o.max_latency);
- if (td->o.latency_target && tnsec/1000 > td->o.latency_target) {
+ if (td->o.max_latency && tnsec > td->o.max_latency)
+ lat_fatal(td, icd, tnsec, td->o.max_latency);
+ if (td->o.latency_target && tnsec > td->o.latency_target) {
if (lat_target_failed(td))
- lat_fatal(td, icd, tnsec/1000, td->o.latency_target);
+ lat_fatal(td, icd, tnsec, td->o.latency_target);
}
}
if (no_reduce && per_unit_log(td->iops_log))
add_iops_sample(td, io_u, bytes);
- }
+ } else if (ddir_sync(idx) && !td->o.disable_clat)
+ add_sync_clat_sample(&td->ts, llnsec);
- if (td->ts.nr_block_infos && io_u->ddir == DDIR_TRIM) {
- uint32_t *info = io_u_block_info(td, io_u);
- if (BLOCK_INFO_STATE(*info) < BLOCK_STATE_TRIM_FAILURE) {
- if (io_u->ddir == DDIR_TRIM) {
- *info = BLOCK_INFO(BLOCK_STATE_TRIMMED,
- BLOCK_INFO_TRIMS(*info) + 1);
- } else if (io_u->ddir == DDIR_WRITE) {
- *info = BLOCK_INFO_SET_STATE(BLOCK_STATE_WRITTEN,
- *info);
- }
- }
- }
+ if (td->ts.nr_block_infos && io_u->ddir == DDIR_TRIM)
+ trim_block_info(td, io_u);
}
static void file_log_write_comp(const struct thread_data *td, struct fio_file *f,
f->last_write_idx = 0;
}
+static bool should_account(struct thread_data *td)
+{
+ return ramp_time_over(td) && (td->runstate == TD_RUNNING ||
+ td->runstate == TD_VERIFYING);
+}
+
static void io_completed(struct thread_data *td, struct io_u **io_u_ptr,
struct io_completion_data *icd)
{
enum fio_ddir ddir = io_u->ddir;
struct fio_file *f = io_u->file;
- dprint_io_u(io_u, "io complete");
+ dprint_io_u(io_u, "complete");
assert(io_u->flags & IO_U_F_FLIGHT);
io_u_clear(td, io_u, IO_U_F_FLIGHT | IO_U_F_BUSY_OK);
}
if (ddir_sync(ddir)) {
- td->last_was_sync = 1;
+ td->last_was_sync = true;
if (f) {
f->first_write = -1ULL;
f->last_write = -1ULL;
}
+ if (should_account(td))
+ account_io_completion(td, io_u, icd, ddir, io_u->buflen);
return;
}
- td->last_was_sync = 0;
+ td->last_was_sync = false;
td->last_ddir = ddir;
if (!io_u->error && ddir_rw(ddir)) {
- unsigned int bytes = io_u->buflen - io_u->resid;
+ unsigned long long bytes = io_u->buflen - io_u->resid;
int ret;
td->io_blocks[ddir]++;
- td->this_io_blocks[ddir]++;
td->io_bytes[ddir] += bytes;
- if (!(io_u->flags & IO_U_F_VER_LIST))
+ if (!(io_u->flags & IO_U_F_VER_LIST)) {
+ td->this_io_blocks[ddir]++;
td->this_io_bytes[ddir] += bytes;
+ }
if (ddir == DDIR_WRITE)
file_log_write_comp(td, f, io_u->offset, bytes);
- if (ramp_time_over(td) && (td->runstate == TD_RUNNING ||
- td->runstate == TD_VERIFYING))
+ if (should_account(td))
account_io_completion(td, io_u, icd, ddir, bytes);
icd->bytes_done[ddir] += bytes;
return &td->buf_state;
}
- v = rand32_between(&td->dedupe_state, 1, 100);
+ v = rand_between(&td->dedupe_state, 1, 100);
if (v <= td->o.dedupe_percentage)
return &td->buf_state_prev;
frand_copy(&td->buf_state_prev, rs);
}
-void fill_io_buffer(struct thread_data *td, void *buf, unsigned int min_write,
- unsigned int max_bs)
+void fill_io_buffer(struct thread_data *td, void *buf, unsigned long long min_write,
+ unsigned long long max_bs)
{
struct thread_options *o = &td->o;
if (o->compress_percentage || o->dedupe_percentage) {
unsigned int perc = td->o.compress_percentage;
struct frand_state *rs;
- unsigned int left = max_bs;
- unsigned int this_write;
+ unsigned long long left = max_bs;
+ unsigned long long this_write;
do {
rs = get_buf_state(td);
if (perc) {
this_write = min_not_zero(min_write,
- td->o.compress_chunk);
+ (unsigned long long) td->o.compress_chunk);
fill_random_buf_percentage(rs, buf, perc,
this_write, this_write,
* "randomly" fill the buffer contents
*/
void io_u_fill_buffer(struct thread_data *td, struct io_u *io_u,
- unsigned int min_write, unsigned int max_bs)
+ unsigned long long min_write, unsigned long long max_bs)
{
io_u->buf_filled_len = 0;
fill_io_buffer(td, io_u->buf, min_write, max_bs);
struct fio_file *f = io_u->file;
int ret;
- ret = os_trim(f->fd, io_u->offset, io_u->xfer_buflen);
+ ret = os_trim(f, io_u->offset, io_u->xfer_buflen);
if (!ret)
return io_u->xfer_buflen;