int error; /* output */
uint64_t bytes_done[DDIR_RWDIR_CNT]; /* output */
- struct timeval time; /* output */
+ struct timespec time; /* output */
};
/*
*/
static void mark_random_map(struct thread_data *td, struct io_u *io_u)
{
- unsigned int min_bs = td->o.rw_min_bs;
+ unsigned int min_bs = td->o.min_bs[io_u->ddir];
struct fio_file *f = io_u->file;
unsigned int nr_blocks;
uint64_t block;
/*
* Hmm, should we make sure that ->io_size <= ->real_file_size?
+ * -> not for now since there is code assuming it could go either way.
*/
max_size = f->io_size;
if (max_size > f->real_file_size)
goto fetch;
}
+static void loop_cache_invalidate(struct thread_data *td, struct fio_file *f)
+{
+ struct thread_options *o = &td->o;
+
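+ /*
+ * Drop the page cache for buffered IO before another pass over the
+ * file; O_DIRECT already bypasses the cache.
+ */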
+ if (o->invalidate_cache && !o->odirect) {
+ int fio_unused ret;
+
+ ret = file_invalidate_cache(td, f);
+ }
+}
+
static int get_next_rand_block(struct thread_data *td, struct fio_file *f,
enum fio_ddir ddir, uint64_t *b)
{
if (!get_next_rand_offset(td, f, ddir, b))
return 0;
- if (td->o.time_based) {
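+ /* Random map exhausted: wrap and retry for time based runs or nonuniform file service. */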
+ if (td->o.time_based ||
+ (td->o.file_service_type & __FIO_FSERVICE_NONUNIFORM)) {
fio_file_reset(td, f);
if (!get_next_rand_offset(td, f, ddir, b))
return 0;
+ loop_cache_invalidate(td, f);
}
dprint(FD_IO, "%s: rand offset failed, last=%llu, size=%llu\n",
assert(ddir_rw(ddir));
+ /*
+ * If we reach the end for a time based run, reset us back to 0
+ * and invalidate the cache, if we need to.
+ */
if (f->last_pos[ddir] >= f->io_size + get_start_offset(td, f) &&
o->time_based) {
- struct thread_options *o = &td->o;
- uint64_t io_size = f->io_size + (f->io_size % o->min_bs[ddir]);
-
- if (io_size > f->last_pos[ddir])
- f->last_pos[ddir] = 0;
- else
- f->last_pos[ddir] = f->last_pos[ddir] - io_size;
+ f->last_pos[ddir] = f->file_offset;
+ loop_cache_invalidate(td, f);
}
if (f->last_pos[ddir] < f->real_file_size) {
uint64_t pos;
- if (f->last_pos[ddir] == f->file_offset && o->ddir_seq_add < 0)
- f->last_pos[ddir] = f->real_file_size;
+ if (f->last_pos[ddir] == f->file_offset && o->ddir_seq_add < 0) {
+ if (f->real_file_size > f->io_size)
+ f->last_pos[ddir] = f->io_size;
+ else
+ f->last_pos[ddir] = f->real_file_size;
+ }
pos = f->last_pos[ddir] - f->file_offset;
if (pos && o->ddir_seq_add) {
/*
* If we reach beyond the end of the file
* with holed IO, wrap around to the
- * beginning again.
+ * beginning again. If we're doing backwards IO,
+ * wrap to the end.
*/
- if (pos >= f->real_file_size)
- pos = f->file_offset;
+ if (pos >= f->real_file_size) {
+ if (o->ddir_seq_add > 0)
+ pos = f->file_offset;
+ else {
+ if (f->real_file_size > f->io_size)
+ pos = f->io_size;
+ else
+ pos = f->real_file_size;
+
+ pos += o->ddir_seq_add;
+ }
+ }
}
*offset = pos;
*is_random = 1;
} else {
*is_random = 0;
- io_u_set(io_u, IO_U_F_BUSY_OK);
+ io_u_set(td, io_u, IO_U_F_BUSY_OK);
ret = get_next_seq_offset(td, f, ddir, &offset);
if (ret)
ret = get_next_rand_block(td, f, ddir, &b);
ret = get_next_seq_offset(td, f, ddir, &offset);
}
} else {
- io_u_set(io_u, IO_U_F_BUSY_OK);
+ io_u_set(td, io_u, IO_U_F_BUSY_OK);
*is_random = 0;
if (td->o.rw_seq == RW_SEQ_SEQ) {
int ddir = io_u->ddir;
unsigned int buflen = 0;
unsigned int minbs, maxbs;
- uint64_t frand_max;
- unsigned long r;
+ uint64_t frand_max, r;
+ bool power_2;
assert(ddir_rw(ddir));
if (!io_u_fits(td, io_u, minbs))
return 0;
- frand_max = rand_max(&td->bsrange_state);
+ frand_max = rand_max(&td->bsrange_state[ddir]);
do {
- r = __rand(&td->bsrange_state);
+ r = __rand(&td->bsrange_state[ddir]);
if (!td->o.bssplit_nr[ddir]) {
buflen = 1 + (unsigned int) ((double) maxbs *
if (buflen < minbs)
buflen = minbs;
} else {
- long perc = 0;
+ long long perc = 0;
unsigned int i;
for (i = 0; i < td->o.bssplit_nr[ddir]; i++) {
buflen = bsp->bs;
perc += bsp->perc;
- if ((r * 100UL <= frand_max * perc) &&
+ if (!perc)
+ break;
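+ /* Divide both sides instead of multiplying; r * 100 can overflow 64 bits. */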
+ if ((r / perc <= frand_max / 100ULL) &&
io_u_fits(td, io_u, buflen))
break;
}
}
- if (td->o.verify != VERIFY_NONE)
- buflen = (buflen + td->o.verify_interval - 1) &
- ~(td->o.verify_interval - 1);
-
- if (!td->o.bs_unaligned && is_power_of_2(minbs))
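+ /* Round the length down to a multiple of minbs: mask for power-of-2 sizes, modulo otherwise. */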
+ power_2 = is_power_of_2(minbs);
+ if (!td->o.bs_unaligned && power_2)
buflen &= ~(minbs - 1);
-
+ else if (!td->o.bs_unaligned && !power_2)
+ buflen -= buflen % minbs;
} while (!io_u_fits(td, io_u, buflen));
return buflen;
}
while (td->io_u_in_flight) {
- int fio_unused ret;
+ int ret;
ret = io_u_queued_complete(td, 1);
if (ret > 0)
completed += ret;
}
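+ /* IO has drained; grow any logs whose resize was deferred. */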
+ if (td->flags & TD_F_REGROW_LOGS)
+ regrow_logs(td);
+
return completed;
}
static enum fio_ddir rate_ddir(struct thread_data *td, enum fio_ddir ddir)
{
enum fio_ddir odir = ddir ^ 1;
- long usec, now;
+ uint64_t usec;
+ uint64_t now;
assert(ddir_rw(ddir));
now = utime_since_now(&td->start);
enum fio_ddir ddir;
/*
- * see if it's time to fsync
+ * See if it's time to fsync/fdatasync/sync_file_range first,
+ * and if not then move on to check regular I/Os.
*/
- if (td->o.fsync_blocks &&
- !(td->io_issues[DDIR_WRITE] % td->o.fsync_blocks) &&
- td->io_issues[DDIR_WRITE] && should_fsync(td))
- return DDIR_SYNC;
-
- /*
- * see if it's time to fdatasync
- */
- if (td->o.fdatasync_blocks &&
- !(td->io_issues[DDIR_WRITE] % td->o.fdatasync_blocks) &&
- td->io_issues[DDIR_WRITE] && should_fsync(td))
- return DDIR_DATASYNC;
-
- /*
- * see if it's time to sync_file_range
- */
- if (td->sync_file_range_nr &&
- !(td->io_issues[DDIR_WRITE] % td->sync_file_range_nr) &&
- td->io_issues[DDIR_WRITE] && should_fsync(td))
- return DDIR_SYNC_FILE_RANGE;
+ if (should_fsync(td)) {
+ if (td->o.fsync_blocks && td->io_issues[DDIR_WRITE] &&
+ !(td->io_issues[DDIR_WRITE] % td->o.fsync_blocks))
+ return DDIR_SYNC;
+
+ if (td->o.fdatasync_blocks && td->io_issues[DDIR_WRITE] &&
+ !(td->io_issues[DDIR_WRITE] % td->o.fdatasync_blocks))
+ return DDIR_DATASYNC;
+
+ if (td->sync_file_range_nr && td->io_issues[DDIR_WRITE] &&
+ !(td->io_issues[DDIR_WRITE] % td->sync_file_range_nr))
+ return DDIR_SYNC_FILE_RANGE;
+ }
if (td_rw(td)) {
/*
ddir = DDIR_READ;
else if (td_write(td))
ddir = DDIR_WRITE;
- else
+ else if (td_trim(td))
ddir = DDIR_TRIM;
+ else
+ ddir = DDIR_INVAL;
td->rwmix_ddir = rate_ddir(td, ddir);
return td->rwmix_ddir;
io_u->ddir = io_u->acct_ddir = ddir;
- if (io_u->ddir == DDIR_WRITE && (td->io_ops->flags & FIO_BARRIER) &&
+ if (io_u->ddir == DDIR_WRITE && td_ioengine_flagged(td, FIO_BARRIER) &&
td->o.barrier_blocks &&
!(td->io_issues[DDIR_WRITE] % td->o.barrier_blocks) &&
td->io_issues[DDIR_WRITE])
- io_u_set(io_u, IO_U_F_BARRIER);
+ io_u_set(td, io_u, IO_U_F_BARRIER);
}
void put_file_log(struct thread_data *td, struct fio_file *f)
put_file_log(td, io_u->file);
io_u->file = NULL;
- io_u_set(io_u, IO_U_F_FREE);
+ io_u_set(td, io_u, IO_U_F_FREE);
if (io_u->flags & IO_U_F_IN_CUR_DEPTH) {
td->cur_depth--;
void clear_io_u(struct thread_data *td, struct io_u *io_u)
{
- io_u_clear(io_u, IO_U_F_FLIGHT);
+ io_u_clear(td, io_u, IO_U_F_FLIGHT);
put_io_u(td, io_u);
}
td_io_u_lock(td);
- io_u_set(__io_u, IO_U_F_FREE);
+ io_u_set(td, __io_u, IO_U_F_FREE);
if ((__io_u->flags & IO_U_F_FLIGHT) && ddir_rw(ddir))
td->io_issues[ddir]--;
- io_u_clear(__io_u, IO_U_F_FLIGHT);
+ io_u_clear(td, __io_u, IO_U_F_FLIGHT);
if (__io_u->flags & IO_U_F_IN_CUR_DEPTH) {
td->cur_depth--;
assert(!(td->flags & TD_F_CHILD));
{
unsigned int is_random;
- if (td->io_ops->flags & FIO_NOIO)
+ if (td_ioengine_flagged(td, FIO_NOIO))
goto out;
set_rw_ddir(td, io_u);
}
if (io_u->offset + io_u->buflen > io_u->file->real_file_size) {
- dprint(FD_IO, "io_u %p, offset too large\n", io_u);
- dprint(FD_IO, " off=%llu/%lu > %llu\n",
+ dprint(FD_IO, "io_u %p, offset + buflen exceeds file size\n",
+ io_u);
+ dprint(FD_IO, " offset=%llu/buflen=%lu > %llu\n",
(unsigned long long) io_u->offset, io_u->buflen,
(unsigned long long) io_u->file->real_file_size);
return 1;
td->ts.io_u_map[idx] += nr;
}
-static void io_u_mark_lat_usec(struct thread_data *td, unsigned long usec)
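+/* Bucket latencies below one microsecond into the nanosecond histogram. */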
+static void io_u_mark_lat_nsec(struct thread_data *td, unsigned long long nsec)
+{
+ int idx = 0;
+
+ assert(nsec < 1000);
+
+ switch (nsec) {
+ case 750 ... 999:
+ idx = 9;
+ break;
+ case 500 ... 749:
+ idx = 8;
+ break;
+ case 250 ... 499:
+ idx = 7;
+ break;
+ case 100 ... 249:
+ idx = 6;
+ break;
+ case 50 ... 99:
+ idx = 5;
+ break;
+ case 20 ... 49:
+ idx = 4;
+ break;
+ case 10 ... 19:
+ idx = 3;
+ break;
+ case 4 ... 9:
+ idx = 2;
+ break;
+ case 2 ... 3:
+ idx = 1;
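+ /* fall through */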
+ case 0 ... 1:
+ break;
+ }
+
+ assert(idx < FIO_IO_U_LAT_N_NR);
+ td->ts.io_u_lat_n[idx]++;
+}
+
+static void io_u_mark_lat_usec(struct thread_data *td, unsigned long long usec)
{
int idx = 0;
- assert(usec < 1000);
+ assert(usec < 1000 && usec >= 1);
switch (usec) {
case 750 ... 999:
td->ts.io_u_lat_u[idx]++;
}
-static void io_u_mark_lat_msec(struct thread_data *td, unsigned long msec)
+static void io_u_mark_lat_msec(struct thread_data *td, unsigned long long msec)
{
int idx = 0;
+ assert(msec >= 1);
+
switch (msec) {
default:
idx = 11;
td->ts.io_u_lat_m[idx]++;
}
-static void io_u_mark_latency(struct thread_data *td, unsigned long usec)
+static void io_u_mark_latency(struct thread_data *td, unsigned long long nsec)
{
- if (usec < 1000)
- io_u_mark_lat_usec(td, usec);
+ if (nsec < 1000)
+ io_u_mark_lat_nsec(td, nsec);
+ else if (nsec < 1000000)
+ io_u_mark_lat_usec(td, nsec / 1000);
else
- io_u_mark_lat_msec(td, usec / 1000);
+ io_u_mark_lat_msec(td, nsec / 1000000);
+}
+
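+/* Pick the next file index according to the configured file_service_type distribution. */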
+static unsigned int __get_next_fileno_rand(struct thread_data *td)
+{
+ unsigned long fileno;
+
+ if (td->o.file_service_type == FIO_FSERVICE_RANDOM) {
+ uint64_t frand_max = rand_max(&td->next_file_state);
+ unsigned long r;
+
+ r = __rand(&td->next_file_state);
+ return (unsigned int) ((double) td->o.nr_files
+ * (r / (frand_max + 1.0)));
+ }
+
+ if (td->o.file_service_type == FIO_FSERVICE_ZIPF)
+ fileno = zipf_next(&td->next_file_zipf);
+ else if (td->o.file_service_type == FIO_FSERVICE_PARETO)
+ fileno = pareto_next(&td->next_file_zipf);
+ else if (td->o.file_service_type == FIO_FSERVICE_GAUSS)
+ fileno = gauss_next(&td->next_file_gauss);
+ else {
+ log_err("fio: bad file service type: %d\n", td->o.file_service_type);
+ assert(0);
+ return 0;
+ }
+
+ return fileno >> FIO_FSERVICE_SHIFT;
}
/*
enum fio_file_flags goodf,
enum fio_file_flags badf)
{
- uint64_t frand_max = rand_max(&td->next_file_state);
struct fio_file *f;
int fno;
do {
int opened = 0;
- unsigned long r;
- r = __rand(&td->next_file_state);
- fno = (unsigned int) ((double) td->o.nr_files
- * (r / (frand_max + 1.0)));
+ fno = __get_next_fileno_rand(td);
f = td->files[fno];
if (fio_file_done(f))
put_file_log(td, f);
td_io_close_file(td, f);
io_u->file = NULL;
- fio_file_set_done(f);
- td->nr_done_files++;
- dprint(FD_FILE, "%s: is done (%d of %d)\n", f->file_name,
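+ /*
+ * Nonuniform file service re-selects files by distribution, so just
+ * reset the file instead of marking it done.
+ */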
+ if (td->o.file_service_type & __FIO_FSERVICE_NONUNIFORM)
+ fio_file_reset(td, f);
+ else {
+ fio_file_set_done(f);
+ td->nr_done_files++;
+ dprint(FD_FILE, "%s: is done (%d of %d)\n", f->file_name,
td->nr_done_files, td->o.nr_files);
+ }
} while (1);
return 0;
if (io_u) {
assert(io_u->flags & IO_U_F_FREE);
- io_u_clear(io_u, IO_U_F_FREE | IO_U_F_NO_FILE_PUT |
+ io_u_clear(td, io_u, IO_U_F_FREE | IO_U_F_NO_FILE_PUT |
IO_U_F_TRIMMED | IO_U_F_BARRIER |
IO_U_F_VER_LIST);
io_u->acct_ddir = -1;
td->cur_depth++;
assert(!(td->flags & TD_F_CHILD));
- io_u_set(io_u, IO_U_F_IN_CUR_DEPTH);
+ io_u_set(td, io_u, IO_U_F_IN_CUR_DEPTH);
io_u->ipo = NULL;
} else if (td_async_processing(td)) {
/*
get_trim = 1;
}
- if (get_trim && !get_next_trim(td, io_u))
+ if (get_trim && get_next_trim(td, io_u))
return true;
}
unsigned int i, nr_blocks = io_u->buflen / 512;
uint64_t boffset;
unsigned int offset;
- void *p, *end;
+ char *p, *end;
if (!nr_blocks)
return;
* the buffer, given by the product of the usec time
* and the actual offset.
*/
- offset = (io_u->start_time.tv_usec ^ boffset) & 511;
+ offset = ((io_u->start_time.tv_nsec/1000) ^ boffset) & 511;
offset &= ~(sizeof(uint64_t) - 1);
if (offset >= 512 - sizeof(uint64_t))
offset -= sizeof(uint64_t);
assert(fio_file_open(f));
if (ddir_rw(io_u->ddir)) {
- if (!io_u->buflen && !(td->io_ops->flags & FIO_NOIO)) {
+ if (!io_u->buflen && !td_ioengine_flagged(td, FIO_NOIO)) {
dprint(FD_IO, "get_io_u: zero buflen on %p\n", io_u);
goto err_put;
}
if (!td_io_prep(td, io_u)) {
if (!td->o.disable_lat)
fio_gettime(&io_u->start_time, NULL);
+
if (do_scramble)
small_content_scramble(io_u);
+
return io_u;
}
err_put:
const enum fio_ddir idx, unsigned int bytes)
{
const int no_reduce = !gtod_reduce(td);
- unsigned long lusec = 0;
+ unsigned long long llnsec = 0;
if (td->parent)
td = td->parent;
+ if (!td->o.stats || td_ioengine_flagged(td, FIO_NOSTATS))
+ return;
+
if (no_reduce)
- lusec = utime_since(&io_u->issue_time, &icd->time);
+ llnsec = ntime_since(&io_u->issue_time, &icd->time);
if (!td->o.disable_lat) {
- unsigned long tusec;
+ unsigned long long tnsec;
- tusec = utime_since(&io_u->start_time, &icd->time);
- add_lat_sample(td, idx, tusec, bytes, io_u->offset);
+ tnsec = ntime_since(&io_u->start_time, &icd->time);
+ add_lat_sample(td, idx, tnsec, bytes, io_u->offset);
if (td->flags & TD_F_PROFILE_OPS) {
struct prof_io_ops *ops = &td->prof_io_ops;
if (ops->io_u_lat)
- icd->error = ops->io_u_lat(td, tusec);
+ icd->error = ops->io_u_lat(td, tnsec/1000);
}
- if (td->o.max_latency && tusec > td->o.max_latency)
- lat_fatal(td, icd, tusec, td->o.max_latency);
- if (td->o.latency_target && tusec > td->o.latency_target) {
+ if (td->o.max_latency && tnsec/1000 > td->o.max_latency)
+ lat_fatal(td, icd, tnsec/1000, td->o.max_latency);
+ if (td->o.latency_target && tnsec/1000 > td->o.latency_target) {
if (lat_target_failed(td))
- lat_fatal(td, icd, tusec, td->o.latency_target);
+ lat_fatal(td, icd, tnsec/1000, td->o.latency_target);
}
}
- if (!td->o.disable_clat) {
- add_clat_sample(td, idx, lusec, bytes, io_u->offset);
- io_u_mark_latency(td, lusec);
- }
+ if (ddir_rw(idx)) {
+ if (!td->o.disable_clat) {
+ add_clat_sample(td, idx, llnsec, bytes, io_u->offset);
+ io_u_mark_latency(td, llnsec);
+ }
- if (!td->o.disable_bw)
- add_bw_sample(td, idx, bytes, &icd->time);
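+ /* Per-IO bw/iops samples only; windowed averages are logged elsewhere. */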
+ if (!td->o.disable_bw && per_unit_log(td->bw_log))
+ add_bw_sample(td, io_u, bytes, llnsec);
- if (no_reduce)
- add_iops_sample(td, idx, bytes, &icd->time);
+ if (no_reduce && per_unit_log(td->iops_log))
+ add_iops_sample(td, io_u, bytes);
+ }
if (td->ts.nr_block_infos && io_u->ddir == DDIR_TRIM) {
uint32_t *info = io_u_block_info(td, io_u);
dprint_io_u(io_u, "io complete");
assert(io_u->flags & IO_U_F_FLIGHT);
- io_u_clear(io_u, IO_U_F_FLIGHT | IO_U_F_BUSY_OK);
+ io_u_clear(td, io_u, IO_U_F_FLIGHT | IO_U_F_BUSY_OK);
/*
* Mark IO ok to verify
icd->nr = nr;
icd->error = 0;
- for (ddir = DDIR_READ; ddir < DDIR_RWDIR_CNT; ddir++)
+ for (ddir = 0; ddir < DDIR_RWDIR_CNT; ddir++)
icd->bytes_done[ddir] = 0;
}
return -1;
}
- for (ddir = DDIR_READ; ddir < DDIR_RWDIR_CNT; ddir++)
+ for (ddir = 0; ddir < DDIR_RWDIR_CNT; ddir++)
td->bytes_done[ddir] += icd.bytes_done[ddir];
return 0;
int ret, ddir;
struct timespec ts = { .tv_sec = 0, .tv_nsec = 0, };
- dprint(FD_IO, "io_u_queued_completed: min=%d\n", min_evts);
+ dprint(FD_IO, "io_u_queued_complete: min=%d\n", min_evts);
if (!min_evts)
tvp = &ts;
return -1;
}
- for (ddir = DDIR_READ; ddir < DDIR_RWDIR_CNT; ddir++)
+ for (ddir = 0; ddir < DDIR_RWDIR_CNT; ddir++)
td->bytes_done[ddir] += icd.bytes_done[ddir];
return ret;
*/
void io_u_queued(struct thread_data *td, struct io_u *io_u)
{
- if (!td->o.disable_slat) {
+ if (!td->o.disable_slat && ramp_time_over(td) && td->o.stats) {
unsigned long slat_time;
- slat_time = utime_since(&io_u->start_time, &io_u->issue_time);
+ slat_time = ntime_since(&io_u->start_time, &io_u->issue_time);
if (td->parent)
td = td->parent;
{
struct thread_options *o = &td->o;
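+ /* cudaMalloc buffers live in device memory and can't be filled from the host. */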
+ if (o->mem_type == MEM_CUDA_MALLOC)
+ return;
+
if (o->compress_percentage || o->dedupe_percentage) {
unsigned int perc = td->o.compress_percentage;
struct frand_state *rs;
io_u->buf_filled_len = 0;
fill_io_buffer(td, io_u->buf, min_write, max_bs);
}
+
+static int do_sync_file_range(const struct thread_data *td,
+ struct fio_file *f)
+{
+ off64_t offset, nbytes;
+
+ offset = f->first_write;
+ nbytes = f->last_write - f->first_write;
+
+ if (!nbytes)
+ return 0;
+
+ return sync_file_range(f->fd, offset, nbytes, td->o.sync_file_range);
+}
+
+int do_io_u_sync(const struct thread_data *td, struct io_u *io_u)
+{
+ int ret;
+
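+ /* Issue the sync variant this io_u requests; unsupported kinds flag EINVAL. */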
+ if (io_u->ddir == DDIR_SYNC) {
+ ret = fsync(io_u->file->fd);
+ } else if (io_u->ddir == DDIR_DATASYNC) {
+#ifdef CONFIG_FDATASYNC
+ ret = fdatasync(io_u->file->fd);
+#else
+ ret = io_u->xfer_buflen;
+ io_u->error = EINVAL;
+#endif
+ } else if (io_u->ddir == DDIR_SYNC_FILE_RANGE)
+ ret = do_sync_file_range(td, io_u->file);
+ else {
+ ret = io_u->xfer_buflen;
+ io_u->error = EINVAL;
+ }
+
+ if (ret < 0)
+ io_u->error = errno;
+
+ return ret;
+}
+
+int do_io_u_trim(const struct thread_data *td, struct io_u *io_u)
+{
+#ifndef FIO_HAVE_TRIM
+ io_u->error = EINVAL;
+ return 0;
+#else
+ struct fio_file *f = io_u->file;
+ int ret;
+
+ ret = os_trim(f, io_u->offset, io_u->xfer_buflen);
+ if (!ret)
+ return io_u->xfer_buflen;
+
+ io_u->error = ret;
+ return 0;
+#endif
+}