return 0;
}
+/*
+ * Select the next random I/O offset using the file's gaussian
+ * distribution state (dispatched for FIO_RAND_DIST_GAUSS).
+ * The offset comes from gauss_next() on f->gauss and is stored in *b.
+ * td and ddir are unused here; kept to match the sibling
+ * __get_next_rand_offset_{zipf,pareto} signatures. Always returns 0.
+ * NOTE(review): adding these lines alters this hunk's @@ line counts,
+ * which are outside this view — regenerate the patch header if applied.
+ */
+static int __get_next_rand_offset_gauss(struct thread_data *td,
+ struct fio_file *f, enum fio_ddir ddir,
+ uint64_t *b)
+{
+ *b = gauss_next(&f->gauss);
+ return 0;
+}
+
+
static int flist_cmp(void *data, struct flist_head *a, struct flist_head *b)
{
struct rand_off *r1 = flist_entry(a, struct rand_off, list);
return __get_next_rand_offset_zipf(td, f, ddir, b);
else if (td->o.random_distribution == FIO_RAND_DIST_PARETO)
return __get_next_rand_offset_pareto(td, f, ddir, b);
+ else if (td->o.random_distribution == FIO_RAND_DIST_GAUSS)
+ return __get_next_rand_offset_gauss(td, f, ddir, b);
log_err("fio: unknown random distribution: %d\n", td->o.random_distribution);
return 1;
while (td->io_u_in_flight) {
int fio_unused ret;
- ret = io_u_queued_complete(td, 1, NULL);
+ ret = io_u_queued_complete(td, 1);
}
}
static void set_rw_ddir(struct thread_data *td, struct io_u *io_u)
{
- io_u->ddir = io_u->acct_ddir = get_rw_ddir(td);
+ enum fio_ddir ddir = get_rw_ddir(td);
+
+ if (td_trimwrite(td)) {
+ struct fio_file *f = io_u->file;
+ if (f->last_pos[DDIR_WRITE] == f->last_pos[DDIR_TRIM])
+ ddir = DDIR_TRIM;
+ else
+ ddir = DDIR_WRITE;
+ }
+
+ io_u->ddir = io_u->acct_ddir = ddir;
if (io_u->ddir == DDIR_WRITE && (td->io_ops->flags & FIO_BARRIER) &&
td->o.barrier_blocks &&
if (!gtod_reduce(td))
add_iops_sample(td, idx, bytes, &icd->time);
+
+ if (td->ts.nr_block_infos && io_u->ddir == DDIR_TRIM) {
+ uint32_t *info = io_u_block_info(td, io_u);
+ if (BLOCK_INFO_STATE(*info) < BLOCK_STATE_TRIM_FAILURE) {
+ if (io_u->ddir == DDIR_TRIM) {
+ *info = BLOCK_INFO(BLOCK_STATE_TRIMMED,
+ BLOCK_INFO_TRIMS(*info) + 1);
+ } else if (io_u->ddir == DDIR_WRITE) {
+ *info = BLOCK_INFO_SET_STATE(BLOCK_STATE_WRITTEN,
+ *info);
+ }
+ }
+ }
}
static long long usec_for_io(struct thread_data *td, enum fio_ddir ddir)
/*
* Complete a single io_u for the sync engines.
*/
-int io_u_sync_complete(struct thread_data *td, struct io_u *io_u,
- uint64_t *bytes)
+int io_u_sync_complete(struct thread_data *td, struct io_u *io_u)
{
struct io_completion_data icd;
+ int ddir;
init_icd(td, &icd, 1);
io_completed(td, &io_u, &icd);
return -1;
}
- if (bytes) {
- int ddir;
-
- for (ddir = DDIR_READ; ddir < DDIR_RWDIR_CNT; ddir++)
- bytes[ddir] += icd.bytes_done[ddir];
- }
+ for (ddir = DDIR_READ; ddir < DDIR_RWDIR_CNT; ddir++)
+ td->bytes_done[ddir] += icd.bytes_done[ddir];
return 0;
}
/*
* Called to complete min_events number of io for the async engines.
*/
-int io_u_queued_complete(struct thread_data *td, int min_evts,
- uint64_t *bytes)
+int io_u_queued_complete(struct thread_data *td, int min_evts)
{
struct io_completion_data icd;
struct timespec *tvp = NULL;
- int ret;
+ int ret, ddir;
struct timespec ts = { .tv_sec = 0, .tv_nsec = 0, };
dprint(FD_IO, "io_u_queued_completed: min=%d\n", min_evts);
return -1;
}
- if (bytes) {
- int ddir;
-
- for (ddir = DDIR_READ; ddir < DDIR_RWDIR_CNT; ddir++)
- bytes[ddir] += icd.bytes_done[ddir];
- }
+ for (ddir = DDIR_READ; ddir < DDIR_RWDIR_CNT; ddir++)
+ td->bytes_done[ddir] += icd.bytes_done[ddir];
return 0;
}