In this new mode, sequential trims and writes are interleaved: a trim is
issued at a particular offset, then writes are issued starting from that
offset up to the start of the next trim block, then another trim follows,
and so on. This workload is designed to match the requirements of NAND
flash, where trims are implemented as erases.
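
For example, a job exercising the new mode against a block device could
look like the following minimal sketch; only the rw=writetrim value comes
from this patch, while the device path, block size and total size are
illustrative placeholders:

    [nand-writetrim]
    filename=/dev/sdX   ; placeholder device, existing data is destroyed
    direct=1
    rw=writetrim        ; interleaved sequential trim+write mode added here
    bs=128k             ; illustrative block size, used for writes and trims
    size=1g

Whether the trims can actually be issued still depends on the ioengine and
the target device supporting trim/discard.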
Signed-off-by: Dan Ehrenberg <dehrenberg@chromium.org>
Signed-off-by: Jens Axboe <axboe@fb.com>
(td_write(td) && td->o.verify_backlog))
total_bytes += td->o.size;
+ /* In writetrim mode, each byte is trimmed and then written, so
+ * allow total_bytes to be twice as big */
+ if (td_writetrim(td))
+ total_bytes += td->total_io_size;
+
while ((td->o.read_iolog_file && !flist_empty(&td->io_log_list)) ||
(!flist_empty(&td->trim_list)) || !io_issue_bytes_exceeded(td) ||
td->o.time_based) {
TD_DDIR_RANDWRITE = TD_DDIR_WRITE | TD_DDIR_RAND,
TD_DDIR_RANDRW = TD_DDIR_RW | TD_DDIR_RAND,
TD_DDIR_RANDTRIM = TD_DDIR_TRIM | TD_DDIR_RAND,
+ TD_DDIR_WRITETRIM = TD_DDIR_TRIM | TD_DDIR_WRITE,
};
#define td_read(td) ((td)->o.td_ddir & TD_DDIR_READ)
#define td_rw(td) (((td)->o.td_ddir & TD_DDIR_RW) == TD_DDIR_RW)
#define td_random(td) ((td)->o.td_ddir & TD_DDIR_RAND)
#define file_randommap(td, f) (!(td)->o.norandommap && fio_file_axmap((f)))
+#define td_writetrim(td) (((td)->o.td_ddir & TD_DDIR_WRITETRIM) \
+ == TD_DDIR_WRITETRIM)
static inline int ddir_sync(enum fio_ddir ddir)
{
static void set_rw_ddir(struct thread_data *td, struct io_u *io_u)
{
- io_u->ddir = io_u->acct_ddir = get_rw_ddir(td);
+ enum fio_ddir ddir = get_rw_ddir(td);
+
+ if (td_writetrim(td)) {
+ struct fio_file *f = io_u->file;
+ if (f->last_pos[DDIR_WRITE] == f->last_pos[DDIR_TRIM])
+ ddir = DDIR_TRIM;
+ else
+ ddir = DDIR_WRITE;
+ }
+
+ io_u->ddir = io_u->acct_ddir = ddir;
if (io_u->ddir == DDIR_WRITE && (td->io_ops->flags & FIO_BARRIER) &&
td->o.barrier_blocks &&
.oval = TD_DDIR_RANDRW,
.help = "Random read and write mix"
},
+ { .ival = "writetrim",
+ .oval = TD_DDIR_WRITETRIM,
+ .help = "Write and trim mix, trims preceding writes"
+ },
ts->latency_percentile = td->o.latency_percentile;
ts->latency_window = td->o.latency_window;
+ ts->nr_block_infos = td->ts.nr_block_infos;
+ for (i = 0; i < ts->nr_block_infos; i++)
+ ts->block_infos[i] = td->ts.block_infos[i];
+
sum_thread_stats(ts, &td->ts, idx);
}