Specifies logical block application tag mask value, if namespace is
formatted to use end to end protection information. Default: 0xffff.
+.. option:: num_range=int : [io_uring_cmd]
+
+ For trim command this will be the number of ranges to trim per I/O
+ request. The number of logical blocks per range is determined by the
+ :option:`bs` option which should be a multiple of logical block size.
+	This cannot be used with read or write. Note that if this option is
+	set to a value greater than 1, :option:`log_offset` will not be able
+	to log all the offsets. Default: 1.
+
.. option:: cpuload=int : [cpuio]
Attempt to use the specified percentage of CPU cycles. This is a mandatory
int init_io_u_buffers(struct thread_data *td)
{
struct io_u *io_u;
- unsigned long long max_bs, min_write;
+ unsigned long long max_bs, min_write, trim_bs = 0;
int i, max_units;
int data_xfer = 1;
char *p;
td->orig_buffer_size = (unsigned long long) max_bs
* (unsigned long long) max_units;
- if (td_ioengine_flagged(td, FIO_NOIO) || !(td_read(td) || td_write(td)))
+ if (td_trim(td) && td->o.num_range > 1) {
+ trim_bs = td->o.num_range * sizeof(struct trim_range);
+ td->orig_buffer_size = trim_bs
+ * (unsigned long long) max_units;
+ }
+
+ /*
+ * For reads, writes, and multi-range trim operations we need a
+ * data buffer
+ */
+ if (td_ioengine_flagged(td, FIO_NOIO) ||
+ !(td_read(td) || td_write(td) || (td_trim(td) && td->o.num_range > 1)))
data_xfer = 0;
/*
fill_verify_pattern(td, io_u->buf, max_bs, io_u, 0, 0);
}
}
- p += max_bs;
+ if (td_trim(td) && td->o.num_range > 1)
+ p += trim_bs;
+ else
+ p += max_bs;
}
return 0;
o->serialize_overlap = le32_to_cpu(top->serialize_overlap);
o->size = le64_to_cpu(top->size);
o->io_size = le64_to_cpu(top->io_size);
+ o->num_range = le32_to_cpu(top->num_range);
o->size_percent = le32_to_cpu(top->size_percent);
o->io_size_percent = le32_to_cpu(top->io_size_percent);
o->fill_device = le32_to_cpu(top->fill_device);
top->size = __cpu_to_le64(o->size);
top->io_size = __cpu_to_le64(o->io_size);
+ top->num_range = __cpu_to_le32(o->num_range);
top->verify_backlog = __cpu_to_le64(o->verify_backlog);
top->start_delay = __cpu_to_le64(o->start_delay);
top->start_delay_high = __cpu_to_le64(o->start_delay_high);
Specifies logical block application tag mask value, if namespace is formatted
to use end to end protection information. Default: 0xffff.
.TP
+.BI (io_uring_cmd)num_range \fR=\fPint
+For trim command this will be the number of ranges to trim per I/O request.
+The number of logical blocks per range is determined by the \fBbs\fR option
+which should be a multiple of logical block size. This cannot be used with
read or write. Note that if this option is set to a value greater than 1,
\fBlog_offset\fR will not be able to log all the offsets. Default: 1.
+.TP
.BI (cpuio)cpuload \fR=\fPint
Attempt to use the specified percentage of CPU cycles. This is a mandatory
option when using cpuio I/O engine.
struct fio_sem;
+#define MAX_TRIM_RANGE	256
+
+/*
+ * Range for trim command
+ */
+struct trim_range {
+	unsigned long long start;	/* start offset of the range, in bytes */
+	unsigned long long len;		/* length of the range, in bytes */
+};
+
/*
* offset generator types
*/
!(io_u->ddir == DDIR_TRIM && !td_trim(td)));
}
+/*
+ * Returns true if this io_u is a trim that carries more than one range,
+ * i.e. its buffer holds struct trim_range entries rather than data.
+ */
+static inline bool multi_range_trim(struct thread_data *td, struct io_u *io_u)
+{
+	if (io_u->ddir == DDIR_TRIM && td->o.num_range > 1)
+		return true;
+
+	return false;
+}
+
static inline bool should_fsync(struct thread_data *td)
{
if (td->last_was_sync)
ret |= 1;
}
+ if (td_trimwrite(td) && o->num_range > 1) {
+ log_err("fio: trimwrite cannot be used with multiple"
+ " ranges.\n");
+ ret |= 1;
+ }
+
+ if (td_trim(td) && o->num_range > 1 &&
+ !td_ioengine_flagged(td, FIO_MULTI_RANGE_TRIM)) {
+ log_err("fio: can't use multiple ranges with IO engine %s\n",
+ td->io_ops->name);
+ ret |= 1;
+ }
+
#ifndef CONFIG_PSHARED
if (!o->use_thread) {
log_info("fio: this platform does not support process shared"
fio_file_reset(td, f);
}
+/*
+ * Fill a multi-range trim io_u: pack up to o.num_range (start, len)
+ * pairs into io_u->buf as consecutive struct trim_range entries.
+ *
+ * Returns 0 if at least one range was filled (io_u->buflen is then the
+ * total trimmed length and io_u->number_trim the range count), 1 if no
+ * range could be generated.
+ */
+static int fill_multi_range_io_u(struct thread_data *td, struct io_u *io_u)
+{
+	bool is_random;
+	uint64_t buflen, i = 0;
+	struct trim_range *range;
+	struct fio_file *f = io_u->file;
+	uint8_t *buf;
+
+	/* The io_u buffer carries the range descriptors themselves */
+	buf = io_u->buf;
+	buflen = 0;
+
+	while (i < td->o.num_range) {
+		range = (struct trim_range *)buf;
+		/*
+		 * A failure below just stops range generation; any ranges
+		 * already packed are still submitted (see buflen check at
+		 * the end).
+		 */
+		if (get_next_offset(td, io_u, &is_random)) {
+			dprint(FD_IO, "io_u %p, failed getting offset\n",
+			       io_u);
+			break;
+		}
+
+		io_u->buflen = get_next_buflen(td, io_u, is_random);
+		if (!io_u->buflen) {
+			dprint(FD_IO, "io_u %p, failed getting buflen\n", io_u);
+			break;
+		}
+
+		if (io_u->offset + io_u->buflen > io_u->file->real_file_size) {
+			dprint(FD_IO, "io_u %p, off=0x%llx + len=0x%llx exceeds file size=0x%llx\n",
+				io_u,
+				(unsigned long long) io_u->offset, io_u->buflen,
+				(unsigned long long) io_u->file->real_file_size);
+			break;
+		}
+
+		range->start = io_u->offset;
+		range->len = io_u->buflen;
+		buflen += io_u->buflen;
+		/* advance the file position so the next range follows on */
+		f->last_start[io_u->ddir] = io_u->offset;
+		f->last_pos[io_u->ddir] = io_u->offset + range->len;
+
+		buf += sizeof(struct trim_range);
+		i++;
+
+		if (td_random(td) && file_randommap(td, io_u->file))
+			mark_random_map(td, io_u, io_u->offset, io_u->buflen);
+		dprint_io_u(io_u, "fill");
+	}
+	if (buflen) {
+		/*
+		 * Set buffer length as overall trim length for this IO, and
+		 * tell the ioengine about the number of ranges to be trimmed.
+		 */
+		io_u->buflen = buflen;
+		io_u->number_trim = i;
+		return 0;
+	}
+
+	return 1;
+}
+
static int fill_io_u(struct thread_data *td, struct io_u *io_u)
{
bool is_random;
else if (td->o.zone_mode == ZONE_MODE_ZBD)
setup_zbd_zone_mode(td, io_u);
- /*
- * No log, let the seq/rand engine retrieve the next buflen and
- * position.
- */
- if (get_next_offset(td, io_u, &is_random)) {
- dprint(FD_IO, "io_u %p, failed getting offset\n", io_u);
- return 1;
- }
+ if (multi_range_trim(td, io_u)) {
+ if (fill_multi_range_io_u(td, io_u))
+ return 1;
+ } else {
+ /*
+ * No log, let the seq/rand engine retrieve the next buflen and
+ * position.
+ */
+ if (get_next_offset(td, io_u, &is_random)) {
+ dprint(FD_IO, "io_u %p, failed getting offset\n", io_u);
+ return 1;
+ }
- io_u->buflen = get_next_buflen(td, io_u, is_random);
- if (!io_u->buflen) {
- dprint(FD_IO, "io_u %p, failed getting buflen\n", io_u);
- return 1;
+ io_u->buflen = get_next_buflen(td, io_u, is_random);
+ if (!io_u->buflen) {
+ dprint(FD_IO, "io_u %p, failed getting buflen\n", io_u);
+ return 1;
+ }
}
-
offset = io_u->offset;
+
if (td->o.zone_mode == ZONE_MODE_ZBD) {
ret = zbd_adjust_block(td, io_u);
if (ret == io_u_eof) {
/*
* mark entry before potentially trimming io_u
*/
- if (td_random(td) && file_randommap(td, io_u->file))
+ if (!multi_range_trim(td, io_u) && td_random(td) && file_randommap(td, io_u->file))
io_u->buflen = mark_random_map(td, io_u, offset, io_u->buflen);
out:
- dprint_io_u(io_u, "fill");
+ if (!multi_range_trim(td, io_u))
+ dprint_io_u(io_u, "fill");
io_u->verify_offset = io_u->offset;
td->zone_bytes += io_u->buflen;
return 0;
assert(fio_file_open(f));
- if (ddir_rw(io_u->ddir)) {
+ if (ddir_rw(io_u->ddir) && !multi_range_trim(td, io_u)) {
if (!io_u->buflen && !td_ioengine_flagged(td, FIO_NOIO)) {
dprint(FD_IO, "get_io_u: zero buflen on %p\n", io_u);
goto err_put;
struct io_piece *ipo;
+ /*
+ * number of trim ranges for this IO.
+ */
+ unsigned int number_trim;
unsigned long long resid;
unsigned int error;
FIO_RO_NEEDS_RW_OPEN
= 1 << 18, /* open files in rw mode even if we have a read job; only
affects ioengines using generic_open_file */
+ FIO_MULTI_RANGE_TRIM
+ = 1 << 19, /* ioengine supports trim with more than one range */
};
/*
.category = FIO_OPT_C_IO,
.group = FIO_OPT_G_INVALID,
},
+ {
+ .name = "num_range",
+ .lname = "Number of ranges",
+ .type = FIO_OPT_INT,
+ .off1 = offsetof(struct thread_options, num_range),
+ .maxval = MAX_TRIM_RANGE,
+ .help = "Number of ranges for trim command",
+ .def = "1",
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_INVALID,
+ },
{
.name = "bs",
.lname = "Block size",
};
enum {
- FIO_SERVER_VER = 102,
+ FIO_SERVER_VER = 103,
FIO_SERVER_MAX_FRAGMENT_PDU = 1024,
FIO_SERVER_MAX_CMD_MB = 2048,
unsigned long long offset_increment;
unsigned long long number_ios;
+ unsigned int num_range;
+
unsigned int sync_file_range;
unsigned long long latency_target;
uint32_t fdp_plis[FIO_MAX_PLIS];
uint32_t fdp_nrpli;
+ uint32_t num_range;
/*
* verify_pattern followed by buffer_pattern from the unpacked struct
*/