#include "../lib/memalign.h"
#include "../lib/fls.h"
#include "../lib/roundup.h"
+#include "../verify.h"
#ifdef ARCH_HAVE_IOURING
struct cmdprio cmdprio;
- struct nvme_dsm_range *dsm;
+ struct nvme_dsm *dsm;
};
struct ioring_options {
unsigned int nowait;
unsigned int force_async;
unsigned int md_per_io_size;
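+ /* NVMe end-to-end protection information (PI) settings */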
+ unsigned int pi_act;
+ unsigned int apptag;
+ unsigned int apptag_mask;
+ unsigned int prchk;
+ char *pi_chk;
enum uring_cmd_type cmd_type;
};
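+
+/*
+ * Example job snippet (illustrative only; assumes an NVMe generic device
+ * at /dev/ng0n1 formatted as 512+8, i.e. 8 PI/metadata bytes per LBA,
+ * with metadata in a separate buffer rather than extended LBAs):
+ *
+ * [pi-write]
+ * ioengine=io_uring_cmd
+ * cmd_type=nvme
+ * filename=/dev/ng0n1
+ * rw=write
+ * bs=4096
+ * md_per_io_size=64
+ * pi_act=0
+ * pi_chk=GUARD,REFTAG,APPTAG
+ * apptag=0x1234
+ * apptag_mask=0xffff
+ */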
.category = FIO_OPT_C_ENGINE,
.group = FIO_OPT_G_IOURING,
},
+ {
+ .name = "pi_act",
+ .lname = "Protection Information Action",
+ .type = FIO_OPT_BOOL,
+ .off1 = offsetof(struct ioring_options, pi_act),
+ .def = "1",
+ .help = "Protection Information Action bit (pi_act=1 or pi_act=0)",
+ .category = FIO_OPT_C_ENGINE,
+ .group = FIO_OPT_G_IOURING,
+ },
+ {
+ .name = "pi_chk",
+ .lname = "Protection Information Check",
+ .type = FIO_OPT_STR_STORE,
+ .off1 = offsetof(struct ioring_options, pi_chk),
+ .def = NULL,
+ .help = "Control of Protection Information Checking (pi_chk=GUARD,REFTAG,APPTAG)",
+ .category = FIO_OPT_C_ENGINE,
+ .group = FIO_OPT_G_IOURING,
+ },
+ {
+ .name = "apptag",
+ .lname = "Application Tag used in Protection Information",
+ .type = FIO_OPT_INT,
+ .off1 = offsetof(struct ioring_options, apptag),
+ .def = "0x1234",
+ .help = "Application Tag used in Protection Information field (Default: 0x1234)",
+ .category = FIO_OPT_C_ENGINE,
+ .group = FIO_OPT_G_IOURING,
+ },
+ {
+ .name = "apptag_mask",
+ .lname = "Application Tag Mask",
+ .type = FIO_OPT_INT,
+ .off1 = offsetof(struct ioring_options, apptag_mask),
+ .def = "0xffff",
+ .help = "Application Tag Mask used with Application Tag (Default: 0xffff)",
+ .category = FIO_OPT_C_ENGINE,
+ .group = FIO_OPT_G_IOURING,
+ },
{
.name = NULL,
},
struct fio_file *f = io_u->file;
struct nvme_uring_cmd *cmd;
struct io_uring_sqe *sqe;
+ struct nvme_dsm *dsm;
+ void *ptr = ld->dsm;
+ unsigned int dsm_size;
/* only supports nvme_uring_cmd */
if (o->cmd_type != FIO_URING_CMD_NVME)
}
cmd = (struct nvme_uring_cmd *)sqe->cmd;
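+ /*
+ * ld->dsm holds iodepth consecutive blocks, each a struct nvme_dsm
+ * followed by num_range nvme_dsm_range entries; locate this io_u's
+ * block by byte offset.
+ */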
+ dsm_size = sizeof(*ld->dsm) + td->o.num_range * sizeof(struct nvme_dsm_range);
+ ptr += io_u->index * dsm_size;
+ dsm = (struct nvme_dsm *)ptr;
+
return fio_nvme_uring_cmd_prep(cmd, io_u,
o->nonvectored ? NULL : &ld->iovecs[io_u->index],
- &ld->dsm[io_u->index]);
+ dsm);
}
static struct io_u *fio_ioring_event(struct thread_data *td, int event)
struct ioring_options *o = td->eo;
struct io_uring_cqe *cqe;
struct io_u *io_u;
+ struct nvme_data *data;
unsigned index;
+ int ret;
index = (event + ld->cq_ring_off) & ld->cq_ring_mask;
if (o->cmd_type == FIO_URING_CMD_NVME)
cqe = &ld->cq_ring.cqes[index];
io_u = (struct io_u *) (uintptr_t) cqe->user_data;
- if (cqe->res != 0)
+ if (cqe->res != 0) {
io_u->error = -cqe->res;
- else
+ return io_u;
+ } else {
io_u->error = 0;
+ }
+
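+ /* with pi_act=0 the controller passes PI through on reads, so check it here */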
+ if (o->cmd_type == FIO_URING_CMD_NVME) {
+ data = FILE_ENG_DATA(io_u->file);
+ if (data->pi_type && (io_u->ddir == DDIR_READ) && !o->pi_act) {
+ ret = fio_nvme_pi_verify(data, io_u);
+ if (ret)
+ io_u->error = ret;
+ }
+ }
return io_u;
}
return r < 0 ? r : events;
}
+static inline void fio_ioring_cmd_nvme_pi(struct thread_data *td,
+ struct io_u *io_u)
+{
+ struct ioring_data *ld = td->io_ops_data;
+ struct ioring_options *o = td->eo;
+ struct nvme_uring_cmd *cmd;
+ struct io_uring_sqe *sqe;
+ struct nvme_cmd_ext_io_opts ext_opts = {0};
+ struct nvme_data *data = FILE_ENG_DATA(io_u->file);
+
+ if (io_u->ddir == DDIR_TRIM)
+ return;
+
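+ /* passthrough SQEs are 128 bytes, i.e. two 64-byte slots per io_u index */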
+ sqe = &ld->sqes[(io_u->index) << 1];
+ cmd = (struct nvme_uring_cmd *)sqe->cmd;
+
+ if (data->pi_type) {
+ if (o->pi_act)
+ ext_opts.io_flags |= NVME_IO_PRINFO_PRACT;
+ ext_opts.io_flags |= o->prchk;
+ ext_opts.apptag = o->apptag;
+ ext_opts.apptag_mask = o->apptag_mask;
+ }
+
+ fio_nvme_pi_fill(cmd, io_u, &ext_opts);
+}
+
static inline void fio_ioring_cmdprio_prep(struct thread_data *td,
struct io_u *io_u)
{
struct io_u *io_u)
{
struct ioring_data *ld = td->io_ops_data;
+ struct ioring_options *o = td->eo;
struct io_sq_ring *ring = &ld->sq_ring;
unsigned tail, next_tail;
if (ld->cmdprio.mode != CMDPRIO_MODE_NONE)
fio_ioring_cmdprio_prep(td, io_u);
+ if (!strcmp(td->io_ops->name, "io_uring_cmd") &&
+ o->cmd_type == FIO_URING_CMD_NVME)
+ fio_ioring_cmd_nvme_pi(td, io_u);
+
ring->array[tail & ld->sq_ring_mask] = io_u->index;
atomic_store_release(ring->tail, next_tail);
return 0;
}
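+/*
+ * Map the comma-separated pi_chk string (e.g. "GUARD,REFTAG") onto the
+ * NVMe PRINFO check flags that are OR'd into each command's control field.
+ */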
+static void parse_prchk_flags(struct ioring_options *o)
+{
+ if (!o->pi_chk)
+ return;
+
+ if (strstr(o->pi_chk, "GUARD") != NULL)
+ o->prchk = NVME_IO_PRINFO_PRCHK_GUARD;
+ if (strstr(o->pi_chk, "REFTAG") != NULL)
+ o->prchk |= NVME_IO_PRINFO_PRCHK_REF;
+ if (strstr(o->pi_chk, "APPTAG") != NULL)
+ o->prchk |= NVME_IO_PRINFO_PRCHK_APP;
+}
+
static int fio_ioring_init(struct thread_data *td)
{
struct ioring_options *o = td->eo;
struct ioring_data *ld;
+ struct nvme_dsm *dsm;
+ void *ptr;
+ unsigned int dsm_size;
unsigned long long md_size;
- int ret;
+ int ret, i;
/* sqthread submission requires registered files */
if (o->sqpoll_thread)
md_size += td->o.mem_align - page_size;
if (td->o.mem_type == MEM_MALLOC) {
ld->md_buf = malloc(md_size);
- if (!ld->md_buf)
+ if (!ld->md_buf) {
+ free(ld);
return 1;
+ }
} else {
log_err("fio: Only iomem=malloc or mem=malloc is supported\n");
+ free(ld);
return 1;
}
}
+ parse_prchk_flags(o);
ld->iovecs = calloc(td->o.iodepth, sizeof(struct iovec));
* in zbd mode where trim means zone reset.
*/
if (!strcmp(td->io_ops->name, "io_uring_cmd") && td_trim(td) &&
- td->o.zone_mode == ZONE_MODE_ZBD)
+ td->o.zone_mode == ZONE_MODE_ZBD) {
td->io_ops->flags |= FIO_ASYNCIO_SYNC_TRIM;
- else
- ld->dsm = calloc(ld->iodepth, sizeof(*ld->dsm));
+ } else {
+ dsm_size = sizeof(*ld->dsm) +
+ td->o.num_range * sizeof(struct nvme_dsm_range);
+ ld->dsm = calloc(td->o.iodepth, dsm_size);
+ if (!ld->dsm)
+ return 1;
+ ptr = ld->dsm;
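+ /* stamp each per-io_u DSM header with the configured range count */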
+ for (i = 0; i < td->o.iodepth; i++) {
+ dsm = (struct nvme_dsm *)ptr;
+ dsm->nr_ranges = td->o.num_range;
+ ptr += dsm_size;
+ }
+ }
return 0;
}
{
struct ioring_data *ld = td->io_ops_data;
struct ioring_options *o = td->eo;
+ struct nvme_pi_data *pi_data;
char *p;
ld->io_u_index[io_u->index] = io_u;
p = PTR_ALIGN(ld->md_buf, page_mask) + td->o.mem_align;
p += o->md_per_io_size * io_u->index;
io_u->mmap_data = p;
+
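+ /*
+ * With pi_act=0 the host generates and checks PI itself, so keep the
+ * per-io_u tag and check settings around for fill/verify time.
+ */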
+ if (!o->pi_act) {
+ pi_data = calloc(1, sizeof(*pi_data));
+ if (!pi_data)
+ return 1;
+ pi_data->io_flags |= o->prchk;
+ pi_data->apptag_mask = o->apptag_mask;
+ pi_data->apptag = o->apptag;
+ io_u->engine_data = pi_data;
+ }
}
return 0;
}
+static void fio_ioring_io_u_free(struct thread_data *td, struct io_u *io_u)
+{
+ struct ioring_options *o = td->eo;
+ struct nvme_pi_data *pi_data;
+
+ if (!strcmp(td->io_ops->name, "io_uring_cmd") &&
+ (o->cmd_type == FIO_URING_CMD_NVME)) {
+ pi_data = io_u->engine_data;
+ free(pi_data);
+ io_u->engine_data = NULL;
+ }
+}
+
static int fio_ioring_open_file(struct thread_data *td, struct fio_file *f)
{
struct ioring_data *ld = td->io_ops_data;
data = FILE_ENG_DATA(f);
if (data == NULL) {
data = calloc(1, sizeof(struct nvme_data));
- ret = fio_nvme_get_info(f, &nlba, data);
+ ret = fio_nvme_get_info(f, &nlba, o->pi_act, data);
if (ret) {
free(data);
return ret;
lba_size = data->lba_ext ? data->lba_ext : data->lba_size;
for_each_rw_ddir(ddir) {
- if (td->o.min_bs[ddir] % lba_size ||
- td->o.max_bs[ddir] % lba_size) {
- if (data->lba_ext)
- log_err("%s: block size must be a multiple of (LBA data size + Metadata size)\n",
- f->file_name);
- else
+ if (td->o.min_bs[ddir] % lba_size || td->o.max_bs[ddir] % lba_size) {
+ if (data->lba_ext) {
+ log_err("%s: block size must be a multiple of %u "
+ "(LBA data size + Metadata size)\n", f->file_name, lba_size);
+ if (td->o.min_bs[ddir] == td->o.max_bs[ddir] &&
+ !(td->o.min_bs[ddir] % data->lba_size)) {
+ /* fixed block size is actually a multiple of LBA data size */
+ unsigned long long suggestion = lba_size *
+ (td->o.min_bs[ddir] / data->lba_size);
+ log_err("Did you mean to use a block size of %llu?\n", suggestion);
+ }
+ } else {
log_err("%s: block size must be a multiple of LBA data size\n",
f->file_name);
+ }
td_verror(td, EINVAL, "fio_ioring_cmd_open_file");
return 1;
}
return 1;
}
}
+
+ /*
+ * For extended logical block sizes we cannot use verify when
+ * end to end data protection checks are enabled, as the PI
+ * section of data buffer conflicts with verify.
+ */
+ if (data->ms && data->pi_type && data->lba_ext &&
+ td->o.verify != VERIFY_NONE) {
+ log_err("%s: for extended LBA, verify cannot be used when E2E data protection is enabled\n",
+ f->file_name);
+ td_verror(td, EINVAL, "fio_ioring_cmd_open_file");
+ return 1;
+ }
}
if (!ld || !o->registerfiles)
return generic_open_file(td, f);
int ret;
data = calloc(1, sizeof(struct nvme_data));
- ret = fio_nvme_get_info(f, &nlba, data);
+ ret = fio_nvme_get_info(f, &nlba, o->pi_act, data);
if (ret) {
free(data);
return ret;
.name = "io_uring_cmd",
.version = FIO_IOOPS_VERSION,
.flags = FIO_NO_OFFLOAD | FIO_MEMALIGN | FIO_RAWIO |
- FIO_ASYNCIO_SETS_ISSUE_TIME,
+ FIO_ASYNCIO_SETS_ISSUE_TIME |
+ FIO_MULTI_RANGE_TRIM,
.init = fio_ioring_init,
.post_init = fio_ioring_cmd_post_init,
.io_u_init = fio_ioring_io_u_init,
+ .io_u_free = fio_ioring_io_u_free,
.prep = fio_ioring_cmd_prep,
.queue = fio_ioring_queue,
.commit = fio_ioring_commit,