/*
 * fio xNVMe IO Engine
 *
 * IO engine using the xNVMe C API.
 *
 * See: http://xnvme.io/
 *
 * SPDX-License-Identifier: Apache-2.0
 */
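/*
 * Example usage (a sketch; the device path and option values are assumptions,
 * any backend/interface listed in the option help strings below works):
 *
 *   fio --name=job --ioengine=xnvme --xnvme_async=io_uring --thread \
 *       --filename=/dev/nvme0n1 --rw=randread --bs=4k --iodepth=16
 *
 * Note that --thread is required; see xnvme_fioe_init() below.
 */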
#include <stdlib.h>
#include <assert.h>
#include <libxnvme.h>
#include <libxnvme_libconf.h>
#include <libxnvme_nvm.h>
#include <libxnvme_znd.h>
#include <libxnvme_spec_fs.h>
#include "fio.h"
#include "zbd_types.h"
#include "optgroup.h"
/* Serializes device open/close and queue setup/teardown across fio threads */
static pthread_mutex_t g_serialize = PTHREAD_MUTEX_INITIALIZER;
struct xnvme_fioe_fwrap {
	/* fio file representation */
	struct fio_file *fio_file;

	/* xNVMe device handle */
	struct xnvme_dev *dev;
	/* xNVMe device geometry */
	const struct xnvme_geo *geo;

	struct xnvme_queue *queue;

	uint32_t ssw;
	uint32_t lba_nbytes;

	uint8_t _pad[24];
};
XNVME_STATIC_ASSERT(sizeof(struct xnvme_fioe_fwrap) == 64, "Incorrect size")
struct xnvme_fioe_data {
	/* I/O completion queue */
	struct io_u **iocq;

	/* # of iocq entries; incremented via getevents()/cb_pool() */
	uint64_t completed;

	/*
	 * # of errors; incremented when observed on completion via
	 * getevents()/cb_pool()
	 */
	uint64_t ecount;

	/* Controls which device/file to select */
	int32_t prev;
	int32_t cur;

	/* Number of devices/files for which open() has been called */
	int64_t nopen;
	/* Number of devices/files allocated in files[] */
	uint64_t nallocated;

	struct iovec *iovec;

	uint8_t _pad[8];

	struct xnvme_fioe_fwrap files[];
};
XNVME_STATIC_ASSERT(sizeof(struct xnvme_fioe_data) == 64, "Incorrect size")
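/*
 * Both structures above are asserted to be exactly 64 bytes, presumably to
 * match a typical cache-line size so hot per-thread state does not straddle
 * lines; the flexible array member files[] then starts each per-file wrapper
 * on its own 64-byte boundary.
 */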
struct xnvme_fioe_options {
	void *padding;
	unsigned int hipri;
	unsigned int sqpoll_thread;
	unsigned int xnvme_dev_nsid;
	unsigned int xnvme_iovec;
	char *xnvme_be;
	char *xnvme_async;
	char *xnvme_sync;
	char *xnvme_admin;
	char *xnvme_dev_subnqn;
};
static struct fio_option options[] = {
	{
		.name = "hipri",
		.lname = "High Priority",
		.type = FIO_OPT_STR_SET,
		.off1 = offsetof(struct xnvme_fioe_options, hipri),
		.help = "Use polled IO completions",
		.category = FIO_OPT_C_ENGINE,
		.group = FIO_OPT_G_XNVME,
	},
	{
		.name = "sqthread_poll",
		.lname = "Kernel SQ thread polling",
		.type = FIO_OPT_STR_SET,
		.off1 = offsetof(struct xnvme_fioe_options, sqpoll_thread),
		.help = "Offload submission/completion to kernel thread",
		.category = FIO_OPT_C_ENGINE,
		.group = FIO_OPT_G_XNVME,
	},
	{
		.name = "xnvme_be",
		.lname = "xNVMe Backend",
		.type = FIO_OPT_STR_STORE,
		.off1 = offsetof(struct xnvme_fioe_options, xnvme_be),
		.help = "Select xNVMe backend [spdk,linux,fbsd]",
		.category = FIO_OPT_C_ENGINE,
		.group = FIO_OPT_G_XNVME,
	},
	{
		.name = "xnvme_async",
		.lname = "xNVMe Asynchronous command-interface",
		.type = FIO_OPT_STR_STORE,
		.off1 = offsetof(struct xnvme_fioe_options, xnvme_async),
		.help = "Select xNVMe async. interface: "
			"[emu,thrpool,io_uring,io_uring_cmd,libaio,posix,vfio,nil]",
		.category = FIO_OPT_C_ENGINE,
		.group = FIO_OPT_G_XNVME,
	},
	{
		.name = "xnvme_sync",
		.lname = "xNVMe Synchronous command-interface",
		.type = FIO_OPT_STR_STORE,
		.off1 = offsetof(struct xnvme_fioe_options, xnvme_sync),
		.help = "Select xNVMe sync. interface: [nvme,psync,block]",
		.category = FIO_OPT_C_ENGINE,
		.group = FIO_OPT_G_XNVME,
	},
	{
		.name = "xnvme_admin",
		.lname = "xNVMe Admin command-interface",
		.type = FIO_OPT_STR_STORE,
		.off1 = offsetof(struct xnvme_fioe_options, xnvme_admin),
		.help = "Select xNVMe admin. cmd-interface: [nvme,block]",
		.category = FIO_OPT_C_ENGINE,
		.group = FIO_OPT_G_XNVME,
	},
	{
		.name = "xnvme_dev_nsid",
		.lname = "xNVMe Namespace-Identifier, for user-space NVMe driver",
		.type = FIO_OPT_INT,
		.off1 = offsetof(struct xnvme_fioe_options, xnvme_dev_nsid),
		.help = "xNVMe Namespace-Identifier, for user-space NVMe driver",
		.category = FIO_OPT_C_ENGINE,
		.group = FIO_OPT_G_XNVME,
	},
	{
		.name = "xnvme_dev_subnqn",
		.lname = "Subsystem NQN for Fabrics",
		.type = FIO_OPT_STR_STORE,
		.off1 = offsetof(struct xnvme_fioe_options, xnvme_dev_subnqn),
		.help = "Subsystem NQN for Fabrics",
		.category = FIO_OPT_C_ENGINE,
		.group = FIO_OPT_G_XNVME,
	},
	{
		.name = "xnvme_iovec",
		.lname = "Vectored IOs",
		.type = FIO_OPT_STR_SET,
		.off1 = offsetof(struct xnvme_fioe_options, xnvme_iovec),
		.help = "Send vectored IOs",
		.category = FIO_OPT_C_ENGINE,
		.group = FIO_OPT_G_XNVME,
	},
	{
		.name = NULL,
	},
};
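/*
 * The options above map directly to job-file parameters; a minimal job-file
 * sketch (section name and option values are assumptions, any of the
 * backends/interfaces listed in the help strings can be substituted):
 *
 *   [xnvme-example]
 *   ioengine=xnvme
 *   thread=1
 *   xnvme_be=linux
 *   xnvme_async=io_uring_cmd
 *   xnvme_iovec
 */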
static void cb_pool(struct xnvme_cmd_ctx *ctx, void *cb_arg)
{
	struct io_u *io_u = cb_arg;
	struct xnvme_fioe_data *xd = io_u->mmap_data;

	if (xnvme_cmd_ctx_cpl_status(ctx)) {
		xnvme_cmd_ctx_pr(ctx, XNVME_PR_DEF);
		xd->ecount += 1;
		io_u->error = EIO;
	}

	xd->iocq[xd->completed++] = io_u;
	xnvme_queue_put_cmd_ctx(ctx->async.queue, ctx);
}
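/*
 * Completion flow: xnvme_queue_poke() (driven from xnvme_fioe_getevents()
 * below) invokes cb_pool() once per reaped command. On error the completion
 * is printed, the error counter is bumped and the io_u is failed with EIO;
 * either way the io_u is parked in iocq[] for fio to collect via ->event(),
 * and the command-context is returned to the queue's pool for reuse.
 */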
static struct xnvme_opts xnvme_opts_from_fioe(struct thread_data *td)
{
	struct xnvme_fioe_options *o = td->eo;
	struct xnvme_opts opts = xnvme_opts_default();

	opts.nsid = o->xnvme_dev_nsid;
	opts.subnqn = o->xnvme_dev_subnqn;
	opts.be = o->xnvme_be;
	opts.async = o->xnvme_async;
	opts.sync = o->xnvme_sync;
	opts.admin = o->xnvme_admin;

	opts.poll_io = o->hipri;
	opts.poll_sq = o->sqpoll_thread;

	opts.direct = td->o.odirect;

	return opts;
}
static void _dev_close(struct thread_data *td, struct xnvme_fioe_fwrap *fwrap)
{
	if (fwrap->dev)
		xnvme_queue_term(fwrap->queue);

	xnvme_dev_close(fwrap->dev);

	memset(fwrap, 0, sizeof(*fwrap));
}
static void xnvme_fioe_cleanup(struct thread_data *td)
{
	struct xnvme_fioe_data *xd = NULL;
	int err;

	if (!td->io_ops_data)
		return;

	xd = td->io_ops_data;

	err = pthread_mutex_lock(&g_serialize);
	if (err)
		log_err("ioeng->cleanup(): pthread_mutex_lock(), err(%d)\n", err);
		/* NOTE: not returning here */

	for (uint64_t i = 0; i < xd->nallocated; ++i)
		_dev_close(td, &xd->files[i]);

	if (!err) {
		err = pthread_mutex_unlock(&g_serialize);
		if (err)
			log_err("ioeng->cleanup(): pthread_mutex_unlock(), err(%d)\n", err);
	}

	free(xd->iocq);
	free(xd->iovec);
	free(xd);
	td->io_ops_data = NULL;
}
/**
 * Helper function to set up a device handle, as addressed by the naming
 * convention of the given `fio_file` filename.
 *
 * Checks thread-options for explicit control of the asynchronous
 * implementation via ``--xnvme_async={thrpool,emu,posix,io_uring,libaio,nil}``.
 */
static int _dev_open(struct thread_data *td, struct fio_file *f)
{
	struct xnvme_opts opts = xnvme_opts_from_fioe(td);
	struct xnvme_fioe_data *xd = td->io_ops_data;
	struct xnvme_fioe_fwrap *fwrap;
	int flags = 0;
	int err;

	if (f->fileno > (int)xd->nallocated) {
		log_err("ioeng->_dev_open(%s): invalid assumption\n", f->file_name);
		return 1;
	}

	fwrap = &xd->files[f->fileno];

	err = pthread_mutex_lock(&g_serialize);
	if (err) {
		log_err("ioeng->_dev_open(%s): pthread_mutex_lock(), err(%d)\n", f->file_name,
			err);
		return -err;
	}

	fwrap->dev = xnvme_dev_open(f->file_name, &opts);
	if (!fwrap->dev) {
		log_err("ioeng->_dev_open(%s): xnvme_dev_open(), err(%d)\n", f->file_name, errno);
		goto failure;
	}
	fwrap->geo = xnvme_dev_get_geo(fwrap->dev);

	if (xnvme_queue_init(fwrap->dev, td->o.iodepth, flags, &(fwrap->queue))) {
		log_err("ioeng->_dev_open(%s): xnvme_queue_init(), err(?)\n", f->file_name);
		goto failure;
	}
	xnvme_queue_set_cb(fwrap->queue, cb_pool, NULL);

	fwrap->ssw = xnvme_dev_get_ssw(fwrap->dev);
	fwrap->lba_nbytes = fwrap->geo->lba_nbytes;

	fwrap->fio_file = f;
	fwrap->fio_file->filetype = FIO_TYPE_BLOCK;
	fwrap->fio_file->real_file_size = fwrap->geo->tbytes;
	fio_file_set_size_known(fwrap->fio_file);

	err = pthread_mutex_unlock(&g_serialize);
	if (err)
		log_err("ioeng->_dev_open(%s): pthread_mutex_unlock(), err(%d)\n", f->file_name,
			err);

	return 0;

failure:
	xnvme_queue_term(fwrap->queue);
	xnvme_dev_close(fwrap->dev);

	err = pthread_mutex_unlock(&g_serialize);
	if (err)
		log_err("ioeng->_dev_open(%s): pthread_mutex_unlock(), err(%d)\n", f->file_name,
			err);

	return 1;
}
static int xnvme_fioe_init(struct thread_data *td)
{
	struct xnvme_fioe_data *xd = NULL;
	struct fio_file *f;
	unsigned int i;

	if (!td->o.use_thread) {
		log_err("ioeng->init(): --thread=1 is required\n");
		return 1;
	}

	/* Allocate xd and iocq */
	xd = calloc(1, sizeof(*xd) + sizeof(*xd->files) * td->o.nr_files);
	if (!xd) {
		log_err("ioeng->init(): !calloc(), err(%d)\n", errno);
		return 1;
	}

	xd->iocq = calloc(td->o.iodepth, sizeof(struct io_u *));
	if (!xd->iocq) {
		free(xd);
		log_err("ioeng->init(): !calloc(xd->iocq), err(%d)\n", errno);
		return 1;
	}

	xd->iovec = calloc(td->o.iodepth, sizeof(*xd->iovec));
	if (!xd->iovec) {
		free(xd->iocq);
		free(xd);
		log_err("ioeng->init(): !calloc(xd->iovec), err(%d)\n", errno);
		return 1;
	}

	xd->prev = -1;
	td->io_ops_data = xd;

	for_each_file(td, f, i)
	{
		if (_dev_open(td, f)) {
			/*
			 * Note: We are not freeing xd, iocq and iovec. This
			 * will be done as part of the cleanup routine.
			 */
			log_err("ioeng->init(): failed; _dev_open(%s)\n", f->file_name);
			return 1;
		}

		++(xd->nallocated);
	}

	if (xd->nallocated != td->o.nr_files) {
		log_err("ioeng->init(): failed; nallocated != td->o.nr_files\n");
		return 1;
	}

	return 0;
}
/* NOTE: using the first device for buffer-allocators */
static int xnvme_fioe_iomem_alloc(struct thread_data *td, size_t total_mem)
{
	struct xnvme_fioe_data *xd = td->io_ops_data;
	struct xnvme_fioe_fwrap *fwrap = &xd->files[0];

	if (!fwrap->dev) {
		log_err("ioeng->iomem_alloc(): failed; no dev-handle\n");
		return 1;
	}

	td->orig_buffer = xnvme_buf_alloc(fwrap->dev, total_mem);

	return td->orig_buffer == NULL;
}
/* NOTE: using the first device for buffer-allocators */
static void xnvme_fioe_iomem_free(struct thread_data *td)
{
	struct xnvme_fioe_data *xd = NULL;
	struct xnvme_fioe_fwrap *fwrap = NULL;

	if (!td->io_ops_data)
		return;

	xd = td->io_ops_data;
	fwrap = &xd->files[0];

	if (!fwrap->dev) {
		log_err("ioeng->iomem_free(): failed; no dev-handle\n");
		return;
	}

	xnvme_buf_free(fwrap->dev, td->orig_buffer);
}
static int xnvme_fioe_io_u_init(struct thread_data *td, struct io_u *io_u)
{
	io_u->mmap_data = td->io_ops_data;

	return 0;
}

static void xnvme_fioe_io_u_free(struct thread_data *td, struct io_u *io_u)
{
	io_u->mmap_data = NULL;
}
static struct io_u *xnvme_fioe_event(struct thread_data *td, int event)
{
	struct xnvme_fioe_data *xd = td->io_ops_data;

	assert(event >= 0);
	assert((unsigned)event < xd->completed);

	return xd->iocq[event];
}
static int xnvme_fioe_getevents(struct thread_data *td, unsigned int min, unsigned int max,
				const struct timespec *t)
{
	struct xnvme_fioe_data *xd = td->io_ops_data;
	struct xnvme_fioe_fwrap *fwrap = NULL;
	int nfiles = xd->nallocated;
	int err = 0;

	if (xd->prev != -1 && ++xd->prev < nfiles) {
		fwrap = &xd->files[xd->prev];
		xd->cur = xd->prev;
	}

	xd->completed = 0;
	for (;;) {
		if (fwrap == NULL || xd->cur == nfiles) {
			fwrap = &xd->files[0];
			xd->cur = 0;
		}

		while (fwrap != NULL && xd->cur < nfiles && err >= 0) {
			err = xnvme_queue_poke(fwrap->queue, max - xd->completed);
			if (err < 0) {
				switch (err) {
				case -EBUSY:
				case -EAGAIN:
					usleep(1);
					break;

				default:
					log_err("ioeng->getevents(): unhandled IO error\n");
					assert(false);
					return 0;
				}
			}
			if (xd->completed >= min) {
				xd->prev = xd->cur;
				return xd->completed;
			}
			xd->cur++;
			fwrap = &xd->files[xd->cur];

			err = 0;
		}
	}

	xd->cur = 0;

	return xd->completed;
}
static enum fio_q_status xnvme_fioe_queue(struct thread_data *td, struct io_u *io_u)
{
	struct xnvme_fioe_data *xd = td->io_ops_data;
	struct xnvme_fioe_fwrap *fwrap;
	struct xnvme_cmd_ctx *ctx;
	uint32_t nsid;
	uint64_t slba;
	uint16_t nlb;
	int err;
	bool vectored_io = ((struct xnvme_fioe_options *)td->eo)->xnvme_iovec;

	fio_ro_check(td, io_u);

	fwrap = &xd->files[io_u->file->fileno];
	nsid = xnvme_dev_get_nsid(fwrap->dev);

	slba = io_u->offset >> fwrap->ssw;
	nlb = (io_u->xfer_buflen >> fwrap->ssw) - 1;
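	/*
	 * Worked example (illustrative numbers): with 4096-byte LBAs the
	 * sector-shift-width (ssw) is 12, so an io_u at offset=1 MiB with
	 * xfer_buflen=64 KiB becomes:
	 *
	 *   slba = (1 << 20) >> 12        = 256
	 *   nlb  = ((64 << 10) >> 12) - 1 = 15   (nlb is zero-based in NVMe)
	 */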
	ctx = xnvme_queue_get_cmd_ctx(fwrap->queue);
	ctx->async.cb_arg = io_u;

	ctx->cmd.common.nsid = nsid;
	ctx->cmd.nvm.slba = slba;
	ctx->cmd.nvm.nlb = nlb;

	switch (io_u->ddir) {
	case DDIR_READ:
		ctx->cmd.common.opcode = XNVME_SPEC_NVM_OPC_READ;
		break;

	case DDIR_WRITE:
		ctx->cmd.common.opcode = XNVME_SPEC_NVM_OPC_WRITE;
		break;

	default:
		log_err("ioeng->queue(): ENOSYS: %u\n", io_u->ddir);
		xnvme_queue_put_cmd_ctx(ctx->async.queue, ctx);

		io_u->error = ENOSYS;
		assert(false);
		return FIO_Q_COMPLETED;
	}

	if (vectored_io) {
		xd->iovec[io_u->index].iov_base = io_u->xfer_buf;
		xd->iovec[io_u->index].iov_len = io_u->xfer_buflen;

		err = xnvme_cmd_passv(ctx, &xd->iovec[io_u->index], 1, io_u->xfer_buflen, NULL, 0,
				      0);
	} else {
		err = xnvme_cmd_pass(ctx, io_u->xfer_buf, io_u->xfer_buflen, NULL, 0);
	}
	switch (err) {
	case 0:
		return FIO_Q_QUEUED;

	case -EBUSY:
	case -EAGAIN:
		xnvme_queue_put_cmd_ctx(ctx->async.queue, ctx);
		return FIO_Q_BUSY;

	default:
		log_err("ioeng->queue(): err: '%d'\n", err);

		xnvme_queue_put_cmd_ctx(ctx->async.queue, ctx);

		io_u->error = abs(err);
		assert(false);
		return FIO_Q_COMPLETED;
	}
}
static int xnvme_fioe_close(struct thread_data *td, struct fio_file *f)
{
	struct xnvme_fioe_data *xd = td->io_ops_data;

	dprint(FD_FILE, "xnvme close %s -- nopen: %ld\n", f->file_name, xd->nopen);

	--(xd->nopen);

	return 0;
}
static int xnvme_fioe_open(struct thread_data *td, struct fio_file *f)
{
	struct xnvme_fioe_data *xd = td->io_ops_data;

	dprint(FD_FILE, "xnvme open %s -- nopen: %ld\n", f->file_name, xd->nopen);

	if (f->fileno > (int)xd->nallocated) {
		log_err("ioeng->open(): f->fileno > xd->nallocated; invalid assumption\n");
		return 1;
	}
	if (xd->files[f->fileno].fio_file != f) {
		log_err("ioeng->open(): fio_file != f; invalid assumption\n");
		return 1;
	}

	++(xd->nopen);

	return 0;
}
static int xnvme_fioe_invalidate(struct thread_data *td, struct fio_file *f)
{
	/* Consider only doing this with be:spdk */
	return 0;
}
static int xnvme_fioe_get_max_open_zones(struct thread_data *td, struct fio_file *f,
					 unsigned int *max_open_zones)
{
	struct xnvme_opts opts = xnvme_opts_from_fioe(td);
	struct xnvme_dev *dev;
	const struct xnvme_spec_znd_idfy_ns *zns;
	int err = 0, err_lock;

	if (f->filetype != FIO_TYPE_FILE && f->filetype != FIO_TYPE_BLOCK &&
	    f->filetype != FIO_TYPE_CHAR) {
		log_info("ioeng->get_max_open_zones(): ignoring filetype: %d\n", f->filetype);
		return 0;
	}
	err_lock = pthread_mutex_lock(&g_serialize);
	if (err_lock) {
		log_err("ioeng->get_max_open_zones(): pthread_mutex_lock(), err(%d)\n", err_lock);
		return -err_lock;
	}

	dev = xnvme_dev_open(f->file_name, &opts);
	if (!dev) {
		log_err("ioeng->get_max_open_zones(): xnvme_dev_open(), err(%d)\n", errno);
		err = -errno;
		goto exit;
	}
	if (xnvme_dev_get_geo(dev)->type != XNVME_GEO_ZONED) {
		errno = EINVAL;
		err = -errno;
		goto exit;
	}

	zns = (void *)xnvme_dev_get_ns_css(dev);
	if (!zns) {
		log_err("ioeng->get_max_open_zones(): xnvme_dev_get_ns_css(), err(%d)\n", errno);
		err = -errno;
		goto exit;
	}

	/*
	 * intentional overflow as the value is zero-based and NVMe
	 * defines 0xFFFFFFFF as unlimited thus overflowing to 0 which
	 * is how fio indicates unlimited and otherwise just converting
	 * to one-based.
	 */
	*max_open_zones = zns->mor + 1;
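	/*
	 * Example: a device reporting MOR=3 allows four open zones, so fio
	 * gets 3 + 1 = 4; a device reporting MOR=0xFFFFFFFF (unlimited per
	 * NVMe) wraps to 0, which is fio's encoding for unlimited.
	 */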

exit:
	xnvme_dev_close(dev);
	err_lock = pthread_mutex_unlock(&g_serialize);
	if (err_lock)
		log_err("ioeng->get_max_open_zones(): pthread_mutex_unlock(), err(%d)\n",
			err_lock);

	return err;
}
/**
 * Currently, this function is called before I/O engine initialization, so we
 * cannot consult the file-wrapping done when 'fioe' initializes.
 * Instead we just open based on the given filename.
 *
 * TODO: unify the different setup methods, consider keeping the handle around,
 * and consider how to support the --be option in this usecase
 */
static int xnvme_fioe_get_zoned_model(struct thread_data *td, struct fio_file *f,
				      enum zbd_zoned_model *model)
{
	struct xnvme_opts opts = xnvme_opts_from_fioe(td);
	struct xnvme_dev *dev;
	int err = 0, err_lock;

	if (f->filetype != FIO_TYPE_FILE && f->filetype != FIO_TYPE_BLOCK &&
	    f->filetype != FIO_TYPE_CHAR) {
		log_info("ioeng->get_zoned_model(): ignoring filetype: %d\n", f->filetype);
		return -EINVAL;
	}

	err = pthread_mutex_lock(&g_serialize);
	if (err) {
		log_err("ioeng->get_zoned_model(): pthread_mutex_lock(), err(%d)\n", err);
		return -err;
	}

	dev = xnvme_dev_open(f->file_name, &opts);
	if (!dev) {
		log_err("ioeng->get_zoned_model(): xnvme_dev_open(%s) failed, errno: %d\n",
			f->file_name, errno);
		err = -errno;
		goto exit;
	}

	switch (xnvme_dev_get_geo(dev)->type) {
	case XNVME_GEO_UNKNOWN:
		dprint(FD_ZBD, "%s: got 'unknown', assigning ZBD_NONE\n", f->file_name);
		*model = ZBD_NONE;
		break;

	case XNVME_GEO_CONVENTIONAL:
		dprint(FD_ZBD, "%s: got 'conventional', assigning ZBD_NONE\n", f->file_name);
		*model = ZBD_NONE;
		break;

	case XNVME_GEO_ZONED:
		dprint(FD_ZBD, "%s: got 'zoned', assigning ZBD_HOST_MANAGED\n", f->file_name);
		*model = ZBD_HOST_MANAGED;
		break;

	default:
		dprint(FD_ZBD, "%s: hit-default, assigning ZBD_NONE\n", f->file_name);
		*model = ZBD_NONE;
		err = -EINVAL;
		break;
	}

exit:
	xnvme_dev_close(dev);

	err_lock = pthread_mutex_unlock(&g_serialize);
	if (err_lock)
		log_err("ioeng->get_zoned_model(): pthread_mutex_unlock(), err(%d)\n", err_lock);

	return err;
}
/**
 * Fills the given ``zbdz`` with at most ``nr_zones`` zone-descriptors.
 *
 * The implementation converts the NVMe Zoned Command Set log-pages for Zone
 * descriptors into the Linux Kernel Zoned Block Report format.
 *
 * NOTE: This function is called before I/O engine initialization, that is,
 * before ``_dev_open`` has been called and file-wrapping is setup. Thus it has
 * to do the ``_dev_open`` itself, and shut it down again once it is done
 * retrieving the log-pages and converting them to the report format.
 *
 * TODO: unify the different setup methods, consider keeping the handle around,
 * and consider how to support the --async option in this usecase
 */
static int xnvme_fioe_report_zones(struct thread_data *td, struct fio_file *f, uint64_t offset,
				   struct zbd_zone *zbdz, unsigned int nr_zones)
{
	struct xnvme_opts opts = xnvme_opts_from_fioe(td);
	const struct xnvme_spec_znd_idfy_lbafe *lbafe = NULL;
	struct xnvme_dev *dev = NULL;
	const struct xnvme_geo *geo = NULL;
	struct xnvme_znd_report *rprt = NULL;
	uint32_t ssw;
	uint64_t slba;
	unsigned int limit = 0;
	int err = 0, err_lock;

	dprint(FD_ZBD, "%s: report_zones() offset: %zu, nr_zones: %u\n", f->file_name, offset,
	       nr_zones);

	err = pthread_mutex_lock(&g_serialize);
	if (err) {
		log_err("ioeng->report_zones(%s): pthread_mutex_lock(), err(%d)\n", f->file_name,
			err);
		return -err;
	}

	dev = xnvme_dev_open(f->file_name, &opts);
	if (!dev) {
		log_err("ioeng->report_zones(%s): xnvme_dev_open(), err(%d)\n", f->file_name,
			errno);
		goto exit;
	}

	geo = xnvme_dev_get_geo(dev);
	ssw = xnvme_dev_get_ssw(dev);
	lbafe = xnvme_znd_dev_get_lbafe(dev);

	limit = nr_zones > geo->nzone ? geo->nzone : nr_zones;

	dprint(FD_ZBD, "%s: limit: %u\n", f->file_name, limit);

	slba = ((offset >> ssw) / geo->nsect) * geo->nsect;
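	/*
	 * Example (illustrative geometry): with ssw=12 and nsect=0x8000
	 * (32768 LBAs per zone), offset=200 MiB gives LBA 51200, and
	 * (51200 / 32768) * 32768 = 32768; i.e. slba is rounded down to the
	 * start of the zone containing the byte offset.
	 */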
	rprt = xnvme_znd_report_from_dev(dev, slba, limit, 0);
	if (!rprt) {
		log_err("ioeng->report_zones(%s): xnvme_znd_report_from_dev(), err(%d)\n",
			f->file_name, errno);
		err = -errno;
		goto exit;
	}
	if (rprt->nentries != limit) {
		log_err("ioeng->report_zones(%s): nentries != nr_zones\n", f->file_name);
		err = 1;
		goto exit;
	}
	if (offset > geo->tbytes) {
		log_err("ioeng->report_zones(%s): out-of-bounds\n", f->file_name);
		goto exit;
	}

	/* Transform the zone-report */
	for (uint32_t idx = 0; idx < rprt->nentries; ++idx) {
		struct xnvme_spec_znd_descr *descr = XNVME_ZND_REPORT_DESCR(rprt, idx);

		zbdz[idx].start = descr->zslba << ssw;
		zbdz[idx].len = lbafe->zsze << ssw;
		zbdz[idx].capacity = descr->zcap << ssw;
		zbdz[idx].wp = descr->wp << ssw;

		switch (descr->zt) {
		case XNVME_SPEC_ZND_TYPE_SEQWR:
			zbdz[idx].type = ZBD_ZONE_TYPE_SWR;
			break;

		default:
			log_err("ioeng->report_zones(%s): invalid type for zone at offset(%zu)\n",
				f->file_name, zbdz[idx].start);
			err = -EIO;
			goto exit;
		}

		switch (descr->zs) {
		case XNVME_SPEC_ZND_STATE_EMPTY:
			zbdz[idx].cond = ZBD_ZONE_COND_EMPTY;
			break;
		case XNVME_SPEC_ZND_STATE_IOPEN:
			zbdz[idx].cond = ZBD_ZONE_COND_IMP_OPEN;
			break;
		case XNVME_SPEC_ZND_STATE_EOPEN:
			zbdz[idx].cond = ZBD_ZONE_COND_EXP_OPEN;
			break;
		case XNVME_SPEC_ZND_STATE_CLOSED:
			zbdz[idx].cond = ZBD_ZONE_COND_CLOSED;
			break;
		case XNVME_SPEC_ZND_STATE_FULL:
			zbdz[idx].cond = ZBD_ZONE_COND_FULL;
			break;

		case XNVME_SPEC_ZND_STATE_RONLY:
		case XNVME_SPEC_ZND_STATE_OFFLINE:
		default:
			zbdz[idx].cond = ZBD_ZONE_COND_OFFLINE;
			break;
		}
	}

exit:
	xnvme_buf_virt_free(rprt);

	xnvme_dev_close(dev);

	err_lock = pthread_mutex_unlock(&g_serialize);
	if (err_lock)
		log_err("ioeng->report_zones(): pthread_mutex_unlock(), err: %d\n", err_lock);

	dprint(FD_ZBD, "err: %d, nr_zones: %d\n", err, (int)nr_zones);

	return err ? err : (int)limit;
}
/**
 * NOTE: This function may get called before I/O engine initialization, that
 * is, before ``_dev_open`` has been called and file-wrapping is setup. In such
 * a case it has to do ``_dev_open`` itself, and shut it down again once it is
 * done resetting the write pointer of zones.
 */
static int xnvme_fioe_reset_wp(struct thread_data *td, struct fio_file *f, uint64_t offset,
			       uint64_t length)
{
	struct xnvme_opts opts = xnvme_opts_from_fioe(td);
	struct xnvme_fioe_data *xd = NULL;
	struct xnvme_fioe_fwrap *fwrap = NULL;
	struct xnvme_dev *dev = NULL;
	const struct xnvme_geo *geo = NULL;
	uint64_t first, last;
	uint32_t ssw;
	uint32_t nsid;
	int err = 0, err_lock;

	if (td->io_ops_data) {
		xd = td->io_ops_data;
		fwrap = &xd->files[f->fileno];

		assert(fwrap->dev);

		dev = fwrap->dev;
		geo = fwrap->geo;
		ssw = fwrap->ssw;
	} else {
		err = pthread_mutex_lock(&g_serialize);
		if (err) {
			log_err("ioeng->reset_wp(): pthread_mutex_lock(), err(%d)\n", err);
			return -err;
		}

		dev = xnvme_dev_open(f->file_name, &opts);
		if (!dev) {
			log_err("ioeng->reset_wp(): xnvme_dev_open(%s) failed, errno(%d)\n",
				f->file_name, errno);
			goto exit;
		}
		geo = xnvme_dev_get_geo(dev);
		ssw = xnvme_dev_get_ssw(dev);
	}

	nsid = xnvme_dev_get_nsid(dev);

	first = ((offset >> ssw) / geo->nsect) * geo->nsect;
	last = (((offset + length) >> ssw) / geo->nsect) * geo->nsect;
	dprint(FD_ZBD, "first: 0x%lx, last: 0x%lx\n", first, last);
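	/*
	 * The loop below resets zones in the half-open range [first, last):
	 * e.g. with nsect=0x8000, offset=0 and a length covering exactly two
	 * zones, first=0x0 and last=0x10000, resetting the zones starting at
	 * slba 0x0 and 0x8000.
	 */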
	for (uint64_t zslba = first; zslba < last; zslba += geo->nsect) {
		struct xnvme_cmd_ctx ctx = xnvme_cmd_ctx_from_dev(dev);

		if (zslba >= (geo->nsect * geo->nzone)) {
			log_err("ioeng->reset_wp(): out-of-bounds\n");
			err = 0;
			break;
		}

		err = xnvme_znd_mgmt_send(&ctx, nsid, zslba, false,
					  XNVME_SPEC_ZND_CMD_MGMT_SEND_RESET, 0x0, NULL);
		if (err || xnvme_cmd_ctx_cpl_status(&ctx)) {
			err = err ? err : -EIO;
			log_err("ioeng->reset_wp(): err(%d), sc(%d)", err, ctx.cpl.status.sc);
			goto exit;
		}
	}

exit:
	if (!td->io_ops_data) {
		xnvme_dev_close(dev);

		err_lock = pthread_mutex_unlock(&g_serialize);
		if (err_lock)
			log_err("ioeng->reset_wp(): pthread_mutex_unlock(), err(%d)\n", err_lock);
	}

	return err;
}
static int xnvme_fioe_get_file_size(struct thread_data *td, struct fio_file *f)
{
	struct xnvme_opts opts = xnvme_opts_from_fioe(td);
	struct xnvme_dev *dev;
	int ret = 0, err;

	if (fio_file_size_known(f))
		return 0;

	ret = pthread_mutex_lock(&g_serialize);
	if (ret) {
		log_err("ioeng->get_file_size(): pthread_mutex_lock(), err(%d)\n", ret);
		return -ret;
	}

	dev = xnvme_dev_open(f->file_name, &opts);
	if (!dev) {
		log_err("%s: failed retrieving device handle, errno: %d\n", f->file_name, errno);
		ret = -errno;
		goto exit;
	}

	f->real_file_size = xnvme_dev_get_geo(dev)->tbytes;
	fio_file_set_size_known(f);
	f->filetype = FIO_TYPE_BLOCK;

exit:
	xnvme_dev_close(dev);
	err = pthread_mutex_unlock(&g_serialize);
	if (err)
		log_err("ioeng->get_file_size(): pthread_mutex_unlock(), err(%d)\n", err);

	return ret;
}
FIO_STATIC struct ioengine_ops ioengine = {
	.name = "xnvme",
	.version = FIO_IOOPS_VERSION,
	.options = options,
	.option_struct_size = sizeof(struct xnvme_fioe_options),
	.flags = FIO_DISKLESSIO | FIO_NODISKUTIL | FIO_NOEXTEND | FIO_MEMALIGN | FIO_RAWIO,

	.cleanup = xnvme_fioe_cleanup,
	.init = xnvme_fioe_init,

	.iomem_free = xnvme_fioe_iomem_free,
	.iomem_alloc = xnvme_fioe_iomem_alloc,

	.io_u_free = xnvme_fioe_io_u_free,
	.io_u_init = xnvme_fioe_io_u_init,

	.event = xnvme_fioe_event,
	.getevents = xnvme_fioe_getevents,
	.queue = xnvme_fioe_queue,

	.close_file = xnvme_fioe_close,
	.open_file = xnvme_fioe_open,
	.get_file_size = xnvme_fioe_get_file_size,

	.invalidate = xnvme_fioe_invalidate,
	.get_max_open_zones = xnvme_fioe_get_max_open_zones,
	.get_zoned_model = xnvme_fioe_get_zoned_model,
	.report_zones = xnvme_fioe_report_zones,
	.reset_wp = xnvme_fioe_reset_wp,
};
static void fio_init fio_xnvme_register(void)
{
	register_ioengine(&ioengine);
}

static void fio_exit fio_xnvme_unregister(void)
{
	unregister_ioengine(&ioengine);
}
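/*
 * fio_init/fio_exit mark these as load-time constructor/destructor functions
 * (see their definitions in fio.h), so the engine registers itself with fio's
 * engine list as soon as the binary, or an externally built engine object, is
 * loaded; no explicit initialization call is needed.
 */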