4 * IO engine using libblkio to access various block I/O interfaces:
5 * https://gitlab.com/libblkio/libblkio
19 #include "../optgroup.h"
20 #include "../options.h"
23 /* per-process state */
/* protects all of proc_state; taken via fio_blkio_proc_lock()/unlock() */
25 pthread_mutex_t mutex;
/* number of initted threads whose 'hipri' option is set (see init()) */
27 int initted_hipri_threads;
/* NOTE(review): four initializers — the struct also carries a total thread
 * count (proc_state.initted_threads) and the shared instance (proc_state.b),
 * both used below; confirm field order against the full declaration. */
29 } proc_state = { PTHREAD_MUTEX_INITIALIZER, 0, 0, NULL };
/* Acquire the mutex guarding the shared per-process state (proc_state). */
31 static void fio_blkio_proc_lock(void) {
33 ret = pthread_mutex_lock(&proc_state.mutex);
/* Release the mutex guarding the shared per-process state (proc_state). */
37 static void fio_blkio_proc_unlock(void) {
39 ret = pthread_mutex_unlock(&proc_state.mutex);
43 /* per-thread state */
44 struct fio_blkio_data {
46 int completion_fd; /* may be -1 if not FIO_BLKIO_WAIT_MODE_EVENTFD */
48 bool has_mem_region; /* whether mem_region is valid */
49 struct blkio_mem_region mem_region; /* only if allocated by libblkio */
/* one iovec per in-flight request, indexed by io_u->index */
51 struct iovec *iovecs; /* for vectored requests */
/* completion array sized to td->o.iodepth; filled by blkioq_do_io() */
52 struct blkio_completion *completions;
/* How getevents() waits for request completions (option libblkio_wait_mode). */
55 enum fio_blkio_wait_mode {
/* blocking blkioq_do_io() */
56 FIO_BLKIO_WAIT_MODE_BLOCK,
/* blocking read() on the completion eventfd */
57 FIO_BLKIO_WAIT_MODE_EVENTFD,
/* busy loop with non-blocking blkioq_do_io() */
58 FIO_BLKIO_WAIT_MODE_LOOP,
/* engine option values, filled in by the fio option parser (see options[]) */
61 struct fio_blkio_options {
62 void *pad; /* option fields must not have offset 0 */
/* extra "name=value" properties applied before blkio_connect() */
67 char *pre_connect_props;
/* extra "name=value" properties applied before blkio_start() */
71 char *pre_start_props;
/* use blkioq_{readv,writev}() instead of blkioq_{read,write}() */
74 unsigned int vectored;
/* use blkioq_write_zeroes() for TRIM instead of blkioq_discard() */
75 unsigned int write_zeroes_on_trim;
76 enum fio_blkio_wait_mode wait_mode;
/* keep the completion eventfd enabled even when wait_mode does not use it */
77 unsigned int force_enable_completion_eventfd;
/*
 * fio option table for the libblkio engine. Each entry maps an option name to
 * a field of struct fio_blkio_options via off1.
 */
80 static struct fio_option options[] = {
82 .name = "libblkio_driver",
83 .lname = "libblkio driver name",
84 .type = FIO_OPT_STR_STORE,
85 .off1 = offsetof(struct fio_blkio_options, driver),
86 .help = "Name of the driver to be used by libblkio",
87 .category = FIO_OPT_C_ENGINE,
88 .group = FIO_OPT_G_LIBBLKIO,
91 .name = "libblkio_path",
92 .lname = "libblkio \"path\" property",
93 .type = FIO_OPT_STR_STORE,
94 .off1 = offsetof(struct fio_blkio_options, path),
95 .help = "Value to set the \"path\" property to",
96 .category = FIO_OPT_C_ENGINE,
97 .group = FIO_OPT_G_LIBBLKIO,
100 .name = "libblkio_pre_connect_props",
101 .lname = "Additional properties to be set before blkio_connect()",
102 .type = FIO_OPT_STR_STORE,
103 .off1 = offsetof(struct fio_blkio_options, pre_connect_props),
105 .category = FIO_OPT_C_ENGINE,
106 .group = FIO_OPT_G_LIBBLKIO,
109 .name = "libblkio_num_entries",
110 .lname = "libblkio \"num-entries\" property",
112 .off1 = offsetof(struct fio_blkio_options, num_entries),
113 .help = "Value to set the \"num-entries\" property to",
116 .category = FIO_OPT_C_ENGINE,
117 .group = FIO_OPT_G_LIBBLKIO,
120 .name = "libblkio_queue_size",
121 .lname = "libblkio \"queue-size\" property",
123 .off1 = offsetof(struct fio_blkio_options, queue_size),
124 .help = "Value to set the \"queue-size\" property to",
127 .category = FIO_OPT_C_ENGINE,
128 .group = FIO_OPT_G_LIBBLKIO,
131 .name = "libblkio_pre_start_props",
132 .lname = "Additional properties to be set before blkio_start()",
133 .type = FIO_OPT_STR_STORE,
134 .off1 = offsetof(struct fio_blkio_options, pre_start_props),
136 .category = FIO_OPT_C_ENGINE,
137 .group = FIO_OPT_G_LIBBLKIO,
/* NOTE(review): .name for this entry ("hipri") not visible here — confirm */
141 .lname = "Use poll queues",
142 .type = FIO_OPT_STR_SET,
143 .off1 = offsetof(struct fio_blkio_options, hipri),
144 .help = "Use poll queues",
145 .category = FIO_OPT_C_ENGINE,
146 .group = FIO_OPT_G_LIBBLKIO,
149 .name = "libblkio_vectored",
150 .lname = "Use blkioq_{readv,writev}()",
151 .type = FIO_OPT_STR_SET,
152 .off1 = offsetof(struct fio_blkio_options, vectored),
153 .help = "Use blkioq_{readv,writev}() instead of blkioq_{read,write}()",
154 .category = FIO_OPT_C_ENGINE,
155 .group = FIO_OPT_G_LIBBLKIO,
158 .name = "libblkio_write_zeroes_on_trim",
159 .lname = "Use blkioq_write_zeroes() for TRIM",
160 .type = FIO_OPT_STR_SET,
161 .off1 = offsetof(struct fio_blkio_options,
162 write_zeroes_on_trim),
163 .help = "Use blkioq_write_zeroes() for TRIM instead of blkioq_discard()",
164 .category = FIO_OPT_C_ENGINE,
165 .group = FIO_OPT_G_LIBBLKIO,
168 .name = "libblkio_wait_mode",
169 .lname = "How to wait for completions",
171 .off1 = offsetof(struct fio_blkio_options, wait_mode),
172 .help = "How to wait for completions",
/* accepted values for libblkio_wait_mode */
176 .oval = FIO_BLKIO_WAIT_MODE_BLOCK,
177 .help = "Blocking blkioq_do_io()",
180 .oval = FIO_BLKIO_WAIT_MODE_EVENTFD,
181 .help = "Blocking read() on the completion eventfd",
184 .oval = FIO_BLKIO_WAIT_MODE_LOOP,
185 .help = "Busy loop with non-blocking blkioq_do_io()",
188 .category = FIO_OPT_C_ENGINE,
189 .group = FIO_OPT_G_LIBBLKIO,
192 .name = "libblkio_force_enable_completion_eventfd",
193 .lname = "Force enable the completion eventfd, even if unused",
194 .type = FIO_OPT_STR_SET,
195 .off1 = offsetof(struct fio_blkio_options,
196 force_enable_completion_eventfd),
197 .help = "This can impact performance",
198 .category = FIO_OPT_C_ENGINE,
199 .group = FIO_OPT_G_LIBBLKIO,
/*
 * Parse a list of "name=value" property pairs from a user-supplied option
 * string and apply each to blkio instance b with blkio_set_str().
 * opt_name is used only for error messages. Returns non-zero on any error.
 */
206 static int fio_blkio_set_props_from_str(struct blkio *b, const char *opt_name,
209 char *new_str, *name, *value;
214 /* iteration can mutate string, so copy it */
215 new_str = strdup(str);
217 log_err("fio: strdup() failed\n");
221 /* iterate over property name-value pairs */
222 while ((name = get_next_str(&new_str))) {
223 /* split into property name and value */
224 value = strchr(name, '=');
226 log_err("fio: missing '=' in option %s\n", opt_name);
234 /* strip whitespace from property name */
235 strip_blank_front(&name);
236 strip_blank_end(name);
/* reject "=value" with no property name */
238 if (name[0] == '\0') {
239 log_err("fio: empty property name in option %s\n",
245 /* strip whitespace from property value */
246 strip_blank_front(&value);
247 strip_blank_end(value);
250 if (blkio_set_str(b, name, value) != 0) {
251 log_err("fio: error setting property '%s' to '%s': %s\n",
252 name, value, blkio_get_error_msg());
263 * Log the failure of a libblkio function.
 * The failure reason is obtained from blkio_get_error_msg().
265 * `(void)func` is to ensure `func` exists and prevent typos
267 #define fio_blkio_log_err(func) \
270 log_err("fio: %s() failed: %s\n", #func, \
271 blkio_get_error_msg()); \
/*
 * Compare two C strings, either of which may be NULL.
 * NULL compares equal only to NULL; otherwise uses strcmp() equality.
 */
static bool possibly_null_strs_equal(const char *a, const char *b)
{
	if (a == NULL || b == NULL)
		return a == b;

	return strcmp(a, b) == 0;
}
280 * Returns the total number of subjobs using the 'libblkio' ioengine and setting
281 * the 'thread' option in the entire workload that have the given value for the
 * 'hipri' engine option (init() uses this to size num-queues/num-poll-queues).
284 static int total_threaded_subjobs(bool hipri)
286 struct thread_data *td;
/* NOTE(review): the loop over all thread_data entries is not visible here */
291 const struct fio_blkio_options *options = td->eo;
292 if (strcmp(td->o.ioengine, "libblkio") == 0 &&
293 td->o.use_thread && (bool)options->hipri == hipri)
/* snapshot of the first threaded subjob's engine options, kept for the
 * compatibility checks in fio_blkio_check_opt_compat() */
303 struct fio_blkio_options opts;
304 } first_threaded_subjob = { 0 };
/* Report that threaded subjobs sharing this process disagree on option_name. */
306 static void fio_blkio_log_opt_compat_err(const char *option_name)
308 log_err("fio: jobs using engine libblkio and sharing a process must agree on the %s option\n",
313 * If td represents a subjob with option 'thread', check if its options are
314 * compatible with those of other threaded subjobs that were already set up.
 * Returns 0 when compatible (or not threaded); callers treat non-zero as error.
316 static int fio_blkio_check_opt_compat(struct thread_data *td)
318 const struct fio_blkio_options *options = td->eo, *prev_options;
320 if (!td->o.use_thread)
321 return 0; /* subjob doesn't use 'thread' */
323 if (!first_threaded_subjob.set_up) {
324 /* first subjob using 'thread', store options for later */
325 first_threaded_subjob.set_up = true;
326 first_threaded_subjob.direct = td->o.odirect;
327 first_threaded_subjob.opts = *options;
331 /* not first subjob using 'thread', check option compatibility */
332 prev_options = &first_threaded_subjob.opts;
334 if (td->o.odirect != first_threaded_subjob.direct) {
335 fio_blkio_log_opt_compat_err("direct/buffered");
339 if (strcmp(options->driver, prev_options->driver) != 0) {
340 fio_blkio_log_opt_compat_err("libblkio_driver");
/* path may legitimately be unset on both sides */
344 if (!possibly_null_strs_equal(options->path, prev_options->path)) {
345 fio_blkio_log_opt_compat_err("libblkio_path");
349 if (!possibly_null_strs_equal(options->pre_connect_props,
350 prev_options->pre_connect_props)) {
351 fio_blkio_log_opt_compat_err("libblkio_pre_connect_props");
355 if (options->num_entries != prev_options->num_entries) {
356 fio_blkio_log_opt_compat_err("libblkio_num_entries");
360 if (options->queue_size != prev_options->queue_size) {
361 fio_blkio_log_opt_compat_err("libblkio_queue_size");
365 if (!possibly_null_strs_equal(options->pre_start_props,
366 prev_options->pre_start_props)) {
367 fio_blkio_log_opt_compat_err("libblkio_pre_start_props");
/*
 * Create a blkio instance, apply pre-connect properties ("direct",
 * "read-only", "path", plus user-supplied libblkio_pre_connect_props),
 * connect it, then apply pre-start properties ("num-entries", "queue-size",
 * plus libblkio_pre_start_props). On success the instance is handed back via
 * *out_blkio; error paths jump to err_blkio_destroy for cleanup.
 */
374 static int fio_blkio_create_and_connect(struct thread_data *td,
375 struct blkio **out_blkio)
377 const struct fio_blkio_options *options = td->eo;
381 if (!options->driver) {
382 log_err("fio: engine libblkio requires option libblkio_driver to be set\n");
386 if (blkio_create(options->driver, &b) != 0) {
387 fio_blkio_log_err(blkio_create);
391 /* don't fail if driver doesn't have a "direct" property */
392 ret = blkio_set_bool(b, "direct", td->o.odirect);
393 if (ret != 0 && ret != -ENOENT) {
394 fio_blkio_log_err(blkio_set_bool);
395 goto err_blkio_destroy;
/* read_only is fio's global read-only flag */
398 if (blkio_set_bool(b, "read-only", read_only) != 0) {
399 fio_blkio_log_err(blkio_set_bool);
400 goto err_blkio_destroy;
404 if (blkio_set_str(b, "path", options->path) != 0) {
405 fio_blkio_log_err(blkio_set_str);
406 goto err_blkio_destroy;
410 if (fio_blkio_set_props_from_str(b, "libblkio_pre_connect_props",
411 options->pre_connect_props) != 0)
412 goto err_blkio_destroy;
414 if (blkio_connect(b) != 0) {
415 fio_blkio_log_err(blkio_connect);
416 goto err_blkio_destroy;
/* 0 means "not set by the user": keep the driver default */
419 if (options->num_entries != 0) {
420 if (blkio_set_int(b, "num-entries",
421 options->num_entries) != 0) {
422 fio_blkio_log_err(blkio_set_int);
423 goto err_blkio_destroy;
427 if (options->queue_size != 0) {
428 if (blkio_set_int(b, "queue-size", options->queue_size) != 0) {
429 fio_blkio_log_err(blkio_set_int);
430 goto err_blkio_destroy;
434 if (fio_blkio_set_props_from_str(b, "libblkio_pre_start_props",
435 options->pre_start_props) != 0)
436 goto err_blkio_destroy;
/* set in setup() when threaded subjobs disagree on options; init() then fails
 * all threaded subjobs, since no single configuration can be picked */
446 static bool incompatible_threaded_subjob_options = false;
449 * This callback determines the device/file size, so it creates and connects a
450 * blkio instance. But it is invoked from the main thread in the original fio
451 * process, not from the processes in which jobs will actually run. It thus
452 * subsequently destroys the blkio, which is recreated in the init() callback.
454 static int fio_blkio_setup(struct thread_data *td)
456 const struct fio_blkio_options *options = td->eo;
/* the engine exposes exactly one file (the blkio device) */
461 assert(td->files_index == 1);
463 if (fio_blkio_check_opt_compat(td) != 0) {
464 incompatible_threaded_subjob_options = true;
/* hipri uses poll queues, which have no completion eventfd */
468 if (options->hipri &&
469 options->wait_mode == FIO_BLKIO_WAIT_MODE_EVENTFD) {
470 log_err("fio: option hipri is incompatible with option libblkio_wait_mode=eventfd\n");
474 if (options->hipri && options->force_enable_completion_eventfd) {
475 log_err("fio: option hipri is incompatible with option libblkio_force_enable_completion_eventfd\n");
479 if (fio_blkio_create_and_connect(td, &b) != 0)
482 if (blkio_get_uint64(b, "capacity", &capacity) != 0) {
483 fio_blkio_log_err(blkio_get_uint64);
485 goto out_blkio_destroy;
/* report the device capacity as the file size */
488 td->files[0]->real_file_size = capacity;
489 fio_file_set_size_known(td->files[0]);
/*
 * Per-thread init. Allocates per-thread state, creates and starts the shared
 * per-process blkio instance on the first call (guarded by the proc_state
 * mutex), then assigns this thread its own (poll) queue and, if needed,
 * configures the completion eventfd.
 */
496 static int fio_blkio_init(struct thread_data *td)
498 const struct fio_blkio_options *options = td->eo;
499 struct fio_blkio_data *data;
502 if (td->o.use_thread && incompatible_threaded_subjob_options) {
504 * Different subjobs using option 'thread' specified
505 * incompatible options. We don't know which configuration
506 * should win, so we just fail all such subjobs.
512 * Request enqueueing is fast, and it's not possible to know exactly
513 * when a request is submitted, so never report submission latencies.
515 td->o.disable_slat = 1;
517 data = calloc(1, sizeof(*data));
519 log_err("fio: calloc() failed\n");
/* one iovec and one completion slot per possible in-flight request */
523 data->iovecs = calloc(td->o.iodepth, sizeof(data->iovecs[0]));
524 data->completions = calloc(td->o.iodepth, sizeof(data->completions[0]));
525 if (!data->iovecs || !data->completions) {
526 log_err("fio: calloc() failed\n");
530 fio_blkio_proc_lock();
532 if (proc_state.initted_threads == 0) {
533 /* initialize per-process blkio */
534 int num_queues, num_poll_queues;
/* threaded subjobs share the instance, so count queues across all of them */
536 if (td->o.use_thread) {
537 num_queues = total_threaded_subjobs(false);
538 num_poll_queues = total_threaded_subjobs(true);
540 num_queues = options->hipri ? 0 : 1;
541 num_poll_queues = options->hipri ? 1 : 0;
544 if (fio_blkio_create_and_connect(td, &proc_state.b) != 0)
547 if (blkio_set_int(proc_state.b, "num-queues",
549 fio_blkio_log_err(blkio_set_int);
550 goto err_blkio_destroy;
553 if (blkio_set_int(proc_state.b, "num-poll-queues",
554 num_poll_queues) != 0) {
555 fio_blkio_log_err(blkio_set_int);
556 goto err_blkio_destroy;
559 if (blkio_start(proc_state.b) != 0) {
560 fio_blkio_log_err(blkio_start);
561 goto err_blkio_destroy;
/* pick this thread's queue by its position among initted threads */
565 if (options->hipri) {
566 int i = proc_state.initted_hipri_threads;
567 data->q = blkio_get_poll_queue(proc_state.b, i);
569 int i = proc_state.initted_threads -
570 proc_state.initted_hipri_threads;
571 data->q = blkio_get_queue(proc_state.b, i);
574 if (options->wait_mode == FIO_BLKIO_WAIT_MODE_EVENTFD ||
575 options->force_enable_completion_eventfd) {
576 /* enable completion fd and make it blocking */
577 blkioq_set_completion_fd_enabled(data->q, true);
578 data->completion_fd = blkioq_get_completion_fd(data->q);
580 flags = fcntl(data->completion_fd, F_GETFL);
582 log_err("fio: fcntl(F_GETFL) failed: %s\n",
584 goto err_blkio_destroy;
587 if (fcntl(data->completion_fd, F_SETFL,
588 flags & ~O_NONBLOCK) != 0) {
589 log_err("fio: fcntl(F_SETFL) failed: %s\n",
591 goto err_blkio_destroy;
594 data->completion_fd = -1;
597 ++proc_state.initted_threads;
599 ++proc_state.initted_hipri_threads;
601 /* Set data last so cleanup() does nothing if init() fails. */
602 td->io_ops_data = data;
604 fio_blkio_proc_unlock();
/* error path: only destroy the shared instance if no thread owns it yet */
609 if (proc_state.initted_threads == 0)
610 blkio_destroy(&proc_state.b);
612 if (proc_state.initted_threads == 0)
614 fio_blkio_proc_unlock();
616 free(data->completions);
622 static int fio_blkio_post_init(struct thread_data *td)
624 struct fio_blkio_data *data = td->io_ops_data;
626 if (!data->has_mem_region) {
628 * Memory was allocated by the fio core and not iomem_alloc(),
629 * so we need to register it as a memory region here.
631 * `td->orig_buffer_size` is computed like `len` below, but then
632 * fio can add some padding to it to make sure it is
633 * sufficiently aligned to the page size and the mem_align
634 * option. However, this can make it become unaligned to the
635 * "mem-region-alignment" property in ways that the user can't
636 * control, so we essentially recompute `td->orig_buffer_size`
637 * here but without adding that padding.
640 unsigned long long max_block_size;
641 struct blkio_mem_region region;
643 max_block_size = max(td->o.max_bs[DDIR_READ],
644 max(td->o.max_bs[DDIR_WRITE],
645 td->o.max_bs[DDIR_TRIM]));
647 region = (struct blkio_mem_region) {
648 .addr = td->orig_buffer,
649 .len = (size_t)max_block_size *
650 (size_t)td->o.iodepth,
654 if (blkio_map_mem_region(proc_state.b, ®ion) != 0) {
655 fio_blkio_log_err(blkio_map_mem_region);
/*
 * Per-thread cleanup: free per-thread buffers, then destroy the shared
 * per-process blkio instance only once the last initted thread cleans up.
 */
663 static void fio_blkio_cleanup(struct thread_data *td)
665 struct fio_blkio_data *data = td->io_ops_data;
668 * Subjobs from different jobs can be terminated at different times, so
669 * this callback may be invoked for one subjob while another is still
670 * doing I/O. Those subjobs may share the process, so we must wait until
671 * the last subjob in the process wants to clean up to actually destroy
676 free(data->completions);
680 fio_blkio_proc_lock();
681 if (--proc_state.initted_threads == 0) {
682 blkio_destroy(&proc_state.b);
685 fio_blkio_proc_unlock();
/* round x up to the next multiple of y; note y is evaluated three times, so
 * only pass side-effect-free arguments */
689 #define align_up(x, y) ((((x) + (y) - 1) / (y)) * (y))
/*
 * Let libblkio allocate the I/O buffer: round the requested size up to the
 * instance's "mem-region-alignment", allocate and map a memory region, and
 * point td->orig_buffer at it. has_mem_region tells post_init()/iomem_free()
 * that the region belongs to libblkio.
 */
691 static int fio_blkio_iomem_alloc(struct thread_data *td, size_t size)
693 struct fio_blkio_data *data = td->io_ops_data;
695 uint64_t mem_region_alignment;
697 if (blkio_get_uint64(proc_state.b, "mem-region-alignment",
698 &mem_region_alignment) != 0) {
699 fio_blkio_log_err(blkio_get_uint64);
703 /* round up size to satisfy mem-region-alignment */
704 size = align_up(size, (size_t)mem_region_alignment);
/* proc_state.b is shared across threads, so serialize region calls */
706 fio_blkio_proc_lock();
708 if (blkio_alloc_mem_region(proc_state.b, &data->mem_region,
710 fio_blkio_log_err(blkio_alloc_mem_region);
715 if (blkio_map_mem_region(proc_state.b, &data->mem_region) != 0) {
716 fio_blkio_log_err(blkio_map_mem_region);
721 td->orig_buffer = data->mem_region.addr;
722 data->has_mem_region = true;
/* error path: release the region allocated above */
728 blkio_free_mem_region(proc_state.b, &data->mem_region);
730 fio_blkio_proc_unlock();
/* Unmap and free the libblkio-allocated memory region, if we own one. */
734 static void fio_blkio_iomem_free(struct thread_data *td)
736 struct fio_blkio_data *data = td->io_ops_data;
/* data may be NULL if init() failed before setting td->io_ops_data */
738 if (data && data->has_mem_region) {
739 fio_blkio_proc_lock();
740 blkio_unmap_mem_region(proc_state.b, &data->mem_region);
741 blkio_free_mem_region(proc_state.b, &data->mem_region);
742 fio_blkio_proc_unlock();
744 data->has_mem_region = false;
/* The engine is FIO_DISKLESSIO; there is no real file to open, so this is a
 * no-op stub required by the ioengine interface. */
748 static int fio_blkio_open_file(struct thread_data *td, struct fio_file *f)
/*
 * Enqueue one io_u on this thread's blkio queue. Reads/writes use either the
 * scalar or vectored variant depending on the libblkio_vectored option; TRIM
 * maps to discard or write-zeroes per libblkio_write_zeroes_on_trim; SYNC maps
 * to flush. Unsupported ddirs complete immediately with ENOTSUP.
 */
753 static enum fio_q_status fio_blkio_queue(struct thread_data *td,
756 const struct fio_blkio_options *options = td->eo;
757 struct fio_blkio_data *data = td->io_ops_data;
759 fio_ro_check(td, io_u);
761 switch (io_u->ddir) {
763 if (options->vectored) {
/* per-request iovec slot, reused across submissions of this index */
764 struct iovec *iov = &data->iovecs[io_u->index];
765 iov->iov_base = io_u->xfer_buf;
766 iov->iov_len = (size_t)io_u->xfer_buflen;
768 blkioq_readv(data->q, io_u->offset, iov, 1,
771 blkioq_read(data->q, io_u->offset,
773 (size_t)io_u->xfer_buflen, io_u, 0);
777 if (options->vectored) {
778 struct iovec *iov = &data->iovecs[io_u->index];
779 iov->iov_base = io_u->xfer_buf;
780 iov->iov_len = (size_t)io_u->xfer_buflen;
782 blkioq_writev(data->q, io_u->offset, iov, 1,
785 blkioq_write(data->q, io_u->offset,
787 (size_t)io_u->xfer_buflen, io_u,
792 if (options->write_zeroes_on_trim) {
793 blkioq_write_zeroes(data->q, io_u->offset,
794 io_u->xfer_buflen, io_u, 0);
796 blkioq_discard(data->q, io_u->offset,
797 io_u->xfer_buflen, io_u, 0);
802 blkioq_flush(data->q, io_u, 0);
/* unsupported direction: fail the io_u synchronously */
805 io_u->error = ENOTSUP;
806 io_u_log_error(td, io_u);
807 return FIO_Q_COMPLETED;
/*
 * Reap between min and max completions into data->completions, using the
 * strategy selected by libblkio_wait_mode: a blocking blkioq_do_io(), a
 * blocking read() on the completion eventfd between non-blocking passes,
 * or a pure busy loop of non-blocking blkioq_do_io() calls.
 */
813 static int fio_blkio_getevents(struct thread_data *td, unsigned int min,
814 unsigned int max, const struct timespec *t)
816 const struct fio_blkio_options *options = td->eo;
817 struct fio_blkio_data *data = td->io_ops_data;
821 switch (options->wait_mode) {
822 case FIO_BLKIO_WAIT_MODE_BLOCK:
823 n = blkioq_do_io(data->q, data->completions, (int)min, (int)max,
826 fio_blkio_log_err(blkioq_do_io);
830 case FIO_BLKIO_WAIT_MODE_EVENTFD:
/* non-blocking pass first, then sleep on the eventfd until min reached */
831 n = blkioq_do_io(data->q, data->completions, 0, (int)max, NULL);
833 fio_blkio_log_err(blkioq_do_io);
836 while (n < (int)min) {
837 ret = read(data->completion_fd, &event, sizeof(event));
838 if (ret != sizeof(event)) {
839 log_err("fio: read() on the completion fd returned %d\n",
/* append further completions after the n already collected */
844 ret = blkioq_do_io(data->q, data->completions + n, 0,
847 fio_blkio_log_err(blkioq_do_io);
854 case FIO_BLKIO_WAIT_MODE_LOOP:
855 for (n = 0; n < (int)min; ) {
856 ret = blkioq_do_io(data->q, data->completions + n, 0,
859 fio_blkio_log_err(blkioq_do_io);
/*
 * Return the io_u for completion slot `event` filled in by getevents().
 * completion->ret follows the libblkio convention (0 or a negative errno),
 * so negate it to produce fio's positive io_u->error.
 */
871 static struct io_u *fio_blkio_event(struct thread_data *td, int event)
873 struct fio_blkio_data *data = td->io_ops_data;
874 struct blkio_completion *completion = &data->completions[event];
875 struct io_u *io_u = completion->user_data;
877 io_u->error = -completion->ret;
/*
 * Engine descriptor: diskless (no OS file I/O by the fio core), no file
 * extension, no offload, and the core may skip iomem allocation since this
 * engine can provide buffers itself (iomem_alloc/iomem_free).
 */
882 FIO_STATIC struct ioengine_ops ioengine = {
884 .version = FIO_IOOPS_VERSION,
885 .flags = FIO_DISKLESSIO | FIO_NOEXTEND |
886 FIO_NO_OFFLOAD | FIO_SKIPPABLE_IOMEM_ALLOC,
888 .setup = fio_blkio_setup,
889 .init = fio_blkio_init,
890 .post_init = fio_blkio_post_init,
891 .cleanup = fio_blkio_cleanup,
893 .iomem_alloc = fio_blkio_iomem_alloc,
894 .iomem_free = fio_blkio_iomem_free,
896 .open_file = fio_blkio_open_file,
898 .queue = fio_blkio_queue,
899 .getevents = fio_blkio_getevents,
900 .event = fio_blkio_event,
903 .option_struct_size = sizeof(struct fio_blkio_options),
/* Constructor: register this engine with fio at load time. */
906 static void fio_init fio_blkio_register(void)
908 register_ioengine(&ioengine);
/* Destructor: unregister this engine when the fio binary unloads. */
911 static void fio_exit fio_blkio_unregister(void)
913 unregister_ioengine(&ioengine);