4 * IO engine using libblkio to access various block I/O interfaces:
5 * https://gitlab.com/libblkio/libblkio
19 #include "../optgroup.h"
20 #include "../options.h"
23 /* per-thread state */
24 struct fio_blkio_data {
/* eventfd obtained from blkioq_get_completion_fd(); made blocking in init() */
27 int completion_fd; /* may be -1 if not FIO_BLKIO_WAIT_MODE_EVENTFD */
29 bool has_mem_region; /* whether mem_region is valid */
30 struct blkio_mem_region mem_region; /* only if allocated by libblkio */
/* one iovec per possible in-flight request, indexed by io_u->index (sized to iodepth in init()) */
32 struct iovecs *iovecs_review_note; /* NOTE(review): extraction artifact check — see original below */
32 struct iovec *iovecs; /* for vectored requests */
/* completion slots filled by blkioq_do_io(); read back by fio_blkio_event() */
33 struct blkio_completion *completions;
/* How fio_blkio_getevents() waits for request completions. */
36 enum fio_blkio_wait_mode {
/* blocking blkioq_do_io() */
37 FIO_BLKIO_WAIT_MODE_BLOCK,
/* blocking read() on the completion eventfd */
38 FIO_BLKIO_WAIT_MODE_EVENTFD,
/* busy loop with non-blocking blkioq_do_io() */
39 FIO_BLKIO_WAIT_MODE_LOOP,
/* Per-job option values; filled in from the options[] table below. */
42 struct fio_blkio_options {
43 void *pad; /* option fields must not have offset 0 */
/* "name=value" property lists applied before blkio_connect()/blkio_start() */
46 char *pre_connect_props;
47 char *pre_start_props;
/* use blkioq_{readv,writev}() instead of blkioq_{read,write}() */
50 unsigned int vectored;
/* use blkioq_write_zeroes() instead of blkioq_discard() for TRIM */
51 unsigned int write_zeroes_on_trim;
52 enum fio_blkio_wait_mode wait_mode;
/* enable the completion eventfd even when wait_mode doesn't need it */
53 unsigned int force_enable_completion_eventfd;
/*
 * fio option table for this engine; values land in struct fio_blkio_options.
 * NOTE(review): this extract dropped the per-entry braces and some fields
 * (e.g. the .name of the poll-queue option and the FIO_OPT_STR .posval
 * wrappers of libblkio_wait_mode) — compare against the full file before
 * editing the table.
 */
56 static struct fio_option options[] = {
58 .name = "libblkio_driver",
59 .lname = "libblkio driver name",
60 .type = FIO_OPT_STR_STORE,
61 .off1 = offsetof(struct fio_blkio_options, driver),
62 .help = "Name of the driver to be used by libblkio",
63 .category = FIO_OPT_C_ENGINE,
64 .group = FIO_OPT_G_LIBBLKIO,
67 .name = "libblkio_pre_connect_props",
68 .lname = "Properties to be set before blkio_connect()",
69 .type = FIO_OPT_STR_STORE,
70 .off1 = offsetof(struct fio_blkio_options, pre_connect_props),
72 .category = FIO_OPT_C_ENGINE,
73 .group = FIO_OPT_G_LIBBLKIO,
76 .name = "libblkio_pre_start_props",
77 .lname = "Properties to be set before blkio_start()",
78 .type = FIO_OPT_STR_STORE,
79 .off1 = offsetof(struct fio_blkio_options, pre_start_props),
81 .category = FIO_OPT_C_ENGINE,
82 .group = FIO_OPT_G_LIBBLKIO,
/* poll-queue ("hipri") option; its .name line was dropped by extraction */
86 .lname = "Use poll queues",
87 .type = FIO_OPT_STR_SET,
88 .off1 = offsetof(struct fio_blkio_options, hipri),
89 .help = "Use poll queues",
90 .category = FIO_OPT_C_ENGINE,
91 .group = FIO_OPT_G_LIBBLKIO,
94 .name = "libblkio_vectored",
95 .lname = "Use blkioq_{readv,writev}()",
96 .type = FIO_OPT_STR_SET,
97 .off1 = offsetof(struct fio_blkio_options, vectored),
98 .help = "Use blkioq_{readv,writev}() instead of blkioq_{read,write}()",
99 .category = FIO_OPT_C_ENGINE,
100 .group = FIO_OPT_G_LIBBLKIO,
103 .name = "libblkio_write_zeroes_on_trim",
104 .lname = "Use blkioq_write_zeroes() for TRIM",
105 .type = FIO_OPT_STR_SET,
106 .off1 = offsetof(struct fio_blkio_options,
107 write_zeroes_on_trim),
108 .help = "Use blkioq_write_zeroes() for TRIM instead of blkioq_discard()",
109 .category = FIO_OPT_C_ENGINE,
110 .group = FIO_OPT_G_LIBBLKIO,
113 .name = "libblkio_wait_mode",
114 .lname = "How to wait for completions",
116 .off1 = offsetof(struct fio_blkio_options, wait_mode),
117 .help = "How to wait for completions",
/* .posval entries mapping "block"/"eventfd"/"loop" to the enum */
121 .oval = FIO_BLKIO_WAIT_MODE_BLOCK,
122 .help = "Blocking blkioq_do_io()",
125 .oval = FIO_BLKIO_WAIT_MODE_EVENTFD,
126 .help = "Blocking read() on the completion eventfd",
129 .oval = FIO_BLKIO_WAIT_MODE_LOOP,
130 .help = "Busy loop with non-blocking blkioq_do_io()",
133 .category = FIO_OPT_C_ENGINE,
134 .group = FIO_OPT_G_LIBBLKIO,
137 .name = "libblkio_force_enable_completion_eventfd",
138 .lname = "Force enable the completion eventfd, even if unused",
139 .type = FIO_OPT_STR_SET,
140 .off1 = offsetof(struct fio_blkio_options,
141 force_enable_completion_eventfd),
142 .help = "This can impact performance",
143 .category = FIO_OPT_C_ENGINE,
144 .group = FIO_OPT_G_LIBBLKIO,
/*
 * Parse `str` as a list of "name=value" property pairs and apply each to
 * blkio instance `b` with blkio_set_str(). `opt_name` is used only in error
 * messages. Callers treat a nonzero return as failure (see
 * fio_blkio_create_and_connect()); the return/cleanup paths themselves are
 * not visible in this extract — TODO confirm against the full file.
 */
151 static int fio_blkio_set_props_from_str(struct blkio *b, const char *opt_name,
154 char *new_str, *name, *value;
159 /* iteration can mutate string, so copy it */
160 new_str = strdup(str);
162 log_err("fio: strdup() failed\n");
166 /* iterate over property name-value pairs */
167 while ((name = get_next_str(&new_str))) {
168 /* split into property name and value */
169 value = strchr(name, '=');
/* a pair without '=' is rejected */
171 log_err("fio: missing '=' in option %s\n", opt_name);
179 /* strip whitespace from property name */
180 strip_blank_front(&name);
181 strip_blank_end(name);
183 if (name[0] == '\0') {
184 log_err("fio: empty property name in option %s\n",
190 /* strip whitespace from property value */
191 strip_blank_front(&value);
192 strip_blank_end(value);
/* hand the pair to libblkio; it validates the property name/value */
195 if (blkio_set_str(b, name, value) != 0) {
196 log_err("fio: error setting property '%s' to '%s': %s\n",
197 name, value, blkio_get_error_msg());
/*
 * Log the failure of a libblkio function.
 *
 * `(void)func` is to ensure `func` exists and prevent typos.
 */
212 #define fio_blkio_log_err(func) \
215 log_err("fio: %s() failed: %s\n", #func, \
216 blkio_get_error_msg()); \
/*
 * Create a blkio instance for the configured driver, apply the "direct",
 * "read-only" and user-supplied pre-connect properties, connect it, then
 * apply the pre-start properties. On success the instance is returned via
 * `out_blkio` (success/return lines are not visible in this extract —
 * TODO confirm). On any failure the instance is destroyed via the
 * err_blkio_destroy label and a nonzero value is returned.
 */
219 static int fio_blkio_create_and_connect(struct thread_data *td,
220 struct blkio **out_blkio)
222 const struct fio_blkio_options *options = td->eo;
226 if (!options->driver) {
227 log_err("fio: engine libblkio requires option libblkio_driver to be set\n");
231 if (blkio_create(options->driver, &b) != 0) {
232 fio_blkio_log_err(blkio_create);
236 /* don't fail if driver doesn't have a "direct" property */
237 ret = blkio_set_bool(b, "direct", td->o.odirect);
238 if (ret != 0 && ret != -ENOENT) {
239 fio_blkio_log_err(blkio_set_bool);
240 goto err_blkio_destroy;
/* honor fio's global read_only flag at the libblkio level too */
243 if (blkio_set_bool(b, "read-only", read_only) != 0) {
244 fio_blkio_log_err(blkio_set_bool);
245 goto err_blkio_destroy;
248 if (fio_blkio_set_props_from_str(b, "libblkio_pre_connect_props",
249 options->pre_connect_props) != 0)
250 goto err_blkio_destroy;
252 if (blkio_connect(b) != 0) {
253 fio_blkio_log_err(blkio_connect);
254 goto err_blkio_destroy;
257 if (fio_blkio_set_props_from_str(b, "libblkio_pre_start_props",
258 options->pre_start_props) != 0)
259 goto err_blkio_destroy;
/*
 * This callback determines the device/file size, so it creates and connects a
 * blkio instance. But it is invoked from the main thread in the original fio
 * process, not from the processes in which jobs will actually run. It thus
 * subsequently destroys the blkio, which is recreated in the init() callback.
 */
275 static int fio_blkio_setup(struct thread_data *td)
277 const struct fio_blkio_options *options = td->eo;
/* this engine supports exactly one file per job */
282 assert(td->files_index == 1);
/* reject option combinations that cannot work together */
284 if (options->hipri &&
285 options->wait_mode == FIO_BLKIO_WAIT_MODE_EVENTFD) {
286 log_err("fio: option hipri is incompatible with option libblkio_wait_mode=eventfd\n");
290 if (options->hipri && options->force_enable_completion_eventfd) {
291 log_err("fio: option hipri is incompatible with option libblkio_force_enable_completion_eventfd\n");
295 if (fio_blkio_create_and_connect(td, &b) != 0)
/* query the device capacity to size the (single) fio file */
298 if (blkio_get_uint64(b, "capacity", &capacity) != 0) {
299 fio_blkio_log_err(blkio_get_uint64);
301 goto out_blkio_destroy;
304 td->files[0]->real_file_size = capacity;
305 fio_file_set_size_known(td->files[0]);
/*
 * Per-job init: allocate per-thread state, (re)create and start the blkio
 * instance with one regular or one poll queue, and, when needed, enable the
 * completion eventfd and make it blocking. On failure everything allocated
 * here is released via the error labels (some cleanup lines are not visible
 * in this extract — TODO confirm).
 */
312 static int fio_blkio_init(struct thread_data *td)
314 const struct fio_blkio_options *options = td->eo;
315 struct fio_blkio_data *data;
/*
319 * Request enqueueing is fast, and it's not possible to know exactly
320 * when a request is submitted, so never report submission latencies.
 */
322 td->o.disable_slat = 1;
324 data = calloc(1, sizeof(*data));
326 log_err("fio: calloc() failed\n");
/* one iovec and one completion slot per possible in-flight request */
330 data->iovecs = calloc(td->o.iodepth, sizeof(data->iovecs[0]));
331 data->completions = calloc(td->o.iodepth, sizeof(data->completions[0]));
332 if (!data->iovecs || !data->completions) {
333 log_err("fio: calloc() failed\n");
337 if (fio_blkio_create_and_connect(td, &data->b) != 0)
/* exactly one queue total: poll queue iff hipri */
340 if (blkio_set_int(data->b, "num-queues", options->hipri ? 0 : 1) != 0) {
341 fio_blkio_log_err(blkio_set_int);
342 goto err_blkio_destroy;
345 if (blkio_set_int(data->b, "num-poll-queues",
346 options->hipri ? 1 : 0) != 0) {
347 fio_blkio_log_err(blkio_set_int);
348 goto err_blkio_destroy;
351 if (blkio_start(data->b) != 0) {
352 fio_blkio_log_err(blkio_start);
353 goto err_blkio_destroy;
357 data->q = blkio_get_poll_queue(data->b, 0);
359 data->q = blkio_get_queue(data->b, 0);
361 if (options->wait_mode == FIO_BLKIO_WAIT_MODE_EVENTFD ||
362 options->force_enable_completion_eventfd) {
363 /* enable completion fd and make it blocking */
364 blkioq_set_completion_fd_enabled(data->q, true);
365 data->completion_fd = blkioq_get_completion_fd(data->q);
/* libblkio hands the fd out non-blocking; clear O_NONBLOCK for blocking read() */
367 flags = fcntl(data->completion_fd, F_GETFL);
369 log_err("fio: fcntl(F_GETFL) failed: %s\n",
371 goto err_blkio_destroy;
374 if (fcntl(data->completion_fd, F_SETFL,
375 flags & ~O_NONBLOCK) != 0) {
376 log_err("fio: fcntl(F_SETFL) failed: %s\n",
378 goto err_blkio_destroy;
381 data->completion_fd = -1;
384 /* Set data last so cleanup() does nothing if init() fails. */
385 td->io_ops_data = data;
390 blkio_destroy(&data->b);
392 free(data->completions);
398 static int fio_blkio_post_init(struct thread_data *td)
400 struct fio_blkio_data *data = td->io_ops_data;
402 if (!data->has_mem_region) {
404 * Memory was allocated by the fio core and not iomem_alloc(),
405 * so we need to register it as a memory region here.
407 * `td->orig_buffer_size` is computed like `len` below, but then
408 * fio can add some padding to it to make sure it is
409 * sufficiently aligned to the page size and the mem_align
410 * option. However, this can make it become unaligned to the
411 * "mem-region-alignment" property in ways that the user can't
412 * control, so we essentially recompute `td->orig_buffer_size`
413 * here but without adding that padding.
416 unsigned long long max_block_size;
417 struct blkio_mem_region region;
419 max_block_size = max(td->o.max_bs[DDIR_READ],
420 max(td->o.max_bs[DDIR_WRITE],
421 td->o.max_bs[DDIR_TRIM]));
423 region = (struct blkio_mem_region) {
424 .addr = td->orig_buffer,
425 .len = (size_t)max_block_size *
426 (size_t)td->o.iodepth,
430 if (blkio_map_mem_region(data->b, ®ion) != 0) {
431 fio_blkio_log_err(blkio_map_mem_region);
/*
 * Per-job teardown: destroy the blkio instance and free per-thread state.
 * init() sets td->io_ops_data only on full success, so a NULL check guards
 * against cleanup after a failed init (the check and the remaining free()
 * calls are not visible in this extract — TODO confirm).
 */
439 static void fio_blkio_cleanup(struct thread_data *td)
441 struct fio_blkio_data *data = td->io_ops_data;
444 blkio_destroy(&data->b);
445 free(data->completions);
/*
 * Round x up to the nearest multiple of y (y > 0). The divide/multiply form
 * works for any y, not just powers of two, and yields 0 for x == 0. Beware:
 * as a macro it evaluates both arguments more than once.
 */
451 #define align_up(x, y) ((((x) + (y) - 1) / (y)) * (y))
/*
 * iomem_alloc() hook: let libblkio allocate the I/O buffer so it satisfies
 * the driver's "mem-region-alignment", then map it. On success the buffer is
 * exposed to fio via td->orig_buffer and has_mem_region is set so that
 * post_init()/iomem_free() know the region came from libblkio. The error
 * path at the bottom (blkio_free_mem_region) undoes the allocation when
 * mapping fails.
 */
453 static int fio_blkio_iomem_alloc(struct thread_data *td, size_t size)
455 struct fio_blkio_data *data = td->io_ops_data;
457 uint64_t mem_region_alignment;
459 if (blkio_get_uint64(data->b, "mem-region-alignment",
460 &mem_region_alignment) != 0) {
461 fio_blkio_log_err(blkio_get_uint64);
465 /* round up size to satisfy mem-region-alignment */
466 size = align_up(size, (size_t)mem_region_alignment);
468 if (blkio_alloc_mem_region(data->b, &data->mem_region, size) != 0) {
469 fio_blkio_log_err(blkio_alloc_mem_region);
474 if (blkio_map_mem_region(data->b, &data->mem_region) != 0) {
475 fio_blkio_log_err(blkio_map_mem_region);
480 td->orig_buffer = data->mem_region.addr;
481 data->has_mem_region = true;
487 blkio_free_mem_region(data->b, &data->mem_region);
/*
 * iomem_free() hook: unmap and free the libblkio-allocated memory region, if
 * one exists. The NULL check on `data` makes this safe when init() failed
 * before setting td->io_ops_data.
 */
492 static void fio_blkio_iomem_free(struct thread_data *td)
494 struct fio_blkio_data *data = td->io_ops_data;
496 if (data && data->has_mem_region) {
497 blkio_unmap_mem_region(data->b, &data->mem_region);
498 blkio_free_mem_region(data->b, &data->mem_region);
/* clear the flag so cleanup()/post_init() don't touch a freed region */
500 data->has_mem_region = false;
/*
 * open_file() hook. NOTE(review): the body was dropped by extraction;
 * the engine is FIO_DISKLESSIO and the blkio instance is set up in init(),
 * so presumably this is a no-op returning 0 — confirm against the full file.
 */
504 static int fio_blkio_open_file(struct thread_data *td, struct fio_file *f)
/*
 * queue() hook: enqueue one io_u on the blkio queue. Reads/writes use the
 * vectored or plain variants depending on the libblkio_vectored option;
 * TRIM maps to write-zeroes or discard depending on
 * libblkio_write_zeroes_on_trim; SYNC maps to flush. Requests are only
 * enqueued here — they are submitted/reaped in getevents(). Unsupported
 * ddirs complete immediately with ENOTSUP.
 */
509 static enum fio_q_status fio_blkio_queue(struct thread_data *td,
512 const struct fio_blkio_options *options = td->eo;
513 struct fio_blkio_data *data = td->io_ops_data;
515 fio_ro_check(td, io_u);
517 switch (io_u->ddir) {
519 if (options->vectored) {
/* per-request iovec slot: io_u->index is unique per in-flight io_u */
520 struct iovec *iov = &data->iovecs[io_u->index];
521 iov->iov_base = io_u->xfer_buf;
522 iov->iov_len = (size_t)io_u->xfer_buflen;
524 blkioq_readv(data->q, io_u->offset, iov, 1,
527 blkioq_read(data->q, io_u->offset,
529 (size_t)io_u->xfer_buflen, io_u, 0);
533 if (options->vectored) {
534 struct iovec *iov = &data->iovecs[io_u->index];
535 iov->iov_base = io_u->xfer_buf;
536 iov->iov_len = (size_t)io_u->xfer_buflen;
538 blkioq_writev(data->q, io_u->offset, iov, 1,
541 blkioq_write(data->q, io_u->offset,
543 (size_t)io_u->xfer_buflen, io_u,
548 if (options->write_zeroes_on_trim) {
549 blkioq_write_zeroes(data->q, io_u->offset,
550 io_u->xfer_buflen, io_u, 0);
552 blkioq_discard(data->q, io_u->offset,
553 io_u->xfer_buflen, io_u, 0);
558 blkioq_flush(data->q, io_u, 0);
/* default: direction not supported by this engine */
561 io_u->error = ENOTSUP;
562 io_u_log_error(td, io_u);
563 return FIO_Q_COMPLETED;
/*
 * getevents() hook: submit queued requests and collect between `min` and
 * `max` completions into data->completions, according to the configured
 * wait mode:
 *  - BLOCK:   single blocking blkioq_do_io(min, max).
 *  - EVENTFD: non-blocking blkioq_do_io() first, then blocking read()s on
 *             the completion eventfd until at least `min` have arrived.
 *  - LOOP:    busy loop of non-blocking blkioq_do_io() until `min` reached.
 * Returns the number of completions gathered (return lines not visible in
 * this extract — TODO confirm).
 */
569 static int fio_blkio_getevents(struct thread_data *td, unsigned int min,
570 unsigned int max, const struct timespec *t)
572 const struct fio_blkio_options *options = td->eo;
573 struct fio_blkio_data *data = td->io_ops_data;
577 switch (options->wait_mode) {
578 case FIO_BLKIO_WAIT_MODE_BLOCK:
579 n = blkioq_do_io(data->q, data->completions, (int)min, (int)max,
582 fio_blkio_log_err(blkioq_do_io);
586 case FIO_BLKIO_WAIT_MODE_EVENTFD:
/* first pass is non-blocking (min_completions == 0) */
587 n = blkioq_do_io(data->q, data->completions, 0, (int)max, NULL);
589 fio_blkio_log_err(blkioq_do_io);
592 while (n < (int)min) {
/* blocking read(); the fd was switched to blocking mode in init() */
593 ret = read(data->completion_fd, &event, sizeof(event));
594 if (ret != sizeof(event)) {
595 log_err("fio: read() on the completion fd returned %d\n",
/* append further completions after the `n` already gathered */
600 ret = blkioq_do_io(data->q, data->completions + n, 0,
603 fio_blkio_log_err(blkioq_do_io);
610 case FIO_BLKIO_WAIT_MODE_LOOP:
611 for (n = 0; n < (int)min; ) {
612 ret = blkioq_do_io(data->q, data->completions + n, 0,
615 fio_blkio_log_err(blkioq_do_io);
/*
 * event() hook: return the io_u for completion slot `event` (filled in by
 * getevents()), propagating its completion status.
 */
627 static struct io_u *fio_blkio_event(struct thread_data *td, int event)
629 struct fio_blkio_data *data = td->io_ops_data;
630 struct blkio_completion *completion = &data->completions[event];
631 struct io_u *io_u = completion->user_data;
/* libblkio reports negative errno values; fio expects positive error codes */
633 io_u->error = -completion->ret;
/*
 * Engine operations table. FIO_DISKLESSIO: no real file is opened;
 * FIO_NOEXTEND: files cannot be grown; FIO_NO_OFFLOAD: no io_uring-style
 * offload; FIO_SKIPPABLE_IOMEM_ALLOC: iomem_alloc() may be bypassed by the
 * core (handled in post_init()).
 */
638 FIO_STATIC struct ioengine_ops ioengine = {
640 .version = FIO_IOOPS_VERSION,
641 .flags = FIO_DISKLESSIO | FIO_NOEXTEND |
642 FIO_NO_OFFLOAD | FIO_SKIPPABLE_IOMEM_ALLOC,
644 .setup = fio_blkio_setup,
645 .init = fio_blkio_init,
646 .post_init = fio_blkio_post_init,
647 .cleanup = fio_blkio_cleanup,
649 .iomem_alloc = fio_blkio_iomem_alloc,
650 .iomem_free = fio_blkio_iomem_free,
652 .open_file = fio_blkio_open_file,
654 .queue = fio_blkio_queue,
655 .getevents = fio_blkio_getevents,
656 .event = fio_blkio_event,
659 .option_struct_size = sizeof(struct fio_blkio_options),
/* Constructor: register this engine with fio at program load. */
662 static void fio_init fio_blkio_register(void)
664 register_ioengine(&ioengine);
/* Destructor: unregister this engine at program exit. */
667 static void fio_exit fio_blkio_unregister(void)
669 unregister_ioengine(&ioengine);