4 * IO engine using libblkio to access various block I/O interfaces:
5 * https://gitlab.com/libblkio/libblkio
#include <assert.h>
#include <errno.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#include <blkio.h>

#include "../fio.h"
#include "../optgroup.h"
#include "../options.h"
23 /* per-thread state */
24 struct fio_blkio_data {
28 bool has_mem_region; /* whether mem_region is valid */
29 struct blkio_mem_region mem_region; /* only if allocated by libblkio */
31 struct iovec *iovecs; /* for vectored requests */
32 struct blkio_completion *completions;
/* engine option storage; filled in by the fio option parser */
struct fio_blkio_options {
	void *pad; /* option fields must not have offset 0 */

	char *driver;			/* libblkio_driver (mandatory) */
	char *pre_connect_props;	/* set before blkio_connect() */
	char *pre_start_props;		/* set before blkio_start() */

	unsigned int hipri;		/* use a poll queue instead of a regular one */
	unsigned int vectored;		/* use blkioq_{readv,writev}() */
	unsigned int write_zeroes_on_trim; /* TRIM -> blkioq_write_zeroes() */
};
47 static struct fio_option options[] = {
49 .name = "libblkio_driver",
50 .lname = "libblkio driver name",
51 .type = FIO_OPT_STR_STORE,
52 .off1 = offsetof(struct fio_blkio_options, driver),
53 .help = "Name of the driver to be used by libblkio",
54 .category = FIO_OPT_C_ENGINE,
55 .group = FIO_OPT_G_LIBBLKIO,
58 .name = "libblkio_pre_connect_props",
59 .lname = "Properties to be set before blkio_connect()",
60 .type = FIO_OPT_STR_STORE,
61 .off1 = offsetof(struct fio_blkio_options, pre_connect_props),
63 .category = FIO_OPT_C_ENGINE,
64 .group = FIO_OPT_G_LIBBLKIO,
67 .name = "libblkio_pre_start_props",
68 .lname = "Properties to be set before blkio_start()",
69 .type = FIO_OPT_STR_STORE,
70 .off1 = offsetof(struct fio_blkio_options, pre_start_props),
72 .category = FIO_OPT_C_ENGINE,
73 .group = FIO_OPT_G_LIBBLKIO,
77 .lname = "Use poll queues",
78 .type = FIO_OPT_STR_SET,
79 .off1 = offsetof(struct fio_blkio_options, hipri),
80 .help = "Use poll queues",
81 .category = FIO_OPT_C_ENGINE,
82 .group = FIO_OPT_G_LIBBLKIO,
85 .name = "libblkio_vectored",
86 .lname = "Use blkioq_{readv,writev}()",
87 .type = FIO_OPT_STR_SET,
88 .off1 = offsetof(struct fio_blkio_options, vectored),
89 .help = "Use blkioq_{readv,writev}() instead of blkioq_{read,write}()",
90 .category = FIO_OPT_C_ENGINE,
91 .group = FIO_OPT_G_LIBBLKIO,
94 .name = "libblkio_write_zeroes_on_trim",
95 .lname = "Use blkioq_write_zeroes() for TRIM",
96 .type = FIO_OPT_STR_SET,
97 .off1 = offsetof(struct fio_blkio_options,
98 write_zeroes_on_trim),
99 .help = "Use blkioq_write_zeroes() for TRIM instead of blkioq_discard()",
100 .category = FIO_OPT_C_ENGINE,
101 .group = FIO_OPT_G_LIBBLKIO,
/*
 * Parse `str` as a list of `name=value` pairs and set each one as a string
 * property on blkio instance `b` via blkio_set_str().
 *
 * `opt_name` is only used in error messages.  A NULL `str` is a no-op.
 * Returns 0 on success, 1 on parse error or property-setting failure.
 */
static int fio_blkio_set_props_from_str(struct blkio *b, const char *opt_name,
					const char *str)
{
	int ret = 1;
	char *new_str, *to_free, *name, *value;

	if (!str)
		return 0;

	/* iteration can mutate string, so copy it */
	new_str = strdup(str);
	if (!new_str) {
		log_err("fio: strdup() failed\n");
		return 1;
	}

	/*
	 * get_next_str() advances `new_str` as it iterates; keep the original
	 * pointer so we free the actual allocation (avoids an invalid free).
	 */
	to_free = new_str;

	/* iterate over property name-value pairs */
	while ((name = get_next_str(&new_str))) {
		/* split into property name and value */
		value = strchr(name, '=');
		if (!value) {
			log_err("fio: missing '=' in option %s\n", opt_name);
			goto out_free;
		}

		*value = '\0';
		value++;

		/* strip whitespace from property name */
		strip_blank_front(&name);
		strip_blank_end(name);

		if (name[0] == '\0') {
			log_err("fio: empty property name in option %s\n",
				opt_name);
			goto out_free;
		}

		/* strip whitespace from property value */
		strip_blank_front(&value);
		strip_blank_end(value);

		/* set property */
		if (blkio_set_str(b, name, value) != 0) {
			log_err("fio: error setting property '%s' to '%s': %s\n",
				name, value, blkio_get_error_msg());
			goto out_free;
		}
	}

	ret = 0;

out_free:
	free(to_free);
	return ret;
}
/*
 * Log the failure of a libblkio function.
 *
 * `(void)func` is to ensure `func` exists and prevent typos.
 */
#define fio_blkio_log_err(func) \
	({ \
		(void)func; \
		log_err("fio: %s() failed: %s\n", #func, \
			blkio_get_error_msg()); \
	})
176 static int fio_blkio_create_and_connect(struct thread_data *td,
177 struct blkio **out_blkio)
179 const struct fio_blkio_options *options = td->eo;
183 if (!options->driver) {
184 log_err("fio: engine libblkio requires option libblkio_driver to be set\n");
188 if (blkio_create(options->driver, &b) != 0) {
189 fio_blkio_log_err(blkio_create);
193 /* don't fail if driver doesn't have a "direct" property */
194 ret = blkio_set_bool(b, "direct", td->o.odirect);
195 if (ret != 0 && ret != -ENOENT) {
196 fio_blkio_log_err(blkio_set_bool);
197 goto err_blkio_destroy;
200 if (blkio_set_bool(b, "read-only", read_only) != 0) {
201 fio_blkio_log_err(blkio_set_bool);
202 goto err_blkio_destroy;
205 if (fio_blkio_set_props_from_str(b, "libblkio_pre_connect_props",
206 options->pre_connect_props) != 0)
207 goto err_blkio_destroy;
209 if (blkio_connect(b) != 0) {
210 fio_blkio_log_err(blkio_connect);
211 goto err_blkio_destroy;
214 if (fio_blkio_set_props_from_str(b, "libblkio_pre_start_props",
215 options->pre_start_props) != 0)
216 goto err_blkio_destroy;
227 * This callback determines the device/file size, so it creates and connects a
228 * blkio instance. But it is invoked from the main thread in the original fio
229 * process, not from the processes in which jobs will actually run. It thus
230 * subsequently destroys the blkio, which is recreated in the init() callback.
232 static int fio_blkio_setup(struct thread_data *td)
238 assert(td->files_index == 1);
240 if (fio_blkio_create_and_connect(td, &b) != 0)
243 if (blkio_get_uint64(b, "capacity", &capacity) != 0) {
244 fio_blkio_log_err(blkio_get_uint64);
246 goto out_blkio_destroy;
249 td->files[0]->real_file_size = capacity;
250 fio_file_set_size_known(td->files[0]);
257 static int fio_blkio_init(struct thread_data *td)
259 const struct fio_blkio_options *options = td->eo;
260 struct fio_blkio_data *data;
263 * Request enqueueing is fast, and it's not possible to know exactly
264 * when a request is submitted, so never report submission latencies.
266 td->o.disable_slat = 1;
268 data = calloc(1, sizeof(*data));
270 log_err("fio: calloc() failed\n");
274 data->iovecs = calloc(td->o.iodepth, sizeof(data->iovecs[0]));
275 data->completions = calloc(td->o.iodepth, sizeof(data->completions[0]));
276 if (!data->iovecs || !data->completions) {
277 log_err("fio: calloc() failed\n");
281 if (fio_blkio_create_and_connect(td, &data->b) != 0)
284 if (blkio_set_int(data->b, "num-queues", options->hipri ? 0 : 1) != 0) {
285 fio_blkio_log_err(blkio_set_int);
286 goto err_blkio_destroy;
289 if (blkio_set_int(data->b, "num-poll-queues",
290 options->hipri ? 1 : 0) != 0) {
291 fio_blkio_log_err(blkio_set_int);
292 goto err_blkio_destroy;
295 if (blkio_start(data->b) != 0) {
296 fio_blkio_log_err(blkio_start);
297 goto err_blkio_destroy;
301 data->q = blkio_get_poll_queue(data->b, 0);
303 data->q = blkio_get_queue(data->b, 0);
305 /* Set data last so cleanup() does nothing if init() fails. */
306 td->io_ops_data = data;
311 blkio_destroy(&data->b);
313 free(data->completions);
319 static int fio_blkio_post_init(struct thread_data *td)
321 struct fio_blkio_data *data = td->io_ops_data;
323 if (!data->has_mem_region) {
325 * Memory was allocated by the fio core and not iomem_alloc(),
326 * so we need to register it as a memory region here.
328 * `td->orig_buffer_size` is computed like `len` below, but then
329 * fio can add some padding to it to make sure it is
330 * sufficiently aligned to the page size and the mem_align
331 * option. However, this can make it become unaligned to the
332 * "mem-region-alignment" property in ways that the user can't
333 * control, so we essentially recompute `td->orig_buffer_size`
334 * here but without adding that padding.
337 unsigned long long max_block_size;
338 struct blkio_mem_region region;
340 max_block_size = max(td->o.max_bs[DDIR_READ],
341 max(td->o.max_bs[DDIR_WRITE],
342 td->o.max_bs[DDIR_TRIM]));
344 region = (struct blkio_mem_region) {
345 .addr = td->orig_buffer,
346 .len = (size_t)max_block_size *
347 (size_t)td->o.iodepth,
351 if (blkio_map_mem_region(data->b, ®ion) != 0) {
352 fio_blkio_log_err(blkio_map_mem_region);
360 static void fio_blkio_cleanup(struct thread_data *td)
362 struct fio_blkio_data *data = td->io_ops_data;
365 blkio_destroy(&data->b);
366 free(data->completions);
/*
 * Round `x` up to the nearest multiple of `y` (`y` must be non-zero).
 * NOTE: both arguments are evaluated more than once — do not pass
 * expressions with side effects.
 */
#define align_up(x, y) ((((x) + (y) - 1) / (y)) * (y))
374 static int fio_blkio_iomem_alloc(struct thread_data *td, size_t size)
376 struct fio_blkio_data *data = td->io_ops_data;
378 uint64_t mem_region_alignment;
380 if (blkio_get_uint64(data->b, "mem-region-alignment",
381 &mem_region_alignment) != 0) {
382 fio_blkio_log_err(blkio_get_uint64);
386 /* round up size to satisfy mem-region-alignment */
387 size = align_up(size, (size_t)mem_region_alignment);
389 if (blkio_alloc_mem_region(data->b, &data->mem_region, size) != 0) {
390 fio_blkio_log_err(blkio_alloc_mem_region);
395 if (blkio_map_mem_region(data->b, &data->mem_region) != 0) {
396 fio_blkio_log_err(blkio_map_mem_region);
401 td->orig_buffer = data->mem_region.addr;
402 data->has_mem_region = true;
408 blkio_free_mem_region(data->b, &data->mem_region);
413 static void fio_blkio_iomem_free(struct thread_data *td)
415 struct fio_blkio_data *data = td->io_ops_data;
417 if (data && data->has_mem_region) {
418 blkio_unmap_mem_region(data->b, &data->mem_region);
419 blkio_free_mem_region(data->b, &data->mem_region);
421 data->has_mem_region = false;
/*
 * No per-file state: the device was already opened by blkio_connect(), so
 * opening the single pseudo-file always succeeds.
 */
static int fio_blkio_open_file(struct thread_data *td, struct fio_file *f)
{
	return 0;
}
430 static enum fio_q_status fio_blkio_queue(struct thread_data *td,
433 const struct fio_blkio_options *options = td->eo;
434 struct fio_blkio_data *data = td->io_ops_data;
436 fio_ro_check(td, io_u);
438 switch (io_u->ddir) {
440 if (options->vectored) {
441 struct iovec *iov = &data->iovecs[io_u->index];
442 iov->iov_base = io_u->xfer_buf;
443 iov->iov_len = (size_t)io_u->xfer_buflen;
445 blkioq_readv(data->q, io_u->offset, iov, 1,
448 blkioq_read(data->q, io_u->offset,
450 (size_t)io_u->xfer_buflen, io_u, 0);
454 if (options->vectored) {
455 struct iovec *iov = &data->iovecs[io_u->index];
456 iov->iov_base = io_u->xfer_buf;
457 iov->iov_len = (size_t)io_u->xfer_buflen;
459 blkioq_writev(data->q, io_u->offset, iov, 1,
462 blkioq_write(data->q, io_u->offset,
464 (size_t)io_u->xfer_buflen, io_u,
469 if (options->write_zeroes_on_trim) {
470 blkioq_write_zeroes(data->q, io_u->offset,
471 io_u->xfer_buflen, io_u, 0);
473 blkioq_discard(data->q, io_u->offset,
474 io_u->xfer_buflen, io_u, 0);
479 blkioq_flush(data->q, io_u, 0);
482 io_u->error = ENOTSUP;
483 io_u_log_error(td, io_u);
484 return FIO_Q_COMPLETED;
490 static int fio_blkio_getevents(struct thread_data *td, unsigned int min,
491 unsigned int max, const struct timespec *t)
493 struct fio_blkio_data *data = td->io_ops_data;
496 n = blkioq_do_io(data->q, data->completions, (int)min, (int)max, NULL);
498 fio_blkio_log_err(blkioq_do_io);
505 static struct io_u *fio_blkio_event(struct thread_data *td, int event)
507 struct fio_blkio_data *data = td->io_ops_data;
508 struct blkio_completion *completion = &data->completions[event];
509 struct io_u *io_u = completion->user_data;
511 io_u->error = -completion->ret;
516 FIO_STATIC struct ioengine_ops ioengine = {
518 .version = FIO_IOOPS_VERSION,
519 .flags = FIO_DISKLESSIO | FIO_NOEXTEND |
520 FIO_NO_OFFLOAD | FIO_SKIPPABLE_IOMEM_ALLOC,
522 .setup = fio_blkio_setup,
523 .init = fio_blkio_init,
524 .post_init = fio_blkio_post_init,
525 .cleanup = fio_blkio_cleanup,
527 .iomem_alloc = fio_blkio_iomem_alloc,
528 .iomem_free = fio_blkio_iomem_free,
530 .open_file = fio_blkio_open_file,
532 .queue = fio_blkio_queue,
533 .getevents = fio_blkio_getevents,
534 .event = fio_blkio_event,
537 .option_struct_size = sizeof(struct fio_blkio_options),
540 static void fio_init fio_blkio_register(void)
542 register_ioengine(&ioengine);
545 static void fio_exit fio_blkio_unregister(void)
547 unregister_ioengine(&ioengine);