4 * IO engine using libblkio to access various block I/O interfaces:
5 * https://gitlab.com/libblkio/libblkio
19 #include "../optgroup.h"
20 #include "../options.h"
23 /* per-thread state */
24 struct fio_blkio_data {
28 bool has_mem_region; /* whether mem_region is valid */
29 struct blkio_mem_region mem_region; /* only if allocated by libblkio */
31 struct blkio_completion *completions;
/* engine-specific job options; filled in by the option table below */
struct fio_blkio_options {
	void *pad; /* option fields must not have offset 0 */

	/*
	 * NOTE(review): `driver` and `hipri` were missing from the truncated
	 * definition but are referenced by offsetof() in the option table and
	 * read in init(), so they are restored here.
	 */
	char *driver;		/* libblkio driver name (required) */

	char *pre_connect_props;
	char *pre_start_props;

	unsigned int hipri;	/* set => use a poll queue instead of a queue */
};
44 static struct fio_option options[] = {
46 .name = "libblkio_driver",
47 .lname = "libblkio driver name",
48 .type = FIO_OPT_STR_STORE,
49 .off1 = offsetof(struct fio_blkio_options, driver),
50 .help = "Name of the driver to be used by libblkio",
51 .category = FIO_OPT_C_ENGINE,
52 .group = FIO_OPT_G_LIBBLKIO,
55 .name = "libblkio_pre_connect_props",
56 .lname = "Properties to be set before blkio_connect()",
57 .type = FIO_OPT_STR_STORE,
58 .off1 = offsetof(struct fio_blkio_options, pre_connect_props),
60 .category = FIO_OPT_C_ENGINE,
61 .group = FIO_OPT_G_LIBBLKIO,
64 .name = "libblkio_pre_start_props",
65 .lname = "Properties to be set before blkio_start()",
66 .type = FIO_OPT_STR_STORE,
67 .off1 = offsetof(struct fio_blkio_options, pre_start_props),
69 .category = FIO_OPT_C_ENGINE,
70 .group = FIO_OPT_G_LIBBLKIO,
74 .lname = "Use poll queues",
75 .type = FIO_OPT_STR_SET,
76 .off1 = offsetof(struct fio_blkio_options, hipri),
77 .help = "Use poll queues",
78 .category = FIO_OPT_C_ENGINE,
79 .group = FIO_OPT_G_LIBBLKIO,
/*
 * Parse `str` as a list of "name=value" property pairs and apply each to
 * blkio instance `b` with blkio_set_str(). `opt_name` is only used in error
 * messages. Returns 0 on success, 1 on any parse or set failure. A NULL
 * `str` (option not given) is a no-op success.
 *
 * NOTE(review): iteration uses a separate cursor so that free() always gets
 * the original strdup() pointer; get_next_str() advances the pointer it is
 * handed, and freeing the advanced pointer would be undefined behavior.
 */
static int fio_blkio_set_props_from_str(struct blkio *b, const char *opt_name,
					const char *str)
{
	int ret = 0;
	char *new_str, *iter, *name, *value;

	if (!str)
		return 0;

	/* iteration can mutate string, so copy it */
	new_str = strdup(str);
	if (!new_str) {
		log_err("fio: strdup() failed\n");
		return 1;
	}

	/* iterate over property name-value pairs */
	iter = new_str;
	while ((name = get_next_str(&iter))) {
		/* split into property name and value */
		value = strchr(name, '=');
		if (!value) {
			log_err("fio: missing '=' in option %s\n", opt_name);
			ret = 1;
			break;
		}

		*value = '\0';
		value++;

		/* strip whitespace from property name */
		strip_blank_front(&name);
		strip_blank_end(name);

		if (name[0] == '\0') {
			log_err("fio: empty property name in option %s\n",
				opt_name);
			ret = 1;
			break;
		}

		/* strip whitespace from property value */
		strip_blank_front(&value);
		strip_blank_end(value);

		/* apply the property; report libblkio's error message on failure */
		if (blkio_set_str(b, name, value) != 0) {
			log_err("fio: error setting property '%s' to '%s': %s\n",
				name, value, blkio_get_error_msg());
			ret = 1;
			break;
		}
	}

	free(new_str);
	return ret;
}
/*
 * Log the failure of a libblkio function.
 *
 * `(void)func` is to ensure `func` exists and prevent typos
 */
#define fio_blkio_log_err(func) \
	({ \
		(void)func; \
		log_err("fio: %s() failed: %s\n", #func, \
			blkio_get_error_msg()); \
	})
154 static int fio_blkio_create_and_connect(struct thread_data *td,
155 struct blkio **out_blkio)
157 const struct fio_blkio_options *options = td->eo;
161 if (!options->driver) {
162 log_err("fio: engine libblkio requires option libblkio_driver to be set\n");
166 if (blkio_create(options->driver, &b) != 0) {
167 fio_blkio_log_err(blkio_create);
171 /* don't fail if driver doesn't have a "direct" property */
172 ret = blkio_set_bool(b, "direct", td->o.odirect);
173 if (ret != 0 && ret != -ENOENT) {
174 fio_blkio_log_err(blkio_set_bool);
175 goto err_blkio_destroy;
178 if (blkio_set_bool(b, "read-only", read_only) != 0) {
179 fio_blkio_log_err(blkio_set_bool);
180 goto err_blkio_destroy;
183 if (fio_blkio_set_props_from_str(b, "libblkio_pre_connect_props",
184 options->pre_connect_props) != 0)
185 goto err_blkio_destroy;
187 if (blkio_connect(b) != 0) {
188 fio_blkio_log_err(blkio_connect);
189 goto err_blkio_destroy;
192 if (fio_blkio_set_props_from_str(b, "libblkio_pre_start_props",
193 options->pre_start_props) != 0)
194 goto err_blkio_destroy;
205 * This callback determines the device/file size, so it creates and connects a
206 * blkio instance. But it is invoked from the main thread in the original fio
207 * process, not from the processes in which jobs will actually run. It thus
208 * subsequently destroys the blkio, which is recreated in the init() callback.
210 static int fio_blkio_setup(struct thread_data *td)
216 assert(td->files_index == 1);
218 if (fio_blkio_create_and_connect(td, &b) != 0)
221 if (blkio_get_uint64(b, "capacity", &capacity) != 0) {
222 fio_blkio_log_err(blkio_get_uint64);
224 goto out_blkio_destroy;
227 td->files[0]->real_file_size = capacity;
228 fio_file_set_size_known(td->files[0]);
235 static int fio_blkio_init(struct thread_data *td)
237 const struct fio_blkio_options *options = td->eo;
238 struct fio_blkio_data *data;
241 * Request enqueueing is fast, and it's not possible to know exactly
242 * when a request is submitted, so never report submission latencies.
244 td->o.disable_slat = 1;
246 data = calloc(1, sizeof(*data));
248 log_err("fio: calloc() failed\n");
252 data->completions = calloc(td->o.iodepth, sizeof(data->completions[0]));
253 if (!data->completions) {
254 log_err("fio: calloc() failed\n");
258 if (fio_blkio_create_and_connect(td, &data->b) != 0)
261 if (blkio_set_int(data->b, "num-queues", options->hipri ? 0 : 1) != 0) {
262 fio_blkio_log_err(blkio_set_int);
263 goto err_blkio_destroy;
266 if (blkio_set_int(data->b, "num-poll-queues",
267 options->hipri ? 1 : 0) != 0) {
268 fio_blkio_log_err(blkio_set_int);
269 goto err_blkio_destroy;
272 if (blkio_start(data->b) != 0) {
273 fio_blkio_log_err(blkio_start);
274 goto err_blkio_destroy;
278 data->q = blkio_get_poll_queue(data->b, 0);
280 data->q = blkio_get_queue(data->b, 0);
282 /* Set data last so cleanup() does nothing if init() fails. */
283 td->io_ops_data = data;
288 blkio_destroy(&data->b);
290 free(data->completions);
295 static int fio_blkio_post_init(struct thread_data *td)
297 struct fio_blkio_data *data = td->io_ops_data;
299 if (!data->has_mem_region) {
301 * Memory was allocated by the fio core and not iomem_alloc(),
302 * so we need to register it as a memory region here.
304 * `td->orig_buffer_size` is computed like `len` below, but then
305 * fio can add some padding to it to make sure it is
306 * sufficiently aligned to the page size and the mem_align
307 * option. However, this can make it become unaligned to the
308 * "mem-region-alignment" property in ways that the user can't
309 * control, so we essentially recompute `td->orig_buffer_size`
310 * here but without adding that padding.
313 unsigned long long max_block_size;
314 struct blkio_mem_region region;
316 max_block_size = max(td->o.max_bs[DDIR_READ],
317 max(td->o.max_bs[DDIR_WRITE],
318 td->o.max_bs[DDIR_TRIM]));
320 region = (struct blkio_mem_region) {
321 .addr = td->orig_buffer,
322 .len = (size_t)max_block_size *
323 (size_t)td->o.iodepth,
327 if (blkio_map_mem_region(data->b, ®ion) != 0) {
328 fio_blkio_log_err(blkio_map_mem_region);
336 static void fio_blkio_cleanup(struct thread_data *td)
338 struct fio_blkio_data *data = td->io_ops_data;
341 blkio_destroy(&data->b);
342 free(data->completions);
/*
 * Round x up to the nearest multiple of y. NOTE(review): y must be non-zero,
 * and both arguments are evaluated more than once — avoid side effects.
 */
#define align_up(x, y) ((((x) + (y) - 1) / (y)) * (y))
349 static int fio_blkio_iomem_alloc(struct thread_data *td, size_t size)
351 struct fio_blkio_data *data = td->io_ops_data;
353 uint64_t mem_region_alignment;
355 if (blkio_get_uint64(data->b, "mem-region-alignment",
356 &mem_region_alignment) != 0) {
357 fio_blkio_log_err(blkio_get_uint64);
361 /* round up size to satisfy mem-region-alignment */
362 size = align_up(size, (size_t)mem_region_alignment);
364 if (blkio_alloc_mem_region(data->b, &data->mem_region, size) != 0) {
365 fio_blkio_log_err(blkio_alloc_mem_region);
370 if (blkio_map_mem_region(data->b, &data->mem_region) != 0) {
371 fio_blkio_log_err(blkio_map_mem_region);
376 td->orig_buffer = data->mem_region.addr;
377 data->has_mem_region = true;
383 blkio_free_mem_region(data->b, &data->mem_region);
388 static void fio_blkio_iomem_free(struct thread_data *td)
390 struct fio_blkio_data *data = td->io_ops_data;
392 if (data && data->has_mem_region) {
393 blkio_unmap_mem_region(data->b, &data->mem_region);
394 blkio_free_mem_region(data->b, &data->mem_region);
396 data->has_mem_region = false;
/*
 * Nothing to do per-file: the engine is FIO_DISKLESSIO and the blkio
 * instance was already connected in init(). Always succeeds.
 */
static int fio_blkio_open_file(struct thread_data *td, struct fio_file *f)
{
	return 0;
}
405 static enum fio_q_status fio_blkio_queue(struct thread_data *td,
408 struct fio_blkio_data *data = td->io_ops_data;
410 fio_ro_check(td, io_u);
412 switch (io_u->ddir) {
414 blkioq_read(data->q, io_u->offset, io_u->xfer_buf,
415 (size_t)io_u->xfer_buflen, io_u, 0);
418 blkioq_write(data->q, io_u->offset, io_u->xfer_buf,
419 (size_t)io_u->xfer_buflen, io_u, 0);
422 blkioq_discard(data->q, io_u->offset, io_u->xfer_buflen,
427 blkioq_flush(data->q, io_u, 0);
430 io_u->error = ENOTSUP;
431 io_u_log_error(td, io_u);
432 return FIO_Q_COMPLETED;
438 static int fio_blkio_getevents(struct thread_data *td, unsigned int min,
439 unsigned int max, const struct timespec *t)
441 struct fio_blkio_data *data = td->io_ops_data;
444 n = blkioq_do_io(data->q, data->completions, (int)min, (int)max, NULL);
446 fio_blkio_log_err(blkioq_do_io);
453 static struct io_u *fio_blkio_event(struct thread_data *td, int event)
455 struct fio_blkio_data *data = td->io_ops_data;
456 struct blkio_completion *completion = &data->completions[event];
457 struct io_u *io_u = completion->user_data;
459 io_u->error = -completion->ret;
464 FIO_STATIC struct ioengine_ops ioengine = {
466 .version = FIO_IOOPS_VERSION,
467 .flags = FIO_DISKLESSIO | FIO_NOEXTEND |
468 FIO_NO_OFFLOAD | FIO_SKIPPABLE_IOMEM_ALLOC,
470 .setup = fio_blkio_setup,
471 .init = fio_blkio_init,
472 .post_init = fio_blkio_post_init,
473 .cleanup = fio_blkio_cleanup,
475 .iomem_alloc = fio_blkio_iomem_alloc,
476 .iomem_free = fio_blkio_iomem_free,
478 .open_file = fio_blkio_open_file,
480 .queue = fio_blkio_queue,
481 .getevents = fio_blkio_getevents,
482 .event = fio_blkio_event,
485 .option_struct_size = sizeof(struct fio_blkio_options),
488 static void fio_init fio_blkio_register(void)
490 register_ioengine(&ioengine);
493 static void fio_exit fio_blkio_unregister(void)
495 unregister_ioengine(&ioengine);