engines/libblkio: Add support for poll queues
[fio.git] / engines / libblkio.c
/*
 * libblkio engine
 *
 * IO engine using libblkio to access various block I/O interfaces:
 * https://gitlab.com/libblkio/libblkio
 */

#include <assert.h>
#include <errno.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#include <blkio.h>

#include "../fio.h"
#include "../optgroup.h"
#include "../options.h"
#include "../parse.h"
/* per-thread state */
struct fio_blkio_data {
        struct blkio *b;
        struct blkioq *q;

        bool has_mem_region; /* whether mem_region is valid */
        struct blkio_mem_region mem_region; /* only if allocated by libblkio */

        struct blkio_completion *completions;
};

struct fio_blkio_options {
        void *pad; /* option fields must not have offset 0 */

        char *driver;
        char *pre_connect_props;
        char *pre_start_props;

        unsigned int hipri;
};

static struct fio_option options[] = {
        {
                .name = "libblkio_driver",
                .lname = "libblkio driver name",
                .type = FIO_OPT_STR_STORE,
                .off1 = offsetof(struct fio_blkio_options, driver),
                .help = "Name of the driver to be used by libblkio",
                .category = FIO_OPT_C_ENGINE,
                .group = FIO_OPT_G_LIBBLKIO,
        },
        {
                .name = "libblkio_pre_connect_props",
                .lname = "Properties to be set before blkio_connect()",
                .type = FIO_OPT_STR_STORE,
                .off1 = offsetof(struct fio_blkio_options, pre_connect_props),
                .help = "Properties to set on the blkio instance before blkio_connect()",
                .category = FIO_OPT_C_ENGINE,
                .group = FIO_OPT_G_LIBBLKIO,
        },
        {
                .name = "libblkio_pre_start_props",
                .lname = "Properties to be set before blkio_start()",
                .type = FIO_OPT_STR_STORE,
                .off1 = offsetof(struct fio_blkio_options, pre_start_props),
                .help = "Properties to set on the blkio instance before blkio_start()",
                .category = FIO_OPT_C_ENGINE,
                .group = FIO_OPT_G_LIBBLKIO,
        },
        {
                .name = "hipri",
                .lname = "Use poll queues",
                .type = FIO_OPT_STR_SET,
                .off1 = offsetof(struct fio_blkio_options, hipri),
                .help = "Use poll queues",
                .category = FIO_OPT_C_ENGINE,
                .group = FIO_OPT_G_LIBBLKIO,
        },
        {
                .name = NULL,
        },
};
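
/*
 * For illustration, a job exercising these options might look like the
 * following. The driver name and the "path" property are assumptions for the
 * example; the available drivers and properties depend on the libblkio build
 * and on the chosen driver.
 *
 *   [poll-queues]
 *   ioengine=libblkio
 *   libblkio_driver=io_uring
 *   libblkio_pre_connect_props=path=/dev/nvme0n1
 *   hipri
 *   rw=randread
 *   bs=4k
 *   iodepth=32
 */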

static int fio_blkio_set_props_from_str(struct blkio *b, const char *opt_name,
                                        const char *str) {
        int ret = 0;
        char *new_str, *name, *value;

        if (!str)
                return 0;

        /* iteration can mutate string, so copy it */
        new_str = strdup(str);
        if (!new_str) {
                log_err("fio: strdup() failed\n");
                return 1;
        }

        /* iterate over property name-value pairs */
        while ((name = get_next_str(&new_str))) {
                /* split into property name and value */
                value = strchr(name, '=');
                if (!value) {
                        log_err("fio: missing '=' in option %s\n", opt_name);
                        ret = 1;
                        break;
                }

                *value = '\0';
                ++value;

                /* strip whitespace from property name */
                strip_blank_front(&name);
                strip_blank_end(name);

                if (name[0] == '\0') {
                        log_err("fio: empty property name in option %s\n",
                                opt_name);
                        ret = 1;
                        break;
                }

                /* strip whitespace from property value */
                strip_blank_front(&value);
                strip_blank_end(value);

                /* set property */
                if (blkio_set_str(b, name, value) != 0) {
                        log_err("fio: error setting property '%s' to '%s': %s\n",
                                name, value, blkio_get_error_msg());
                        ret = 1;
                        break;
                }
        }

        free(new_str);
        return ret;
}
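
/*
 * For illustration: the option value is a list of name=value pairs (as split
 * by get_next_str()); a pair such as
 *
 *   path=/dev/nvme0n1
 *
 * is split at the '=' and applied with blkio_set_str(). The property name is
 * only an example; which properties exist depends on the chosen driver.
 */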

/*
 * Log the failure of a libblkio function.
 *
 * `(void)func` is to ensure `func` exists and prevent typos
 */
#define fio_blkio_log_err(func) \
        ({ \
                (void)func; \
                log_err("fio: %s() failed: %s\n", #func, \
                        blkio_get_error_msg()); \
        })

static int fio_blkio_create_and_connect(struct thread_data *td,
                                        struct blkio **out_blkio)
{
        const struct fio_blkio_options *options = td->eo;
        struct blkio *b;
        int ret;

        if (!options->driver) {
                log_err("fio: engine libblkio requires option libblkio_driver to be set\n");
                return 1;
        }

        if (blkio_create(options->driver, &b) != 0) {
                fio_blkio_log_err(blkio_create);
                return 1;
        }

        /* don't fail if driver doesn't have a "direct" property */
        ret = blkio_set_bool(b, "direct", td->o.odirect);
        if (ret != 0 && ret != -ENOENT) {
                fio_blkio_log_err(blkio_set_bool);
                goto err_blkio_destroy;
        }

        if (blkio_set_bool(b, "read-only", read_only) != 0) {
                fio_blkio_log_err(blkio_set_bool);
                goto err_blkio_destroy;
        }

        if (fio_blkio_set_props_from_str(b, "libblkio_pre_connect_props",
                                         options->pre_connect_props) != 0)
                goto err_blkio_destroy;

        if (blkio_connect(b) != 0) {
                fio_blkio_log_err(blkio_connect);
                goto err_blkio_destroy;
        }

        if (fio_blkio_set_props_from_str(b, "libblkio_pre_start_props",
                                         options->pre_start_props) != 0)
                goto err_blkio_destroy;

        *out_blkio = b;
        return 0;

err_blkio_destroy:
        blkio_destroy(&b);
        return 1;
}
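
/*
 * Note how generic fio settings map onto libblkio properties above: the job's
 * direct= setting is applied as the driver's "direct" property (skipped if the
 * driver has no such property) and fio's read-only mode as the "read-only"
 * property, before any user-supplied libblkio_pre_connect_props are applied.
 */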

/*
 * This callback determines the device/file size, so it creates and connects a
 * blkio instance. But it is invoked from the main thread in the original fio
 * process, not from the processes in which jobs will actually run. It thus
 * subsequently destroys the blkio, which is recreated in the init() callback.
 */
static int fio_blkio_setup(struct thread_data *td)
{
        struct blkio *b;
        int ret = 0;
        uint64_t capacity;

        assert(td->files_index == 1);

        if (fio_blkio_create_and_connect(td, &b) != 0)
                return 1;

        if (blkio_get_uint64(b, "capacity", &capacity) != 0) {
                fio_blkio_log_err(blkio_get_uint64);
                ret = 1;
                goto out_blkio_destroy;
        }

        td->files[0]->real_file_size = capacity;
        fio_file_set_size_known(td->files[0]);

out_blkio_destroy:
        blkio_destroy(&b);
        return ret;
}

static int fio_blkio_init(struct thread_data *td)
{
        const struct fio_blkio_options *options = td->eo;
        struct fio_blkio_data *data;

        /*
         * Request enqueueing is fast, and it's not possible to know exactly
         * when a request is submitted, so never report submission latencies.
         */
        td->o.disable_slat = 1;

        data = calloc(1, sizeof(*data));
        if (!data) {
                log_err("fio: calloc() failed\n");
                return 1;
        }

        data->completions = calloc(td->o.iodepth, sizeof(data->completions[0]));
        if (!data->completions) {
                log_err("fio: calloc() failed\n");
                goto err_free;
        }

        if (fio_blkio_create_and_connect(td, &data->b) != 0)
                goto err_free;

        if (blkio_set_int(data->b, "num-queues", options->hipri ? 0 : 1) != 0) {
                fio_blkio_log_err(blkio_set_int);
                goto err_blkio_destroy;
        }

        if (blkio_set_int(data->b, "num-poll-queues",
                          options->hipri ? 1 : 0) != 0) {
                fio_blkio_log_err(blkio_set_int);
                goto err_blkio_destroy;
        }

        if (blkio_start(data->b) != 0) {
                fio_blkio_log_err(blkio_start);
                goto err_blkio_destroy;
        }

        if (options->hipri)
                data->q = blkio_get_poll_queue(data->b, 0);
        else
                data->q = blkio_get_queue(data->b, 0);

        /* Set data last so cleanup() does nothing if init() fails. */
        td->io_ops_data = data;

        return 0;

err_blkio_destroy:
        blkio_destroy(&data->b);
err_free:
        free(data->completions);
        free(data);
        return 1;
}
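
/*
 * Note: with hipri set, init() requests a single poll queue and no regular
 * queues ("num-queues" = 0, "num-poll-queues" = 1) and fetches it with
 * blkio_get_poll_queue(); otherwise it uses one regular queue. Poll queues
 * generally obtain completions by busy-polling rather than waiting for
 * interrupt/event delivery, trading CPU time for lower latency (a general
 * characterization, not something this engine itself enforces).
 */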

static int fio_blkio_post_init(struct thread_data *td)
{
        struct fio_blkio_data *data = td->io_ops_data;

        if (!data->has_mem_region) {
                /*
                 * Memory was allocated by the fio core and not iomem_alloc(),
                 * so we need to register it as a memory region here.
                 *
                 * `td->orig_buffer_size` is computed like `len` below, but then
                 * fio can add some padding to it to make sure it is
                 * sufficiently aligned to the page size and the mem_align
                 * option. However, this can make it become unaligned to the
                 * "mem-region-alignment" property in ways that the user can't
                 * control, so we essentially recompute `td->orig_buffer_size`
                 * here but without adding that padding.
                 */

                unsigned long long max_block_size;
                struct blkio_mem_region region;

                max_block_size = max(td->o.max_bs[DDIR_READ],
                                     max(td->o.max_bs[DDIR_WRITE],
                                         td->o.max_bs[DDIR_TRIM]));

                region = (struct blkio_mem_region) {
                        .addr = td->orig_buffer,
                        .len = (size_t)max_block_size *
                               (size_t)td->o.iodepth,
                        .fd = -1,
                };

                if (blkio_map_mem_region(data->b, &region) != 0) {
                        fio_blkio_log_err(blkio_map_mem_region);
                        return 1;
                }
        }

        return 0;
}
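
/*
 * For example (illustrative numbers only): with a maximum block size of
 * 128 KiB across all data directions and iodepth=32, the region registered
 * above spans 128 KiB * 32 = 4 MiB of td->orig_buffer, i.e. the buffer size
 * before any padding the fio core may have added.
 */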

static void fio_blkio_cleanup(struct thread_data *td)
{
        struct fio_blkio_data *data = td->io_ops_data;

        if (data) {
                blkio_destroy(&data->b);
                free(data->completions);
                free(data);
        }
}

#define align_up(x, y) ((((x) + (y) - 1) / (y)) * (y))

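/*
 * For example, align_up(5000, 4096) == 8192:
 * ((5000 + 4096 - 1) / 4096) * 4096 = (9095 / 4096) * 4096 = 2 * 4096.
 */
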
static int fio_blkio_iomem_alloc(struct thread_data *td, size_t size)
{
        struct fio_blkio_data *data = td->io_ops_data;
        int ret;
        uint64_t mem_region_alignment;

        if (blkio_get_uint64(data->b, "mem-region-alignment",
                             &mem_region_alignment) != 0) {
                fio_blkio_log_err(blkio_get_uint64);
                return 1;
        }

        /* round up size to satisfy mem-region-alignment */
        size = align_up(size, (size_t)mem_region_alignment);

        if (blkio_alloc_mem_region(data->b, &data->mem_region, size) != 0) {
                fio_blkio_log_err(blkio_alloc_mem_region);
                ret = 1;
                goto out;
        }

        if (blkio_map_mem_region(data->b, &data->mem_region) != 0) {
                fio_blkio_log_err(blkio_map_mem_region);
                ret = 1;
                goto out_free;
        }

        td->orig_buffer = data->mem_region.addr;
        data->has_mem_region = true;

        ret = 0;
        goto out;

out_free:
        blkio_free_mem_region(data->b, &data->mem_region);
out:
        return ret;
}

static void fio_blkio_iomem_free(struct thread_data *td)
{
        struct fio_blkio_data *data = td->io_ops_data;

        if (data && data->has_mem_region) {
                blkio_unmap_mem_region(data->b, &data->mem_region);
                blkio_free_mem_region(data->b, &data->mem_region);

                data->has_mem_region = false;
        }
}

static int fio_blkio_open_file(struct thread_data *td, struct fio_file *f)
{
        return 0;
}

static enum fio_q_status fio_blkio_queue(struct thread_data *td,
                                         struct io_u *io_u)
{
        struct fio_blkio_data *data = td->io_ops_data;

        fio_ro_check(td, io_u);

        switch (io_u->ddir) {
        case DDIR_READ:
                blkioq_read(data->q, io_u->offset, io_u->xfer_buf,
                            (size_t)io_u->xfer_buflen, io_u, 0);
                break;
        case DDIR_WRITE:
                blkioq_write(data->q, io_u->offset, io_u->xfer_buf,
                             (size_t)io_u->xfer_buflen, io_u, 0);
                break;
        case DDIR_TRIM:
                blkioq_discard(data->q, io_u->offset, io_u->xfer_buflen,
                               io_u, 0);
                break;
        case DDIR_SYNC:
        case DDIR_DATASYNC:
                blkioq_flush(data->q, io_u, 0);
                break;
        default:
                io_u->error = ENOTSUP;
                io_u_log_error(td, io_u);
                return FIO_Q_COMPLETED;
        }

        return FIO_Q_QUEUED;
}
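
/*
 * Each request is enqueued with its io_u pointer as the user_data argument, so
 * the completions returned by blkioq_do_io() can be mapped back to the
 * originating io_u in fio_blkio_event() below.
 */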

static int fio_blkio_getevents(struct thread_data *td, unsigned int min,
                               unsigned int max, const struct timespec *t)
{
        struct fio_blkio_data *data = td->io_ops_data;
        int n;

        n = blkioq_do_io(data->q, data->completions, (int)min, (int)max, NULL);
        if (n < 0) {
                fio_blkio_log_err(blkioq_do_io);
                return -1;
        }

        return n;
}
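
/*
 * As used above, blkioq_do_io() submits any enqueued requests and waits for
 * between min and max completions (with no timeout, since NULL is passed),
 * filling data->completions and returning the number of completions obtained,
 * or a negative value on error.
 */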

static struct io_u *fio_blkio_event(struct thread_data *td, int event)
{
        struct fio_blkio_data *data = td->io_ops_data;
        struct blkio_completion *completion = &data->completions[event];
        struct io_u *io_u = completion->user_data;

        io_u->error = -completion->ret;

        return io_u;
}

FIO_STATIC struct ioengine_ops ioengine = {
        .name = "libblkio",
        .version = FIO_IOOPS_VERSION,
        .flags = FIO_DISKLESSIO | FIO_NOEXTEND |
                 FIO_NO_OFFLOAD | FIO_SKIPPABLE_IOMEM_ALLOC,

        .setup = fio_blkio_setup,
        .init = fio_blkio_init,
        .post_init = fio_blkio_post_init,
        .cleanup = fio_blkio_cleanup,

        .iomem_alloc = fio_blkio_iomem_alloc,
        .iomem_free = fio_blkio_iomem_free,

        .open_file = fio_blkio_open_file,

        .queue = fio_blkio_queue,
        .getevents = fio_blkio_getevents,
        .event = fio_blkio_event,

        .options = options,
        .option_struct_size = sizeof(struct fio_blkio_options),
};

static void fio_init fio_blkio_register(void)
{
        register_ioengine(&ioengine);
}

static void fio_exit fio_blkio_unregister(void)
{
        unregister_ioengine(&ioengine);
}