rbd: fixed busy-loop when using eventfd polling
[fio.git] / engines / rbd.c

/*
 * rbd engine
 *
 * IO engine using Ceph's librbd to test RADOS Block Devices.
 *
 */

#include <rbd/librbd.h>

#include "../fio.h"
#include "../optgroup.h"
#ifdef CONFIG_RBD_BLKIN
#include <zipkin_c.h>
#endif

#ifdef CONFIG_RBD_POLL
/* needed for eventfd-based completion polling */
#include <poll.h>
#include <sys/eventfd.h>
#endif

struct fio_rbd_iou {
        struct io_u *io_u;
        rbd_completion_t completion;
        int io_seen;
        int io_complete;
#ifdef CONFIG_RBD_BLKIN
        struct blkin_trace_info info;
#endif
};

struct rbd_data {
        rados_t cluster;
        rados_ioctx_t io_ctx;
        rbd_image_t image;
        struct io_u **aio_events;
        struct io_u **sort_events;
        int fd; /* eventfd used to poll for completions */
        bool connected;
};

struct rbd_options {
        void *pad;
        char *cluster_name;
        char *rbd_name;
        char *pool_name;
        char *client_name;
        int busy_poll;
};

static struct fio_option options[] = {
        {
                .name     = "clustername",
                .lname    = "ceph cluster name",
                .type     = FIO_OPT_STR_STORE,
                .help     = "Cluster name for ceph",
                .off1     = offsetof(struct rbd_options, cluster_name),
                .category = FIO_OPT_C_ENGINE,
                .group    = FIO_OPT_G_RBD,
        },
        {
                .name     = "rbdname",
                .lname    = "rbd engine rbdname",
                .type     = FIO_OPT_STR_STORE,
                .help     = "RBD name for RBD engine",
                .off1     = offsetof(struct rbd_options, rbd_name),
                .category = FIO_OPT_C_ENGINE,
                .group    = FIO_OPT_G_RBD,
        },
        {
                .name     = "pool",
                .lname    = "rbd engine pool",
                .type     = FIO_OPT_STR_STORE,
                .help     = "Name of the pool hosting the RBD for the RBD engine",
                .off1     = offsetof(struct rbd_options, pool_name),
                .category = FIO_OPT_C_ENGINE,
                .group    = FIO_OPT_G_RBD,
        },
        {
                .name     = "clientname",
                .lname    = "rbd engine clientname",
                .type     = FIO_OPT_STR_STORE,
                .help     = "Name of the ceph client to access the RBD for the RBD engine",
                .off1     = offsetof(struct rbd_options, client_name),
                .category = FIO_OPT_C_ENGINE,
                .group    = FIO_OPT_G_RBD,
        },
        {
                .name     = "busy_poll",
                .lname    = "Busy poll",
                .type     = FIO_OPT_BOOL,
                .help     = "Busy poll for completions instead of sleeping",
                .off1     = offsetof(struct rbd_options, busy_poll),
                .def      = "0",
                .category = FIO_OPT_C_ENGINE,
                .group    = FIO_OPT_G_RBD,
        },
        {
                .name     = NULL,
        },
};

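/*
 * For reference, a job file exercising the options above might look like
 * the sketch below (illustrative only; the job name and option values are
 * made up and can be combined with any other fio options):
 *
 *   [rbd-test]
 *   ioengine=rbd
 *   clientname=admin
 *   pool=rbd
 *   rbdname=fio_test
 *   busy_poll=0
 *   rw=randwrite
 *   bs=4k
 *   iodepth=32
 *
 * clustername is optional; when it is set, clientname is treated as a
 * full "type.id" name unless it contains no '.' (see _fio_rbd_connect()).
 */
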
static int _fio_setup_rbd_data(struct thread_data *td,
                               struct rbd_data **rbd_data_ptr)
{
        struct rbd_data *rbd;

        if (td->io_ops_data)
                return 0;

        rbd = calloc(1, sizeof(struct rbd_data));
        if (!rbd)
                goto failed;

        rbd->connected = false;

        /* poll support: no eventfd created yet */
        rbd->fd = -1;

        rbd->aio_events = calloc(td->o.iodepth, sizeof(struct io_u *));
        if (!rbd->aio_events)
                goto failed;

        rbd->sort_events = calloc(td->o.iodepth, sizeof(struct io_u *));
        if (!rbd->sort_events)
                goto failed;

        *rbd_data_ptr = rbd;
        return 0;

failed:
        if (rbd) {
                if (rbd->aio_events)
                        free(rbd->aio_events);
                if (rbd->sort_events)
                        free(rbd->sort_events);
                free(rbd);
        }
        return 1;

}

#ifdef CONFIG_RBD_POLL
static bool _fio_rbd_setup_poll(struct rbd_data *rbd)
{
        int r;

        /* create the eventfd that librbd will notify on completion */
        rbd->fd = eventfd(0, EFD_SEMAPHORE);
        if (rbd->fd < 0) {
                log_err("eventfd failed.\n");
                return false;
        }

        r = rbd_set_image_notification(rbd->image, rbd->fd, EVENT_TYPE_EVENTFD);
        if (r < 0) {
                log_err("rbd_set_image_notification failed.\n");
                close(rbd->fd);
                rbd->fd = -1;
                return false;
        }

        return true;
}
#else
static bool _fio_rbd_setup_poll(struct rbd_data *rbd)
{
        return true;
}
#endif

static int _fio_rbd_connect(struct thread_data *td)
{
        struct rbd_data *rbd = td->io_ops_data;
        struct rbd_options *o = td->eo;
        int r;

        if (o->cluster_name) {
                char *client_name = NULL;

                /*
                 * If a cluster name is specified, rados_create2()
                 * will not assume the 'client.' prefix; the name is
                 * taken as a full "type.id" string.
                 */
                if (o->client_name) {
                        if (!index(o->client_name, '.')) {
                                client_name = calloc(1, strlen("client.") +
                                                     strlen(o->client_name) + 1);
                                strcat(client_name, "client.");
                                strcat(client_name, o->client_name);
                        } else {
                                client_name = o->client_name;
                        }
                }

                r = rados_create2(&rbd->cluster, o->cluster_name,
                                  client_name, 0);

                if (client_name && !index(o->client_name, '.'))
                        free(client_name);
        } else
                r = rados_create(&rbd->cluster, o->client_name);

        if (r < 0) {
                log_err("rados_create failed.\n");
                goto failed_early;
        }

        r = rados_conf_read_file(rbd->cluster, NULL);
        if (r < 0) {
                log_err("rados_conf_read_file failed.\n");
                goto failed_early;
        }

        r = rados_connect(rbd->cluster);
        if (r < 0) {
                log_err("rados_connect failed.\n");
                goto failed_shutdown;
        }

        r = rados_ioctx_create(rbd->cluster, o->pool_name, &rbd->io_ctx);
        if (r < 0) {
                log_err("rados_ioctx_create failed.\n");
                goto failed_shutdown;
        }

        r = rbd_open(rbd->io_ctx, o->rbd_name, &rbd->image, NULL /* snap */);
        if (r < 0) {
                log_err("rbd_open failed.\n");
                goto failed_open;
        }

        if (!_fio_rbd_setup_poll(rbd))
                goto failed_poll;

        return 0;

failed_poll:
        rbd_close(rbd->image);
        rbd->image = NULL;
failed_open:
        rados_ioctx_destroy(rbd->io_ctx);
        rbd->io_ctx = NULL;
failed_shutdown:
        rados_shutdown(rbd->cluster);
        rbd->cluster = NULL;
failed_early:
        return 1;
}

static void _fio_rbd_disconnect(struct rbd_data *rbd)
{
        if (!rbd)
                return;

        /* close eventfd */
        if (rbd->fd != -1) {
                close(rbd->fd);
                rbd->fd = -1;
        }

        /* shutdown everything */
        if (rbd->image) {
                rbd_close(rbd->image);
                rbd->image = NULL;
        }

        if (rbd->io_ctx) {
                rados_ioctx_destroy(rbd->io_ctx);
                rbd->io_ctx = NULL;
        }

        if (rbd->cluster) {
                rados_shutdown(rbd->cluster);
                rbd->cluster = NULL;
        }
}

static void _fio_rbd_finish_aiocb(rbd_completion_t comp, void *data)
{
        struct fio_rbd_iou *fri = data;
        struct io_u *io_u = fri->io_u;
        ssize_t ret;

        /*
         * Looks like return value is 0 for success, or < 0 for
         * a specific error. So we have to assume that it can't do
         * partial completions.
         */
        ret = rbd_aio_get_return_value(fri->completion);
        if (ret < 0) {
                io_u->error = -ret;
                io_u->resid = io_u->xfer_buflen;
        } else
                io_u->error = 0;

        fri->io_complete = 1;
}

static struct io_u *fio_rbd_event(struct thread_data *td, int event)
{
        struct rbd_data *rbd = td->io_ops_data;

        return rbd->aio_events[event];
}

static inline int fri_check_complete(struct rbd_data *rbd, struct io_u *io_u,
                                     unsigned int *events)
{
        struct fio_rbd_iou *fri = io_u->engine_data;

        if (fri->io_complete) {
                fri->io_seen = 1;
                rbd->aio_events[*events] = io_u;
                (*events)++;

                rbd_aio_release(fri->completion);
                return 1;
        }

        return 0;
}

static inline int rbd_io_u_seen(struct io_u *io_u)
{
        struct fio_rbd_iou *fri = io_u->engine_data;

        return fri->io_seen;
}

static void rbd_io_u_wait_complete(struct io_u *io_u)
{
        struct fio_rbd_iou *fri = io_u->engine_data;

        rbd_aio_wait_for_complete(fri->completion);
}

static int rbd_io_u_cmp(const void *p1, const void *p2)
{
        const struct io_u **a = (const struct io_u **) p1;
        const struct io_u **b = (const struct io_u **) p2;
        uint64_t at, bt;

        at = utime_since_now(&(*a)->start_time);
        bt = utime_since_now(&(*b)->start_time);

        if (at < bt)
                return -1;
        else if (at == bt)
                return 0;
        else
                return 1;
}

static int rbd_iter_events(struct thread_data *td, unsigned int *events,
                           unsigned int min_evts, int wait)
{
        struct rbd_data *rbd = td->io_ops_data;
        unsigned int this_events = 0;
        struct io_u *io_u;
        int i, sidx = 0;

#ifdef CONFIG_RBD_POLL
        int ret = 0;
        int event_num = 0;
        struct fio_rbd_iou *fri = NULL;
        rbd_completion_t comps[min_evts];
        uint64_t counter;
        bool completed;

        struct pollfd pfd;
        pfd.fd = rbd->fd;
        pfd.events = POLLIN;

        /* if we are allowed to wait, block in poll() instead of spinning */
        ret = poll(&pfd, 1, wait ? -1 : 0);
        if (ret <= 0)
                return 0;
        if (!(pfd.revents & POLLIN))
                return 0;

        event_num = rbd_poll_io_events(rbd->image, comps, min_evts);

        for (i = 0; i < event_num; i++) {
                fri = rbd_aio_get_arg(comps[i]);
                io_u = fri->io_u;

                /* best effort to decrement the semaphore */
                ret = read(rbd->fd, &counter, sizeof(counter));
                if (ret <= 0)
                        log_err("rbd_iter_events failed to decrement semaphore.\n");

                completed = fri_check_complete(rbd, io_u, events);
                assert(completed);

                this_events++;
        }
#else
        io_u_qiter(&td->io_u_all, io_u, i) {
                if (!(io_u->flags & IO_U_F_FLIGHT))
                        continue;
                if (rbd_io_u_seen(io_u))
                        continue;

                if (fri_check_complete(rbd, io_u, events))
                        this_events++;
                else if (wait)
                        rbd->sort_events[sidx++] = io_u;
        }
#endif

        if (!wait || !sidx)
                return this_events;

        /*
         * Sort events, oldest issue first, then wait on as many as we
         * need in order of age. If we have enough events, stop waiting,
         * and just check if any of the older ones are done.
         */
        if (sidx > 1)
                qsort(rbd->sort_events, sidx, sizeof(struct io_u *), rbd_io_u_cmp);

        for (i = 0; i < sidx; i++) {
                io_u = rbd->sort_events[i];

                if (fri_check_complete(rbd, io_u, events)) {
                        this_events++;
                        continue;
                }

                /*
                 * Stop waiting when we have enough, but continue checking
                 * all pending IOs if they are complete.
                 */
                if (*events >= min_evts)
                        continue;

                rbd_io_u_wait_complete(io_u);

                if (fri_check_complete(rbd, io_u, events))
                        this_events++;
        }

        return this_events;
}

static int fio_rbd_getevents(struct thread_data *td, unsigned int min,
                             unsigned int max, const struct timespec *t)
{
        unsigned int this_events, events = 0;
        struct rbd_options *o = td->eo;
        int wait = 0;

        do {
                this_events = rbd_iter_events(td, &events, min, wait);

                if (events >= min)
                        break;
                if (this_events)
                        continue;

                /* nothing completed: wait on the next pass unless busy polling */
                if (!o->busy_poll)
                        wait = 1;
                else
                        nop;
        } while (1);

        return events;
}

static int fio_rbd_queue(struct thread_data *td, struct io_u *io_u)
{
        struct rbd_data *rbd = td->io_ops_data;
        struct fio_rbd_iou *fri = io_u->engine_data;
        int r = -1;

        fio_ro_check(td, io_u);

        fri->io_seen = 0;
        fri->io_complete = 0;

        r = rbd_aio_create_completion(fri, _fio_rbd_finish_aiocb,
                                      &fri->completion);
        if (r < 0) {
                log_err("rbd_aio_create_completion failed.\n");
                goto failed;
        }

        if (io_u->ddir == DDIR_WRITE) {
#ifdef CONFIG_RBD_BLKIN
                blkin_init_trace_info(&fri->info);
                r = rbd_aio_write_traced(rbd->image, io_u->offset, io_u->xfer_buflen,
                                         io_u->xfer_buf, fri->completion, &fri->info);
#else
                r = rbd_aio_write(rbd->image, io_u->offset, io_u->xfer_buflen,
                                  io_u->xfer_buf, fri->completion);
#endif
                if (r < 0) {
                        log_err("rbd_aio_write failed.\n");
                        goto failed_comp;
                }

        } else if (io_u->ddir == DDIR_READ) {
#ifdef CONFIG_RBD_BLKIN
                blkin_init_trace_info(&fri->info);
                r = rbd_aio_read_traced(rbd->image, io_u->offset, io_u->xfer_buflen,
                                        io_u->xfer_buf, fri->completion, &fri->info);
#else
                r = rbd_aio_read(rbd->image, io_u->offset, io_u->xfer_buflen,
                                 io_u->xfer_buf, fri->completion);
#endif

                if (r < 0) {
                        log_err("rbd_aio_read failed.\n");
                        goto failed_comp;
                }
        } else if (io_u->ddir == DDIR_TRIM) {
                r = rbd_aio_discard(rbd->image, io_u->offset,
                                    io_u->xfer_buflen, fri->completion);
                if (r < 0) {
                        log_err("rbd_aio_discard failed.\n");
                        goto failed_comp;
                }
        } else if (io_u->ddir == DDIR_SYNC) {
                r = rbd_aio_flush(rbd->image, fri->completion);
                if (r < 0) {
                        log_err("rbd_aio_flush failed.\n");
                        goto failed_comp;
                }
        } else {
                dprint(FD_IO, "%s: Warning: unhandled ddir: %d\n", __func__,
                       io_u->ddir);
                r = -EINVAL;
                goto failed_comp;
        }

        return FIO_Q_QUEUED;
failed_comp:
        rbd_aio_release(fri->completion);
failed:
        io_u->error = -r;
        td_verror(td, io_u->error, "xfer");
        return FIO_Q_COMPLETED;
}

static int fio_rbd_init(struct thread_data *td)
{
        int r;
        struct rbd_data *rbd = td->io_ops_data;

        if (rbd->connected)
                return 0;

        r = _fio_rbd_connect(td);
        if (r) {
                log_err("fio_rbd_connect failed, return code: %d.\n", r);
                goto failed;
        }

        return 0;

failed:
        return 1;
}

static void fio_rbd_cleanup(struct thread_data *td)
{
        struct rbd_data *rbd = td->io_ops_data;

        if (rbd) {
                _fio_rbd_disconnect(rbd);
                free(rbd->aio_events);
                free(rbd->sort_events);
                free(rbd);
        }
}

static int fio_rbd_setup(struct thread_data *td)
{
        rbd_image_info_t info;
        struct fio_file *f;
        struct rbd_data *rbd = NULL;
        int r;

        /* allocate engine specific structure to deal with librbd. */
        r = _fio_setup_rbd_data(td, &rbd);
        if (r) {
                log_err("fio_setup_rbd_data failed.\n");
                goto cleanup;
        }
        td->io_ops_data = rbd;

        /* librbd does not allow us to run first in the main thread and later
         * in a fork child. It needs to be the same process context all the
         * time.
         */
        td->o.use_thread = 1;

        /* connect in the main thread to determine the size of the given
         * RADOS block device, and disconnect later on.
         */
        r = _fio_rbd_connect(td);
        if (r) {
                log_err("fio_rbd_connect failed.\n");
                goto cleanup;
        }
        rbd->connected = true;

        /* get size of the RADOS block device */
        r = rbd_stat(rbd->image, &info, sizeof(info));
        if (r < 0) {
                log_err("rbd_stat failed.\n");
                goto cleanup;
        } else if (info.size == 0) {
                log_err("image size should be larger than zero.\n");
                r = -EINVAL;
                goto cleanup;
        }

        dprint(FD_IO, "rbd-engine: image size: %" PRIu64 "\n", info.size);

        /* taken from the "net" engine. Pretend we deal with files,
         * even though we do not operate on actual files.
         * The size of the RBD is used instead of an artificial file size.
         */
        if (!td->files_index) {
                add_file(td, td->o.filename ? : "rbd", 0, 0);
                td->o.nr_files = td->o.nr_files ? : 1;
                td->o.open_files++;
        }
        f = td->files[0];
        f->real_file_size = info.size;

        return 0;

cleanup:
        fio_rbd_cleanup(td);
        return r;
}

static int fio_rbd_open(struct thread_data *td, struct fio_file *f)
{
        return 0;
}

static int fio_rbd_invalidate(struct thread_data *td, struct fio_file *f)
{
#if defined(CONFIG_RBD_INVAL)
        struct rbd_data *rbd = td->io_ops_data;

        return rbd_invalidate_cache(rbd->image);
#else
        return 0;
#endif
}

static void fio_rbd_io_u_free(struct thread_data *td, struct io_u *io_u)
{
        struct fio_rbd_iou *fri = io_u->engine_data;

        if (fri) {
                io_u->engine_data = NULL;
                free(fri);
        }
}

static int fio_rbd_io_u_init(struct thread_data *td, struct io_u *io_u)
{
        struct fio_rbd_iou *fri;

        fri = calloc(1, sizeof(*fri));
        fri->io_u = io_u;
        io_u->engine_data = fri;
        return 0;
}

static struct ioengine_ops ioengine = {
        .name               = "rbd",
        .version            = FIO_IOOPS_VERSION,
        .setup              = fio_rbd_setup,
        .init               = fio_rbd_init,
        .queue              = fio_rbd_queue,
        .getevents          = fio_rbd_getevents,
        .event              = fio_rbd_event,
        .cleanup            = fio_rbd_cleanup,
        .open_file          = fio_rbd_open,
        .invalidate         = fio_rbd_invalidate,
        .options            = options,
        .io_u_init          = fio_rbd_io_u_init,
        .io_u_free          = fio_rbd_io_u_free,
        .option_struct_size = sizeof(struct rbd_options),
};

static void fio_init fio_rbd_register(void)
{
        register_ioengine(&ioengine);
}

static void fio_exit fio_rbd_unregister(void)
{
        unregister_ioengine(&ioengine);
}