engines/rbd: add support for "direct=1" option
[fio.git] / engines / rbd.c
/*
 * rbd engine
 *
 * IO engine using Ceph's librbd to test RADOS Block Devices.
 *
 */

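/*
 * Illustrative job snippet (a sketch only; the values are placeholders,
 * the option names come from the option table in this file plus the core
 * "direct" option named in the commit subject):
 *
 *   [rbd-test]
 *   ioengine=rbd
 *   clientname=admin
 *   pool=rbd
 *   rbdname=fio_test
 *   rw=randwrite
 *   bs=4k
 *   iodepth=32
 *   direct=1    ; with direct=1 the engine disables the librbd cache
 */
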
#include <rbd/librbd.h>

#include "../fio.h"
#include "../optgroup.h"

#ifdef CONFIG_RBD_POLL
/* add for poll */
#include <poll.h>
#include <sys/eventfd.h>
#endif

struct fio_rbd_iou {
	struct io_u *io_u;
	rbd_completion_t completion;
	int io_seen;
	int io_complete;
};

struct rbd_data {
	rados_t cluster;
	rados_ioctx_t io_ctx;
	rbd_image_t image;
	struct io_u **aio_events;
	struct io_u **sort_events;
	int fd; /* add for poll */
	bool connected;
};

struct rbd_options {
	void *pad;
	char *cluster_name;
	char *rbd_name;
	char *pool_name;
	char *client_name;
	int busy_poll;
};

static struct fio_option options[] = {
	{
		.name = "clustername",
		.lname = "ceph cluster name",
		.type = FIO_OPT_STR_STORE,
		.help = "Cluster name for ceph",
		.off1 = offsetof(struct rbd_options, cluster_name),
		.category = FIO_OPT_C_ENGINE,
		.group = FIO_OPT_G_RBD,
	},
	{
		.name = "rbdname",
		.lname = "rbd engine rbdname",
		.type = FIO_OPT_STR_STORE,
		.help = "RBD name for RBD engine",
		.off1 = offsetof(struct rbd_options, rbd_name),
		.category = FIO_OPT_C_ENGINE,
		.group = FIO_OPT_G_RBD,
	},
	{
		.name = "pool",
		.lname = "rbd engine pool",
		.type = FIO_OPT_STR_STORE,
		.help = "Name of the pool hosting the RBD for the RBD engine",
		.off1 = offsetof(struct rbd_options, pool_name),
		.category = FIO_OPT_C_ENGINE,
		.group = FIO_OPT_G_RBD,
	},
	{
		.name = "clientname",
		.lname = "rbd engine clientname",
		.type = FIO_OPT_STR_STORE,
		.help = "Name of the ceph client to access the RBD for the RBD engine",
		.off1 = offsetof(struct rbd_options, client_name),
		.category = FIO_OPT_C_ENGINE,
		.group = FIO_OPT_G_RBD,
	},
	{
		.name = "busy_poll",
		.lname = "Busy poll",
		.type = FIO_OPT_BOOL,
		.help = "Busy poll for completions instead of sleeping",
		.off1 = offsetof(struct rbd_options, busy_poll),
		.def = "0",
		.category = FIO_OPT_C_ENGINE,
		.group = FIO_OPT_G_RBD,
	},
	{
		.name = NULL,
	},
};

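/*
 * Allocate the per-thread rbd_data plus the two io_u arrays sized to the
 * job's iodepth. Called from fio_rbd_setup() before any librados/librbd
 * calls are made.
 */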
static int _fio_setup_rbd_data(struct thread_data *td,
			       struct rbd_data **rbd_data_ptr)
{
	struct rbd_data *rbd;

	if (td->io_ops_data)
		return 0;

	rbd = calloc(1, sizeof(struct rbd_data));
	if (!rbd)
		goto failed;

	rbd->connected = false;

	/* add for poll, init fd: -1 */
	rbd->fd = -1;

	rbd->aio_events = calloc(td->o.iodepth, sizeof(struct io_u *));
	if (!rbd->aio_events)
		goto failed;

	rbd->sort_events = calloc(td->o.iodepth, sizeof(struct io_u *));
	if (!rbd->sort_events)
		goto failed;

	*rbd_data_ptr = rbd;
	return 0;

failed:
	if (rbd) {
		if (rbd->aio_events)
			free(rbd->aio_events);
		if (rbd->sort_events)
			free(rbd->sort_events);
		free(rbd);
	}
	return 1;
}

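/*
 * With CONFIG_RBD_POLL, completions are signalled through an eventfd that
 * is registered with librbd via rbd_set_image_notification(). Without it,
 * the stub below is a no-op and completions are reaped by scanning the
 * in-flight io_us in rbd_iter_events().
 */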
#ifdef CONFIG_RBD_POLL
static bool _fio_rbd_setup_poll(struct rbd_data *rbd)
{
	int r;

	/* add for rbd poll */
	rbd->fd = eventfd(0, EFD_SEMAPHORE);
	if (rbd->fd < 0) {
		log_err("eventfd failed.\n");
		return false;
	}

	r = rbd_set_image_notification(rbd->image, rbd->fd, EVENT_TYPE_EVENTFD);
	if (r < 0) {
		log_err("rbd_set_image_notification failed.\n");
		close(rbd->fd);
		rbd->fd = -1;
		return false;
	}

	return true;
}
#else
static bool _fio_rbd_setup_poll(struct rbd_data *rbd)
{
	return true;
}
#endif

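/*
 * Bring up the librados/librbd stack: create the cluster handle (using
 * rados_create2() when a cluster name is given), read the ceph config,
 * connect, create the pool io context and open the image. With direct=1
 * (td->o.odirect) the librbd in-memory cache is disabled by setting
 * "rbd_cache" to "false" before the image is opened.
 */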
static int _fio_rbd_connect(struct thread_data *td)
{
	struct rbd_data *rbd = td->io_ops_data;
	struct rbd_options *o = td->eo;
	int r;

	if (o->cluster_name) {
		char *client_name = NULL;

		/*
		 * If we specify a cluster name, rados_create2()
		 * will not assume 'client.'; the name is taken
		 * as a full "type.id" string.
		 */
		if (o->client_name) {
			if (!index(o->client_name, '.')) {
				client_name = calloc(1, strlen("client.") +
						     strlen(o->client_name) + 1);
				strcat(client_name, "client.");
				strcat(client_name, o->client_name);
			} else {
				client_name = o->client_name;
			}
		}

		r = rados_create2(&rbd->cluster, o->cluster_name,
				  client_name, 0);

		if (client_name && !index(o->client_name, '.'))
			free(client_name);
	} else
		r = rados_create(&rbd->cluster, o->client_name);

	if (r < 0) {
		log_err("rados_create failed.\n");
		goto failed_early;
	}
	if (o->pool_name == NULL) {
		log_err("rbd pool name must be provided.\n");
		goto failed_early;
	}
	if (!o->rbd_name) {
		log_err("rbdname must be provided.\n");
		goto failed_early;
	}

	r = rados_conf_read_file(rbd->cluster, NULL);
	if (r < 0) {
		log_err("rados_conf_read_file failed.\n");
		goto failed_early;
	}

	r = rados_connect(rbd->cluster);
	if (r < 0) {
		log_err("rados_connect failed.\n");
		goto failed_shutdown;
	}

	r = rados_ioctx_create(rbd->cluster, o->pool_name, &rbd->io_ctx);
	if (r < 0) {
		log_err("rados_ioctx_create failed.\n");
		goto failed_shutdown;
	}

	if (td->o.odirect) {
		r = rados_conf_set(rbd->cluster, "rbd_cache", "false");
		if (r < 0) {
			log_info("failed to disable RBD in-memory cache\n");
		}
	}

	r = rbd_open(rbd->io_ctx, o->rbd_name, &rbd->image, NULL /* snap */);
	if (r < 0) {
		log_err("rbd_open failed.\n");
		goto failed_open;
	}

	if (!_fio_rbd_setup_poll(rbd))
		goto failed_poll;

	return 0;

failed_poll:
	rbd_close(rbd->image);
	rbd->image = NULL;
failed_open:
	rados_ioctx_destroy(rbd->io_ctx);
	rbd->io_ctx = NULL;
failed_shutdown:
	rados_shutdown(rbd->cluster);
	rbd->cluster = NULL;
failed_early:
	return 1;
}

static void _fio_rbd_disconnect(struct rbd_data *rbd)
{
	if (!rbd)
		return;

	/* close eventfd */
	if (rbd->fd != -1) {
		close(rbd->fd);
		rbd->fd = -1;
	}

	/* shutdown everything */
	if (rbd->image) {
		rbd_close(rbd->image);
		rbd->image = NULL;
	}

	if (rbd->io_ctx) {
		rados_ioctx_destroy(rbd->io_ctx);
		rbd->io_ctx = NULL;
	}

	if (rbd->cluster) {
		rados_shutdown(rbd->cluster);
		rbd->cluster = NULL;
	}
}

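/*
 * librbd completion callback: translate the completion's return value into
 * io_u->error / io_u->resid and mark the io_u complete so the reaping code
 * can pick it up.
 */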
static void _fio_rbd_finish_aiocb(rbd_completion_t comp, void *data)
{
	struct fio_rbd_iou *fri = data;
	struct io_u *io_u = fri->io_u;
	ssize_t ret;

	/*
	 * Looks like the return value is 0 for success, or < 0 for
	 * a specific error. So we have to assume that it can't do
	 * partial completions.
	 */
	ret = rbd_aio_get_return_value(fri->completion);
	if (ret < 0) {
		io_u->error = -ret;
		io_u->resid = io_u->xfer_buflen;
	} else
		io_u->error = 0;

	fri->io_complete = 1;
}

static struct io_u *fio_rbd_event(struct thread_data *td, int event)
{
	struct rbd_data *rbd = td->io_ops_data;

	return rbd->aio_events[event];
}

static inline int fri_check_complete(struct rbd_data *rbd, struct io_u *io_u,
				     unsigned int *events)
{
	struct fio_rbd_iou *fri = io_u->engine_data;

	if (fri->io_complete) {
		fri->io_seen = 1;
		rbd->aio_events[*events] = io_u;
		(*events)++;

		rbd_aio_release(fri->completion);
		return 1;
	}

	return 0;
}

#ifndef CONFIG_RBD_POLL
static inline int rbd_io_u_seen(struct io_u *io_u)
{
	struct fio_rbd_iou *fri = io_u->engine_data;

	return fri->io_seen;
}
#endif

static void rbd_io_u_wait_complete(struct io_u *io_u)
{
	struct fio_rbd_iou *fri = io_u->engine_data;

	rbd_aio_wait_for_complete(fri->completion);
}

static int rbd_io_u_cmp(const void *p1, const void *p2)
{
	const struct io_u **a = (const struct io_u **) p1;
	const struct io_u **b = (const struct io_u **) p2;
	uint64_t at, bt;

	at = utime_since_now(&(*a)->start_time);
	bt = utime_since_now(&(*b)->start_time);

	if (at < bt)
		return -1;
	else if (at == bt)
		return 0;
	else
		return 1;
}

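/*
 * Reap completions. With CONFIG_RBD_POLL this waits on the eventfd and
 * fetches finished completions via rbd_poll_io_events(); otherwise it scans
 * all in-flight io_us and, when asked to wait, sorts the unfinished ones by
 * issue time and waits on the oldest first.
 */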
static int rbd_iter_events(struct thread_data *td, unsigned int *events,
			   unsigned int min_evts, int wait)
{
	struct rbd_data *rbd = td->io_ops_data;
	unsigned int this_events = 0;
	struct io_u *io_u;
	int i, sidx = 0;

#ifdef CONFIG_RBD_POLL
	int ret = 0;
	int event_num = 0;
	struct fio_rbd_iou *fri = NULL;
	rbd_completion_t comps[min_evts];
	uint64_t counter;
	bool completed;

	struct pollfd pfd;
	pfd.fd = rbd->fd;
	pfd.events = POLLIN;

	ret = poll(&pfd, 1, wait ? -1 : 0);
	if (ret <= 0)
		return 0;
	if (!(pfd.revents & POLLIN))
		return 0;

	event_num = rbd_poll_io_events(rbd->image, comps, min_evts);

	for (i = 0; i < event_num; i++) {
		fri = rbd_aio_get_arg(comps[i]);
		io_u = fri->io_u;

		/* best effort to decrement the semaphore */
		ret = read(rbd->fd, &counter, sizeof(counter));
		if (ret <= 0)
			log_err("rbd_iter_events failed to decrement semaphore.\n");

		completed = fri_check_complete(rbd, io_u, events);
		assert(completed);

		this_events++;
	}
#else
	io_u_qiter(&td->io_u_all, io_u, i) {
		if (!(io_u->flags & IO_U_F_FLIGHT))
			continue;
		if (rbd_io_u_seen(io_u))
			continue;

		if (fri_check_complete(rbd, io_u, events))
			this_events++;
		else if (wait)
			rbd->sort_events[sidx++] = io_u;
	}
#endif

	if (!wait || !sidx)
		return this_events;

	/*
	 * Sort events, oldest issue first, then wait on as many as we
	 * need in order of age. If we have enough events, stop waiting,
	 * and just check if any of the older ones are done.
	 */
	if (sidx > 1)
		qsort(rbd->sort_events, sidx, sizeof(struct io_u *), rbd_io_u_cmp);

	for (i = 0; i < sidx; i++) {
		io_u = rbd->sort_events[i];

		if (fri_check_complete(rbd, io_u, events)) {
			this_events++;
			continue;
		}

		/*
		 * Stop waiting when we have enough, but continue checking
		 * all pending IOs if they are complete.
		 */
		if (*events >= min_evts)
			continue;

		rbd_io_u_wait_complete(io_u);

		if (fri_check_complete(rbd, io_u, events))
			this_events++;
	}

	return this_events;
}

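/*
 * getevents hook: iterate rbd_iter_events() until at least 'min' events
 * have been reaped. Unless busy_poll is set, switch to blocking waits once
 * a pass makes no progress.
 */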
static int fio_rbd_getevents(struct thread_data *td, unsigned int min,
			     unsigned int max, const struct timespec *t)
{
	unsigned int this_events, events = 0;
	struct rbd_options *o = td->eo;
	int wait = 0;

	do {
		this_events = rbd_iter_events(td, &events, min, wait);

		if (events >= min)
			break;
		if (this_events)
			continue;

		if (!o->busy_poll)
			wait = 1;
		else
			nop;
	} while (1);

	return events;
}

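/*
 * queue hook: allocate an rbd completion for the io_u and dispatch it with
 * the librbd AIO call matching its data direction (write, read, trim ->
 * discard, sync -> flush). Submission errors complete the io_u immediately.
 */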
static enum fio_q_status fio_rbd_queue(struct thread_data *td,
				       struct io_u *io_u)
{
	struct rbd_data *rbd = td->io_ops_data;
	struct fio_rbd_iou *fri = io_u->engine_data;
	int r = -1;

	fio_ro_check(td, io_u);

	fri->io_seen = 0;
	fri->io_complete = 0;

	r = rbd_aio_create_completion(fri, _fio_rbd_finish_aiocb,
				      &fri->completion);
	if (r < 0) {
		log_err("rbd_aio_create_completion failed.\n");
		goto failed;
	}

	if (io_u->ddir == DDIR_WRITE) {
		r = rbd_aio_write(rbd->image, io_u->offset, io_u->xfer_buflen,
				  io_u->xfer_buf, fri->completion);
		if (r < 0) {
			log_err("rbd_aio_write failed.\n");
			goto failed_comp;
		}

	} else if (io_u->ddir == DDIR_READ) {
		r = rbd_aio_read(rbd->image, io_u->offset, io_u->xfer_buflen,
				 io_u->xfer_buf, fri->completion);

		if (r < 0) {
			log_err("rbd_aio_read failed.\n");
			goto failed_comp;
		}
	} else if (io_u->ddir == DDIR_TRIM) {
		r = rbd_aio_discard(rbd->image, io_u->offset,
				    io_u->xfer_buflen, fri->completion);
		if (r < 0) {
			log_err("rbd_aio_discard failed.\n");
			goto failed_comp;
		}
	} else if (io_u->ddir == DDIR_SYNC) {
		r = rbd_aio_flush(rbd->image, fri->completion);
		if (r < 0) {
			log_err("rbd_aio_flush failed.\n");
			goto failed_comp;
		}
	} else {
		dprint(FD_IO, "%s: Warning: unhandled ddir: %d\n", __func__,
		       io_u->ddir);
		r = -EINVAL;
		goto failed_comp;
	}

	return FIO_Q_QUEUED;
failed_comp:
	rbd_aio_release(fri->completion);
failed:
	io_u->error = -r;
	td_verror(td, io_u->error, "xfer");
	return FIO_Q_COMPLETED;
}

static int fio_rbd_init(struct thread_data *td)
{
	int r;
	struct rbd_data *rbd = td->io_ops_data;

	if (rbd->connected)
		return 0;

	r = _fio_rbd_connect(td);
	if (r) {
		log_err("fio_rbd_connect failed, return code: %d .\n", r);
		goto failed;
	}

	return 0;

failed:
	return 1;
}

static void fio_rbd_cleanup(struct thread_data *td)
{
	struct rbd_data *rbd = td->io_ops_data;

	if (rbd) {
		_fio_rbd_disconnect(rbd);
		free(rbd->aio_events);
		free(rbd->sort_events);
		free(rbd);
	}
}

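/*
 * setup hook: allocate the engine data, connect once in the main thread so
 * the image size can be read via rbd_stat(), and expose that size through a
 * pseudo file. use_thread is forced because librbd needs to stay in the
 * same process context, so forked job processes are not usable.
 */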
static int fio_rbd_setup(struct thread_data *td)
{
	rbd_image_info_t info;
	struct fio_file *f;
	struct rbd_data *rbd = NULL;
	int r;

	/* allocate engine specific structure to deal with librbd. */
	r = _fio_setup_rbd_data(td, &rbd);
	if (r) {
		log_err("fio_setup_rbd_data failed.\n");
		goto cleanup;
	}
	td->io_ops_data = rbd;

	/* librbd does not allow us to run first in the main thread and later
	 * in a fork child. It needs to be the same process context all the
	 * time.
	 */
	td->o.use_thread = 1;

	/* connect in the main thread to determine the size of the given
	 * RADOS block device. And disconnect later on.
	 */
	r = _fio_rbd_connect(td);
	if (r) {
		log_err("fio_rbd_connect failed.\n");
		goto cleanup;
	}
	rbd->connected = true;

	/* get size of the RADOS block device */
	r = rbd_stat(rbd->image, &info, sizeof(info));
	if (r < 0) {
		log_err("rbd_stat failed.\n");
		goto cleanup;
	} else if (info.size == 0) {
		log_err("image size should be larger than zero.\n");
		r = -EINVAL;
		goto cleanup;
	}

	dprint(FD_IO, "rbd-engine: image size: %" PRIu64 "\n", info.size);

	/* taken from "net" engine. Pretend we deal with files,
	 * even if we do not have any idea about files.
	 * The size of the RBD is set instead of an artificial file.
	 */
	if (!td->files_index) {
		add_file(td, td->o.filename ? : "rbd", 0, 0);
		td->o.nr_files = td->o.nr_files ? : 1;
		td->o.open_files++;
	}
	f = td->files[0];
	f->real_file_size = info.size;

	return 0;

cleanup:
	fio_rbd_cleanup(td);
	return r;
}

static int fio_rbd_open(struct thread_data *td, struct fio_file *f)
{
	return 0;
}

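/*
 * invalidate hook: drop the librbd cache for the image when the build has
 * rbd_invalidate_cache() (CONFIG_RBD_INVAL); otherwise a no-op.
 */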
static int fio_rbd_invalidate(struct thread_data *td, struct fio_file *f)
{
#if defined(CONFIG_RBD_INVAL)
	struct rbd_data *rbd = td->io_ops_data;

	return rbd_invalidate_cache(rbd->image);
#else
	return 0;
#endif
}

static void fio_rbd_io_u_free(struct thread_data *td, struct io_u *io_u)
{
	struct fio_rbd_iou *fri = io_u->engine_data;

	if (fri) {
		io_u->engine_data = NULL;
		free(fri);
	}
}

static int fio_rbd_io_u_init(struct thread_data *td, struct io_u *io_u)
{
	struct fio_rbd_iou *fri;

	fri = calloc(1, sizeof(*fri));
	fri->io_u = io_u;
	io_u->engine_data = fri;
	return 0;
}

FIO_STATIC struct ioengine_ops ioengine = {
	.name = "rbd",
	.version = FIO_IOOPS_VERSION,
	.setup = fio_rbd_setup,
	.init = fio_rbd_init,
	.queue = fio_rbd_queue,
	.getevents = fio_rbd_getevents,
	.event = fio_rbd_event,
	.cleanup = fio_rbd_cleanup,
	.open_file = fio_rbd_open,
	.invalidate = fio_rbd_invalidate,
	.options = options,
	.io_u_init = fio_rbd_io_u_init,
	.io_u_free = fio_rbd_io_u_free,
	.option_struct_size = sizeof(struct rbd_options),
};

static void fio_init fio_rbd_register(void)
{
	register_ioengine(&ioengine);
}

static void fio_exit fio_rbd_unregister(void)
{
	unregister_ioengine(&ioengine);
}