4 * IO engine using Ceph's librbd to test RADOS Block Devices.
8 #include <rbd/librbd.h>
11 #include "../optgroup.h"
12 #ifdef CONFIG_RBD_BLKIN
16 #ifdef CONFIG_RBD_POLL
19 #include <sys/eventfd.h>
/* Fields of the per-io_u engine payload (struct fio_rbd_iou; opening of the
 * struct is not visible in this view). */
24 rbd_completion_t completion;
27 #ifdef CONFIG_RBD_BLKIN
/* blkin trace context, present only when built with CONFIG_RBD_BLKIN. */
28 struct blkin_trace_info info;
/* Fields of the per-thread engine state (struct rbd_data; opening of the
 * struct is not visible in this view). */
36 struct io_u **aio_events;
37 struct io_u **sort_events;
38 #ifdef CONFIG_RBD_POLL
39 int fd; /* eventfd used to poll librbd for completion events */
/*
 * fio option table for the rbd engine.  Each .off1 maps the option value
 * into the corresponding member of struct rbd_options (td->eo).
 */
52 static struct fio_option options[] = {
/* "clustername": which Ceph cluster to connect to. */
54 .name = "clustername",
55 .lname = "ceph cluster name",
56 .type = FIO_OPT_STR_STORE,
57 .help = "Cluster name for ceph",
58 .off1 = offsetof(struct rbd_options, cluster_name),
59 .category = FIO_OPT_C_ENGINE,
60 .group = FIO_OPT_G_RBD,
/* "rbdname" (name line not visible here): the RBD image to exercise. */
64 .lname = "rbd engine rbdname",
65 .type = FIO_OPT_STR_STORE,
66 .help = "RBD name for RBD engine",
67 .off1 = offsetof(struct rbd_options, rbd_name),
68 .category = FIO_OPT_C_ENGINE,
69 .group = FIO_OPT_G_RBD,
/* pool option: the RADOS pool hosting the image. */
73 .lname = "rbd engine pool",
74 .type = FIO_OPT_STR_STORE,
75 .help = "Name of the pool hosting the RBD for the RBD engine",
76 .off1 = offsetof(struct rbd_options, pool_name),
77 .category = FIO_OPT_C_ENGINE,
78 .group = FIO_OPT_G_RBD,
/* client name option: ceph client identity used for the connection. */
82 .lname = "rbd engine clientname",
83 .type = FIO_OPT_STR_STORE,
84 .help = "Name of the ceph client to access the RBD for the RBD engine",
85 .off1 = offsetof(struct rbd_options, client_name),
86 .category = FIO_OPT_C_ENGINE,
87 .group = FIO_OPT_G_RBD,
/* busy_poll option: spin on completions instead of sleeping. */
93 .help = "Busy poll for completions instead of sleeping",
94 .off1 = offsetof(struct rbd_options, busy_poll),
96 .category = FIO_OPT_C_ENGINE,
97 .group = FIO_OPT_G_RBD,
/*
 * Allocate the per-thread engine state: the rbd_data struct plus two
 * iodepth-sized io_u pointer arrays (aio_events for reaped events,
 * sort_events as scratch for age-sorting pending I/Os).  On allocation
 * failure the partially built state is freed (error path at the bottom).
 * Returns 0 on success, non-zero on failure (exact codes not visible here).
 */
104 static int _fio_setup_rbd_data(struct thread_data *td,
105 struct rbd_data **rbd_data_ptr)
107 struct rbd_data *rbd;
112 rbd = calloc(1, sizeof(struct rbd_data));
116 #ifdef CONFIG_RBD_POLL
/* Mark the poll eventfd as not-yet-created. */
117 /* add for poll, init fd: -1 */
121 rbd->aio_events = calloc(td->o.iodepth, sizeof(struct io_u *));
122 if (!rbd->aio_events)
125 rbd->sort_events = calloc(td->o.iodepth, sizeof(struct io_u *));
126 if (!rbd->sort_events)
/* Error path: release whatever was allocated. */
135 free(rbd->aio_events);
/* NOTE(review): the guard below is redundant — free(NULL) is a no-op. */
136 if (rbd->sort_events)
137 free(rbd->sort_events);
/*
 * Establish the full librados/librbd connection for one fio thread:
 * create the cluster handle (rados_create2 when a cluster name is given,
 * rados_create otherwise), read the default ceph config, connect, create
 * the pool io context, and open the RBD image.  With CONFIG_RBD_POLL it
 * additionally creates a non-blocking eventfd and registers it with the
 * image so completions can be reaped via poll().
 * Returns 0 on success; on failure unwinds via the goto labels at the
 * bottom (ioctx destroy, cluster shutdown).
 */
144 static int _fio_rbd_connect(struct thread_data *td)
146 struct rbd_data *rbd = td->io_ops_data;
147 struct rbd_options *o = td->eo;
150 if (o->cluster_name) {
151 char *client_name = NULL;
154 * If we specify cluser name, the rados_create2
155 * will not assume 'client.'. name is considered
156 * as a full type.id namestr
/* If the client name has no '.', prepend "client." to form a full
 * type.id string.  NOTE(review): index() is a legacy BSD function;
 * strchr() is the standard spelling. */
158 if (o->client_name) {
159 if (!index(o->client_name, '.')) {
160 client_name = calloc(1, strlen("client.") +
161 strlen(o->client_name) + 1);
162 strcat(client_name, "client.");
163 strcat(client_name, o->client_name);
165 client_name = o->client_name;
169 r = rados_create2(&rbd->cluster, o->cluster_name,
/* Free client_name only when it was heap-built above (i.e. the original
 * name had no '.'); otherwise it aliases o->client_name. */
172 if (client_name && !index(o->client_name, '.'))
/* No cluster name given: plain rados_create with the client name. */
175 r = rados_create(&rbd->cluster, o->client_name);
178 log_err("rados_create failed.\n");
/* NULL path: read the default ceph configuration file(s). */
182 r = rados_conf_read_file(rbd->cluster, NULL);
184 log_err("rados_conf_read_file failed.\n");
188 r = rados_connect(rbd->cluster);
190 log_err("rados_connect failed.\n");
191 goto failed_shutdown;
194 r = rados_ioctx_create(rbd->cluster, o->pool_name, &rbd->io_ctx);
196 log_err("rados_ioctx_create failed.\n");
197 goto failed_shutdown;
/* Open the image read/write at the head (no snapshot). */
200 r = rbd_open(rbd->io_ctx, o->rbd_name, &rbd->image, NULL /*snap */ );
202 log_err("rbd_open failed.\n");
206 #ifdef CONFIG_RBD_POLL
207 /* add for rbd poll */
208 rbd->fd = eventfd(0, EFD_NONBLOCK);
210 log_err("eventfd failed.\n");
/* Have librbd signal completions through the eventfd. */
214 r = rbd_set_image_notification(rbd->image, rbd->fd, EVENT_TYPE_EVENTFD);
216 log_err("rbd_set_image_notification failed.\n");
/* Error unwind labels follow (poll fd close not visible here). */
223 #ifdef CONFIG_RBD_POLL
230 rados_ioctx_destroy(rbd->io_ctx);
233 rados_shutdown(rbd->cluster);
/*
 * Tear down the connection built by _fio_rbd_connect(): close the image,
 * destroy the pool io context, and shut down the cluster handle.  With
 * CONFIG_RBD_POLL the eventfd is presumably closed here as well (those
 * lines are not visible in this view).  NULL checks between the calls
 * are likewise not visible here.
 */
239 static void _fio_rbd_disconnect(struct rbd_data *rbd)
244 #ifdef CONFIG_RBD_POLL
252 /* shutdown everything */
254 rbd_close(rbd->image);
259 rados_ioctx_destroy(rbd->io_ctx);
264 rados_shutdown(rbd->cluster);
/*
 * librbd completion callback, invoked when an async I/O finishes.  It
 * records failure state on the io_u and flips fri->io_complete so the
 * reaping path (fri_check_complete) can pick the I/O up.  The completion
 * itself is released later by the reaper, not here.
 */
269 static void _fio_rbd_finish_aiocb(rbd_completion_t comp, void *data)
271 struct fio_rbd_iou *fri = data;
272 struct io_u *io_u = fri->io_u;
276 * Looks like return value is 0 for success, or < 0 for
277 * a specific error. So we have to assume that it can't do
278 * partial completions.
280 ret = rbd_aio_get_return_value(fri->completion);
/* On error, mark the whole transfer as residual (nothing completed). */
283 io_u->resid = io_u->xfer_buflen;
287 fri->io_complete = 1;
/*
 * Return the io_u stored in slot 'event' of aio_events; the array is
 * populated by the getevents path before fio calls this.
 */
290 static struct io_u *fio_rbd_event(struct thread_data *td, int event)
292 struct rbd_data *rbd = td->io_ops_data;
294 return rbd->aio_events[event];
/*
 * If the given io_u's async operation has completed (io_complete set by
 * the librbd callback), stash it into the next aio_events slot, bump the
 * caller's event counter, and release the librbd completion.  Returns
 * non-zero when the io_u was reaped, zero otherwise (return statements
 * not visible in this view).
 */
297 static inline int fri_check_complete(struct rbd_data *rbd, struct io_u *io_u,
298 unsigned int *events)
300 struct fio_rbd_iou *fri = io_u->engine_data;
302 if (fri->io_complete) {
304 rbd->aio_events[*events] = io_u;
307 rbd_aio_release(fri->completion);
/*
 * Predicate used while scanning in-flight io_us: reports whether this
 * io_u has already been picked up in the current reap pass (the returned
 * expression is not visible in this view — presumably a flag in fri).
 */
314 static inline int rbd_io_u_seen(struct io_u *io_u)
316 struct fio_rbd_iou *fri = io_u->engine_data;
/*
 * Block until this io_u's librbd completion fires (used when the reaper
 * must wait for the oldest pending I/Os to satisfy min_evts).
 */
321 static void rbd_io_u_wait_complete(struct io_u *io_u)
323 struct fio_rbd_iou *fri = io_u->engine_data;
325 rbd_aio_wait_for_complete(fri->completion);
/*
 * qsort comparator ordering io_us oldest-issue-first: a larger elapsed
 * time since start_time means an older I/O (the final comparison of
 * at/bt is not visible in this view).
 */
328 static int rbd_io_u_cmp(const void *p1, const void *p2)
330 const struct io_u **a = (const struct io_u **) p1;
331 const struct io_u **b = (const struct io_u **) p2;
334 at = utime_since_now(&(*a)->start_time);
335 bt = utime_since_now(&(*b)->start_time);
/*
 * One reaping pass: collect completed I/Os into rbd->aio_events,
 * advancing *events.  With CONFIG_RBD_POLL it first blocks in poll() on
 * the image eventfd and drains completions via rbd_poll_io_events();
 * otherwise it scans all in-flight io_us directly.  I/Os that are still
 * pending are gathered into sort_events, sorted oldest-first, and — when
 * 'wait' is set and fewer than min_evts have completed — explicitly
 * waited on in age order.  Returns the number of events reaped in this
 * pass (accumulation into this_events is not fully visible here).
 */
345 static int rbd_iter_events(struct thread_data *td, unsigned int *events,
346 unsigned int min_evts, int wait)
348 struct rbd_data *rbd = td->io_ops_data;
349 unsigned int this_events = 0;
353 #ifdef CONFIG_RBD_POLL
356 struct fio_rbd_iou *fri = NULL;
/* VLA sized by min_evts to receive completed handles from librbd. */
357 rbd_completion_t comps[min_evts];
/* Block indefinitely until the eventfd signals at least one completion. */
363 ret = poll(&pfd, 1, -1);
368 assert(pfd.revents & POLLIN);
370 event_num = rbd_poll_io_events(rbd->image, comps, min_evts);
372 for (i = 0; i < event_num; i++) {
373 fri = rbd_aio_get_arg(comps[i]);
/* Walk every io_u fio knows about, skipping ones not in flight or
 * already reaped this pass. */
376 io_u_qiter(&td->io_u_all, io_u, i) {
378 if (!(io_u->flags & IO_U_F_FLIGHT))
380 if (rbd_io_u_seen(io_u))
383 if (fri_check_complete(rbd, io_u, events))
/* Still pending: remember it for the age-ordered wait below. */
386 rbd->sort_events[sidx++] = io_u;
393 * Sort events, oldest issue first, then wait on as many as we
394 * need in order of age. If we have enough events, stop waiting,
395 * and just check if any of the older ones are done.
398 qsort(rbd->sort_events, sidx, sizeof(struct io_u *), rbd_io_u_cmp);
400 for (i = 0; i < sidx; i++) {
401 io_u = rbd->sort_events[i];
403 if (fri_check_complete(rbd, io_u, events)) {
409 * Stop waiting when we have enough, but continue checking
410 * all pending IOs if they are complete.
412 if (*events >= min_evts)
/* Not enough yet: block on this (oldest remaining) I/O. */
415 rbd_io_u_wait_complete(io_u);
417 if (fri_check_complete(rbd, io_u, events))
/*
 * fio getevents hook: loop over rbd_iter_events() until at least 'min'
 * completions have been gathered (loop and busy_poll handling not fully
 * visible here; o->busy_poll presumably selects spinning over waiting —
 * confirm against the full source).  The timeout 't' is not visible
 * being used in this view.
 */
424 static int fio_rbd_getevents(struct thread_data *td, unsigned int min,
425 unsigned int max, const struct timespec *t)
427 unsigned int this_events, events = 0;
428 struct rbd_options *o = td->eo;
432 this_events = rbd_iter_events(td, &events, min, wait);
/*
 * fio queue hook: submit one io_u asynchronously via librbd.  Creates a
 * completion carrying fri as callback argument, then dispatches by data
 * direction: write, read (both with optional blkin tracing), trim
 * (rbd_aio_discard) or sync (rbd_aio_flush).  On success returns
 * FIO_Q_QUEUED (not visible here); on any failure releases the
 * completion, records the error on the io_u, and returns
 * FIO_Q_COMPLETED.
 */
448 static int fio_rbd_queue(struct thread_data *td, struct io_u *io_u)
450 struct rbd_data *rbd = td->io_ops_data;
451 struct fio_rbd_iou *fri = io_u->engine_data;
454 fio_ro_check(td, io_u);
457 fri->io_complete = 0;
459 r = rbd_aio_create_completion(fri, _fio_rbd_finish_aiocb,
462 log_err("rbd_aio_create_completion failed.\n");
466 if (io_u->ddir == DDIR_WRITE) {
467 #ifdef CONFIG_RBD_BLKIN
/* Traced variant: attach a fresh blkin trace to this write. */
468 blkin_init_trace_info(&fri->info);
469 r = rbd_aio_write_traced(rbd->image, io_u->offset, io_u->xfer_buflen,
470 io_u->xfer_buf, fri->completion, &fri->info);
472 r = rbd_aio_write(rbd->image, io_u->offset, io_u->xfer_buflen,
473 io_u->xfer_buf, fri->completion);
476 log_err("rbd_aio_write failed.\n");
480 } else if (io_u->ddir == DDIR_READ) {
481 #ifdef CONFIG_RBD_BLKIN
482 blkin_init_trace_info(&fri->info);
483 r = rbd_aio_read_traced(rbd->image, io_u->offset, io_u->xfer_buflen,
484 io_u->xfer_buf, fri->completion, &fri->info);
486 r = rbd_aio_read(rbd->image, io_u->offset, io_u->xfer_buflen,
487 io_u->xfer_buf, fri->completion);
491 log_err("rbd_aio_read failed.\n");
494 } else if (io_u->ddir == DDIR_TRIM) {
495 r = rbd_aio_discard(rbd->image, io_u->offset,
496 io_u->xfer_buflen, fri->completion);
498 log_err("rbd_aio_discard failed.\n");
501 } else if (io_u->ddir == DDIR_SYNC) {
502 r = rbd_aio_flush(rbd->image, fri->completion);
504 log_err("rbd_flush failed.\n");
/* Unknown direction: log it and fall through to the failure path. */
508 dprint(FD_IO, "%s: Warning: unhandled ddir: %d\n", __func__,
/* Failure path: the completion must be released since the callback
 * will never fire for it. */
515 rbd_aio_release(fri->completion);
518 td_verror(td, io_u->error, "xfer");
519 return FIO_Q_COMPLETED;
/*
 * fio init hook: (re)establish the cluster/image connection for the I/O
 * thread.  Error return handling beyond the log line is not visible in
 * this view.
 */
522 static int fio_rbd_init(struct thread_data *td)
526 r = _fio_rbd_connect(td);
528 log_err("fio_rbd_connect failed, return code: %d .\n", r);
/*
 * fio cleanup hook: disconnect from the cluster and free the per-thread
 * state allocated in _fio_setup_rbd_data (a NULL guard around this block
 * is presumably present but not visible in this view).
 */
538 static void fio_rbd_cleanup(struct thread_data *td)
540 struct rbd_data *rbd = td->io_ops_data;
543 _fio_rbd_disconnect(rbd);
544 free(rbd->aio_events);
545 free(rbd->sort_events);
/*
 * fio setup hook, run once before I/O threads start: log the librbd
 * version, allocate engine state, force thread mode (librbd cannot
 * survive a fork), connect just long enough to rbd_stat() the image
 * size, publish that size as the fake file's real_file_size, then
 * disconnect again (fio_rbd_init reconnects in the I/O thread).
 */
550 static int fio_rbd_setup(struct thread_data *td)
552 rbd_image_info_t info;
554 struct rbd_data *rbd = NULL;
555 int major, minor, extra;
558 /* log version of librbd. No cluster connection required. */
559 rbd_version(&major, &minor, &extra);
560 log_info("rbd engine: RBD version: %d.%d.%d\n", major, minor, extra);
562 /* allocate engine specific structure to deal with librbd. */
563 r = _fio_setup_rbd_data(td, &rbd);
565 log_err("fio_setup_rbd_data failed.\n");
568 td->io_ops_data = rbd;
570 /* librbd does not allow us to run first in the main thread and later
571 * in a fork child. It needs to be the same process context all the
/* (comment continues; closing lines not visible in this view) */
574 td->o.use_thread = 1;
576 /* connect in the main thread to determine
577 * the size of the given RADOS block device. And disconnect
/* (comment continues; "then disconnect" — closing lines not visible) */
580 r = _fio_rbd_connect(td);
582 log_err("fio_rbd_connect failed.\n");
586 /* get size of the RADOS block device */
587 r = rbd_stat(rbd->image, &info, sizeof(info));
589 log_err("rbd_status failed.\n");
/* NOTE(review): info.size is uint64_t; "%lu" is wrong on LP32/LLP64 —
 * PRIu64 would be portable. */
592 dprint(FD_IO, "rbd-engine: image size: %lu\n", info.size);
594 /* taken from "net" engine. Pretend we deal with files,
595 * even if we do not have any ideas about files.
596 * The size of the RBD is set instead of a artificial file.
/* Create a synthetic fio file entry if the job defined none. */
598 if (!td->files_index) {
599 add_file(td, td->o.filename ? : "rbd", 0, 0);
600 td->o.nr_files = td->o.nr_files ? : 1;
604 f->real_file_size = info.size;
606 /* disconnect, then we were only connected to determine
607 * the size of the RBD.
609 _fio_rbd_disconnect(rbd);
/* Error path: also disconnect before returning failure. */
613 _fio_rbd_disconnect(rbd);
/*
 * fio open_file hook.  The image is opened during connect, so no per-file
 * work is expected here (body not visible in this view — presumably just
 * returns 0; confirm against the full source).
 */
619 static int fio_rbd_open(struct thread_data *td, struct fio_file *f)
/*
 * fio invalidate hook: drop librbd's cache for the image when built with
 * CONFIG_RBD_INVAL; otherwise a no-op (the #else branch is not visible
 * in this view).
 */
624 static int fio_rbd_invalidate(struct thread_data *td, struct fio_file *f)
626 #if defined(CONFIG_RBD_INVAL)
627 struct rbd_data *rbd = td->io_ops_data;
629 return rbd_invalidate_cache(rbd->image);
/*
 * Per-io_u teardown: release the fio_rbd_iou allocated in
 * fio_rbd_io_u_init and detach it from the io_u (the free() call is
 * presumably between these lines but not visible in this view).
 */
635 static void fio_rbd_io_u_free(struct thread_data *td, struct io_u *io_u)
637 struct fio_rbd_iou *fri = io_u->engine_data;
640 io_u->engine_data = NULL;
/*
 * Per-io_u setup: allocate a zeroed fio_rbd_iou and attach it as the
 * io_u's engine payload (the fri->io_u back-pointer assignment is
 * presumably here but not visible in this view).
 */
645 static int fio_rbd_io_u_init(struct thread_data *td, struct io_u *io_u)
647 struct fio_rbd_iou *fri;
649 fri = calloc(1, sizeof(*fri));
651 io_u->engine_data = fri;
/*
 * ioengine descriptor wiring the hooks above into fio (the .name and
 * .flags members are not visible in this view).
 */
655 static struct ioengine_ops ioengine = {
657 .version = FIO_IOOPS_VERSION,
658 .setup = fio_rbd_setup,
659 .init = fio_rbd_init,
660 .queue = fio_rbd_queue,
661 .getevents = fio_rbd_getevents,
662 .event = fio_rbd_event,
663 .cleanup = fio_rbd_cleanup,
664 .open_file = fio_rbd_open,
665 .invalidate = fio_rbd_invalidate,
667 .io_u_init = fio_rbd_io_u_init,
668 .io_u_free = fio_rbd_io_u_free,
669 .option_struct_size = sizeof(struct rbd_options),
/* Constructor: register this engine with fio at program load. */
672 static void fio_init fio_rbd_register(void)
674 register_ioengine(&ioengine);
/* Destructor: unregister the engine at program exit. */
677 static void fio_exit fio_rbd_unregister(void)
679 unregister_ioengine(&ioengine);