4 * IO engine using Ceph's librbd to test RADOS Block Devices.
8 #include <rbd/librbd.h>
11 #include "../optgroup.h"
13 #ifdef CONFIG_RBD_POLL
16 #include <sys/eventfd.h>
21 rbd_completion_t completion;
30 struct io_u **aio_events;
31 struct io_u **sort_events;
32 int fd; /* add for poll */
/*
 * fio job-option table for the rbd engine.  Each entry stores its
 * parsed value into the matching struct rbd_options field (via
 * off1 = offsetof), under the FIO_OPT_G_RBD option group.
 */
45 static struct fio_option options[] = {
/* Ceph cluster name; when set, connection goes through rados_create2(). */
47 .name = "clustername",
48 .lname = "ceph cluster name",
49 .type = FIO_OPT_STR_STORE,
50 .help = "Cluster name for ceph",
51 .off1 = offsetof(struct rbd_options, cluster_name),
52 .category = FIO_OPT_C_ENGINE,
53 .group = FIO_OPT_G_RBD,
/* Name of the RBD image to open and run I/O against. */
57 .lname = "rbd engine rbdname",
58 .type = FIO_OPT_STR_STORE,
59 .help = "RBD name for RBD engine",
60 .off1 = offsetof(struct rbd_options, rbd_name),
61 .category = FIO_OPT_C_ENGINE,
62 .group = FIO_OPT_G_RBD,
/* RADOS pool that hosts the image (mandatory; checked at connect time). */
66 .lname = "rbd engine pool",
67 .type = FIO_OPT_STR_STORE,
68 .help = "Name of the pool hosting the RBD for the RBD engine",
69 .off1 = offsetof(struct rbd_options, pool_name),
70 .category = FIO_OPT_C_ENGINE,
71 .group = FIO_OPT_G_RBD,
/* Ceph client id; "client." is prepended if no type.id dot is present. */
75 .lname = "rbd engine clientname",
76 .type = FIO_OPT_STR_STORE,
77 .help = "Name of the ceph client to access the RBD for the RBD engine",
78 .off1 = offsetof(struct rbd_options, client_name),
79 .category = FIO_OPT_C_ENGINE,
80 .group = FIO_OPT_G_RBD,
/* Busy-poll for completions instead of sleeping in getevents. */
86 .help = "Busy poll for completions instead of sleeping",
87 .off1 = offsetof(struct rbd_options, busy_poll),
89 .category = FIO_OPT_C_ENGINE,
90 .group = FIO_OPT_G_RBD,
/*
 * Allocate the per-thread rbd_data bookkeeping structure and its two
 * io_u pointer arrays (sized by iodepth).  On success the new struct is
 * handed back through *rbd_data_ptr; on allocation failure the partially
 * built struct is torn down (see the free path below).
 */
97 static int _fio_setup_rbd_data(struct thread_data *td,
98 struct rbd_data **rbd_data_ptr)
100 struct rbd_data *rbd;
105 rbd = calloc(1, sizeof(struct rbd_data));
109 rbd->connected = false;
/* Poll support: the eventfd descriptor starts out invalid (-1). */
111 /* add for poll, init fd: -1 */
/* Completed-event slots returned by fio_rbd_event(). */
114 rbd->aio_events = calloc(td->o.iodepth, sizeof(struct io_u *));
115 if (!rbd->aio_events)
/* Scratch array used to age-sort in-flight io_us in rbd_iter_events(). */
118 rbd->sort_events = calloc(td->o.iodepth, sizeof(struct io_u *));
119 if (!rbd->sort_events)
/* Error unwind: release whatever was allocated before the failure. */
128 free(rbd->aio_events);
129 if (rbd->sort_events)
130 free(rbd->sort_events);
137 #ifdef CONFIG_RBD_POLL
/*
 * Wire up eventfd-based completion notification so getevents can poll()
 * instead of busy-checking.  Creates a semaphore-mode eventfd and
 * registers it with librbd for the open image.  Returns false on error.
 */
138 static bool _fio_rbd_setup_poll(struct rbd_data *rbd)
142 /* add for rbd poll */
/* EFD_SEMAPHORE: each read() decrements the counter by one event. */
143 rbd->fd = eventfd(0, EFD_SEMAPHORE);
145 log_err("eventfd failed.\n");
149 r = rbd_set_image_notification(rbd->image, rbd->fd, EVENT_TYPE_EVENTFD);
151 log_err("rbd_set_image_notification failed.\n");
/* Non-poll build: stub that presumably just returns true — body not
 * visible here, confirm against full source. */
160 static bool _fio_rbd_setup_poll(struct rbd_data *rbd)
/*
 * Establish the full RADOS/RBD connection for this thread: create the
 * cluster handle, read the ceph config, connect, create the pool ioctx,
 * open the image, and (when built with poll support) hook up event
 * notification.  Uses goto-style unwind labels on failure.
 */
166 static int _fio_rbd_connect(struct thread_data *td)
168 struct rbd_data *rbd = td->io_ops_data;
169 struct rbd_options *o = td->eo;
172 if (o->cluster_name) {
173 char *client_name = NULL;
/*
 * If we specify a cluster name, rados_create2() will not assume
 * a 'client.' prefix; the name is taken as a full type.id namestr.
 */
176 * If we specify cluster name, the rados_create2
177 * will not assume 'client.'. name is considered
178 * as a full type.id namestr
/* No '.' in client_name => prepend "client." ourselves. */
180 if (o->client_name) {
181 if (!index(o->client_name, '.')) {
182 client_name = calloc(1, strlen("client.") +
183 strlen(o->client_name) + 1);
184 strcat(client_name, "client.");
185 strcat(client_name, o->client_name);
187 client_name = o->client_name;
191 r = rados_create2(&rbd->cluster, o->cluster_name,
/* Only free the name if we allocated the prefixed copy above. */
194 if (client_name && !index(o->client_name, '.'))
/* No cluster name: plain rados_create with the client name (may be NULL). */
197 r = rados_create(&rbd->cluster, o->client_name);
200 log_err("rados_create failed.\n");
/* pool and rbdname are mandatory options. */
203 if (o->pool_name == NULL) {
204 log_err("rbd pool name must be provided.\n");
208 log_err("rbdname must be provided.\n");
/* NULL path => read the default ceph.conf search locations. */
212 r = rados_conf_read_file(rbd->cluster, NULL);
214 log_err("rados_conf_read_file failed.\n");
218 r = rados_connect(rbd->cluster);
220 log_err("rados_connect failed.\n");
221 goto failed_shutdown;
224 r = rados_ioctx_create(rbd->cluster, o->pool_name, &rbd->io_ctx);
226 log_err("rados_ioctx_create failed.\n");
227 goto failed_shutdown;
/* Disable the librbd in-memory cache (non-fatal if it fails);
 * presumably gated on busy_poll — intervening lines not visible. */
231 r = rados_conf_set(rbd->cluster, "rbd_cache", "false");
233 log_info("failed to disable RBD in-memory cache\n");
237 r = rbd_open(rbd->io_ctx, o->rbd_name, &rbd->image, NULL /*snap */ );
239 log_err("rbd_open failed.\n");
243 if (!_fio_rbd_setup_poll(rbd))
/* Unwind path: tear down in reverse order of construction. */
249 rbd_close(rbd->image);
252 rados_ioctx_destroy(rbd->io_ctx);
255 rados_shutdown(rbd->cluster);
/*
 * Tear down the connection built by _fio_rbd_connect(): close the image,
 * destroy the pool ioctx, then shut the cluster handle down.  The elided
 * lines presumably NULL-check each handle before releasing it.
 */
261 static void _fio_rbd_disconnect(struct rbd_data *rbd)
272 /* shutdown everything */
274 rbd_close(rbd->image);
279 rados_ioctx_destroy(rbd->io_ctx);
284 rados_shutdown(rbd->cluster);
/*
 * librbd completion callback (registered in fio_rbd_queue).  Records the
 * result on the io_u and flags the iou complete; the reap side
 * (fri_check_complete) picks it up from there.
 */
289 static void _fio_rbd_finish_aiocb(rbd_completion_t comp, void *data)
291 struct fio_rbd_iou *fri = data;
292 struct io_u *io_u = fri->io_u;
/*
 * The return value is 0 for success or < 0 for a specific error, so we
 * must assume librbd cannot report partial completions.
 */
296 * Looks like return value is 0 for success, or < 0 for
297 * a specific error. So we have to assume that it can't do
298 * partial completions.
300 ret = rbd_aio_get_return_value(fri->completion);
/* On error, report the whole transfer as residual. */
303 io_u->resid = io_u->xfer_buflen;
/* Publish completion last; the reaper tests this flag. */
307 fri->io_complete = 1;
/* Return the io_u stashed at slot 'event' by the most recent getevents. */
310 static struct io_u *fio_rbd_event(struct thread_data *td, int event)
312 struct rbd_data *rbd = td->io_ops_data;
314 return rbd->aio_events[event];
/*
 * If the given io_u's rbd completion has fired, move it into the
 * aio_events slot at *events, release the librbd completion, and
 * (presumably) bump *events and return nonzero — tail not visible.
 */
317 static inline int fri_check_complete(struct rbd_data *rbd, struct io_u *io_u,
318 unsigned int *events)
320 struct fio_rbd_iou *fri = io_u->engine_data;
322 if (fri->io_complete) {
324 rbd->aio_events[*events] = io_u;
327 rbd_aio_release(fri->completion);
/* Non-poll fallback helpers, used only when eventfd polling is absent. */
334 #ifndef CONFIG_RBD_POLL
/* Nonzero once this io_u has already been harvested in this reap pass. */
335 static inline int rbd_io_u_seen(struct io_u *io_u)
337 struct fio_rbd_iou *fri = io_u->engine_data;
/* Block until this io_u's librbd completion has fired. */
343 static void rbd_io_u_wait_complete(struct io_u *io_u)
345 struct fio_rbd_iou *fri = io_u->engine_data;
347 rbd_aio_wait_for_complete(fri->completion);
/*
 * qsort comparator ordering io_us oldest-issue-first, by elapsed time
 * since each io_u's start_time (larger elapsed => issued earlier).
 */
350 static int rbd_io_u_cmp(const void *p1, const void *p2)
352 const struct io_u **a = (const struct io_u **) p1;
353 const struct io_u **b = (const struct io_u **) p2;
356 at = utime_since_now(&(*a)->start_time);
357 bt = utime_since_now(&(*b)->start_time);
/*
 * One reap pass: harvest completed I/Os into rbd->aio_events, advancing
 * *events.  Two strategies, selected at build time:
 *   - CONFIG_RBD_POLL: poll() the eventfd, then drain completions via
 *     rbd_poll_io_events().
 *   - otherwise: scan all in-flight io_us, sort by age, and wait on the
 *     oldest until min_evts are collected.
 */
367 static int rbd_iter_events(struct thread_data *td, unsigned int *events,
368 unsigned int min_evts, int wait)
370 struct rbd_data *rbd = td->io_ops_data;
371 unsigned int this_events = 0;
375 #ifdef CONFIG_RBD_POLL
378 struct fio_rbd_iou *fri = NULL;
/* VLA sized by the caller's minimum batch. */
379 rbd_completion_t comps[min_evts];
/* wait => block indefinitely; otherwise just a non-blocking check. */
387 ret = poll(&pfd, 1, wait ? -1 : 0);
390 if (!(pfd.revents & POLLIN))
393 event_num = rbd_poll_io_events(rbd->image, comps, min_evts);
395 for (i = 0; i < event_num; i++) {
396 fri = rbd_aio_get_arg(comps[i]);
/* best effort to decrement the eventfd semaphore (EFD_SEMAPHORE mode). */
399 /* best effort to decrement the semaphore */
400 ret = read(rbd->fd, &counter, sizeof(counter));
402 log_err("rbd_iter_events failed to decrement semaphore.\n");
404 completed = fri_check_complete(rbd, io_u, events);
/* Non-poll path: walk every io_u, collecting done ones and queueing
 * still-in-flight ones into sort_events for the age-ordered wait. */
410 io_u_qiter(&td->io_u_all, io_u, i) {
411 if (!(io_u->flags & IO_U_F_FLIGHT))
413 if (rbd_io_u_seen(io_u))
416 if (fri_check_complete(rbd, io_u, events))
419 rbd->sort_events[sidx++] = io_u;
/*
 * Sort events, oldest issue first, then wait on as many as we
 * need in order of age. If we have enough events, stop waiting,
 * and just check if any of the older ones are done.
 */
427 * Sort events, oldest issue first, then wait on as many as we
428 * need in order of age. If we have enough events, stop waiting,
429 * and just check if any of the older ones are done.
432 qsort(rbd->sort_events, sidx, sizeof(struct io_u *), rbd_io_u_cmp);
434 for (i = 0; i < sidx; i++) {
435 io_u = rbd->sort_events[i];
437 if (fri_check_complete(rbd, io_u, events)) {
/*
 * Stop waiting when we have enough, but continue checking
 * all pending IOs if they are complete.
 */
443 * Stop waiting when we have enough, but continue checking
444 * all pending IOs if they are complete.
446 if (*events >= min_evts)
449 rbd_io_u_wait_complete(io_u);
451 if (fri_check_complete(rbd, io_u, events))
/*
 * ioengine getevents hook: loop rbd_iter_events() until at least 'min'
 * completions are gathered.  The elided lines presumably derive 'wait'
 * from o->busy_poll (busy-poll vs blocking) — confirm against full source.
 */
458 static int fio_rbd_getevents(struct thread_data *td, unsigned int min,
459 unsigned int max, const struct timespec *t)
461 unsigned int this_events, events = 0;
462 struct rbd_options *o = td->eo;
466 this_events = rbd_iter_events(td, &events, min, wait);
/*
 * ioengine queue hook: create a librbd AIO completion bound to this
 * io_u's iou (with _fio_rbd_finish_aiocb as callback), then dispatch the
 * matching rbd_aio_* call by data direction.  On success the request is
 * in flight (FIO_Q_QUEUED on an elided path); on failure the completion
 * is released and FIO_Q_COMPLETED is returned with io_u->error set.
 */
482 static enum fio_q_status fio_rbd_queue(struct thread_data *td,
485 struct rbd_data *rbd = td->io_ops_data;
486 struct fio_rbd_iou *fri = io_u->engine_data;
/* Enforce read-only jobs before touching the image. */
489 fio_ro_check(td, io_u);
/* Reset the per-io completion flag for this round. */
492 fri->io_complete = 0;
494 r = rbd_aio_create_completion(fri, _fio_rbd_finish_aiocb,
497 log_err("rbd_aio_create_completion failed.\n");
501 if (io_u->ddir == DDIR_WRITE) {
502 r = rbd_aio_write(rbd->image, io_u->offset, io_u->xfer_buflen,
503 io_u->xfer_buf, fri->completion);
505 log_err("rbd_aio_write failed.\n");
509 } else if (io_u->ddir == DDIR_READ) {
510 r = rbd_aio_read(rbd->image, io_u->offset, io_u->xfer_buflen,
511 io_u->xfer_buf, fri->completion);
514 log_err("rbd_aio_read failed.\n");
517 } else if (io_u->ddir == DDIR_TRIM) {
518 r = rbd_aio_discard(rbd->image, io_u->offset,
519 io_u->xfer_buflen, fri->completion);
521 log_err("rbd_aio_discard failed.\n");
524 } else if (io_u->ddir == DDIR_SYNC) {
525 r = rbd_aio_flush(rbd->image, fri->completion);
527 log_err("rbd_flush failed.\n");
/* Unknown direction: log and fall through to the failure path. */
531 dprint(FD_IO, "%s: Warning: unhandled ddir: %d\n", __func__,
/* Failure path: the completion must be released since it never fires. */
539 rbd_aio_release(fri->completion);
542 td_verror(td, io_u->error, "xfer");
543 return FIO_Q_COMPLETED;
/*
 * ioengine init hook: (re)establish the cluster/image connection for the
 * I/O thread.  The elided lines presumably skip this when rbd->connected
 * is already true from setup — confirm against full source.
 */
546 static int fio_rbd_init(struct thread_data *td)
549 struct rbd_data *rbd = td->io_ops_data;
554 r = _fio_rbd_connect(td);
556 log_err("fio_rbd_connect failed, return code: %d .\n", r);
/*
 * ioengine cleanup hook: disconnect from the cluster and free the
 * per-thread rbd_data and its event arrays.
 */
566 static void fio_rbd_cleanup(struct thread_data *td)
568 struct rbd_data *rbd = td->io_ops_data;
571 _fio_rbd_disconnect(rbd);
572 free(rbd->aio_events);
573 free(rbd->sort_events);
/*
 * ioengine setup hook (runs in the main thread): allocate the engine
 * state, connect once to discover the image size, force thread mode,
 * and register a pseudo-file sized to the image so fio's file layer
 * has something to work with.
 */
578 static int fio_rbd_setup(struct thread_data *td)
580 rbd_image_info_t info;
582 struct rbd_data *rbd = NULL;
585 /* allocate engine specific structure to deal with librbd. */
586 r = _fio_setup_rbd_data(td, &rbd);
588 log_err("fio_setup_rbd_data failed.\n");
591 td->io_ops_data = rbd;
/* librbd does not allow us to run first in the main thread and later
 * in a fork child. It needs to be the same process context all the
 * time — so force thread mode. */
593 /* librbd does not allow us to run first in the main thread and later
594 * in a fork child. It needs to be the same process context all the
597 td->o.use_thread = 1;
/* connect in the main thread to determine the size of the given
 * RADOS block device. */
599 /* connect in the main thread to determine
600 * the size of the given RADOS block device. And disconnect
603 r = _fio_rbd_connect(td);
605 log_err("fio_rbd_connect failed.\n");
608 rbd->connected = true;
610 /* get size of the RADOS block device */
611 r = rbd_stat(rbd->image, &info, sizeof(info));
613 log_err("rbd_status failed.\n");
/* A zero-sized image cannot host any I/O. */
615 } else if (info.size == 0) {
616 log_err("image size should be larger than zero.\n");
621 dprint(FD_IO, "rbd-engine: image size: %" PRIu64 "\n", info.size);
/* taken from "net" engine. Pretend we deal with files,
 * even if we do not have any ideas about files.
 * The size of the RBD is set instead of an artificial file. */
623 /* taken from "net" engine. Pretend we deal with files,
624 * even if we do not have any ideas about files.
625 * The size of the RBD is set instead of a artificial file.
627 if (!td->files_index) {
628 add_file(td, td->o.filename ? : "rbd", 0, 0);
629 td->o.nr_files = td->o.nr_files ? : 1;
633 f->real_file_size = info.size;
/* open_file hook: presumably a no-op returning 0 — the image is opened
 * at connect time, not per-file; body not visible, confirm. */
642 static int fio_rbd_open(struct thread_data *td, struct fio_file *f)
/*
 * invalidate hook: drop the librbd cache for the image when the librbd
 * in use supports it (CONFIG_RBD_INVAL); otherwise a no-op.
 */
647 static int fio_rbd_invalidate(struct thread_data *td, struct fio_file *f)
649 #if defined(CONFIG_RBD_INVAL)
650 struct rbd_data *rbd = td->io_ops_data;
652 return rbd_invalidate_cache(rbd->image);
/* Free the per-io_u iou allocated by fio_rbd_io_u_init and unhook it. */
658 static void fio_rbd_io_u_free(struct thread_data *td, struct io_u *io_u)
660 struct fio_rbd_iou *fri = io_u->engine_data;
663 io_u->engine_data = NULL;
/*
 * Allocate a zeroed fio_rbd_iou for this io_u and attach it via
 * engine_data; the elided lines presumably link fri->io_u back to io_u.
 */
668 static int fio_rbd_io_u_init(struct thread_data *td, struct io_u *io_u)
670 struct fio_rbd_iou *fri;
672 fri = calloc(1, sizeof(*fri));
674 io_u->engine_data = fri;
/* ioengine vtable wiring the rbd hooks into fio's engine interface. */
678 FIO_STATIC struct ioengine_ops ioengine = {
680 .version = FIO_IOOPS_VERSION,
681 .setup = fio_rbd_setup,
682 .init = fio_rbd_init,
683 .queue = fio_rbd_queue,
684 .getevents = fio_rbd_getevents,
685 .event = fio_rbd_event,
686 .cleanup = fio_rbd_cleanup,
687 .open_file = fio_rbd_open,
688 .invalidate = fio_rbd_invalidate,
690 .io_u_init = fio_rbd_io_u_init,
691 .io_u_free = fio_rbd_io_u_free,
692 .option_struct_size = sizeof(struct rbd_options),
/* Constructor: register this engine with fio at load time. */
695 static void fio_init fio_rbd_register(void)
697 register_ioengine(&ioengine);
/* Destructor: unregister the engine at unload time. */
700 static void fio_exit fio_rbd_unregister(void)
702 unregister_ioengine(&ioengine);