4 * IO engine using Ceph's librbd to test RADOS Block Devices.
8 #include <rbd/librbd.h>
11 #include "../optgroup.h"
/* Per-io_u librbd AIO completion handle (member of struct fio_rbd_iou —
 * the struct's opening declaration is elided in this excerpt). */
15 rbd_completion_t completion;
/* Members of struct rbd_data (per-thread engine state):
 * aio_events:  completed io_u's handed back to fio via ->event()
 * sort_events: scratch array used to age-sort in-flight io_u's */
24 struct io_u **aio_events;
25 struct io_u **sort_events;
/* fio job-file options for the rbd engine; each .off1 stores the value
 * into struct rbd_options (td->eo). NOTE(review): several entries'
 * .name lines and the array terminator are elided in this excerpt. */
37 static struct fio_option options[] = {
/* Ceph cluster to connect to (passed to rados_create2()). */
39 .name = "clustername",
40 .lname = "ceph cluster name",
41 .type = FIO_OPT_STR_STORE,
42 .help = "Cluster name for ceph",
43 .off1 = offsetof(struct rbd_options, cluster_name),
44 .category = FIO_OPT_C_ENGINE,
45 .group = FIO_OPT_G_RBD,
/* Name of the RBD image to open (rbd_open()). */
49 .lname = "rbd engine rbdname",
50 .type = FIO_OPT_STR_STORE,
51 .help = "RBD name for RBD engine",
52 .off1 = offsetof(struct rbd_options, rbd_name),
53 .category = FIO_OPT_C_ENGINE,
54 .group = FIO_OPT_G_RBD,
/* Pool that hosts the image (rados_ioctx_create()). */
58 .lname = "rbd engine pool",
59 .type = FIO_OPT_STR_STORE,
60 .help = "Name of the pool hosting the RBD for the RBD engine",
61 .off1 = offsetof(struct rbd_options, pool_name),
62 .category = FIO_OPT_C_ENGINE,
63 .group = FIO_OPT_G_RBD,
/* Ceph client identity; see _fio_rbd_connect() for the "client."
 * prefixing rules when clustername is also given. */
67 .lname = "rbd engine clientname",
68 .type = FIO_OPT_STR_STORE,
69 .help = "Name of the ceph client to access the RBD for the RBD engine",
70 .off1 = offsetof(struct rbd_options, client_name),
71 .category = FIO_OPT_C_ENGINE,
72 .group = FIO_OPT_G_RBD,
/* busy_poll: spin on completions instead of sleeping (see getevents). */
78 .help = "Busy poll for completions instead of sleeping",
79 .off1 = offsetof(struct rbd_options, busy_poll),
81 .category = FIO_OPT_C_ENGINE,
82 .group = FIO_OPT_G_RBD,
/* Allocate the per-thread rbd_data plus its two io_u pointer arrays,
 * each sized to the job's iodepth. On success *rbd_data_ptr receives
 * the new object; on allocation failure the partial state is freed
 * (error-return lines are elided in this excerpt). */
89 static int _fio_setup_rbd_data(struct thread_data *td,
90 struct rbd_data **rbd_data_ptr)
97 rbd = calloc(1, sizeof(struct rbd_data));
101 rbd->aio_events = calloc(td->o.iodepth, sizeof(struct io_u *));
102 if (!rbd->aio_events)
105 rbd->sort_events = calloc(td->o.iodepth, sizeof(struct io_u *));
106 if (!rbd->sort_events)
/* Failure unwind. NOTE(review): free(NULL) is a no-op, so the
 * NULL guard before free(rbd->sort_events) is redundant. */
115 free(rbd->aio_events);
116 if (rbd->sort_events)
117 free(rbd->sort_events);
/* Connect this thread to the cluster and open the target image:
 * rados_create2()/rados_create() -> rados_conf_read_file() ->
 * rados_connect() -> rados_ioctx_create() -> rbd_open().
 * Errors unwind via the failed_shutdown labels (some goto/label
 * lines are elided in this excerpt). */
124 static int _fio_rbd_connect(struct thread_data *td)
126 struct rbd_data *rbd = td->io_ops_data;
127 struct rbd_options *o = td->eo;
130 if (o->cluster_name) {
131 char *client_name = NULL;
/* With an explicit cluster name, rados_create2() takes the client
 * identity as a full "type.id" string, so prepend "client." when
 * the user supplied only the bare id. */
134 * If we specify cluser name, the rados_create2
135 * will not assume 'client.'. name is considered
136 * as a full type.id namestr
138 if (o->client_name) {
/* NOTE(review): index() is legacy BSD; strchr() is the portable
 * equivalent. The calloc here is unchecked before strcat. */
139 if (!index(o->client_name, '.')) {
140 client_name = calloc(1, strlen("client.") +
141 strlen(o->client_name) + 1);
142 strcat(client_name, "client.");
143 strcat(client_name, o->client_name);
145 client_name = o->client_name;
149 r = rados_create2(&rbd->cluster, o->cluster_name,
/* Free client_name only when it was heap-built above (i.e. the
 * original client_name had no '.'); otherwise it aliases o->client_name. */
152 if (client_name && !index(o->client_name, '.'))
/* No cluster name: plain rados_create() with the client id. */
155 r = rados_create(&rbd->cluster, o->client_name);
158 log_err("rados_create failed.\n");
/* NULL config path = read the default ceph.conf search locations. */
162 r = rados_conf_read_file(rbd->cluster, NULL);
164 log_err("rados_conf_read_file failed.\n");
168 r = rados_connect(rbd->cluster);
170 log_err("rados_connect failed.\n");
171 goto failed_shutdown;
174 r = rados_ioctx_create(rbd->cluster, o->pool_name, &rbd->io_ctx);
176 log_err("rados_ioctx_create failed.\n");
177 goto failed_shutdown;
180 r = rbd_open(rbd->io_ctx, o->rbd_name, &rbd->image, NULL /*snap */ );
182 log_err("rbd_open failed.\n");
/* Error unwind: destroy the io context, then shut down the cluster. */
188 rados_ioctx_destroy(rbd->io_ctx);
191 rados_shutdown(rbd->cluster);
/* Tear down the connection state built by _fio_rbd_connect():
 * close the image, destroy the io context, shut down the cluster
 * (the NULL-checks guarding each step are elided in this excerpt). */
197 static void _fio_rbd_disconnect(struct rbd_data *rbd)
202 /* shutdown everything */
204 rbd_close(rbd->image);
209 rados_ioctx_destroy(rbd->io_ctx);
214 rados_shutdown(rbd->cluster);
/* librbd AIO completion callback (registered in fio_rbd_queue()).
 * Records the result on the io_u and marks it complete; the reaping
 * side (fri_check_complete) picks it up and releases the completion. */
219 static void _fio_rbd_finish_aiocb(rbd_completion_t comp, void *data)
221 struct fio_rbd_iou *fri = data;
222 struct io_u *io_u = fri->io_u;
226 * Looks like return value is 0 for success, or < 0 for
227 * a specific error. So we have to assume that it can't do
228 * partial completions.
230 ret = rbd_aio_get_return_value(fri->completion);
/* On error, report the whole transfer as residual (the surrounding
 * error branch is elided in this excerpt). */
233 io_u->resid = io_u->xfer_buflen;
/* Publish completion last so the poller sees a fully-updated io_u. */
237 fri->io_complete = 1;
/* ->event() hook: return the io_u stored at slot 'event' by the
 * most recent getevents pass. */
240 static struct io_u *fio_rbd_event(struct thread_data *td, int event)
242 struct rbd_data *rbd = td->io_ops_data;
244 return rbd->aio_events[event];
/* If io_u has completed (flag set by _fio_rbd_finish_aiocb), stash it
 * in aio_events[*events], bump the counter, release the librbd
 * completion, and report it as reaped (return lines elided here). */
247 static inline int fri_check_complete(struct rbd_data *rbd, struct io_u *io_u,
248 unsigned int *events)
250 struct fio_rbd_iou *fri = io_u->engine_data;
252 if (fri->io_complete) {
254 rbd->aio_events[*events] = io_u;
257 rbd_aio_release(fri->completion);
/* Whether this io_u has already been visited in the current reap pass
 * (the flag read/return is elided in this excerpt). */
264 static inline int rbd_io_u_seen(struct io_u *io_u)
266 struct fio_rbd_iou *fri = io_u->engine_data;
/* Block until this io_u's librbd AIO completes. */
271 static void rbd_io_u_wait_complete(struct io_u *io_u)
273 struct fio_rbd_iou *fri = io_u->engine_data;
275 rbd_aio_wait_for_complete(fri->completion);
/* qsort comparator ordering io_u's oldest-issue-first: a larger
 * time-since-start means an older IO (the comparison/return lines
 * are elided in this excerpt). */
278 static int rbd_io_u_cmp(const void *p1, const void *p2)
280 const struct io_u **a = (const struct io_u **) p1;
281 const struct io_u **b = (const struct io_u **) p2;
284 at = utime_since_now(&(*a)->start_time);
285 bt = utime_since_now(&(*b)->start_time);
/* One reaping pass over all in-flight io_u's: harvest everything
 * already complete; if 'wait' is set, sort the remainder oldest-first
 * and block on them one by one until min_evts events are gathered. */
295 static int rbd_iter_events(struct thread_data *td, unsigned int *events,
296 unsigned int min_evts, int wait)
298 struct rbd_data *rbd = td->io_ops_data;
299 unsigned int this_events = 0;
/* First pass: pick up already-complete IOs; queue the rest for
 * possible waiting below. */
304 io_u_qiter(&td->io_u_all, io_u, i) {
305 if (!(io_u->flags & IO_U_F_FLIGHT))
307 if (rbd_io_u_seen(io_u))
310 if (fri_check_complete(rbd, io_u, events))
313 rbd->sort_events[sidx++] = io_u;
320 * Sort events, oldest issue first, then wait on as many as we
321 * need in order of age. If we have enough events, stop waiting,
322 * and just check if any of the older ones are done.
325 qsort(rbd->sort_events, sidx, sizeof(struct io_u *), rbd_io_u_cmp);
327 for (i = 0; i < sidx; i++) {
328 io_u = rbd->sort_events[i];
330 if (fri_check_complete(rbd, io_u, events)) {
336 * Stop waiting when we have enough, but continue checking
337 * all pending IOs if they are complete.
339 if (*events >= min_evts)
/* Not enough yet: block on this (oldest remaining) IO, then
 * harvest it. */
342 rbd_io_u_wait_complete(io_u);
344 if (fri_check_complete(rbd, io_u, events))
/* ->getevents() hook: loop rbd_iter_events() until at least 'min'
 * completions are collected; busy_poll selects spinning vs sleeping
 * between passes (the loop and sleep lines are elided here). */
351 static int fio_rbd_getevents(struct thread_data *td, unsigned int min,
352 unsigned int max, const struct timespec *t)
354 unsigned int this_events, events = 0;
355 struct rbd_options *o = td->eo;
359 this_events = rbd_iter_events(td, &events, min, wait);
/* ->queue() hook: create a librbd AIO completion wired to
 * _fio_rbd_finish_aiocb, then dispatch the io_u by direction
 * (write/read/trim/sync). On submit failure the completion is
 * released and the io_u is returned FIO_Q_COMPLETED with the error;
 * the FIO_Q_QUEUED success return is elided in this excerpt. */
375 static int fio_rbd_queue(struct thread_data *td, struct io_u *io_u)
377 struct rbd_data *rbd = td->io_ops_data;
378 struct fio_rbd_iou *fri = io_u->engine_data;
381 fio_ro_check(td, io_u);
/* Reset the per-IO completion flag before (re)submitting. */
384 fri->io_complete = 0;
386 r = rbd_aio_create_completion(fri, _fio_rbd_finish_aiocb,
389 log_err("rbd_aio_create_completion failed.\n");
393 if (io_u->ddir == DDIR_WRITE) {
394 r = rbd_aio_write(rbd->image, io_u->offset, io_u->xfer_buflen,
395 io_u->xfer_buf, fri->completion);
397 log_err("rbd_aio_write failed.\n");
401 } else if (io_u->ddir == DDIR_READ) {
402 r = rbd_aio_read(rbd->image, io_u->offset, io_u->xfer_buflen,
403 io_u->xfer_buf, fri->completion);
406 log_err("rbd_aio_read failed.\n");
409 } else if (io_u->ddir == DDIR_TRIM) {
410 r = rbd_aio_discard(rbd->image, io_u->offset,
411 io_u->xfer_buflen, fri->completion);
413 log_err("rbd_aio_discard failed.\n");
416 } else if (io_u->ddir == DDIR_SYNC) {
417 r = rbd_aio_flush(rbd->image, fri->completion);
419 log_err("rbd_flush failed.\n");
/* Unknown ddir: log and fall through to the failure path. */
423 dprint(FD_IO, "%s: Warning: unhandled ddir: %d\n", __func__,
/* Failure path: drop the completion and complete the io_u inline. */
430 rbd_aio_release(fri->completion);
433 td_verror(td, io_u->error, "xfer");
434 return FIO_Q_COMPLETED;
/* ->init() hook: establish this worker thread's cluster/image
 * connection. */
437 static int fio_rbd_init(struct thread_data *td)
441 r = _fio_rbd_connect(td);
443 log_err("fio_rbd_connect failed, return code: %d .\n", r);
/* ->cleanup() hook: disconnect from the cluster and free the
 * per-thread arrays allocated in _fio_setup_rbd_data(). */
453 static void fio_rbd_cleanup(struct thread_data *td)
455 struct rbd_data *rbd = td->io_ops_data;
458 _fio_rbd_disconnect(rbd);
459 free(rbd->aio_events);
460 free(rbd->sort_events);
/* ->setup() hook, run once in the main process: allocate engine data,
 * briefly connect to stat the image so fio knows the "file" size,
 * then disconnect again (workers reconnect in fio_rbd_init()). */
465 static int fio_rbd_setup(struct thread_data *td)
467 rbd_image_info_t info;
469 struct rbd_data *rbd = NULL;
470 int major, minor, extra;
473 /* log version of librbd. No cluster connection required. */
474 rbd_version(&major, &minor, &extra);
475 log_info("rbd engine: RBD version: %d.%d.%d\n", major, minor, extra);
477 /* allocate engine specific structure to deal with librbd. */
478 r = _fio_setup_rbd_data(td, &rbd);
480 log_err("fio_setup_rbd_data failed.\n");
483 td->io_ops_data = rbd;
485 /* librbd does not allow us to run first in the main thread and later
486 * in a fork child. It needs to be the same process context all the
/* Force thread mode so workers share this process context. */
489 td->o.use_thread = 1;
491 /* connect in the main thread to determine to determine
492 * the size of the given RADOS block device. And disconnect
495 r = _fio_rbd_connect(td);
497 log_err("fio_rbd_connect failed.\n");
501 /* get size of the RADOS block device */
502 r = rbd_stat(rbd->image, &info, sizeof(info));
504 log_err("rbd_status failed.\n");
/* NOTE(review): info.size is presumably a fixed 64-bit type; "%lu"
 * may mismatch on 32-bit platforms — confirm against librbd headers. */
507 dprint(FD_IO, "rbd-engine: image size: %lu\n", info.size);
509 /* taken from "net" engine. Pretend we deal with files,
510 * even if we do not have any ideas about files.
511 * The size of the RBD is set instead of a artificial file.
513 if (!td->files_index) {
514 add_file(td, td->o.filename ? : "rbd", 0, 0);
515 td->o.nr_files = td->o.nr_files ? : 1;
/* Report the image size as the fake file's real size. */
519 f->real_file_size = info.size;
521 /* disconnect, then we were only connected to determine
522 * the size of the RBD.
524 _fio_rbd_disconnect(rbd);
/* Error path also disconnects before returning. */
528 _fio_rbd_disconnect(rbd);
/* ->open_file() hook: no per-file work needed — the image was opened
 * during connect (body elided in this excerpt, presumably returns 0). */
534 static int fio_rbd_open(struct thread_data *td, struct fio_file *f)
/* ->invalidate() hook: drop librbd's cache for the image when the
 * library supports it (CONFIG_RBD_INVAL); otherwise a no-op. */
539 static int fio_rbd_invalidate(struct thread_data *td, struct fio_file *f)
541 #if defined(CONFIG_RBD_INVAL)
542 struct rbd_data *rbd = td->io_ops_data;
544 return rbd_invalidate_cache(rbd->image);
/* ->io_u_free() hook: release the per-io_u fio_rbd_iou allocated in
 * fio_rbd_io_u_init() (the free() line is elided in this excerpt). */
550 static void fio_rbd_io_u_free(struct thread_data *td, struct io_u *io_u)
552 struct fio_rbd_iou *fri = io_u->engine_data;
555 io_u->engine_data = NULL;
/* ->io_u_init() hook: attach a zeroed fio_rbd_iou to each io_u
 * (the back-pointer assignment and return are elided here). */
560 static int fio_rbd_io_u_init(struct thread_data *td, struct io_u *io_u)
562 struct fio_rbd_iou *fri;
564 fri = calloc(1, sizeof(*fri));
566 io_u->engine_data = fri;
/* fio ioengine descriptor wiring the hooks above into the core
 * (the .name and .flags entries are elided in this excerpt). */
570 static struct ioengine_ops ioengine = {
572 .version = FIO_IOOPS_VERSION,
573 .setup = fio_rbd_setup,
574 .init = fio_rbd_init,
575 .queue = fio_rbd_queue,
576 .getevents = fio_rbd_getevents,
577 .event = fio_rbd_event,
578 .cleanup = fio_rbd_cleanup,
579 .open_file = fio_rbd_open,
580 .invalidate = fio_rbd_invalidate,
582 .io_u_init = fio_rbd_io_u_init,
583 .io_u_free = fio_rbd_io_u_free,
584 .option_struct_size = sizeof(struct rbd_options),
/* Constructor: register this engine with fio at program startup. */
587 static void fio_init fio_rbd_register(void)
589 register_ioengine(&ioengine);
/* Destructor: unregister the engine at program exit. */
592 static void fio_exit fio_rbd_unregister(void)
594 unregister_ioengine(&ioengine);