Introduce enum fio_q_status
[fio.git] / engines / rbd.c
... / ...
CommitLineData
1/*
2 * rbd engine
3 *
4 * IO engine using Ceph's librbd to test RADOS Block Devices.
5 *
6 */
7
8#include <rbd/librbd.h>
9
10#include "../fio.h"
11#include "../optgroup.h"
12
13#ifdef CONFIG_RBD_POLL
14/* add for poll */
15#include <poll.h>
16#include <sys/eventfd.h>
17#endif
18
/*
 * Per-io_u engine data, attached via io_u->engine_data.
 * Tracks the librbd async completion and its reaping state.
 */
struct fio_rbd_iou {
	struct io_u *io_u;		/* back-pointer to the owning io_u */
	rbd_completion_t completion;	/* librbd async completion handle */
	int io_seen;			/* handed back to fio via aio_events */
	int io_complete;		/* set by _fio_rbd_finish_aiocb */
};
25
/* Per-thread engine state, stored in td->io_ops_data. */
struct rbd_data {
	rados_t cluster;		/* RADOS cluster handle */
	rados_ioctx_t io_ctx;		/* I/O context for the target pool */
	rbd_image_t image;		/* open RBD image */
	struct io_u **aio_events;	/* completed io_us returned by ->event() */
	struct io_u **sort_events;	/* scratch array for age-sorted waiting */
	int fd; /* eventfd for CONFIG_RBD_POLL completion polling; -1 if unused */
	bool connected;
};
35
/* Job options; filled in by fio's option parser from the table below. */
struct rbd_options {
	void *pad;		/* NOTE(review): leading pad looks required by fio's
				 * option offset handling — confirm against other engines */
	char *cluster_name;	/* "clustername" option */
	char *rbd_name;		/* "rbdname" option */
	char *pool_name;	/* "pool" option */
	char *client_name;	/* "clientname" option */
	int busy_poll;		/* "busy_poll": spin instead of sleeping */
};
44
/* Engine option table; offsets map into struct rbd_options above. */
static struct fio_option options[] = {
	{
		.name		= "clustername",
		.lname		= "ceph cluster name",
		.type		= FIO_OPT_STR_STORE,
		.help		= "Cluster name for ceph",
		.off1		= offsetof(struct rbd_options, cluster_name),
		.category	= FIO_OPT_C_ENGINE,
		.group		= FIO_OPT_G_RBD,
	},
	{
		.name		= "rbdname",
		.lname		= "rbd engine rbdname",
		.type		= FIO_OPT_STR_STORE,
		.help		= "RBD name for RBD engine",
		.off1		= offsetof(struct rbd_options, rbd_name),
		.category	= FIO_OPT_C_ENGINE,
		.group		= FIO_OPT_G_RBD,
	},
	{
		.name		= "pool",
		.lname		= "rbd engine pool",
		.type		= FIO_OPT_STR_STORE,
		.help		= "Name of the pool hosting the RBD for the RBD engine",
		.off1		= offsetof(struct rbd_options, pool_name),
		.category	= FIO_OPT_C_ENGINE,
		.group		= FIO_OPT_G_RBD,
	},
	{
		.name		= "clientname",
		.lname		= "rbd engine clientname",
		.type		= FIO_OPT_STR_STORE,
		.help		= "Name of the ceph client to access the RBD for the RBD engine",
		.off1		= offsetof(struct rbd_options, client_name),
		.category	= FIO_OPT_C_ENGINE,
		.group		= FIO_OPT_G_RBD,
	},
	{
		.name		= "busy_poll",
		.lname		= "Busy poll",
		.type		= FIO_OPT_BOOL,
		.help		= "Busy poll for completions instead of sleeping",
		.off1		= offsetof(struct rbd_options, busy_poll),
		.def		= "0",
		.category	= FIO_OPT_C_ENGINE,
		.group		= FIO_OPT_G_RBD,
	},
	{
		.name = NULL,	/* table terminator */
	},
};
96
97static int _fio_setup_rbd_data(struct thread_data *td,
98 struct rbd_data **rbd_data_ptr)
99{
100 struct rbd_data *rbd;
101
102 if (td->io_ops_data)
103 return 0;
104
105 rbd = calloc(1, sizeof(struct rbd_data));
106 if (!rbd)
107 goto failed;
108
109 rbd->connected = false;
110
111 /* add for poll, init fd: -1 */
112 rbd->fd = -1;
113
114 rbd->aio_events = calloc(td->o.iodepth, sizeof(struct io_u *));
115 if (!rbd->aio_events)
116 goto failed;
117
118 rbd->sort_events = calloc(td->o.iodepth, sizeof(struct io_u *));
119 if (!rbd->sort_events)
120 goto failed;
121
122 *rbd_data_ptr = rbd;
123 return 0;
124
125failed:
126 if (rbd) {
127 if (rbd->aio_events)
128 free(rbd->aio_events);
129 if (rbd->sort_events)
130 free(rbd->sort_events);
131 free(rbd);
132 }
133 return 1;
134
135}
136
#ifdef CONFIG_RBD_POLL
/*
 * Create an eventfd (semaphore mode: each read decrements by one) and
 * register it with librbd so completions are signalled through it.
 * Returns true on success; on failure the fd is closed and reset to -1.
 */
static bool _fio_rbd_setup_poll(struct rbd_data *rbd)
{
	int r;

	/* add for rbd poll */
	rbd->fd = eventfd(0, EFD_SEMAPHORE);
	if (rbd->fd < 0) {
		log_err("eventfd failed.\n");
		return false;
	}

	r = rbd_set_image_notification(rbd->image, rbd->fd, EVENT_TYPE_EVENTFD);
	if (r < 0) {
		log_err("rbd_set_image_notification failed.\n");
		close(rbd->fd);
		rbd->fd = -1;
		return false;
	}

	return true;
}
#else
/* No poll support configured: nothing to set up, always succeeds. */
static bool _fio_rbd_setup_poll(struct rbd_data *rbd)
{
	return true;
}
#endif
165
166static int _fio_rbd_connect(struct thread_data *td)
167{
168 struct rbd_data *rbd = td->io_ops_data;
169 struct rbd_options *o = td->eo;
170 int r;
171
172 if (o->cluster_name) {
173 char *client_name = NULL;
174
175 /*
176 * If we specify cluser name, the rados_create2
177 * will not assume 'client.'. name is considered
178 * as a full type.id namestr
179 */
180 if (o->client_name) {
181 if (!index(o->client_name, '.')) {
182 client_name = calloc(1, strlen("client.") +
183 strlen(o->client_name) + 1);
184 strcat(client_name, "client.");
185 strcat(client_name, o->client_name);
186 } else {
187 client_name = o->client_name;
188 }
189 }
190
191 r = rados_create2(&rbd->cluster, o->cluster_name,
192 client_name, 0);
193
194 if (client_name && !index(o->client_name, '.'))
195 free(client_name);
196 } else
197 r = rados_create(&rbd->cluster, o->client_name);
198
199 if (r < 0) {
200 log_err("rados_create failed.\n");
201 goto failed_early;
202 }
203
204 r = rados_conf_read_file(rbd->cluster, NULL);
205 if (r < 0) {
206 log_err("rados_conf_read_file failed.\n");
207 goto failed_early;
208 }
209
210 r = rados_connect(rbd->cluster);
211 if (r < 0) {
212 log_err("rados_connect failed.\n");
213 goto failed_shutdown;
214 }
215
216 r = rados_ioctx_create(rbd->cluster, o->pool_name, &rbd->io_ctx);
217 if (r < 0) {
218 log_err("rados_ioctx_create failed.\n");
219 goto failed_shutdown;
220 }
221
222 r = rbd_open(rbd->io_ctx, o->rbd_name, &rbd->image, NULL /*snap */ );
223 if (r < 0) {
224 log_err("rbd_open failed.\n");
225 goto failed_open;
226 }
227
228 if (!_fio_rbd_setup_poll(rbd))
229 goto failed_poll;
230
231 return 0;
232
233failed_poll:
234 rbd_close(rbd->image);
235 rbd->image = NULL;
236failed_open:
237 rados_ioctx_destroy(rbd->io_ctx);
238 rbd->io_ctx = NULL;
239failed_shutdown:
240 rados_shutdown(rbd->cluster);
241 rbd->cluster = NULL;
242failed_early:
243 return 1;
244}
245
/*
 * Tear down everything _fio_rbd_connect() built, in reverse acquisition
 * order: eventfd, image, io context, cluster. Each handle is NULLed (or
 * set to -1) after release so the function is safe to call repeatedly.
 */
static void _fio_rbd_disconnect(struct rbd_data *rbd)
{
	if (!rbd)
		return;

	/* close eventfd */
	if (rbd->fd != -1) {
		close(rbd->fd);
		rbd->fd = -1;
	}

	/* shutdown everything */
	if (rbd->image) {
		rbd_close(rbd->image);
		rbd->image = NULL;
	}

	if (rbd->io_ctx) {
		rados_ioctx_destroy(rbd->io_ctx);
		rbd->io_ctx = NULL;
	}

	if (rbd->cluster) {
		rados_shutdown(rbd->cluster);
		rbd->cluster = NULL;
	}
}
273
274static void _fio_rbd_finish_aiocb(rbd_completion_t comp, void *data)
275{
276 struct fio_rbd_iou *fri = data;
277 struct io_u *io_u = fri->io_u;
278 ssize_t ret;
279
280 /*
281 * Looks like return value is 0 for success, or < 0 for
282 * a specific error. So we have to assume that it can't do
283 * partial completions.
284 */
285 ret = rbd_aio_get_return_value(fri->completion);
286 if (ret < 0) {
287 io_u->error = -ret;
288 io_u->resid = io_u->xfer_buflen;
289 } else
290 io_u->error = 0;
291
292 fri->io_complete = 1;
293}
294
295static struct io_u *fio_rbd_event(struct thread_data *td, int event)
296{
297 struct rbd_data *rbd = td->io_ops_data;
298
299 return rbd->aio_events[event];
300}
301
302static inline int fri_check_complete(struct rbd_data *rbd, struct io_u *io_u,
303 unsigned int *events)
304{
305 struct fio_rbd_iou *fri = io_u->engine_data;
306
307 if (fri->io_complete) {
308 fri->io_seen = 1;
309 rbd->aio_events[*events] = io_u;
310 (*events)++;
311
312 rbd_aio_release(fri->completion);
313 return 1;
314 }
315
316 return 0;
317}
318
319static inline int rbd_io_u_seen(struct io_u *io_u)
320{
321 struct fio_rbd_iou *fri = io_u->engine_data;
322
323 return fri->io_seen;
324}
325
326static void rbd_io_u_wait_complete(struct io_u *io_u)
327{
328 struct fio_rbd_iou *fri = io_u->engine_data;
329
330 rbd_aio_wait_for_complete(fri->completion);
331}
332
333static int rbd_io_u_cmp(const void *p1, const void *p2)
334{
335 const struct io_u **a = (const struct io_u **) p1;
336 const struct io_u **b = (const struct io_u **) p2;
337 uint64_t at, bt;
338
339 at = utime_since_now(&(*a)->start_time);
340 bt = utime_since_now(&(*b)->start_time);
341
342 if (at < bt)
343 return -1;
344 else if (at == bt)
345 return 0;
346 else
347 return 1;
348}
349
/*
 * Reap completed events into rbd->aio_events, starting at index
 * *events. Returns the number of events reaped on this pass.
 *
 * Two collection strategies:
 *  - CONFIG_RBD_POLL: poll() the eventfd (blocking if 'wait'), then
 *    drain completions via rbd_poll_io_events().
 *  - otherwise: scan every in-flight io_u; if 'wait', stash the
 *    unfinished ones and wait on the oldest until 'min_evts' is met.
 */
static int rbd_iter_events(struct thread_data *td, unsigned int *events,
			   unsigned int min_evts, int wait)
{
	struct rbd_data *rbd = td->io_ops_data;
	unsigned int this_events = 0;
	struct io_u *io_u;
	int i, sidx = 0;

#ifdef CONFIG_RBD_POLL
	int ret = 0;
	int event_num = 0;
	struct fio_rbd_iou *fri = NULL;
	rbd_completion_t comps[min_evts];
	uint64_t counter;
	bool completed;

	struct pollfd pfd;
	pfd.fd = rbd->fd;
	pfd.events = POLLIN;

	/* block indefinitely when waiting, otherwise just peek */
	ret = poll(&pfd, 1, wait ? -1 : 0);
	if (ret <= 0)
		return 0;
	if (!(pfd.revents & POLLIN))
		return 0;

	event_num = rbd_poll_io_events(rbd->image, comps, min_evts);

	for (i = 0; i < event_num; i++) {
		fri = rbd_aio_get_arg(comps[i]);
		io_u = fri->io_u;

		/* best effort to decrement the semaphore */
		ret = read(rbd->fd, &counter, sizeof(counter));
		if (ret <= 0)
			log_err("rbd_iter_events failed to decrement semaphore.\n");

		/* polled completions must already be marked complete */
		completed = fri_check_complete(rbd, io_u, events);
		assert(completed);

		this_events++;
	}
#else
	io_u_qiter(&td->io_u_all, io_u, i) {
		if (!(io_u->flags & IO_U_F_FLIGHT))
			continue;
		if (rbd_io_u_seen(io_u))
			continue;

		if (fri_check_complete(rbd, io_u, events))
			this_events++;
		else if (wait)
			rbd->sort_events[sidx++] = io_u;
	}
#endif

	if (!wait || !sidx)
		return this_events;

	/*
	 * Sort events, oldest issue first, then wait on as many as we
	 * need in order of age. If we have enough events, stop waiting,
	 * and just check if any of the older ones are done.
	 */
	if (sidx > 1)
		qsort(rbd->sort_events, sidx, sizeof(struct io_u *), rbd_io_u_cmp);

	for (i = 0; i < sidx; i++) {
		io_u = rbd->sort_events[i];

		if (fri_check_complete(rbd, io_u, events)) {
			this_events++;
			continue;
		}

		/*
		 * Stop waiting when we have enough, but continue checking
		 * all pending IOs if they are complete.
		 */
		if (*events >= min_evts)
			continue;

		rbd_io_u_wait_complete(io_u);

		if (fri_check_complete(rbd, io_u, events))
			this_events++;
	}

	return this_events;
}
440
441static int fio_rbd_getevents(struct thread_data *td, unsigned int min,
442 unsigned int max, const struct timespec *t)
443{
444 unsigned int this_events, events = 0;
445 struct rbd_options *o = td->eo;
446 int wait = 0;
447
448 do {
449 this_events = rbd_iter_events(td, &events, min, wait);
450
451 if (events >= min)
452 break;
453 if (this_events)
454 continue;
455
456 if (!o->busy_poll)
457 wait = 1;
458 else
459 nop;
460 } while (1);
461
462 return events;
463}
464
465static enum fio_q_status
466fio_rbd_queue(struct thread_data *td, struct io_u *io_u)
467{
468 struct rbd_data *rbd = td->io_ops_data;
469 struct fio_rbd_iou *fri = io_u->engine_data;
470 int r = -1;
471
472 fio_ro_check(td, io_u);
473
474 fri->io_seen = 0;
475 fri->io_complete = 0;
476
477 r = rbd_aio_create_completion(fri, _fio_rbd_finish_aiocb,
478 &fri->completion);
479 if (r < 0) {
480 log_err("rbd_aio_create_completion failed.\n");
481 goto failed;
482 }
483
484 if (io_u->ddir == DDIR_WRITE) {
485 r = rbd_aio_write(rbd->image, io_u->offset, io_u->xfer_buflen,
486 io_u->xfer_buf, fri->completion);
487 if (r < 0) {
488 log_err("rbd_aio_write failed.\n");
489 goto failed_comp;
490 }
491
492 } else if (io_u->ddir == DDIR_READ) {
493 r = rbd_aio_read(rbd->image, io_u->offset, io_u->xfer_buflen,
494 io_u->xfer_buf, fri->completion);
495
496 if (r < 0) {
497 log_err("rbd_aio_read failed.\n");
498 goto failed_comp;
499 }
500 } else if (io_u->ddir == DDIR_TRIM) {
501 r = rbd_aio_discard(rbd->image, io_u->offset,
502 io_u->xfer_buflen, fri->completion);
503 if (r < 0) {
504 log_err("rbd_aio_discard failed.\n");
505 goto failed_comp;
506 }
507 } else if (io_u->ddir == DDIR_SYNC) {
508 r = rbd_aio_flush(rbd->image, fri->completion);
509 if (r < 0) {
510 log_err("rbd_flush failed.\n");
511 goto failed_comp;
512 }
513 } else {
514 dprint(FD_IO, "%s: Warning: unhandled ddir: %d\n", __func__,
515 io_u->ddir);
516 r = -EINVAL;
517 goto failed_comp;
518 }
519
520 return FIO_Q_QUEUED;
521failed_comp:
522 rbd_aio_release(fri->completion);
523failed:
524 io_u->error = -r;
525 td_verror(td, io_u->error, "xfer");
526 return FIO_Q_COMPLETED;
527}
528
529static int fio_rbd_init(struct thread_data *td)
530{
531 int r;
532 struct rbd_data *rbd = td->io_ops_data;
533
534 if (rbd->connected)
535 return 0;
536
537 r = _fio_rbd_connect(td);
538 if (r) {
539 log_err("fio_rbd_connect failed, return code: %d .\n", r);
540 goto failed;
541 }
542
543 return 0;
544
545failed:
546 return 1;
547}
548
549static void fio_rbd_cleanup(struct thread_data *td)
550{
551 struct rbd_data *rbd = td->io_ops_data;
552
553 if (rbd) {
554 _fio_rbd_disconnect(rbd);
555 free(rbd->aio_events);
556 free(rbd->sort_events);
557 free(rbd);
558 }
559}
560
561static int fio_rbd_setup(struct thread_data *td)
562{
563 rbd_image_info_t info;
564 struct fio_file *f;
565 struct rbd_data *rbd = NULL;
566 int r;
567
568 /* allocate engine specific structure to deal with librbd. */
569 r = _fio_setup_rbd_data(td, &rbd);
570 if (r) {
571 log_err("fio_setup_rbd_data failed.\n");
572 goto cleanup;
573 }
574 td->io_ops_data = rbd;
575
576 /* librbd does not allow us to run first in the main thread and later
577 * in a fork child. It needs to be the same process context all the
578 * time.
579 */
580 td->o.use_thread = 1;
581
582 /* connect in the main thread to determine to determine
583 * the size of the given RADOS block device. And disconnect
584 * later on.
585 */
586 r = _fio_rbd_connect(td);
587 if (r) {
588 log_err("fio_rbd_connect failed.\n");
589 goto cleanup;
590 }
591 rbd->connected = true;
592
593 /* get size of the RADOS block device */
594 r = rbd_stat(rbd->image, &info, sizeof(info));
595 if (r < 0) {
596 log_err("rbd_status failed.\n");
597 goto cleanup;
598 } else if (info.size == 0) {
599 log_err("image size should be larger than zero.\n");
600 r = -EINVAL;
601 goto cleanup;
602 }
603
604 dprint(FD_IO, "rbd-engine: image size: %" PRIu64 "\n", info.size);
605
606 /* taken from "net" engine. Pretend we deal with files,
607 * even if we do not have any ideas about files.
608 * The size of the RBD is set instead of a artificial file.
609 */
610 if (!td->files_index) {
611 add_file(td, td->o.filename ? : "rbd", 0, 0);
612 td->o.nr_files = td->o.nr_files ? : 1;
613 td->o.open_files++;
614 }
615 f = td->files[0];
616 f->real_file_size = info.size;
617
618 return 0;
619
620cleanup:
621 fio_rbd_cleanup(td);
622 return r;
623}
624
/*
 * fio ->open_file hook. The image is opened in _fio_rbd_connect(), so
 * there is nothing to do per "file" here.
 */
static int fio_rbd_open(struct thread_data *td, struct fio_file *f)
{
	return 0;
}
629
/*
 * fio ->invalidate hook: drop librbd's cache for the image when the
 * installed librbd provides rbd_invalidate_cache(); otherwise a no-op.
 */
static int fio_rbd_invalidate(struct thread_data *td, struct fio_file *f)
{
#if defined(CONFIG_RBD_INVAL)
	struct rbd_data *rbd = td->io_ops_data;

	return rbd_invalidate_cache(rbd->image);
#else
	return 0;
#endif
}
640
641static void fio_rbd_io_u_free(struct thread_data *td, struct io_u *io_u)
642{
643 struct fio_rbd_iou *fri = io_u->engine_data;
644
645 if (fri) {
646 io_u->engine_data = NULL;
647 free(fri);
648 }
649}
650
651static int fio_rbd_io_u_init(struct thread_data *td, struct io_u *io_u)
652{
653 struct fio_rbd_iou *fri;
654
655 fri = calloc(1, sizeof(*fri));
656 fri->io_u = io_u;
657 io_u->engine_data = fri;
658 return 0;
659}
660
/* Engine ops table wiring the hooks above into fio. */
static struct ioengine_ops ioengine = {
	.name			= "rbd",
	.version		= FIO_IOOPS_VERSION,
	.setup			= fio_rbd_setup,
	.init			= fio_rbd_init,
	.queue			= fio_rbd_queue,
	.getevents		= fio_rbd_getevents,
	.event			= fio_rbd_event,
	.cleanup		= fio_rbd_cleanup,
	.open_file		= fio_rbd_open,
	.invalidate		= fio_rbd_invalidate,
	.options		= options,
	.io_u_init		= fio_rbd_io_u_init,
	.io_u_free		= fio_rbd_io_u_free,
	.option_struct_size	= sizeof(struct rbd_options),
};
677
/* Constructor: register the engine with fio at program load. */
static void fio_init fio_rbd_register(void)
{
	register_ioengine(&ioengine);
}
682
/* Destructor: deregister the engine at program exit. */
static void fio_exit fio_rbd_unregister(void)
{
	unregister_ioengine(&ioengine);
}