remove redundant _fio_rbd_disconnect, which is already called in fio_rbd_cleanup()
[fio.git] / engines / rbd.c
... / ...
CommitLineData
1/*
2 * rbd engine
3 *
4 * IO engine using Ceph's librbd to test RADOS Block Devices.
5 *
6 */
7
#include <rbd/librbd.h>

#include <inttypes.h>

#include "../fio.h"
#include "../optgroup.h"
#ifdef CONFIG_RBD_BLKIN
#include <zipkin_c.h>
#endif

#ifdef CONFIG_RBD_POLL
/* add for poll */
#include <poll.h>
#include <sys/eventfd.h>
#endif
21
/* Per-io_u engine data: ties an fio io_u to its librbd async completion. */
struct fio_rbd_iou {
	struct io_u *io_u;
	rbd_completion_t completion;	/* librbd AIO completion handle */
	int io_seen;			/* already reaped by getevents */
	int io_complete;		/* set by _fio_rbd_finish_aiocb */
#ifdef CONFIG_RBD_BLKIN
	struct blkin_trace_info info;	/* zipkin trace context for this IO */
#endif
};
31
/* Per-thread engine state: cluster/pool/image handles plus event arrays. */
struct rbd_data {
	rados_t cluster;		/* RADOS cluster handle */
	rados_ioctx_t io_ctx;		/* pool I/O context */
	rbd_image_t image;		/* open RBD image */
	struct io_u **aio_events;	/* completed io_us handed to fio */
	struct io_u **sort_events;	/* scratch array for age-sorted waits */
	int fd;				/* eventfd for poll support, -1 if unset */
	bool connected;			/* true once setup connected the cluster */
};
41
/* Job-file options for the rbd engine (see options[] below). */
struct rbd_options {
	void *pad;		/* fio expects option struct to start with a pad */
	char *cluster_name;	/* ceph cluster name (enables rados_create2) */
	char *rbd_name;		/* image to open */
	char *pool_name;	/* pool hosting the image */
	char *client_name;	/* ceph client id, with or without "client." */
	int busy_poll;		/* spin for completions instead of sleeping */
};
50
/*
 * Job-file option table for the rbd engine. Terminated by the
 * NULL-named sentinel entry required by fio's option parser.
 */
static struct fio_option options[] = {
	{
		.name     = "clustername",
		.lname    = "ceph cluster name",
		.type     = FIO_OPT_STR_STORE,
		.help     = "Cluster name for ceph",
		.off1     = offsetof(struct rbd_options, cluster_name),
		.category = FIO_OPT_C_ENGINE,
		.group    = FIO_OPT_G_RBD,
	},
	{
		.name     = "rbdname",
		.lname    = "rbd engine rbdname",
		.type     = FIO_OPT_STR_STORE,
		.help     = "RBD name for RBD engine",
		.off1     = offsetof(struct rbd_options, rbd_name),
		.category = FIO_OPT_C_ENGINE,
		.group    = FIO_OPT_G_RBD,
	},
	{
		.name     = "pool",
		.lname    = "rbd engine pool",
		.type     = FIO_OPT_STR_STORE,
		.help     = "Name of the pool hosting the RBD for the RBD engine",
		.off1     = offsetof(struct rbd_options, pool_name),
		.category = FIO_OPT_C_ENGINE,
		.group    = FIO_OPT_G_RBD,
	},
	{
		.name     = "clientname",
		.lname    = "rbd engine clientname",
		.type     = FIO_OPT_STR_STORE,
		.help     = "Name of the ceph client to access the RBD for the RBD engine",
		.off1     = offsetof(struct rbd_options, client_name),
		.category = FIO_OPT_C_ENGINE,
		.group    = FIO_OPT_G_RBD,
	},
	{
		.name     = "busy_poll",
		.lname    = "Busy poll",
		.type     = FIO_OPT_BOOL,
		.help     = "Busy poll for completions instead of sleeping",
		.off1     = offsetof(struct rbd_options, busy_poll),
		.def      = "0",
		.category = FIO_OPT_C_ENGINE,
		.group    = FIO_OPT_G_RBD,
	},
	{
		.name     = NULL,
	},
};
102
103static int _fio_setup_rbd_data(struct thread_data *td,
104 struct rbd_data **rbd_data_ptr)
105{
106 struct rbd_data *rbd;
107
108 if (td->io_ops_data)
109 return 0;
110
111 rbd = calloc(1, sizeof(struct rbd_data));
112 if (!rbd)
113 goto failed;
114
115 rbd->connected = false;
116
117 /* add for poll, init fd: -1 */
118 rbd->fd = -1;
119
120 rbd->aio_events = calloc(td->o.iodepth, sizeof(struct io_u *));
121 if (!rbd->aio_events)
122 goto failed;
123
124 rbd->sort_events = calloc(td->o.iodepth, sizeof(struct io_u *));
125 if (!rbd->sort_events)
126 goto failed;
127
128 *rbd_data_ptr = rbd;
129 return 0;
130
131failed:
132 if (rbd) {
133 if (rbd->aio_events)
134 free(rbd->aio_events);
135 if (rbd->sort_events)
136 free(rbd->sort_events);
137 free(rbd);
138 }
139 return 1;
140
141}
142
#ifdef CONFIG_RBD_POLL
/*
 * Create a non-blocking eventfd and register it with librbd so image
 * completions can be reaped via poll() in rbd_iter_events().
 * Returns true on success; on failure the fd is closed and reset to -1.
 */
static bool _fio_rbd_setup_poll(struct rbd_data *rbd)
{
	int r;

	/* add for rbd poll */
	rbd->fd = eventfd(0, EFD_NONBLOCK);
	if (rbd->fd < 0) {
		log_err("eventfd failed.\n");
		return false;
	}

	r = rbd_set_image_notification(rbd->image, rbd->fd, EVENT_TYPE_EVENTFD);
	if (r < 0) {
		log_err("rbd_set_image_notification failed.\n");
		close(rbd->fd);
		rbd->fd = -1;
		return false;
	}

	return true;
}
#else
/* Poll support not compiled in: nothing to set up. */
static bool _fio_rbd_setup_poll(struct rbd_data *rbd)
{
	return true;
}
#endif
171
172static int _fio_rbd_connect(struct thread_data *td)
173{
174 struct rbd_data *rbd = td->io_ops_data;
175 struct rbd_options *o = td->eo;
176 int r;
177
178 if (o->cluster_name) {
179 char *client_name = NULL;
180
181 /*
182 * If we specify cluser name, the rados_create2
183 * will not assume 'client.'. name is considered
184 * as a full type.id namestr
185 */
186 if (o->client_name) {
187 if (!index(o->client_name, '.')) {
188 client_name = calloc(1, strlen("client.") +
189 strlen(o->client_name) + 1);
190 strcat(client_name, "client.");
191 strcat(client_name, o->client_name);
192 } else {
193 client_name = o->client_name;
194 }
195 }
196
197 r = rados_create2(&rbd->cluster, o->cluster_name,
198 client_name, 0);
199
200 if (client_name && !index(o->client_name, '.'))
201 free(client_name);
202 } else
203 r = rados_create(&rbd->cluster, o->client_name);
204
205 if (r < 0) {
206 log_err("rados_create failed.\n");
207 goto failed_early;
208 }
209
210 r = rados_conf_read_file(rbd->cluster, NULL);
211 if (r < 0) {
212 log_err("rados_conf_read_file failed.\n");
213 goto failed_early;
214 }
215
216 r = rados_connect(rbd->cluster);
217 if (r < 0) {
218 log_err("rados_connect failed.\n");
219 goto failed_shutdown;
220 }
221
222 r = rados_ioctx_create(rbd->cluster, o->pool_name, &rbd->io_ctx);
223 if (r < 0) {
224 log_err("rados_ioctx_create failed.\n");
225 goto failed_shutdown;
226 }
227
228 r = rbd_open(rbd->io_ctx, o->rbd_name, &rbd->image, NULL /*snap */ );
229 if (r < 0) {
230 log_err("rbd_open failed.\n");
231 goto failed_open;
232 }
233
234 if (!_fio_rbd_setup_poll(rbd))
235 goto failed_poll;
236
237 return 0;
238
239failed_poll:
240 rbd_close(rbd->image);
241 rbd->image = NULL;
242failed_open:
243 rados_ioctx_destroy(rbd->io_ctx);
244 rbd->io_ctx = NULL;
245failed_shutdown:
246 rados_shutdown(rbd->cluster);
247 rbd->cluster = NULL;
248failed_early:
249 return 1;
250}
251
/*
 * Tear down the connection in reverse order of setup: poll eventfd,
 * image, ioctx, then cluster. Safe on a NULL or partially-connected
 * rbd; every handle is reset so repeated calls are no-ops.
 */
static void _fio_rbd_disconnect(struct rbd_data *rbd)
{
	if (!rbd)
		return;

	/* close eventfd */
	if (rbd->fd != -1) {
		close(rbd->fd);
		rbd->fd = -1;
	}

	/* shutdown everything */
	if (rbd->image) {
		rbd_close(rbd->image);
		rbd->image = NULL;
	}

	if (rbd->io_ctx) {
		rados_ioctx_destroy(rbd->io_ctx);
		rbd->io_ctx = NULL;
	}

	if (rbd->cluster) {
		rados_shutdown(rbd->cluster);
		rbd->cluster = NULL;
	}
}
279
280static void _fio_rbd_finish_aiocb(rbd_completion_t comp, void *data)
281{
282 struct fio_rbd_iou *fri = data;
283 struct io_u *io_u = fri->io_u;
284 ssize_t ret;
285
286 /*
287 * Looks like return value is 0 for success, or < 0 for
288 * a specific error. So we have to assume that it can't do
289 * partial completions.
290 */
291 ret = rbd_aio_get_return_value(fri->completion);
292 if (ret < 0) {
293 io_u->error = ret;
294 io_u->resid = io_u->xfer_buflen;
295 } else
296 io_u->error = 0;
297
298 fri->io_complete = 1;
299}
300
301static struct io_u *fio_rbd_event(struct thread_data *td, int event)
302{
303 struct rbd_data *rbd = td->io_ops_data;
304
305 return rbd->aio_events[event];
306}
307
308static inline int fri_check_complete(struct rbd_data *rbd, struct io_u *io_u,
309 unsigned int *events)
310{
311 struct fio_rbd_iou *fri = io_u->engine_data;
312
313 if (fri->io_complete) {
314 fri->io_seen = 1;
315 rbd->aio_events[*events] = io_u;
316 (*events)++;
317
318 rbd_aio_release(fri->completion);
319 return 1;
320 }
321
322 return 0;
323}
324
325static inline int rbd_io_u_seen(struct io_u *io_u)
326{
327 struct fio_rbd_iou *fri = io_u->engine_data;
328
329 return fri->io_seen;
330}
331
332static void rbd_io_u_wait_complete(struct io_u *io_u)
333{
334 struct fio_rbd_iou *fri = io_u->engine_data;
335
336 rbd_aio_wait_for_complete(fri->completion);
337}
338
339static int rbd_io_u_cmp(const void *p1, const void *p2)
340{
341 const struct io_u **a = (const struct io_u **) p1;
342 const struct io_u **b = (const struct io_u **) p2;
343 uint64_t at, bt;
344
345 at = utime_since_now(&(*a)->start_time);
346 bt = utime_since_now(&(*b)->start_time);
347
348 if (at < bt)
349 return -1;
350 else if (at == bt)
351 return 0;
352 else
353 return 1;
354}
355
/*
 * Reap completed IOs into rbd->aio_events[], bumping *events for each,
 * and return the number newly seen by this call.
 *
 * With CONFIG_RBD_POLL the candidate set comes from the image eventfd
 * via rbd_poll_io_events(); otherwise every in-flight io_u is scanned.
 * When 'wait' is set, still-incomplete IOs are collected, sorted by
 * age, and waited on until at least min_evts total events are in hand.
 */
static int rbd_iter_events(struct thread_data *td, unsigned int *events,
			   unsigned int min_evts, int wait)
{
	struct rbd_data *rbd = td->io_ops_data;
	unsigned int this_events = 0;
	struct io_u *io_u;
	int i, sidx = 0;

#ifdef CONFIG_RBD_POLL
	int ret = 0;
	int event_num = 0;
	struct fio_rbd_iou *fri = NULL;
	rbd_completion_t comps[min_evts];

	struct pollfd pfd;
	pfd.fd = rbd->fd;
	pfd.events = POLLIN;

	/* block until the image eventfd signals at least one completion */
	ret = poll(&pfd, 1, -1);
	if (ret <= 0)
		return 0;

	assert(pfd.revents & POLLIN);

	event_num = rbd_poll_io_events(rbd->image, comps, min_evts);

	for (i = 0; i < event_num; i++) {
		fri = rbd_aio_get_arg(comps[i]);
		io_u = fri->io_u;
#else
	io_u_qiter(&td->io_u_all, io_u, i) {
#endif
		/* skip io_us not in flight or already handed to the caller */
		if (!(io_u->flags & IO_U_F_FLIGHT))
			continue;
		if (rbd_io_u_seen(io_u))
			continue;

		if (fri_check_complete(rbd, io_u, events))
			this_events++;
		else if (wait)
			rbd->sort_events[sidx++] = io_u;
	}

	if (!wait || !sidx)
		return this_events;

	/*
	 * Sort events, oldest issue first, then wait on as many as we
	 * need in order of age. If we have enough events, stop waiting,
	 * and just check if any of the older ones are done.
	 */
	if (sidx > 1)
		qsort(rbd->sort_events, sidx, sizeof(struct io_u *), rbd_io_u_cmp);

	for (i = 0; i < sidx; i++) {
		io_u = rbd->sort_events[i];

		if (fri_check_complete(rbd, io_u, events)) {
			this_events++;
			continue;
		}

		/*
		 * Stop waiting when we have enough, but continue checking
		 * all pending IOs if they are complete.
		 */
		if (*events >= min_evts)
			continue;

		rbd_io_u_wait_complete(io_u);

		if (fri_check_complete(rbd, io_u, events))
			this_events++;
	}

	return this_events;
}
433
434static int fio_rbd_getevents(struct thread_data *td, unsigned int min,
435 unsigned int max, const struct timespec *t)
436{
437 unsigned int this_events, events = 0;
438 struct rbd_options *o = td->eo;
439 int wait = 0;
440
441 do {
442 this_events = rbd_iter_events(td, &events, min, wait);
443
444 if (events >= min)
445 break;
446 if (this_events)
447 continue;
448
449 if (!o->busy_poll)
450 wait = 1;
451 else
452 nop;
453 } while (1);
454
455 return events;
456}
457
458static int fio_rbd_queue(struct thread_data *td, struct io_u *io_u)
459{
460 struct rbd_data *rbd = td->io_ops_data;
461 struct fio_rbd_iou *fri = io_u->engine_data;
462 int r = -1;
463
464 fio_ro_check(td, io_u);
465
466 fri->io_seen = 0;
467 fri->io_complete = 0;
468
469 r = rbd_aio_create_completion(fri, _fio_rbd_finish_aiocb,
470 &fri->completion);
471 if (r < 0) {
472 log_err("rbd_aio_create_completion failed.\n");
473 goto failed;
474 }
475
476 if (io_u->ddir == DDIR_WRITE) {
477#ifdef CONFIG_RBD_BLKIN
478 blkin_init_trace_info(&fri->info);
479 r = rbd_aio_write_traced(rbd->image, io_u->offset, io_u->xfer_buflen,
480 io_u->xfer_buf, fri->completion, &fri->info);
481#else
482 r = rbd_aio_write(rbd->image, io_u->offset, io_u->xfer_buflen,
483 io_u->xfer_buf, fri->completion);
484#endif
485 if (r < 0) {
486 log_err("rbd_aio_write failed.\n");
487 goto failed_comp;
488 }
489
490 } else if (io_u->ddir == DDIR_READ) {
491#ifdef CONFIG_RBD_BLKIN
492 blkin_init_trace_info(&fri->info);
493 r = rbd_aio_read_traced(rbd->image, io_u->offset, io_u->xfer_buflen,
494 io_u->xfer_buf, fri->completion, &fri->info);
495#else
496 r = rbd_aio_read(rbd->image, io_u->offset, io_u->xfer_buflen,
497 io_u->xfer_buf, fri->completion);
498#endif
499
500 if (r < 0) {
501 log_err("rbd_aio_read failed.\n");
502 goto failed_comp;
503 }
504 } else if (io_u->ddir == DDIR_TRIM) {
505 r = rbd_aio_discard(rbd->image, io_u->offset,
506 io_u->xfer_buflen, fri->completion);
507 if (r < 0) {
508 log_err("rbd_aio_discard failed.\n");
509 goto failed_comp;
510 }
511 } else if (io_u->ddir == DDIR_SYNC) {
512 r = rbd_aio_flush(rbd->image, fri->completion);
513 if (r < 0) {
514 log_err("rbd_flush failed.\n");
515 goto failed_comp;
516 }
517 } else {
518 dprint(FD_IO, "%s: Warning: unhandled ddir: %d\n", __func__,
519 io_u->ddir);
520 goto failed_comp;
521 }
522
523 return FIO_Q_QUEUED;
524failed_comp:
525 rbd_aio_release(fri->completion);
526failed:
527 io_u->error = r;
528 td_verror(td, io_u->error, "xfer");
529 return FIO_Q_COMPLETED;
530}
531
532static int fio_rbd_init(struct thread_data *td)
533{
534 int r;
535 struct rbd_data *rbd = td->io_ops_data;
536
537 if (rbd->connected)
538 return 0;
539
540 r = _fio_rbd_connect(td);
541 if (r) {
542 log_err("fio_rbd_connect failed, return code: %d .\n", r);
543 goto failed;
544 }
545
546 return 0;
547
548failed:
549 return 1;
550}
551
552static void fio_rbd_cleanup(struct thread_data *td)
553{
554 struct rbd_data *rbd = td->io_ops_data;
555
556 if (rbd) {
557 _fio_rbd_disconnect(rbd);
558 free(rbd->aio_events);
559 free(rbd->sort_events);
560 free(rbd);
561 }
562}
563
564static int fio_rbd_setup(struct thread_data *td)
565{
566 rbd_image_info_t info;
567 struct fio_file *f;
568 struct rbd_data *rbd = NULL;
569 int r;
570
571 /* allocate engine specific structure to deal with librbd. */
572 r = _fio_setup_rbd_data(td, &rbd);
573 if (r) {
574 log_err("fio_setup_rbd_data failed.\n");
575 goto cleanup;
576 }
577 td->io_ops_data = rbd;
578
579 /* librbd does not allow us to run first in the main thread and later
580 * in a fork child. It needs to be the same process context all the
581 * time.
582 */
583 td->o.use_thread = 1;
584
585 /* connect in the main thread to determine to determine
586 * the size of the given RADOS block device. And disconnect
587 * later on.
588 */
589 r = _fio_rbd_connect(td);
590 if (r) {
591 log_err("fio_rbd_connect failed.\n");
592 goto cleanup;
593 }
594 rbd->connected = true;
595
596 /* get size of the RADOS block device */
597 r = rbd_stat(rbd->image, &info, sizeof(info));
598 if (r < 0) {
599 log_err("rbd_status failed.\n");
600 goto cleanup;
601 } else if (info.size == 0) {
602 log_err("image size should be larger than zero.\n");
603 r = -EINVAL;
604 goto cleanup;
605 }
606
607 dprint(FD_IO, "rbd-engine: image size: %lu\n", info.size);
608
609 /* taken from "net" engine. Pretend we deal with files,
610 * even if we do not have any ideas about files.
611 * The size of the RBD is set instead of a artificial file.
612 */
613 if (!td->files_index) {
614 add_file(td, td->o.filename ? : "rbd", 0, 0);
615 td->o.nr_files = td->o.nr_files ? : 1;
616 td->o.open_files++;
617 }
618 f = td->files[0];
619 f->real_file_size = info.size;
620
621 return 0;
622
623cleanup:
624 fio_rbd_cleanup(td);
625 return r;
626}
627
/*
 * fio open_file hook: nothing to do — the RBD image is opened once per
 * thread in _fio_rbd_connect(), not per fio file.
 */
static int fio_rbd_open(struct thread_data *td, struct fio_file *f)
{
	return 0;
}
632
/*
 * fio invalidate hook: drop librbd's cache for the image when the
 * library supports it (CONFIG_RBD_INVAL); otherwise a no-op.
 */
static int fio_rbd_invalidate(struct thread_data *td, struct fio_file *f)
{
#if defined(CONFIG_RBD_INVAL)
	struct rbd_data *rbd = td->io_ops_data;

	return rbd_invalidate_cache(rbd->image);
#else
	return 0;
#endif
}
643
644static void fio_rbd_io_u_free(struct thread_data *td, struct io_u *io_u)
645{
646 struct fio_rbd_iou *fri = io_u->engine_data;
647
648 if (fri) {
649 io_u->engine_data = NULL;
650 free(fri);
651 }
652}
653
654static int fio_rbd_io_u_init(struct thread_data *td, struct io_u *io_u)
655{
656 struct fio_rbd_iou *fri;
657
658 fri = calloc(1, sizeof(*fri));
659 fri->io_u = io_u;
660 io_u->engine_data = fri;
661 return 0;
662}
663
/* fio ioengine registration table wiring the rbd hooks together. */
static struct ioengine_ops ioengine = {
	.name			= "rbd",
	.version		= FIO_IOOPS_VERSION,
	.setup			= fio_rbd_setup,
	.init			= fio_rbd_init,
	.queue			= fio_rbd_queue,
	.getevents		= fio_rbd_getevents,
	.event			= fio_rbd_event,
	.cleanup		= fio_rbd_cleanup,
	.open_file		= fio_rbd_open,
	.invalidate		= fio_rbd_invalidate,
	.options		= options,
	.io_u_init		= fio_rbd_io_u_init,
	.io_u_free		= fio_rbd_io_u_free,
	.option_struct_size	= sizeof(struct rbd_options),
};
680
/* Constructor: register the engine with fio at program startup. */
static void fio_init fio_rbd_register(void)
{
	register_ioengine(&ioengine);
}
685
/* Destructor: unregister the engine at program exit. */
static void fio_exit fio_rbd_unregister(void)
{
	unregister_ioengine(&ioengine);
}