/*
 * rbd engine
 *
 * IO engine using Ceph's librbd to test RADOS Block Devices.
 *
 */

#include <rbd/librbd.h>

#include "../fio.h"
#include "../optgroup.h"

struct fio_rbd_iou {
        struct io_u *io_u;
        rbd_completion_t completion;
        int io_seen;
        int io_complete;
};

struct rbd_data {
        rados_t cluster;
        rados_ioctx_t io_ctx;
        rbd_image_t image;
        struct io_u **aio_events;
        struct io_u **sort_events;
};

struct rbd_options {
        void *pad;
        char *cluster_name;
        char *rbd_name;
        char *pool_name;
        char *client_name;
        int busy_poll;
};

static struct fio_option options[] = {
        {
                .name = "clustername",
                .lname = "ceph cluster name",
                .type = FIO_OPT_STR_STORE,
                .help = "Cluster name for ceph",
                .off1 = offsetof(struct rbd_options, cluster_name),
                .category = FIO_OPT_C_ENGINE,
                .group = FIO_OPT_G_RBD,
        },
        {
                .name = "rbdname",
                .lname = "rbd engine rbdname",
                .type = FIO_OPT_STR_STORE,
                .help = "RBD name for RBD engine",
                .off1 = offsetof(struct rbd_options, rbd_name),
                .category = FIO_OPT_C_ENGINE,
                .group = FIO_OPT_G_RBD,
        },
        {
                .name = "pool",
                .lname = "rbd engine pool",
                .type = FIO_OPT_STR_STORE,
                .help = "Name of the pool hosting the RBD for the RBD engine",
                .off1 = offsetof(struct rbd_options, pool_name),
                .category = FIO_OPT_C_ENGINE,
                .group = FIO_OPT_G_RBD,
        },
        {
                .name = "clientname",
                .lname = "rbd engine clientname",
                .type = FIO_OPT_STR_STORE,
                .help = "Name of the ceph client to access the RBD for the RBD engine",
                .off1 = offsetof(struct rbd_options, client_name),
                .category = FIO_OPT_C_ENGINE,
                .group = FIO_OPT_G_RBD,
        },
        {
                .name = "busy_poll",
                .lname = "Busy poll",
                .type = FIO_OPT_BOOL,
                .help = "Busy poll for completions instead of sleeping",
                .off1 = offsetof(struct rbd_options, busy_poll),
                .def = "0",
                .category = FIO_OPT_C_ENGINE,
                .group = FIO_OPT_G_RBD,
        },
        {
                .name = NULL,
        },
};
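
/*
 * Example usage (illustrative only): a minimal fio job file exercising the
 * options above might look like the following. The pool, image and client
 * names are placeholders, not values taken from any real cluster.
 *
 *      [global]
 *      ioengine=rbd
 *      clientname=admin
 *      pool=rbd
 *      rbdname=fio_test_image
 *      busy_poll=0
 *      rw=randwrite
 *      bs=4k
 *
 *      [rbd_iodepth32]
 *      iodepth=32
 */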

static int _fio_setup_rbd_data(struct thread_data *td,
                               struct rbd_data **rbd_data_ptr)
{
        struct rbd_data *rbd;

        if (td->io_ops_data)
                return 0;

        rbd = calloc(1, sizeof(struct rbd_data));
        if (!rbd)
                goto failed;

        rbd->aio_events = calloc(td->o.iodepth, sizeof(struct io_u *));
        if (!rbd->aio_events)
                goto failed;

        rbd->sort_events = calloc(td->o.iodepth, sizeof(struct io_u *));
        if (!rbd->sort_events)
                goto failed;

        *rbd_data_ptr = rbd;
        return 0;

failed:
        if (rbd) {
                if (rbd->aio_events)
                        free(rbd->aio_events);
                if (rbd->sort_events)
                        free(rbd->sort_events);
                free(rbd);
        }
        return 1;

}

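/*
 * Connect to the cluster in the order librados/librbd expect: create the
 * cluster handle (rados_create(), or rados_create2() when a cluster name
 * is given), read the default ceph config, connect, create an io context
 * for the requested pool and open the RBD image. Each error label below
 * unwinds only what has already been set up.
 */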
static int _fio_rbd_connect(struct thread_data *td)
{
        struct rbd_data *rbd = td->io_ops_data;
        struct rbd_options *o = td->eo;
        int r;

        if (o->cluster_name) {
                char *client_name = NULL;

                /*
                 * If we specify a cluster name, rados_create2()
                 * will not assume the 'client.' prefix; the name
                 * is taken as a full type.id string, so "admin"
                 * has to be passed as "client.admin".
                 */
                if (!index(o->client_name, '.')) {
                        client_name = calloc(1, strlen("client.") +
                                                strlen(o->client_name) + 1);
                        strcat(client_name, "client.");
                        o->client_name = strcat(client_name, o->client_name);
                }
                r = rados_create2(&rbd->cluster, o->cluster_name,
                                  o->client_name, 0);
        } else
                r = rados_create(&rbd->cluster, o->client_name);

        if (r < 0) {
                log_err("rados_create failed.\n");
                goto failed_early;
        }

        r = rados_conf_read_file(rbd->cluster, NULL);
        if (r < 0) {
                log_err("rados_conf_read_file failed.\n");
                goto failed_early;
        }

        r = rados_connect(rbd->cluster);
        if (r < 0) {
                log_err("rados_connect failed.\n");
                goto failed_shutdown;
        }

        r = rados_ioctx_create(rbd->cluster, o->pool_name, &rbd->io_ctx);
        if (r < 0) {
                log_err("rados_ioctx_create failed.\n");
                goto failed_shutdown;
        }

        r = rbd_open(rbd->io_ctx, o->rbd_name, &rbd->image, NULL /*snap */ );
        if (r < 0) {
                log_err("rbd_open failed.\n");
                goto failed_open;
        }
        return 0;

failed_open:
        rados_ioctx_destroy(rbd->io_ctx);
        rbd->io_ctx = NULL;
failed_shutdown:
        rados_shutdown(rbd->cluster);
        rbd->cluster = NULL;
failed_early:
        return 1;
}

static void _fio_rbd_disconnect(struct rbd_data *rbd)
{
        if (!rbd)
                return;

        /* shutdown everything */
        if (rbd->image) {
                rbd_close(rbd->image);
                rbd->image = NULL;
        }

        if (rbd->io_ctx) {
                rados_ioctx_destroy(rbd->io_ctx);
                rbd->io_ctx = NULL;
        }

        if (rbd->cluster) {
                rados_shutdown(rbd->cluster);
                rbd->cluster = NULL;
        }
}

static void _fio_rbd_finish_aiocb(rbd_completion_t comp, void *data)
{
        struct fio_rbd_iou *fri = data;
        struct io_u *io_u = fri->io_u;
        ssize_t ret;

        /*
         * Looks like return value is 0 for success, or < 0 for
         * a specific error. So we have to assume that it can't do
         * partial completions.
         */
        ret = rbd_aio_get_return_value(fri->completion);
        if (ret < 0) {
                io_u->error = ret;
                io_u->resid = io_u->xfer_buflen;
        } else
                io_u->error = 0;

        fri->io_complete = 1;
}

static struct io_u *fio_rbd_event(struct thread_data *td, int event)
{
        struct rbd_data *rbd = td->io_ops_data;

        return rbd->aio_events[event];
}

static inline int fri_check_complete(struct rbd_data *rbd, struct io_u *io_u,
                                     unsigned int *events)
{
        struct fio_rbd_iou *fri = io_u->engine_data;

        if (fri->io_complete) {
                fri->io_seen = 1;
                rbd->aio_events[*events] = io_u;
                (*events)++;

                rbd_aio_release(fri->completion);
                return 1;
        }

        return 0;
}

static inline int rbd_io_u_seen(struct io_u *io_u)
{
        struct fio_rbd_iou *fri = io_u->engine_data;

        return fri->io_seen;
}

static void rbd_io_u_wait_complete(struct io_u *io_u)
{
        struct fio_rbd_iou *fri = io_u->engine_data;

        rbd_aio_wait_for_complete(fri->completion);
}

static int rbd_io_u_cmp(const void *p1, const void *p2)
{
        const struct io_u **a = (const struct io_u **) p1;
        const struct io_u **b = (const struct io_u **) p2;
        uint64_t at, bt;

        at = utime_since_now(&(*a)->start_time);
        bt = utime_since_now(&(*b)->start_time);

        if (at < bt)
                return -1;
        else if (at == bt)
                return 0;
        else
                return 1;
}

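/*
 * Reap completions: walk all in-flight io_us once and collect those whose
 * librbd callback has already fired. If waiting is allowed and we still
 * need more events, the remaining in-flight io_us are gathered in
 * sort_events and waited on oldest-first further down.
 */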
static int rbd_iter_events(struct thread_data *td, unsigned int *events,
                           unsigned int min_evts, int wait)
{
        struct rbd_data *rbd = td->io_ops_data;
        unsigned int this_events = 0;
        struct io_u *io_u;
        int i, sidx;

        sidx = 0;
        io_u_qiter(&td->io_u_all, io_u, i) {
                if (!(io_u->flags & IO_U_F_FLIGHT))
                        continue;
                if (rbd_io_u_seen(io_u))
                        continue;

                if (fri_check_complete(rbd, io_u, events))
                        this_events++;
                else if (wait)
                        rbd->sort_events[sidx++] = io_u;
        }

        if (!wait || !sidx)
                return this_events;

        /*
         * Sort events, oldest issue first, then wait on as many as we
         * need in order of age. If we have enough events, stop waiting,
         * and just check if any of the older ones are done.
         */
        if (sidx > 1)
                qsort(rbd->sort_events, sidx, sizeof(struct io_u *), rbd_io_u_cmp);

        for (i = 0; i < sidx; i++) {
                io_u = rbd->sort_events[i];

                if (fri_check_complete(rbd, io_u, events)) {
                        this_events++;
                        continue;
                }

                /*
                 * Stop waiting when we have enough, but continue checking
                 * all pending IOs if they are complete.
                 */
                if (*events >= min_evts)
                        continue;

                rbd_io_u_wait_complete(io_u);

                if (fri_check_complete(rbd, io_u, events))
                        this_events++;
        }

        return this_events;
}

static int fio_rbd_getevents(struct thread_data *td, unsigned int min,
                             unsigned int max, const struct timespec *t)
{
        unsigned int this_events, events = 0;
        struct rbd_options *o = td->eo;
        int wait = 0;

        do {
                this_events = rbd_iter_events(td, &events, min, wait);

                if (events >= min)
                        break;
                if (this_events)
                        continue;

                if (!o->busy_poll)
                        wait = 1;
                else
                        nop;
        } while (1);

        return events;
}

static int fio_rbd_queue(struct thread_data *td, struct io_u *io_u)
{
        struct rbd_data *rbd = td->io_ops_data;
        struct fio_rbd_iou *fri = io_u->engine_data;
        int r = -1;

        fio_ro_check(td, io_u);

        fri->io_seen = 0;
        fri->io_complete = 0;

        r = rbd_aio_create_completion(fri, _fio_rbd_finish_aiocb,
                                      &fri->completion);
        if (r < 0) {
                log_err("rbd_aio_create_completion failed.\n");
                goto failed;
        }

        if (io_u->ddir == DDIR_WRITE) {
                r = rbd_aio_write(rbd->image, io_u->offset, io_u->xfer_buflen,
                                  io_u->xfer_buf, fri->completion);
                if (r < 0) {
                        log_err("rbd_aio_write failed.\n");
                        goto failed_comp;
                }

        } else if (io_u->ddir == DDIR_READ) {
                r = rbd_aio_read(rbd->image, io_u->offset, io_u->xfer_buflen,
                                 io_u->xfer_buf, fri->completion);

                if (r < 0) {
                        log_err("rbd_aio_read failed.\n");
                        goto failed_comp;
                }
        } else if (io_u->ddir == DDIR_TRIM) {
                r = rbd_aio_discard(rbd->image, io_u->offset,
                                    io_u->xfer_buflen, fri->completion);
                if (r < 0) {
                        log_err("rbd_aio_discard failed.\n");
                        goto failed_comp;
                }
        } else if (io_u->ddir == DDIR_SYNC) {
                r = rbd_aio_flush(rbd->image, fri->completion);
                if (r < 0) {
                        log_err("rbd_flush failed.\n");
                        goto failed_comp;
                }
        } else {
                dprint(FD_IO, "%s: Warning: unhandled ddir: %d\n", __func__,
                       io_u->ddir);
                goto failed_comp;
        }

        return FIO_Q_QUEUED;
failed_comp:
        rbd_aio_release(fri->completion);
failed:
        io_u->error = r;
        td_verror(td, io_u->error, "xfer");
        return FIO_Q_COMPLETED;
}

static int fio_rbd_init(struct thread_data *td)
{
        int r;

        r = _fio_rbd_connect(td);
        if (r) {
                log_err("fio_rbd_connect failed, return code: %d .\n", r);
                goto failed;
        }

        return 0;

failed:
        return 1;
}

static void fio_rbd_cleanup(struct thread_data *td)
{
        struct rbd_data *rbd = td->io_ops_data;

        if (rbd) {
                _fio_rbd_disconnect(rbd);
                free(rbd->aio_events);
                free(rbd->sort_events);
                free(rbd);
        }
}

static int fio_rbd_setup(struct thread_data *td)
{
        rbd_image_info_t info;
        struct fio_file *f;
        struct rbd_data *rbd = NULL;
        int major, minor, extra;
        int r;

        /* log version of librbd. No cluster connection required. */
        rbd_version(&major, &minor, &extra);
        log_info("rbd engine: RBD version: %d.%d.%d\n", major, minor, extra);

        /* allocate engine specific structure to deal with librbd. */
        r = _fio_setup_rbd_data(td, &rbd);
        if (r) {
                log_err("fio_setup_rbd_data failed.\n");
                goto cleanup;
        }
        td->io_ops_data = rbd;

        /* librbd does not allow us to run first in the main thread and later
         * in a fork child. It needs to be the same process context all the
         * time.
         */
        td->o.use_thread = 1;

        /* connect in the main thread to determine the size of the given
         * RADOS block device, and disconnect again later on.
         */
        r = _fio_rbd_connect(td);
        if (r) {
                log_err("fio_rbd_connect failed.\n");
                goto cleanup;
        }

        /* get size of the RADOS block device */
        r = rbd_stat(rbd->image, &info, sizeof(info));
        if (r < 0) {
                log_err("rbd_stat failed.\n");
                goto disconnect;
        }
        dprint(FD_IO, "rbd-engine: image size: %lu\n", info.size);

        /* taken from the "net" engine. Pretend we deal with files,
         * even though we do not have any real files; the size of the
         * RBD image is used instead of an artificial file size.
         */
        if (!td->files_index) {
                add_file(td, td->o.filename ? : "rbd", 0, 0);
                td->o.nr_files = td->o.nr_files ? : 1;
                td->o.open_files++;
        }
        f = td->files[0];
        f->real_file_size = info.size;

        /* disconnect; we were only connected to determine
         * the size of the RBD image.
         */
        _fio_rbd_disconnect(rbd);
        return 0;

disconnect:
        _fio_rbd_disconnect(rbd);
cleanup:
        fio_rbd_cleanup(td);
        return r;
}

static int fio_rbd_open(struct thread_data *td, struct fio_file *f)
{
        return 0;
}

static int fio_rbd_invalidate(struct thread_data *td, struct fio_file *f)
{
#if defined(CONFIG_RBD_INVAL)
        struct rbd_data *rbd = td->io_ops_data;

        return rbd_invalidate_cache(rbd->image);
#else
        return 0;
#endif
}

static void fio_rbd_io_u_free(struct thread_data *td, struct io_u *io_u)
{
        struct fio_rbd_iou *fri = io_u->engine_data;

        if (fri) {
                io_u->engine_data = NULL;
                free(fri);
        }
}

static int fio_rbd_io_u_init(struct thread_data *td, struct io_u *io_u)
{
        struct fio_rbd_iou *fri;

        fri = calloc(1, sizeof(*fri));
        fri->io_u = io_u;
        io_u->engine_data = fri;
        return 0;
}

static struct ioengine_ops ioengine = {
        .name = "rbd",
        .version = FIO_IOOPS_VERSION,
        .setup = fio_rbd_setup,
        .init = fio_rbd_init,
        .queue = fio_rbd_queue,
        .getevents = fio_rbd_getevents,
        .event = fio_rbd_event,
        .cleanup = fio_rbd_cleanup,
        .open_file = fio_rbd_open,
        .invalidate = fio_rbd_invalidate,
        .options = options,
        .io_u_init = fio_rbd_io_u_init,
        .io_u_free = fio_rbd_io_u_free,
        .option_struct_size = sizeof(struct rbd_options),
};

static void fio_init fio_rbd_register(void)
{
        register_ioengine(&ioengine);
}

static void fio_exit fio_rbd_unregister(void)
{
        unregister_ioengine(&ioengine);
}