[fio.git] / engines / rbd.c
/*
 * rbd engine
 *
 * IO engine using Ceph's librbd to test RADOS Block Devices.
 *
 */
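
/*
 * Example job file (a minimal sketch; the pool, image, and client names
 * below are placeholders that must match an existing Ceph/RBD setup):
 *
 *   [rbd_test]
 *   ioengine=rbd
 *   clientname=admin
 *   pool=rbd
 *   rbdname=fio_test
 *   rw=randwrite
 *   bs=4k
 *   iodepth=32
 */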

#include <rbd/librbd.h>

#include "../fio.h"
#include "../optgroup.h"

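/*
 * Per-io_u bookkeeping. io_complete is set from the librbd completion
 * callback; io_seen marks the io_u as already reaped, so repeated scans
 * of the in-flight list do not count it twice.
 */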
struct fio_rbd_iou {
	struct io_u *io_u;
	rbd_completion_t completion;
	int io_seen;
	int io_complete;
};

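/*
 * Engine-private state: the RADOS cluster and pool handles, the open
 * image, and two iodepth-sized arrays for returning and sorting events.
 */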
struct rbd_data {
	rados_t cluster;
	rados_ioctx_t io_ctx;
	rbd_image_t image;
	struct io_u **aio_events;
	struct io_u **sort_events;
};

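/* Option values parsed from the job file; see the option table below. */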
struct rbd_options {
	void *pad;
	char *cluster_name;
	char *rbd_name;
	char *pool_name;
	char *client_name;
	int busy_poll;
};

static struct fio_option options[] = {
	{
		.name = "clustername",
		.lname = "ceph cluster name",
		.type = FIO_OPT_STR_STORE,
		.help = "Cluster name for ceph",
		.off1 = offsetof(struct rbd_options, cluster_name),
		.category = FIO_OPT_C_ENGINE,
		.group = FIO_OPT_G_RBD,
	},
	{
		.name = "rbdname",
		.lname = "rbd engine rbdname",
		.type = FIO_OPT_STR_STORE,
		.help = "RBD name for RBD engine",
		.off1 = offsetof(struct rbd_options, rbd_name),
		.category = FIO_OPT_C_ENGINE,
		.group = FIO_OPT_G_RBD,
	},
	{
		.name = "pool",
		.lname = "rbd engine pool",
		.type = FIO_OPT_STR_STORE,
		.help = "Name of the pool hosting the RBD for the RBD engine",
		.off1 = offsetof(struct rbd_options, pool_name),
		.category = FIO_OPT_C_ENGINE,
		.group = FIO_OPT_G_RBD,
	},
	{
		.name = "clientname",
		.lname = "rbd engine clientname",
		.type = FIO_OPT_STR_STORE,
		.help = "Name of the ceph client to access the RBD for the RBD engine",
		.off1 = offsetof(struct rbd_options, client_name),
		.category = FIO_OPT_C_ENGINE,
		.group = FIO_OPT_G_RBD,
	},
	{
		.name = "busy_poll",
		.lname = "Busy poll",
		.type = FIO_OPT_BOOL,
		.help = "Busy poll for completions instead of sleeping",
		.off1 = offsetof(struct rbd_options, busy_poll),
		.def = "0",
		.category = FIO_OPT_C_ENGINE,
		.group = FIO_OPT_G_RBD,
	},
	{
		.name = NULL,
	},
};

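/*
 * Allocate the engine-private structure and its per-iodepth event
 * arrays. Returns non-zero on allocation failure.
 */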
static int _fio_setup_rbd_data(struct thread_data *td,
			       struct rbd_data **rbd_data_ptr)
{
	struct rbd_data *rbd;

	if (td->io_ops_data)
		return 0;

	rbd = calloc(1, sizeof(struct rbd_data));
	if (!rbd)
		goto failed;

	rbd->aio_events = calloc(td->o.iodepth, sizeof(struct io_u *));
	if (!rbd->aio_events)
		goto failed;

	rbd->sort_events = calloc(td->o.iodepth, sizeof(struct io_u *));
	if (!rbd->sort_events)
		goto failed;

	*rbd_data_ptr = rbd;
	return 0;

failed:
	if (rbd) {
		if (rbd->aio_events)
			free(rbd->aio_events);
		if (rbd->sort_events)
			free(rbd->sort_events);
		free(rbd);
	}
	return 1;
}

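/*
 * Bring up a librados/librbd session: create the cluster handle, read
 * the default ceph config file, connect, create an ioctx for the pool,
 * and open the image.
 */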
static int _fio_rbd_connect(struct thread_data *td)
{
	struct rbd_data *rbd = td->io_ops_data;
	struct rbd_options *o = td->eo;
	int r;

	if (o->cluster_name) {
		char *client_name = NULL;

		/*
		 * If a cluster name is specified, rados_create2() will
		 * not prepend 'client.'; the name is treated as a full
		 * type.id string.
		 */
		if (!strchr(o->client_name, '.')) {
			client_name = calloc(1, strlen("client.") +
					     strlen(o->client_name) + 1);
			if (!client_name)
				goto failed_early;
			strcat(client_name, "client.");
			o->client_name = strcat(client_name, o->client_name);
		}
		r = rados_create2(&rbd->cluster, o->cluster_name,
				  o->client_name, 0);
	} else
		r = rados_create(&rbd->cluster, o->client_name);

	if (r < 0) {
		log_err("rados_create failed.\n");
		goto failed_early;
	}

	r = rados_conf_read_file(rbd->cluster, NULL);
	if (r < 0) {
		log_err("rados_conf_read_file failed.\n");
		goto failed_early;
	}

	r = rados_connect(rbd->cluster);
	if (r < 0) {
		log_err("rados_connect failed.\n");
		goto failed_shutdown;
	}

	r = rados_ioctx_create(rbd->cluster, o->pool_name, &rbd->io_ctx);
	if (r < 0) {
		log_err("rados_ioctx_create failed.\n");
		goto failed_shutdown;
	}

	r = rbd_open(rbd->io_ctx, o->rbd_name, &rbd->image, NULL /* snap */);
	if (r < 0) {
		log_err("rbd_open failed.\n");
		goto failed_open;
	}
	return 0;

failed_open:
	rados_ioctx_destroy(rbd->io_ctx);
	rbd->io_ctx = NULL;
failed_shutdown:
	rados_shutdown(rbd->cluster);
	rbd->cluster = NULL;
failed_early:
	return 1;
}

static void _fio_rbd_disconnect(struct rbd_data *rbd)
{
	if (!rbd)
		return;

	/* shutdown everything */
	if (rbd->image) {
		rbd_close(rbd->image);
		rbd->image = NULL;
	}

	if (rbd->io_ctx) {
		rados_ioctx_destroy(rbd->io_ctx);
		rbd->io_ctx = NULL;
	}

	if (rbd->cluster) {
		rados_shutdown(rbd->cluster);
		rbd->cluster = NULL;
	}
}

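/*
 * Completion callback, invoked by librbd from one of its own threads:
 * record the result and flag the io_u as complete. The completion is
 * released later, from fio's context, in fri_check_complete().
 */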
static void _fio_rbd_finish_aiocb(rbd_completion_t comp, void *data)
{
	struct fio_rbd_iou *fri = data;
	struct io_u *io_u = fri->io_u;
	ssize_t ret;

	/*
	 * Looks like the return value is 0 for success, or < 0 (a
	 * negative errno) for a specific error. So we have to assume
	 * that it can't do partial completions. Store the error as a
	 * positive errno value, as fio expects.
	 */
	ret = rbd_aio_get_return_value(fri->completion);
	if (ret < 0) {
		io_u->error = -ret;
		io_u->resid = io_u->xfer_buflen;
	} else
		io_u->error = 0;

	fri->io_complete = 1;
}

static struct io_u *fio_rbd_event(struct thread_data *td, int event)
{
	struct rbd_data *rbd = td->io_ops_data;

	return rbd->aio_events[event];
}

static inline int fri_check_complete(struct rbd_data *rbd, struct io_u *io_u,
				     unsigned int *events)
{
	struct fio_rbd_iou *fri = io_u->engine_data;

	if (fri->io_complete) {
		fri->io_seen = 1;
		rbd->aio_events[*events] = io_u;
		(*events)++;

		rbd_aio_release(fri->completion);
		return 1;
	}

	return 0;
}

static inline int rbd_io_u_seen(struct io_u *io_u)
{
	struct fio_rbd_iou *fri = io_u->engine_data;

	return fri->io_seen;
}

static void rbd_io_u_wait_complete(struct io_u *io_u)
{
	struct fio_rbd_iou *fri = io_u->engine_data;

	rbd_aio_wait_for_complete(fri->completion);
}

static int rbd_io_u_cmp(const void *p1, const void *p2)
{
	const struct io_u **a = (const struct io_u **) p1;
	const struct io_u **b = (const struct io_u **) p2;
	uint64_t at, bt;

	at = utime_since_now(&(*a)->start_time);
	bt = utime_since_now(&(*b)->start_time);

	if (at < bt)
		return -1;
	else if (at == bt)
		return 0;
	else
		return 1;
}

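/*
 * Reap pass: walk all in-flight io_us and collect those whose
 * completion callback has fired. If 'wait' is set, the remaining
 * io_us are sorted by age and waited on until enough events arrive.
 */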
static int rbd_iter_events(struct thread_data *td, unsigned int *events,
			   unsigned int min_evts, int wait)
{
	struct rbd_data *rbd = td->io_ops_data;
	unsigned int this_events = 0;
	struct io_u *io_u;
	int i, sidx;

	sidx = 0;
	io_u_qiter(&td->io_u_all, io_u, i) {
		if (!(io_u->flags & IO_U_F_FLIGHT))
			continue;
		if (rbd_io_u_seen(io_u))
			continue;

		if (fri_check_complete(rbd, io_u, events))
			this_events++;
		else if (wait)
			rbd->sort_events[sidx++] = io_u;
	}

	if (!wait || !sidx)
		return this_events;

	/*
	 * Sort events, oldest issue first, then wait on as many as we
	 * need in order of age. If we have enough events, stop waiting,
	 * and just check if any of the older ones are done.
	 */
	if (sidx > 1)
		qsort(rbd->sort_events, sidx, sizeof(struct io_u *), rbd_io_u_cmp);

	for (i = 0; i < sidx; i++) {
		io_u = rbd->sort_events[i];

		if (fri_check_complete(rbd, io_u, events)) {
			this_events++;
			continue;
		}

		/*
		 * Stop waiting when we have enough, but continue checking
		 * all pending IOs if they are complete.
		 */
		if (*events >= min_evts)
			continue;

		rbd_io_u_wait_complete(io_u);

		if (fri_check_complete(rbd, io_u, events))
			this_events++;
	}

	return this_events;
}

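/*
 * getevents hook: poll once without waiting; if too few events arrived
 * and busy_poll is not set, switch to waiting on individual completions.
 */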
static int fio_rbd_getevents(struct thread_data *td, unsigned int min,
			     unsigned int max, const struct timespec *t)
{
	unsigned int this_events, events = 0;
	struct rbd_options *o = td->eo;
	int wait = 0;

	do {
		this_events = rbd_iter_events(td, &events, min, wait);

		if (events >= min)
			break;
		if (this_events)
			continue;

		if (!o->busy_poll)
			wait = 1;
		else
			nop;
	} while (1);

	return events;
}

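/*
 * queue hook: create a librbd completion and dispatch the aio call
 * matching the data direction. On error the io_u is completed inline
 * via FIO_Q_COMPLETED.
 */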
static int fio_rbd_queue(struct thread_data *td, struct io_u *io_u)
{
	struct rbd_data *rbd = td->io_ops_data;
	struct fio_rbd_iou *fri = io_u->engine_data;
	int r = -1;

	fio_ro_check(td, io_u);

	fri->io_seen = 0;
	fri->io_complete = 0;

	r = rbd_aio_create_completion(fri, _fio_rbd_finish_aiocb,
				      &fri->completion);
	if (r < 0) {
		log_err("rbd_aio_create_completion failed.\n");
		goto failed;
	}

	if (io_u->ddir == DDIR_WRITE) {
		r = rbd_aio_write(rbd->image, io_u->offset, io_u->xfer_buflen,
				  io_u->xfer_buf, fri->completion);
		if (r < 0) {
			log_err("rbd_aio_write failed.\n");
			goto failed_comp;
		}
	} else if (io_u->ddir == DDIR_READ) {
		r = rbd_aio_read(rbd->image, io_u->offset, io_u->xfer_buflen,
				 io_u->xfer_buf, fri->completion);
		if (r < 0) {
			log_err("rbd_aio_read failed.\n");
			goto failed_comp;
		}
	} else if (io_u->ddir == DDIR_TRIM) {
		r = rbd_aio_discard(rbd->image, io_u->offset,
				    io_u->xfer_buflen, fri->completion);
		if (r < 0) {
			log_err("rbd_aio_discard failed.\n");
			goto failed_comp;
		}
	} else if (io_u->ddir == DDIR_SYNC) {
		r = rbd_aio_flush(rbd->image, fri->completion);
		if (r < 0) {
			log_err("rbd_aio_flush failed.\n");
			goto failed_comp;
		}
	} else {
		dprint(FD_IO, "%s: Warning: unhandled ddir: %d\n", __func__,
		       io_u->ddir);
		r = -EINVAL;
		goto failed_comp;
	}

	return FIO_Q_QUEUED;
failed_comp:
	rbd_aio_release(fri->completion);
failed:
	io_u->error = -r;
	td_verror(td, io_u->error, "xfer");
	return FIO_Q_COMPLETED;
}

static int fio_rbd_init(struct thread_data *td)
{
	int r;

	r = _fio_rbd_connect(td);
	if (r) {
		log_err("fio_rbd_connect failed, return code: %d\n", r);
		goto failed;
	}

	return 0;

failed:
	return 1;
}

static void fio_rbd_cleanup(struct thread_data *td)
{
	struct rbd_data *rbd = td->io_ops_data;

	if (rbd) {
		_fio_rbd_disconnect(rbd);
		free(rbd->aio_events);
		free(rbd->sort_events);
		free(rbd);
	}
}

static int fio_rbd_setup(struct thread_data *td)
{
	rbd_image_info_t info;
	struct fio_file *f;
	struct rbd_data *rbd = NULL;
	int major, minor, extra;
	int r;

	/* log the version of librbd. No cluster connection required. */
	rbd_version(&major, &minor, &extra);
	log_info("rbd engine: RBD version: %d.%d.%d\n", major, minor, extra);

	/* allocate engine-specific structure to deal with librbd. */
	r = _fio_setup_rbd_data(td, &rbd);
	if (r) {
		log_err("fio_setup_rbd_data failed.\n");
		goto cleanup;
	}
	td->io_ops_data = rbd;

	/* librbd does not allow us to run first in the main thread and later
	 * in a fork child. It needs to be the same process context all the
	 * time.
	 */
	td->o.use_thread = 1;

	/* connect in the main thread to determine the size of the given
	 * RADOS block device, and disconnect later on.
	 */
	r = _fio_rbd_connect(td);
	if (r) {
		log_err("fio_rbd_connect failed.\n");
		goto cleanup;
	}

	/* get the size of the RADOS block device */
	r = rbd_stat(rbd->image, &info, sizeof(info));
	if (r < 0) {
		log_err("rbd_stat failed.\n");
		goto disconnect;
	}
	dprint(FD_IO, "rbd-engine: image size: %llu\n",
	       (unsigned long long) info.size);

	/* taken from the "net" engine. Pretend we deal with files,
	 * even if we do not have any idea about files.
	 * The size of the RBD is set instead of an artificial file size.
	 */
	if (!td->files_index) {
		add_file(td, td->o.filename ? : "rbd", 0, 0);
		td->o.nr_files = td->o.nr_files ? : 1;
		td->o.open_files++;
	}
	f = td->files[0];
	f->real_file_size = info.size;

	/* disconnect; we were only connected to determine the size
	 * of the RBD.
	 */
	_fio_rbd_disconnect(rbd);
	return 0;

disconnect:
	_fio_rbd_disconnect(rbd);
cleanup:
	fio_rbd_cleanup(td);
	return r;
}

static int fio_rbd_open(struct thread_data *td, struct fio_file *f)
{
	return 0;
}

static int fio_rbd_invalidate(struct thread_data *td, struct fio_file *f)
{
#if defined(CONFIG_RBD_INVAL)
	struct rbd_data *rbd = td->io_ops_data;

	return rbd_invalidate_cache(rbd->image);
#else
	return 0;
#endif
}

static void fio_rbd_io_u_free(struct thread_data *td, struct io_u *io_u)
{
	struct fio_rbd_iou *fri = io_u->engine_data;

	if (fri) {
		io_u->engine_data = NULL;
		free(fri);
	}
}

static int fio_rbd_io_u_init(struct thread_data *td, struct io_u *io_u)
{
	struct fio_rbd_iou *fri;

	fri = calloc(1, sizeof(*fri));
	if (!fri)
		return 1;
	fri->io_u = io_u;
	io_u->engine_data = fri;
	return 0;
}

static struct ioengine_ops ioengine = {
	.name = "rbd",
	.version = FIO_IOOPS_VERSION,
	.setup = fio_rbd_setup,
	.init = fio_rbd_init,
	.queue = fio_rbd_queue,
	.getevents = fio_rbd_getevents,
	.event = fio_rbd_event,
	.cleanup = fio_rbd_cleanup,
	.open_file = fio_rbd_open,
	.invalidate = fio_rbd_invalidate,
	.options = options,
	.io_u_init = fio_rbd_io_u_init,
	.io_u_free = fio_rbd_io_u_free,
	.option_struct_size = sizeof(struct rbd_options),
};

static void fio_init fio_rbd_register(void)
{
	register_ioengine(&ioengine);
}

static void fio_exit fio_rbd_unregister(void)
{
	unregister_ioengine(&ioengine);
}