Merge branch 'optimize-busy-poll' of https://github.com/ketor/fio
[fio.git] / engines / rbd.c
... / ...
CommitLineData
1/*
2 * rbd engine
3 *
4 * IO engine using Ceph's librbd to test RADOS Block Devices.
5 *
6 */
7
#include <errno.h>

#include <rbd/librbd.h>

#include "../fio.h"
11
/*
 * Per-io_u engine data, attached via io_u->engine_data.
 */
struct fio_rbd_iou {
	struct io_u *io_u;		/* back-pointer to the owning io_u */
	rbd_completion_t completion;	/* librbd AIO completion handle */
	int io_seen;			/* reaped by rbd_iter_events() already */
	int io_complete;		/* set by _fio_rbd_finish_aiocb() */
};
18
/*
 * Per-thread engine state, stored in td->io_ops->data.
 */
struct rbd_data {
	rados_t cluster;		/* RADOS cluster handle */
	rados_ioctx_t io_ctx;		/* I/O context for the chosen pool */
	rbd_image_t image;		/* open RBD image */
	struct io_u **aio_events;	/* completed io_us, iodepth entries */
};
25
/*
 * Job-file options for the rbd engine (see options[] below).
 */
struct rbd_options {
	struct thread_data *td;		/* filled in by fio's option code */
	char *rbd_name;			/* image name ("rbdname" option) */
	char *pool_name;		/* pool hosting the image ("pool") */
	char *client_name;		/* ceph client id ("clientname") */
	int busy_poll;			/* spin for completions instead of
					 * blocking ("busy_poll") */
};
33
/* Option table; each .off1 maps a job-file option into struct rbd_options. */
static struct fio_option options[] = {
	{
		.name		= "rbdname",
		.lname		= "rbd engine rbdname",
		.type		= FIO_OPT_STR_STORE,
		.help		= "RBD name for RBD engine",
		.off1		= offsetof(struct rbd_options, rbd_name),
		.category	= FIO_OPT_C_ENGINE,
		.group		= FIO_OPT_G_RBD,
	},
	{
		.name		= "pool",
		.lname		= "rbd engine pool",
		.type		= FIO_OPT_STR_STORE,
		.help		= "Name of the pool hosting the RBD for the RBD engine",
		.off1		= offsetof(struct rbd_options, pool_name),
		.category	= FIO_OPT_C_ENGINE,
		.group		= FIO_OPT_G_RBD,
	},
	{
		.name		= "clientname",
		.lname		= "rbd engine clientname",
		.type		= FIO_OPT_STR_STORE,
		.help		= "Name of the ceph client to access the RBD for the RBD engine",
		.off1		= offsetof(struct rbd_options, client_name),
		.category	= FIO_OPT_C_ENGINE,
		.group		= FIO_OPT_G_RBD,
	},
	{
		.name		= "busy_poll",
		.lname		= "Busy poll",
		.type		= FIO_OPT_BOOL,
		.help		= "Busy poll for completions instead of sleeping",
		.off1		= offsetof(struct rbd_options, busy_poll),
		.def		= "0",
		.category	= FIO_OPT_C_ENGINE,
		.group		= FIO_OPT_G_RBD,
	},
	{
		.name = NULL,	/* table terminator */
	},
};
76
77static int _fio_setup_rbd_data(struct thread_data *td,
78 struct rbd_data **rbd_data_ptr)
79{
80 struct rbd_data *rbd_data;
81
82 if (td->io_ops->data)
83 return 0;
84
85 rbd_data = malloc(sizeof(struct rbd_data));
86 if (!rbd_data)
87 goto failed;
88
89 memset(rbd_data, 0, sizeof(struct rbd_data));
90
91 rbd_data->aio_events = malloc(td->o.iodepth * sizeof(struct io_u *));
92 if (!rbd_data->aio_events)
93 goto failed;
94
95 memset(rbd_data->aio_events, 0, td->o.iodepth * sizeof(struct io_u *));
96
97 *rbd_data_ptr = rbd_data;
98
99 return 0;
100
101failed:
102 if (rbd_data)
103 free(rbd_data);
104 return 1;
105
106}
107
/*
 * Bring up the full librados/librbd stack for this thread:
 * create the cluster handle, read the default ceph config, connect,
 * create an ioctx on the configured pool and open the image.
 *
 * Returns 0 on success, 1 on failure.  The goto chain tears down only
 * what was already established, in reverse order of acquisition.
 */
static int _fio_rbd_connect(struct thread_data *td)
{
	struct rbd_data *rbd_data = td->io_ops->data;
	struct rbd_options *o = td->eo;
	int r;

	/* client_name may be NULL, in which case librados uses its default */
	r = rados_create(&rbd_data->cluster, o->client_name);
	if (r < 0) {
		log_err("rados_create failed.\n");
		goto failed_early;
	}

	/* NULL path: search the default ceph.conf locations */
	r = rados_conf_read_file(rbd_data->cluster, NULL);
	if (r < 0) {
		log_err("rados_conf_read_file failed.\n");
		goto failed_early;
	}

	r = rados_connect(rbd_data->cluster);
	if (r < 0) {
		log_err("rados_connect failed.\n");
		goto failed_shutdown;
	}

	r = rados_ioctx_create(rbd_data->cluster, o->pool_name,
			       &rbd_data->io_ctx);
	if (r < 0) {
		log_err("rados_ioctx_create failed.\n");
		goto failed_shutdown;
	}

	r = rbd_open(rbd_data->io_ctx, o->rbd_name, &rbd_data->image,
		     NULL /*snap */ );
	if (r < 0) {
		log_err("rbd_open failed.\n");
		goto failed_open;
	}
	return 0;

failed_open:
	rados_ioctx_destroy(rbd_data->io_ctx);
	rbd_data->io_ctx = NULL;
failed_shutdown:
	rados_shutdown(rbd_data->cluster);
	rbd_data->cluster = NULL;
failed_early:
	return 1;
}
156
157static void _fio_rbd_disconnect(struct rbd_data *rbd_data)
158{
159 if (!rbd_data)
160 return;
161
162 /* shutdown everything */
163 if (rbd_data->image) {
164 rbd_close(rbd_data->image);
165 rbd_data->image = NULL;
166 }
167
168 if (rbd_data->io_ctx) {
169 rados_ioctx_destroy(rbd_data->io_ctx);
170 rbd_data->io_ctx = NULL;
171 }
172
173 if (rbd_data->cluster) {
174 rados_shutdown(rbd_data->cluster);
175 rbd_data->cluster = NULL;
176 }
177}
178
179static void _fio_rbd_finish_aiocb(rbd_completion_t comp, void *data)
180{
181 struct fio_rbd_iou *fri = data;
182 struct io_u *io_u = fri->io_u;
183 ssize_t ret;
184
185 /*
186 * Looks like return value is 0 for success, or < 0 for
187 * a specific error. So we have to assume that it can't do
188 * partial completions.
189 */
190 fri->io_complete = 1;
191
192 ret = rbd_aio_get_return_value(fri->completion);
193 if (ret < 0) {
194 io_u->error = ret;
195 io_u->resid = io_u->xfer_buflen;
196 } else
197 io_u->error = 0;
198}
199
200static struct io_u *fio_rbd_event(struct thread_data *td, int event)
201{
202 struct rbd_data *rbd_data = td->io_ops->data;
203
204 return rbd_data->aio_events[event];
205}
206
207static inline int fri_check_complete(struct rbd_data *rbd_data,
208 struct io_u *io_u,
209 unsigned int *events)
210{
211 struct fio_rbd_iou *fri = io_u->engine_data;
212
213 if (fri->io_complete) {
214 fri->io_seen = 1;
215 rbd_data->aio_events[*events] = io_u;
216 (*events)++;
217
218 rbd_aio_release(fri->completion);
219 return 1;
220 }
221
222 return 0;
223}
224
/*
 * Walk all in-flight io_us and reap those whose completions have fired,
 * storing them via fri_check_complete().  When "wait" is set, block on
 * each still-pending completion before re-checking it.  Stops early once
 * *events reaches min_evts.  Returns the number of events reaped during
 * this pass (as distinct from the running total in *events).
 */
static int rbd_iter_events(struct thread_data *td, unsigned int *events,
			   unsigned int min_evts, int wait)
{
	struct rbd_data *rbd_data = td->io_ops->data;
	unsigned int this_events = 0;
	struct io_u *io_u;
	int i;

	io_u_qiter(&td->io_u_all, io_u, i) {
		struct fio_rbd_iou *fri = io_u->engine_data;

		/* skip io_us that were never submitted */
		if (!(io_u->flags & IO_U_F_FLIGHT))
			continue;
		/* already reaped in an earlier pass */
		if (fri->io_seen)
			continue;

		if (fri_check_complete(rbd_data, io_u, events))
			this_events++;
		else if (wait) {
			rbd_aio_wait_for_complete(fri->completion);

			if (fri_check_complete(rbd_data, io_u, events))
				this_events++;
		}
		if (*events >= min_evts)
			break;
	}

	return this_events;
}
255
/*
 * fio getevents hook: loop until at least "min" completions have been
 * collected.  The first pass never blocks; if a pass reaps nothing and
 * busy_poll is off, subsequent passes block per-completion.  With
 * busy_poll on we just spin ("nop" appears to be fio's no-op macro —
 * NOTE(review): confirm against arch headers).  "max" and "t" are
 * unused; completed io_us are retrieved later via fio_rbd_event().
 */
static int fio_rbd_getevents(struct thread_data *td, unsigned int min,
			     unsigned int max, const struct timespec *t)
{
	unsigned int this_events, events = 0;
	struct rbd_options *o = td->eo;
	int wait = 0;

	do {
		this_events = rbd_iter_events(td, &events, min, wait);

		if (events >= min)
			break;
		if (this_events)
			continue;

		if (!o->busy_poll)
			wait = 1;
		else
			nop;
	} while (1);

	return events;
}
279
280static int fio_rbd_queue(struct thread_data *td, struct io_u *io_u)
281{
282 struct rbd_data *rbd_data = td->io_ops->data;
283 struct fio_rbd_iou *fri = io_u->engine_data;
284 int r = -1;
285
286 fio_ro_check(td, io_u);
287
288 fri->io_seen = 0;
289 fri->io_complete = 0;
290
291 r = rbd_aio_create_completion(fri, _fio_rbd_finish_aiocb,
292 &fri->completion);
293 if (r < 0) {
294 log_err("rbd_aio_create_completion failed.\n");
295 goto failed;
296 }
297
298 if (io_u->ddir == DDIR_WRITE) {
299 r = rbd_aio_write(rbd_data->image, io_u->offset,
300 io_u->xfer_buflen, io_u->xfer_buf,
301 fri->completion);
302 if (r < 0) {
303 log_err("rbd_aio_write failed.\n");
304 goto failed_comp;
305 }
306
307 } else if (io_u->ddir == DDIR_READ) {
308 r = rbd_aio_read(rbd_data->image, io_u->offset,
309 io_u->xfer_buflen, io_u->xfer_buf,
310 fri->completion);
311
312 if (r < 0) {
313 log_err("rbd_aio_read failed.\n");
314 goto failed_comp;
315 }
316 } else if (io_u->ddir == DDIR_TRIM) {
317 r = rbd_aio_discard(rbd_data->image, io_u->offset,
318 io_u->xfer_buflen, fri->completion);
319 if (r < 0) {
320 log_err("rbd_aio_discard failed.\n");
321 goto failed_comp;
322 }
323 } else if (io_u->ddir == DDIR_SYNC) {
324 r = rbd_aio_flush(rbd_data->image, fri->completion);
325 if (r < 0) {
326 log_err("rbd_flush failed.\n");
327 goto failed_comp;
328 }
329 } else {
330 dprint(FD_IO, "%s: Warning: unhandled ddir: %d\n", __func__,
331 io_u->ddir);
332 goto failed_comp;
333 }
334
335 return FIO_Q_QUEUED;
336failed_comp:
337 rbd_aio_release(fri->completion);
338failed:
339 io_u->error = r;
340 td_verror(td, io_u->error, "xfer");
341 return FIO_Q_COMPLETED;
342}
343
/*
 * fio per-thread init hook: establish the cluster connection and open
 * the image for this job.  Returns 0 on success, 1 on failure.
 */
static int fio_rbd_init(struct thread_data *td)
{
	int ret;

	ret = _fio_rbd_connect(td);
	if (!ret)
		return 0;

	log_err("fio_rbd_connect failed, return code: %d .\n", ret);
	return 1;
}
359
360static void fio_rbd_cleanup(struct thread_data *td)
361{
362 struct rbd_data *rbd_data = td->io_ops->data;
363
364 if (rbd_data) {
365 _fio_rbd_disconnect(rbd_data);
366 free(rbd_data->aio_events);
367 free(rbd_data);
368 }
369
370}
371
372static int fio_rbd_setup(struct thread_data *td)
373{
374 int r = 0;
375 rbd_image_info_t info;
376 struct fio_file *f;
377 struct rbd_data *rbd_data = NULL;
378 int major, minor, extra;
379
380 /* log version of librbd. No cluster connection required. */
381 rbd_version(&major, &minor, &extra);
382 log_info("rbd engine: RBD version: %d.%d.%d\n", major, minor, extra);
383
384 /* allocate engine specific structure to deal with librbd. */
385 r = _fio_setup_rbd_data(td, &rbd_data);
386 if (r) {
387 log_err("fio_setup_rbd_data failed.\n");
388 goto cleanup;
389 }
390 td->io_ops->data = rbd_data;
391
392 /* librbd does not allow us to run first in the main thread and later
393 * in a fork child. It needs to be the same process context all the
394 * time.
395 */
396 td->o.use_thread = 1;
397
398 /* connect in the main thread to determine to determine
399 * the size of the given RADOS block device. And disconnect
400 * later on.
401 */
402 r = _fio_rbd_connect(td);
403 if (r) {
404 log_err("fio_rbd_connect failed.\n");
405 goto cleanup;
406 }
407
408 /* get size of the RADOS block device */
409 r = rbd_stat(rbd_data->image, &info, sizeof(info));
410 if (r < 0) {
411 log_err("rbd_status failed.\n");
412 goto disconnect;
413 }
414 dprint(FD_IO, "rbd-engine: image size: %lu\n", info.size);
415
416 /* taken from "net" engine. Pretend we deal with files,
417 * even if we do not have any ideas about files.
418 * The size of the RBD is set instead of a artificial file.
419 */
420 if (!td->files_index) {
421 add_file(td, td->o.filename ? : "rbd", 0, 0);
422 td->o.nr_files = td->o.nr_files ? : 1;
423 td->o.open_files++;
424 }
425 f = td->files[0];
426 f->real_file_size = info.size;
427
428 /* disconnect, then we were only connected to determine
429 * the size of the RBD.
430 */
431 _fio_rbd_disconnect(rbd_data);
432 return 0;
433
434disconnect:
435 _fio_rbd_disconnect(rbd_data);
436cleanup:
437 fio_rbd_cleanup(td);
438 return r;
439}
440
/*
 * fio open_file hook: nothing to do — the image is opened during
 * connect; the fio_file is only a size-carrying placeholder.
 */
static int fio_rbd_open(struct thread_data *td, struct fio_file *f)
{
	return 0;
}
445
/*
 * fio invalidate hook: drop librbd's client-side cache for the image
 * when the installed librbd provides rbd_invalidate_cache() (detected
 * at configure time via CONFIG_RBD_INVAL); otherwise a no-op.
 */
static int fio_rbd_invalidate(struct thread_data *td, struct fio_file *f)
{
#if defined(CONFIG_RBD_INVAL)
	struct rbd_data *rbd_data = td->io_ops->data;

	return rbd_invalidate_cache(rbd_data->image);
#else
	return 0;
#endif
}
456
457static void fio_rbd_io_u_free(struct thread_data *td, struct io_u *io_u)
458{
459 struct fio_rbd_iou *fri = io_u->engine_data;
460
461 if (fri) {
462 io_u->engine_data = NULL;
463 free(fri);
464 }
465}
466
467static int fio_rbd_io_u_init(struct thread_data *td, struct io_u *io_u)
468{
469 struct fio_rbd_iou *fri;
470
471 fri = calloc(1, sizeof(*fri));
472 fri->io_u = io_u;
473 io_u->engine_data = fri;
474 return 0;
475}
476
/* ioengine ops table wiring the hooks above into fio's engine API. */
static struct ioengine_ops ioengine = {
	.name			= "rbd",
	.version		= FIO_IOOPS_VERSION,
	.setup			= fio_rbd_setup,
	.init			= fio_rbd_init,
	.queue			= fio_rbd_queue,
	.getevents		= fio_rbd_getevents,
	.event			= fio_rbd_event,
	.cleanup		= fio_rbd_cleanup,
	.open_file		= fio_rbd_open,
	.invalidate		= fio_rbd_invalidate,
	.options		= options,
	.io_u_init		= fio_rbd_io_u_init,
	.io_u_free		= fio_rbd_io_u_free,
	.option_struct_size	= sizeof(struct rbd_options),
};
493
/* Constructor (fio_init attribute): register the engine at load time. */
static void fio_init fio_rbd_register(void)
{
	register_ioengine(&ioengine);
}
498
/* Destructor (fio_exit attribute): unregister the engine on unload. */
static void fio_exit fio_rbd_unregister(void)
{
	unregister_ioengine(&ioengine);
}