engines/rbd: remove unused rbd_options->td
[fio.git] / engines / rbd.c
1/*
2 * rbd engine
3 *
4 * IO engine using Ceph's librbd to test RADOS Block Devices.
5 *
6 */
7
8#include <rbd/librbd.h>
9
10#include "../fio.h"
11
12struct fio_rbd_iou {
13 struct io_u *io_u;
14 rbd_completion_t completion;
15 int io_seen;
16};
17
18struct rbd_data {
19 rados_t cluster;
20 rados_ioctx_t io_ctx;
21 rbd_image_t image;
22 struct io_u **aio_events;
23};
24
25struct rbd_options {
26 char *rbd_name;
27 char *pool_name;
28 char *client_name;
29};
30
31static struct fio_option options[] = {
32 {
33 .name = "rbdname",
34 .lname = "rbd engine rbdname",
35 .type = FIO_OPT_STR_STORE,
36 .help = "RBD name for RBD engine",
37 .off1 = offsetof(struct rbd_options, rbd_name),
38 .category = FIO_OPT_C_ENGINE,
39 .group = FIO_OPT_G_RBD,
40 },
41 {
42 .name = "pool",
43 .lname = "rbd engine pool",
44 .type = FIO_OPT_STR_STORE,
45 .help = "Name of the pool hosting the RBD for the RBD engine",
46 .off1 = offsetof(struct rbd_options, pool_name),
47 .category = FIO_OPT_C_ENGINE,
48 .group = FIO_OPT_G_RBD,
49 },
50 {
51 .name = "clientname",
52 .lname = "rbd engine clientname",
53 .type = FIO_OPT_STR_STORE,
54 .help = "Name of the ceph client to access the RBD for the RBD engine",
55 .off1 = offsetof(struct rbd_options, client_name),
56 .category = FIO_OPT_C_ENGINE,
57 .group = FIO_OPT_G_RBD,
58 },
59 {
60 .name = NULL,
61 },
62};
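The three options above map directly onto a job file. A minimal example job (the client, pool and image names are placeholders and must exist on the target cluster):

        [rbd_test]
        ioengine=rbd
        clientname=admin
        pool=rbd
        rbdname=fio_test_image
        rw=randwrite
        bs=4k
        iodepth=32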
63
64static int _fio_setup_rbd_data(struct thread_data *td,
65 struct rbd_data **rbd_data_ptr)
66{
67 struct rbd_data *rbd_data;
68
69 if (td->io_ops->data)
70 return 0;
71
72 rbd_data = malloc(sizeof(struct rbd_data));
73 if (!rbd_data)
74 goto failed;
75
76 memset(rbd_data, 0, sizeof(struct rbd_data));
77
78 rbd_data->aio_events = malloc(td->o.iodepth * sizeof(struct io_u *));
79 if (!rbd_data->aio_events)
80 goto failed;
81
82 memset(rbd_data->aio_events, 0, td->o.iodepth * sizeof(struct io_u *));
83
84 *rbd_data_ptr = rbd_data;
85
86 return 0;
87
88failed:
89 if (rbd_data)
90 free(rbd_data);
91 return 1;
92
93}
94
95static int _fio_rbd_connect(struct thread_data *td)
96{
97 struct rbd_data *rbd_data = td->io_ops->data;
98 struct rbd_options *o = td->eo;
99 int r;
100
101 r = rados_create(&rbd_data->cluster, o->client_name);
102 if (r < 0) {
103 log_err("rados_create failed.\n");
104 goto failed_early;
105 }
106
107 r = rados_conf_read_file(rbd_data->cluster, NULL);
108 if (r < 0) {
109 log_err("rados_conf_read_file failed.\n");
110 goto failed_early;
111 }
112
113 r = rados_connect(rbd_data->cluster);
114 if (r < 0) {
115 log_err("rados_connect failed.\n");
116 goto failed_shutdown;
117 }
118
119 r = rados_ioctx_create(rbd_data->cluster, o->pool_name,
120 &rbd_data->io_ctx);
121 if (r < 0) {
122 log_err("rados_ioctx_create failed.\n");
123 goto failed_shutdown;
124 }
125
126 r = rbd_open(rbd_data->io_ctx, o->rbd_name, &rbd_data->image,
127 NULL /* snap */);
128 if (r < 0) {
129 log_err("rbd_open failed.\n");
130 goto failed_open;
131 }
132 return 0;
133
134failed_open:
135 rados_ioctx_destroy(rbd_data->io_ctx);
136 rbd_data->io_ctx = NULL;
137failed_shutdown:
138 rados_shutdown(rbd_data->cluster);
139 rbd_data->cluster = NULL;
140failed_early:
141 return 1;
142}
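For reference, the same bring-up and teardown ordering can be exercised outside of fio. The sketch below is illustrative only (the client, pool, image names and monitor address are made up); it also shows rados_conf_set() as an explicit alternative to the default ceph.conf lookup that rados_conf_read_file(NULL) performs:

        /* standalone sketch, not part of the engine; link with -lrados -lrbd */
        #include <stdio.h>
        #include <rados/librados.h>
        #include <rbd/librbd.h>

        int main(void)
        {
                rados_t cluster;
                rados_ioctx_t io_ctx;
                rbd_image_t image;
                int ret = 1;

                /* "admin" corresponds to the clientname option */
                if (rados_create(&cluster, "admin") < 0)
                        return 1;

                /* read the default ceph.conf; single options can also be set
                 * explicitly (the monitor address below is hypothetical) */
                rados_conf_read_file(cluster, NULL);
                rados_conf_set(cluster, "mon_host", "192.168.0.1");

                if (rados_connect(cluster) < 0)
                        goto out_shutdown;
                if (rados_ioctx_create(cluster, "rbd", &io_ctx) < 0)
                        goto out_shutdown;
                if (rbd_open(io_ctx, "fio_test_image", &image, NULL /* snap */) < 0)
                        goto out_ioctx;

                printf("connected, image opened\n");
                rbd_close(image);
                ret = 0;
        out_ioctx:
                rados_ioctx_destroy(io_ctx);
        out_shutdown:
                rados_shutdown(cluster);
                return ret;
        }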
143
144static void _fio_rbd_disconnect(struct rbd_data *rbd_data)
145{
146 if (!rbd_data)
147 return;
148
149 /* shutdown everything */
150 if (rbd_data->image) {
151 rbd_close(rbd_data->image);
152 rbd_data->image = NULL;
153 }
154
155 if (rbd_data->io_ctx) {
156 rados_ioctx_destroy(rbd_data->io_ctx);
157 rbd_data->io_ctx = NULL;
158 }
159
160 if (rbd_data->cluster) {
161 rados_shutdown(rbd_data->cluster);
162 rbd_data->cluster = NULL;
163 }
164}
165
166static void _fio_rbd_finish_aiocb(rbd_completion_t comp, void *data)
167{
168 struct fio_rbd_iou *fri = data;
169 struct io_u *io_u = fri->io_u;
170 ssize_t ret;
171
172 /*
173 * The return value is 0 for success, or < 0 for a specific
174 * error, so we have to assume that librbd cannot report
175 * partial completions.
176 */
177 ret = rbd_aio_get_return_value(fri->completion);
178 if (ret < 0) {
179 io_u->error = ret;
180 io_u->resid = io_u->xfer_buflen;
181 } else
182 io_u->error = 0;
183}
184
185static struct io_u *fio_rbd_event(struct thread_data *td, int event)
186{
187 struct rbd_data *rbd_data = td->io_ops->data;
188
189 return rbd_data->aio_events[event];
190}
191
192static inline int fri_check_complete(struct rbd_data *rbd_data,
193 struct io_u *io_u,
194 unsigned int *events)
195{
196 struct fio_rbd_iou *fri = io_u->engine_data;
197
198 if (rbd_aio_is_complete(fri->completion)) {
199 fri->io_seen = 1;
200 rbd_data->aio_events[*events] = io_u;
201 (*events)++;
202
203 rbd_aio_release(fri->completion);
204 return 1;
205 }
206
207 return 0;
208}
209
210static int rbd_iter_events(struct thread_data *td, unsigned int *events,
211 unsigned int min_evts, int wait)
212{
213 struct rbd_data *rbd_data = td->io_ops->data;
214 unsigned int this_events = 0;
215 struct io_u *io_u;
216 int i;
217
218 io_u_qiter(&td->io_u_all, io_u, i) {
219 struct fio_rbd_iou *fri = io_u->engine_data;
220
221 if (!(io_u->flags & IO_U_F_FLIGHT))
222 continue;
223 if (fri->io_seen)
224 continue;
225
226 if (fri_check_complete(rbd_data, io_u, events))
227 this_events++;
228 else if (wait) {
229 rbd_aio_wait_for_complete(fri->completion);
230
231 if (fri_check_complete(rbd_data, io_u, events))
232 this_events++;
233 }
234 if (*events >= min_evts)
235 break;
236 }
237
238 return this_events;
239}
240
241static int fio_rbd_getevents(struct thread_data *td, unsigned int min,
242 unsigned int max, const struct timespec *t)
243{
244 unsigned int this_events, events = 0;
245 int wait = 0;
246
247 do {
248 this_events = rbd_iter_events(td, &events, min, wait);
249
250 if (events >= min)
251 break;
252 if (this_events)
253 continue;
254
255 wait = 1;
256 } while (1);
257
258 return events;
259}
260
261static int fio_rbd_queue(struct thread_data *td, struct io_u *io_u)
262{
263 struct rbd_data *rbd_data = td->io_ops->data;
264 struct fio_rbd_iou *fri = io_u->engine_data;
265 int r = -1;
266
267 fio_ro_check(td, io_u);
268
269 fri->io_seen = 0;
270
271 r = rbd_aio_create_completion(fri, _fio_rbd_finish_aiocb,
272 &fri->completion);
273 if (r < 0) {
274 log_err("rbd_aio_create_completion failed.\n");
275 goto failed;
276 }
277
278 if (io_u->ddir == DDIR_WRITE) {
279 r = rbd_aio_write(rbd_data->image, io_u->offset,
280 io_u->xfer_buflen, io_u->xfer_buf,
281 fri->completion);
282 if (r < 0) {
283 log_err("rbd_aio_write failed.\n");
284 goto failed_comp;
285 }
286
287 } else if (io_u->ddir == DDIR_READ) {
288 r = rbd_aio_read(rbd_data->image, io_u->offset,
289 io_u->xfer_buflen, io_u->xfer_buf,
290 fri->completion);
291
292 if (r < 0) {
293 log_err("rbd_aio_read failed.\n");
294 goto failed_comp;
295 }
296 } else if (io_u->ddir == DDIR_TRIM) {
297 r = rbd_aio_discard(rbd_data->image, io_u->offset,
298 io_u->xfer_buflen, fri->completion);
299 if (r < 0) {
300 log_err("rbd_aio_discard failed.\n");
301 goto failed_comp;
302 }
303 } else if (io_u->ddir == DDIR_SYNC) {
304 r = rbd_aio_flush(rbd_data->image, fri->completion);
305 if (r < 0) {
306 log_err("rbd_aio_flush failed.\n");
307 goto failed_comp;
308 }
309 } else {
310 dprint(FD_IO, "%s: Warning: unhandled ddir: %d\n", __func__,
311 io_u->ddir);
312 goto failed_comp;
313 }
314
315 return FIO_Q_QUEUED;
316failed_comp:
317 rbd_aio_release(fri->completion);
318failed:
319 io_u->error = r;
320 td_verror(td, io_u->error, "xfer");
321 return FIO_Q_COMPLETED;
322}
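The queue/getevents split above follows the standard librbd completion lifecycle: create a completion, submit the I/O, poll or wait for it, read the return value, then release the completion. A condensed standalone sketch of one write (assuming an already opened rbd_image_t and <rbd/librbd.h>; the helper and callback names are made up):

        /* illustrative sketch only: one asynchronous write driven to completion
         * the same way the engine does it */
        static void write_done(rbd_completion_t comp, void *arg)
        {
                /* corresponds to _fio_rbd_finish_aiocb(); arg would be the io_u */
                (void) comp;
                (void) arg;
        }

        static int do_one_aio_write(rbd_image_t image, const char *buf, size_t len)
        {
                rbd_completion_t comp;
                ssize_t ret;

                if (rbd_aio_create_completion(NULL, write_done, &comp) < 0)
                        return -1;

                if (rbd_aio_write(image, 0, len, buf, comp) < 0) {
                        rbd_aio_release(comp);
                        return -1;
                }

                /* the engine polls rbd_aio_is_complete() from getevents;
                 * waiting synchronously is the simplest equivalent here */
                rbd_aio_wait_for_complete(comp);
                ret = rbd_aio_get_return_value(comp);
                rbd_aio_release(comp);

                return ret < 0 ? -1 : 0;
        }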
323
324static int fio_rbd_init(struct thread_data *td)
325{
326 int r;
327
328 r = _fio_rbd_connect(td);
329 if (r) {
330 log_err("_fio_rbd_connect failed, return code: %d.\n", r);
331 goto failed;
332 }
333
334 return 0;
335
336failed:
337 return 1;
338}
339
340static void fio_rbd_cleanup(struct thread_data *td)
341{
342 struct rbd_data *rbd_data = td->io_ops->data;
343
344 if (rbd_data) {
345 _fio_rbd_disconnect(rbd_data);
346 free(rbd_data->aio_events);
347 free(rbd_data);
348 }
349
350}
351
352static int fio_rbd_setup(struct thread_data *td)
353{
354 int r = 0;
355 rbd_image_info_t info;
356 struct fio_file *f;
357 struct rbd_data *rbd_data = NULL;
358 int major, minor, extra;
359
360 /* log version of librbd. No cluster connection required. */
361 rbd_version(&major, &minor, &extra);
362 log_info("rbd engine: RBD version: %d.%d.%d\n", major, minor, extra);
363
364 /* allocate engine specific structure to deal with librbd. */
365 r = _fio_setup_rbd_data(td, &rbd_data);
366 if (r) {
367 log_err("fio_setup_rbd_data failed.\n");
368 goto cleanup;
369 }
370 td->io_ops->data = rbd_data;
371
372 /* librbd does not allow us to connect in the main thread and then
373 * do I/O from a forked child; everything needs to happen in the
374 * same process context.
375 */
376 td->o.use_thread = 1;
377
378 /* connect in the main thread to determine the size of the
379 * given RADOS block device, and disconnect again further
380 * down.
381 */
382 r = _fio_rbd_connect(td);
383 if (r) {
384 log_err("_fio_rbd_connect failed.\n");
385 goto cleanup;
386 }
387
388 /* get size of the RADOS block device */
389 r = rbd_stat(rbd_data->image, &info, sizeof(info));
390 if (r < 0) {
391 log_err("rbd_stat failed.\n");
392 goto disconnect;
393 }
394 dprint(FD_IO, "rbd-engine: image size: %llu\n", (unsigned long long) info.size);
395
396 /* taken from the "net" engine: pretend we deal with files,
397 * even though we are not backed by any real ones.
398 * The size of the RBD is used instead of an artificial file size.
399 */
400 if (!td->files_index) {
401 add_file(td, td->o.filename ? : "rbd", 0, 0);
402 td->o.nr_files = td->o.nr_files ? : 1;
403 td->o.open_files++;
404 }
405 f = td->files[0];
406 f->real_file_size = info.size;
407
408 /* disconnect again, since we only connected to determine
409 * the size of the RBD.
410 */
411 _fio_rbd_disconnect(rbd_data);
412 return 0;
413
414disconnect:
415 _fio_rbd_disconnect(rbd_data);
416cleanup:
417 fio_rbd_cleanup(td);
418 return r;
419}
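The only piece of information fio_rbd_setup() really needs from the image is the size reported by rbd_stat(). A minimal standalone probe (assuming an already opened image handle and <stdio.h>; the helper name is made up):

        /* illustrative sketch only: report the field the engine consumes (size),
         * plus the object size for context */
        static int print_image_size(rbd_image_t image)
        {
                rbd_image_info_t info;
                int r;

                r = rbd_stat(image, &info, sizeof(info));
                if (r < 0)
                        return r;

                printf("size=%llu bytes, object size=%llu bytes\n",
                       (unsigned long long) info.size,
                       (unsigned long long) info.obj_size);
                return 0;
        }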
420
421static int fio_rbd_open(struct thread_data *td, struct fio_file *f)
422{
423 return 0;
424}
425
426static int fio_rbd_invalidate(struct thread_data *td, struct fio_file *f)
427{
428#if defined(CONFIG_RBD_INVAL)
429 struct rbd_data *rbd_data = td->io_ops->data;
430
431 return rbd_invalidate_cache(rbd_data->image);
432#else
433 return 0;
434#endif
435}
436
437static void fio_rbd_io_u_free(struct thread_data *td, struct io_u *io_u)
438{
439 struct fio_rbd_iou *fri = io_u->engine_data;
440
441 if (fri) {
442 io_u->engine_data = NULL;
443 free(fri);
444 }
445}
446
447static int fio_rbd_io_u_init(struct thread_data *td, struct io_u *io_u)
448{
449 struct fio_rbd_iou *fri;
450
451 fri = calloc(1, sizeof(*fri));
452 fri->io_u = io_u;
453 io_u->engine_data = fri;
454 return 0;
455}
456
457static struct ioengine_ops ioengine = {
458 .name = "rbd",
459 .version = FIO_IOOPS_VERSION,
460 .setup = fio_rbd_setup,
461 .init = fio_rbd_init,
462 .queue = fio_rbd_queue,
463 .getevents = fio_rbd_getevents,
464 .event = fio_rbd_event,
465 .cleanup = fio_rbd_cleanup,
466 .open_file = fio_rbd_open,
467 .invalidate = fio_rbd_invalidate,
468 .options = options,
469 .io_u_init = fio_rbd_io_u_init,
470 .io_u_free = fio_rbd_io_u_free,
471 .option_struct_size = sizeof(struct rbd_options),
472};
473
474static void fio_init fio_rbd_register(void)
475{
476 register_ioengine(&ioengine);
477}
478
479static void fio_exit fio_rbd_unregister(void)
480{
481 unregister_ioengine(&ioengine);
482}