Add roundup_pow2() as a generic helper
[fio.git] / engines / libaio.c
/*
 * libaio engine
 *
 * IO engine using the Linux native aio interface.
 *
 */
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <libaio.h>
#include <sys/time.h>
#include <sys/resource.h>

#include "../fio.h"
#include "../lib/pow2.h"
#include "../optgroup.h"
#include "../lib/memalign.h"

/* Should be defined in newest aio_abi.h */
#ifndef IOCB_FLAG_IOPRIO
#define IOCB_FLAG_IOPRIO	(1 << 1)
#endif

/* Hack for libaio < 0.3.111 */
#ifndef CONFIG_LIBAIO_RW_FLAGS
#define aio_rw_flags __pad2
#endif

static int fio_libaio_commit(struct thread_data *td);
static int fio_libaio_init(struct thread_data *td);

struct libaio_data {
	io_context_t aio_ctx;
	struct io_event *aio_events;
	struct iocb **iocbs;
	struct io_u **io_us;

	struct io_u **io_u_index;

	/*
	 * Basic ring buffer. 'head' is incremented in _queue(), and
	 * 'tail' is incremented in _commit(). We keep 'queued' so
	 * that we know if the ring is full or empty, when
	 * 'head' == 'tail'. 'entries' is the ring size, and
	 * 'is_pow2' is just an optimization to use AND instead of
	 * modulus to get the remainder on ring increment.
	 */
	int is_pow2;
	unsigned int entries;
	unsigned int queued;
	unsigned int head;
	unsigned int tail;
};

struct libaio_options {
	void *pad;
	unsigned int userspace_reap;
	unsigned int cmdprio_percentage;
	unsigned int nowait;
};

static struct fio_option options[] = {
	{
		.name = "userspace_reap",
		.lname = "Libaio userspace reaping",
		.type = FIO_OPT_STR_SET,
		.off1 = offsetof(struct libaio_options, userspace_reap),
		.help = "Use alternative user-space reap implementation",
		.category = FIO_OPT_C_ENGINE,
		.group = FIO_OPT_G_LIBAIO,
	},
#ifdef FIO_HAVE_IOPRIO_CLASS
	{
		.name = "cmdprio_percentage",
		.lname = "high priority percentage",
		.type = FIO_OPT_INT,
		.off1 = offsetof(struct libaio_options, cmdprio_percentage),
		.minval = 1,
		.maxval = 100,
		.help = "Send high priority I/O this percentage of the time",
		.category = FIO_OPT_C_ENGINE,
		.group = FIO_OPT_G_LIBAIO,
	},
#else
	{
		.name = "cmdprio_percentage",
		.lname = "high priority percentage",
		.type = FIO_OPT_UNSUPPORTED,
		.help = "Your platform does not support I/O priority classes",
	},
#endif
	{
		.name = "nowait",
		.lname = "RWF_NOWAIT",
		.type = FIO_OPT_BOOL,
		.off1 = offsetof(struct libaio_options, nowait),
		.help = "Set RWF_NOWAIT for reads/writes",
		.category = FIO_OPT_C_ENGINE,
		.group = FIO_OPT_G_LIBAIO,
	},
	{
		.name = NULL,
	},
};

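/*
 * Advance a ring index by 'add' slots, wrapping at 'entries'. Uses a
 * mask when the ring size is a power of two, modulus otherwise.
 */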
static inline void ring_inc(struct libaio_data *ld, unsigned int *val,
			    unsigned int add)
{
	if (ld->is_pow2)
		*val = (*val + add) & (ld->entries - 1);
	else
		*val = (*val + add) % ld->entries;
}

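/*
 * Fill in the iocb for this io_u: pread/pwrite for reads and writes
 * (optionally flagged RWF_NOWAIT), fsync for sync-type directions.
 */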
static int fio_libaio_prep(struct thread_data *td, struct io_u *io_u)
{
	struct libaio_options *o = td->eo;
	struct fio_file *f = io_u->file;
	struct iocb *iocb = &io_u->iocb;

	if (io_u->ddir == DDIR_READ) {
		io_prep_pread(iocb, f->fd, io_u->xfer_buf, io_u->xfer_buflen, io_u->offset);
		if (o->nowait)
			iocb->aio_rw_flags |= RWF_NOWAIT;
	} else if (io_u->ddir == DDIR_WRITE) {
		io_prep_pwrite(iocb, f->fd, io_u->xfer_buf, io_u->xfer_buflen, io_u->offset);
		if (o->nowait)
			iocb->aio_rw_flags |= RWF_NOWAIT;
	} else if (ddir_sync(io_u->ddir))
		io_prep_fsync(iocb, f->fd);

	return 0;
}

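/*
 * For cmdprio_percentage of the IOs, mark the iocb as real-time class
 * and flag the io_u so completion accounting knows it ran at high priority.
 */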
static void fio_libaio_prio_prep(struct thread_data *td, struct io_u *io_u)
{
	struct libaio_options *o = td->eo;

	if (rand_between(&td->prio_state, 0, 99) < o->cmdprio_percentage) {
		io_u->iocb.aio_reqprio = IOPRIO_CLASS_RT << IOPRIO_CLASS_SHIFT;
		io_u->iocb.u.c.flags |= IOCB_FLAG_IOPRIO;
		io_u->flags |= IO_U_F_PRIORITY;
	}
}

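/*
 * Translate a reaped io_event back into its io_u, recording a short
 * transfer as residual and a negative result as an error.
 */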
static struct io_u *fio_libaio_event(struct thread_data *td, int event)
{
	struct libaio_data *ld = td->io_ops_data;
	struct io_event *ev;
	struct io_u *io_u;

	ev = ld->aio_events + event;
	io_u = container_of(ev->obj, struct io_u, iocb);

	if (ev->res != io_u->xfer_buflen) {
		if (ev->res > io_u->xfer_buflen)
			io_u->error = -ev->res;
		else
			io_u->resid = io_u->xfer_buflen - ev->res;
	} else
		io_u->error = 0;

	return io_u;
}

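/*
 * Mirror of the kernel's mmap'ed aio completion ring, used by the
 * userspace_reap path to consume completions without a system call.
 */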
struct aio_ring {
	unsigned id;		 /** kernel internal index number */
	unsigned nr;		 /** number of io_events */
	unsigned head;
	unsigned tail;

	unsigned magic;
	unsigned compat_features;
	unsigned incompat_features;
	unsigned header_length;	/** size of aio_ring */

	struct io_event events[0];
};

#define AIO_RING_MAGIC	0xa10a10a1

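/*
 * Reap up to 'max' completions directly from the aio ring in user space,
 * publishing the new head index with a store-release so the kernel sees
 * which entries have been consumed.
 */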
static int user_io_getevents(io_context_t aio_ctx, unsigned int max,
			     struct io_event *events)
{
	long i = 0;
	unsigned head;
	struct aio_ring *ring = (struct aio_ring*) aio_ctx;

	while (i < max) {
		head = ring->head;

		if (head == ring->tail) {
			/* There are no more completions */
			break;
		} else {
			/* There is another completion to reap */
			events[i] = ring->events[head];
			atomic_store_release(&ring->head,
					     (head + 1) % ring->nr);
			i++;
		}
	}

	return i;
}

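/*
 * Reap completions until at least 'min' events have been gathered. The
 * userspace ring reap is used when it is enabled, no blocking minimum is
 * required and the ring magic checks out; otherwise fall back to
 * io_getevents().
 */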
static int fio_libaio_getevents(struct thread_data *td, unsigned int min,
				unsigned int max, const struct timespec *t)
{
	struct libaio_data *ld = td->io_ops_data;
	struct libaio_options *o = td->eo;
	unsigned actual_min = td->o.iodepth_batch_complete_min == 0 ? 0 : min;
	struct timespec __lt, *lt = NULL;
	int r, events = 0;

	if (t) {
		__lt = *t;
		lt = &__lt;
	}

	do {
		if (o->userspace_reap == 1
		    && actual_min == 0
		    && ((struct aio_ring *)(ld->aio_ctx))->magic
				== AIO_RING_MAGIC) {
			r = user_io_getevents(ld->aio_ctx, max,
					      ld->aio_events + events);
		} else {
			r = io_getevents(ld->aio_ctx, actual_min,
					 max, ld->aio_events + events, lt);
		}
		if (r > 0)
			events += r;
		else if ((min && r == 0) || r == -EAGAIN) {
			fio_libaio_commit(td);
			if (actual_min)
				usleep(10);
		} else if (r != -EINTR)
			break;
	} while (events < min);

	return r < 0 ? r : events;
}

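/*
 * Queue an io_u: syncs and trims are completed inline (and only when
 * nothing is pending, since they can't be submitted through libaio),
 * while reads and writes are staged in the ring for the next commit.
 */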
static enum fio_q_status fio_libaio_queue(struct thread_data *td,
					  struct io_u *io_u)
{
	struct libaio_data *ld = td->io_ops_data;
	struct libaio_options *o = td->eo;

	fio_ro_check(td, io_u);

	if (ld->queued == td->o.iodepth)
		return FIO_Q_BUSY;

	/*
	 * fsync is tricky, since it can fail and we need to do it
	 * serialized with other io. The reason is that Linux doesn't
	 * support aio fsync yet. So return busy for the case where we
	 * have pending io, to let fio complete those first.
	 */
	if (ddir_sync(io_u->ddir)) {
		if (ld->queued)
			return FIO_Q_BUSY;

		do_io_u_sync(td, io_u);
		return FIO_Q_COMPLETED;
	}

	if (io_u->ddir == DDIR_TRIM) {
		if (ld->queued)
			return FIO_Q_BUSY;

		do_io_u_trim(td, io_u);
		io_u_mark_submit(td, 1);
		io_u_mark_complete(td, 1);
		return FIO_Q_COMPLETED;
	}

	if (o->cmdprio_percentage)
		fio_libaio_prio_prep(td, io_u);

	ld->iocbs[ld->head] = &io_u->iocb;
	ld->io_us[ld->head] = io_u;
	ring_inc(ld, &ld->head, 1);
	ld->queued++;
	return FIO_Q_QUEUED;
}

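/*
 * Stamp the issue time and account queuing for a batch of just-submitted
 * io_us, if issue time tracking is enabled for this job.
 */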
static void fio_libaio_queued(struct thread_data *td, struct io_u **io_us,
			      unsigned int nr)
{
	struct timespec now;
	unsigned int i;

	if (!fio_fill_issue_time(td))
		return;

	fio_gettime(&now, NULL);

	for (i = 0; i < nr; i++) {
		struct io_u *io_u = io_us[i];

		memcpy(&io_u->issue_time, &now, sizeof(now));
		io_u_queued(td, io_u);
	}
}

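/*
 * Submit everything staged in the ring with io_submit(), one contiguous
 * chunk at a time so wrap-around is handled. Partial submits advance the
 * tail, EAGAIN defers to the reap path (or gives up after 30s when
 * nothing is in flight), and ENOMEM is fatal unless events can still be
 * reaped.
 */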
static int fio_libaio_commit(struct thread_data *td)
{
	struct libaio_data *ld = td->io_ops_data;
	struct iocb **iocbs;
	struct io_u **io_us;
	struct timespec ts;
	int ret, wait_start = 0;

	if (!ld->queued)
		return 0;

	do {
		long nr = ld->queued;

		nr = min((unsigned int) nr, ld->entries - ld->tail);
		io_us = ld->io_us + ld->tail;
		iocbs = ld->iocbs + ld->tail;

		ret = io_submit(ld->aio_ctx, nr, iocbs);
		if (ret > 0) {
			fio_libaio_queued(td, io_us, ret);
			io_u_mark_submit(td, ret);

			ld->queued -= ret;
			ring_inc(ld, &ld->tail, ret);
			ret = 0;
			wait_start = 0;
		} else if (ret == -EINTR || !ret) {
			if (!ret)
				io_u_mark_submit(td, ret);
			wait_start = 0;
			continue;
		} else if (ret == -EAGAIN) {
			/*
			 * If we get EAGAIN, we should break out without
			 * error and let the upper layer reap some
			 * events for us. If we have no queued IO, we
			 * must loop here. If we loop for more than 30s,
			 * just error out, something must be buggy in the
			 * IO path.
			 */
			if (ld->queued) {
				ret = 0;
				break;
			}
			if (!wait_start) {
				fio_gettime(&ts, NULL);
				wait_start = 1;
			} else if (mtime_since_now(&ts) > 30000) {
				log_err("fio: aio appears to be stalled, giving up\n");
				break;
			}
			usleep(1);
			continue;
		} else if (ret == -ENOMEM) {
			/*
			 * If we get -ENOMEM, reap events if we can. If
			 * we cannot, treat it as a fatal event since there's
			 * nothing we can do about it.
			 */
			if (ld->queued)
				ret = 0;
			break;
		} else
			break;
	} while (ld->queued);

	return ret;
}

static int fio_libaio_cancel(struct thread_data *td, struct io_u *io_u)
{
	struct libaio_data *ld = td->io_ops_data;

	return io_cancel(ld->aio_ctx, &io_u->iocb, ld->aio_events);
}

static void fio_libaio_cleanup(struct thread_data *td)
{
	struct libaio_data *ld = td->io_ops_data;

	if (ld) {
		/*
		 * Work-around to avoid huge RCU stalls at exit time. If we
		 * don't do this here, then it'll be torn down by exit_aio().
		 * But for that case we can parallelize the freeing, thus
		 * speeding it up a lot.
		 */
		if (!(td->flags & TD_F_CHILD))
			io_destroy(ld->aio_ctx);
		free(ld->aio_events);
		free(ld->iocbs);
		free(ld->io_us);
		free(ld);
	}
}

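/*
 * Create the kernel aio context sized to the job's iodepth. Errors from
 * io_queue_init() are reported via td_verror().
 */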
static int fio_libaio_post_init(struct thread_data *td)
{
	struct libaio_data *ld = td->io_ops_data;
	int err;

	err = io_queue_init(td->o.iodepth, &ld->aio_ctx);
	if (err) {
		td_verror(td, -err, "io_queue_init");
		return 1;
	}

	return 0;
}

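/*
 * Allocate the per-thread engine data and the submission/completion
 * arrays sized to the iodepth, then reject the configuration if
 * cmdprio_percentage is combined with the global prio/prioclass options.
 */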
static int fio_libaio_init(struct thread_data *td)
{
	struct libaio_data *ld;
	struct thread_options *to = &td->o;
	struct libaio_options *o = td->eo;

	ld = calloc(1, sizeof(*ld));

	ld->entries = td->o.iodepth;
	ld->is_pow2 = is_power_of_2(ld->entries);
	ld->aio_events = calloc(ld->entries, sizeof(struct io_event));
	ld->iocbs = calloc(ld->entries, sizeof(struct iocb *));
	ld->io_us = calloc(ld->entries, sizeof(struct io_u *));

	td->io_ops_data = ld;
	/*
	 * Check for option conflicts
	 */
	if ((fio_option_is_set(to, ioprio) || fio_option_is_set(to, ioprio_class)) &&
	    o->cmdprio_percentage != 0) {
		log_err("%s: cmdprio_percentage option and mutually exclusive "
			"prio or prioclass option is set, exiting\n", to->name);
		td_verror(td, EINVAL, "fio_libaio_init");
		return 1;
	}
	return 0;
}

FIO_STATIC struct ioengine_ops ioengine = {
	.name = "libaio",
	.version = FIO_IOOPS_VERSION,
	.flags = FIO_ASYNCIO_SYNC_TRIM,
	.init = fio_libaio_init,
	.post_init = fio_libaio_post_init,
	.prep = fio_libaio_prep,
	.queue = fio_libaio_queue,
	.commit = fio_libaio_commit,
	.cancel = fio_libaio_cancel,
	.getevents = fio_libaio_getevents,
	.event = fio_libaio_event,
	.cleanup = fio_libaio_cleanup,
	.open_file = generic_open_file,
	.close_file = generic_close_file,
	.get_file_size = generic_get_file_size,
	.options = options,
	.option_struct_size = sizeof(struct libaio_options),
};

static void fio_init fio_libaio_register(void)
{
	register_ioengine(&ioengine);
}

static void fio_exit fio_libaio_unregister(void)
{
	unregister_ioengine(&ioengine);
}