/*
 * libaio engine
 *
 * IO engine using the Linux native aio interface.
 *
 */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <assert.h>

#include "../fio.h"
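
/*
 * Illustrative usage (not part of the engine): a minimal fio job that
 * selects this engine. The option names are real fio options; the
 * values and the device path are placeholder examples only.
 *
 *	[libaio-test]
 *	ioengine=libaio
 *	rw=randread
 *	direct=1
 *	iodepth=32
 *	userspace_reap=1
 *	filename=/dev/sdX
 */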

#ifdef FIO_HAVE_LIBAIO

#define ev_to_iou(ev)	(struct io_u *) ((unsigned long) (ev)->obj)
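
/*
 * Per-thread engine state: the kernel AIO context, the array used for
 * reaping completion events, and the staging arrays of iocbs/io_us that
 * have been queued but not yet pushed to the kernel with io_submit().
 */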
struct libaio_data {
	io_context_t aio_ctx;
	struct io_event *aio_events;
	struct iocb **iocbs;
	struct io_u **io_us;
	int iocbs_nr;
};

struct libaio_options {
	struct thread_data *td;
	unsigned int userspace_reap;
};

static struct fio_option options[] = {
	{
		.name	= "userspace_reap",
		.lname	= "Libaio userspace reaping",
		.type	= FIO_OPT_STR_SET,
		.off1	= offsetof(struct libaio_options, userspace_reap),
		.help	= "Use alternative user-space reap implementation",
		.category = FIO_OPT_C_IO,
	},
	{
		.name	= NULL,
	},
};
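
/*
 * Prep runs once per io_u before it is queued: fill in the embedded
 * iocb using the libaio helper that matches the data direction.
 */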
static int fio_libaio_prep(struct thread_data fio_unused *td, struct io_u *io_u)
{
	struct fio_file *f = io_u->file;

	if (io_u->ddir == DDIR_READ)
		io_prep_pread(&io_u->iocb, f->fd, io_u->xfer_buf, io_u->xfer_buflen, io_u->offset);
	else if (io_u->ddir == DDIR_WRITE)
		io_prep_pwrite(&io_u->iocb, f->fd, io_u->xfer_buf, io_u->xfer_buflen, io_u->offset);
	else if (ddir_sync(io_u->ddir))
		io_prep_fsync(&io_u->iocb, f->fd);

	return 0;
}
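
/*
 * Map a reaped io_event back to its io_u. On success, ev->res holds the
 * number of bytes transferred; a value larger than the requested length
 * is a negative errno in disguise, while a smaller value is a short
 * transfer and is recorded as a residual.
 */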
static struct io_u *fio_libaio_event(struct thread_data *td, int event)
{
	struct libaio_data *ld = td->io_ops->data;
	struct io_event *ev;
	struct io_u *io_u;

	ev = ld->aio_events + event;
	io_u = ev_to_iou(ev);

	if (ev->res != io_u->xfer_buflen) {
		if (ev->res > io_u->xfer_buflen)
			io_u->error = -ev->res;
		else
			io_u->resid = io_u->xfer_buflen - ev->res;
	} else
		io_u->error = 0;

	return io_u;
}
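
/*
 * Layout of the completion ring that the kernel maps into user space;
 * an io_context_t is really a pointer to this structure. The kernel
 * does not export this definition in any header, so the magic number
 * below is checked at runtime before the layout is trusted.
 */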
struct aio_ring {
	unsigned id;		 /** kernel internal index number */
	unsigned nr;		 /** number of io_events */
	unsigned head;
	unsigned tail;

	unsigned magic;
	unsigned compat_features;
	unsigned incompat_features;
	unsigned header_length;	/** size of aio_ring */

	struct io_event events[0];
};

#define AIO_RING_MAGIC	0xa10a10a1
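
/*
 * Reap up to 'max' completed events straight from the ring, without
 * entering the kernel via io_getevents(). Returns the number reaped.
 */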
static int user_io_getevents(io_context_t aio_ctx, unsigned int max,
			     struct io_event *events)
{
	long i = 0;
	unsigned head;
	struct aio_ring *ring = (struct aio_ring*) aio_ctx;

	while (i < max) {
		head = ring->head;

		if (head == ring->tail) {
			/* There are no more completions */
			break;
		} else {
			/* There is another completion to reap */
			events[i] = ring->events[head];
			read_barrier();
			ring->head = (head + 1) % ring->nr;
			i++;
		}
	}

	return i;
}
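
/*
 * Wait for at least 'min' and at most 'max' completions. User-space
 * reaping is only attempted when we are not required to block
 * (actual_min == 0) and the ring magic confirms the expected layout;
 * otherwise fall back to the io_getevents() system call.
 */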
static int fio_libaio_getevents(struct thread_data *td, unsigned int min,
				unsigned int max, struct timespec *t)
{
	struct libaio_data *ld = td->io_ops->data;
	struct libaio_options *o = td->eo;
	unsigned actual_min = td->o.iodepth_batch_complete == 0 ? 0 : min;
	int r, events = 0;

	do {
		if (o->userspace_reap == 1
		    && actual_min == 0
		    && ((struct aio_ring *)(ld->aio_ctx))->magic
				== AIO_RING_MAGIC) {
			r = user_io_getevents(ld->aio_ctx, max,
				ld->aio_events + events);
		} else {
			r = io_getevents(ld->aio_ctx, actual_min,
				max, ld->aio_events + events, t);
		}
		if (r >= 0)
			events += r;
		else if (r == -EAGAIN)
			usleep(100);
	} while (events < min);

	return r < 0 ? r : events;
}
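
/*
 * Stage one io_u for submission. Returns FIO_Q_QUEUED once it is added
 * to the batch, FIO_Q_BUSY when the batch is full (or when a sync/trim
 * must wait for in-flight I/O to drain), and FIO_Q_COMPLETED when the
 * request was carried out synchronously right here.
 */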
static int fio_libaio_queue(struct thread_data *td, struct io_u *io_u)
{
	struct libaio_data *ld = td->io_ops->data;

	fio_ro_check(td, io_u);

	if (ld->iocbs_nr == (int) td->o.iodepth)
		return FIO_Q_BUSY;

	/*
	 * fsync is tricky: it can fail, and it needs to be serialized
	 * with other I/O since Linux doesn't support async fsync yet.
	 * So return busy while we have pending I/O, to let fio complete
	 * that first.
	 */
	if (ddir_sync(io_u->ddir)) {
		if (ld->iocbs_nr)
			return FIO_Q_BUSY;

		do_io_u_sync(td, io_u);
		return FIO_Q_COMPLETED;
	}

	if (io_u->ddir == DDIR_TRIM) {
		if (ld->iocbs_nr)
			return FIO_Q_BUSY;

		do_io_u_trim(td, io_u);
		return FIO_Q_COMPLETED;
	}

	ld->iocbs[ld->iocbs_nr] = &io_u->iocb;
	ld->io_us[ld->iocbs_nr] = io_u;
	ld->iocbs_nr++;
	return FIO_Q_QUEUED;
}
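
/*
 * Stamp the issue time on each io_u that io_submit() accepted, so that
 * completion latency is measured from actual submission.
 */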
static void fio_libaio_queued(struct thread_data *td, struct io_u **io_us,
			      unsigned int nr)
{
	struct timeval now;
	unsigned int i;

	if (!fio_fill_issue_time(td))
		return;

	fio_gettime(&now, NULL);

	for (i = 0; i < nr; i++) {
		struct io_u *io_u = io_us[i];

		memcpy(&io_u->issue_time, &now, sizeof(now));
		io_u_queued(td, io_u);
	}
}
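
/*
 * Push the staged iocbs to the kernel. io_submit() may accept only part
 * of a batch, so loop: advance past what was taken, retry on EAGAIN and
 * EINTR, and stop on any other error.
 */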
static int fio_libaio_commit(struct thread_data *td)
{
	struct libaio_data *ld = td->io_ops->data;
	struct iocb **iocbs;
	struct io_u **io_us;
	int ret;

	if (!ld->iocbs_nr)
		return 0;

	io_us = ld->io_us;
	iocbs = ld->iocbs;
	do {
		ret = io_submit(ld->aio_ctx, ld->iocbs_nr, iocbs);
		if (ret > 0) {
			fio_libaio_queued(td, io_us, ret);
			io_u_mark_submit(td, ret);
			ld->iocbs_nr -= ret;
			io_us += ret;
			iocbs += ret;
			ret = 0;
		} else if (!ret || ret == -EAGAIN || ret == -EINTR) {
			if (!ret)
				io_u_mark_submit(td, ret);
			continue;
		} else
			break;
	} while (ld->iocbs_nr);

	return ret;
}
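
/*
 * Best-effort cancellation of a single in-flight request.
 */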
static int fio_libaio_cancel(struct thread_data *td, struct io_u *io_u)
{
	struct libaio_data *ld = td->io_ops->data;

	return io_cancel(ld->aio_ctx, &io_u->iocb, ld->aio_events);
}

static void fio_libaio_cleanup(struct thread_data *td)
{
	struct libaio_data *ld = td->io_ops->data;

	if (ld) {
		io_destroy(ld->aio_ctx);
		free(ld->aio_events);
		free(ld->iocbs);
		free(ld->io_us);
		free(ld);
		td->io_ops->data = NULL;
	}
}

static int fio_libaio_init(struct thread_data *td)
{
	struct libaio_data *ld = malloc(sizeof(*ld));
	int err;

	memset(ld, 0, sizeof(*ld));

	err = io_queue_init(td->o.iodepth, &ld->aio_ctx);
	if (err) {
		td_verror(td, -err, "io_queue_init");
		log_err("fio: check /proc/sys/fs/aio-max-nr\n");
		free(ld);
		return 1;
	}

	ld->aio_events = malloc(td->o.iodepth * sizeof(struct io_event));
	memset(ld->aio_events, 0, td->o.iodepth * sizeof(struct io_event));
	ld->iocbs = malloc(td->o.iodepth * sizeof(struct iocb *));
	memset(ld->iocbs, 0, td->o.iodepth * sizeof(struct iocb *));
	ld->io_us = malloc(td->o.iodepth * sizeof(struct io_u *));
	memset(ld->io_us, 0, td->o.iodepth * sizeof(struct io_u *));

	td->io_ops->data = ld;
	return 0;
}
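
/*
 * Registration table hooking this engine's callbacks into fio core.
 */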
static struct ioengine_ops ioengine = {
	.name			= "libaio",
	.version		= FIO_IOOPS_VERSION,
	.init			= fio_libaio_init,
	.prep			= fio_libaio_prep,
	.queue			= fio_libaio_queue,
	.commit			= fio_libaio_commit,
	.cancel			= fio_libaio_cancel,
	.getevents		= fio_libaio_getevents,
	.event			= fio_libaio_event,
	.cleanup		= fio_libaio_cleanup,
	.open_file		= generic_open_file,
	.close_file		= generic_close_file,
	.get_file_size		= generic_get_file_size,
	.options		= options,
	.option_struct_size	= sizeof(struct libaio_options),
};

#else /* FIO_HAVE_LIBAIO */

/*
 * When we have a proper configure system in place, we simply won't build
 * and install this IO engine. For now, install a crippled version that
 * just complains and fails to load.
 */
static int fio_libaio_init(struct thread_data fio_unused *td)
{
	log_err("fio: libaio not available\n");
	return 1;
}

static struct ioengine_ops ioengine = {
	.name		= "libaio",
	.version	= FIO_IOOPS_VERSION,
	.init		= fio_libaio_init,
};

#endif

static void fio_init fio_libaio_register(void)
{
	register_ioengine(&ioengine);
}

static void fio_exit fio_libaio_unregister(void)
{
	unregister_ioengine(&ioengine);
}