/*
 * IO engine using the Linux native aio interface.
 */
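/*
 * For orientation, the block below (disabled with #if 0, not part of fio)
 * is a minimal sketch of the bare libaio pattern this engine wraps:
 * initialize a context, prep an iocb, submit it, reap the completion and
 * tear down. It assumes a readable placeholder file, linking with -laio,
 * and abbreviates error handling.
 */
#if 0
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include <libaio.h>

int main(int argc, char *argv[])
{
        io_context_t ctx = 0;
        struct iocb iocb, *iocbs[1] = { &iocb };
        struct io_event event;
        char *buf;
        int fd, ret;

        fd = open(argc > 1 ? argv[1] : "/etc/hostname", O_RDONLY);
        buf = malloc(4096);
        if (fd < 0 || !buf)
                return 1;

        /* one in-flight request is plenty for this sketch */
        if (io_queue_init(1, &ctx))
                return 1;

        /* describe a 4k read at offset 0, then hand it to the kernel */
        io_prep_pread(&iocb, fd, buf, 4096, 0);
        ret = io_submit(ctx, 1, iocbs);
        if (ret != 1)
                return 1;

        /* block until exactly one completion can be reaped */
        ret = io_getevents(ctx, 1, 1, &event, NULL);
        printf("reaped %d event(s), res=%ld\n", ret, (long) event.res);

        io_destroy(ctx);
        close(fd);
        free(buf);
        return 0;
}
#endif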
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <assert.h>
#include <libaio.h>

#include "../fio.h"

struct libaio_data {
        io_context_t aio_ctx;
        struct io_event *aio_events;
        struct iocb **iocbs;
        struct io_u **io_us;
        int iocbs_nr;
};
struct libaio_options {
        struct thread_data *td;
        unsigned int userspace_reap;
};
static struct fio_option options[] = {
        {
                .name     = "userspace_reap",
                .lname    = "Libaio userspace reaping",
                .type     = FIO_OPT_STR_SET,
                .off1     = offsetof(struct libaio_options, userspace_reap),
                .help     = "Use alternative user-space reap implementation",
                .category = FIO_OPT_C_IO,
        },
        {
                .name     = NULL,
        },
};
static int fio_libaio_prep(struct thread_data fio_unused *td, struct io_u *io_u)
{
        struct fio_file *f = io_u->file;

        if (io_u->ddir == DDIR_READ)
                io_prep_pread(&io_u->iocb, f->fd, io_u->xfer_buf, io_u->xfer_buflen, io_u->offset);
        else if (io_u->ddir == DDIR_WRITE)
                io_prep_pwrite(&io_u->iocb, f->fd, io_u->xfer_buf, io_u->xfer_buflen, io_u->offset);
        else if (ddir_sync(io_u->ddir))
                io_prep_fsync(&io_u->iocb, f->fd);

        return 0;
}
static struct io_u *fio_libaio_event(struct thread_data *td, int event)
{
        struct libaio_data *ld = td->io_ops->data;
        struct io_event *ev;
        struct io_u *io_u;

        ev = ld->aio_events + event;
        io_u = container_of(ev->obj, struct io_u, iocb);

        if (ev->res != io_u->xfer_buflen) {
                if (ev->res > io_u->xfer_buflen)
                        io_u->error = -ev->res;
                else
                        io_u->resid = io_u->xfer_buflen - ev->res;
        } else
                io_u->error = 0;

        return io_u;
}
struct aio_ring {
        unsigned id;              /** kernel internal index number */
        unsigned nr;              /** number of io_events */
        unsigned head;
        unsigned tail;
        unsigned magic;
        unsigned compat_features;
        unsigned incompat_features;
        unsigned header_length;   /** size of aio_ring */

        struct io_event events[0];
};
#define AIO_RING_MAGIC  0xa10a10a1
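/*
 * Note: the io_context_t handle handed back by the kernel is the user-space
 * address of this completion ring. When the magic matches, completed events
 * can be copied straight out of the ring and the head advanced from user
 * space, which is what lets userspace_reap skip the io_getevents() syscall
 * for completions that are already visible in the ring.
 */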
static int user_io_getevents(io_context_t aio_ctx, unsigned int max,
                             struct io_event *events)
{
        long i = 0;
        unsigned head;
        struct aio_ring *ring = (struct aio_ring *) aio_ctx;

        while (i < max) {
                head = ring->head;
                if (head == ring->tail) {
                        /* There are no more completions */
                        break;
                } else {
                        /* There is another completion to reap */
                        events[i] = ring->events[head];
                        read_barrier();
                        ring->head = (head + 1) % ring->nr;
                        i++;
                }
        }

        return i;
}
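/*
 * Reap completions: the user-space path above is only taken when no minimum
 * number of events is required (actual_min == 0, i.e. iodepth_batch_complete
 * is 0) and the context looks like a valid ring; otherwise fall back to the
 * io_getevents() syscall.
 */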
static int fio_libaio_getevents(struct thread_data *td, unsigned int min,
                                unsigned int max, struct timespec *t)
{
        struct libaio_data *ld = td->io_ops->data;
        struct libaio_options *o = td->eo;
        unsigned actual_min = td->o.iodepth_batch_complete == 0 ? 0 : min;
        int r, events = 0;

        do {
                if (o->userspace_reap == 1
                    && actual_min == 0
                    && ((struct aio_ring *)(ld->aio_ctx))->magic
                                == AIO_RING_MAGIC) {
                        r = user_io_getevents(ld->aio_ctx, max,
                                ld->aio_events + events);
                } else {
                        r = io_getevents(ld->aio_ctx, actual_min,
                                max, ld->aio_events + events, t);
                }
                if (r >= 0)
                        events += r;
                else if (r == -EAGAIN)
                        usleep(100);
        } while (events < min);

        return r < 0 ? r : events;
}
static int fio_libaio_queue(struct thread_data *td, struct io_u *io_u)
{
        struct libaio_data *ld = td->io_ops->data;

        fio_ro_check(td, io_u);

        if (ld->iocbs_nr == (int) td->o.iodepth)
                return FIO_Q_BUSY;

        /*
         * fsync is tricky, since it can fail and we need to do it
         * serialized with other IO. The reason is that Linux doesn't
         * support aio fsync yet. So return busy for the case where we
         * have pending IO, to let fio complete those first.
         */
        if (ddir_sync(io_u->ddir)) {
                if (ld->iocbs_nr)
                        return FIO_Q_BUSY;
                do_io_u_sync(td, io_u);
                return FIO_Q_COMPLETED;
        }

        if (io_u->ddir == DDIR_TRIM) {
                if (ld->iocbs_nr)
                        return FIO_Q_BUSY;
                do_io_u_trim(td, io_u);
                return FIO_Q_COMPLETED;
        }

        ld->iocbs[ld->iocbs_nr] = &io_u->iocb;
        ld->io_us[ld->iocbs_nr] = io_u;
        ld->iocbs_nr++;
        return FIO_Q_QUEUED;
}
static void fio_libaio_queued(struct thread_data *td, struct io_u **io_us,
                              unsigned int nr)
{
        struct timeval now;
        unsigned int i;

        if (!fio_fill_issue_time(td))
                return;

        fio_gettime(&now, NULL);
        for (i = 0; i < nr; i++) {
                struct io_u *io_u = io_us[i];

                memcpy(&io_u->issue_time, &now, sizeof(now));
                io_u_queued(td, io_u);
        }
}
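/*
 * ->queue() above only stages iocbs in the local arrays; the commit hook is
 * where the staged batch is actually pushed to the kernel with io_submit(),
 * retrying on EAGAIN/EINTR until everything staged has been accepted.
 */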
static int fio_libaio_commit(struct thread_data *td)
{
        struct libaio_data *ld = td->io_ops->data;
        struct iocb **iocbs;
        struct io_u **io_us;
        int ret;

        if (!ld->iocbs_nr)
                return 0;

        io_us = ld->io_us;
        iocbs = ld->iocbs;
        do {
                ret = io_submit(ld->aio_ctx, ld->iocbs_nr, iocbs);
                if (ret > 0) {
                        fio_libaio_queued(td, io_us, ret);
                        io_u_mark_submit(td, ret);
                        ld->iocbs_nr -= ret;
                        io_us += ret;
                        iocbs += ret;
                        ret = 0;
                } else if (!ret || ret == -EAGAIN || ret == -EINTR) {
                        if (!ret)
                                io_u_mark_submit(td, ret);
                        continue;
                } else
                        break;
        } while (ld->iocbs_nr);

        return ret;
}
static int fio_libaio_cancel(struct thread_data *td, struct io_u *io_u)
{
        struct libaio_data *ld = td->io_ops->data;

        return io_cancel(ld->aio_ctx, &io_u->iocb, ld->aio_events);
}
static void fio_libaio_cleanup(struct thread_data *td)
{
        struct libaio_data *ld = td->io_ops->data;

        if (ld) {
                io_destroy(ld->aio_ctx);
                free(ld->aio_events);
                free(ld->iocbs);
                free(ld->io_us);
                free(ld);
        }
}
static int fio_libaio_init(struct thread_data *td)
{
        struct libaio_data *ld = malloc(sizeof(*ld));
        struct libaio_options *o = td->eo;
        int err = 0;

        memset(ld, 0, sizeof(*ld));

        /*
         * First try passing in INT_MAX for the queue depth, since we don't
         * care about the user ring. If that fails, the kernel is too old
         * and we need the right depth.
         */
        if (!o->userspace_reap)
                err = io_queue_init(INT_MAX, &ld->aio_ctx);
        if (o->userspace_reap || err == -EINVAL)
                err = io_queue_init(td->o.iodepth, &ld->aio_ctx);
        if (err) {
                td_verror(td, -err, "io_queue_init");
                log_err("fio: check /proc/sys/fs/aio-max-nr\n");
                free(ld);
                return 1;
        }

        ld->aio_events = malloc(td->o.iodepth * sizeof(struct io_event));
        memset(ld->aio_events, 0, td->o.iodepth * sizeof(struct io_event));
        ld->iocbs = malloc(td->o.iodepth * sizeof(struct iocb *));
        memset(ld->iocbs, 0, td->o.iodepth * sizeof(struct iocb *));
        ld->io_us = malloc(td->o.iodepth * sizeof(struct io_u *));
        memset(ld->io_us, 0, td->o.iodepth * sizeof(struct io_u *));

        td->io_ops->data = ld;
        return 0;
}
static struct ioengine_ops ioengine = {
        .name               = "libaio",
        .version            = FIO_IOOPS_VERSION,
        .init               = fio_libaio_init,
        .prep               = fio_libaio_prep,
        .queue              = fio_libaio_queue,
        .commit             = fio_libaio_commit,
        .cancel             = fio_libaio_cancel,
        .getevents          = fio_libaio_getevents,
        .event              = fio_libaio_event,
        .cleanup            = fio_libaio_cleanup,
        .open_file          = generic_open_file,
        .close_file         = generic_close_file,
        .get_file_size      = generic_get_file_size,
        .options            = options,
        .option_struct_size = sizeof(struct libaio_options),
};
static void fio_init fio_libaio_register(void)
{
        register_ioengine(&ioengine);
}
static void fio_exit fio_libaio_unregister(void)
{
        unregister_ioengine(&ioengine);
}
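/*
 * For reference, a job file along these lines would exercise this engine,
 * including the userspace_reap option defined above (illustrative sketch,
 * not from the original source; the filename is a placeholder, and
 * iodepth_batch_complete=0 is what allows the user-space reap path to run):
 *
 * [libaio-randread]
 * ioengine=libaio
 * filename=/dev/sdX
 * direct=1
 * rw=randread
 * bs=4k
 * iodepth=32
 * iodepth_batch_complete=0
 * userspace_reap
 * runtime=30
 * time_based
 */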