 * IO engine using the Linux native aio interface.
#ifdef FIO_HAVE_LIBAIO
#define ev_to_iou(ev)	(struct io_u *) ((unsigned long) (ev)->obj)
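/*
 * The cast above maps a completed io_event back to its io_u: ev->obj is the
 * iocb pointer passed to io_submit(), and the conversion assumes the iocb is
 * embedded at the start of struct io_u, so both share the same address.
 */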
	struct io_event *aio_events;
struct libaio_options {
	struct thread_data *td;
	unsigned int userspace_reap;
static struct fio_option options[] = {
		.name	= "userspace_reap",
		.type	= FIO_OPT_STR_SET,
		.off1	= offsetof(struct libaio_options, userspace_reap),
		.help	= "Use alternative user-space reap implementation",
		.category = FIO_OPT_G_IO_ENG,
static int fio_libaio_prep(struct thread_data fio_unused *td, struct io_u *io_u)
	struct fio_file *f = io_u->file;
	if (io_u->ddir == DDIR_READ)
		io_prep_pread(&io_u->iocb, f->fd, io_u->xfer_buf, io_u->xfer_buflen, io_u->offset);
	else if (io_u->ddir == DDIR_WRITE)
		io_prep_pwrite(&io_u->iocb, f->fd, io_u->xfer_buf, io_u->xfer_buflen, io_u->offset);
	else if (ddir_sync(io_u->ddir))
		io_prep_fsync(&io_u->iocb, f->fd);
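/*
 * prep only fills in the iocb for this request; nothing is handed to the
 * kernel yet. The iocb is added to a batch in fio_libaio_queue() and
 * submitted later by fio_libaio_commit() via io_submit().
 */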
static struct io_u *fio_libaio_event(struct thread_data *td, int event)
	struct libaio_data *ld = td->io_ops->data;
	ev = ld->aio_events + event;
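	/*
	 * On failure libaio places the negative errno in ev->res, which, read
	 * as an unsigned count, exceeds any transfer length. That is why a
	 * value larger than xfer_buflen is treated as an error below, while a
	 * smaller value is recorded as a short transfer (residual).
	 */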
	if (ev->res != io_u->xfer_buflen) {
		if (ev->res > io_u->xfer_buflen)
			io_u->error = -ev->res;
		else
			io_u->resid = io_u->xfer_buflen - ev->res;
	unsigned id;		 /** kernel internal index number */
	unsigned nr;		 /** number of io_events */
	unsigned compat_features;
	unsigned incompat_features;
	unsigned header_length;	/** size of aio_ring */
	struct io_event events[0];
#define AIO_RING_MAGIC	0xa10a10a1
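/*
 * The io_context_t handle returned by the kernel is effectively a pointer to
 * this completion ring, mapped into the process. With userspace_reap enabled,
 * completions are copied straight out of ring->events[] and ring->head is
 * advanced in user space, avoiding the io_getevents() syscall when events are
 * already available. The magic field is checked against AIO_RING_MAGIC first,
 * so the engine falls back to the syscall if the ring does not match this
 * layout.
 */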
static int user_io_getevents(io_context_t aio_ctx, unsigned int max,
			     struct io_event *events)
	struct aio_ring *ring = (struct aio_ring*) aio_ctx;
		if (head == ring->tail) {
			/* There are no more completions */
			/* There is another completion to reap */
			events[i] = ring->events[head];
			ring->head = (head + 1) % ring->nr;
static int fio_libaio_getevents(struct thread_data *td, unsigned int min,
				unsigned int max, struct timespec *t)
	struct libaio_data *ld = td->io_ops->data;
	struct libaio_options *o = td->eo;
	unsigned actual_min = td->o.iodepth_batch_complete == 0 ? 0 : min;
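	/*
	 * iodepth_batch_complete=0 means "reap whatever has completed", so the
	 * minimum is forced to 0 and the reap below returns immediately rather
	 * than blocking for min completions.
	 */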
		if (o->userspace_reap == 1
		    && ((struct aio_ring *)(ld->aio_ctx))->magic
			r = user_io_getevents(ld->aio_ctx, max,
					      ld->aio_events + events);
			r = io_getevents(ld->aio_ctx, actual_min,
					 max, ld->aio_events + events, t);
		else if (r == -EAGAIN)
	} while (events < min);
	return r < 0 ? r : events;
static int fio_libaio_queue(struct thread_data *td, struct io_u *io_u)
	struct libaio_data *ld = td->io_ops->data;
	fio_ro_check(td, io_u);
	if (ld->iocbs_nr == (int) td->o.iodepth)
	 * fsync is tricky, since it can fail and we need to do it
	 * serialized with other I/O. The reason is that Linux doesn't
	 * support aio fsync yet. So return busy for the case where we
	 * have pending I/O, to let fio complete those first.
	if (ddir_sync(io_u->ddir)) {
		do_io_u_sync(td, io_u);
		return FIO_Q_COMPLETED;
	if (io_u->ddir == DDIR_TRIM) {
		do_io_u_trim(td, io_u);
		return FIO_Q_COMPLETED;
	ld->iocbs[ld->iocbs_nr] = &io_u->iocb;
	ld->io_us[ld->iocbs_nr] = io_u;
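	/*
	 * Reads and writes are not submitted here: the iocb is only appended
	 * to the pending batch and FIO_Q_QUEUED is returned, leaving the
	 * actual io_submit() to fio_libaio_commit().
	 */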
static void fio_libaio_queued(struct thread_data *td, struct io_u **io_us,
	if (!fio_fill_issue_time(td))
	fio_gettime(&now, NULL);
	for (i = 0; i < nr; i++) {
		struct io_u *io_u = io_us[i];
		memcpy(&io_u->issue_time, &now, sizeof(now));
		io_u_queued(td, io_u);
static int fio_libaio_commit(struct thread_data *td)
	struct libaio_data *ld = td->io_ops->data;
		ret = io_submit(ld->aio_ctx, ld->iocbs_nr, iocbs);
			fio_libaio_queued(td, io_us, ret);
			io_u_mark_submit(td, ret);
		} else if (!ret || ret == -EAGAIN || ret == -EINTR) {
			io_u_mark_submit(td, ret);
	} while (ld->iocbs_nr);
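/*
 * io_submit() may accept only part of the batch, so commit loops until every
 * queued iocb has been handed to the kernel, marking the accepted ones as
 * issued on each pass. A return of 0, -EAGAIN or -EINTR is treated as a
 * transient condition and simply retried.
 */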
static int fio_libaio_cancel(struct thread_data *td, struct io_u *io_u)
	struct libaio_data *ld = td->io_ops->data;
	return io_cancel(ld->aio_ctx, &io_u->iocb, ld->aio_events);
static void fio_libaio_cleanup(struct thread_data *td)
	struct libaio_data *ld = td->io_ops->data;
		io_destroy(ld->aio_ctx);
		free(ld->aio_events);
static int fio_libaio_init(struct thread_data *td)
	struct libaio_data *ld = malloc(sizeof(*ld));
	memset(ld, 0, sizeof(*ld));
	err = io_queue_init(td->o.iodepth, &ld->aio_ctx);
		td_verror(td, -err, "io_queue_init");
		log_err("fio: check /proc/sys/fs/aio-max-nr\n");
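		/*
		 * io_queue_init() typically fails with -EAGAIN when the
		 * system-wide limit on outstanding aio requests is exhausted;
		 * raising fs.aio-max-nr (via sysctl or the /proc file above)
		 * usually resolves it. The value needed depends on the job's
		 * iodepth and number of threads.
		 */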
	ld->aio_events = malloc(td->o.iodepth * sizeof(struct io_event));
	memset(ld->aio_events, 0, td->o.iodepth * sizeof(struct io_event));
	ld->iocbs = malloc(td->o.iodepth * sizeof(struct iocb *));
	memset(ld->iocbs, 0, td->o.iodepth * sizeof(struct iocb *));
	ld->io_us = malloc(td->o.iodepth * sizeof(struct io_u *));
	memset(ld->io_us, 0, td->o.iodepth * sizeof(struct io_u *));
	td->io_ops->data = ld;
static struct ioengine_ops ioengine = {
	.version		= FIO_IOOPS_VERSION,
	.init			= fio_libaio_init,
	.prep			= fio_libaio_prep,
	.queue			= fio_libaio_queue,
	.commit			= fio_libaio_commit,
	.cancel			= fio_libaio_cancel,
	.getevents		= fio_libaio_getevents,
	.event			= fio_libaio_event,
	.cleanup		= fio_libaio_cleanup,
	.open_file		= generic_open_file,
	.close_file		= generic_close_file,
	.get_file_size		= generic_get_file_size,
	.option_struct_size	= sizeof(struct libaio_options),
#else /* FIO_HAVE_LIBAIO */
 * When we have a proper configure system in place, we simply won't build
 * and install this io engine. For now, install a crippled version that
 * just complains and fails to load.
static int fio_libaio_init(struct thread_data fio_unused *td)
	log_err("fio: libaio not available\n");
static struct ioengine_ops ioengine = {
	.version	= FIO_IOOPS_VERSION,
	.init		= fio_libaio_init,
static void fio_init fio_libaio_register(void)
	register_ioengine(&ioengine);
static void fio_exit fio_libaio_unregister(void)
	unregister_ioengine(&ioengine);