engines/libaio.c
/*
 * libaio engine
 *
 * IO engine using the Linux native aio interface.
 *
 */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <assert.h>
#include <limits.h>	/* INT_MAX, used in fio_libaio_init() */
#include <libaio.h>

#include "../fio.h"

struct libaio_data {
	io_context_t aio_ctx;
	struct io_event *aio_events;
	struct iocb **iocbs;
	struct io_u **io_us;
	int iocbs_nr;
};

struct libaio_options {
	struct thread_data *td;
	unsigned int userspace_reap;
};

static struct fio_option options[] = {
	{
		.name = "userspace_reap",
		.type = FIO_OPT_STR_SET,
		.off1 = offsetof(struct libaio_options, userspace_reap),
		.help = "Use alternative user-space reap implementation",
		.category = FIO_OPT_C_ENGINE,
		.group = FIO_OPT_G_LIBAIO,
	},
	{
		.name = NULL,
	},
};
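/*
 * Example job file section using this engine. This is an illustrative
 * sketch; the section name and option values are arbitrary:
 *
 *	[aio-randread]
 *	ioengine=libaio
 *	rw=randread
 *	iodepth=32
 *	direct=1
 *	userspace_reap=1
 */

/*
 * Fill in the iocb embedded in the io_u for the pending data direction.
 * Actual submission happens later, from fio_libaio_commit().
 */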
static int fio_libaio_prep(struct thread_data fio_unused *td, struct io_u *io_u)
{
	struct fio_file *f = io_u->file;

	if (io_u->ddir == DDIR_READ)
		io_prep_pread(&io_u->iocb, f->fd, io_u->xfer_buf, io_u->xfer_buflen, io_u->offset);
	else if (io_u->ddir == DDIR_WRITE)
		io_prep_pwrite(&io_u->iocb, f->fd, io_u->xfer_buf, io_u->xfer_buflen, io_u->offset);
	else if (ddir_sync(io_u->ddir))
		io_prep_fsync(&io_u->iocb, f->fd);

	return 0;
}
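/*
 * Map a completed io_event back to its io_u. ev->res holds the byte
 * count on success; a value larger than the requested transfer length
 * is a negative error code in disguise, a smaller one a short transfer.
 */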
static struct io_u *fio_libaio_event(struct thread_data *td, int event)
{
	struct libaio_data *ld = td->io_ops->data;
	struct io_event *ev;
	struct io_u *io_u;

	ev = ld->aio_events + event;
	io_u = container_of(ev->obj, struct io_u, iocb);

	if (ev->res != io_u->xfer_buflen) {
		if (ev->res > io_u->xfer_buflen)
			io_u->error = -ev->res;
		else
			io_u->resid = io_u->xfer_buflen - ev->res;
	} else
		io_u->error = 0;

	return io_u;
}
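/*
 * Userspace view of the AIO completion ring that the kernel maps behind
 * the io_context_t returned by io_setup(). The field layout must match
 * the kernel's struct aio_ring.
 */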
struct aio_ring {
	unsigned id;		/** kernel internal index number */
	unsigned nr;		/** number of io_events */
	unsigned head;
	unsigned tail;

	unsigned magic;
	unsigned compat_features;
	unsigned incompat_features;
	unsigned header_length;	/** size of aio_ring */

	struct io_event events[0];
};

#define AIO_RING_MAGIC	0xa10a10a1
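/*
 * Reap completions directly from the user-mapped ring, avoiding the
 * io_getevents() syscall. Only safe when the ring magic matches, which
 * the caller verifies.
 */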
static int user_io_getevents(io_context_t aio_ctx, unsigned int max,
			     struct io_event *events)
{
	long i = 0;
	unsigned head;
	struct aio_ring *ring = (struct aio_ring *) aio_ctx;

	while (i < max) {
		head = ring->head;

		if (head == ring->tail) {
			/* There are no more completions */
			break;
		} else {
			/* There is another completion to reap */
			events[i] = ring->events[head];
			read_barrier();
			ring->head = (head + 1) % ring->nr;
			i++;
		}
	}

	return i;
}
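/*
 * Wait for at least min and at most max completions. With
 * userspace_reap=1 and no minimum to honor, poll the ring directly;
 * otherwise fall back to io_getevents().
 */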
static int fio_libaio_getevents(struct thread_data *td, unsigned int min,
				unsigned int max, struct timespec *t)
{
	struct libaio_data *ld = td->io_ops->data;
	struct libaio_options *o = td->eo;
	unsigned actual_min = td->o.iodepth_batch_complete == 0 ? 0 : min;
	int r, events = 0;

	do {
		/*
		 * Only ask for as many events as aio_events has room
		 * left for, since we may loop and reap in batches.
		 */
		if (o->userspace_reap == 1
		    && actual_min == 0
		    && ((struct aio_ring *)(ld->aio_ctx))->magic
				== AIO_RING_MAGIC) {
			r = user_io_getevents(ld->aio_ctx, max - events,
				ld->aio_events + events);
		} else {
			r = io_getevents(ld->aio_ctx, actual_min,
				max - events, ld->aio_events + events, t);
		}
		if (r >= 0)
			events += r;
		else if (r == -EAGAIN)
			usleep(100);
		else if (r != -EINTR)
			break;	/* hard error, don't spin forever */
	} while (events < min);

	return r < 0 ? r : events;
}
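/*
 * Stage an io_u for submission. Data IO is batched into the iocb list
 * and submitted later by fio_libaio_commit(); syncs and trims are
 * completed inline since they cannot go through the aio path.
 */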
static int fio_libaio_queue(struct thread_data *td, struct io_u *io_u)
{
	struct libaio_data *ld = td->io_ops->data;

	fio_ro_check(td, io_u);

	if (ld->iocbs_nr == (int) td->o.iodepth)
		return FIO_Q_BUSY;

	/*
	 * fsync is tricky, since it can fail and we need to do it
	 * serialized with other IO. The reason is that Linux doesn't
	 * support aio fsync yet. So return busy for the case where we
	 * have pending IO, to let fio complete those first.
	 */
	if (ddir_sync(io_u->ddir)) {
		if (ld->iocbs_nr)
			return FIO_Q_BUSY;

		do_io_u_sync(td, io_u);
		return FIO_Q_COMPLETED;
	}

	if (io_u->ddir == DDIR_TRIM) {
		if (ld->iocbs_nr)
			return FIO_Q_BUSY;

		do_io_u_trim(td, io_u);
		return FIO_Q_COMPLETED;
	}

	ld->iocbs[ld->iocbs_nr] = &io_u->iocb;
	ld->io_us[ld->iocbs_nr] = io_u;
	ld->iocbs_nr++;
	return FIO_Q_QUEUED;
}
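/*
 * Record the issue time for each io_u that io_submit() accepted, so
 * that latency accounting starts at actual submission.
 */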
static void fio_libaio_queued(struct thread_data *td, struct io_u **io_us,
			      unsigned int nr)
{
	struct timeval now;
	unsigned int i;

	if (!fio_fill_issue_time(td))
		return;

	fio_gettime(&now, NULL);

	for (i = 0; i < nr; i++) {
		struct io_u *io_u = io_us[i];

		memcpy(&io_u->issue_time, &now, sizeof(now));
		io_u_queued(td, io_u);
	}
}
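/*
 * Submit all staged iocbs. io_submit() may accept only part of the
 * batch, so advance past what was taken and retry the remainder;
 * -EAGAIN and -EINTR are treated as transient and retried as well.
 */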
static int fio_libaio_commit(struct thread_data *td)
{
	struct libaio_data *ld = td->io_ops->data;
	struct iocb **iocbs;
	struct io_u **io_us;
	int ret;

	if (!ld->iocbs_nr)
		return 0;

	io_us = ld->io_us;
	iocbs = ld->iocbs;
	do {
		ret = io_submit(ld->aio_ctx, ld->iocbs_nr, iocbs);
		if (ret > 0) {
			fio_libaio_queued(td, io_us, ret);
			io_u_mark_submit(td, ret);
			ld->iocbs_nr -= ret;
			io_us += ret;
			iocbs += ret;
			ret = 0;
		} else if (!ret || ret == -EAGAIN || ret == -EINTR) {
			if (!ret)
				io_u_mark_submit(td, ret);
			continue;
		} else
			break;
	} while (ld->iocbs_nr);

	return ret;
}
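/*
 * Best-effort cancellation of a single in-flight iocb.
 */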
static int fio_libaio_cancel(struct thread_data *td, struct io_u *io_u)
{
	struct libaio_data *ld = td->io_ops->data;

	return io_cancel(ld->aio_ctx, &io_u->iocb, ld->aio_events);
}
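/*
 * Tear down the aio context and free the per-thread engine data.
 */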
static void fio_libaio_cleanup(struct thread_data *td)
{
	struct libaio_data *ld = td->io_ops->data;

	if (ld) {
		io_destroy(ld->aio_ctx);
		free(ld->aio_events);
		free(ld->iocbs);
		free(ld->io_us);
		free(ld);
	}
}
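/*
 * Allocate the per-thread engine data and set up the aio context.
 */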
static int fio_libaio_init(struct thread_data *td)
{
	struct libaio_data *ld = malloc(sizeof(*ld));
	struct libaio_options *o = td->eo;
	int err = 0;

	memset(ld, 0, sizeof(*ld));

	/*
	 * First try passing in INT_MAX for the queue depth, since we don't
	 * care about the user ring. If that fails with -EINVAL, the kernel
	 * is too old and we need to pass the real depth.
	 */
	if (!o->userspace_reap)
		err = io_queue_init(INT_MAX, &ld->aio_ctx);
	if (o->userspace_reap || err == -EINVAL)
		err = io_queue_init(td->o.iodepth, &ld->aio_ctx);
	if (err) {
		td_verror(td, -err, "io_queue_init");
		log_err("fio: check /proc/sys/fs/aio-max-nr\n");
		free(ld);
		return 1;
	}

	ld->aio_events = malloc(td->o.iodepth * sizeof(struct io_event));
	memset(ld->aio_events, 0, td->o.iodepth * sizeof(struct io_event));
	ld->iocbs = malloc(td->o.iodepth * sizeof(struct iocb *));
	memset(ld->iocbs, 0, td->o.iodepth * sizeof(struct iocb *));
	ld->io_us = malloc(td->o.iodepth * sizeof(struct io_u *));
	memset(ld->io_us, 0, td->o.iodepth * sizeof(struct io_u *));
	ld->iocbs_nr = 0;

	td->io_ops->data = ld;
	return 0;
}
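/*
 * Hook table binding this engine's callbacks into fio core.
 */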
static struct ioengine_ops ioengine = {
	.name = "libaio",
	.version = FIO_IOOPS_VERSION,
	.init = fio_libaio_init,
	.prep = fio_libaio_prep,
	.queue = fio_libaio_queue,
	.commit = fio_libaio_commit,
	.cancel = fio_libaio_cancel,
	.getevents = fio_libaio_getevents,
	.event = fio_libaio_event,
	.cleanup = fio_libaio_cleanup,
	.open_file = generic_open_file,
	.close_file = generic_close_file,
	.get_file_size = generic_get_file_size,
	.options = options,
	.option_struct_size = sizeof(struct libaio_options),
};
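/*
 * fio_init/fio_exit mark these as constructor/destructor functions, so
 * the engine registers itself when the binary loads.
 */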
static void fio_init fio_libaio_register(void)
{
	register_ioengine(&ioengine);
}

static void fio_exit fio_libaio_unregister(void)
{
	unregister_ioengine(&ioengine);
}