/*
 * libaio engine
 *
 * IO engine using the Linux native aio interface.
 *
 */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <assert.h>
#include <libaio.h>

#include "../fio.h"
#include "../lib/pow2.h"

static int fio_libaio_commit(struct thread_data *td);

struct libaio_data {
	io_context_t aio_ctx;
	struct io_event *aio_events;
	struct iocb **iocbs;
	struct io_u **io_us;

	/*
	 * Basic ring buffer. 'head' is incremented in _queue(), and
	 * 'tail' is incremented in _commit(). We keep 'queued' so
	 * that we know if the ring is full or empty, when
	 * 'head' == 'tail'. 'entries' is the ring size, and
	 * 'is_pow2' is just an optimization to use AND instead of
	 * modulus to get the remainder on ring increment.
	 */
	int is_pow2;
	unsigned int entries;
	unsigned int queued;
	unsigned int head;
	unsigned int tail;
};

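/*
 * Submission flow, as implemented by the functions below: ->queue()
 * stages an iocb at 'head', ->commit() feeds contiguous chunks starting
 * at 'tail' to io_submit(), and ->getevents()/->event() reap completions
 * and hand the finished io_us back to fio.
 */
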
struct libaio_options {
	void *pad;
	unsigned int userspace_reap;
};

static struct fio_option options[] = {
	{
		.name	= "userspace_reap",
		.lname	= "Libaio userspace reaping",
		.type	= FIO_OPT_STR_SET,
		.off1	= offsetof(struct libaio_options, userspace_reap),
		.help	= "Use alternative user-space reap implementation",
		.category = FIO_OPT_C_ENGINE,
		.group	= FIO_OPT_G_LIBAIO,
	},
	{
		.name	= NULL,
	},
};

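/*
 * Illustrative job-file snippet (not part of this source) enabling the
 * option above. Userspace reaping is only used when fio polls for a
 * minimum of zero events, hence iodepth_batch_complete=0:
 *
 *	[randread]
 *	ioengine=libaio
 *	iodepth=32
 *	iodepth_batch_complete=0
 *	userspace_reap=1
 */
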
static inline void ring_inc(struct libaio_data *ld, unsigned int *val,
			    unsigned int add)
{
	if (ld->is_pow2)
		*val = (*val + add) & (ld->entries - 1);
	else
		*val = (*val + add) % ld->entries;
}
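
/*
 * Example (illustrative numbers): with entries == 8, ring_inc() above
 * computes (val + add) & 7, which equals (val + add) % 8 but avoids the
 * division. A non-power-of-2 ring size such as 10 takes the modulus path.
 */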

static int fio_libaio_prep(struct thread_data fio_unused *td, struct io_u *io_u)
{
	struct fio_file *f = io_u->file;

	if (io_u->ddir == DDIR_READ)
		io_prep_pread(&io_u->iocb, f->fd, io_u->xfer_buf, io_u->xfer_buflen, io_u->offset);
	else if (io_u->ddir == DDIR_WRITE)
		io_prep_pwrite(&io_u->iocb, f->fd, io_u->xfer_buf, io_u->xfer_buflen, io_u->offset);
	else if (ddir_sync(io_u->ddir))
		io_prep_fsync(&io_u->iocb, f->fd);

	return 0;
}

static struct io_u *fio_libaio_event(struct thread_data *td, int event)
{
	struct libaio_data *ld = td->io_ops->data;
	struct io_event *ev;
	struct io_u *io_u;

	ev = ld->aio_events + event;
	io_u = container_of(ev->obj, struct io_u, iocb);

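	/*
	 * For libaio, ev->res is the operation's return value: bytes
	 * transferred on success, or a negative errno. The field is
	 * unsigned, so a value larger than the requested length is a
	 * wrapped negative errno.
	 */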
	if (ev->res != io_u->xfer_buflen) {
		if (ev->res > io_u->xfer_buflen)
			io_u->error = -ev->res;
		else
			io_u->resid = io_u->xfer_buflen - ev->res;
	} else
		io_u->error = 0;

	return io_u;
}

struct aio_ring {
	unsigned id;		 /** kernel internal index number */
	unsigned nr;		 /** number of io_events */
	unsigned head;
	unsigned tail;

	unsigned magic;
	unsigned compat_features;
	unsigned incompat_features;
	unsigned header_length;	/** size of aio_ring */

	struct io_event events[0];
};

#define AIO_RING_MAGIC	0xa10a10a1

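/*
 * Reap completions straight from the kernel's aio completion ring,
 * which io_setup() maps into the process. The magic check against
 * AIO_RING_MAGIC (mirroring the kernel's value in fs/aio.c) verifies
 * that the io_context_t really points at that mapping.
 */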
static int user_io_getevents(io_context_t aio_ctx, unsigned int max,
			     struct io_event *events)
{
	long i = 0;
	unsigned head;
	struct aio_ring *ring = (struct aio_ring*) aio_ctx;

	while (i < max) {
		head = ring->head;

		if (head == ring->tail) {
			/* There are no more completions */
			break;
		} else {
			/* There is another completion to reap */
			events[i] = ring->events[head];
			read_barrier();
			ring->head = (head + 1) % ring->nr;
			i++;
		}
	}

	return i;
}

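/*
 * Note: user_io_getevents() only returns what has already completed;
 * it cannot wait for more. That is why fio_libaio_getevents() below
 * takes the userspace path only when actual_min == 0.
 */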
static int fio_libaio_getevents(struct thread_data *td, unsigned int min,
				unsigned int max, const struct timespec *t)
{
	struct libaio_data *ld = td->io_ops->data;
	struct libaio_options *o = td->eo;
	unsigned actual_min = td->o.iodepth_batch_complete == 0 ? 0 : min;
	struct timespec __lt, *lt = NULL;
	int r, events = 0;

	if (t) {
		__lt = *t;
		lt = &__lt;
	}

	do {
		if (o->userspace_reap == 1
		    && actual_min == 0
		    && ((struct aio_ring *)(ld->aio_ctx))->magic
				== AIO_RING_MAGIC) {
			r = user_io_getevents(ld->aio_ctx, max,
				ld->aio_events + events);
		} else {
			r = io_getevents(ld->aio_ctx, actual_min,
				max, ld->aio_events + events, lt);
		}
		if (r > 0)
			events += r;
		else if ((min && r == 0) || r == -EAGAIN) {
			fio_libaio_commit(td);
			usleep(100);
		} else if (r != -EINTR)
			break;
	} while (events < min);

	return r < 0 ? r : events;
}

static int fio_libaio_queue(struct thread_data *td, struct io_u *io_u)
{
	struct libaio_data *ld = td->io_ops->data;

	fio_ro_check(td, io_u);

	if (ld->queued == td->o.iodepth)
		return FIO_Q_BUSY;

	/*
	 * fsync is tricky, since it can fail and we need to do it
	 * serialized with other IO. The reason is that Linux doesn't
	 * support aio fsync yet. So return busy for the case where we
	 * have pending IO, to let fio complete those first.
	 */
	if (ddir_sync(io_u->ddir)) {
		if (ld->queued)
			return FIO_Q_BUSY;

		do_io_u_sync(td, io_u);
		return FIO_Q_COMPLETED;
	}

	if (io_u->ddir == DDIR_TRIM) {
		if (ld->queued)
			return FIO_Q_BUSY;

		do_io_u_trim(td, io_u);
		return FIO_Q_COMPLETED;
	}

	ld->iocbs[ld->head] = &io_u->iocb;
	ld->io_us[ld->head] = io_u;
	ring_inc(ld, &ld->head, 1);
	ld->queued++;
	return FIO_Q_QUEUED;
}

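/*
 * The return values above follow fio's ioengine contract: FIO_Q_QUEUED
 * means the io_u was staged and a later ->commit() will submit it,
 * FIO_Q_BUSY asks fio to reap completions and retry, and
 * FIO_Q_COMPLETED means the io_u finished inline (the sync/trim cases).
 */
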
static void fio_libaio_queued(struct thread_data *td, struct io_u **io_us,
			      unsigned int nr)
{
	struct timeval now;
	unsigned int i;

	if (!fio_fill_issue_time(td))
		return;

	fio_gettime(&now, NULL);

	for (i = 0; i < nr; i++) {
		struct io_u *io_u = io_us[i];

		memcpy(&io_u->issue_time, &now, sizeof(now));
		io_u_queued(td, io_u);
	}
}

static int fio_libaio_commit(struct thread_data *td)
{
	struct libaio_data *ld = td->io_ops->data;
	struct iocb **iocbs;
	struct io_u **io_us;
	struct timeval tv;
	int ret, wait_start = 0;

	if (!ld->queued)
		return 0;

	do {
		long nr = ld->queued;

		nr = min((unsigned int) nr, ld->entries - ld->tail);
		io_us = ld->io_us + ld->tail;
		iocbs = ld->iocbs + ld->tail;

		ret = io_submit(ld->aio_ctx, nr, iocbs);
		if (ret > 0) {
			fio_libaio_queued(td, io_us, ret);
			io_u_mark_submit(td, ret);

			ld->queued -= ret;
			ring_inc(ld, &ld->tail, ret);
			ret = 0;
			wait_start = 0;
		} else if (ret == -EINTR || !ret) {
			if (!ret)
				io_u_mark_submit(td, ret);
			wait_start = 0;
			continue;
		} else if (ret == -EAGAIN) {
			/*
			 * If we get EAGAIN, we should break out without
			 * error and let the upper layer reap some
			 * events for us. If we have no queued IO, we
			 * must loop here. If we loop for more than 30s,
			 * just error out, something must be buggy in the
			 * IO path.
			 */
			if (ld->queued) {
				ret = 0;
				break;
			}
			if (!wait_start) {
				fio_gettime(&tv, NULL);
				wait_start = 1;
			} else if (mtime_since_now(&tv) > 30000) {
				log_err("fio: aio appears to be stalled, giving up\n");
				break;
			}
			usleep(1);
			continue;
		} else if (ret == -ENOMEM) {
			/*
			 * If we get -ENOMEM, reap events if we can. If
			 * we cannot, treat it as a fatal event since there's
			 * nothing we can do about it.
			 */
			if (ld->queued)
				ret = 0;
			break;
		} else
			break;
	} while (ld->queued);

	return ret;
}

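/*
 * A note on partial submits: io_submit() may accept fewer iocbs than it
 * was passed. The loop above advances 'tail' only by the accepted
 * count, so the remainder is retried on the next pass or on the next
 * ->commit() call.
 */
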
static int fio_libaio_cancel(struct thread_data *td, struct io_u *io_u)
{
	struct libaio_data *ld = td->io_ops->data;

	return io_cancel(ld->aio_ctx, &io_u->iocb, ld->aio_events);
}

static void fio_libaio_cleanup(struct thread_data *td)
{
	struct libaio_data *ld = td->io_ops->data;

	if (ld) {
		/*
		 * Work-around to avoid huge RCU stalls at exit time. If we
		 * don't do this here, then it'll be torn down by exit_aio().
		 * But for that case we can parallelize the freeing, thus
		 * speeding it up a lot.
		 */
		if (!(td->flags & TD_F_CHILD))
			io_destroy(ld->aio_ctx);
		free(ld->aio_events);
		free(ld->iocbs);
		free(ld->io_us);
		free(ld);
	}
}

static int fio_libaio_init(struct thread_data *td)
{
	struct libaio_options *o = td->eo;
	struct libaio_data *ld;
	int err = 0;

	ld = calloc(1, sizeof(*ld));

	/*
	 * First try passing in INT_MAX for the queue depth, since we
	 * don't care about the user ring size when not reaping in user
	 * space. If that fails with -EINVAL (older kernels), fall back
	 * to asking for the exact depth.
	 */
	if (!o->userspace_reap)
		err = io_queue_init(INT_MAX, &ld->aio_ctx);
	if (o->userspace_reap || err == -EINVAL)
		err = io_queue_init(td->o.iodepth, &ld->aio_ctx);
	if (err) {
		td_verror(td, -err, "io_queue_init");
		log_err("fio: check /proc/sys/fs/aio-max-nr\n");
		free(ld);
		return 1;
	}

	ld->entries = td->o.iodepth;
	ld->is_pow2 = is_power_of_2(ld->entries);
	ld->aio_events = calloc(ld->entries, sizeof(struct io_event));
	ld->iocbs = calloc(ld->entries, sizeof(struct iocb *));
	ld->io_us = calloc(ld->entries, sizeof(struct io_u *));

	td->io_ops->data = ld;
	return 0;
}

static struct ioengine_ops ioengine = {
	.name			= "libaio",
	.version		= FIO_IOOPS_VERSION,
	.init			= fio_libaio_init,
	.prep			= fio_libaio_prep,
	.queue			= fio_libaio_queue,
	.commit			= fio_libaio_commit,
	.cancel			= fio_libaio_cancel,
	.getevents		= fio_libaio_getevents,
	.event			= fio_libaio_event,
	.cleanup		= fio_libaio_cleanup,
	.open_file		= generic_open_file,
	.close_file		= generic_close_file,
	.get_file_size		= generic_get_file_size,
	.options		= options,
	.option_struct_size	= sizeof(struct libaio_options),
};

static void fio_init fio_libaio_register(void)
{
	register_ioengine(&ioengine);
}

static void fio_exit fio_libaio_unregister(void)
{
	unregister_ioengine(&ioengine);
}