/*
 * libaio engine
 *
 * IO engine using the Linux native aio interface.
 *
 */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <assert.h>
#include <libaio.h>

#include "../fio.h"

struct libaio_data {
	io_context_t aio_ctx;
	struct io_event *aio_events;
	struct iocb **iocbs;
	struct io_u **io_us;

	/*
	 * Basic ring buffer. 'head' is incremented in _queue(), and
	 * 'tail' is incremented in _commit(). We keep 'queued' so we
	 * can tell whether the ring is full or empty when
	 * 'head' == 'tail'. 'entries' is the ring size, and 'is_pow2'
	 * is just an optimization that uses an AND instead of a
	 * modulus to wrap on ring increment.
	 */
	int is_pow2;
	unsigned int entries;
	unsigned int queued;
	unsigned int head;
	unsigned int tail;
};

struct libaio_options {
	struct thread_data *td;
	unsigned int userspace_reap;
};

static struct fio_option options[] = {
	{
		.name	= "userspace_reap",
		.lname	= "Libaio userspace reaping",
		.type	= FIO_OPT_STR_SET,
		.off1	= offsetof(struct libaio_options, userspace_reap),
		.help	= "Use alternative user-space reap implementation",
		.category = FIO_OPT_C_ENGINE,
		.group	= FIO_OPT_G_LIBAIO,
	},
	{
		.name	= NULL,
	},
};
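
/*
 * Advance a ring index by 'add' slots, wrapping at 'entries'. For
 * power-of-2 ring sizes a mask is cheaper than the modulus.
 */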
static inline void ring_inc(struct libaio_data *ld, unsigned int *val,
			    unsigned int add)
{
	if (ld->is_pow2)
		*val = (*val + add) & (ld->entries - 1);
	else
		*val = (*val + add) % ld->entries;
}
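
/*
 * Fill in the iocb for this io_u according to its data direction; the
 * actual submission happens later, in _commit().
 */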
static int fio_libaio_prep(struct thread_data fio_unused *td, struct io_u *io_u)
{
	struct fio_file *f = io_u->file;

	if (io_u->ddir == DDIR_READ)
		io_prep_pread(&io_u->iocb, f->fd, io_u->xfer_buf, io_u->xfer_buflen, io_u->offset);
	else if (io_u->ddir == DDIR_WRITE)
		io_prep_pwrite(&io_u->iocb, f->fd, io_u->xfer_buf, io_u->xfer_buflen, io_u->offset);
	else if (ddir_sync(io_u->ddir))
		io_prep_fsync(&io_u->iocb, f->fd);

	return 0;
}
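
/*
 * Map a completed io_event back to its io_u. A short transfer is
 * recorded as a residual count; an ev->res larger than the transfer
 * length is a negative error code that wrapped in the unsigned
 * comparison.
 */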
static struct io_u *fio_libaio_event(struct thread_data *td, int event)
{
	struct libaio_data *ld = td->io_ops->data;
	struct io_event *ev;
	struct io_u *io_u;

	ev = ld->aio_events + event;
	io_u = container_of(ev->obj, struct io_u, iocb);

	if (ev->res != io_u->xfer_buflen) {
		if (ev->res > io_u->xfer_buflen)
			io_u->error = -ev->res;
		else
			io_u->resid = io_u->xfer_buflen - ev->res;
	} else
		io_u->error = 0;

	return io_u;
}
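
/*
 * The kernel maps its AIO completion ring into user space, and the
 * io_context_t handle is in fact a pointer to that mapping. Mirroring
 * the ring header here lets completions be reaped without a system
 * call.
 */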
struct aio_ring {
	unsigned id;		/** kernel internal index number */
	unsigned nr;		/** number of io_events */
	unsigned head;
	unsigned tail;

	unsigned magic;
	unsigned compat_features;
	unsigned incompat_features;
	unsigned header_length;	/** size of aio_ring */

	struct io_event events[0];
};

#define AIO_RING_MAGIC	0xa10a10a1
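
/*
 * Reap up to 'max' completed events directly off the user-mapped
 * ring. Only the consumer index (head) is advanced here; the kernel
 * owns the producer side (tail).
 */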
static int user_io_getevents(io_context_t aio_ctx, unsigned int max,
			     struct io_event *events)
{
	long i = 0;
	unsigned head;
	struct aio_ring *ring = (struct aio_ring *) aio_ctx;

	while (i < max) {
		head = ring->head;

		if (head == ring->tail) {
			/* There are no more completions */
			break;
		} else {
			/* There is another completion to reap */
			events[i] = ring->events[head];
			read_barrier();
			ring->head = (head + 1) % ring->nr;
			i++;
		}
	}

	return i;
}
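
/*
 * Reap at least 'min' and at most 'max' events. The user-space path
 * is only taken when it was requested, no reap minimum is in effect
 * (iodepth_batch_complete=0), and the ring magic checks out;
 * everything else goes through io_getevents().
 */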
static int fio_libaio_getevents(struct thread_data *td, unsigned int min,
				unsigned int max, struct timespec *t)
{
	struct libaio_data *ld = td->io_ops->data;
	struct libaio_options *o = td->eo;
	unsigned actual_min = td->o.iodepth_batch_complete == 0 ? 0 : min;
	int r, events = 0;

	do {
		if (o->userspace_reap == 1
		    && actual_min == 0
		    && ((struct aio_ring *)(ld->aio_ctx))->magic
				== AIO_RING_MAGIC) {
			r = user_io_getevents(ld->aio_ctx, max,
				ld->aio_events + events);
		} else {
			r = io_getevents(ld->aio_ctx, actual_min,
				max, ld->aio_events + events, t);
		}
		if (r >= 0)
			events += r;
		else if (r == -EAGAIN)
			usleep(100);
	} while (events < min);

	return r < 0 ? r : events;
}
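
/*
 * Queue an io_u on the ring for a later io_submit() in _commit().
 * Syncs and trims are completed inline, and only against an empty
 * queue, since Linux aio offers no async variant of either.
 */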
static int fio_libaio_queue(struct thread_data *td, struct io_u *io_u)
{
	struct libaio_data *ld = td->io_ops->data;

	fio_ro_check(td, io_u);

	if (ld->queued == td->o.iodepth)
		return FIO_Q_BUSY;

	/*
	 * fsync is tricky, since it can fail and we need to do it
	 * serialized with other IO. The reason is that Linux doesn't
	 * support aio fsync yet. So return busy while we have pending
	 * IO, to let fio complete that first.
	 */
	if (ddir_sync(io_u->ddir)) {
		if (ld->queued)
			return FIO_Q_BUSY;

		do_io_u_sync(td, io_u);
		return FIO_Q_COMPLETED;
	}

	if (io_u->ddir == DDIR_TRIM) {
		if (ld->queued)
			return FIO_Q_BUSY;

		do_io_u_trim(td, io_u);
		return FIO_Q_COMPLETED;
	}

	ld->iocbs[ld->head] = &io_u->iocb;
	ld->io_us[ld->head] = io_u;
	ring_inc(ld, &ld->head, 1);
	ld->queued++;
	return FIO_Q_QUEUED;
}
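
/*
 * Stamp issue times on the io_us that io_submit() just accepted, if
 * the job is logging issue times at all.
 */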
static void fio_libaio_queued(struct thread_data *td, struct io_u **io_us,
			      unsigned int nr)
{
	struct timeval now;
	unsigned int i;

	if (!fio_fill_issue_time(td))
		return;

	fio_gettime(&now, NULL);

	for (i = 0; i < nr; i++) {
		struct io_u *io_u = io_us[i];

		memcpy(&io_u->issue_time, &now, sizeof(now));
		io_u_queued(td, io_u);
	}
}
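
/*
 * Submit everything between tail and head. The ring may wrap, so
 * submission proceeds in contiguous chunks; a partial submit just
 * advances the tail and the loop retries.
 */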
static int fio_libaio_commit(struct thread_data *td)
{
	struct libaio_data *ld = td->io_ops->data;
	struct iocb **iocbs;
	struct io_u **io_us;
	int ret;

	if (!ld->queued)
		return 0;

	do {
		long nr = ld->queued;

		nr = min((unsigned int) nr, ld->entries - ld->tail);
		io_us = ld->io_us + ld->tail;
		iocbs = ld->iocbs + ld->tail;

		ret = io_submit(ld->aio_ctx, nr, iocbs);
		if (ret > 0) {
			fio_libaio_queued(td, io_us, ret);
			io_u_mark_submit(td, ret);

			ld->queued -= ret;
			ring_inc(ld, &ld->tail, ret);
			ret = 0;
		} else if (ret == -EINTR || !ret) {
			if (!ret)
				io_u_mark_submit(td, ret);
			continue;
		} else if (ret == -EAGAIN) {
			/*
			 * If we get EAGAIN, we should break out without
			 * error and let the upper layer reap some
			 * events for us.
			 */
			ret = 0;
			break;
		} else
			break;
	} while (ld->head != ld->tail);

	return ret;
}

static int fio_libaio_cancel(struct thread_data *td, struct io_u *io_u)
{
	struct libaio_data *ld = td->io_ops->data;

	return io_cancel(ld->aio_ctx, &io_u->iocb, ld->aio_events);
}
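
/*
 * Tear down the aio context and free the rings allocated in _init().
 */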
static void fio_libaio_cleanup(struct thread_data *td)
{
	struct libaio_data *ld = td->io_ops->data;

	if (ld) {
		io_destroy(ld->aio_ctx);
		free(ld->aio_events);
		free(ld->iocbs);
		free(ld->io_us);
		free(ld);
	}
}
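
/*
 * Set up the aio context and allocate the rings. With userspace_reap
 * the context must be created with the exact iodepth, since the
 * kernel's ring mapping is walked by hand.
 */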
static int fio_libaio_init(struct thread_data *td)
{
	struct libaio_options *o = td->eo;
	struct libaio_data *ld;
	int err = 0;

	ld = calloc(1, sizeof(*ld));

	/*
	 * First try passing in INT_MAX for the queue depth, since we
	 * don't care about the user ring. If that fails with EINVAL,
	 * the kernel is too old and we need the real depth.
	 */
	if (!o->userspace_reap)
		err = io_queue_init(INT_MAX, &ld->aio_ctx);
	if (o->userspace_reap || err == -EINVAL)
		err = io_queue_init(td->o.iodepth, &ld->aio_ctx);
	if (err) {
		td_verror(td, -err, "io_queue_init");
		log_err("fio: check /proc/sys/fs/aio-max-nr\n");
		free(ld);
		return 1;
	}

	ld->entries = td->o.iodepth;
	ld->is_pow2 = is_power_of_2(ld->entries);
	ld->aio_events = calloc(ld->entries, sizeof(struct io_event));
	ld->iocbs = calloc(ld->entries, sizeof(struct iocb *));
	ld->io_us = calloc(ld->entries, sizeof(struct io_u *));

	td->io_ops->data = ld;
	return 0;
}
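
/*
 * Hooks fio calls into this engine: queue/commit feed the submission
 * ring, getevents/event reap and translate completions, and the
 * generic file helpers handle open, close, and sizing.
 */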
static struct ioengine_ops ioengine = {
	.name			= "libaio",
	.version		= FIO_IOOPS_VERSION,
	.init			= fio_libaio_init,
	.prep			= fio_libaio_prep,
	.queue			= fio_libaio_queue,
	.commit			= fio_libaio_commit,
	.cancel			= fio_libaio_cancel,
	.getevents		= fio_libaio_getevents,
	.event			= fio_libaio_event,
	.cleanup		= fio_libaio_cleanup,
	.open_file		= generic_open_file,
	.close_file		= generic_close_file,
	.get_file_size		= generic_get_file_size,
	.options		= options,
	.option_struct_size	= sizeof(struct libaio_options),
};
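
/*
 * Sketch of a job section exercising this engine; the filename is a
 * placeholder. userspace_reap only kicks in when no reap minimum is
 * required, hence iodepth_batch_complete=0.
 *
 * [randread-libaio]
 * ioengine=libaio
 * iodepth=32
 * iodepth_batch_complete=0
 * userspace_reap
 * direct=1
 * rw=randread
 * filename=/path/to/device/or/file
 */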

static void fio_init fio_libaio_register(void)
{
	register_ioengine(&ioengine);
}

static void fio_exit fio_libaio_unregister(void)
{
	unregister_ioengine(&ioengine);
}