Per-command priority: Priority logging and libaio/io_uring cmdprio_percentage
engines/libaio.c
/*
 * libaio engine
 *
 * IO engine using the Linux native aio interface.
 *
 */
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <libaio.h>
#include <sys/time.h>
#include <sys/resource.h>

#include "../fio.h"
#include "../lib/pow2.h"
#include "../optgroup.h"
#include "../lib/memalign.h"

/* Should be defined in newest aio_abi.h */
#ifndef IOCB_FLAG_IOPRIO
#define IOCB_FLAG_IOPRIO	(1 << 1)
#endif

static int fio_libaio_commit(struct thread_data *td);
static int fio_libaio_init(struct thread_data *td);

struct libaio_data {
	io_context_t aio_ctx;
	struct io_event *aio_events;
	struct iocb **iocbs;
	struct io_u **io_us;

	struct io_u **io_u_index;

	/*
	 * Basic ring buffer. 'head' is incremented in _queue(), and
	 * 'tail' is incremented in _commit(). We keep 'queued' so
	 * that we know if the ring is full or empty, when
	 * 'head' == 'tail'. 'entries' is the ring size, and
	 * 'is_pow2' is just an optimization to use AND instead of
	 * modulus to get the remainder on ring increment.
	 */
	int is_pow2;
	unsigned int entries;
	unsigned int queued;
	unsigned int head;
	unsigned int tail;
};

struct libaio_options {
	void *pad;
	unsigned int userspace_reap;
	unsigned int cmdprio_percentage;
};

static struct fio_option options[] = {
	{
		.name	= "userspace_reap",
		.lname	= "Libaio userspace reaping",
		.type	= FIO_OPT_STR_SET,
		.off1	= offsetof(struct libaio_options, userspace_reap),
		.help	= "Use alternative user-space reap implementation",
		.category = FIO_OPT_C_ENGINE,
		.group	= FIO_OPT_G_LIBAIO,
	},
#ifdef FIO_HAVE_IOPRIO_CLASS
	{
		.name	= "cmdprio_percentage",
		.lname	= "high priority percentage",
		.type	= FIO_OPT_INT,
		.off1	= offsetof(struct libaio_options, cmdprio_percentage),
		.minval	= 1,
		.maxval	= 100,
		.help	= "Send high priority I/O this percentage of the time",
		.category = FIO_OPT_C_ENGINE,
		.group	= FIO_OPT_G_LIBAIO,
	},
#else
	{
		.name	= "cmdprio_percentage",
		.lname	= "high priority percentage",
		.type	= FIO_OPT_UNSUPPORTED,
		.help	= "Your platform does not support I/O priority classes",
	},
#endif
	{
		.name	= NULL,
	},
};

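/*
 * Illustrative job file snippet (an assumption, not part of this source):
 * on a platform with FIO_HAVE_IOPRIO_CLASS, mark roughly 10% of the
 * commands issued by this engine as high priority.
 *
 *	[randread]
 *	ioengine=libaio
 *	iodepth=32
 *	rw=randread
 *	cmdprio_percentage=10
 */
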
static inline void ring_inc(struct libaio_data *ld, unsigned int *val,
			    unsigned int add)
{
	if (ld->is_pow2)
		*val = (*val + add) & (ld->entries - 1);
	else
		*val = (*val + add) % ld->entries;
}

static int fio_libaio_prep(struct thread_data fio_unused *td, struct io_u *io_u)
{
	struct fio_file *f = io_u->file;
	struct iocb *iocb = &io_u->iocb;

	if (io_u->ddir == DDIR_READ) {
		io_prep_pread(iocb, f->fd, io_u->xfer_buf, io_u->xfer_buflen, io_u->offset);
	} else if (io_u->ddir == DDIR_WRITE) {
		io_prep_pwrite(iocb, f->fd, io_u->xfer_buf, io_u->xfer_buflen, io_u->offset);
	} else if (ddir_sync(io_u->ddir))
		io_prep_fsync(iocb, f->fd);

	return 0;
}

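/*
 * For cmdprio_percentage percent of requests, tag the iocb with the RT
 * I/O priority class and mark the io_u as high priority so its latency
 * can be logged separately.
 */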
static void fio_libaio_prio_prep(struct thread_data *td, struct io_u *io_u)
{
	struct libaio_options *o = td->eo;

	if (rand_between(&td->prio_state, 0, 99) < o->cmdprio_percentage) {
		io_u->iocb.aio_reqprio = IOPRIO_CLASS_RT << IOPRIO_CLASS_SHIFT;
		io_u->iocb.u.c.flags |= IOCB_FLAG_IOPRIO;
		io_u->flags |= IO_U_F_PRIORITY;
	}
}

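/*
 * Map a reaped io_event back to its io_u: a short transfer is recorded
 * as a residual count, a negative result as an error.
 */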
static struct io_u *fio_libaio_event(struct thread_data *td, int event)
{
	struct libaio_data *ld = td->io_ops_data;
	struct io_event *ev;
	struct io_u *io_u;

	ev = ld->aio_events + event;
	io_u = container_of(ev->obj, struct io_u, iocb);

	if (ev->res != io_u->xfer_buflen) {
		if (ev->res > io_u->xfer_buflen)
			io_u->error = -ev->res;
		else
			io_u->resid = io_u->xfer_buflen - ev->res;
	} else
		io_u->error = 0;

	return io_u;
}

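/*
 * With userspace_reap enabled, completions are pulled straight out of
 * the kernel's aio completion ring that io_setup() maps into the
 * process, avoiding the io_getevents() syscall. This path is only used
 * when no minimum number of events is required.
 */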
struct aio_ring {
	unsigned id;		 /** kernel internal index number */
	unsigned nr;		 /** number of io_events */
	unsigned head;
	unsigned tail;

	unsigned magic;
	unsigned compat_features;
	unsigned incompat_features;
	unsigned header_length;	/** size of aio_ring */

	struct io_event events[0];
};

#define AIO_RING_MAGIC	0xa10a10a1

static int user_io_getevents(io_context_t aio_ctx, unsigned int max,
			     struct io_event *events)
{
	long i = 0;
	unsigned head;
	struct aio_ring *ring = (struct aio_ring*) aio_ctx;

	while (i < max) {
		head = ring->head;

		if (head == ring->tail) {
			/* There are no more completions */
			break;
		} else {
			/* There is another completion to reap */
			events[i] = ring->events[head];
			read_barrier();
			ring->head = (head + 1) % ring->nr;
			i++;
		}
	}

	return i;
}

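/*
 * Reap between 'min' and 'max' completions. Use the userspace ring if
 * allowed, otherwise io_getevents(); on -EAGAIN (or zero events while a
 * minimum is still required) commit any locally queued I/O and retry.
 */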
static int fio_libaio_getevents(struct thread_data *td, unsigned int min,
				unsigned int max, const struct timespec *t)
{
	struct libaio_data *ld = td->io_ops_data;
	struct libaio_options *o = td->eo;
	unsigned actual_min = td->o.iodepth_batch_complete_min == 0 ? 0 : min;
	struct timespec __lt, *lt = NULL;
	int r, events = 0;

	if (t) {
		__lt = *t;
		lt = &__lt;
	}

	do {
		if (o->userspace_reap == 1
		    && actual_min == 0
		    && ((struct aio_ring *)(ld->aio_ctx))->magic
				== AIO_RING_MAGIC) {
			r = user_io_getevents(ld->aio_ctx, max,
				ld->aio_events + events);
		} else {
			r = io_getevents(ld->aio_ctx, actual_min,
				max, ld->aio_events + events, lt);
		}
		if (r > 0)
			events += r;
		else if ((min && r == 0) || r == -EAGAIN) {
			fio_libaio_commit(td);
			if (actual_min)
				usleep(10);
		} else if (r != -EINTR)
			break;
	} while (events < min);

	return r < 0 ? r : events;
}

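/*
 * Queue an io_u in the engine's local submission ring. Syncs and trims
 * are completed inline, and only when no other I/O is pending; reads
 * and writes may be tagged high priority and are submitted to the
 * kernel later, in fio_libaio_commit().
 */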
static enum fio_q_status fio_libaio_queue(struct thread_data *td,
					  struct io_u *io_u)
{
	struct libaio_data *ld = td->io_ops_data;
	struct libaio_options *o = td->eo;

	fio_ro_check(td, io_u);

	if (ld->queued == td->o.iodepth)
		return FIO_Q_BUSY;

	/*
	 * fsync is tricky, since it can fail and we need to do it
	 * serialized with other io. The reason is that Linux doesn't
	 * support aio fsync yet. So return busy for the case where we
	 * have pending io, to let fio complete those first.
	 */
	if (ddir_sync(io_u->ddir)) {
		if (ld->queued)
			return FIO_Q_BUSY;

		do_io_u_sync(td, io_u);
		return FIO_Q_COMPLETED;
	}

	if (io_u->ddir == DDIR_TRIM) {
		if (ld->queued)
			return FIO_Q_BUSY;

		do_io_u_trim(td, io_u);
		io_u_mark_submit(td, 1);
		io_u_mark_complete(td, 1);
		return FIO_Q_COMPLETED;
	}

	if (o->cmdprio_percentage)
		fio_libaio_prio_prep(td, io_u);

	ld->iocbs[ld->head] = &io_u->iocb;
	ld->io_us[ld->head] = io_u;
	ring_inc(ld, &ld->head, 1);
	ld->queued++;
	return FIO_Q_QUEUED;
}

static void fio_libaio_queued(struct thread_data *td, struct io_u **io_us,
			      unsigned int nr)
{
	struct timespec now;
	unsigned int i;

	if (!fio_fill_issue_time(td))
		return;

	fio_gettime(&now, NULL);

	for (i = 0; i < nr; i++) {
		struct io_u *io_u = io_us[i];

		memcpy(&io_u->issue_time, &now, sizeof(now));
		io_u_queued(td, io_u);
	}
}

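/*
 * Submit everything queued in the local ring with io_submit(), capping
 * each call at the end of the ring so the iocb array stays contiguous.
 * Partial submissions advance the tail; on -EAGAIN, defer to the reaper
 * if I/O is already in flight, otherwise retry for up to 30 seconds
 * before declaring the I/O path stalled.
 */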
static int fio_libaio_commit(struct thread_data *td)
{
	struct libaio_data *ld = td->io_ops_data;
	struct iocb **iocbs;
	struct io_u **io_us;
	struct timespec ts;
	int ret, wait_start = 0;

	if (!ld->queued)
		return 0;

	do {
		long nr = ld->queued;

		nr = min((unsigned int) nr, ld->entries - ld->tail);
		io_us = ld->io_us + ld->tail;
		iocbs = ld->iocbs + ld->tail;

		ret = io_submit(ld->aio_ctx, nr, iocbs);
		if (ret > 0) {
			fio_libaio_queued(td, io_us, ret);
			io_u_mark_submit(td, ret);

			ld->queued -= ret;
			ring_inc(ld, &ld->tail, ret);
			ret = 0;
			wait_start = 0;
		} else if (ret == -EINTR || !ret) {
			if (!ret)
				io_u_mark_submit(td, ret);
			wait_start = 0;
			continue;
		} else if (ret == -EAGAIN) {
			/*
			 * If we get EAGAIN, we should break out without
			 * error and let the upper layer reap some
			 * events for us. If we have no queued IO, we
			 * must loop here. If we loop for more than 30s,
			 * just error out, something must be buggy in the
			 * IO path.
			 */
			if (ld->queued) {
				ret = 0;
				break;
			}
			if (!wait_start) {
				fio_gettime(&ts, NULL);
				wait_start = 1;
			} else if (mtime_since_now(&ts) > 30000) {
				log_err("fio: aio appears to be stalled, giving up\n");
				break;
			}
			usleep(1);
			continue;
		} else if (ret == -ENOMEM) {
			/*
			 * If we get -ENOMEM, reap events if we can. If
			 * we cannot, treat it as a fatal event since there's
			 * nothing we can do about it.
			 */
			if (ld->queued)
				ret = 0;
			break;
		} else
			break;
	} while (ld->queued);

	return ret;
}

static int fio_libaio_cancel(struct thread_data *td, struct io_u *io_u)
{
	struct libaio_data *ld = td->io_ops_data;

	return io_cancel(ld->aio_ctx, &io_u->iocb, ld->aio_events);
}

static void fio_libaio_cleanup(struct thread_data *td)
{
	struct libaio_data *ld = td->io_ops_data;

	if (ld) {
		/*
		 * Work-around to avoid huge RCU stalls at exit time. If we
		 * don't do this here, then it'll be torn down by exit_aio().
		 * But for that case we can parallelize the freeing, thus
		 * speeding it up a lot.
		 */
		if (!(td->flags & TD_F_CHILD))
			io_destroy(ld->aio_ctx);
		free(ld->aio_events);
		free(ld->iocbs);
		free(ld->io_us);
		free(ld);
	}
}

static int fio_libaio_post_init(struct thread_data *td)
{
	struct libaio_data *ld = td->io_ops_data;
	int err;

	err = io_queue_init(td->o.iodepth, &ld->aio_ctx);
	if (err) {
		td_verror(td, -err, "io_queue_init");
		return 1;
	}

	return 0;
}

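/*
 * Allocate the submission and completion arrays sized to the configured
 * iodepth, and reject jobs that combine cmdprio_percentage with the
 * mutually exclusive prio/prioclass options.
 */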
static int fio_libaio_init(struct thread_data *td)
{
	struct libaio_data *ld;
	struct thread_options *to = &td->o;
	struct libaio_options *o = td->eo;

	ld = calloc(1, sizeof(*ld));

	ld->entries = td->o.iodepth;
	ld->is_pow2 = is_power_of_2(ld->entries);
	ld->aio_events = calloc(ld->entries, sizeof(struct io_event));
	ld->iocbs = calloc(ld->entries, sizeof(struct iocb *));
	ld->io_us = calloc(ld->entries, sizeof(struct io_u *));

	td->io_ops_data = ld;

	/*
	 * Check for option conflicts
	 */
	if ((fio_option_is_set(to, ioprio) || fio_option_is_set(to, ioprio_class)) &&
	    o->cmdprio_percentage != 0) {
		log_err("%s: cmdprio_percentage option and mutually exclusive "
			"prio or prioclass option is set, exiting\n", to->name);
		td_verror(td, EINVAL, "fio_libaio_init");
		return 1;
	}

	return 0;
}

static struct ioengine_ops ioengine = {
	.name			= "libaio",
	.version		= FIO_IOOPS_VERSION,
	.flags			= FIO_ASYNCIO_SYNC_TRIM,
	.init			= fio_libaio_init,
	.post_init		= fio_libaio_post_init,
	.prep			= fio_libaio_prep,
	.queue			= fio_libaio_queue,
	.commit			= fio_libaio_commit,
	.cancel			= fio_libaio_cancel,
	.getevents		= fio_libaio_getevents,
	.event			= fio_libaio_event,
	.cleanup		= fio_libaio_cleanup,
	.open_file		= generic_open_file,
	.close_file		= generic_close_file,
	.get_file_size		= generic_get_file_size,
	.options		= options,
	.option_struct_size	= sizeof(struct libaio_options),
};

static void fio_init fio_libaio_register(void)
{
	register_ioengine(&ioengine);
}

static void fio_exit fio_libaio_unregister(void)
{
	unregister_ioengine(&ioengine);
}