X-Git-Url: https://git.kernel.dk/?a=blobdiff_plain;f=engines%2Flibaio.c;h=9c14dd88d5d0b3609a0a417fbcfb9c6b9c40c90b;hb=ff00f247a6e764ec445aefc1ab6acf3c0817ca65;hp=7d59df3869f12238d3ebdefc6ae4f85145b9de85;hpb=ec2ea18e87fad6c42d6875cd4ee1bb25ef9b4627;p=fio.git
diff --git a/engines/libaio.c b/engines/libaio.c
index 7d59df38..9c14dd88 100644
--- a/engines/libaio.c
+++ b/engines/libaio.c
@@ -8,12 +8,27 @@
 #include <unistd.h>
 #include <errno.h>
 #include <libaio.h>
+#include <sys/time.h>
+#include <sys/resource.h>
 
 #include "../fio.h"
 #include "../lib/pow2.h"
 #include "../optgroup.h"
+#include "../lib/memalign.h"
+#include "cmdprio.h"
+
+/* Should be defined in newest aio_abi.h */
+#ifndef IOCB_FLAG_IOPRIO
+#define IOCB_FLAG_IOPRIO (1 << 1)
+#endif
+
+/* Hack for libaio < 0.3.111 */
+#ifndef CONFIG_LIBAIO_RW_FLAGS
+#define aio_rw_flags __pad2
+#endif
 
 static int fio_libaio_commit(struct thread_data *td);
+static int fio_libaio_init(struct thread_data *td);
 
 struct libaio_data {
 	io_context_t aio_ctx;
@@ -21,6 +36,8 @@ struct libaio_data {
 	struct iocb **iocbs;
 	struct io_u **io_us;
 
+	struct io_u **io_u_index;
+
 	/*
 	 * Basic ring buffer. 'head' is incremented in _queue(), and
 	 * 'tail' is incremented in _commit(). We keep 'queued' so
@@ -34,13 +51,26 @@ struct libaio_data {
 	unsigned int queued;
 	unsigned int head;
 	unsigned int tail;
+
+	bool use_cmdprio;
 };
 
 struct libaio_options {
-	void *pad;
+	struct thread_data *td;
 	unsigned int userspace_reap;
+	struct cmdprio cmdprio;
+	unsigned int nowait;
 };
 
+static int str_cmdprio_bssplit_cb(void *data, const char *input)
+{
+	struct libaio_options *o = data;
+	struct thread_data *td = o->td;
+	struct cmdprio *cmdprio = &o->cmdprio;
+
+	return fio_cmdprio_bssplit_parse(td, input, cmdprio);
+}
+
 static struct fio_option options[] = {
 	{
 		.name = "userspace_reap",
@@ -51,6 +81,96 @@ static struct fio_option options[] = {
 		.category = FIO_OPT_C_ENGINE,
 		.group = FIO_OPT_G_LIBAIO,
 	},
+#ifdef FIO_HAVE_IOPRIO_CLASS
+	{
+		.name = "cmdprio_percentage",
+		.lname = "high priority percentage",
+		.type = FIO_OPT_INT,
+		.off1 = offsetof(struct libaio_options,
+				 cmdprio.percentage[DDIR_READ]),
+		.off2 = offsetof(struct libaio_options,
+				 cmdprio.percentage[DDIR_WRITE]),
+		.minval = 0,
+		.maxval = 100,
+		.help = "Send high priority I/O this percentage of the time",
+		.category = FIO_OPT_C_ENGINE,
+		.group = FIO_OPT_G_LIBAIO,
+	},
+	{
+		.name = "cmdprio_class",
+		.lname = "Asynchronous I/O priority class",
+		.type = FIO_OPT_INT,
+		.off1 = offsetof(struct libaio_options,
+				 cmdprio.class[DDIR_READ]),
+		.off2 = offsetof(struct libaio_options,
+				 cmdprio.class[DDIR_WRITE]),
+		.help = "Set asynchronous IO priority class",
+		.minval = IOPRIO_MIN_PRIO_CLASS + 1,
+		.maxval = IOPRIO_MAX_PRIO_CLASS,
+		.interval = 1,
+		.category = FIO_OPT_C_ENGINE,
+		.group = FIO_OPT_G_LIBAIO,
+	},
+	{
+		.name = "cmdprio",
+		.lname = "Asynchronous I/O priority level",
+		.type = FIO_OPT_INT,
+		.off1 = offsetof(struct libaio_options,
+				 cmdprio.level[DDIR_READ]),
+		.off2 = offsetof(struct libaio_options,
+				 cmdprio.level[DDIR_WRITE]),
+		.help = "Set asynchronous IO priority level",
+		.minval = IOPRIO_MIN_PRIO,
+		.maxval = IOPRIO_MAX_PRIO,
+		.interval = 1,
+		.category = FIO_OPT_C_ENGINE,
+		.group = FIO_OPT_G_LIBAIO,
+	},
+	{
+		.name = "cmdprio_bssplit",
+		.lname = "Priority percentage block size split",
+		.type = FIO_OPT_STR_ULL,
+		.cb = str_cmdprio_bssplit_cb,
+		.off1 = offsetof(struct libaio_options, cmdprio.bssplit),
+		.help = "Set priority percentages for different block sizes",
+		.category = FIO_OPT_C_ENGINE,
+		.group = FIO_OPT_G_LIBAIO,
+	},
+#else
+	{
+		.name = "cmdprio_percentage",
+		.lname = "high priority percentage",
+		.type = FIO_OPT_UNSUPPORTED,
+		.help = "Your platform does not support I/O priority classes",
+	},
+	{
+		.name = "cmdprio_class",
+		.lname = "Asynchronous I/O priority class",
+		.type = FIO_OPT_UNSUPPORTED,
+		.help = "Your platform does not support I/O priority classes",
+	},
+	{
+		.name = "cmdprio",
+		.lname = "Asynchronous I/O priority level",
+		.type = FIO_OPT_UNSUPPORTED,
+		.help = "Your platform does not support I/O priority classes",
+	},
+	{
+		.name = "cmdprio_bssplit",
+		.lname = "Priority percentage block size split",
+		.type = FIO_OPT_UNSUPPORTED,
+		.help = "Your platform does not support I/O priority classes",
+	},
+#endif
+	{
+		.name = "nowait",
+		.lname = "RWF_NOWAIT",
+		.type = FIO_OPT_BOOL,
+		.off1 = offsetof(struct libaio_options, nowait),
+		.help = "Set RWF_NOWAIT for reads/writes",
+		.category = FIO_OPT_C_ENGINE,
+		.group = FIO_OPT_G_LIBAIO,
+	},
 	{
 		.name = NULL,
 	},
@@ -65,20 +185,56 @@ static inline void ring_inc(struct libaio_data *ld, unsigned int *val,
 	*val = (*val + add) % ld->entries;
 }
 
-static int fio_libaio_prep(struct thread_data fio_unused *td, struct io_u *io_u)
+static int fio_libaio_prep(struct thread_data *td, struct io_u *io_u)
 {
+	struct libaio_options *o = td->eo;
 	struct fio_file *f = io_u->file;
-
-	if (io_u->ddir == DDIR_READ)
-		io_prep_pread(&io_u->iocb, f->fd, io_u->xfer_buf, io_u->xfer_buflen, io_u->offset);
-	else if (io_u->ddir == DDIR_WRITE)
-		io_prep_pwrite(&io_u->iocb, f->fd, io_u->xfer_buf, io_u->xfer_buflen, io_u->offset);
-	else if (ddir_sync(io_u->ddir))
-		io_prep_fsync(&io_u->iocb, f->fd);
+	struct iocb *iocb = &io_u->iocb;
+
+	if (io_u->ddir == DDIR_READ) {
+		io_prep_pread(iocb, f->fd, io_u->xfer_buf, io_u->xfer_buflen, io_u->offset);
+		if (o->nowait)
+			iocb->aio_rw_flags |= RWF_NOWAIT;
+	} else if (io_u->ddir == DDIR_WRITE) {
+		io_prep_pwrite(iocb, f->fd, io_u->xfer_buf, io_u->xfer_buflen, io_u->offset);
+		if (o->nowait)
+			iocb->aio_rw_flags |= RWF_NOWAIT;
+	} else if (ddir_sync(io_u->ddir))
+		io_prep_fsync(iocb, f->fd);
 
 	return 0;
 }
 
+static void fio_libaio_cmdprio_prep(struct thread_data *td, struct io_u *io_u)
+{
+	struct libaio_options *o = td->eo;
+	struct cmdprio *cmdprio = &o->cmdprio;
+	enum fio_ddir ddir = io_u->ddir;
+	unsigned int p = fio_cmdprio_percentage(cmdprio, io_u);
+	unsigned int cmdprio_value =
+		ioprio_value(cmdprio->class[ddir], cmdprio->level[ddir]);
+
+	if (p && rand_between(&td->prio_state, 0, 99) < p) {
+		io_u->ioprio = cmdprio_value;
+		io_u->iocb.aio_reqprio = cmdprio_value;
+		io_u->iocb.u.c.flags |= IOCB_FLAG_IOPRIO;
+		if (!td->ioprio || cmdprio_value < td->ioprio) {
+			/*
+			 * The async IO priority is higher (has a lower value)
+			 * than the default context priority.
+			 */
+			io_u->flags |= IO_U_F_HIGH_PRIO;
+		}
+	} else if (td->ioprio && td->ioprio < cmdprio_value) {
+		/*
+		 * The IO will be executed with the default context priority,
+		 * and this priority is higher (has a lower value) than the
+		 * async IO priority.
+		 */
+		io_u->flags |= IO_U_F_HIGH_PRIO;
+	}
+}
+
 static struct io_u *fio_libaio_event(struct thread_data *td, int event)
 {
 	struct libaio_data *ld = td->io_ops_data;
@@ -131,8 +287,8 @@ static int user_io_getevents(io_context_t aio_ctx, unsigned int max,
 		} else {
 			/* There is another completion to reap */
 			events[i] = ring->events[head];
-			read_barrier();
-			ring->head = (head + 1) % ring->nr;
+			atomic_store_release(&ring->head,
+					     (head + 1) % ring->nr);
 			i++;
 		}
 	}
@@ -169,7 +325,8 @@ static int fio_libaio_getevents(struct thread_data *td, unsigned int min,
 			events += r;
 		else if ((min && r == 0) || r == -EAGAIN) {
 			fio_libaio_commit(td);
-			usleep(100);
+			if (actual_min)
+				usleep(10);
 		} else if (r != -EINTR)
 			break;
 	} while (events < min);
@@ -177,7 +334,8 @@ static int fio_libaio_getevents(struct thread_data *td, unsigned int min,
 	return r < 0 ? r : events;
 }
 
-static int fio_libaio_queue(struct thread_data *td, struct io_u *io_u)
+static enum fio_q_status fio_libaio_queue(struct thread_data *td,
+					  struct io_u *io_u)
 {
 	struct libaio_data *ld = td->io_ops_data;
 
@@ -205,9 +363,14 @@ static int fio_libaio_queue(struct thread_data *td, struct io_u *io_u)
 			return FIO_Q_BUSY;
 
 		do_io_u_trim(td, io_u);
+		io_u_mark_submit(td, 1);
+		io_u_mark_complete(td, 1);
 		return FIO_Q_COMPLETED;
 	}
 
+	if (ld->use_cmdprio)
+		fio_libaio_cmdprio_prep(td, io_u);
+
 	ld->iocbs[ld->head] = &io_u->iocb;
 	ld->io_us[ld->head] = io_u;
 	ring_inc(ld, &ld->head, 1);
@@ -331,30 +494,29 @@ static void fio_libaio_cleanup(struct thread_data *td)
 	}
 }
 
-static int fio_libaio_init(struct thread_data *td)
+static int fio_libaio_post_init(struct thread_data *td)
 {
-	struct libaio_options *o = td->eo;
-	struct libaio_data *ld;
-	int err = 0;
-
-	ld = calloc(1, sizeof(*ld));
+	struct libaio_data *ld = td->io_ops_data;
+	int err;
 
-	/*
-	 * First try passing in 0 for queue depth, since we don't
-	 * care about the user ring. If that fails, the kernel is too old
-	 * and we need the right depth.
-	 */
-	if (!o->userspace_reap)
-		err = io_queue_init(INT_MAX, &ld->aio_ctx);
-	if (o->userspace_reap || err == -EINVAL)
-		err = io_queue_init(td->o.iodepth, &ld->aio_ctx);
+	err = io_queue_init(td->o.iodepth, &ld->aio_ctx);
 	if (err) {
 		td_verror(td, -err, "io_queue_init");
-		log_err("fio: check /proc/sys/fs/aio-max-nr\n");
-		free(ld);
 		return 1;
 	}
 
+	return 0;
+}
+
+static int fio_libaio_init(struct thread_data *td)
+{
+	struct libaio_data *ld;
+	struct libaio_options *o = td->eo;
+	struct cmdprio *cmdprio = &o->cmdprio;
+	int ret;
+
+	ld = calloc(1, sizeof(*ld));
+
 	ld->entries = td->o.iodepth;
 	ld->is_pow2 = is_power_of_2(ld->entries);
 	ld->aio_events = calloc(ld->entries, sizeof(struct io_event));
@@ -362,13 +524,22 @@ static int fio_libaio_init(struct thread_data *td)
 	ld->io_us = calloc(ld->entries, sizeof(struct io_u *));
 
 	td->io_ops_data = ld;
+
+	ret = fio_cmdprio_init(td, cmdprio, &ld->use_cmdprio);
+	if (ret) {
+		td_verror(td, EINVAL, "fio_libaio_init");
+		return 1;
+	}
+
 	return 0;
 }
 
-static struct ioengine_ops ioengine = {
+FIO_STATIC struct ioengine_ops ioengine = {
	.name		= "libaio",
	.version	= FIO_IOOPS_VERSION,
+	.flags		= FIO_ASYNCIO_SYNC_TRIM,
	.init		= fio_libaio_init,
+	.post_init	= fio_libaio_post_init,
	.prep		= fio_libaio_prep,
	.queue		= fio_libaio_queue,
	.commit		= fio_libaio_commit,
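For context, the sketch below (not part of the patch, and not fio code) shows what the per-request priority plumbing added above looks like from a plain libaio caller: io_prep_pread() fills the iocb, then aio_reqprio and IOCB_FLAG_IOPRIO are set before io_submit(), which mirrors what fio_libaio_prep() and fio_libaio_cmdprio_prep() do per io_u. The file path, block size, and RT class/level are arbitrary example values; it assumes the libaio headers are installed, a kernel new enough to honor IOCB_FLAG_IOPRIO (4.18+), and sufficient privileges (typically CAP_SYS_ADMIN or CAP_SYS_NICE) for the RT class. Build with something like "gcc -o aio_prio aio_prio.c -laio".

/*
 * Standalone sketch: submit one read with a per-request I/O priority.
 * "/tmp/testfile" and the RT class/level 0 are example values only.
 */
#include <errno.h>
#include <fcntl.h>
#include <libaio.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

#ifndef IOCB_FLAG_IOPRIO
#define IOCB_FLAG_IOPRIO (1 << 1)	/* same fallback the patch adds */
#endif

/* Kernel ioprio encoding: class in the top 3 bits, level in the rest. */
#define IOPRIO_CLASS_SHIFT	13
#define IOPRIO_CLASS_RT		1
#define IOPRIO_PRIO_VALUE(class, level)	(((class) << IOPRIO_CLASS_SHIFT) | (level))

int main(void)
{
	io_context_t ctx = 0;
	struct iocb iocb, *iocbs[1] = { &iocb };
	struct io_event ev;
	char buf[4096];
	int fd, ret;

	fd = open("/tmp/testfile", O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	ret = io_queue_init(1, &ctx);
	if (ret < 0) {
		fprintf(stderr, "io_queue_init: %d\n", ret);
		return 1;
	}

	/* Prepare the read, then tag it with an RT class priority,
	 * the same order of operations as prep + cmdprio_prep above. */
	io_prep_pread(&iocb, fd, buf, sizeof(buf), 0);
	iocb.aio_reqprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_RT, 0);
	iocb.u.c.flags |= IOCB_FLAG_IOPRIO;

	ret = io_submit(ctx, 1, iocbs);
	if (ret != 1) {
		fprintf(stderr, "io_submit: %d\n", ret);
		return 1;
	}

	ret = io_getevents(ctx, 1, 1, &ev, NULL);
	if (ret == 1)
		printf("read %ld bytes (res2=%ld)\n", (long)ev.res, (long)ev.res2);

	io_queue_release(ctx);
	close(fd);
	return 0;
}

This is roughly the fio job-option equivalent of cmdprio_class=1 and cmdprio=0 applied to a single request; an old kernel or missing privilege will surface as an error in ev.res or from io_submit() rather than being silently ignored.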