X-Git-Url: https://git.kernel.dk/?p=fio.git;a=blobdiff_plain;f=engines%2Fio_uring.c;h=27a4a67860956153a450e3aab099dbd47c63b909;hp=5b3509a98d734bd3fe50157b81a3aa1a358b9547;hb=03ec570f6e571b1731378b8fcf4843e8051db7ed;hpb=051382218cbe5101a5caa83eab55ed04608f8475

diff --git a/engines/io_uring.c b/engines/io_uring.c
index 5b3509a9..27a4a678 100644
--- a/engines/io_uring.c
+++ b/engines/io_uring.c
@@ -17,11 +17,13 @@
 #include "../optgroup.h"
 #include "../lib/memalign.h"
 #include "../lib/fls.h"
+#include "../lib/roundup.h"
 
 #ifdef ARCH_HAVE_IOURING
 
 #include "../lib/types.h"
 #include "../os/linux/io_uring.h"
+#include "cmdprio.h"
 
 struct io_sq_ring {
 	unsigned *head;
@@ -50,6 +52,8 @@ struct ioring_data {
 
 	struct io_u **io_u_index;
 
+	int *fds;
+
 	struct io_sq_ring sq_ring;
 	struct io_uring_sqe *sqes;
 	struct iovec *iovecs;
@@ -61,17 +65,36 @@ struct ioring_data {
 	int queued;
 	int cq_ring_off;
 	unsigned iodepth;
+	int prepped;
 
 	struct ioring_mmap mmap[3];
+
+	bool use_cmdprio;
 };
 
 struct ioring_options {
-	void *pad;
+	struct thread_data *td;
 	unsigned int hipri;
+	struct cmdprio cmdprio;
 	unsigned int fixedbufs;
+	unsigned int registerfiles;
 	unsigned int sqpoll_thread;
 	unsigned int sqpoll_set;
 	unsigned int sqpoll_cpu;
+	unsigned int nonvectored;
+	unsigned int uncached;
+	unsigned int nowait;
+	unsigned int force_async;
+};
+
+static const int ddir_to_op[2][2] = {
+	{ IORING_OP_READV, IORING_OP_READ },
+	{ IORING_OP_WRITEV, IORING_OP_WRITE }
+};
+
+static const int fixed_ddir_to_op[2] = {
+	IORING_OP_READ_FIXED,
+	IORING_OP_WRITE_FIXED
 };
 
 static int fio_ioring_sqpoll_cb(void *data, unsigned long long *val)
@@ -83,6 +106,15 @@ static int fio_ioring_sqpoll_cb(void *data, unsigned long long *val)
 	return 0;
 }
 
+static int str_cmdprio_bssplit_cb(void *data, const char *input)
+{
+	struct ioring_options *o = data;
+	struct thread_data *td = o->td;
+	struct cmdprio *cmdprio = &o->cmdprio;
+
+	return fio_cmdprio_bssplit_parse(td, input, cmdprio);
+}
+
 static struct fio_option options[] = {
 	{
 		.name	= "hipri",
@@ -91,8 +123,89 @@ static struct fio_option options[] = {
 		.off1	= offsetof(struct ioring_options, hipri),
 		.help	= "Use polled IO completions",
 		.category = FIO_OPT_C_ENGINE,
-		.group	= FIO_OPT_G_LIBAIO,
+		.group	= FIO_OPT_G_IOURING,
 	},
+#ifdef FIO_HAVE_IOPRIO_CLASS
+	{
+		.name	= "cmdprio_percentage",
+		.lname	= "high priority percentage",
+		.type	= FIO_OPT_INT,
+		.off1	= offsetof(struct ioring_options,
+				   cmdprio.percentage[DDIR_READ]),
+		.off2	= offsetof(struct ioring_options,
+				   cmdprio.percentage[DDIR_WRITE]),
+		.minval	= 0,
+		.maxval	= 100,
+		.help	= "Send high priority I/O this percentage of the time",
+		.category = FIO_OPT_C_ENGINE,
+		.group	= FIO_OPT_G_IOURING,
+	},
+	{
+		.name	= "cmdprio_class",
+		.lname	= "Asynchronous I/O priority class",
+		.type	= FIO_OPT_INT,
+		.off1	= offsetof(struct ioring_options,
+				   cmdprio.class[DDIR_READ]),
+		.off2	= offsetof(struct ioring_options,
+				   cmdprio.class[DDIR_WRITE]),
+		.help	= "Set asynchronous IO priority class",
+		.minval	= IOPRIO_MIN_PRIO_CLASS + 1,
+		.maxval	= IOPRIO_MAX_PRIO_CLASS,
+		.interval = 1,
+		.category = FIO_OPT_C_ENGINE,
+		.group	= FIO_OPT_G_IOURING,
+	},
+	{
+		.name	= "cmdprio",
+		.lname	= "Asynchronous I/O priority level",
+		.type	= FIO_OPT_INT,
+		.off1	= offsetof(struct ioring_options,
+				   cmdprio.level[DDIR_READ]),
+		.off2	= offsetof(struct ioring_options,
+				   cmdprio.level[DDIR_WRITE]),
+		.help	= "Set asynchronous IO priority level",
+		.minval	= IOPRIO_MIN_PRIO,
+		.maxval	= IOPRIO_MAX_PRIO,
+		.interval = 1,
+		.category = FIO_OPT_C_ENGINE,
+		.group	= FIO_OPT_G_IOURING,
+	},
+	{
+		.name	= "cmdprio_bssplit",
+		.lname	= "Priority percentage block size split",
+		.type	= FIO_OPT_STR_ULL,
+		.cb	= str_cmdprio_bssplit_cb,
+		.off1	= offsetof(struct ioring_options, cmdprio.bssplit),
+		.help	= "Set priority percentages for different block sizes",
+		.category = FIO_OPT_C_ENGINE,
+		.group	= FIO_OPT_G_IOURING,
+	},
+#else
+	{
+		.name	= "cmdprio_percentage",
+		.lname	= "high priority percentage",
+		.type	= FIO_OPT_UNSUPPORTED,
+		.help	= "Your platform does not support I/O priority classes",
+	},
+	{
+		.name	= "cmdprio_class",
+		.lname	= "Asynchronous I/O priority class",
+		.type	= FIO_OPT_UNSUPPORTED,
+		.help	= "Your platform does not support I/O priority classes",
+	},
+	{
+		.name	= "cmdprio",
+		.lname	= "Asynchronous I/O priority level",
+		.type	= FIO_OPT_UNSUPPORTED,
+		.help	= "Your platform does not support I/O priority classes",
+	},
+	{
+		.name	= "cmdprio_bssplit",
+		.lname	= "Priority percentage block size split",
+		.type	= FIO_OPT_UNSUPPORTED,
+		.help	= "Your platform does not support I/O priority classes",
+	},
+#endif
 	{
 		.name	= "fixedbufs",
 		.lname	= "Fixed (pre-mapped) IO buffers",
 		.type	= FIO_OPT_STR_SET,
@@ -100,7 +213,16 @@ static struct fio_option options[] = {
 		.off1	= offsetof(struct ioring_options, fixedbufs),
 		.help	= "Pre map IO buffers",
 		.category = FIO_OPT_C_ENGINE,
-		.group	= FIO_OPT_G_LIBAIO,
+		.group	= FIO_OPT_G_IOURING,
+	},
+	{
+		.name	= "registerfiles",
+		.lname	= "Register file set",
+		.type	= FIO_OPT_STR_SET,
+		.off1	= offsetof(struct ioring_options, registerfiles),
+		.help	= "Pre-open/register files",
+		.category = FIO_OPT_C_ENGINE,
+		.group	= FIO_OPT_G_IOURING,
 	},
 	{
 		.name	= "sqthread_poll",
@@ -109,7 +231,7 @@ static struct fio_option options[] = {
 		.off1	= offsetof(struct ioring_options, sqpoll_thread),
 		.help	= "Offload submission/completion to kernel thread",
 		.category = FIO_OPT_C_ENGINE,
-		.group	= FIO_OPT_G_LIBAIO,
+		.group	= FIO_OPT_G_IOURING,
 	},
 	{
 		.name	= "sqthread_poll_cpu",
@@ -118,7 +240,44 @@ static struct fio_option options[] = {
 		.cb	= fio_ioring_sqpoll_cb,
 		.help	= "What CPU to run SQ thread polling on",
 		.category = FIO_OPT_C_ENGINE,
-		.group	= FIO_OPT_G_LIBAIO,
+		.group	= FIO_OPT_G_IOURING,
+	},
+	{
+		.name	= "nonvectored",
+		.lname	= "Non-vectored",
+		.type	= FIO_OPT_INT,
+		.off1	= offsetof(struct ioring_options, nonvectored),
+		.def	= "-1",
+		.help	= "Use non-vectored read/write commands",
+		.category = FIO_OPT_C_ENGINE,
+		.group	= FIO_OPT_G_IOURING,
+	},
+	{
+		.name	= "uncached",
+		.lname	= "Uncached",
+		.type	= FIO_OPT_INT,
+		.off1	= offsetof(struct ioring_options, uncached),
+		.help	= "Use RWF_UNCACHED for buffered read/writes",
+		.category = FIO_OPT_C_ENGINE,
+		.group	= FIO_OPT_G_IOURING,
+	},
+	{
+		.name	= "nowait",
+		.lname	= "RWF_NOWAIT",
+		.type	= FIO_OPT_BOOL,
+		.off1	= offsetof(struct ioring_options, nowait),
+		.help	= "Use RWF_NOWAIT for reads/writes",
+		.category = FIO_OPT_C_ENGINE,
+		.group	= FIO_OPT_G_IOURING,
+	},
+	{
+		.name	= "force_async",
+		.lname	= "Force async",
+		.type	= FIO_OPT_INT,
+		.off1	= offsetof(struct ioring_options, force_async),
+		.help	= "Set IOSQE_ASYNC every N requests",
+		.category = FIO_OPT_C_ENGINE,
+		.group	= FIO_OPT_G_IOURING,
 	},
 	{
 		.name	= NULL,
@@ -128,7 +287,7 @@ static int io_uring_enter(struct ioring_data *ld, unsigned int to_submit,
 			 unsigned int min_complete, unsigned int flags)
 {
-	return syscall(__NR_sys_io_uring_enter, ld->ring_fd, to_submit,
+	return syscall(__NR_io_uring_enter, ld->ring_fd, to_submit,
 			min_complete, flags, NULL, 0);
 }
 
@@ -140,34 +299,66 @@ static int fio_ioring_prep(struct thread_data *td, struct io_u *io_u)
 	struct io_uring_sqe *sqe;
 
 	sqe = &ld->sqes[io_u->index];
-	sqe->fd = f->fd;
-	sqe->flags = 0;
-	sqe->ioprio = 0;
-	sqe->buf_index = 0;
+
+	if (o->registerfiles) {
+		sqe->fd = f->engine_pos;
+		sqe->flags = IOSQE_FIXED_FILE;
+	} else {
+		sqe->fd = f->fd;
+		sqe->flags = 0;
+	}
 
 	if (io_u->ddir == DDIR_READ || io_u->ddir == DDIR_WRITE) {
 		if (o->fixedbufs) {
-			if (io_u->ddir == DDIR_READ)
-				sqe->opcode = IORING_OP_READ_FIXED;
-			else
-				sqe->opcode = IORING_OP_WRITE_FIXED;
+			sqe->opcode = fixed_ddir_to_op[io_u->ddir];
 			sqe->addr = (unsigned long) io_u->xfer_buf;
 			sqe->len = io_u->xfer_buflen;
 			sqe->buf_index = io_u->index;
 		} else {
-			if (io_u->ddir == DDIR_READ)
-				sqe->opcode = IORING_OP_READV;
-			else
-				sqe->opcode = IORING_OP_WRITEV;
-			sqe->addr = (unsigned long) &ld->iovecs[io_u->index];
-			sqe->len = 1;
+			struct iovec *iov = &ld->iovecs[io_u->index];
+
+			/*
+			 * Update based on actual io_u, requeue could have
+			 * adjusted these
+			 */
+			iov->iov_base = io_u->xfer_buf;
+			iov->iov_len = io_u->xfer_buflen;
+
+			sqe->opcode = ddir_to_op[io_u->ddir][!!o->nonvectored];
+			if (o->nonvectored) {
+				sqe->addr = (unsigned long) iov->iov_base;
+				sqe->len = iov->iov_len;
+			} else {
+				sqe->addr = (unsigned long) iov;
+				sqe->len = 1;
+			}
 		}
+		sqe->rw_flags = 0;
+		if (!td->o.odirect && o->uncached)
+			sqe->rw_flags |= RWF_UNCACHED;
+		if (o->nowait)
+			sqe->rw_flags |= RWF_NOWAIT;
 		sqe->off = io_u->offset;
 	} else if (ddir_sync(io_u->ddir)) {
-		sqe->fsync_flags = 0;
-		if (io_u->ddir == DDIR_DATASYNC)
-			sqe->fsync_flags |= IORING_FSYNC_DATASYNC;
-		sqe->opcode = IORING_OP_FSYNC;
+		sqe->ioprio = 0;
+		if (io_u->ddir == DDIR_SYNC_FILE_RANGE) {
+			sqe->off = f->first_write;
+			sqe->len = f->last_write - f->first_write;
+			sqe->sync_range_flags = td->o.sync_file_range;
+			sqe->opcode = IORING_OP_SYNC_FILE_RANGE;
+		} else {
+			sqe->off = 0;
+			sqe->addr = 0;
+			sqe->len = 0;
+			if (io_u->ddir == DDIR_DATASYNC)
+				sqe->fsync_flags |= IORING_FSYNC_DATASYNC;
+			sqe->opcode = IORING_OP_FSYNC;
+		}
+	}
+
+	if (o->force_async && ++ld->prepped == o->force_async) {
+		ld->prepped = 0;
+		sqe->flags |= IOSQE_ASYNC;
 	}
 
 	sqe->user_data = (unsigned long) io_u;
@@ -206,15 +397,15 @@
 
 	head = *ring->head;
 	do {
-		read_barrier();
-		if (head == *ring->tail)
+		if (head == atomic_load_acquire(ring->tail))
 			break;
 		reaped++;
 		head++;
 	} while (reaped + events < max);
 
-	*ring->head = head;
-	write_barrier();
+	if (reaped)
+		atomic_store_release(ring->head, head);
+
 	return reaped;
 }
 
@@ -233,6 +424,8 @@ static int fio_ioring_getevents(struct thread_data *td, unsigned int min,
 		r = fio_ioring_cqring_reap(td, events, max);
 		if (r) {
 			events += r;
+			if (actual_min != 0)
+				actual_min -= r;
 			continue;
 		}
 
@@ -240,7 +433,7 @@
 			r = io_uring_enter(ld, 0, actual_min,
 						IORING_ENTER_GETEVENTS);
 			if (r < 0) {
-				if (errno == EAGAIN)
+				if (errno == EAGAIN || errno == EINTR)
 					continue;
 				td_verror(td, errno, "io_uring_enter");
 				break;
@@ -251,6 +444,43 @@ static int fio_ioring_getevents(struct thread_data *td, unsigned int min,
 	return r < 0 ? r : events;
 }
 
+static void fio_ioring_prio_prep(struct thread_data *td, struct io_u *io_u)
+{
+	struct ioring_options *o = td->eo;
+	struct ioring_data *ld = td->io_ops_data;
+	struct io_uring_sqe *sqe = &ld->sqes[io_u->index];
+	struct cmdprio *cmdprio = &o->cmdprio;
+	enum fio_ddir ddir = io_u->ddir;
+	unsigned int p = fio_cmdprio_percentage(cmdprio, io_u);
+	unsigned int cmdprio_value =
+		ioprio_value(cmdprio->class[ddir], cmdprio->level[ddir]);
+
+	if (p && rand_between(&td->prio_state, 0, 99) < p) {
+		sqe->ioprio = cmdprio_value;
+		if (!td->ioprio || cmdprio_value < td->ioprio) {
+			/*
+			 * The async IO priority is higher (has a lower value)
+			 * than the priority set by "prio" and "prioclass"
+			 * options.
+			 */
+			io_u->flags |= IO_U_F_HIGH_PRIO;
+		}
+	} else {
+		sqe->ioprio = td->ioprio;
+		if (cmdprio_value && td->ioprio && td->ioprio < cmdprio_value) {
+			/*
+			 * The IO will be executed with the priority set by
+			 * "prio" and "prioclass" options, and this priority
+			 * is higher (has a lower value) than the async IO
+			 * priority.
+			 */
+			io_u->flags |= IO_U_F_HIGH_PRIO;
+		}
+	}
+
+	io_u->ioprio = sqe->ioprio;
+}
+
 static enum fio_q_status fio_ioring_queue(struct thread_data *td,
 					  struct io_u *io_u)
 {
@@ -275,15 +505,13 @@ static enum fio_q_status fio_ioring_queue(struct thread_data *td,
 
 	tail = *ring->tail;
 	next_tail = tail + 1;
-	read_barrier();
-	if (next_tail == *ring->head)
+	if (next_tail == atomic_load_acquire(ring->head))
 		return FIO_Q_BUSY;
 
-	/* ensure sqe stores are ordered with tail update */
-	write_barrier();
+	if (ld->use_cmdprio)
+		fio_ioring_prio_prep(td, io_u);
 	ring->array[tail & ld->sq_ring_mask] = io_u->index;
-	*ring->tail = next_tail;
-	write_barrier();
+	atomic_store_release(ring->tail, next_tail);
 
 	ld->queued++;
 	return FIO_Q_QUEUED;
@@ -327,9 +555,10 @@ static int fio_ioring_commit(struct thread_data *td)
 	 */
 	if (o->sqpoll_thread) {
 		struct io_sq_ring *ring = &ld->sq_ring;
+		unsigned flags;
 
-		read_barrier();
-		if (*ring->flags & IORING_SQ_NEED_WAKEUP)
+		flags = atomic_load_acquire(ring->flags);
+		if (flags & IORING_SQ_NEED_WAKEUP)
 			io_uring_enter(ld, ld->queued, 0,
 					IORING_ENTER_SQ_WAKEUP);
 		ld->queued = 0;
@@ -351,7 +580,7 @@ static int fio_ioring_commit(struct thread_data *td)
 			io_u_mark_submit(td, ret);
 			continue;
 		} else {
-			if (errno == EAGAIN) {
+			if (errno == EAGAIN || errno == EINTR) {
 				ret = fio_ioring_cqring_reap(td, 0, ld->queued);
 				if (ret)
 					continue;
@@ -371,7 +600,7 @@ static void fio_ioring_unmap(struct ioring_data *ld)
 {
 	int i;
 
-	for (i = 0; i < ARRAY_SIZE(ld->mmap); i++)
+	for (i = 0; i < FIO_ARRAY_SIZE(ld->mmap); i++)
 		munmap(ld->mmap[i].ptr, ld->mmap[i].len);
 	close(ld->ring_fd);
 }
@@ -386,6 +615,7 @@ static void fio_ioring_cleanup(struct thread_data *td)
 
 		free(ld->io_u_index);
 		free(ld->iovecs);
+		free(ld->fds);
 		free(ld);
 	}
 }
@@ -430,6 +660,40 @@
 	return 0;
 }
 
+static void fio_ioring_probe(struct thread_data *td)
+{
+	struct ioring_data *ld = td->io_ops_data;
+	struct ioring_options *o = td->eo;
+	struct io_uring_probe *p;
+	int ret;
+
+	/* already set by user, don't touch */
+	if (o->nonvectored != -1)
+		return;
+
+	/* default to off, as that's always safe */
+	o->nonvectored = 0;
+
+	p = malloc(sizeof(*p) + 256 * sizeof(struct io_uring_probe_op));
+	if (!p)
+		return;
+
+	memset(p, 0, sizeof(*p) + 256 * sizeof(struct io_uring_probe_op));
+	ret = syscall(__NR_io_uring_register, ld->ring_fd,
+			IORING_REGISTER_PROBE, p, 256);
+	if (ret < 0)
+		goto out;
+
+	if (IORING_OP_WRITE > p->ops_len)
+		goto out;
+
+	if ((p->ops[IORING_OP_READ].flags & IO_URING_OP_SUPPORTED) &&
+	    (p->ops[IORING_OP_WRITE].flags & IO_URING_OP_SUPPORTED))
+		o->nonvectored = 1;
+out:
+	free(p);
+}
+
 static int fio_ioring_queue_init(struct thread_data *td)
 {
 	struct ioring_data *ld = td->io_ops_data;
@@ -450,22 +714,16 @@ static int fio_ioring_queue_init(struct thread_data *td)
 		}
 	}
 
-	ret = syscall(__NR_sys_io_uring_setup, depth, &p);
+	ret = syscall(__NR_io_uring_setup, depth, &p);
 	if (ret < 0)
 		return ret;
 
 	ld->ring_fd = ret;
 
-	if (o->fixedbufs) {
-		struct rlimit rlim = {
-			.rlim_cur = RLIM_INFINITY,
-			.rlim_max = RLIM_INFINITY,
-		};
-
-		if (setrlimit(RLIMIT_MEMLOCK, &rlim) < 0)
-			return -1;
+	fio_ioring_probe(td);
 
-		ret = syscall(__NR_sys_io_uring_register, ld->ring_fd,
+	if (o->fixedbufs) {
+		ret = syscall(__NR_io_uring_register, ld->ring_fd,
 				IORING_REGISTER_BUFFERS, ld->iovecs, depth);
 		if (ret < 0)
 			return ret;
@@ -474,9 +732,50 @@ static int fio_ioring_queue_init(struct thread_data *td)
 
 	return fio_ioring_mmap(ld, &p);
 }
 
+static int fio_ioring_register_files(struct thread_data *td)
+{
+	struct ioring_data *ld = td->io_ops_data;
+	struct fio_file *f;
+	unsigned int i;
+	int ret;
+
+	ld->fds = calloc(td->o.nr_files, sizeof(int));
+
+	for_each_file(td, f, i) {
+		ret = generic_open_file(td, f);
+		if (ret)
+			goto err;
+		ld->fds[i] = f->fd;
+		f->engine_pos = i;
+	}
+
+	ret = syscall(__NR_io_uring_register, ld->ring_fd,
+			IORING_REGISTER_FILES, ld->fds, td->o.nr_files);
+	if (ret) {
+err:
+		free(ld->fds);
+		ld->fds = NULL;
+	}
+
+	/*
+	 * Pretend the file is closed again, and really close it if we hit
+	 * an error.
+	 */
+	for_each_file(td, f, i) {
+		if (ret) {
+			int fio_unused ret2;
+			ret2 = generic_close_file(td, f);
+		} else
+			f->fd = -1;
+	}
+
+	return ret;
+}
+
 static int fio_ioring_post_init(struct thread_data *td)
 {
 	struct ioring_data *ld = td->io_ops_data;
+	struct ioring_options *o = td->eo;
 	struct io_u *io_u;
 	int err, i;
@@ -490,21 +789,49 @@ static int fio_ioring_post_init(struct thread_data *td)
 
 	err = fio_ioring_queue_init(td);
 	if (err) {
-		td_verror(td, errno, "io_queue_init");
+		int init_err = errno;
+
+		if (init_err == ENOSYS)
+			log_err("fio: your kernel doesn't support io_uring\n");
+		td_verror(td, init_err, "io_queue_init");
 		return 1;
 	}
 
-	return 0;
-}
+	for (i = 0; i < td->o.iodepth; i++) {
+		struct io_uring_sqe *sqe;
 
-static unsigned roundup_pow2(unsigned depth)
-{
-	return 1UL << __fls(depth - 1);
+		sqe = &ld->sqes[i];
+		memset(sqe, 0, sizeof(*sqe));
+	}
+
+	if (o->registerfiles) {
+		err = fio_ioring_register_files(td);
+		if (err) {
+			td_verror(td, errno, "ioring_register_files");
+			return 1;
+		}
+	}
+
+	return 0;
 }
 
 static int fio_ioring_init(struct thread_data *td)
 {
+	struct ioring_options *o = td->eo;
 	struct ioring_data *ld;
+	struct cmdprio *cmdprio = &o->cmdprio;
+	bool has_cmdprio = false;
+	int ret;
+
+	/* sqthread submission requires registered files */
+	if (o->sqpoll_thread)
+		o->registerfiles = 1;
+
+	if (o->registerfiles && td->o.nr_files != td->o.open_files) {
+		log_err("fio: io_uring registered files require nr_files to "
+			"be identical to open_files\n");
+		return 1;
+	}
 
 	ld = calloc(1, sizeof(*ld));
 
@@ -517,6 +844,23 @@ static int fio_ioring_init(struct thread_data *td)
 	ld->iovecs = calloc(td->o.iodepth, sizeof(struct iovec));
 
 	td->io_ops_data = ld;
+
+	ret = fio_cmdprio_init(td, cmdprio, &has_cmdprio);
+	if (ret) {
+		td_verror(td, EINVAL, "fio_ioring_init");
+		return 1;
+	}
+
+	/*
+	 * Since io_uring can have a submission context (sqthread_poll) that is
+	 * different from the process context, we cannot rely on the IO
+	 * priority set by ioprio_set() (option prio/prioclass) to be inherited.
+	 * Therefore, we set the sqe->ioprio field when prio/prioclass is used.
+	 */
+	ld->use_cmdprio = has_cmdprio ||
+		fio_option_is_set(&td->o, ioprio_class) ||
+		fio_option_is_set(&td->o, ioprio);
+
 	return 0;
 }
 
@@ -528,9 +872,34 @@ static int fio_ioring_io_u_init(struct thread_data *td, struct io_u *io_u)
 	return 0;
 }
 
+static int fio_ioring_open_file(struct thread_data *td, struct fio_file *f)
+{
+	struct ioring_data *ld = td->io_ops_data;
+	struct ioring_options *o = td->eo;
+
+	if (!ld || !o->registerfiles)
+		return generic_open_file(td, f);
+
+	f->fd = ld->fds[f->engine_pos];
+	return 0;
+}
+
+static int fio_ioring_close_file(struct thread_data *td, struct fio_file *f)
+{
+	struct ioring_data *ld = td->io_ops_data;
+	struct ioring_options *o = td->eo;
+
+	if (!ld || !o->registerfiles)
+		return generic_close_file(td, f);
+
+	f->fd = -1;
+	return 0;
+}
+
 static struct ioengine_ops ioengine = {
 	.name			= "io_uring",
 	.version		= FIO_IOOPS_VERSION,
+	.flags			= FIO_ASYNCIO_SYNC_TRIM | FIO_NO_OFFLOAD,
 	.init			= fio_ioring_init,
 	.post_init		= fio_ioring_post_init,
 	.io_u_init		= fio_ioring_io_u_init,
@@ -540,8 +909,8 @@ static struct ioengine_ops ioengine = {
 	.getevents		= fio_ioring_getevents,
 	.event			= fio_ioring_event,
 	.cleanup		= fio_ioring_cleanup,
-	.open_file		= generic_open_file,
-	.close_file		= generic_close_file,
+	.open_file		= fio_ioring_open_file,
+	.close_file		= fio_ioring_close_file,
 	.get_file_size		= generic_get_file_size,
 	.options		= options,
 	.option_struct_size	= sizeof(struct ioring_options),
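
The most subtle change in the patch is dropping the explicit read_barrier()/write_barrier() pairs around the shared SQ/CQ ring indices in favor of atomic_load_acquire()/atomic_store_release(). The pattern is the classic single-producer/single-consumer ring handshake: the store that publishes a new tail must become visible only after the store to the array slot it covers, and the reader must observe them in that order. A minimal sketch of the producer side, written with C11 <stdatomic.h> rather than fio's lib/ helpers (struct ring and ring_push() are illustrative names, not fio's):

#include <stdatomic.h>

struct ring {
	_Atomic unsigned head;	/* consumer index */
	_Atomic unsigned tail;	/* producer index */
	unsigned mask;		/* size - 1, size is a power of two */
	unsigned *array;	/* slots published to the consumer */
};

/* Producer side, cf. fio_ioring_queue() after the patch. */
static int ring_push(struct ring *r, unsigned idx)
{
	unsigned tail = atomic_load_explicit(&r->tail, memory_order_relaxed);

	/* Acquire pairs with the consumer's release store of head. */
	if (tail + 1 == atomic_load_explicit(&r->head, memory_order_acquire))
		return -1;	/* ring is full */

	r->array[tail & r->mask] = idx;
	/* Release: the slot store above is visible before the new tail. */
	atomic_store_explicit(&r->tail, tail + 1, memory_order_release);
	return 0;
}

The reaping side in fio_ioring_cqring_reap() is the mirror image: it loads the kernel-written tail with acquire semantics before reading entries, and stores the new head with release once, after the loop, instead of issuing a full barrier per iteration.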
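fio_ioring_prio_prep() decides which priority "wins" by comparing raw values. That works because of how Linux packs an I/O priority: the class sits above the level (class << IOPRIO_CLASS_SHIFT | level, with a shift of 13), so a numerically lower packed value means a higher priority, RT (class 1) beating BE (class 2). A compilable illustration under those kernel-ABI assumptions (prio_pack() is a hypothetical stand-in for fio's ioprio_value(), not its real helper):

#include <stdio.h>

#define IOPRIO_CLASS_SHIFT	13
#define IOPRIO_CLASS_RT		1	/* realtime */
#define IOPRIO_CLASS_BE		2	/* best effort */

static unsigned int prio_pack(unsigned int class, unsigned int level)
{
	return (class << IOPRIO_CLASS_SHIFT) | level;
}

int main(void)
{
	unsigned int rt4 = prio_pack(IOPRIO_CLASS_RT, 4);	/* 0x2004 */
	unsigned int be0 = prio_pack(IOPRIO_CLASS_BE, 0);	/* 0x4000 */

	/* RT level 4 compares lower than BE level 0, i.e. higher prio. */
	printf("rt4=%#x be0=%#x, higher priority: %s\n",
	       rt4, be0, rt4 < be0 ? "rt4" : "be0");
	return 0;
}

This is why the code can set IO_U_F_HIGH_PRIO with a plain "<" on the packed values, without unpacking class and level separately.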
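fio_ioring_probe() resolves the nonvectored default (-1 meaning "auto") by asking the kernel which opcodes it supports via IORING_REGISTER_PROBE. The same query can be issued standalone; a sketch using only the raw register syscall and the uapi header, assuming a ring_fd already obtained from io_uring_setup(2) (op_supported() is a hypothetical helper, error handling abbreviated):

#include <linux/io_uring.h>
#include <stdlib.h>
#include <sys/syscall.h>
#include <unistd.h>

static int op_supported(int ring_fd, int opcode)
{
	/* Probe buffer: header plus room for 256 per-opcode entries. */
	size_t len = sizeof(struct io_uring_probe) +
		     256 * sizeof(struct io_uring_probe_op);
	struct io_uring_probe *p = calloc(1, len);
	int ok = 0;

	if (!p)
		return 0;
	if (syscall(__NR_io_uring_register, ring_fd,
		    IORING_REGISTER_PROBE, p, 256) >= 0 &&
	    opcode <= p->ops_len &&
	    (p->ops[opcode].flags & IO_URING_OP_SUPPORTED))
		ok = 1;
	free(p);
	return ok;
}

Probing matters because the non-vectored IORING_OP_READ/IORING_OP_WRITE opcodes arrived later (kernel 5.6) than the vectored ones, so defaulting nonvectored to off is the safe choice on older kernels — exactly what the patched function does when the probe fails.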