X-Git-Url: https://git.kernel.dk/?p=fio.git;a=blobdiff_plain;f=t%2Fio_uring.c;h=8d3f3a9ba198086dd1145d48ef01185c9849054f;hp=83d723f938e88d547bcab93482441fdecd73caa4;hb=8025517dfa599be4bc795e4af7c9012d10b81bc5;hpb=e31b82888efc4986d89f3609e9edbc7205642bc8 diff --git a/t/io_uring.c b/t/io_uring.c index 83d723f9..8d3f3a9b 100644 --- a/t/io_uring.c +++ b/t/io_uring.c @@ -21,13 +21,7 @@ #include #include "../arch/arch.h" - -typedef uint64_t u64; -typedef uint32_t u32; -typedef int32_t s32; -typedef uint16_t u16; -typedef uint8_t u8; - +#include "../lib/types.h" #include "../os/io_uring.h" #define barrier() __asm__ __volatile__("": : :"memory") @@ -35,37 +29,46 @@ typedef uint8_t u8; #define min(a, b) ((a < b) ? (a) : (b)) struct io_sq_ring { - u32 *head; - u32 *tail; - u32 *ring_mask; - u32 *ring_entries; - u32 *array; + unsigned *head; + unsigned *tail; + unsigned *ring_mask; + unsigned *ring_entries; + unsigned *flags; + unsigned *array; }; struct io_cq_ring { - u32 *head; - u32 *tail; - u32 *ring_mask; - u32 *ring_entries; - struct io_uring_event *events; + unsigned *head; + unsigned *tail; + unsigned *ring_mask; + unsigned *ring_entries; + struct io_uring_cqe *cqes; }; -#define DEPTH 32 +#define DEPTH 128 -#define BATCH_SUBMIT 8 -#define BATCH_COMPLETE 8 +#define BATCH_SUBMIT 64 +#define BATCH_COMPLETE 64 #define BS 4096 +#define MAX_FDS 16 + static unsigned sq_ring_mask, cq_ring_mask; +struct file { + unsigned long max_blocks; + unsigned pending_ios; + int real_fd; + int fixed_fd; +}; + struct submitter { pthread_t thread; - unsigned long max_blocks; int ring_fd; struct drand48_data rand; struct io_sq_ring sq_ring; - struct io_uring_iocb *iocbs; + struct io_uring_sqe *sqes; struct iovec iovecs[DEPTH]; struct io_cq_ring cq_ring; int inflight; @@ -74,22 +77,61 @@ struct submitter { unsigned long calls; unsigned long cachehit, cachemiss; volatile int finish; - char filename[128]; + + __s32 *fds; + + struct file files[MAX_FDS]; + unsigned nr_files; + unsigned cur_file; }; static struct submitter submitters[1]; static volatile int finish; -static int polled = 0; /* use IO polling */ -static int fixedbufs = 0; /* use fixed user buffers */ -static int buffered = 1; /* use buffered IO, not O_DIRECT */ -static int sq_thread = 0; /* use kernel submission thread */ -static int sq_thread_cpu = 0; /* pin above thread to this CPU */ +static int polled = 1; /* use IO polling */ +static int fixedbufs = 1; /* use fixed user buffers */ +static int buffered = 0; /* use buffered IO, not O_DIRECT */ +static int sq_thread_poll = 0; /* use kernel submission/poller thread */ +static int sq_thread_cpu = -1; /* pin above thread to this CPU */ +static int do_nop = 0; /* no-op SQ ring commands */ + +static int io_uring_register_buffers(struct submitter *s) +{ + struct io_uring_register_buffers reg = { + .iovecs = s->iovecs, + .nr_iovecs = DEPTH + }; + + if (do_nop) + return 0; + + return syscall(__NR_sys_io_uring_register, s->ring_fd, + IORING_REGISTER_BUFFERS, ®); +} + +static int io_uring_register_files(struct submitter *s) +{ + struct io_uring_register_files reg; + int i; + + if (do_nop) + return 0; + + s->fds = calloc(s->nr_files, sizeof(__s32)); + for (i = 0; i < s->nr_files; i++) { + s->fds[i] = s->files[i].real_fd; + s->files[i].fixed_fd = i; + } + reg.fds = s->fds; + reg.nr_fds = s->nr_files; + + return syscall(__NR_sys_io_uring_register, s->ring_fd, + IORING_REGISTER_FILES, ®); +} -static int io_uring_setup(unsigned entries, struct iovec *iovecs, - struct io_uring_params *p) +static int 
io_uring_setup(unsigned entries, struct io_uring_params *p) { - return syscall(__NR_sys_io_uring_setup, entries, iovecs, p); + return syscall(__NR_sys_io_uring_setup, entries, p); } static int io_uring_enter(struct submitter *s, unsigned int to_submit, @@ -104,28 +146,60 @@ static int gettid(void) return syscall(__NR_gettid); } -static void init_io(struct submitter *s, int fd, unsigned index) +static unsigned file_depth(struct submitter *s) { - struct io_uring_iocb *iocb = &s->iocbs[index]; + return (DEPTH + s->nr_files - 1) / s->nr_files; +} + +static void init_io(struct submitter *s, unsigned index) +{ + struct io_uring_sqe *sqe = &s->sqes[index]; unsigned long offset; + struct file *f; long r; + if (do_nop) { + sqe->opcode = IORING_OP_NOP; + return; + } + + if (s->nr_files == 1) { + f = &s->files[0]; + } else { + f = &s->files[s->cur_file]; + if (f->pending_ios >= file_depth(s)) { + s->cur_file++; + if (s->cur_file == s->nr_files) + s->cur_file = 0; + } + } + f->pending_ios++; + lrand48_r(&s->rand, &r); - offset = (r % (s->max_blocks - 1)) * BS; - - iocb->opcode = IORING_OP_READ; - iocb->flags = 0; - iocb->ioprio = 0; - iocb->fd = fd; - iocb->off = offset; - iocb->addr = s->iovecs[index].iov_base; - iocb->len = BS; + offset = (r % (f->max_blocks - 1)) * BS; + + sqe->flags = IOSQE_FIXED_FILE; + if (fixedbufs) { + sqe->opcode = IORING_OP_READ_FIXED; + sqe->addr = s->iovecs[index].iov_base; + sqe->len = BS; + sqe->buf_index = index; + } else { + sqe->opcode = IORING_OP_READV; + sqe->addr = &s->iovecs[index]; + sqe->len = 1; + sqe->buf_index = 0; + } + sqe->ioprio = 0; + sqe->fd = f->fixed_fd; + sqe->off = offset; + sqe->user_data = (unsigned long) f; } -static int prep_more_ios(struct submitter *s, int fd, int max_ios) +static int prep_more_ios(struct submitter *s, int max_ios) { struct io_sq_ring *ring = &s->sq_ring; - u32 index, tail, next_tail, prepped = 0; + unsigned index, tail, next_tail, prepped = 0; next_tail = tail = *ring->tail; do { @@ -135,14 +209,14 @@ static int prep_more_ios(struct submitter *s, int fd, int max_ios) break; index = tail & sq_ring_mask; - init_io(s, fd, index); + init_io(s, index); ring->array[index] = index; prepped++; tail = next_tail; } while (prepped < max_ios); if (*ring->tail != tail) { - /* order tail store with writes to iocbs above */ + /* order tail store with writes to sqes above */ barrier(); *ring->tail = tail; barrier(); @@ -150,22 +224,22 @@ static int prep_more_ios(struct submitter *s, int fd, int max_ios) return prepped; } -static int get_file_size(int fd, unsigned long *blocks) +static int get_file_size(struct file *f) { struct stat st; - if (fstat(fd, &st) < 0) + if (fstat(f->real_fd, &st) < 0) return -1; if (S_ISBLK(st.st_mode)) { unsigned long long bytes; - if (ioctl(fd, BLKGETSIZE64, &bytes) != 0) + if (ioctl(f->real_fd, BLKGETSIZE64, &bytes) != 0) return -1; - *blocks = bytes / BS; + f->max_blocks = bytes / BS; return 0; } else if (S_ISREG(st.st_mode)) { - *blocks = st.st_size / BS; + f->max_blocks = st.st_size / BS; return 0; } @@ -175,25 +249,26 @@ static int get_file_size(int fd, unsigned long *blocks) static int reap_events(struct submitter *s) { struct io_cq_ring *ring = &s->cq_ring; - struct io_uring_event *ev; - u32 head, reaped = 0; + struct io_uring_cqe *cqe; + unsigned head, reaped = 0; head = *ring->head; do { + struct file *f; + barrier(); if (head == *ring->tail) break; - ev = &ring->events[head & cq_ring_mask]; - if (ev->res != BS) { - struct io_uring_iocb *iocb = &s->iocbs[ev->index]; - - printf("io: unexpected ret=%d\n", 
ev->res); - printf("offset=%lu, size=%lu\n", - (unsigned long) iocb->off, - (unsigned long) iocb->len); - return -1; + cqe = &ring->cqes[head & cq_ring_mask]; + if (!do_nop) { + f = (struct file *) cqe->user_data; + f->pending_ios--; + if (cqe->res != BS) { + printf("io: unexpected ret=%d\n", cqe->res); + return -1; + } } - if (ev->flags & IOEV_FLAG_CACHEHIT) + if (cqe->flags & IOCQE_FLAG_CACHEHIT) s->cachehit++; else s->cachemiss++; @@ -210,29 +285,11 @@ static int reap_events(struct submitter *s) static void *submitter_fn(void *data) { struct submitter *s = data; - int fd, ret, prepped, flags; + struct io_sq_ring *ring = &s->sq_ring; + int ret, prepped; printf("submitter=%d\n", gettid()); - flags = O_RDONLY; - if (!buffered) - flags |= O_DIRECT; - fd = open(s->filename, flags); - if (fd < 0) { - perror("open"); - goto done; - } - - if (get_file_size(fd, &s->max_blocks)) { - printf("failed getting size of device/file\n"); - goto err; - } - if (s->max_blocks <= 1) { - printf("Zero file/device size?\n"); - goto err; - } - s->max_blocks--; - srand48_r(pthread_self(), &s->rand); prepped = 0; @@ -241,7 +298,7 @@ static void *submitter_fn(void *data) if (!prepped && s->inflight < DEPTH) { to_prep = min(DEPTH - s->inflight, BATCH_SUBMIT); - prepped = prep_more_ios(s, fd, to_prep); + prepped = prep_more_ios(s, to_prep); } s->inflight += prepped; submit_more: @@ -252,13 +309,33 @@ submit: else to_wait = min(s->inflight + to_submit, BATCH_COMPLETE); - ret = io_uring_enter(s, to_submit, to_wait, - IORING_ENTER_GETEVENTS); - s->calls++; + /* + * Only need to call io_uring_enter if we're not using SQ thread + * poll, or if IORING_SQ_NEED_WAKEUP is set. + */ + if (!sq_thread_poll || (*ring->flags & IORING_SQ_NEED_WAKEUP)) { + unsigned flags = 0; + + if (to_wait) + flags = IORING_ENTER_GETEVENTS; + ret = io_uring_enter(s, to_submit, to_wait, flags); + s->calls++; + } - this_reap = reap_events(s); - if (this_reap == -1) - break; + /* + * For non SQ thread poll, we already got the events we needed + * through the io_uring_enter() above. For SQ thread poll, we + * need to loop here until we find enough events. 
+ */ + this_reap = 0; + do { + int r; + r = reap_events(s); + if (r == -1) + break; + else if (r > 0) + this_reap += r; + } while (sq_thread_poll && this_reap < to_wait); s->reaps += this_reap; if (ret >= 0) { @@ -290,9 +367,7 @@ submit: break; } } while (!s->finish); -err: - close(fd); -done: + finish = 1; return NULL; } @@ -319,33 +394,43 @@ static int setup_ring(struct submitter *s) struct io_sq_ring *sring = &s->sq_ring; struct io_cq_ring *cring = &s->cq_ring; struct io_uring_params p; + int ret, fd; void *ptr; - int fd; memset(&p, 0, sizeof(p)); if (polled) p.flags |= IORING_SETUP_IOPOLL; - if (fixedbufs) - p.flags |= IORING_SETUP_FIXEDBUFS; - if (buffered) - p.flags |= IORING_SETUP_SQWQ; - else if (sq_thread) { - p.flags |= IORING_SETUP_SQTHREAD; - p.sq_thread_cpu = sq_thread_cpu; + if (sq_thread_poll) { + p.flags |= IORING_SETUP_SQPOLL; + if (sq_thread_cpu != -1) { + p.flags |= IORING_SETUP_SQ_AFF; + p.sq_thread_cpu = sq_thread_cpu; + } } - if (fixedbufs) - fd = io_uring_setup(DEPTH, s->iovecs, &p); - else - fd = io_uring_setup(DEPTH, NULL, &p); + fd = io_uring_setup(DEPTH, &p); if (fd < 0) { perror("io_uring_setup"); return 1; } - s->ring_fd = fd; - ptr = mmap(0, p.sq_off.array + p.sq_entries * sizeof(u32), + + if (fixedbufs) { + ret = io_uring_register_buffers(s); + if (ret < 0) { + perror("io_uring_register_buffers"); + return 1; + } + } + + ret = io_uring_register_files(s); + if (ret < 0) { + perror("io_uring_register_files"); + return 1; + } + + ptr = mmap(0, p.sq_off.array + p.sq_entries * sizeof(__u32), PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, fd, IORING_OFF_SQ_RING); printf("sq_ring ptr = 0x%p\n", ptr); @@ -353,15 +438,16 @@ static int setup_ring(struct submitter *s) sring->tail = ptr + p.sq_off.tail; sring->ring_mask = ptr + p.sq_off.ring_mask; sring->ring_entries = ptr + p.sq_off.ring_entries; + sring->flags = ptr + p.sq_off.flags; sring->array = ptr + p.sq_off.array; sq_ring_mask = *sring->ring_mask; - s->iocbs = mmap(0, p.sq_entries * sizeof(struct io_uring_iocb), + s->sqes = mmap(0, p.sq_entries * sizeof(struct io_uring_sqe), PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, fd, - IORING_OFF_IOCB); - printf("iocbs ptr = 0x%p\n", s->iocbs); + IORING_OFF_SQES); + printf("sqes ptr = 0x%p\n", s->sqes); - ptr = mmap(0, p.cq_off.events + p.cq_entries * sizeof(struct io_uring_event), + ptr = mmap(0, p.cq_off.cqes + p.cq_entries * sizeof(struct io_uring_cqe), PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, fd, IORING_OFF_CQ_RING); printf("cq_ring ptr = 0x%p\n", ptr); @@ -369,7 +455,7 @@ static int setup_ring(struct submitter *s) cring->tail = ptr + p.cq_off.tail; cring->ring_mask = ptr + p.cq_off.ring_mask; cring->ring_entries = ptr + p.cq_off.ring_entries; - cring->events = ptr + p.cq_off.events; + cring->cqes = ptr + p.cq_off.cqes; cq_ring_mask = *cring->ring_mask; return 0; } @@ -378,15 +464,44 @@ int main(int argc, char *argv[]) { struct submitter *s = &submitters[0]; unsigned long done, calls, reap, cache_hit, cache_miss; - int err, i; + int err, i, flags, fd; struct rlimit rlim; void *ret; - if (argc < 2) { + if (!do_nop && argc < 2) { printf("%s: filename\n", argv[0]); return 1; } + flags = O_RDONLY | O_NOATIME; + if (!buffered) + flags |= O_DIRECT; + + i = 1; + while (!do_nop && i < argc) { + struct file *f = &s->files[s->nr_files]; + + fd = open(argv[i], flags); + if (fd < 0) { + perror("open"); + return 1; + } + f->real_fd = fd; + if (get_file_size(f)) { + printf("failed getting size of device/file\n"); + return 1; + } + if (f->max_blocks <= 1) { + 
printf("Zero file/device size?\n"); + return 1; + } + f->max_blocks--; + + printf("Added file %s\n", argv[i]); + s->nr_files++; + i++; + } + rlim.rlim_cur = RLIM_INFINITY; rlim.rlim_max = RLIM_INFINITY; if (setrlimit(RLIMIT_MEMLOCK, &rlim) < 0) { @@ -414,7 +529,6 @@ int main(int argc, char *argv[]) } printf("polled=%d, fixedbufs=%d, buffered=%d", polled, fixedbufs, buffered); printf(" QD=%d, sq_ring=%d, cq_ring=%d\n", DEPTH, *s->sq_ring.ring_entries, *s->cq_ring.ring_entries); - strcpy(s->filename, argv[1]); pthread_create(&s->thread, NULL, submitter_fn, s); @@ -445,8 +559,9 @@ int main(int argc, char *argv[]) if (this_call - calls) { rpc = (this_done - done) / (this_call - calls); ipc = (this_reap - reap) / (this_call - calls); - } - printf("IOPS=%lu, IOS/call=%lu/%lu, inflight=%u (head=%u tail=%u), Cachehit=%0.2f%%\n", + } else + rpc = ipc = -1; + printf("IOPS=%lu, IOS/call=%ld/%ld, inflight=%u (head=%u tail=%u), Cachehit=%0.2f%%\n", this_done - done, rpc, ipc, s->inflight, *s->cq_ring.head, *s->cq_ring.tail, hit); done = this_done;