#define DEPTH 128
#define BATCH_SUBMIT 32
#define BATCH_COMPLETE 32
-
#define BS 4096
#define MAX_FDS 16
struct submitter {
pthread_t thread;
int ring_fd;
- struct drand48_data rand;
struct io_sq_ring sq_ring;
struct io_uring_sqe *sqes;
struct io_cq_ring cq_ring;
static int depth = DEPTH; /* IO queue depth */
static int batch_submit = BATCH_SUBMIT; /* SQEs pushed per submit call */
static int batch_complete = BATCH_COMPLETE; /* CQEs reaped per loop */
+static int bs = BS; /* IO block size, overridable via -b */
static int polled = 1; /* use IO polling */
static int fixedbufs = 1; /* use fixed user buffers */
static int register_files = 1; /* use fixed files */
static int sq_thread_cpu = -1; /* pin above thread to this CPU */
static int do_nop = 0; /* no-op SQ ring commands */
+static int vectored = 1; /* use READV; cleared by probe when plain READ is supported */
+
/*
 * Register this submitter's iovec buffers with the kernel so the
 * IORING_OP_READ_FIXED path can reference them by buf_index.
 * Skipped in nop mode, where no data buffers are touched.
 */
static int io_uring_register_buffers(struct submitter *s)
{
	if (do_nop)
		return 0;
-	return syscall(__NR_sys_io_uring_register, s->ring_fd,
+	return syscall(__NR_io_uring_register, s->ring_fd,
			IORING_REGISTER_BUFFERS, s->iovecs, depth);
}
s->files[i].fixed_fd = i;
}
- return syscall(__NR_sys_io_uring_register, s->ring_fd,
+ return syscall(__NR_io_uring_register, s->ring_fd,
IORING_REGISTER_FILES, s->fds, s->nr_files);
}
/* Thin wrapper around the io_uring_setup(2) system call. */
static int io_uring_setup(unsigned entries, struct io_uring_params *p)
{
-	return syscall(__NR_sys_io_uring_setup, entries, p);
+	return syscall(__NR_io_uring_setup, entries, p);
+}
+
+/*
+ * Probe the kernel for supported io_uring opcodes. If the non-vectored
+ * IORING_OP_READ is available, clear 'vectored' so the submission path
+ * uses READ instead of READV. Best-effort: any failure leaves the
+ * default (vectored) behavior intact.
+ */
+static void io_uring_probe(int fd)
+{
+	struct io_uring_probe *p;
+	int ret;
+
+	/* calloc zero-initializes and overflow-checks the size product */
+	p = calloc(1, sizeof(*p) + 256 * sizeof(struct io_uring_probe_op));
+	if (!p)
+		return;
+
+	ret = syscall(__NR_io_uring_register, fd, IORING_REGISTER_PROBE, p, 256);
+	if (ret < 0)
+		goto out;
+
+	/* kernel reports how many opcodes it knows about in ops_len */
+	if (IORING_OP_READ > p->ops_len)
+		goto out;
+
+	if ((p->ops[IORING_OP_READ].flags & IO_URING_OP_SUPPORTED))
+		vectored = 0;
+out:
+	free(p);
}
/*
 * Thin wrapper around io_uring_enter(2): submit up to 'to_submit' SQEs
 * and wait for at least 'min_complete' completions, as modified by
 * 'flags'. No signal mask is passed (NULL, 0).
 */
static int io_uring_enter(struct submitter *s, unsigned int to_submit,
			  unsigned int min_complete, unsigned int flags)
{
-	return syscall(__NR_sys_io_uring_enter, s->ring_fd, to_submit,
-		min_complete, flags, NULL, 0);
+	return syscall(__NR_io_uring_enter, s->ring_fd, to_submit, min_complete,
+			flags, NULL, 0);
}
+#ifndef CONFIG_HAVE_GETTID
/* Fallback for libcs that don't expose gettid(); raw syscall wrapper. */
static int gettid(void)
{
	return syscall(__NR_gettid);
}
+#endif
static unsigned file_depth(struct submitter *s)
{
}
f->pending_ios++;
- lrand48_r(&s->rand, &r);
- offset = (r % (f->max_blocks - 1)) * BS;
+ r = lrand48();
+ offset = (r % (f->max_blocks - 1)) * bs;
if (register_files) {
sqe->flags = IOSQE_FIXED_FILE;
if (fixedbufs) {
sqe->opcode = IORING_OP_READ_FIXED;
sqe->addr = (unsigned long) s->iovecs[index].iov_base;
- sqe->len = BS;
+ sqe->len = bs;
sqe->buf_index = index;
+ } else if (!vectored) {
+ sqe->opcode = IORING_OP_READ;
+ sqe->addr = (unsigned long) s->iovecs[index].iov_base;
+ sqe->len = bs;
+ sqe->buf_index = 0;
} else {
sqe->opcode = IORING_OP_READV;
sqe->addr = (unsigned long) &s->iovecs[index];
} while (prepped < max_ios);
if (*ring->tail != tail) {
- /* order tail store with writes to sqes above */
- write_barrier();
*ring->tail = tail;
write_barrier();
}
if (ioctl(f->real_fd, BLKGETSIZE64, &bytes) != 0)
return -1;
- f->max_blocks = bytes / BS;
+ f->max_blocks = bytes / bs;
return 0;
} else if (S_ISREG(st.st_mode)) {
- f->max_blocks = st.st_size / BS;
+ f->max_blocks = st.st_size / bs;
return 0;
}
if (!do_nop) {
f = (struct file *) (uintptr_t) cqe->user_data;
f->pending_ios--;
- if (cqe->res != BS) {
+ if (cqe->res != bs) {
printf("io: unexpected ret=%d\n", cqe->res);
if (polled && cqe->res == -EOPNOTSUPP)
printf("Your filesystem/driver/kernel doesn't support polled IO\n");
printf("submitter=%d\n", gettid());
- srand48_r(pthread_self(), &s->rand);
+ srand48(pthread_self());
prepped = 0;
do {
}
s->ring_fd = fd;
+ io_uring_probe(fd);
+
if (fixedbufs) {
ret = io_uring_register_buffers(s);
if (ret < 0) {
printf("%s [options] -- [filenames]\n"
" -d <int> : IO Depth, default %d\n"
" -s <int> : Batch submit, default %d\n"
- " -c <int> : Batch complete, default %d\n",
- argv, DEPTH, BATCH_SUBMIT, BATCH_COMPLETE);
+ " -c <int> : Batch complete, default %d\n"
+ " -b <int> : Block size, default %d\n"
+ " -p <bool> : Polled IO, default %d\n",
+ argv, DEPTH, BATCH_SUBMIT, BATCH_COMPLETE, BS, polled);
exit(0);
}
return 1;
}
- while ((opt = getopt(argc, argv, "d:s:c:h?")) != -1) {
+ while ((opt = getopt(argc, argv, "d:s:c:b:p:B:F:h?")) != -1) {
switch (opt) {
case 'd':
depth = atoi(optarg);
case 'c':
batch_complete = atoi(optarg);
break;
+ case 'b':
+ bs = atoi(optarg);
+ break;
+ case 'p':
+ polled = !!atoi(optarg);
+ break;
+ case 'B':
+ fixedbufs = !!atoi(optarg);
+ break;
+ case 'F':
+ register_files = !!atoi(optarg);
+ break;
case 'h':
case '?':
default:
for (i = 0; i < depth; i++) {
void *buf;
- if (posix_memalign(&buf, BS, BS)) {
+ if (posix_memalign(&buf, bs, bs)) {
printf("failed alloc\n");
return 1;
}
s->iovecs[i].iov_base = buf;
- s->iovecs[i].iov_len = BS;
+ s->iovecs[i].iov_len = bs;
}
err = setup_ring(s);
printf("ring setup failed: %s, %d\n", strerror(errno), err);
return 1;
}
- printf("polled=%d, fixedbufs=%d, buffered=%d", polled, fixedbufs, buffered);
+ printf("polled=%d, fixedbufs=%d, register_files=%d, buffered=%d", polled, fixedbufs, register_files, buffered);
printf(" QD=%d, sq_ring=%d, cq_ring=%d\n", depth, *s->sq_ring.ring_entries, *s->cq_ring.ring_entries);
pthread_create(&s->thread, NULL, submitter_fn, s);