static int sq_thread_cpu = -1; /* pin above thread to this CPU */
static int do_nop = 0; /* no-op SQ ring commands */
+static int vectored = 1;
+
/*
 * Register the submitter's fixed I/O buffers with the ring.
 *
 * NOTE(review): this block appears truncated by collapsed diff context --
 * in the complete source the do_nop early-return and the actual
 * IORING_REGISTER_BUFFERS syscall are separate statements; as shown here,
 * a register-buffers helper invoking __NR_io_uring_setup (with `entries`
 * and `p` that are not in this function's scope) cannot be the real body.
 * Verify against the full file before acting on this function.
 */
static int io_uring_register_buffers(struct submitter *s)
{
if (do_nop)
return syscall(__NR_io_uring_setup, entries, p);
}
+/*
+ * Ask the kernel which io_uring opcodes it supports, and if the
+ * non-vectored IORING_OP_READ is available, prefer it over IORING_OP_READV
+ * by clearing the global `vectored` flag.
+ *
+ * Best-effort: on allocation failure or an older kernel without
+ * IORING_REGISTER_PROBE (or one whose probe reply does not cover
+ * IORING_OP_READ) we silently keep the vectored default.
+ */
+static void io_uring_probe(int fd)
+{
+	struct io_uring_probe *p;
+	int ret;
+
+	/*
+	 * Probe reply is a fixed header followed by up to 256 per-op
+	 * descriptors; calloc zero-fills it (required before the register
+	 * call) and overflow-checks the size in one step.
+	 */
+	p = calloc(1, sizeof(*p) + 256 * sizeof(struct io_uring_probe_op));
+	if (!p)
+		return;
+
+	ret = syscall(__NR_io_uring_register, fd, IORING_REGISTER_PROBE, p, 256);
+	if (ret < 0)
+		goto out;
+
+	/* Reply too short to describe IORING_OP_READ at all? */
+	if (IORING_OP_READ > p->ops_len)
+		goto out;
+
+	if ((p->ops[IORING_OP_READ].flags & IO_URING_OP_SUPPORTED))
+		vectored = 0;
+out:
+	free(p);
+}
+
static int io_uring_enter(struct submitter *s, unsigned int to_submit,
unsigned int min_complete, unsigned int flags)
{
sqe->addr = (unsigned long) s->iovecs[index].iov_base;
sqe->len = bs;
sqe->buf_index = index;
+ } else if (!vectored) {
+ sqe->opcode = IORING_OP_READ;
+ sqe->addr = (unsigned long) s->iovecs[index].iov_base;
+ sqe->len = bs;
+ sqe->buf_index = 0;
} else {
sqe->opcode = IORING_OP_READV;
sqe->addr = (unsigned long) &s->iovecs[index];
next_tail = tail = *ring->tail;
do {
next_tail++;
- read_barrier();
- if (next_tail == *ring->head)
+ if (next_tail == atomic_load_acquire(ring->head))
break;
index = tail & sq_ring_mask;
tail = next_tail;
} while (prepped < max_ios);
- if (*ring->tail != tail) {
- *ring->tail = tail;
- write_barrier();
- }
+ if (prepped)
+ atomic_store_release(ring->tail, tail);
return prepped;
}
struct file *f;
read_barrier();
- if (head == *ring->tail)
+ if (head == atomic_load_acquire(ring->tail))
break;
cqe = &ring->cqes[head & cq_ring_mask];
if (!do_nop) {
head++;
} while (1);
- s->inflight -= reaped;
- *ring->head = head;
- write_barrier();
+ if (reaped) {
+ s->inflight -= reaped;
+ atomic_store_release(ring->head, head);
+ }
return reaped;
}
prepped = 0;
do {
int to_wait, to_submit, this_reap, to_prep;
+ unsigned ring_flags = 0;
if (!prepped && s->inflight < depth) {
to_prep = min(depth - s->inflight, batch_submit);
* Only need to call io_uring_enter if we're not using SQ thread
* poll, or if IORING_SQ_NEED_WAKEUP is set.
*/
- if (!sq_thread_poll || (*ring->flags & IORING_SQ_NEED_WAKEUP)) {
+ if (sq_thread_poll)
+ ring_flags = atomic_load_acquire(ring->flags);
+ if (!sq_thread_poll || ring_flags & IORING_SQ_NEED_WAKEUP) {
unsigned flags = 0;
if (to_wait)
flags = IORING_ENTER_GETEVENTS;
- if ((*ring->flags & IORING_SQ_NEED_WAKEUP))
+ if (ring_flags & IORING_SQ_NEED_WAKEUP)
flags |= IORING_ENTER_SQ_WAKEUP;
ret = io_uring_enter(s, to_submit, to_wait, flags);
s->calls++;
+ } else {
+ /* for SQPOLL, we submitted it all effectively */
+ ret = to_submit;
}
/*
}
s->ring_fd = fd;
+ io_uring_probe(fd);
+
if (fixedbufs) {
ret = io_uring_register_buffers(s);
if (ret < 0) {
return 1;
}
- while ((opt = getopt(argc, argv, "d:s:c:b:p:h?")) != -1) {
+ while ((opt = getopt(argc, argv, "d:s:c:b:p:B:F:h?")) != -1) {
switch (opt) {
case 'd':
depth = atoi(optarg);
case 'p':
polled = !!atoi(optarg);
break;
+ case 'B':
+ fixedbufs = !!atoi(optarg);
+ break;
+ case 'F':
+ register_files = !!atoi(optarg);
+ break;
case 'h':
case '?':
default:
printf("ring setup failed: %s, %d\n", strerror(errno), err);
return 1;
}
- printf("polled=%d, fixedbufs=%d, buffered=%d", polled, fixedbufs, buffered);
+ printf("polled=%d, fixedbufs=%d, register_files=%d, buffered=%d", polled, fixedbufs, register_files, buffered);
printf(" QD=%d, sq_ring=%d, cq_ring=%d\n", depth, *s->sq_ring.ring_entries, *s->cq_ring.ring_entries);
pthread_create(&s->thread, NULL, submitter_fn, s);