#ifdef ARCH_HAVE_IOURING
#include "../lib/types.h"
-#include "../os/io_uring.h"
+#include "../os/linux/io_uring.h"
struct io_sq_ring {
unsigned *head;
struct ioring_data {
int ring_fd;
- struct io_u **io_us;
struct io_u **io_u_index;
struct io_sq_ring sq_ring;
int cq_ring_off;
unsigned iodepth;
- uint64_t cachehit;
- uint64_t cachemiss;
-
struct ioring_mmap mmap[3];
};
unsigned int min_complete, unsigned int flags)
{
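+ /*
+  * io_uring_enter(2) takes a sigset_t pointer and its size as the
+  * final two arguments; pass NULL/0 as no signal masking is needed.
+  */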
return syscall(__NR_sys_io_uring_enter, ld->ring_fd, to_submit,
- min_complete, flags);
+ min_complete, flags, NULL, 0);
}
static int fio_ioring_prep(struct thread_data *td, struct io_u *io_u)
sqe->buf_index = 0;
if (io_u->ddir == DDIR_READ || io_u->ddir == DDIR_WRITE) {
- if (io_u->ddir == DDIR_READ)
- sqe->opcode = IORING_OP_READV;
- else
- sqe->opcode = IORING_OP_WRITEV;
-
if (o->fixedbufs) {
- sqe->flags |= IOSQE_FIXED_BUFFER;
- sqe->addr = io_u->xfer_buf;
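+ /*
+  * With registered buffers, READ_FIXED/WRITE_FIXED select the
+  * pre-registered buffer via buf_index, with addr/len inside it.
+  */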
+ if (io_u->ddir == DDIR_READ)
+ sqe->opcode = IORING_OP_READ_FIXED;
+ else
+ sqe->opcode = IORING_OP_WRITE_FIXED;
+ sqe->addr = (unsigned long) io_u->xfer_buf;
sqe->len = io_u->xfer_buflen;
sqe->buf_index = io_u->index;
} else {
- sqe->addr = &ld->iovecs[io_u->index];
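+ /* unregistered buffers go through one iovec per io_u; len is the iovec count */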
+ if (io_u->ddir == DDIR_READ)
+ sqe->opcode = IORING_OP_READV;
+ else
+ sqe->opcode = IORING_OP_WRITEV;
+ sqe->addr = (unsigned long) &ld->iovecs[io_u->index];
sqe->len = 1;
}
sqe->off = io_u->offset;
- } else if (ddir_sync(io_u->ddir))
+ } else if (ddir_sync(io_u->ddir)) {
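+ /* fdatasync is FSYNC with IORING_FSYNC_DATASYNC set */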
+ sqe->fsync_flags = 0;
+ if (io_u->ddir == DDIR_DATASYNC)
+ sqe->fsync_flags |= IORING_FSYNC_DATASYNC;
sqe->opcode = IORING_OP_FSYNC;
+ }
- sqe->data = (unsigned long) io_u;
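+ /* user_data is returned untouched in the CQE; stash the io_u there */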
+ sqe->user_data = (unsigned long) io_u;
return 0;
}
index = (event + ld->cq_ring_off) & ld->cq_ring_mask;
cqe = &ld->cq_ring.cqes[index];
- io_u = (struct io_u *) cqe->data;
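+ /* recover the io_u stashed in user_data at submit time */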
+ io_u = (struct io_u *) (uintptr_t) cqe->user_data;
if (cqe->res != io_u->xfer_buflen) {
if (cqe->res > io_u->xfer_buflen)
} else
io_u->error = 0;
- if (io_u->ddir == DDIR_READ) {
- if (cqe->flags & IOCQE_FLAG_CACHEHIT)
- ld->cachehit++;
- else
- ld->cachemiss++;
- }
-
return io_u;
}
r = fio_ioring_cqring_reap(td, events, max);
if (r) {
events += r;
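+ /* credit already-reaped events against the wait minimum */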
+ if (actual_min != 0)
+ actual_min -= r;
continue;
}
if (next_tail == *ring->head)
return FIO_Q_BUSY;
+ /* ensure sqe stores are ordered with tail update */
+ write_barrier();
ring->array[tail & ld->sq_ring_mask] = io_u->index;
*ring->tail = next_tail;
write_barrier();
read_barrier();
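+ /*
+  * With SQPOLL, the kernel thread may have gone idle; if it flagged
+  * IORING_SQ_NEED_WAKEUP, kick it with IORING_ENTER_SQ_WAKEUP.
+  */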
if (*ring->flags & IORING_SQ_NEED_WAKEUP)
- io_uring_enter(ld, ld->queued, 0, 0);
+ io_uring_enter(ld, ld->queued, 0,
+ IORING_ENTER_SQ_WAKEUP);
ld->queued = 0;
return 0;
}
struct ioring_data *ld = td->io_ops_data;
if (ld) {
- td->ts.cachehit += ld->cachehit;
- td->ts.cachemiss += ld->cachemiss;
-
if (!(td->flags & TD_F_CHILD))
fio_ioring_unmap(ld);
free(ld->io_u_index);
- free(ld->io_us);
free(ld->iovecs);
free(ld);
}
}
}
- if (o->fixedbufs) {
- struct rlimit rlim = {
- .rlim_cur = RLIM_INFINITY,
- .rlim_max = RLIM_INFINITY,
- };
-
- setrlimit(RLIMIT_MEMLOCK, &rlim);
- }
-
ret = syscall(__NR_sys_io_uring_setup, depth, &p);
if (ret < 0)
return ret;
ld->ring_fd = ret;
if (o->fixedbufs) {
- struct io_uring_register_buffers reg = {
- .iovecs = ld->iovecs,
- .nr_iovecs = depth
+ struct rlimit rlim = {
+ .rlim_cur = RLIM_INFINITY,
+ .rlim_max = RLIM_INFINITY,
};
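+ /* registered buffers are pinned, so lift the memlock limit first */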
+ if (setrlimit(RLIMIT_MEMLOCK, &rlim) < 0)
+ return -1;
+
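+ /*
+  * IORING_REGISTER_BUFFERS now takes the iovec array and count
+  * directly instead of a wrapper struct.
+  */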
ret = syscall(__NR_sys_io_uring_register, ld->ring_fd,
- IORING_REGISTER_BUFFERS, &reg);
+ IORING_REGISTER_BUFFERS, ld->iovecs, depth);
if (ret < 0)
return ret;
}
/* io_u index */
ld->io_u_index = calloc(td->o.iodepth, sizeof(struct io_u *));
- ld->io_us = calloc(td->o.iodepth, sizeof(struct io_u *));
ld->iovecs = calloc(td->o.iodepth, sizeof(struct iovec));
td->io_ops_data = ld;