From 9e26aff95394142b8d2074e0f415140d2bbea145 Mon Sep 17 00:00:00 2001
From: Bart Van Assche
Date: Sun, 21 Jun 2020 14:17:58 -0700
Subject: [PATCH] engines/io_uring: Use atomic_{load_acquire,store_release}()

This patch improves performance by using acquire and release semantics
instead of barriers and also brings the io_uring engine code closer to
that of liburing.

Signed-off-by: Bart Van Assche
---
 engines/io_uring.c | 12 ++++--------
 1 file changed, 4 insertions(+), 8 deletions(-)

diff --git a/engines/io_uring.c b/engines/io_uring.c
index cab7ecaf..cd0810f4 100644
--- a/engines/io_uring.c
+++ b/engines/io_uring.c
@@ -301,15 +301,13 @@ static int fio_ioring_cqring_reap(struct thread_data *td, unsigned int events,
 
 	head = *ring->head;
 	do {
-		read_barrier();
-		if (head == *ring->tail)
+		if (head == atomic_load_acquire(ring->tail))
 			break;
 		reaped++;
 		head++;
 	} while (reaped + events < max);
 
-	*ring->head = head;
-	write_barrier();
+	atomic_store_release(ring->head, head);
 	return reaped;
 }
 
@@ -384,15 +382,13 @@ static enum fio_q_status fio_ioring_queue(struct thread_data *td,
 
 	tail = *ring->tail;
 	next_tail = tail + 1;
-	read_barrier();
-	if (next_tail == *ring->head)
+	if (next_tail == atomic_load_acquire(ring->head))
 		return FIO_Q_BUSY;
 
 	if (o->cmdprio_percentage)
 		fio_ioring_prio_prep(td, io_u);
 
 	ring->array[tail & ld->sq_ring_mask] = io_u->index;
-	*ring->tail = next_tail;
-	write_barrier();
+	atomic_store_release(ring->tail, next_tail);
 
 	ld->queued++;
 	return FIO_Q_QUEUED;
-- 
2.25.1
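
For readers unfamiliar with the acquire/release pattern used above, the sketch
below shows a minimal single-producer/single-consumer ring built on the same
idea: the producer fills a slot and then publishes the new tail with a release
store, and the consumer reads the tail with an acquire load so the slot
contents are guaranteed to be visible before it uses them. This is only an
illustration under stated assumptions: the atomic_load_acquire() and
atomic_store_release() macros here are assumed to map to the GCC/Clang
__atomic builtins (which matches the general shape of liburing's helpers, but
is not necessarily fio's exact definition), and the ring layout, sizes and
names are invented for the example.

/*
 * Minimal SPSC ring sketch of the acquire/release pattern (illustrative only).
 * Build assumption: GCC or Clang, compiled with -pthread.
 */
#include <pthread.h>
#include <stdio.h>

#define RING_SIZE	8	/* must be a power of two */
#define RING_MASK	(RING_SIZE - 1)

/* Assumed mapping onto compiler builtins; fio's own macros may differ. */
#define atomic_load_acquire(p)		__atomic_load_n((p), __ATOMIC_ACQUIRE)
#define atomic_store_release(p, v)	__atomic_store_n((p), (v), __ATOMIC_RELEASE)

static unsigned ring_head;		/* advanced by the consumer */
static unsigned ring_tail;		/* advanced by the producer */
static int ring_array[RING_SIZE];

/* Producer: write the slot first, then publish it with a release store. */
static void ring_push(int value)
{
	unsigned tail = ring_tail;

	/* Ring is full while tail - head == RING_SIZE (free-running indices). */
	while (tail - atomic_load_acquire(&ring_head) == RING_SIZE)
		;	/* spin; the engine returns FIO_Q_BUSY instead */

	ring_array[tail & RING_MASK] = value;
	atomic_store_release(&ring_tail, tail + 1);
}

/* Consumer: acquire-load the tail so everything written before the matching
 * release store is visible, consume the slot, then retire it with a release
 * store of the new head (mirroring the reap path in the patch). */
static int ring_pop(void)
{
	unsigned head = ring_head;
	int value;

	while (head == atomic_load_acquire(&ring_tail))
		;	/* ring empty, spin */

	value = ring_array[head & RING_MASK];
	atomic_store_release(&ring_head, head + 1);
	return value;
}

static void *producer(void *arg)
{
	for (int i = 0; i < 100; i++)
		ring_push(i);
	return NULL;
}

int main(void)
{
	pthread_t thr;

	pthread_create(&thr, NULL, producer, NULL);
	for (int i = 0; i < 100; i++)
		printf("%d\n", ring_pop());
	pthread_join(thr, NULL);
	return 0;
}

Saved as, say, spsc_ring.c, this builds and runs with "cc -O2 -pthread
spsc_ring.c && ./a.out". The point of the patch is visible in the pairing: the
old read_barrier()/write_barrier() calls ordered everything on both sides,
whereas an acquire load paired with the matching release store only constrains
the accesses that actually need ordering, which is cheaper on weakly ordered
architectures and matches how liburing synchronizes the same rings.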