summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorStefan Roesch <shr@fb.com>2022-04-25 11:26:37 -0700
committerJens Axboe <axboe@kernel.dk>2022-05-16 09:31:06 -0600
commit406176e9a0e7c023a6572d734d4e2d40e3e2c4d6 (patch)
tree353964297a895a77614493d176b59c1d6d9a74ea
parent0ba877d0e76f378c01a34c221bf1dc67d158744c (diff)
downloadliburing-406176e9a0e7c023a6572d734d4e2d40e3e2c4d6.tar.gz
liburing-406176e9a0e7c023a6572d734d4e2d40e3e2c4d6.tar.bz2
liburing: index large CQE's correctly
Large CQEs need to take into account that each CQE has double the size. When the CQE array is indexed, the offset into the array needs to be changed accordingly.

Signed-off-by: Stefan Roesch <shr@fb.com>
Link: https://lore.kernel.org/r/20220425182639.2446370-5-shr@fb.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
-rw-r--r--src/include/liburing.h18
-rw-r--r--src/queue.c6
2 files changed, 21 insertions, 3 deletions
diff --git a/src/include/liburing.h b/src/include/liburing.h
index eebb23a..1aedc35 100644
--- a/src/include/liburing.h
+++ b/src/include/liburing.h
@@ -189,6 +189,16 @@ int __io_uring_get_cqe(struct io_uring *ring,
#define LIBURING_UDATA_TIMEOUT ((__u64) -1)
+/*
+ * Calculates the step size for CQE iteration.
+ * For standard CQEs the step is 1, for big CQEs it is 2.
+ */
+#define io_uring_cqe_shift(ring) \
+ (!!((ring)->flags & IORING_SETUP_CQE32))
+
+#define io_uring_cqe_index(ring,ptr,mask) \
+ (((ptr) & (mask)) << io_uring_cqe_shift(ring))
+
#define io_uring_for_each_cqe(ring, head, cqe) \
/* \
* io_uring_smp_load_acquire() enforces the order of tail \
@@ -196,7 +206,7 @@ int __io_uring_get_cqe(struct io_uring *ring,
*/ \
for (head = *(ring)->cq.khead; \
(cqe = (head != io_uring_smp_load_acquire((ring)->cq.ktail) ? \
- &(ring)->cq.cqes[head & (*(ring)->cq.kring_mask)] : NULL)); \
+ &(ring)->cq.cqes[io_uring_cqe_index(ring, head, *(ring)->cq.kring_mask)] : NULL)); \
head++) \
/*
@@ -901,6 +911,10 @@ static inline int __io_uring_peek_cqe(struct io_uring *ring,
int err = 0;
unsigned available;
unsigned mask = *ring->cq.kring_mask;
+ int shift = 0;
+
+ if (ring->flags & IORING_SETUP_CQE32)
+ shift = 1;
do {
unsigned tail = io_uring_smp_load_acquire(ring->cq.ktail);
@@ -911,7 +925,7 @@ static inline int __io_uring_peek_cqe(struct io_uring *ring,
if (!available)
break;
- cqe = &ring->cq.cqes[head & mask];
+ cqe = &ring->cq.cqes[(head & mask) << shift];
if (!(ring->features & IORING_FEAT_EXT_ARG) &&
cqe->user_data == LIBURING_UDATA_TIMEOUT) {
if (cqe->res < 0)
diff --git a/src/queue.c b/src/queue.c
index 36b4b29..ce0ecf6 100644
--- a/src/queue.c
+++ b/src/queue.c
@@ -133,6 +133,10 @@ unsigned io_uring_peek_batch_cqe(struct io_uring *ring,
{
unsigned ready;
bool overflow_checked = false;
+ int shift = 0;
+
+ if (ring->flags & IORING_SETUP_CQE32)
+ shift = 1;
again:
ready = io_uring_cq_ready(ring);
@@ -145,7 +149,7 @@ again:
count = count > ready ? ready : count;
last = head + count;
for (;head != last; head++, i++)
- cqes[i] = &ring->cq.cqes[head & mask];
+ cqes[i] = &ring->cq.cqes[(head & mask) << shift];
return count;
}