author		Jens Axboe <axboe@kernel.dk>	2019-09-27 06:12:56 -0600
committer	Jens Axboe <axboe@kernel.dk>	2019-09-27 06:12:56 -0600
commit		fe13f562c148e2e0d18ed84600a49dd7b8ccf264 (patch)
tree		08773aa1e95e42c36114ad5ebfe2185c65922ba4 /src
parent		94c9df3b934cdfbc36ce1f795b537c7d47689407 (diff)
Add io_uring_wait_cqes()
Allows waiting for a specific number of CQEs, which the kernel can handle more optimally when it is told how many completions we are waiting for. The calling convention is the same as io_uring_wait_cqe(); only one cqe can be returned inline.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'src')
-rw-r--r--	src/include/liburing.h	|  2
-rw-r--r--	src/liburing.map	|  1
-rw-r--r--	src/queue.c		| 10
3 files changed, 13 insertions(+), 0 deletions(-)
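
For reference (not part of this commit), a minimal usage sketch of the new helper against the declaration added below. The queue size, the no-op workload, and the reap loop are illustrative assumptions; the point is that wait_nr lets the kernel wait for several completions at once while, as with io_uring_wait_cqe(), only the first cqe is returned inline.

#include <liburing.h>
#include <stdio.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_cqe *cqe;
	unsigned i, nr = 4;
	int ret;

	ret = io_uring_queue_init(8, &ring, 0);
	if (ret < 0)
		return 1;

	/* queue a few no-op requests purely to generate completions */
	for (i = 0; i < nr; i++) {
		struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);

		io_uring_prep_nop(sqe);
		sqe->user_data = i;
	}
	io_uring_submit(&ring);

	/*
	 * Block until at least 'nr' completions are available; only the
	 * first cqe comes back inline.
	 */
	ret = io_uring_wait_cqes(&ring, &cqe, nr);
	if (ret < 0)
		return 1;

	/* reap the first cqe, then pull the rest (already completed) */
	for (i = 0; i < nr; i++) {
		if (i && io_uring_wait_cqe(&ring, &cqe))
			break;
		printf("user_data=%llu res=%d\n",
		       (unsigned long long) cqe->user_data, cqe->res);
		io_uring_cqe_seen(&ring, cqe);
	}

	io_uring_queue_exit(&ring);
	return 0;
}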
diff --git a/src/include/liburing.h b/src/include/liburing.h
index 6367307..74b83f0 100644
--- a/src/include/liburing.h
+++ b/src/include/liburing.h
@@ -76,6 +76,8 @@ unsigned io_uring_peek_batch_cqe(struct io_uring *ring,
struct io_uring_cqe **cqes, unsigned count);
extern int io_uring_wait_cqe(struct io_uring *ring,
struct io_uring_cqe **cqe_ptr);
+extern int io_uring_wait_cqes(struct io_uring *ring,
+ struct io_uring_cqe **cqe_ptr, unsigned wait_nr);
extern int io_uring_wait_cqes_timeout(struct io_uring *ring,
struct io_uring_cqe **cqe_ptr, unsigned wait_nr, struct timespec *ts);
extern int io_uring_wait_cqe_timeout(struct io_uring *ring,
diff --git a/src/liburing.map b/src/liburing.map
index fe955d6..18b816c 100644
--- a/src/liburing.map
+++ b/src/liburing.map
@@ -28,4 +28,5 @@ LIBURING_0.2 {
io_uring_peek_batch_cqe;
io_uring_wait_cqe_timeout;
io_uring_wait_cqes_timeout;
+ io_uring_wait_cqes;
} LIBURING_0.1;
diff --git a/src/queue.c b/src/queue.c
index f8a6ce7..46d1510 100644
--- a/src/queue.c
+++ b/src/queue.c
@@ -124,6 +124,16 @@ int io_uring_wait_cqe(struct io_uring *ring, struct io_uring_cqe **cqe_ptr)
}
/*
+ * Like io_uring_wait_cqe(), except we ask to wait for more entries in the
+ * kernel.
+ */
+int io_uring_wait_cqes(struct io_uring *ring, struct io_uring_cqe **cqe_ptr,
+ unsigned wait_nr)
+{
+ return __io_uring_get_cqe(ring, cqe_ptr, 0, wait_nr);
+}
+
+/*
* Like io_uring_wait_cqe(), except it accepts a timeout value as well. Note
* that an sqe is used internally to handle the timeout. Applications using
* this function must never set sqe->user_data to LIBURING_UDATA_TIMEOUT!