summary refs log tree commit diff
path: root/src
diff options
context:
space:
mode:
author	Jens Axboe <axboe@kernel.dk>	2019-09-28 05:35:02 -0600
committer	Jens Axboe <axboe@kernel.dk>	2019-09-28 09:52:59 -0600
commit	20c929379461d790fb7556a175e12d719bcd20b7 (patch)
tree	30d537769aea9ebd96f966620b6ddddc993c48dd /src
parent	ac72640594bcb04b4952e7b2b83b06f2046afa8a (diff)
downloadliburing-20c929379461d790fb7556a175e12d719bcd20b7.tar.gz
liburing-20c929379461d790fb7556a175e12d719bcd20b7.tar.bz2
Inline fast-path of io_uring_{wait,peek}_cqe()
For cases where we usually have completions available, it can be fairly costly to always have to call into the liburing library. Ensure that we handle the fast path of finding completions without needing a library call.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'src')
-rw-r--r--	src/include/liburing.h	| 65
-rw-r--r--	src/liburing.map	| 2
-rw-r--r--	src/queue.c	| 46
3 files changed, 72 insertions(+), 41 deletions(-)
diff --git a/src/include/liburing.h b/src/include/liburing.h
index f5c27d8..9ea4ba9 100644
--- a/src/include/liburing.h
+++ b/src/include/liburing.h
@@ -70,12 +70,8 @@ extern int io_uring_queue_init(unsigned entries, struct io_uring *ring,
extern int io_uring_queue_mmap(int fd, struct io_uring_params *p,
struct io_uring *ring);
extern void io_uring_queue_exit(struct io_uring *ring);
-extern int io_uring_peek_cqe(struct io_uring *ring,
- struct io_uring_cqe **cqe_ptr);
unsigned io_uring_peek_batch_cqe(struct io_uring *ring,
struct io_uring_cqe **cqes, unsigned count);
-extern int io_uring_wait_cqe(struct io_uring *ring,
- struct io_uring_cqe **cqe_ptr);
extern int io_uring_wait_cqes(struct io_uring *ring,
struct io_uring_cqe **cqe_ptr, unsigned wait_nr, struct timespec *ts,
sigset_t *sigmask);
@@ -95,6 +91,14 @@ extern int io_uring_unregister_files(struct io_uring *ring);
extern int io_uring_register_eventfd(struct io_uring *ring, int fd);
extern int io_uring_unregister_eventfd(struct io_uring *ring);
+/*
+ * Helper for the peek/wait single cqe functions. Exported because of that,
+ * but probably shouldn't be used directly in an application.
+ */
+extern int __io_uring_get_cqe(struct io_uring *ring,
+ struct io_uring_cqe **cqe_ptr, unsigned submit,
+ unsigned wait_nr, sigset_t *sigmask);
+
#define LIBURING_UDATA_TIMEOUT ((__u64) -1)
#define io_uring_for_each_cqe(ring, head, cqe) \
@@ -256,6 +260,59 @@ static inline unsigned io_uring_cq_ready(struct io_uring *ring)
return io_uring_smp_load_acquire(ring->cq.ktail) - *ring->cq.khead;
}
+static struct io_uring_cqe *__io_uring_peek_cqe(struct io_uring *ring)
+{
+ struct io_uring_cqe *cqe;
+ unsigned head;
+ int err = 0;
+
+ do {
+ io_uring_for_each_cqe(ring, head, cqe)
+ break;
+ if (cqe) {
+ if (cqe->user_data == LIBURING_UDATA_TIMEOUT) {
+ if (cqe->res < 0)
+ err = cqe->res;
+ io_uring_cq_advance(ring, 1);
+ if (!err)
+ continue;
+ cqe = NULL;
+ }
+ }
+ break;
+ } while (1);
+
+ return cqe;
+}
+
+/*
+ * Return an IO completion, if one is readily available. Returns 0 with
+ * cqe_ptr filled in on success, -errno on failure.
+ */
+static inline int io_uring_peek_cqe(struct io_uring *ring,
+ struct io_uring_cqe **cqe_ptr)
+{
+ *cqe_ptr = __io_uring_peek_cqe(ring);
+ if (*cqe_ptr)
+ return 0;
+
+ return __io_uring_get_cqe(ring, cqe_ptr, 0, 0, NULL);
+}
+
+/*
+ * Return an IO completion, waiting for it if necessary. Returns 0 with
+ * cqe_ptr filled in on success, -errno on failure.
+ */
+static inline int io_uring_wait_cqe(struct io_uring *ring,
+ struct io_uring_cqe **cqe_ptr)
+{
+ *cqe_ptr = __io_uring_peek_cqe(ring);
+ if (*cqe_ptr)
+ return 0;
+
+ return __io_uring_get_cqe(ring, cqe_ptr, 0, 1, NULL);
+}
+
#ifdef __cplusplus
}
#endif
diff --git a/src/liburing.map b/src/liburing.map
index 969ec61..6088904 100644
--- a/src/liburing.map
+++ b/src/liburing.map
@@ -28,4 +28,6 @@ LIBURING_0.2 {
io_uring_peek_batch_cqe;
io_uring_wait_cqe_timeout;
io_uring_wait_cqes;
+
+ __io_uring_get_cqe;
} LIBURING_0.1;
diff --git a/src/queue.c b/src/queue.c
index b9e26a0..b7d1acd 100644
--- a/src/queue.c
+++ b/src/queue.c
@@ -11,48 +11,29 @@
#include "liburing.h"
#include "liburing/barrier.h"
-static int __io_uring_get_cqe(struct io_uring *ring,
- struct io_uring_cqe **cqe_ptr, unsigned submit,
- unsigned wait_nr, sigset_t *sigmask)
+int __io_uring_get_cqe(struct io_uring *ring, struct io_uring_cqe **cqe_ptr,
+ unsigned submit, unsigned wait_nr, sigset_t *sigmask)
{
int ret, err = 0;
- unsigned head;
do {
- io_uring_for_each_cqe(ring, head, *cqe_ptr)
+ *cqe_ptr = __io_uring_peek_cqe(ring);
+ if (*cqe_ptr)
break;
- if (*cqe_ptr) {
- if ((*cqe_ptr)->user_data == LIBURING_UDATA_TIMEOUT) {
- if ((*cqe_ptr)->res < 0)
- err = (*cqe_ptr)->res;
- io_uring_cq_advance(ring, 1);
- if (!err)
- continue;
- *cqe_ptr = NULL;
- }
+ if (!wait_nr) {
+ err = -EAGAIN;
break;
}
- if (!wait_nr)
- return -EAGAIN;
ret = io_uring_enter(ring->ring_fd, submit, wait_nr,
- IORING_ENTER_GETEVENTS, sigmask);
+ IORING_ENTER_GETEVENTS, sigmask);
if (ret < 0)
- return -errno;
- } while (1);
+ err = -errno;
+ } while (!err);
return err;
}
/*
- * Return an IO completion, if one is readily available. Returns 0 with
- * cqe_ptr filled in on success, -errno on failure.
- */
-int io_uring_peek_cqe(struct io_uring *ring, struct io_uring_cqe **cqe_ptr)
-{
- return __io_uring_get_cqe(ring, cqe_ptr, 0, 0, NULL);
-}
-
-/*
* Fill in an array of IO completions up to count, if any are available.
* Returns the amount of IO completions filled.
*/
@@ -115,15 +96,6 @@ static int __io_uring_flush_sq(struct io_uring *ring)
}
/*
- * Return an IO completion, waiting for it if necessary. Returns 0 with
- * cqe_ptr filled in on success, -errno on failure.
- */
-int io_uring_wait_cqe(struct io_uring *ring, struct io_uring_cqe **cqe_ptr)
-{
- return __io_uring_get_cqe(ring, cqe_ptr, 0, 1, NULL);
-}
-
-/*
* Like io_uring_wait_cqe(), except it accepts a timeout value as well. Note
* that an sqe is used internally to handle the timeout. Applications using
* this function must never set sqe->user_data to LIBURING_UDATA_TIMEOUT!