diff options
author	Jens Axboe <axboe@kernel.dk>	2022-02-20 11:33:15 -0700
committer	Jens Axboe <axboe@kernel.dk>	2022-02-21 08:55:07 -0700
commit	6cf23f9a6470d93b3396444f88b5bcd946fc0a55 (patch)
tree	2ad9a6bee5b1b1556cb20f0519c458890875b183
parent	52bb74252002fb46d777309d7512cbe35fb902ba (diff)
download	liburing-6cf23f9a6470d93b3396444f88b5bcd946fc0a55.tar.gz
	liburing-6cf23f9a6470d93b3396444f88b5bcd946fc0a55.tar.bz2
Move io_uring_get_sqe() inline in the liburing.h header
It's a fast path function, we should not be making it a function call
into the library.
Signed-off-by: Jens Axboe <axboe@kernel.dk>
-rw-r--r--	src/include/liburing.h	22
-rw-r--r--	src/queue.c	21
2 files changed, 21 insertions(+), 22 deletions(-)
diff --git a/src/include/liburing.h b/src/include/liburing.h
index 1269370..3cf314d 100644
--- a/src/include/liburing.h
+++ b/src/include/liburing.h
@@ -127,7 +127,6 @@ int io_uring_submit_and_wait_timeout(struct io_uring *ring,
 				     unsigned wait_nr,
 				     struct __kernel_timespec *ts,
 				     sigset_t *sigmask);
-struct io_uring_sqe *io_uring_get_sqe(struct io_uring *ring);
 int io_uring_register_buffers(struct io_uring *ring,
 			      const struct iovec *iovecs,
 			      unsigned nr_iovecs);
@@ -825,6 +824,27 @@ static inline int io_uring_wait_cqe(struct io_uring *ring,
 	return io_uring_wait_cqe_nr(ring, cqe_ptr, 1);
 }
 
+/*
+ * Return an sqe to fill. Application must later call io_uring_submit()
+ * when it's ready to tell the kernel about it. The caller may call this
+ * function multiple times before calling io_uring_submit().
+ *
+ * Returns a vacant sqe, or NULL if we're full.
+ */
+static inline struct io_uring_sqe *io_uring_get_sqe(struct io_uring *ring)
+{
+	struct io_uring_sq *sq = &ring->sq;
+	unsigned int head = io_uring_smp_load_acquire(sq->khead);
+	unsigned int next = sq->sqe_tail + 1;
+	struct io_uring_sqe *sqe = NULL;
+
+	if (next - head <= *sq->kring_entries) {
+		sqe = &sq->sqes[sq->sqe_tail & *sq->kring_mask];
+		sq->sqe_tail = next;
+	}
+	return sqe;
+}
+
 ssize_t io_uring_mlock_size(unsigned entries, unsigned flags);
 ssize_t io_uring_mlock_size_params(unsigned entries,
 				   struct io_uring_params *p);
diff --git a/src/queue.c b/src/queue.c
index f8384d1..e5fd983 100644
--- a/src/queue.c
+++ b/src/queue.c
@@ -403,27 +403,6 @@ int io_uring_submit_and_wait(struct io_uring *ring, unsigned wait_nr)
 	return __io_uring_submit_and_wait(ring, wait_nr);
 }
 
-/*
- * Return an sqe to fill. Application must later call io_uring_submit()
- * when it's ready to tell the kernel about it. The caller may call this
- * function multiple times before calling io_uring_submit().
- *
- * Returns a vacant sqe, or NULL if we're full.
- */
-struct io_uring_sqe *io_uring_get_sqe(struct io_uring *ring)
-{
-	struct io_uring_sq *sq = &ring->sq;
-	unsigned int head = io_uring_smp_load_acquire(sq->khead);
-	unsigned int next = sq->sqe_tail + 1;
-	struct io_uring_sqe *sqe = NULL;
-
-	if (next - head <= *sq->kring_entries) {
-		sqe = &sq->sqes[sq->sqe_tail & *sq->kring_mask];
-		sq->sqe_tail = next;
-	}
-	return sqe;
-}
-
 int __io_uring_sqring_wait(struct io_uring *ring)
 {
 	return ____sys_io_uring_enter(ring->ring_fd, 0, 0,