F: io_uring/
F: include/linux/io_uring.h
F: include/linux/io_uring_types.h
+F: include/trace/events/io_uring.h
F: include/uapi/linux/io_uring.h
F: tools/io_uring/
} else {
atomic_inc(&ev_fd->refs);
if (!atomic_fetch_or(BIT(IO_EVENTFD_OP_SIGNAL_BIT), &ev_fd->ops))
- call_rcu(&ev_fd->rcu, io_eventfd_ops);
+ call_rcu_hurry(&ev_fd->rcu, io_eventfd_ops);
else
atomic_dec(&ev_fd->refs);
}
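The eventfd hunk above takes a reference and lets only the caller that sets IO_EVENTFD_OP_SIGNAL_BIT queue the RCU callback (now via call_rcu_hurry() so the signal is not held back by lazy RCU); every other caller simply drops its reference again. A minimal userspace sketch of that pattern, illustration only, using C11 atomics and hypothetical names rather than the io_uring code:

#include <stdatomic.h>
#include <stdio.h>

#define SIGNAL_BIT	(1u << 0)

static atomic_uint refs = 1;	/* reference held by the registration */
static atomic_uint ops;		/* SIGNAL_BIT is set while a signal is queued */

/* stands in for the RCU callback; in the kernel it runs asynchronously */
static void signal_cb(void)
{
	atomic_fetch_and(&ops, ~SIGNAL_BIT);	/* allow the next signal */
	printf("eventfd signalled\n");
	atomic_fetch_sub(&refs, 1);		/* put the winner's reference */
}

static void queue_signal(void)
{
	atomic_fetch_add(&refs, 1);
	/* only the caller that flips the bit queues the deferred callback */
	if (!(atomic_fetch_or(&ops, SIGNAL_BIT) & SIGNAL_BIT))
		signal_cb();			/* call_rcu_hurry() in the kernel */
	else
		atomic_fetch_sub(&refs, 1);	/* a signal is already pending */
}

int main(void)
{
	queue_signal();
	return 0;
}

The point of the bit is coalescing: however many completions race here, at most one deferred signal is in flight at a time.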
void __io_commit_cqring_flush(struct io_ring_ctx *ctx)
{
- if (ctx->off_timeout_used || ctx->drain_active) {
+ if (ctx->off_timeout_used)
+ io_flush_timeouts(ctx);
+ if (ctx->drain_active) {
spin_lock(&ctx->completion_lock);
- if (ctx->off_timeout_used)
- io_flush_timeouts(ctx);
- if (ctx->drain_active)
- io_queue_deferred(ctx);
+ io_queue_deferred(ctx);
spin_unlock(&ctx->completion_lock);
}
if (ctx->has_evfd)
io_eventfd_flush_signal(ctx);
}
+static inline void io_cq_lock(struct io_ring_ctx *ctx)
+ __acquires(ctx->completion_lock)
+{
+ spin_lock(&ctx->completion_lock);
+}
+
+static inline void io_cq_unlock(struct io_ring_ctx *ctx)
+ __releases(ctx->completion_lock)
+{
+ spin_unlock(&ctx->completion_lock);
+}
+
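For reference, the __acquires()/__releases() markers on the new helpers are sparse context annotations, not executable code; under a regular compiler they expand to nothing. The kernel's definitions look roughly like this (simplified from compiler_types.h):

#ifdef __CHECKER__
# define __acquires(x)	__attribute__((context(x, 0, 1)))
# define __releases(x)	__attribute__((context(x, 1, 0)))
#else
# define __acquires(x)
# define __releases(x)
#endif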
/* keep it inlined for io_submit_flush_completions() */
static inline void __io_cq_unlock_post(struct io_ring_ctx *ctx)
__releases(ctx->completion_lock)
io_cq_lock(ctx);
if (!(req->flags & REQ_F_CQE_SKIP))
- __io_fill_cqe_req(ctx, req);
+ io_fill_cqe_req(ctx, req);
/*
* If we're the last reference to this request, add to our locked
{
struct io_ring_ctx *ctx = req->ctx;
- io_cq_lock(ctx);
+ spin_lock(&ctx->completion_lock);
io_disarm_next(req);
- io_cq_unlock_post(ctx);
+ spin_unlock(&ctx->completion_lock);
}
static inline struct io_kiocb *io_req_find_next(struct io_kiocb *req)
}
if (!schedule_hrtimeout(&timeout, HRTIMER_MODE_ABS))
return -ETIME;
- return 1;
+
+ /*
+ * Run task_work after scheduling. If we got woken because of
+ * task_work being processed, run it now rather than let the caller
+ * do another wait loop.
+ */
+ ret = io_run_task_work_sig(ctx);
+ return ret < 0 ? ret : 1;
}
/*
prepare_to_wait_exclusive(&ctx->cq_wait, &iowq.wq,
TASK_INTERRUPTIBLE);
ret = io_cqring_wait_schedule(ctx, &iowq, timeout);
+ if (__io_cqring_events_user(ctx) >= min_events)
+ break;
cond_resched();
} while (ret > 0);
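Taken together, the two hunks above reshape the wait loop: after coming back from the schedule, the waiter first runs pending task_work (which may itself post completions) and only then re-checks whether enough CQEs are available, instead of bouncing through another full wait iteration. A userspace analogue with a condition variable, illustration only and with hypothetical names:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int events;		/* completions posted so far */
static int pending_work;	/* deferred "task work" items for the waiter */

/* deferred work may itself post completions, like task_work can post CQEs */
static void run_task_work(void)
{
	while (pending_work) {
		pending_work--;
		events++;
	}
}

static void *producer(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	pending_work = 2;	/* hand the waiter work instead of completions */
	pthread_cond_signal(&cond);
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t thr;
	int min_events = 2;

	pthread_create(&thr, NULL, producer, NULL);

	pthread_mutex_lock(&lock);
	run_task_work();	/* drain anything queued before we first sleep */
	while (events < min_events) {
		pthread_cond_wait(&cond, &lock);
		/* run deferred work right after waking, then re-check the
		 * condition rather than forcing another full wait iteration */
		run_task_work();
	}
	pthread_mutex_unlock(&lock);

	pthread_join(thr, NULL);
	printf("got %d events\n", events);
	return 0;
}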
#define io_for_each_link(pos, head) \
for (pos = (head); pos; pos = pos->link)
-static inline void io_cq_lock(struct io_ring_ctx *ctx)
- __acquires(ctx->completion_lock)
-{
- spin_lock(&ctx->completion_lock);
-}
-
-static inline void io_cq_unlock(struct io_ring_ctx *ctx)
-{
- spin_unlock(&ctx->completion_lock);
-}
-
void io_cq_unlock_post(struct io_ring_ctx *ctx);
static inline struct io_uring_cqe *io_get_cqe_overflow(struct io_ring_ctx *ctx,
static inline bool io_task_work_pending(struct io_ring_ctx *ctx)
{
- return test_thread_flag(TIF_NOTIFY_SIGNAL) ||
- !wq_list_empty(&ctx->work_llist);
+ return task_work_pending(current) || !wq_list_empty(&ctx->work_llist);
}
static inline int io_run_task_work_ctx(struct io_ring_ctx *ctx)
if (req->flags & REQ_F_BUFFER_SELECT) {
compat_ssize_t clen;
+ iomsg->free_iov = NULL;
if (msg.msg_iovlen == 0) {
sr->len = 0;
} else if (msg.msg_iovlen > 1) {
goto retry_multishot;
if (mshot_finished) {
- io_netmsg_recycle(req, issue_flags);
/* fast path, check for non-NULL to avoid function call */
if (kmsg->free_iov)
kfree(kmsg->free_iov);
+ io_netmsg_recycle(req, issue_flags);
req->flags &= ~REQ_F_NEED_CLEANUP;
}
continue;
req->cqe.flags = io_put_kbuf(req, 0);
- __io_fill_cqe_req(req->ctx, req);
+ io_fill_cqe_req(req->ctx, req);
}
if (unlikely(!nr_events))
}
static bool io_kill_timeout(struct io_kiocb *req, int status)
- __must_hold(&req->ctx->completion_lock)
__must_hold(&req->ctx->timeout_lock)
{
struct io_timeout_data *io = req->async_data;
}
__cold void io_flush_timeouts(struct io_ring_ctx *ctx)
- __must_hold(&ctx->completion_lock)
{
- u32 seq = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
+ u32 seq;
struct io_timeout *timeout, *tmp;
spin_lock_irq(&ctx->timeout_lock);
+ seq = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
+
list_for_each_entry_safe(timeout, tmp, &ctx->timeout_list, list) {
struct io_kiocb *req = cmd_to_io_kiocb(timeout);
u32 events_needed, events_got;
struct io_timeout *timeout, *tmp;
int canceled = 0;
- io_cq_lock(ctx);
+ /*
+ * completion_lock is needed for io_match_task(). Take it before
+ * timeout_lock to keep the locking order.
+ */
+ spin_lock(&ctx->completion_lock);
spin_lock_irq(&ctx->timeout_lock);
list_for_each_entry_safe(timeout, tmp, &ctx->timeout_list, list) {
struct io_kiocb *req = cmd_to_io_kiocb(timeout);
canceled++;
}
spin_unlock_irq(&ctx->timeout_lock);
- io_cq_unlock_post(ctx);
+ spin_unlock(&ctx->completion_lock);
return canceled != 0;
}
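The comment added above is about lock ordering: any path that needs both locks must take completion_lock before timeout_lock, so that no two paths can ever hold the locks in opposite orders and deadlock. A small userspace illustration of the rule, with pthread mutexes standing in for the kernel spinlocks and hypothetical function names:

#include <pthread.h>

static pthread_mutex_t completion_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t timeout_lock = PTHREAD_MUTEX_INITIALIZER;

/* every path that needs both locks uses the same order: completion, timeout */
static void kill_timeouts(void)
{
	pthread_mutex_lock(&completion_lock);
	pthread_mutex_lock(&timeout_lock);

	/* walk the timeout list and cancel matching entries here */

	pthread_mutex_unlock(&timeout_lock);
	pthread_mutex_unlock(&completion_lock);
}

/* a path that only needs the inner lock is still fine on its own */
static void flush_timeouts(void)
{
	pthread_mutex_lock(&timeout_lock);
	/* expire timeouts here */
	pthread_mutex_unlock(&timeout_lock);
}

int main(void)
{
	kill_timeouts();
	flush_timeouts();
	return 0;
}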