};
struct io_tw_state {
- /* ->uring_lock is taken, callbacks can use io_tw_lock to lock it */
- bool locked;
};
enum {
fallback_work.work);
struct llist_node *node = llist_del_all(&ctx->fallback_llist);
struct io_kiocb *req, *tmp;
- struct io_tw_state ts = { .locked = true, };
+ struct io_tw_state ts = {};
percpu_ref_get(&ctx->refs);
mutex_lock(&ctx->uring_lock);
llist_for_each_entry_safe(req, tmp, node, io_task_work.node)
req->io_task_work.func(req, &ts);
- if (WARN_ON_ONCE(!ts.locked))
- return;
io_submit_flush_completions(ctx);
mutex_unlock(&ctx->uring_lock);
percpu_ref_put(&ctx->refs);
return;
if (ctx->flags & IORING_SETUP_TASKRUN_FLAG)
atomic_andnot(IORING_SQ_TASKRUN, &ctx->rings->sq_flags);
- if (ts->locked) {
- io_submit_flush_completions(ctx);
- mutex_unlock(&ctx->uring_lock);
- ts->locked = false;
- }
+
+ io_submit_flush_completions(ctx);
+ mutex_unlock(&ctx->uring_lock);
percpu_ref_put(&ctx->refs);
}
if (req->ctx != ctx) {
ctx_flush_and_put(ctx, &ts);
ctx = req->ctx;
-
- ts.locked = true;
mutex_lock(&ctx->uring_lock);
percpu_ref_get(&ctx->refs);
}
static inline int io_run_local_work_locked(struct io_ring_ctx *ctx,
int min_events)
{
- struct io_tw_state ts = { .locked = true, };
- int ret;
+ struct io_tw_state ts = {};
if (llist_empty(&ctx->work_llist))
return 0;
-
- ret = __io_run_local_work(ctx, &ts, min_events);
- /* shouldn't happen! */
- if (WARN_ON_ONCE(!ts.locked))
- mutex_lock(&ctx->uring_lock);
- return ret;
+ return __io_run_local_work(ctx, &ts, min_events);
}
static int io_run_local_work(struct io_ring_ctx *ctx, int min_events)
{
- struct io_tw_state ts = { .locked = true };
+ struct io_tw_state ts = {};
int ret;
mutex_lock(&ctx->uring_lock);
void io_req_task_complete(struct io_kiocb *req, struct io_tw_state *ts)
{
- if (ts->locked)
- io_req_complete_defer(req);
- else
- io_req_complete_post(req, IO_URING_F_UNLOCKED);
+ io_req_complete_defer(req);
}
/*
static inline void io_tw_lock(struct io_ring_ctx *ctx, struct io_tw_state *ts)
{
- if (!ts->locked) {
- mutex_lock(&ctx->uring_lock);
- ts->locked = true;
- }
+ lockdep_assert_held(&ctx->uring_lock);
}
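With ->locked gone, every task_work callback can assume the executor already holds ->uring_lock, so io_tw_lock() reduces to the lockdep assertion above. A minimal sketch of a callback under the new contract (foo_req_tw and its zeroed result are illustrative only, not part of this patch):

	static void foo_req_tw(struct io_kiocb *req, struct io_tw_state *ts)
	{
		/* no conditional locking: the executor holds ->uring_lock */
		io_tw_lock(req->ctx, ts);	/* now just lockdep_assert_held() */

		io_req_set_res(req, 0, 0);
		/* completion can always go through the deferred batch */
		io_req_task_complete(req, ts);
	}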
/*
__poll_t mask = mangle_poll(req->cqe.res &
req->apoll_events);
- if (!io_fill_cqe_req_aux(req, ts->locked, mask,
+ if (!io_fill_cqe_req_aux(req, true, mask,
IORING_CQE_F_MORE)) {
io_req_set_res(req, mask, 0);
return IOU_POLL_REMOVE_POLL_USE_RES;
io_req_io_end(req);
- if (req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)) {
- unsigned issue_flags = ts->locked ? 0 : IO_URING_F_UNLOCKED;
- req->cqe.flags |= io_put_kbuf(req, issue_flags);
- }
+ if (req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING))
+ req->cqe.flags |= io_put_kbuf(req, 0);
io_req_task_complete(req, ts);
}
struct io_ring_ctx *ctx = req->ctx;
if (!io_timeout_finish(timeout, data)) {
- bool filled;
- filled = io_fill_cqe_req_aux(req, ts->locked, -ETIME,
- IORING_CQE_F_MORE);
- if (filled) {
+ if (io_fill_cqe_req_aux(req, true, -ETIME, IORING_CQE_F_MORE)) {
/* re-arm timer */
spin_lock_irq(&ctx->timeout_lock);
list_add(&timeout->list, ctx->timeout_list.prev);
static void io_req_task_link_timeout(struct io_kiocb *req, struct io_tw_state *ts)
{
- unsigned issue_flags = ts->locked ? 0 : IO_URING_F_UNLOCKED;
struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
struct io_kiocb *prev = timeout->prev;
int ret = -ENOENT;
.data = prev->cqe.user_data,
};
- ret = io_try_cancel(req->task->io_uring, &cd, issue_flags);
+ ret = io_try_cancel(req->task->io_uring, &cd, 0);
}
io_req_set_res(req, ret ?: -ETIME, 0);
io_req_task_complete(req, ts);
static void io_uring_cmd_work(struct io_kiocb *req, struct io_tw_state *ts)
{
struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
- unsigned issue_flags = IO_URING_F_UNLOCKED;
- /* locked task_work executor checks the deffered list completion */
- if (ts->locked)
- issue_flags = IO_URING_F_COMPLETE_DEFER;
-
- ioucmd->task_work_cb(ioucmd, issue_flags);
+ /* task_work executor checks the deferred list completion */
+ ioucmd->task_work_cb(ioucmd, IO_URING_F_COMPLETE_DEFER);
}
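Because the task_work executor now always holds ->uring_lock, uring_cmd callbacks are invoked with IO_URING_F_COMPLETE_DEFER unconditionally. A hedged sketch of a provider-side callback queued via io_uring_cmd_complete_in_task() (foo_cmd_tw and the zero result are hypothetical):

	/* hypothetical driver callback, run from io_uring's task_work */
	static void foo_cmd_tw(struct io_uring_cmd *ioucmd, unsigned issue_flags)
	{
		/* issue_flags now always carries IO_URING_F_COMPLETE_DEFER */
		io_uring_cmd_done(ioucmd, 0, 0, issue_flags);
	}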
void __io_uring_cmd_do_in_task(struct io_uring_cmd *ioucmd,
static void io_waitid_complete(struct io_kiocb *req, int ret)
{
struct io_waitid *iw = io_kiocb_to_cmd(req, struct io_waitid);
- struct io_tw_state ts = { .locked = true };
+ struct io_tw_state ts = {};
/* anyone completing better be holding a reference */
WARN_ON_ONCE(!(atomic_read(&iw->refs) & IO_WAITID_REF_MASK));