 	}
 }
 
-void io_req_complete_failed(struct io_kiocb *req, s32 res)
+void io_req_defer_failed(struct io_kiocb *req, s32 res)
 	__must_hold(&ctx->uring_lock)
 {
 	const struct io_op_def *def = &io_op_defs[req->opcode];
 
 	io_req_set_res(req, res, io_put_kbuf(req, IO_URING_F_UNLOCKED));
 	if (def->fail)
 		def->fail(req);
-	io_req_complete_post(req, 0);
+	io_req_complete_defer(req);
 }
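
Context for the new call: io_req_complete_defer() does not post a CQE
immediately, it parks the request on the ring's deferred-completion list,
to be flushed later while ->uring_lock is still held. A minimal sketch of
its shape, assuming the submit_state/compl_reqs layout of this era of the
tree; treat the exact body as an approximation:

	static inline void io_req_complete_defer(struct io_kiocb *req)
		__must_hold(&req->ctx->uring_lock)
	{
		struct io_submit_state *state = &req->ctx->submit_state;

		/* park the request; its CQE is posted when the list is flushed */
		wq_list_add_tail(&req->comp_list, &state->compl_reqs);
	}
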
@@ … @@
 static void io_req_task_cancel(struct io_kiocb *req, bool *locked)
 {
 	/* not needed for normal modes, but SQPOLL depends on it */
 	io_tw_lock(req->ctx, locked);
-	io_req_complete_failed(req, req->cqe.res);
+	io_req_defer_failed(req, req->cqe.res);
 }
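
This task-work callback can rely on deferred failure only because
io_tw_lock() guarantees ->uring_lock is held by the time
io_req_defer_failed() runs. A sketch of that helper, assuming the
bool *locked task-work convention used above:

	static inline void io_tw_lock(struct io_ring_ctx *ctx, bool *locked)
	{
		/* take the ring lock once per task-work batch and remember it */
		if (!*locked) {
			mutex_lock(&ctx->uring_lock);
			*locked = true;
		}
	}
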
@@ … @@ void io_req_task_submit(struct io_kiocb *req, bool *locked)
 	if (likely(!(req->task->flags & PF_EXITING)))
 		io_queue_sqe(req);
 	else
-		io_req_complete_failed(req, -EFAULT);
+		io_req_defer_failed(req, -EFAULT);
 }
@@ … @@ void io_req_task_queue_fail(struct io_kiocb *req, int ret)
 	ret = io_req_prep_async(req);
 	if (ret) {
 fail:
-		io_req_complete_failed(req, ret);
+		io_req_defer_failed(req, ret);
 		return;
 	}
 	io_prep_async_link(req);
@@ … @@
 	struct io_kiocb *linked_timeout;
 
 	if (ret != -EAGAIN || (req->flags & REQ_F_NOWAIT)) {
-		io_req_complete_failed(req, ret);
+		io_req_defer_failed(req, ret);
 		return;
 	}
@@ … @@
 		/*
 		 * We don't submit, fail them all, for that replace hardlinks
 		 * with normal links. Extra REQ_F_LINK is tolerated.
 		 */
 		req->flags &= ~REQ_F_HARDLINK;
 		req->flags |= REQ_F_LINK;
-		io_req_complete_failed(req, req->cqe.res);
+		io_req_defer_failed(req, req->cqe.res);
 	} else if (unlikely(req->ctx->drain_active)) {
 		io_drain_req(req);
 	} else {
 		int ret = io_req_prep_async(req);
 
 		if (unlikely(ret))
-			io_req_complete_failed(req, ret);
+			io_req_defer_failed(req, ret);
 		else
 			io_queue_iowq(req, NULL);
 	}
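
All of the call sites above only park requests; the failures become visible
when the deferred list is drained. A hedged sketch of that drain loop,
modeled on __io_submit_flush_completions() (simplified: locking around CQE
posting and batched freeing are omitted, and __io_fill_cqe_req() stands in
for the era's CQE-posting helper):

	static void example_flush_completions(struct io_ring_ctx *ctx)
		__must_hold(&ctx->uring_lock)
	{
		struct io_submit_state *state = &ctx->submit_state;
		struct io_wq_work_node *node;

		/* walk the deferred list and post one CQE per parked request */
		__wq_list_for_each(node, &state->compl_reqs) {
			struct io_kiocb *req = container_of(node, struct io_kiocb,
							    comp_list);

			if (!(req->flags & REQ_F_CQE_SKIP))
				__io_fill_cqe_req(ctx, req);
		}
		INIT_WQ_LIST(&state->compl_reqs);
	}
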
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ … @@
 int io_run_task_work_sig(struct io_ring_ctx *ctx);
 int __io_run_local_work(struct io_ring_ctx *ctx, bool *locked);
 int io_run_local_work(struct io_ring_ctx *ctx);
-void io_req_complete_failed(struct io_kiocb *req, s32 res);
+void io_req_defer_failed(struct io_kiocb *req, s32 res);
 void io_req_complete_post(struct io_kiocb *req, unsigned issue_flags);
 bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags,
 		     bool allow_overflow);
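
Callers of the renamed helper inherit its contract: io_req_defer_failed()
must be invoked with ->uring_lock held (per the __must_hold annotation on
the definition), and the failure CQE only appears after the next flush. A
purely hypothetical caller to illustrate (example_fail_request() is not a
real kernel function):

	static void example_fail_request(struct io_kiocb *req, int err)
		__must_hold(&req->ctx->uring_lock)
	{
		/* parks req on the deferred list; CQE posted at flush time */
		io_req_defer_failed(req, err);
	}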