REQ_F_REISSUE_BIT,
REQ_F_DONT_REISSUE_BIT,
REQ_F_CREDS_BIT,
+ REQ_F_REFCOUNT_BIT,
/* keep async read/write and isreg together and in order */
REQ_F_NOWAIT_READ_BIT,
REQ_F_NOWAIT_WRITE_BIT,
REQ_F_ISREG = BIT(REQ_F_ISREG_BIT),
/* has creds assigned */
REQ_F_CREDS = BIT(REQ_F_CREDS_BIT),
+ /* skip refcounting if not set */
+ REQ_F_REFCOUNT = BIT(REQ_F_REFCOUNT_BIT),
};
struct async_poll {
static bool io_cqring_fill_event(struct io_ring_ctx *ctx, u64 user_data,
long res, unsigned int cflags);
static void io_put_req(struct io_kiocb *req);
-static void io_put_req_deferred(struct io_kiocb *req, int nr);
+static void io_put_req_deferred(struct io_kiocb *req);
static void io_dismantle_req(struct io_kiocb *req);
static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req);
static void io_queue_linked_timeout(struct io_kiocb *req);
static inline bool req_ref_inc_not_zero(struct io_kiocb *req)
{
+ WARN_ON_ONCE(!(req->flags & REQ_F_REFCOUNT));
return atomic_inc_not_zero(&req->refs);
}
-static inline bool req_ref_sub_and_test(struct io_kiocb *req, int refs)
-{
- WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req));
- return atomic_sub_and_test(refs, &req->refs);
-}
-
static inline bool req_ref_put_and_test(struct io_kiocb *req)
{
+ if (likely(!(req->flags & REQ_F_REFCOUNT)))
+ return true;
+
WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req));
return atomic_dec_and_test(&req->refs);
}
static inline void req_ref_put(struct io_kiocb *req)
{
+ WARN_ON_ONCE(!(req->flags & REQ_F_REFCOUNT));
WARN_ON_ONCE(req_ref_put_and_test(req));
}
static inline void req_ref_get(struct io_kiocb *req)
{
+ WARN_ON_ONCE(!(req->flags & REQ_F_REFCOUNT));
WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req));
atomic_inc(&req->refs);
}
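+/* Lazily opt a request in to refcounting, starting with a single reference. */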
+static inline void io_req_refcount(struct io_kiocb *req)
+{
+ if (!(req->flags & REQ_F_REFCOUNT)) {
+ req->flags |= REQ_F_REFCOUNT;
+ atomic_set(&req->refs, 1);
+ }
+}
+
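As a rough user-space sketch of the opt-in refcount model above (C11
atomics; struct req, req_set_refcount() and the REQ_F_REFCOUNT define
below are invented stand-ins, not the io_uring code itself): a request
that never opts in pays no atomic cost, and its first put immediately
reports that the last reference is gone.

#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>

#define REQ_F_REFCOUNT (1u << 0)	/* stand-in for the real flag bit */

struct req {
	unsigned int flags;
	atomic_int refs;
};

/* Lazily opt in: the first caller sets the flag and seeds one reference. */
static void req_set_refcount(struct req *r)
{
	if (!(r->flags & REQ_F_REFCOUNT)) {
		r->flags |= REQ_F_REFCOUNT;
		atomic_store(&r->refs, 1);
	}
}

/* True when the caller dropped the last (or only implicit) reference. */
static bool req_put_and_test(struct req *r)
{
	if (!(r->flags & REQ_F_REFCOUNT))
		return true;	/* hot path: no atomic op at all */
	return atomic_fetch_sub(&r->refs, 1) == 1;
}

static void req_get(struct req *r)
{
	assert(r->flags & REQ_F_REFCOUNT);
	atomic_fetch_add(&r->refs, 1);
}

int main(void)
{
	struct req a = { .flags = 0 };
	assert(req_put_and_test(&a));	/* never refcounted: finish inline */

	struct req b = { .flags = 0 };
	req_set_refcount(&b);		/* e.g. arming poll would do this */
	req_get(&b);			/* extra ref for another context */
	assert(!req_put_and_test(&b));	/* one reference still out */
	assert(req_put_and_test(&b));	/* last reference gone */
	return 0;
}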
static inline void io_req_set_rsrc_node(struct io_kiocb *req)
{
struct io_ring_ctx *ctx = req->ctx;
atomic_read(&req->ctx->cq_timeouts) + 1);
list_del_init(&req->timeout.list);
io_cqring_fill_event(req->ctx, req->user_data, status, 0);
- io_put_req_deferred(req, 1);
+ io_put_req_deferred(req);
}
}
static void io_req_complete_failed(struct io_kiocb *req, long res)
{
req_set_fail(req);
- io_put_req(req);
io_req_complete_post(req, res, 0);
}
return nr != 0;
}
+/*
+ * A request might get retired back into the request caches even before opcode
+ * handlers and io_issue_sqe() are done with it, e.g. inline completion path.
+ * Because of that, io_alloc_req() should be called only under ->uring_lock
+ * and with extra caution to avoid getting a request that is still being
+ * worked on.
+ */
static struct io_kiocb *io_alloc_req(struct io_ring_ctx *ctx)
+ __must_hold(&ctx->uring_lock)
{
struct io_submit_state *state = &ctx->submit_state;
gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
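A hedged user-space analogy for the locking rule above (all names,
cache_alloc_locked() included, are invented): because the completion
side can push objects back into the cache at any moment, the allocating
side must pop from it under the same lock the recycler takes.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
	struct node *next;
};

/* cache_lock plays the role of ->uring_lock in this sketch. */
static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *cache;

/* The "completion side": may recycle an object into the cache at any time. */
static void cache_recycle(struct node *n)
{
	pthread_mutex_lock(&cache_lock);
	n->next = cache;
	cache = n;
	pthread_mutex_unlock(&cache_lock);
}

/* Caller must hold cache_lock, mirroring __must_hold(&ctx->uring_lock). */
static struct node *cache_alloc_locked(void)
{
	struct node *n = cache;

	if (n)
		cache = n->next;
	return n ? n : malloc(sizeof(*n));
}

int main(void)
{
	struct node *n;

	pthread_mutex_lock(&cache_lock);
	n = cache_alloc_locked();
	pthread_mutex_unlock(&cache_lock);

	cache_recycle(n);		/* completion path returns it */

	pthread_mutex_lock(&cache_lock);
	n = cache_alloc_locked();	/* pops the recycled node back */
	pthread_mutex_unlock(&cache_lock);

	printf("reused node at %p\n", (void *)n);
	free(n);
	return 0;
}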
if (hrtimer_try_to_cancel(&io->timer) != -1) {
io_cqring_fill_event(link->ctx, link->user_data,
-ECANCELED, 0);
- io_put_req_deferred(link, 1);
+ io_put_req_deferred(link);
return true;
}
}
trace_io_uring_fail_link(req, link);
io_cqring_fill_event(link->ctx, link->user_data, -ECANCELED, 0);
- io_put_req_deferred(link, 2);
+ io_put_req_deferred(link);
link = nxt;
}
}
for (i = 0; i < nr; i++) {
struct io_kiocb *req = state->compl_reqs[i];
- /* submission and completion refs */
- if (req_ref_sub_and_test(req, 2))
+ if (req_ref_put_and_test(req))
io_req_free_batch(&rb, req, &ctx->submit_state);
}
io_free_req(req);
}
-static inline void io_put_req_deferred(struct io_kiocb *req, int refs)
+static inline void io_put_req_deferred(struct io_kiocb *req)
{
- if (req_ref_sub_and_test(req, refs)) {
+ if (req_ref_put_and_test(req)) {
req->io_task_work.func = io_free_req;
io_req_task_work_add(req);
}
if (READ_ONCE(req->result) == -EAGAIN && resubmit &&
!(req->flags & REQ_F_DONT_REISSUE)) {
req->iopoll_completed = 0;
- req_ref_get(req);
io_req_task_queue_reissue(req);
continue;
}
if (check_reissue && (req->flags & REQ_F_REISSUE)) {
req->flags &= ~REQ_F_REISSUE;
if (io_resubmit_prep(req)) {
- req_ref_get(req);
io_req_task_queue_reissue(req);
} else {
int cflags = 0;
req->rw.kiocb.ki_flags &= ~IOCB_WAITQ;
list_del_init(&wait->entry);
-
- /* submit ref gets dropped, acquire a new one */
- req_ref_get(req);
io_req_task_queue(req);
return 1;
}
req->apoll = apoll;
req->flags |= REQ_F_POLLED;
ipt.pt._qproc = io_async_queue_proc;
+ io_req_refcount(req);
ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask,
io_async_wake);
static bool io_poll_remove_one(struct io_kiocb *req)
__must_hold(&req->ctx->completion_lock)
{
- int refs;
bool do_complete;
io_poll_remove_double(req);
io_cqring_fill_event(req->ctx, req->user_data, -ECANCELED, 0);
io_commit_cqring(req->ctx);
req_set_fail(req);
-
- /* non-poll requests have submit ref still */
- refs = 1 + (req->opcode != IORING_OP_POLL_ADD);
- io_put_req_deferred(req, refs);
+ io_put_req_deferred(req);
}
return do_complete;
}
if (flags & ~IORING_POLL_ADD_MULTI)
return -EINVAL;
+ io_req_refcount(req);
poll->events = io_poll_parse_events(sqe, flags);
return 0;
}
req_set_fail(req);
io_cqring_fill_event(ctx, req->user_data, -ECANCELED, 0);
- io_put_req_deferred(req, 1);
+ io_put_req_deferred(req);
return 0;
}
struct io_kiocb *timeout;
int ret = 0;
+ io_req_refcount(req);
+ /* one ref will be dropped by ->io_free_work() after returning to io-wq */
+ req_ref_get(req);
+
timeout = io_prep_linked_timeout(req);
if (timeout)
io_queue_linked_timeout(timeout);
}
/* avoid locking problems by failing it from a clean context */
- if (ret) {
- /* io-wq is going to take one down */
- req_ref_get(req);
+ if (ret)
io_req_task_queue_fail(req, ret);
- }
}
static inline struct io_fixed_file *io_fixed_file_slot(struct io_file_table *table,
nxt->opcode != IORING_OP_LINK_TIMEOUT)
return NULL;
+ /*
+ * linked timeouts should have two refs once prep'ed: one owned by the
+ * timer side, dropped when it fires or is cancelled, and one by normal
+ * request completion
+ */
+ io_req_refcount(req);
+ io_req_refcount(nxt);
+ req_ref_get(nxt);
+
nxt->timeout.head = req;
nxt->flags |= REQ_F_LTIMEOUT_ACTIVE;
req->flags |= REQ_F_LINK_TIMEOUT;
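A small user-space illustration of the two-reference scheme (pthread
based; struct timeout and both paths are invented for the sketch): the
timer path and the completion path each own one reference, so whichever
drops last frees the object and neither can free it out from under the
other.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct timeout {
	atomic_int refs;
};

static void timeout_put(struct timeout *t)
{
	/* whoever drops the last reference frees the object */
	if (atomic_fetch_sub(&t->refs, 1) == 1) {
		printf("freeing timeout\n");
		free(t);
	}
}

/* Stands in for the timer firing (or being cancelled): drops its ref. */
static void *timer_path(void *arg)
{
	timeout_put(arg);
	return NULL;
}

int main(void)
{
	struct timeout *t = malloc(sizeof(*t));
	pthread_t thr;

	atomic_store(&t->refs, 2);	/* prep: one ref for each path */
	pthread_create(&thr, NULL, timer_path, t);
	timeout_put(t);			/* completion path drops its ref */
	pthread_join(&thr, NULL);
	return 0;			/* freed exactly once, by whichever path ran last */
}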
* doesn't support non-blocking read/write attempts
*/
if (likely(!ret)) {
- /* drop submission reference */
if (req->flags & REQ_F_COMPLETE_INLINE) {
struct io_ring_ctx *ctx = req->ctx;
struct io_submit_state *state = &ctx->submit_state;
state->compl_reqs[state->compl_nr++] = req;
if (state->compl_nr == ARRAY_SIZE(state->compl_reqs))
io_submit_flush_completions(ctx);
- } else {
- io_put_req(req);
}
} else if (ret == -EAGAIN && !(req->flags & REQ_F_NOWAIT)) {
switch (io_arm_poll_handler(req)) {
req->user_data = READ_ONCE(sqe->user_data);
req->file = NULL;
req->fixed_rsrc_refs = NULL;
- /* one is dropped after submission, the other at completion */
- atomic_set(&req->refs, 2);
req->task = current;
/* enforce forwards compatibility on users */