current->flags |= PF_NO_TASKWORK;
}
-static __cold void io_ring_ctx_ref_free(struct percpu_ref *ref)
-{
- struct io_ring_ctx *ctx = container_of(ref, struct io_ring_ctx, refs);
-
- complete(&ctx->ref_comp);
-}
-
static __cold void io_fallback_req_func(struct work_struct *work)
{
struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx,
fallback_work.work);
struct llist_node *node = llist_del_all(&ctx->fallback_llist);
struct io_kiocb *req, *tmp;
struct io_tw_state ts = {};

io_kworker_tw_start();
- percpu_ref_get(&ctx->refs);
+ io_ring_ref_get(ctx);
mutex_lock(&ctx->uring_lock);
llist_for_each_entry_safe(req, tmp, node, io_task_work.node)
req->io_task_work.func(req, ts);
io_submit_flush_completions(ctx);
mutex_unlock(&ctx->uring_lock);
- percpu_ref_put(&ctx->refs);
+ io_ring_ref_put(ctx);
io_kworker_tw_end();
}
hash_bits = clamp(hash_bits, 1, 8);
if (io_alloc_hash_table(&ctx->cancel_table, hash_bits))
goto err;
- if (percpu_ref_init(&ctx->refs, io_ring_ctx_ref_free,
- 0, GFP_KERNEL))
- goto err;
+ io_ring_ref_init(ctx);
ctx->flags = p->flags;
ctx->hybrid_poll_time = LLONG_MAX;
atomic_set(&ctx->cq_wait_nr, IO_CQ_WAKE_INIT);
ret |= io_futex_cache_init(ctx);
ret |= io_rsrc_cache_init(ctx);
if (ret)
- goto free_ref;
+ goto err;
init_completion(&ctx->ref_comp);
xa_init_flags(&ctx->personalities, XA_FLAGS_ALLOC1);
mutex_init(&ctx->uring_lock);
mutex_init(&ctx->mmap_lock);
return ctx;
-
-free_ref:
- percpu_ref_exit(&ctx->refs);
err:
io_free_alloc_caches(ctx);
kvfree(ctx->cancel_table.hbs);
* worker for it).
*/
if (WARN_ON_ONCE(!same_thread_group(tctx->task, current) ||
- percpu_ref_is_dying(&req->ctx->refs)))
+ io_ring_ref_is_dying(req->ctx)))
atomic_or(IO_WQ_WORK_CANCEL, &req->work.flags);
trace_io_uring_queue_async_work(req, io_wq_is_hashed(&req->work));
ret = 1;
}
- percpu_ref_get_many(&ctx->refs, ret);
+ io_ring_ref_get_many(ctx, ret);
while (ret--) {
struct io_kiocb *req = reqs[ret];
io_submit_flush_completions(ctx);
mutex_unlock(&ctx->uring_lock);
- percpu_ref_put(&ctx->refs);
+ io_ring_ref_put(ctx);
}
/*
ctx_flush_and_put(ctx, ts);
ctx = req->ctx;
mutex_lock(&ctx->uring_lock);
- percpu_ref_get(&ctx->refs);
+ io_ring_ref_get(ctx);
}
INDIRECT_CALL_2(req->io_task_work.func,
io_poll_task_func, io_req_rw_complete,
req, ts);
if (sync && last_ctx != req->ctx) {
if (last_ctx) {
flush_delayed_work(&last_ctx->fallback_work);
- percpu_ref_put(&last_ctx->refs);
+ io_ring_ref_put(last_ctx);
}
last_ctx = req->ctx;
- percpu_ref_get(&last_ctx->refs);
+ io_ring_ref_get(last_ctx);
}
if (llist_add(&req->io_task_work.node,
&req->ctx->fallback_llist))
schedule_delayed_work(&req->ctx->fallback_work, 1);
}

if (last_ctx) {
flush_delayed_work(&last_ctx->fallback_work);
- percpu_ref_put(&last_ctx->refs);
+ io_ring_ref_put(last_ctx);
}
}
return;
}
- if (!percpu_ref_is_dying(&ctx->refs) &&
+ if (!io_ring_ref_is_dying(ctx) &&
!task_work_add(tctx->task, &tctx->task_work, ctx->notify_method))
return;
nr++;
}
if (nr)
- percpu_ref_put_many(&ctx->refs, nr);
+ io_ring_ref_put_many(ctx, nr);
mutex_unlock(&ctx->uring_lock);
}
if (!(ctx->flags & IORING_SETUP_NO_SQARRAY))
static_branch_dec(&io_key_has_sqarray);
- percpu_ref_exit(&ctx->refs);
free_uid(ctx->user);
io_req_caches_free(ctx);
if (ctx->hash_map)
* might've been lost due to loose synchronisation.
*/
wake_up_all(&ctx->poll_wq);
- percpu_ref_put(&ctx->refs);
+ io_ring_ref_put(ctx);
}
__cold void io_activate_pollwq(struct io_ring_ctx *ctx)
* only need to sync with it, which is done by injecting a tw
*/
init_task_work(&ctx->poll_wq_task_work, io_activate_pollwq_cb);
- percpu_ref_get(&ctx->refs);
+ io_ring_ref_get(ctx);
if (task_work_add(ctx->submitter_task, &ctx->poll_wq_task_work, TWA_SIGNAL))
- percpu_ref_put(&ctx->refs);
+ io_ring_ref_put(ctx);
out:
spin_unlock(&ctx->completion_lock);
}
struct creds *creds;
mutex_lock(&ctx->uring_lock);
- percpu_ref_kill(&ctx->refs);
+ io_ring_ref_kill(ctx);
xa_for_each(&ctx->personalities, index, creds)
io_unregister_personality(ctx, index);
mutex_unlock(&ctx->uring_lock);
{
__io_req_set_refcount(req, 1);
}
+
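+/*
+ * The ring reference is a plain atomic_long_t: the top bit marks the
+ * ring as dying, the remaining bits hold the actual reference count.
+ * When a put drops the masked count to zero, ->ref_comp is completed
+ * and ring teardown can proceed.
+ */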
+#define IO_RING_REF_DEAD (1UL << (BITS_PER_LONG - 1))
+#define IO_RING_REF_MASK (~IO_RING_REF_DEAD)
+
+static inline bool io_ring_ref_is_dying(struct io_ring_ctx *ctx)
+{
+ return atomic_long_read(&ctx->refs) & IO_RING_REF_DEAD;
+}
+
+static inline void io_ring_ref_put_many(struct io_ring_ctx *ctx, int nr_refs)
+{
+ unsigned long refs;
+
+ refs = atomic_long_sub_return(nr_refs, &ctx->refs);
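+ /* no references remain (DEAD bit ignored): wake ring teardown */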
+ if (!(refs & IO_RING_REF_MASK))
+ complete(&ctx->ref_comp);
+}
+
+static inline void io_ring_ref_put(struct io_ring_ctx *ctx)
+{
+ io_ring_ref_put_many(ctx, 1);
+}
+
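+/*
+ * Flip the DEAD bit (this must happen exactly once, as a second XOR
+ * would clear it again) and drop the initial reference taken at init.
+ */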
+static inline void io_ring_ref_kill(struct io_ring_ctx *ctx)
+{
+ atomic_long_xor(IO_RING_REF_DEAD, &ctx->refs);
+ io_ring_ref_put(ctx);
+}
+
+static inline void io_ring_ref_init(struct io_ring_ctx *ctx)
+{
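+ /* start at 1; this initial reference is dropped by io_ring_ref_kill() */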
+ atomic_long_set(&ctx->refs, 1);
+}
+
+static inline void io_ring_ref_get_many(struct io_ring_ctx *ctx, int nr_refs)
+{
+ atomic_long_add(nr_refs, &ctx->refs);
+}
+
+static inline void io_ring_ref_get(struct io_ring_ctx *ctx)
+{
+ atomic_long_inc(&ctx->refs);
+}
#endif
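
For reference, here is a minimal user-space sketch of the same DEAD-bit
refcount scheme, using C11 atomics in place of atomic_long_t and a flag in
place of ->ref_comp. The names and the demo flow are illustrative only, not
part of the patch:

#include <limits.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define REF_DEAD (1UL << (sizeof(unsigned long) * CHAR_BIT - 1))
#define REF_MASK (~REF_DEAD)

struct ring {
	atomic_ulong refs;
	atomic_bool released;			/* stands in for ctx->ref_comp */
};

static void ring_ref_init(struct ring *r)
{
	atomic_store(&r->refs, 1);		/* initial ref, dropped by kill */
	atomic_store(&r->released, false);
}

static void ring_ref_get(struct ring *r)
{
	atomic_fetch_add(&r->refs, 1);
}

static void ring_ref_put(struct ring *r)
{
	unsigned long refs = atomic_fetch_sub(&r->refs, 1) - 1;

	if (!(refs & REF_MASK))			/* last put: signal the waiter */
		atomic_store(&r->released, true);
}

static void ring_ref_kill(struct ring *r)
{
	atomic_fetch_xor(&r->refs, REF_DEAD);	/* set DEAD, exactly once */
	ring_ref_put(r);			/* drop the initial ref */
}

int main(void)
{
	struct ring r;

	ring_ref_init(&r);
	ring_ref_get(&r);			/* e.g. an in-flight request */
	ring_ref_kill(&r);			/* teardown begins */
	printf("dying=%d released=%d\n",
	       !!(atomic_load(&r.refs) & REF_DEAD),
	       (int)atomic_load(&r.released));	/* dying=1 released=0 */
	ring_ref_put(&r);			/* the request completes */
	printf("released=%d\n", (int)atomic_load(&r.released));	/* released=1 */
	return 0;
}

Because the put path masks out the DEAD bit before testing for zero, marking
the ring dead does not complete ->ref_comp by itself: in-flight references
drain normally, and only the final put fires the completion.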