bool uring_locked = false;
struct io_ring_ctx *ctx = NULL;
struct tctx_tw *tw = container_of(cb, struct tctx_tw, task_work);
- struct io_uring_task *tctx = container_of(tw, struct io_uring_task, tw);
while (1) {
struct io_wq_work_node *node;
ctx_flush_and_put(ctx, &uring_locked);
/* relaxed read is enough as only the task itself sets ->in_idle */
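+	/* tw is percpu now; reach the owning tctx via the ->tctx back-pointer, not container_of() */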
- if (unlikely(atomic_read(&tctx->in_idle)))
+ if (unlikely(atomic_read(&tw->tctx->in_idle)))
io_uring_drop_tctx_refs(current);
}
{
struct io_uring_task *tctx = req->task->io_uring;
struct io_ring_ctx *ctx = req->ctx;
- struct tctx_tw *tw = &tctx->tw;
struct io_wq_work_node *node;
unsigned long flags;
+ struct tctx_tw *tw;
bool running;
- spin_lock_irqsave(&tw->task_lock, flags);
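+	/* IRQs must be off before this_cpu_ptr() so the task cannot migrate between picking the percpu tw and locking it */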
+ local_irq_save(flags);
+ tw = this_cpu_ptr(tctx->tw);
+
+ spin_lock(&tw->task_lock);
wq_list_add_tail(&req->io_task_work.node, &tw->task_list);
running = tw->task_running;
if (!running)
WARN_ON_ONCE(tctx->cached_refs);
percpu_counter_destroy(&tctx->inflight);
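+	/* drop the percpu task_work contexts allocated in io_uring_alloc_task_context() */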
+ free_percpu(tctx->tw);
kfree(tctx);
tsk->io_uring = NULL;
}
struct io_ring_ctx *ctx)
{
struct io_uring_task *tctx;
- int ret;
+ int ret, cpu;
tctx = kzalloc(sizeof(*tctx), GFP_KERNEL);
if (unlikely(!tctx))
	return -ENOMEM;
ret = percpu_counter_init(&tctx->inflight, 0, GFP_KERNEL);
if (unlikely(ret)) {
	kfree(tctx);
	return ret;
}
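+	/* one task_work context (lock, list, callback) per possible CPU */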
+ tctx->tw = alloc_percpu(struct tctx_tw);
+ if (!tctx->tw) {
+ percpu_counter_destroy(&tctx->inflight);
+ kfree(tctx);
+ return -ENOMEM;
+ }
+
tctx->io_wq = io_init_wq_offload(ctx, task);
if (IS_ERR(tctx->io_wq)) {
ret = PTR_ERR(tctx->io_wq);
percpu_counter_destroy(&tctx->inflight);
+ free_percpu(tctx->tw);
kfree(tctx);
return ret;
}
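+	/* initialise every CPU's tw, including the back-pointer that tctx_task_work() uses to find this tctx */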
+ for_each_possible_cpu(cpu) {
+ struct tctx_tw *tw = per_cpu_ptr(tctx->tw, cpu);
+
+ spin_lock_init(&tw->task_lock);
+ INIT_WQ_LIST(&tw->task_list);
+ init_task_work(&tw->task_work, tctx_task_work);
+ tw->tctx = tctx;
+ }
+
xa_init(&tctx->xa);
init_waitqueue_head(&tctx->wait);
atomic_set(&tctx->in_idle, 0);
atomic_set(&tctx->inflight_tracked, 0);
task->io_uring = tctx;
- spin_lock_init(&tctx->tw.task_lock);
- INIT_WQ_LIST(&tctx->tw.task_list);
- init_task_work(&tctx->tw.task_work, tctx_task_work);
return 0;
}
// SPDX-License-Identifier: GPL-2.0
+#include <linux/percpu.h>
+
/*
* Arbitrary limit, can be raised if need be
*/
spinlock_t task_lock;
struct io_wq_work_list task_list;
struct callback_head task_work;
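+	/* back-pointer to the owning tctx; container_of() cannot recover it from a percpu instance */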
+ struct io_uring_task *tctx;
bool task_running;
};
atomic_t inflight_tracked;
atomic_t in_idle;
- struct tctx_tw tw;
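+	/* percpu replaces the single embedded tctx_tw so each CPU queues task_work under its own lock */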
+	struct tctx_tw __percpu *tw;
struct file *registered_rings[IO_RINGFD_REG_MAX];
};