@@ ... @@ static void io_req_complete_post(struct io_kiocb *req,
 		io_put_task(req->task, 1);
 		wq_list_add_head(&req->comp_list, &ctx->locked_free_list);
 		ctx->locked_free_nr++;
-		percpu_ref_put(&ctx->refs);
 	}
 	io_commit_cqring(ctx);
 	spin_unlock(&ctx->completion_lock);
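This hunk stops dropping a ctx reference when a completed request is recycled into locked_free_list: a cached request now keeps its reference. What each removed put used to cost on the hot path, roughly paraphrasing the 5.16-era percpu_ref fast path from include/linux/percpu-refcount.h (a sketch, not verbatim):

	rcu_read_lock();
	if (__ref_is_percpu(ref, &percpu_count))
		this_cpu_sub(*percpu_count, nr);	/* percpu fast path */
	else if (unlikely(atomic_long_sub_and_test(nr, &ref->data->count)))
		ref->data->release(ref);		/* last reference gone */
	rcu_read_unlock();

Cheap, but it is an RCU read section plus a per-CPU update for every recycled request, and it disappears entirely.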
@@ ... @@ static bool io_alloc_req_refill(struct io_ring_ctx *ctx)
 		ret = 1;
 	}
 
+	percpu_ref_get_many(&ctx->refs, ret);
 	for (i = 0; i < ret; i++) {
 		req = reqs[i];
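The counterpart on the allocation side: instead of acquiring references at submission time, the refill path takes one reference per freshly allocated request, batched into a single call. A toy model of what percpu_ref_get_many() amounts to on its fast path (plain C11, hypothetical names; the real thing targets a per-CPU counter under RCU):

	#include <stdatomic.h>

	static void refs_get_many(atomic_long *refs, long nr)
	{
		/* one update covers the whole batch of nr requests */
		atomic_fetch_add_explicit(refs, nr, memory_order_relaxed);
	}

Every request that enters the free cache already owns the ctx reference it will carry for its whole lifetime.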
@@ ... @@ static void __io_free_req(struct io_kiocb *req)
 	wq_list_add_head(&req->comp_list, &ctx->locked_free_list);
 	ctx->locked_free_nr++;
 	spin_unlock(&ctx->completion_lock);
-
-	percpu_ref_put(&ctx->refs);
 }
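Taken together, the three hunks above flip the ownership rule: a ctx reference travels with its request, whether the request is in flight or parked in a free cache. A toy model of the accounting that the alloc and free paths must keep true (illustrative userspace C, all names hypothetical):

	#include <assert.h>

	struct model { long refs, cached, inflight; };

	/* refill: take one ctx ref per request entering the cache */
	static void refill(struct model *m, int n)  { m->refs += n; m->cached += n; }
	/* submit and complete recycle requests without touching ctx refs */
	static void submit(struct model *m)         { m->cached--; m->inflight++; }
	static void complete(struct model *m)       { m->inflight--; m->cached++; }
	/* only emptying the caches drops references, in one batched put */
	static void caches_free(struct model *m)    { m->refs -= m->cached; m->cached = 0; }

	static void check(const struct model *m)
	{
		assert(m->refs == m->cached + m->inflight);
	}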
@@ ... @@ static void io_free_batch_list(struct io_ring_ctx *ctx,
 	__must_hold(&ctx->uring_lock)
 {
 	struct task_struct *task = NULL;
-	int task_refs = 0, ctx_refs = 0;
+	int task_refs = 0;
 
 	do {
 		struct io_kiocb *req = container_of(node, struct io_kiocb,
 						    comp_list);
@@ ... @@
 			task_refs = 0;
 		}
 		task_refs++;
-		ctx_refs++;
 		wq_stack_add_head(&req->comp_list, &ctx->submit_state.free_list);
 	} while (node);
 
-	if (ctx_refs)
-		percpu_ref_put_many(&ctx->refs, ctx_refs);
 	if (task)
 		io_put_task(task, task_refs);
 }
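With ctx refs gone from the free path, the only batching io_free_batch_list() still performs is for task references: consecutive requests belonging to the same task are coalesced into a single io_put_task() call. The run-length pattern in isolation (hypothetical userspace C mirroring the loop above):

	#include <stddef.h>

	struct task { long refs; };
	struct req { struct task *task; };

	static void put_task_many(struct task *t, int nr) { t->refs -= nr; }

	static void free_batch(struct req **reqs, size_t n)
	{
		struct task *task = NULL;
		int task_refs = 0;

		for (size_t i = 0; i < n; i++) {
			if (reqs[i]->task != task) {
				if (task)	/* flush the previous run */
					put_task_many(task, task_refs);
				task = reqs[i]->task;
				task_refs = 0;
			}
			task_refs++;
		}
		if (task)	/* flush the final run */
			put_task_many(task, task_refs);
	}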
@@ ... @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr)
 		return 0;
 	/* make sure SQ entry isn't read before tail */
 	nr = min3(nr, ctx->sq_entries, entries);
-	if (unlikely(!percpu_ref_tryget_many(&ctx->refs, nr)))
-		return -EAGAIN;
 	io_get_task_refs(nr);
 
 	io_submit_state_start(&ctx->submit_state, nr);
@@ ... @@
 		int unused = nr - ref_used;
 
 		current->io_uring->cached_refs += unused;
-		percpu_ref_put_many(&ctx->refs, unused);
 	}
 
 	io_submit_state_end(ctx);
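Dropping the percpu_ref_tryget_many() guard is safe because submission only consumes requests from the cache, and each of those already holds a ctx reference taken at refill time; the ring itself stays pinned by the file reference for the duration of io_uring_enter(). Note the asymmetry that remains when fewer than nr requests are submitted: unused task refs are still returned to the per-task cache, but there are no longer unused ctx refs to hand back. A hypothetical sketch of that leftover accounting:

	struct tctx { long cached_refs; };

	static void return_unused(struct tctx *tctx, int nr, int ref_used)
	{
		int unused = nr - ref_used;

		tctx->cached_refs += unused;	/* task refs: reused next submit */
		/* ctx refs: nothing to undo; none were taken per submission */
	}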
@@ ... @@
 static void io_req_caches_free(struct io_ring_ctx *ctx)
 {
 	struct io_submit_state *state = &ctx->submit_state;
+	int nr = 0;
 
 	mutex_lock(&ctx->uring_lock);
 	io_flush_cached_locked_reqs(ctx, state);
 
 	while (state->free_list.next) {
 		struct io_wq_work_node *node;
 		struct io_kiocb *req;
 
 		node = wq_stack_extract(&state->free_list);
 		req = container_of(node, struct io_kiocb, comp_list);
 		kmem_cache_free(req_cachep, req);
+		nr++;
 	}
+	if (nr)
+		percpu_ref_put_many(&ctx->refs, nr);
 	mutex_unlock(&ctx->uring_lock);
 }
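Here is the price of the scheme: ctx->refs can no longer reach zero while any request sits in a cache, so teardown must call io_req_caches_free() to put those references, counted up and dropped in one batched percpu_ref_put_many(). Simplified ordering of a teardown under the new rule (a sketch; the real sequencing is spread across io_ring_ctx_wait_and_kill() and the functions below):

	percpu_ref_kill(&ctx->refs);		/* no new refs; start draining */
	io_req_caches_free(ctx);		/* put one ref per cached request */
	wait_for_completion(&ctx->ref_comp);	/* fires when refs reach zero */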
@@ ... @@ static void io_ring_exit_work(struct work_struct *work)
 		io_sq_thread_unpark(sqd);
 	}
 
+	io_req_caches_free(ctx);
+
 	if (WARN_ON_ONCE(time_after(jiffies, timeout))) {
 		/* there is little hope left, don't run it too often */
 		interval = HZ * 60;
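io_ring_exit_work() already runs as a retry loop, so flushing the caches each pass costs nothing extra: late task work may complete requests back into the caches, and the next iteration drops their references again. The loop reduced to its refcount-relevant steps (a sketch assuming the io_uring_try_cancel_requests() helper of this era, not the verbatim function):

	do {
		io_uring_try_cancel_requests(ctx, NULL, true);
		io_req_caches_free(ctx);	/* cached reqs pin ctx->refs */
	} while (!wait_for_completion_timeout(&ctx->ref_comp, interval));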
@@ ... @@ static int io_ctx_quiesce(struct io_ring_ctx *ctx)
 	 */
 	mutex_unlock(&ctx->uring_lock);
 	do {
-		ret = wait_for_completion_interruptible(&ctx->ref_comp);
-		if (!ret)
+		ret = wait_for_completion_interruptible_timeout(&ctx->ref_comp, HZ);
+		if (ret) {
+			ret = min(0L, ret);
 			break;
+		}
+
 		ret = io_run_task_work_sig();
+		io_req_caches_free(ctx);
 	} while (ret >= 0);
 	mutex_lock(&ctx->uring_lock);
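This turns quiesce into a hybrid busy-wait: rather than sleeping indefinitely on ref_comp, it wakes every second, runs task work, and flushes the request caches, because a request parked in a cache would otherwise hold ctx->refs above zero forever. The `min(0L, ret)` idiom works because of how wait_for_completion_interruptible_timeout() reports its outcome:

	/*
	 * wait_for_completion_interruptible_timeout() returns:
	 *   > 0  completed (jiffies left)  -> min(0L, ret) == 0, success
	 *   < 0  -ERESTARTSYS on a signal  -> min(0L, ret) == ret, error out
	 *   == 0 timed out                 -> fall through, flush and retry
	 */
	static long collapse_wait_ret(long ret)
	{
		return ret < 0 ? ret : 0;	/* equivalent to min(0L, ret) here */
	}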