io_queue_linked_timeout(link);
}
-static void io_tw_requeue_iowq(struct io_kiocb *req, struct io_tw_state *ts)
-{
- req->flags &= ~REQ_F_REISSUE;
- io_queue_iowq(req);
-}
-
-void io_tw_queue_iowq(struct io_kiocb *req)
-{
- req->flags |= REQ_F_REISSUE | REQ_F_BL_NO_RECYCLE;
- req->io_task_work.func = io_tw_requeue_iowq;
- io_req_task_work_add(req);
-}
-
static __cold void io_queue_deferred(struct io_ring_ctx *ctx)
{
while (!list_empty(&ctx->defer_list)) {
void __io_req_task_work_add(struct io_kiocb *req, unsigned flags);
bool io_alloc_async_data(struct io_kiocb *req);
void io_req_task_queue(struct io_kiocb *req);
-void io_tw_queue_iowq(struct io_kiocb *req);
void io_req_task_complete(struct io_kiocb *req, struct io_tw_state *ts);
void io_req_task_queue_fail(struct io_kiocb *req, int ret);
void io_req_task_submit(struct io_kiocb *req, struct io_tw_state *ts);
return NULL;
}
+#ifdef CONFIG_BLOCK
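+/*
+ * Restore the iov_iter to the state saved at submission time so the
+ * data can be safely re-imported when the request is reissued.
+ */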
+static void io_resubmit_prep(struct io_kiocb *req)
+{
+ struct io_async_rw *io = req->async_data;
+
+ iov_iter_restore(&io->iter, &io->iter_state);
+}
+
static bool io_rw_should_reissue(struct io_kiocb *req)
{
-#ifdef CONFIG_BLOCK
umode_t mode = file_inode(req->file)->i_mode;
struct io_ring_ctx *ctx = req->ctx;
*/
if (percpu_ref_is_dying(&ctx->refs))
return false;
+ /*
+ * Play it safe and assume it's not safe to re-import and reissue if
+ * we're not in the original thread group (or not in task context).
+ */
+ if (!same_thread_group(req->task, current) || !in_task())
+ return false;
return true;
+}
#else
+static void io_resubmit_prep(struct io_kiocb *req)
+{
+}
+static bool io_rw_should_reissue(struct io_kiocb *req)
+{
return false;
-#endif
}
+#endif
static void io_req_end_write(struct io_kiocb *req)
{
* current cycle.
*/
io_req_io_end(req);
- io_tw_queue_iowq(req);
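+ /*
+ * Ask for the request to be reissued rather than completed with
+ * -EAGAIN, and prevent the provided buffer from being recycled so
+ * the retry can reuse it.
+ */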
+ req->flags |= REQ_F_REISSUE | REQ_F_BL_NO_RECYCLE;
return true;
}
req_set_fail(req);
io_req_end_write(req);
if (unlikely(res != req->cqe.res)) {
if (res == -EAGAIN && io_rw_should_reissue(req)) {
- io_tw_queue_iowq(req);
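+ /* As above: flag a reissue and keep the provided buffer around. */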
+ req->flags |= REQ_F_REISSUE | REQ_F_BL_NO_RECYCLE;
return;
}
req->cqe.res = res;
}
if (req->flags & REQ_F_REISSUE) {
- struct io_async_rw *io = req->async_data;
-
req->flags &= ~REQ_F_REISSUE;
- iov_iter_restore(&io->iter, &io->iter_state);
+ io_resubmit_prep(req);
return -EAGAIN;
}
return IOU_ISSUE_SKIP_COMPLETE;
ret = io_iter_do_read(rw, &io->iter);
if (ret == -EAGAIN || (req->flags & REQ_F_REISSUE)) {
- if (req->flags & REQ_F_REISSUE)
- return IOU_ISSUE_SKIP_COMPLETE;
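+ /* Clear the reissue request; the -EAGAIN handling below covers the retry. */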
+ req->flags &= ~REQ_F_REISSUE;
/* If we can poll, just do that. */
if (io_file_can_poll(req))
return -EAGAIN;
else
ret2 = -EINVAL;
- if (req->flags & REQ_F_REISSUE)
- return IOU_ISSUE_SKIP_COMPLETE;
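+ /* Treat a flagged reissue like -EAGAIN so the retry logic below picks it up. */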
+ if (req->flags & REQ_F_REISSUE) {
+ req->flags &= ~REQ_F_REISSUE;
+ ret2 = -EAGAIN;
+ }
/*
* Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just