return NULL;
}
-#ifdef CONFIG_BLOCK
-static void io_resubmit_prep(struct io_kiocb *req)
-{
- struct io_async_rw *io = req->async_data;
-
- iov_iter_restore(&io->iter, &io->iter_state);
-}
-
static bool io_rw_should_reissue(struct io_kiocb *req)
{
+#ifdef CONFIG_BLOCK
umode_t mode = file_inode(req->file)->i_mode;
struct io_ring_ctx *ctx = req->ctx;

	if (!S_ISBLK(mode) && !S_ISREG(mode))
		return false;
	if ((req->flags & REQ_F_NOWAIT) || (io_wq_current_is_worker() &&
	    !(ctx->flags & IORING_SETUP_IOPOLL)))
		return false;
	/*
	 * If ref is dying, we might be running poll reap from the exit work.
	 * Don't attempt to reissue from that path, just let it fail with
	 * -EIO.
	 */
if (percpu_ref_is_dying(&ctx->refs))
return false;
- /*
- * Play it safe and assume not safe to re-import and reissue if we're
- * not in the original thread group (or in task context).
- */
- if (!same_thread_group(req->task, current) || !in_task())
- return false;
return true;
-}
#else
-static void io_resubmit_prep(struct io_kiocb *req)
-{
-}
-static bool io_rw_should_reissue(struct io_kiocb *req)
-{
return false;
-}
#endif
+}
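
For reference, a sketch of io_rw_should_reissue() as it reads with this hunk applied (reconstructed from the context above, so illustrative rather than authoritative): the #ifdef now sits inside a single function body, the same_thread_group()/in_task() test is dropped, and the !CONFIG_BLOCK stubs collapse into the #else branch.

static bool io_rw_should_reissue(struct io_kiocb *req)
{
#ifdef CONFIG_BLOCK
	umode_t mode = file_inode(req->file)->i_mode;
	struct io_ring_ctx *ctx = req->ctx;

	if (!S_ISBLK(mode) && !S_ISREG(mode))
		return false;
	if ((req->flags & REQ_F_NOWAIT) || (io_wq_current_is_worker() &&
	    !(ctx->flags & IORING_SETUP_IOPOLL)))
		return false;
	if (percpu_ref_is_dying(&ctx->refs))
		return false;
	return true;
#else
	return false;
#endif
}
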
static void io_req_end_write(struct io_kiocb *req)
{
	[...]
}
if (req->flags & REQ_F_REISSUE) {
+ struct io_async_rw *io = req->async_data;
+
req->flags &= ~REQ_F_REISSUE;
- io_resubmit_prep(req);
+ iov_iter_restore(&io->iter, &io->iter_state);
return -EAGAIN;
}
return IOU_ISSUE_SKIP_COMPLETE;
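
With io_resubmit_prep() gone, kiocb_done() performs the restore inline. iov_iter_restore() rewinds io->iter to the snapshot taken with iov_iter_save_state() before the submission attempt, so the -EAGAIN retry re-imports the same buffer range instead of whatever the failed attempt already consumed. The branch, as this hunk leaves it:

	if (req->flags & REQ_F_REISSUE) {
		struct io_async_rw *io = req->async_data;

		req->flags &= ~REQ_F_REISSUE;
		/* rewind the iterator to its pre-submission snapshot */
		iov_iter_restore(&io->iter, &io->iter_state);
		return -EAGAIN;
	}
	return IOU_ISSUE_SKIP_COMPLETE;
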
int ret;
ret = __io_read(req, issue_flags);
- if (ret >= 0) {
- ret = kiocb_done(req, ret, issue_flags);
- }
+ if (ret >= 0)
+ return kiocb_done(req, ret, issue_flags);
return ret;
}
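
The io_read() change is a pure simplification: return kiocb_done()'s result directly instead of staging it in a local. A sketch of the resulting function, assuming no other changes in the elided body:

int io_read(struct io_kiocb *req, unsigned int issue_flags)
{
	int ret;

	ret = __io_read(req, issue_flags);
	if (ret >= 0)
		return kiocb_done(req, ret, issue_flags);
	return ret;
}
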
return -EAGAIN;
}
done:
- ret = kiocb_done(req, ret2, issue_flags);
+ return kiocb_done(req, ret2, issue_flags);
} else {
ret_eagain:
iov_iter_restore(&io->iter, &io->iter_state);
io_req_end_write(req);
return -EAGAIN;
}
- return ret;
}
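
io_write() gets the same treatment: the done: path now returns kiocb_done() directly and ret_eagain returns -EAGAIN, which makes the trailing return ret; unreachable, hence its removal. The tail, as the hunk leaves it:

done:
		return kiocb_done(req, ret2, issue_flags);
	} else {
ret_eagain:
		iov_iter_restore(&io->iter, &io->iter_state);
		io_req_end_write(req);
		return -EAGAIN;
	}
}
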
void io_rw_fail(struct io_kiocb *req)