}
EXPORT_SYMBOL_GPL(io_uring_cmd_done);
-static void io_uring_cmd_cache_sqes(struct io_kiocb *req)
-{
-	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
-	struct io_uring_cmd_data *cache = req->async_data;
-
-	memcpy(cache->sqes, ioucmd->sqe, uring_sqe_size(req->ctx));
-	ioucmd->sqe = cache->sqes;
-}
-
static int io_uring_cmd_prep_setup(struct io_kiocb *req,
				   const struct io_uring_sqe *sqe)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
	struct io_uring_cmd_data *cache;

	cache = io_uring_alloc_async_data(&req->ctx->uring_cache, req);
	if (!cache)
		return -ENOMEM;
	cache->op_data = NULL;
-	ioucmd->sqe = sqe;
-	/* defer memcpy until we need it */
-	if (unlikely(req->flags & REQ_F_FORCE_ASYNC))
-		io_uring_cmd_cache_sqes(req);
+	/*
+	 * Unconditionally cache the SQE for now - this is only needed for
+	 * requests that go async, but prep handlers must ensure that any
+	 * SQE data is stable beyond prep. Since uring_cmd is special in
+	 * that it doesn't read in per-op data at prep time, play it safe
+	 * and copy the SQE unconditionally. This can later get relaxed.
+	 */
+	memcpy(cache->sqes, sqe, uring_sqe_size(req->ctx));
+	ioucmd->sqe = cache->sqes;
	return 0;
}
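
To make the intent of the prep-time copy concrete, here is a minimal standalone C sketch (not kernel code; the struct and function names are illustrative only) of the pattern: snapshot shared submission data into per-request storage during prep, so that any later async handling reads a stable copy rather than the shared SQE slot, which may already have been reused by the time the request is retried.

#include <string.h>

struct sqe { unsigned char data[64]; };		/* stand-in for struct io_uring_sqe */

struct request {
	struct sqe stable;			/* per-request copy, like cache->sqes */
	const struct sqe *sqe;			/* pointer the issue path dereferences */
};

/* prep: unconditionally snapshot the shared SQE into the request */
static int prep(struct request *req, const struct sqe *shared)
{
	memcpy(&req->stable, shared, sizeof(*shared));
	req->sqe = &req->stable;		/* async retries now read the copy */
	return 0;
}

int main(void)
{
	struct sqe shared = { { 0 } };
	struct request req;

	prep(&req, &shared);
	memset(&shared, 0xff, sizeof(shared));	/* shared slot gets reused... */
	return req.sqe->data[0];		/* ...but the request still sees 0 */
}
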
	}
	ret = file->f_op->uring_cmd(ioucmd, issue_flags);
-	if (ret == -EAGAIN) {
-		struct io_uring_cmd_data *cache = req->async_data;
-
-		if (ioucmd->sqe != cache->sqes)
-			io_uring_cmd_cache_sqes(req);
-		return -EAGAIN;
-	} else if (ret == -EIOCBQUEUED) {
-		return -EIOCBQUEUED;
-	}
-
+	if (ret == -EAGAIN || ret == -EIOCBQUEUED)
+		return ret;
	if (ret < 0)
		req_set_fail(req);
	io_req_uring_cleanup(req, issue_flags);