Commit | Line | Data |
---|---|---|
99f15d8d JA |
1 | // SPDX-License-Identifier: GPL-2.0 |
2 | #include <linux/kernel.h> | |
3 | #include <linux/errno.h> | |
4 | #include <linux/file.h> | |
5 | #include <linux/io_uring.h> | |
6 | ||
7 | #include <uapi/linux/io_uring.h> | |
8 | ||
99f15d8d JA |
9 | #include "io_uring.h" |
10 | #include "uring_cmd.h" | |
11 | ||
/*
 * Task-work shim: runs the driver-supplied completion callback for a
 * uring_cmd request.  Installed as req->io_task_work.func by
 * io_uring_cmd_complete_in_task() below.
 */
static void io_uring_cmd_work(struct io_kiocb *req, bool *locked)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req);

	/* @locked is part of the io_task_work signature; unused here */
	ioucmd->task_work_cb(ioucmd);
}
18 | ||
/*
 * Arrange for @task_work_cb to be invoked on @ioucmd from the context of
 * the task that owns the request, via the request's task_work machinery.
 * Exported for drivers implementing ->uring_cmd() that must defer
 * completion work to task context.
 */
void io_uring_cmd_complete_in_task(struct io_uring_cmd *ioucmd,
			void (*task_work_cb)(struct io_uring_cmd *))
{
	struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);

	/* record the callback before the work is queued for execution */
	ioucmd->task_work_cb = task_work_cb;
	req->io_task_work.func = io_uring_cmd_work;
	io_req_task_work_add(req);
}
EXPORT_SYMBOL_GPL(io_uring_cmd_complete_in_task);
29 | ||
30 | static inline void io_req_set_cqe32_extra(struct io_kiocb *req, | |
31 | u64 extra1, u64 extra2) | |
32 | { | |
33 | req->extra1 = extra1; | |
34 | req->extra2 = extra2; | |
35 | req->flags |= REQ_F_CQE32_INIT; | |
36 | } | |
37 | ||
/*
 * Called by consumers of io_uring_cmd, if they originally returned
 * -EIOCBQUEUED upon receiving the command.
 *
 * @ret:  primary command result; a negative value marks the request failed.
 * @res2: secondary result, posted as the extra1 word of a big CQE; only
 *        consumed when the ring was created with IORING_SETUP_CQE32.
 */
void io_uring_cmd_done(struct io_uring_cmd *ioucmd, ssize_t ret, ssize_t res2)
{
	struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);

	if (ret < 0)
		req_set_fail(req);

	/* result must be recorded before the request is completed */
	io_req_set_res(req, 0, ret);
	if (req->ctx->flags & IORING_SETUP_CQE32)
		io_req_set_cqe32_extra(req, res2, 0);
	__io_req_complete(req, 0);
}
EXPORT_SYMBOL_GPL(io_uring_cmd_done);
55 | ||
56 | int io_uring_cmd_prep_async(struct io_kiocb *req) | |
57 | { | |
58 | struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req); | |
59 | size_t cmd_size; | |
60 | ||
61 | cmd_size = uring_cmd_pdu_size(req->ctx->flags & IORING_SETUP_SQE128); | |
62 | ||
63 | memcpy(req->async_data, ioucmd->cmd, cmd_size); | |
64 | return 0; | |
65 | } | |
66 | ||
67 | int io_uring_cmd_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) | |
68 | { | |
69 | struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req); | |
70 | ||
71 | if (sqe->rw_flags || sqe->__pad1) | |
72 | return -EINVAL; | |
73 | ioucmd->cmd = sqe->cmd; | |
74 | ioucmd->cmd_op = READ_ONCE(sqe->cmd_op); | |
75 | return 0; | |
76 | } | |
77 | ||
/*
 * Issue a passthrough command to the target file's ->uring_cmd() handler.
 *
 * Ring setup flags (SQE128/CQE32/IOPOLL) are translated into issue_flags
 * so the handler knows the SQE/CQE geometry and polling mode in effect.
 *
 * Returns:
 *  -EOPNOTSUPP             target file has no ->uring_cmd() handler
 *  -EAGAIN                 handler asked for a retry; the SQE payload is
 *                          copied to async_data first so it survives SQ
 *                          entry reuse
 *  -ENOMEM                 async_data allocation for the retry failed
 *  IOU_ISSUE_SKIP_COMPLETE handler returned -EIOCBQUEUED; completion will
 *                          arrive later via io_uring_cmd_done()
 *  IOU_OK                  any other handler result; completed right here
 */
int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req);
	struct io_ring_ctx *ctx = req->ctx;
	struct file *file = req->file;
	int ret;

	if (!req->file->f_op->uring_cmd)
		return -EOPNOTSUPP;

	/* advertise ring geometry and polling mode to the handler */
	if (ctx->flags & IORING_SETUP_SQE128)
		issue_flags |= IO_URING_F_SQE128;
	if (ctx->flags & IORING_SETUP_CQE32)
		issue_flags |= IO_URING_F_CQE32;
	if (ctx->flags & IORING_SETUP_IOPOLL)
		issue_flags |= IO_URING_F_IOPOLL;

	/* on a retry the payload lives in async_data, not the original SQE */
	if (req_has_async_data(req))
		ioucmd->cmd = req->async_data;

	ret = file->f_op->uring_cmd(ioucmd, issue_flags);
	if (ret == -EAGAIN) {
		if (!req_has_async_data(req)) {
			if (io_alloc_async_data(req))
				return -ENOMEM;
			io_uring_cmd_prep_async(req);
		}
		return -EAGAIN;
	}

	if (ret != -EIOCBQUEUED) {
		io_uring_cmd_done(ioucmd, ret, 0);
		return IOU_OK;
	}

	return IOU_ISSUE_SKIP_COMPLETE;
}