io_uring: propagate locking state to poll cancel
io_uring/cancel.c
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/namei.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring_types.h"
#include "io_uring.h"
#include "tctx.h"
#include "poll.h"
#include "timeout.h"
#include "cancel.h"

struct io_cancel {
	struct file			*file;
	u64				addr;
	u32				flags;
	s32				fd;
};

#define CANCEL_FLAGS	(IORING_ASYNC_CANCEL_ALL | IORING_ASYNC_CANCEL_FD | \
			 IORING_ASYNC_CANCEL_ANY)

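/*
 * Match callback handed to io-wq by io_async_cancel_one(): a request matches
 * when it belongs to the same ring, and then either IORING_ASYNC_CANCEL_ANY
 * is set (match any request), IORING_ASYNC_CANCEL_FD is set and the request's
 * file matches, or neither is set and the user_data matches. For ALL/ANY
 * cancels, cd->seq marks each matched request so one cancel operation does
 * not count the same request twice when __io_async_cancel() loops.
 */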
static bool io_cancel_cb(struct io_wq_work *work, void *data)
{
	struct io_kiocb *req = container_of(work, struct io_kiocb, work);
	struct io_cancel_data *cd = data;

	if (req->ctx != cd->ctx)
		return false;
	if (cd->flags & IORING_ASYNC_CANCEL_ANY) {
		;
	} else if (cd->flags & IORING_ASYNC_CANCEL_FD) {
		if (req->file != cd->file)
			return false;
	} else {
		if (req->cqe.user_data != cd->data)
			return false;
	}
	if (cd->flags & (IORING_ASYNC_CANCEL_ALL|IORING_ASYNC_CANCEL_ANY)) {
		if (cd->seq == req->work.cancel_seq)
			return false;
		req->work.cancel_seq = cd->seq;
	}
	return true;
}

static int io_async_cancel_one(struct io_uring_task *tctx,
			       struct io_cancel_data *cd)
{
	enum io_wq_cancel cancel_ret;
	int ret = 0;
	bool all;

	if (!tctx || !tctx->io_wq)
		return -ENOENT;

	all = cd->flags & (IORING_ASYNC_CANCEL_ALL|IORING_ASYNC_CANCEL_ANY);
	cancel_ret = io_wq_cancel_cb(tctx->io_wq, io_cancel_cb, cd, all);
	switch (cancel_ret) {
	case IO_WQ_CANCEL_OK:
		ret = 0;
		break;
	case IO_WQ_CANCEL_RUNNING:
		ret = -EALREADY;
		break;
	case IO_WQ_CANCEL_NOTFOUND:
		ret = -ENOENT;
		break;
	}

	return ret;
}

int io_try_cancel(struct io_kiocb *req, struct io_cancel_data *cd,
		  unsigned issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	int ret;

	WARN_ON_ONCE(!io_wq_current_is_worker() && req->task != current);

	ret = io_async_cancel_one(req->task->io_uring, cd);
	/*
	 * Fall-through even for -EALREADY, as we may have poll armed
	 * that need unarming.
	 */
	if (!ret)
		return 0;

	ret = io_poll_cancel(ctx, cd, issue_flags);
	if (ret != -ENOENT)
		return ret;

	spin_lock(&ctx->completion_lock);
	if (!(cd->flags & IORING_ASYNC_CANCEL_FD))
		ret = io_timeout_cancel(ctx, cd);
	spin_unlock(&ctx->completion_lock);
	return ret;
}
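
/*
 * The issue_flags argument is the locking state referred to in the commit
 * subject: io_try_cancel() forwards the caller's locking state to
 * io_poll_cancel() instead of having poll cancelation take the ring lock
 * unconditionally. A rough sketch of the shape this gives io_poll_cancel()
 * in poll.c (illustrative only, not the verbatim upstream code;
 * __poll_cancel_one() here stands in for the hash-bucket walk):
 *
 *	int io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd,
 *			   unsigned issue_flags)
 *	{
 *		int ret;
 *
 *		// io_ring_submit_lock() only takes ctx->uring_lock when
 *		// issue_flags says the caller does not already hold it.
 *		io_ring_submit_lock(ctx, issue_flags);
 *		ret = __poll_cancel_one(ctx, cd);
 *		io_ring_submit_unlock(ctx, issue_flags);
 *		return ret;
 *	}
 */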

int io_async_cancel_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_cancel *cancel = io_kiocb_to_cmd(req);

	if (unlikely(req->flags & REQ_F_BUFFER_SELECT))
		return -EINVAL;
	if (sqe->off || sqe->len || sqe->splice_fd_in)
		return -EINVAL;

	cancel->addr = READ_ONCE(sqe->addr);
	cancel->flags = READ_ONCE(sqe->cancel_flags);
	if (cancel->flags & ~CANCEL_FLAGS)
		return -EINVAL;
	if (cancel->flags & IORING_ASYNC_CANCEL_FD) {
		if (cancel->flags & IORING_ASYNC_CANCEL_ANY)
			return -EINVAL;
		cancel->fd = READ_ONCE(sqe->fd);
	}

	return 0;
}
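
/*
 * SQE contract enforced above for IORING_OP_ASYNC_CANCEL: addr carries the
 * user_data to match, cancel_flags carries the IORING_ASYNC_CANCEL_* flags,
 * and fd is only read when IORING_ASYNC_CANCEL_FD is set. off, len and
 * splice_fd_in must be zero, and FD cannot be combined with ANY.
 */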

static int __io_async_cancel(struct io_cancel_data *cd, struct io_kiocb *req,
			     unsigned int issue_flags)
{
	bool all = cd->flags & (IORING_ASYNC_CANCEL_ALL|IORING_ASYNC_CANCEL_ANY);
	struct io_ring_ctx *ctx = cd->ctx;
	struct io_tctx_node *node;
	int ret, nr = 0;

	do {
		ret = io_try_cancel(req, cd, issue_flags);
		if (ret == -ENOENT)
			break;
		if (!all)
			return ret;
		nr++;
	} while (1);

	/* slow path, try all io-wq's */
	io_ring_submit_lock(ctx, issue_flags);
	ret = -ENOENT;
	list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
		struct io_uring_task *tctx = node->task->io_uring;

		ret = io_async_cancel_one(tctx, cd);
		if (ret != -ENOENT) {
			if (!all)
				break;
			nr++;
		}
	}
	io_ring_submit_unlock(ctx, issue_flags);
	return all ? nr : ret;
}

int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_cancel *cancel = io_kiocb_to_cmd(req);
	struct io_cancel_data cd = {
		.ctx	= req->ctx,
		.data	= cancel->addr,
		.flags	= cancel->flags,
		.seq	= atomic_inc_return(&req->ctx->cancel_seq),
	};
	int ret;

	if (cd.flags & IORING_ASYNC_CANCEL_FD) {
		if (req->flags & REQ_F_FIXED_FILE)
			req->file = io_file_get_fixed(req, cancel->fd,
							issue_flags);
		else
			req->file = io_file_get_normal(req, cancel->fd);
		if (!req->file) {
			ret = -EBADF;
			goto done;
		}
		cd.file = req->file;
	}

	ret = __io_async_cancel(&cd, req, issue_flags);
done:
	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}
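
/*
 * From userspace, cancelation is an ordinary SQE with opcode
 * IORING_OP_ASYNC_CANCEL. A minimal sketch using liburing, assuming a set-up
 * ring and a target_user_data value from an earlier submission (field names
 * follow io_async_cancel_prep() above):
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	memset(sqe, 0, sizeof(*sqe));
 *	sqe->opcode = IORING_OP_ASYNC_CANCEL;
 *	sqe->addr = target_user_data;			// request(s) to match
 *	sqe->cancel_flags = IORING_ASYNC_CANCEL_ALL;	// cancel every match
 *	io_uring_submit(&ring);
 *
 * The completion's res then follows __io_async_cancel(): with ALL or ANY it
 * is the number of requests found and canceled; otherwise it is 0 on
 * success, -ENOENT if nothing matched, or -EALREADY if the request was
 * already running.
 */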

void init_hash_table(struct io_hash_table *table, unsigned size)
{
	unsigned int i;

	for (i = 0; i < size; i++) {
		spin_lock_init(&table->hbs[i].lock);
		INIT_HLIST_HEAD(&table->hbs[i].list);
	}
}
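
/*
 * init_hash_table() sets up the per-bucket lock and list of the cancelation
 * hash used by poll. The lookup side lives in poll.c; roughly, a bucket is
 * picked by hashing the request's user_data over hash_bits (illustrative
 * sketch, not the exact upstream helper):
 *
 *	static struct io_hash_bucket *io_hash_bucket(struct io_hash_table *table,
 *						     u64 user_data)
 *	{
 *		u32 index = hash_long(user_data, table->hash_bits);
 *
 *		return &table->hbs[index];
 *	}
 */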