// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/namei.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring_types.h"
#include "io_uring.h"
#include "tctx.h"
#include "poll.h"
#include "timeout.h"
#include "cancel.h"

struct io_cancel {
	struct file			*file;
	u64				addr;
	u32				flags;
	s32				fd;
};

#define CANCEL_FLAGS	(IORING_ASYNC_CANCEL_ALL | IORING_ASYNC_CANCEL_FD | \
			 IORING_ASYNC_CANCEL_ANY)
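
/*
 * Match callback for io_wq_cancel_cb(): returns true if the queued work
 * item belongs to the same ring and matches the cancelation criteria in
 * @data (user_data, file, or "any"). For ALL/ANY cancelations the
 * cancel_seq check ensures each request is only matched once per pass.
 */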
static bool io_cancel_cb(struct io_wq_work *work, void *data)
{
	struct io_kiocb *req = container_of(work, struct io_kiocb, work);
	struct io_cancel_data *cd = data;

	if (req->ctx != cd->ctx)
		return false;
	if (cd->flags & IORING_ASYNC_CANCEL_ANY) {
		;
	} else if (cd->flags & IORING_ASYNC_CANCEL_FD) {
		if (req->file != cd->file)
			return false;
	} else {
		if (req->cqe.user_data != cd->data)
			return false;
	}
	if (cd->flags & (IORING_ASYNC_CANCEL_ALL|IORING_ASYNC_CANCEL_ANY)) {
		if (cd->seq == req->work.cancel_seq)
			return false;
		req->work.cancel_seq = cd->seq;
	}
	return true;
}
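
/*
 * Attempt to cancel matching work on a single task's io-wq. Returns 0 if
 * a request was canceled, -EALREADY if it is already running, or -ENOENT
 * if nothing matched.
 */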
static int io_async_cancel_one(struct io_uring_task *tctx,
			       struct io_cancel_data *cd)
{
	enum io_wq_cancel cancel_ret;
	int ret = 0;
	bool all;

	if (!tctx || !tctx->io_wq)
		return -ENOENT;

	all = cd->flags & (IORING_ASYNC_CANCEL_ALL|IORING_ASYNC_CANCEL_ANY);
	cancel_ret = io_wq_cancel_cb(tctx->io_wq, io_cancel_cb, cd, all);
	switch (cancel_ret) {
	case IO_WQ_CANCEL_OK:
		ret = 0;
		break;
	case IO_WQ_CANCEL_RUNNING:
		ret = -EALREADY;
		break;
	case IO_WQ_CANCEL_NOTFOUND:
		ret = -ENOENT;
		break;
	}

	return ret;
}
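
/*
 * Try the cheap cancelation paths in order: the issuing task's io-wq,
 * then armed poll requests, then (unless canceling by fd) timeouts.
 */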
int io_try_cancel(struct io_kiocb *req, struct io_cancel_data *cd)
{
	struct io_ring_ctx *ctx = req->ctx;
	int ret;

	WARN_ON_ONCE(!io_wq_current_is_worker() && req->task != current);

	ret = io_async_cancel_one(req->task->io_uring, cd);
	/*
	 * Fall through even for -EALREADY, as we may have a poll request
	 * armed that needs unarming.
	 */
	if (!ret)
		return 0;

	ret = io_poll_cancel(ctx, cd);
	if (ret != -ENOENT)
		return ret;

	spin_lock(&ctx->completion_lock);
	if (!(cd->flags & IORING_ASYNC_CANCEL_FD))
		ret = io_timeout_cancel(ctx, cd);
	spin_unlock(&ctx->completion_lock);
	return ret;
}
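
/*
 * Prep handler for IORING_OP_ASYNC_CANCEL: validate the SQE and stash the
 * target user_data (or fd) and the cancelation flags.
 */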
int io_async_cancel_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_cancel *cancel = io_kiocb_to_cmd(req);

	if (unlikely(req->flags & REQ_F_BUFFER_SELECT))
		return -EINVAL;
	if (sqe->off || sqe->len || sqe->splice_fd_in)
		return -EINVAL;

	cancel->addr = READ_ONCE(sqe->addr);
	cancel->flags = READ_ONCE(sqe->cancel_flags);
	if (cancel->flags & ~CANCEL_FLAGS)
		return -EINVAL;
	if (cancel->flags & IORING_ASYNC_CANCEL_FD) {
		if (cancel->flags & IORING_ASYNC_CANCEL_ANY)
			return -EINVAL;
		cancel->fd = READ_ONCE(sqe->fd);
	}

	return 0;
}
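
/*
 * Core cancelation loop: repeatedly cancel matches via the issuing task,
 * then fall back to every io-wq attached to the ring. With ALL/ANY set
 * the return value is the number of requests canceled, otherwise the
 * result of the first match.
 */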
static int __io_async_cancel(struct io_cancel_data *cd, struct io_kiocb *req,
			     unsigned int issue_flags)
{
	bool all = cd->flags & (IORING_ASYNC_CANCEL_ALL|IORING_ASYNC_CANCEL_ANY);
	struct io_ring_ctx *ctx = cd->ctx;
	struct io_tctx_node *node;
	int ret, nr = 0;

	do {
		ret = io_try_cancel(req, cd);
		if (ret == -ENOENT)
			break;
		if (!all)
			return ret;
		nr++;
	} while (1);

	/* slow path, try all io-wq's */
	io_ring_submit_lock(ctx, issue_flags);
	ret = -ENOENT;
	list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
		struct io_uring_task *tctx = node->task->io_uring;

		ret = io_async_cancel_one(tctx, cd);
		if (ret != -ENOENT) {
			if (!all)
				break;
			nr++;
		}
	}
	io_ring_submit_unlock(ctx, issue_flags);
	return all ? nr : ret;
}
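
/*
 * Issue handler for IORING_OP_ASYNC_CANCEL. Resolves the target file when
 * IORING_ASYNC_CANCEL_FD is set, runs the cancelation and posts the result
 * as the request's CQE res.
 */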
int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_cancel *cancel = io_kiocb_to_cmd(req);
	struct io_cancel_data cd = {
		.ctx	= req->ctx,
		.data	= cancel->addr,
		.flags	= cancel->flags,
		.seq	= atomic_inc_return(&req->ctx->cancel_seq),
	};
	int ret;

	if (cd.flags & IORING_ASYNC_CANCEL_FD) {
		if (req->flags & REQ_F_FIXED_FILE)
			req->file = io_file_get_fixed(req, cancel->fd,
							issue_flags);
		else
			req->file = io_file_get_normal(req, cancel->fd);
		if (!req->file) {
			ret = -EBADF;
			goto done;
		}
		cd.file = req->file;
	}

	ret = __io_async_cancel(&cd, req, issue_flags);
done:
	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}
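
/*
 * Initialize the spinlock and list head of each bucket in a cancelation
 * hash table (e.g. the per-ring hash used to find armed poll requests).
 */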
void init_hash_table(struct io_hash_bucket *hash_table, unsigned size)
{
	unsigned int i;

	for (i = 0; i < size; i++) {
		spin_lock_init(&hash_table[i].lock);
		INIT_HLIST_HEAD(&hash_table[i].list);
	}
}