Commit | Line | Data |
---|---|---|
c9f06aa7 JA |
1 | // SPDX-License-Identifier: GPL-2.0 |
2 | ||
3 | /* | |
4 | * Arbitrary limit, can be raised if need be | |
5 | */ | |
6 | #define IO_RINGFD_REG_MAX 16 | |
7 | ||
/*
 * Per-task io_uring state. Presumably hung off the task_struct
 * (io_uring_add_tctx_node() reads it via current->io_uring).
 */
struct io_uring_task {
	/* submission side */
	int cached_refs;			/* batched request refs; presumably amortizes atomic updates — confirm */
	struct xarray xa;			/* assumed to index per-ctx io_tctx_node entries — verify against users */
	struct wait_queue_head wait;
	const struct io_ring_ctx *last;		/* last ctx this task submitted to; fast-path check in io_uring_add_tctx_node() */
	struct io_wq *io_wq;			/* async worker pool handle */
	struct percpu_counter inflight;		/* count of in-flight requests (per-cpu to scale) */
	atomic_t inflight_tracked;
	atomic_t in_idle;			/* non-zero while task is going idle/exiting — assumed; confirm */

	spinlock_t task_lock;			/* NOTE(review): appears to protect the two work lists below — confirm */
	struct io_wq_work_list task_list;
	struct io_wq_work_list prio_task_list;	/* higher-priority work, run ahead of task_list — assumed from name */
	struct callback_head task_work;		/* task_work callback used to drain the lists */
	struct file **registered_rings;		/* table of registered ring files, bounded by IO_RINGFD_REG_MAX */
	bool task_running;			/* whether task_work is already queued — assumed; confirm */
};
26 | ||
/*
 * Link between one task and one ring context, used for cancelation
 * bookkeeping (see the note above io_uring_add_tctx_node()).
 */
struct io_tctx_node {
	struct list_head ctx_node;	/* list linkage; the list head presumably lives in the ctx — confirm */
	struct task_struct *task;	/* owning task */
	struct io_ring_ctx *ctx;	/* ring this task has used */
};
32 | ||
/* Allocate and set up io_uring task context for @task on @ctx; returns 0 or a negative errno (assumed kernel convention). */
int io_uring_alloc_task_context(struct task_struct *task,
				struct io_ring_ctx *ctx);
/* Remove the tctx node stored at @index (presumably an xarray index — confirm). */
void io_uring_del_tctx_node(unsigned long index);
/* Slow path of io_uring_add_tctx_node(): register current task with @ctx. */
int __io_uring_add_tctx_node(struct io_ring_ctx *ctx);
/* Tear down @tctx's per-ctx nodes when the task is done with io_uring. */
void io_uring_clean_tctx(struct io_uring_task *tctx);

/* Drop all ring fds the current task registered (cf. IO_RINGFD_REG_MAX). */
void io_uring_unreg_ringfd(void);
/* Register up to @nr_args ring fds described by user pointer @__arg. */
int io_ringfd_register(struct io_ring_ctx *ctx, void __user *__arg,
		       unsigned nr_args);
/* Unregister previously registered ring fds. */
int io_ringfd_unregister(struct io_ring_ctx *ctx, void __user *__arg,
			 unsigned nr_args);
44 | ||
45 | /* | |
46 | * Note that this task has used io_uring. We use it for cancelation purposes. | |
47 | */ | |
48 | static inline int io_uring_add_tctx_node(struct io_ring_ctx *ctx) | |
49 | { | |
50 | struct io_uring_task *tctx = current->io_uring; | |
51 | ||
52 | if (likely(tctx && tctx->last == ctx)) | |
53 | return 0; | |
54 | return __io_uring_add_tctx_node(ctx); | |
55 | } |