#ifndef IO_URING_TYPES_H
#define IO_URING_TYPES_H

#include <linux/blkdev.h>
#include <linux/task_work.h>
#include <linux/bitmap.h>
#include <linux/llist.h>
#include <uapi/linux/io_uring.h>

enum {
	/*
	 * A hint to not wake right away but delay until there are enough
	 * task_work items queued to match the number of CQEs the task is
	 * waiting for.
	 *
	 * Must not be used with requests generating more than one CQE.
	 * It's also ignored unless IORING_SETUP_DEFER_TASKRUN is set.
	 */
	IOU_F_TWQ_LAZY_WAKE		= 1,
};

enum io_uring_cmd_flags {
	IO_URING_F_COMPLETE_DEFER	= 1,
	IO_URING_F_UNLOCKED		= 2,
	/* the request is executed from poll, it should not be freed */
	IO_URING_F_MULTISHOT		= 4,
	/* executed by io-wq */
	IO_URING_F_IOWQ			= 8,
	/* int's last bit, sign checks are usually faster than a bit test */
	IO_URING_F_NONBLOCK		= INT_MIN,

	/* ctx state flags, for URING_CMD */
	IO_URING_F_SQE128		= (1 << 8),
	IO_URING_F_CQE32		= (1 << 9),
	IO_URING_F_IOPOLL		= (1 << 10),

	/* set when uring wants to cancel a previously issued command */
	IO_URING_F_CANCEL		= (1 << 11),
	IO_URING_F_COMPAT		= (1 << 12),
};

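/*
 * A minimal sketch of the sign-bit trick noted above: since
 * IO_URING_F_NONBLOCK == INT_MIN, the bit test below can compile down to
 * a plain "is negative" check. The helper name is hypothetical.
 */
static inline bool io_flags_nonblock_sketch(int issue_flags)
{
	return issue_flags & IO_URING_F_NONBLOCK;
}
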
struct io_wq_work_node {
	struct io_wq_work_node *next;
};

struct io_wq_work_list {
	struct io_wq_work_node *first;
	struct io_wq_work_node *last;
};

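/*
 * The two structs above form an intrusive, singly linked list with O(1)
 * tail append. A minimal sketch of the append (the kernel keeps its own
 * helpers for this elsewhere; this mirrors their likely shape):
 */
static inline void wq_list_add_tail_sketch(struct io_wq_work_node *node,
					   struct io_wq_work_list *list)
{
	node->next = NULL;
	if (!list->first) {
		list->first = node;
		list->last = node;
	} else {
		list->last->next = node;
		list->last = node;
	}
}
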
struct io_wq_work {
	struct io_wq_work_node list;
	unsigned flags;
	/* place it here instead of io_kiocb as it fills padding and saves 4B */
	int cancel_seq;
};

struct io_fixed_file {
	/* file * with additional FFS_* flags */
	unsigned long file_ptr;
};

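/*
 * A hedged sketch of the tagged-pointer scheme above: struct file is at
 * least word-aligned, so the low bits of file_ptr are free to carry the
 * FFS_* flag bits. The mask below is illustrative, not the kernel's.
 */
#define FFS_FLAGS_SKETCH	3UL

static inline struct file *io_fixed_file_ptr_sketch(struct io_fixed_file *f)
{
	return (struct file *)(f->file_ptr & ~FFS_FLAGS_SKETCH);
}

static inline unsigned long io_fixed_file_flags_sketch(struct io_fixed_file *f)
{
	return f->file_ptr & FFS_FLAGS_SKETCH;
}
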
struct io_file_table {
	struct io_fixed_file *files;
	unsigned long *bitmap;
	unsigned int alloc_hint;
};

struct io_hash_bucket {
	spinlock_t		lock;
	struct hlist_head	list;
} ____cacheline_aligned_in_smp;

struct io_hash_table {
	struct io_hash_bucket	*hbs;
	unsigned		hash_bits;
};

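/*
 * hash_bits stores the table size as a power-of-two order, so bucket
 * selection folds a key down to that many bits. A sketch using the
 * generic hash_long() from <linux/hash.h> (the helper is illustrative;
 * each bucket has its own lock, so different buckets don't contend):
 */
static inline struct io_hash_bucket *
io_hash_bucket_sketch(struct io_hash_table *table, unsigned long key)
{
	return &table->hbs[hash_long(key, table->hash_bits)];
}
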
/*
 * Arbitrary limit, can be raised if need be
 */
#define IO_RINGFD_REG_MAX 16

struct io_uring_task {
	/* submission side */
	int				cached_refs;
	const struct io_ring_ctx	*last;
	struct io_wq			*io_wq;
	struct file			*registered_rings[IO_RINGFD_REG_MAX];

	struct xarray			xa;
	struct wait_queue_head		wait;
	atomic_t			in_cancel;
	atomic_t			inflight_tracked;
	struct percpu_counter		inflight;

	struct { /* task_work */
		struct llist_head	task_list;
		struct callback_head	task_work;
	} ____cacheline_aligned_in_smp;
};

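/*
 * A sketch of the producer side that the task_list/task_work pairing
 * implies: push a node onto the lock-free llist, and only arm the
 * task_work callback when the push found the list empty. llist_add()
 * and task_work_add() are real APIs; the wrapper itself is illustrative.
 */
static void io_queue_tw_sketch(struct io_uring_task *tctx,
			       struct llist_node *node,
			       struct task_struct *task)
{
	if (llist_add(node, &tctx->task_list))
		task_work_add(task, &tctx->task_work, TWA_SIGNAL);
}
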
struct io_uring {
	u32 head;
	u32 tail;
};

/*
 * This data is shared with the application through the mmap at offsets
 * IORING_OFF_SQ_RING and IORING_OFF_CQ_RING.
 *
 * The offsets to the member fields are published through struct
 * io_sqring_offsets when calling io_uring_setup.
 */
struct io_rings {
	/*
	 * Head and tail offsets into the ring; the offsets need to be
	 * masked to get valid indices.
	 *
	 * The kernel controls the head of the sq ring and the tail of the
	 * cq ring; the application controls the tail of the sq ring and
	 * the head of the cq ring.
	 */
	struct io_uring		sq, cq;
	/*
	 * Bitmasks to apply to head and tail offsets (constant, equals
	 * ring_entries - 1)
	 */
	u32			sq_ring_mask, cq_ring_mask;
	/* Ring sizes (constant, power of 2) */
	u32			sq_ring_entries, cq_ring_entries;
	/*
	 * Number of invalid entries dropped by the kernel due to an
	 * invalid index stored in array
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application (i.e. get number of "new events" by comparing to
	 * cached value).
	 *
	 * After the application has read a new SQ head value, this
	 * counter includes all submissions that were dropped reaching
	 * the new SQ head (and possibly more).
	 */
	u32			sq_dropped;
	/*
	 * Runtime SQ flags
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application.
	 *
	 * The application needs a full memory barrier before checking
	 * for IORING_SQ_NEED_WAKEUP after updating the sq tail.
	 */
	atomic_t		sq_flags;
	/*
	 * Runtime CQ flags
	 *
	 * Written by the application, shouldn't be modified by the
	 * kernel.
	 */
	u32			cq_flags;
	/*
	 * Number of completion events lost because the queue was full;
	 * this should be avoided by the application by making sure
	 * there are no more requests pending than there is space in
	 * the completion queue.
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application (i.e. get number of "new events" by comparing to
	 * cached value).
	 *
	 * As completion events come in out of order this counter is not
	 * ordered with any other data.
	 */
	u32			cq_overflow;
	/*
	 * Ring buffer of completion events.
	 *
	 * The kernel writes completion events fresh every time they are
	 * produced, so the application is allowed to modify pending
	 * entries.
	 */
	struct io_uring_cqe	cqes[] ____cacheline_aligned_in_smp;
};

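/*
 * The masking scheme described above is easiest to see from the
 * consumer side. A minimal userspace sketch of draining the CQ ring,
 * written against this struct for clarity (a real application derives
 * the same fields from the IORING_OFF_CQ_RING mmap via struct
 * io_cqring_offsets, or just uses liburing). handle_cqe() is
 * application-defined; single consumer and C11 <stdatomic.h> assumed.
 */
static void drain_cq_sketch(struct io_rings *rings)
{
	unsigned head = rings->cq.head;	/* we are the only head writer */
	/* acquire pairs with the kernel's release store of the tail */
	unsigned tail = atomic_load_explicit(
			(_Atomic unsigned *)&rings->cq.tail,
			memory_order_acquire);

	while (head != tail) {
		struct io_uring_cqe *cqe =
			&rings->cqes[head & rings->cq_ring_mask];

		handle_cqe(cqe);	/* application-defined */
		head++;
	}
	/* release publishes the consumed slots back to the kernel */
	atomic_store_explicit((_Atomic unsigned *)&rings->cq.head, head,
			      memory_order_release);
}
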
struct io_restriction {
	DECLARE_BITMAP(register_op, IORING_REGISTER_LAST);
	DECLARE_BITMAP(sqe_op, IORING_OP_LAST);
	u8 sqe_flags_allowed;
	u8 sqe_flags_required;
	bool registered;
};

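/*
 * io_restriction is the kernel-side record of what an application
 * registers with IORING_REGISTER_RESTRICTIONS. A hedged userspace
 * sketch (needs <unistd.h>, <sys/syscall.h>, <linux/io_uring.h>): the
 * ring must be created with IORING_SETUP_R_DISABLED, restricted, then
 * enabled. Raw syscall(2) is used since libc has no io_uring_register()
 * wrapper; error handling is omitted.
 */
static int restrict_ring_sketch(int ring_fd)
{
	struct io_uring_restriction res[] = {
		{ .opcode = IORING_RESTRICTION_SQE_OP,
		  .sqe_op = IORING_OP_READV },
		{ .opcode = IORING_RESTRICTION_SQE_OP,
		  .sqe_op = IORING_OP_WRITEV },
	};

	if (syscall(__NR_io_uring_register, ring_fd,
		    IORING_REGISTER_RESTRICTIONS, res, 2) < 0)
		return -1;
	/* lift IORING_SETUP_R_DISABLED now that restrictions are set */
	return syscall(__NR_io_uring_register, ring_fd,
		       IORING_REGISTER_ENABLE_RINGS, NULL, 0);
}
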
struct io_submit_link {
	struct io_kiocb *head;
	struct io_kiocb *last;
};

struct io_submit_state {
	/* inline/task_work completion list, under ->uring_lock */
	struct io_wq_work_node	free_list;
	/* batch completion logic */
	struct io_wq_work_list	compl_reqs;
	struct io_submit_link	link;

	bool			plug_started;
	bool			need_plug;
	unsigned short		submit_nr;
	unsigned int		cqes_count;
	struct blk_plug		plug;
};

struct io_ev_fd {
	struct eventfd_ctx	*cq_ev_fd;
	unsigned int		eventfd_async: 1;
	struct rcu_head		rcu;
	atomic_t		refs;
	atomic_t		ops;
};

struct io_alloc_cache {
	struct io_wq_work_node	list;
	unsigned int		nr_cached;
	unsigned int		max_cached;
	size_t			elem_size;
};

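/*
 * A sketch of the LIFO recycling this struct implies: freed objects are
 * pushed onto list (up to max_cached) instead of being released, and
 * allocation pops before falling back to a fresh allocation of
 * elem_size bytes. Helper names are illustrative, not the kernel's.
 */
static inline void *io_cache_pop_sketch(struct io_alloc_cache *cache)
{
	struct io_wq_work_node *node = cache->list.next;

	if (!node)
		return NULL;	/* caller allocates elem_size bytes */
	cache->list.next = node->next;
	cache->nr_cached--;
	return node;
}

static inline bool io_cache_push_sketch(struct io_alloc_cache *cache,
					struct io_wq_work_node *node)
{
	if (cache->nr_cached >= cache->max_cached)
		return false;	/* cache full: caller frees instead */
	node->next = cache->list.next;
	cache->list.next = node;
	cache->nr_cached++;
	return true;
}
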
struct io_ring_ctx {
	/* const or read-mostly hot data */
	struct {
		unsigned int		flags;
		unsigned int		drain_next: 1;
		unsigned int		restricted: 1;
		unsigned int		off_timeout_used: 1;
		unsigned int		drain_active: 1;
		unsigned int		has_evfd: 1;
		/* all CQEs should be posted only by the submitter task */
		unsigned int		task_complete: 1;
		unsigned int		lockless_cq: 1;
		unsigned int		syscall_iopoll: 1;
		unsigned int		poll_activated: 1;
		unsigned int		drain_disabled: 1;
		unsigned int		compat: 1;

		struct task_struct	*submitter_task;
		struct io_rings		*rings;
		struct percpu_ref	refs;

		enum task_work_notify_mode	notify_method;
	} ____cacheline_aligned_in_smp;

	/* submission data */
	struct {
		struct mutex		uring_lock;

		/*
		 * Ring buffer of indices into array of io_uring_sqe, which is
		 * mmapped by the application using the IORING_OFF_SQES offset.
		 *
		 * This indirection could e.g. be used to assign fixed
		 * io_uring_sqe entries to operations and only submit them to
		 * the queue when needed.
		 *
		 * The kernel modifies neither the indices array nor the entries
		 * array.
		 */
		u32			*sq_array;
		struct io_uring_sqe	*sq_sqes;
		unsigned		cached_sq_head;
		unsigned		sq_entries;

		/*
		 * Fixed resources fast path, should be accessed only under
		 * uring_lock, and updated through io_uring_register(2)
		 */
		struct io_rsrc_node	*rsrc_node;
		atomic_t		cancel_seq;
		struct io_file_table	file_table;
		unsigned		nr_user_files;
		unsigned		nr_user_bufs;
		struct io_mapped_ubuf	**user_bufs;

		struct io_submit_state	submit_state;

		struct io_buffer_list	*io_bl;
		struct xarray		io_bl_xa;

		struct io_hash_table	cancel_table_locked;
		struct io_alloc_cache	apoll_cache;
		struct io_alloc_cache	netmsg_cache;

		/*
		 * ->iopoll_list is protected by the ctx->uring_lock for
		 * io_uring instances that don't use IORING_SETUP_SQPOLL.
		 * For SQPOLL, only the single threaded io_sq_thread() will
		 * manipulate the list, hence no extra locking is needed there.
		 */
		struct io_wq_work_list	iopoll_list;
		bool			poll_multi_queue;

		/*
		 * Any cancelable uring_cmd is added to this list in
		 * ->uring_cmd() by io_uring_cmd_insert_cancelable()
		 */
		struct hlist_head	cancelable_uring_cmd;
	} ____cacheline_aligned_in_smp;

	struct {
		/*
		 * We cache a range of free CQEs we can use, once exhausted it
		 * should go through a slower range setup, see __io_get_cqe()
		 */
		struct io_uring_cqe	*cqe_cached;
		struct io_uring_cqe	*cqe_sentinel;

		unsigned		cached_cq_tail;
		unsigned		cq_entries;
		struct io_ev_fd	__rcu	*io_ev_fd;
		unsigned		cq_extra;
	} ____cacheline_aligned_in_smp;

	/*
	 * task_work and async notification delivery cacheline. Expected to
	 * regularly bounce between CPUs.
	 */
	struct {
		struct llist_head	work_llist;
		unsigned long		check_cq;
		atomic_t		cq_wait_nr;
		atomic_t		cq_timeouts;
		struct wait_queue_head	cq_wait;
	} ____cacheline_aligned_in_smp;

	/* timeouts */
	struct {
		spinlock_t		timeout_lock;
		struct list_head	timeout_list;
		struct list_head	ltimeout_list;
		unsigned		cq_last_tm_flush;
	} ____cacheline_aligned_in_smp;

	struct io_uring_cqe	completion_cqes[16];

	spinlock_t		completion_lock;

	/* IRQ completion list, under ->completion_lock */
	struct io_wq_work_list	locked_free_list;
	unsigned int		locked_free_nr;

	struct list_head	io_buffers_comp;
	struct list_head	cq_overflow_list;
	struct io_hash_table	cancel_table;

	struct hlist_head	waitid_list;

#ifdef CONFIG_FUTEX
	struct hlist_head	futex_list;
	struct io_alloc_cache	futex_cache;
#endif

	const struct cred	*sq_creds;	/* cred used for __io_sq_thread() */
	struct io_sq_data	*sq_data;	/* if using sq thread polling */

	struct wait_queue_head	sqo_sq_wait;
	struct list_head	sqd_list;

	unsigned int		file_alloc_start;
	unsigned int		file_alloc_end;

	struct xarray		personalities;
	u32			pers_next;

	struct list_head	io_buffers_cache;

	/* deferred free list, protected by ->uring_lock */
	struct hlist_head	io_buf_list;

	/* Keep this last, we don't need it for the fast path */
	struct wait_queue_head		poll_wq;
	struct io_restriction		restrictions;

	/* slow path rsrc auxiliary data, used by update/register */
	struct io_mapped_ubuf		*dummy_ubuf;
	struct io_rsrc_data		*file_data;
	struct io_rsrc_data		*buf_data;

	/* protected by ->uring_lock */
	struct list_head		rsrc_ref_list;
	struct io_alloc_cache		rsrc_node_cache;
	struct wait_queue_head		rsrc_quiesce_wq;
	unsigned			rsrc_quiesce;

	/* hashed buffered write serialization */
	struct io_wq_hash		*hash_map;

	/* Only used for accounting purposes */
	struct user_struct		*user;
	struct mm_struct		*mm_account;

	/* ctx exit and cancelation */
	struct llist_head		fallback_llist;
	struct delayed_work		fallback_work;
	struct work_struct		exit_work;
	struct list_head		tctx_list;
	struct completion		ref_comp;

	/* io-wq management, e.g. thread count */
	u32				iowq_limits[2];
	bool				iowq_limits_set;

	struct callback_head		poll_wq_task_work;
	struct list_head		defer_list;
	unsigned			sq_thread_idle;
	/* protected by ->completion_lock */
	unsigned			evfd_last_cq_tail;

	/*
	 * If IORING_SETUP_NO_MMAP is used, then the below holds
	 * the gup'ed pages for the two rings, and the sqes.
	 */
	unsigned short			n_ring_pages;
	unsigned short			n_sqe_pages;
	struct page			**ring_pages;
	struct page			**sqe_pages;
};

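/*
 * The sq_array indirection documented in the submission block makes
 * fetching the next SQE a two-step lookup. A hedged kernel-side sketch
 * (IORING_SETUP_SQE128 doubles the stride; that case is omitted):
 */
static inline struct io_uring_sqe *io_get_sqe_sketch(struct io_ring_ctx *ctx)
{
	unsigned mask = ctx->sq_entries - 1;
	unsigned head = ctx->cached_sq_head++ & mask;
	unsigned index = READ_ONCE(ctx->sq_array[head]);

	if (index >= ctx->sq_entries)
		return NULL;	/* the kernel accounts this in sq_dropped */
	return &ctx->sq_sqes[index];
}
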
struct io_tw_state {
	/* ->uring_lock is taken, callbacks can use io_tw_lock to lock it */
	bool locked;
};

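/*
 * The comment above refers to the internal io_tw_lock() helper; its
 * likely shape at this point in the tree (a sketch, not the
 * authoritative definition):
 */
static inline void io_tw_lock_sketch(struct io_ring_ctx *ctx,
				     struct io_tw_state *ts)
{
	if (!ts->locked) {
		mutex_lock(&ctx->uring_lock);
		ts->locked = true;
	}
}
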
enum {
	REQ_F_FIXED_FILE_BIT	= IOSQE_FIXED_FILE_BIT,
	REQ_F_IO_DRAIN_BIT	= IOSQE_IO_DRAIN_BIT,
	REQ_F_LINK_BIT		= IOSQE_IO_LINK_BIT,
	REQ_F_HARDLINK_BIT	= IOSQE_IO_HARDLINK_BIT,
	REQ_F_FORCE_ASYNC_BIT	= IOSQE_ASYNC_BIT,
	REQ_F_BUFFER_SELECT_BIT	= IOSQE_BUFFER_SELECT_BIT,
	REQ_F_CQE_SKIP_BIT	= IOSQE_CQE_SKIP_SUCCESS_BIT,

	/* first byte is taken by user flags, shift it to not overlap */
	REQ_F_FAIL_BIT		= 8,
	REQ_F_INFLIGHT_BIT,
	REQ_F_CUR_POS_BIT,
	REQ_F_NOWAIT_BIT,
	REQ_F_LINK_TIMEOUT_BIT,
	REQ_F_NEED_CLEANUP_BIT,
	REQ_F_POLLED_BIT,
	REQ_F_BUFFER_SELECTED_BIT,
	REQ_F_BUFFER_RING_BIT,
	REQ_F_REISSUE_BIT,
	REQ_F_CREDS_BIT,
	REQ_F_REFCOUNT_BIT,
	REQ_F_ARM_LTIMEOUT_BIT,
	REQ_F_ASYNC_DATA_BIT,
	REQ_F_SKIP_LINK_CQES_BIT,
	REQ_F_SINGLE_POLL_BIT,
	REQ_F_DOUBLE_POLL_BIT,
	REQ_F_PARTIAL_IO_BIT,
	REQ_F_APOLL_MULTISHOT_BIT,
	REQ_F_CLEAR_POLLIN_BIT,
	REQ_F_HASH_LOCKED_BIT,
	/* keep async read/write and isreg together and in order */
	REQ_F_SUPPORT_NOWAIT_BIT,
	REQ_F_ISREG_BIT,
	REQ_F_POLL_NO_LAZY_BIT,
	REQ_F_CANCEL_SEQ_BIT,
	REQ_F_CAN_POLL_BIT,

	/* not a real bit, just to check we're not overflowing the space */
	__REQ_F_LAST_BIT,
};

typedef u64 __bitwise io_req_flags_t;
#define IO_REQ_FLAG(bitno)	((__force io_req_flags_t) BIT_ULL((bitno)))

enum {
	/* ctx owns file */
	REQ_F_FIXED_FILE	= IO_REQ_FLAG(REQ_F_FIXED_FILE_BIT),
	/* drain existing IO first */
	REQ_F_IO_DRAIN		= IO_REQ_FLAG(REQ_F_IO_DRAIN_BIT),
	/* linked sqes */
	REQ_F_LINK		= IO_REQ_FLAG(REQ_F_LINK_BIT),
	/* doesn't sever on completion < 0 */
	REQ_F_HARDLINK		= IO_REQ_FLAG(REQ_F_HARDLINK_BIT),
	/* IOSQE_ASYNC */
	REQ_F_FORCE_ASYNC	= IO_REQ_FLAG(REQ_F_FORCE_ASYNC_BIT),
	/* IOSQE_BUFFER_SELECT */
	REQ_F_BUFFER_SELECT	= IO_REQ_FLAG(REQ_F_BUFFER_SELECT_BIT),
	/* IOSQE_CQE_SKIP_SUCCESS */
	REQ_F_CQE_SKIP		= IO_REQ_FLAG(REQ_F_CQE_SKIP_BIT),

	/* fail rest of links */
	REQ_F_FAIL		= IO_REQ_FLAG(REQ_F_FAIL_BIT),
	/* on inflight list, should be cancelled and waited on exit reliably */
	REQ_F_INFLIGHT		= IO_REQ_FLAG(REQ_F_INFLIGHT_BIT),
	/* read/write uses file position */
	REQ_F_CUR_POS		= IO_REQ_FLAG(REQ_F_CUR_POS_BIT),
	/* must not punt to workers */
	REQ_F_NOWAIT		= IO_REQ_FLAG(REQ_F_NOWAIT_BIT),
	/* has or had linked timeout */
	REQ_F_LINK_TIMEOUT	= IO_REQ_FLAG(REQ_F_LINK_TIMEOUT_BIT),
	/* needs cleanup */
	REQ_F_NEED_CLEANUP	= IO_REQ_FLAG(REQ_F_NEED_CLEANUP_BIT),
	/* already went through poll handler */
	REQ_F_POLLED		= IO_REQ_FLAG(REQ_F_POLLED_BIT),
	/* buffer already selected */
	REQ_F_BUFFER_SELECTED	= IO_REQ_FLAG(REQ_F_BUFFER_SELECTED_BIT),
	/* buffer selected from ring, needs commit */
	REQ_F_BUFFER_RING	= IO_REQ_FLAG(REQ_F_BUFFER_RING_BIT),
	/* caller should reissue async */
	REQ_F_REISSUE		= IO_REQ_FLAG(REQ_F_REISSUE_BIT),
	/* supports async reads/writes */
	REQ_F_SUPPORT_NOWAIT	= IO_REQ_FLAG(REQ_F_SUPPORT_NOWAIT_BIT),
	/* regular file */
	REQ_F_ISREG		= IO_REQ_FLAG(REQ_F_ISREG_BIT),
	/* has creds assigned */
	REQ_F_CREDS		= IO_REQ_FLAG(REQ_F_CREDS_BIT),
	/* skip refcounting if not set */
	REQ_F_REFCOUNT		= IO_REQ_FLAG(REQ_F_REFCOUNT_BIT),
	/* there is a linked timeout that has to be armed */
	REQ_F_ARM_LTIMEOUT	= IO_REQ_FLAG(REQ_F_ARM_LTIMEOUT_BIT),
	/* ->async_data allocated */
	REQ_F_ASYNC_DATA	= IO_REQ_FLAG(REQ_F_ASYNC_DATA_BIT),
	/* don't post CQEs while failing linked requests */
	REQ_F_SKIP_LINK_CQES	= IO_REQ_FLAG(REQ_F_SKIP_LINK_CQES_BIT),
	/* single poll may be active */
	REQ_F_SINGLE_POLL	= IO_REQ_FLAG(REQ_F_SINGLE_POLL_BIT),
	/* double poll may be active */
	REQ_F_DOUBLE_POLL	= IO_REQ_FLAG(REQ_F_DOUBLE_POLL_BIT),
	/* request has already done partial IO */
	REQ_F_PARTIAL_IO	= IO_REQ_FLAG(REQ_F_PARTIAL_IO_BIT),
	/* fast poll multishot mode */
	REQ_F_APOLL_MULTISHOT	= IO_REQ_FLAG(REQ_F_APOLL_MULTISHOT_BIT),
	/* recvmsg special flag, clear EPOLLIN */
	REQ_F_CLEAR_POLLIN	= IO_REQ_FLAG(REQ_F_CLEAR_POLLIN_BIT),
	/* hashed into ->cancel_hash_locked, protected by ->uring_lock */
	REQ_F_HASH_LOCKED	= IO_REQ_FLAG(REQ_F_HASH_LOCKED_BIT),
	/* don't use lazy poll wake for this request */
	REQ_F_POLL_NO_LAZY	= IO_REQ_FLAG(REQ_F_POLL_NO_LAZY_BIT),
	/* cancel sequence is set and valid */
	REQ_F_CANCEL_SEQ	= IO_REQ_FLAG(REQ_F_CANCEL_SEQ_BIT),
	/* file is pollable */
	REQ_F_CAN_POLL		= IO_REQ_FLAG(REQ_F_CAN_POLL_BIT),
};

typedef void (*io_req_tw_func_t)(struct io_kiocb *req, struct io_tw_state *ts);

struct io_task_work {
	struct llist_node	node;
	io_req_tw_func_t	func;
};

struct io_cqe {
	__u64	user_data;
	__s32	res;
	/* fd initially, then cflags for completion */
	union {
		__u32	flags;
		int	fd;
	};
};

/*
 * Each request type overlays its private data structure on top of this one.
 * They must not exceed this one in size.
 */
struct io_cmd_data {
	struct file	*file;
	/* each command gets 56 bytes of data */
	__u8		data[56];
};

static inline void io_kiocb_cmd_sz_check(size_t cmd_sz)
{
	BUILD_BUG_ON(cmd_sz > sizeof(struct io_cmd_data));
}
#define io_kiocb_to_cmd(req, cmd_type) ( \
	io_kiocb_cmd_sz_check(sizeof(cmd_type)) , \
	((cmd_type *)&(req)->cmd) \
)
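
/*
 * Typical use, sketched with a hypothetical per-opcode payload: each
 * opcode overlays its private struct on req->cmd, and the BUILD_BUG_ON
 * in the macro rejects any struct larger than io_cmd_data at compile
 * time. Per the io_kiocb note below, the struct must start with the
 * file pointer.
 */
struct io_foo_cmd {
	struct file	*file;		/* must be first */
	u64		foo_arg;
};

static int io_foo_prep_sketch(struct io_kiocb *req,
			      const struct io_uring_sqe *sqe)
{
	struct io_foo_cmd *foo = io_kiocb_to_cmd(req, struct io_foo_cmd);

	foo->foo_arg = READ_ONCE(sqe->addr);
	return 0;
}
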
#define cmd_to_io_kiocb(ptr)	((struct io_kiocb *) ptr)

struct io_kiocb {
	union {
		/*
		 * NOTE! Each of the io_kiocb union members has the file pointer
		 * as the first entry in their struct definition. So you can
		 * access the file pointer through any of the sub-structs,
		 * or directly as just 'file' in this struct.
		 */
		struct file		*file;
		struct io_cmd_data	cmd;
	};

	u8				opcode;
	/* polled IO has completed */
	u8				iopoll_completed;
	/*
	 * Can be either a fixed buffer index, or used with provided buffers.
	 * For the latter, before issue it points to the buffer group ID,
	 * and after selection it points to the buffer ID itself.
	 */
	u16				buf_index;

	unsigned			nr_tw;

	/* REQ_F_* flags */
	io_req_flags_t			flags;

	struct io_cqe			cqe;

	struct io_ring_ctx		*ctx;
	struct task_struct		*task;

	union {
		/* store used ubuf, so we can prevent reloading */
		struct io_mapped_ubuf	*imu;

		/* stores selected buf, valid IFF REQ_F_BUFFER_SELECTED is set */
		struct io_buffer	*kbuf;

		/*
		 * stores buffer ID for ring provided buffers, valid IFF
		 * REQ_F_BUFFER_RING is set.
		 */
		struct io_buffer_list	*buf_list;
	};

	union {
		/* used by request caches, completion batching and iopoll */
		struct io_wq_work_node	comp_list;
		/* cache ->apoll->events */
		__poll_t		apoll_events;
	};

	struct io_rsrc_node		*rsrc_node;

	atomic_t			refs;
	atomic_t			poll_refs;
	struct io_task_work		io_task_work;
	/* for polled requests, i.e. IORING_OP_POLL_ADD and async armed poll */
	struct hlist_node		hash_node;
	/* internal polling, see IORING_FEAT_FAST_POLL */
	struct async_poll		*apoll;
	/* opcode allocated if it needs to store data for async defer */
	void				*async_data;
	/* linked requests, IFF REQ_F_HARDLINK or REQ_F_LINK are set */
	struct io_kiocb			*link;
	/* custom credentials, valid IFF REQ_F_CREDS is set */
	const struct cred		*creds;
	struct io_wq_work		work;

	struct {
		u64			extra1;
		u64			extra2;
	} big_cqe;
};

struct io_overflow_cqe {
	struct list_head	list;
	struct io_uring_cqe	cqe;
};

#endif