#ifndef IO_URING_TYPES_H
#define IO_URING_TYPES_H

#include <linux/blkdev.h>
#include <linux/hashtable.h>
#include <linux/task_work.h>
#include <linux/bitmap.h>
#include <linux/llist.h>
#include <uapi/linux/io_uring.h>
enum {
	/*
	 * A hint to not wake right away but delay until there are enough of
	 * tw's queued to match the number of CQEs the task is waiting for.
	 *
	 * Must not be used with requests generating more than one CQE.
	 * It's also ignored unless IORING_SETUP_DEFER_TASKRUN is set.
	 */
	IOU_F_TWQ_LAZY_WAKE		= 1,
};

enum io_uring_cmd_flags {
	IO_URING_F_COMPLETE_DEFER	= 1,
	IO_URING_F_UNLOCKED		= 2,
	/* the request is executed from poll, it should not be freed */
	IO_URING_F_MULTISHOT		= 4,
	/* executed by io-wq */
	IO_URING_F_IOWQ			= 8,
	/* int's last bit, sign checks are usually faster than a bit test */
	IO_URING_F_NONBLOCK		= INT_MIN,

	/* ctx state flags, for URING_CMD */
	IO_URING_F_SQE128		= (1 << 8),
	IO_URING_F_CQE32		= (1 << 9),
	IO_URING_F_IOPOLL		= (1 << 10),

	/* set when uring wants to cancel a previously issued command */
	IO_URING_F_CANCEL		= (1 << 11),
	IO_URING_F_COMPAT		= (1 << 12),
};
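
/*
 * Illustrative sketch, not part of the original header: because
 * IO_URING_F_NONBLOCK occupies the sign bit of an int, an issue path
 * can test it with a sign check instead of a bit test, e.g.:
 *
 *	if ((int)issue_flags < 0)
 *		// non-blocking issue was requested
 *
 * which is equivalent to (issue_flags & IO_URING_F_NONBLOCK).
 */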

struct io_wq_work_node {
	struct io_wq_work_node *next;
};

struct io_wq_work_list {
	struct io_wq_work_node *first;
	struct io_wq_work_node *last;
};

struct io_wq_work {
	struct io_wq_work_node list;
	unsigned flags;
	/* place it here instead of io_kiocb as it fills padding and saves 4B */
	int cancel_seq;
};

struct io_fixed_file {
	/* file * with additional FFS_* flags */
	unsigned long file_ptr;
};
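
/*
 * Illustrative sketch, assuming the FFS_* bit definitions from io_uring's
 * file table code: the flag bits live in the low bits of file_ptr, so the
 * actual file pointer is recovered by masking them off, roughly:
 *
 *	struct file *file = (struct file *)(f->file_ptr & FFS_MASK);
 */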

struct io_file_table {
	struct io_fixed_file *files;
	unsigned long *bitmap;
	unsigned int alloc_hint;
};

struct io_hash_bucket {
	spinlock_t		lock;
	struct hlist_head	list;
} ____cacheline_aligned_in_smp;

struct io_hash_table {
	struct io_hash_bucket	*hbs;
	unsigned		hash_bits;
};
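
/*
 * Illustrative sketch, not part of the original header: a lookup typically
 * hashes the request's user_data down to hash_bits to pick a bucket, e.g.:
 *
 *	struct io_hash_bucket *hb =
 *		&table->hbs[hash_long(req->cqe.user_data, table->hash_bits)];
 */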

/*
 * Arbitrary limit, can be raised if need be
 */
#define IO_RINGFD_REG_MAX 16

struct io_uring_task {
	/* submission side */
	int				cached_refs;
	const struct io_ring_ctx	*last;
	struct io_wq			*io_wq;
	struct file			*registered_rings[IO_RINGFD_REG_MAX];

	struct xarray			xa;
	struct wait_queue_head		wait;
	atomic_t			in_cancel;
	atomic_t			inflight_tracked;
	struct percpu_counter		inflight;

	struct { /* task_work */
		struct llist_head	task_list;
		struct callback_head	task_work;
	} ____cacheline_aligned_in_smp;
};

struct io_uring {
	u32 head;
	u32 tail;
};

/*
 * This data is shared with the application through the mmap at offsets
 * IORING_OFF_SQ_RING and IORING_OFF_CQ_RING.
 *
 * The offsets to the member fields are published through struct
 * io_sqring_offsets when calling io_uring_setup.
 */
struct io_rings {
	/*
	 * Head and tail offsets into the ring; the offsets need to be
	 * masked to get valid indices.
	 *
	 * The kernel controls head of the sq ring and the tail of the cq ring,
	 * and the application controls tail of the sq ring and the head of the
	 * cq ring.
	 */
	struct io_uring		sq, cq;
	/*
	 * Bitmasks to apply to head and tail offsets (constant, equals
	 * ring_entries - 1)
	 */
	u32			sq_ring_mask, cq_ring_mask;
	/* Ring sizes (constant, power of 2) */
	u32			sq_ring_entries, cq_ring_entries;
	/*
	 * Number of invalid entries dropped by the kernel due to
	 * invalid index stored in array
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application (i.e. get number of "new events" by comparing to
	 * cached value).
	 *
	 * After a new SQ head value was read by the application this
	 * counter includes all submissions that were dropped reaching
	 * the new SQ head (and possibly more).
	 */
	u32			sq_dropped;
	/*
	 * Runtime SQ flags
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application.
	 *
	 * The application needs a full memory barrier before checking
	 * for IORING_SQ_NEED_WAKEUP after updating the sq tail.
	 */
	atomic_t		sq_flags;
	/*
	 * Runtime CQ flags
	 *
	 * Written by the application, shouldn't be modified by the
	 * kernel.
	 */
	u32			cq_flags;
	/*
	 * Number of completion events lost because the queue was full;
	 * this should be avoided by the application by making sure
	 * there are not more requests pending than there is space in
	 * the completion queue.
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application (i.e. get number of "new events" by comparing to
	 * cached value).
	 *
	 * As completion events come in out of order this counter is not
	 * ordered with any other data.
	 */
	u32			cq_overflow;
	/*
	 * Ring buffer of completion events.
	 *
	 * The kernel writes completion events fresh every time they are
	 * produced, so the application is allowed to modify pending
	 * entries.
	 */
	struct io_uring_cqe	cqes[] ____cacheline_aligned_in_smp;
};
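
/*
 * Illustrative sketch, not part of the original header: under the rules
 * above, an application consumes a CQE with acquire/release ordering and
 * the ring mask, along the lines of liburing's completion loop:
 *
 *	u32 head = rings->cq.head;		// app-owned index
 *	if (head != smp_load_acquire(&rings->cq.tail)) {
 *		struct io_uring_cqe *cqe =
 *			&rings->cqes[head & rings->cq_ring_mask];
 *		// ... process cqe ...
 *		smp_store_release(&rings->cq.head, head + 1);
 *	}
 */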

struct io_restriction {
	DECLARE_BITMAP(register_op, IORING_REGISTER_LAST);
	DECLARE_BITMAP(sqe_op, IORING_OP_LAST);
	u8 sqe_flags_allowed;
	u8 sqe_flags_required;
	bool registered;
};

struct io_submit_link {
	struct io_kiocb *head;
	struct io_kiocb *last;
};

struct io_submit_state {
	/* inline/task_work completion list, under ->uring_lock */
	struct io_wq_work_node	free_list;
	/* batch completion logic */
	struct io_wq_work_list	compl_reqs;
	struct io_submit_link	link;

	bool			plug_started;
	bool			need_plug;
	unsigned short		submit_nr;
	unsigned int		cqes_count;
	struct blk_plug		plug;
};

struct io_ev_fd {
	struct eventfd_ctx	*cq_ev_fd;
	unsigned int		eventfd_async: 1;
	struct rcu_head		rcu;
	atomic_t		refs;
	atomic_t		ops;
};

struct io_alloc_cache {
	struct io_wq_work_node	list;
	unsigned int		nr_cached;
	unsigned int		max_cached;
	size_t			elem_size;
};

struct io_ring_ctx {
	/* const or read-mostly hot data */
	struct {
		unsigned int		flags;
		unsigned int		drain_next: 1;
		unsigned int		restricted: 1;
		unsigned int		off_timeout_used: 1;
		unsigned int		drain_active: 1;
		unsigned int		has_evfd: 1;
		/* all CQEs should be posted only by the submitter task */
		unsigned int		task_complete: 1;
		unsigned int		lockless_cq: 1;
		unsigned int		syscall_iopoll: 1;
		unsigned int		poll_activated: 1;
		unsigned int		drain_disabled: 1;
		unsigned int		compat: 1;
		unsigned int		iowq_limits_set: 1;

		struct task_struct	*submitter_task;
		struct io_rings		*rings;
		struct percpu_ref	refs;

		enum task_work_notify_mode	notify_method;
		unsigned			sq_thread_idle;
	} ____cacheline_aligned_in_smp;

	/* submission data */
	struct {
		struct mutex		uring_lock;

		/*
		 * Ring buffer of indices into array of io_uring_sqe, which is
		 * mmapped by the application using the IORING_OFF_SQES offset.
		 *
		 * This indirection could e.g. be used to assign fixed
		 * io_uring_sqe entries to operations and only submit them to
		 * the queue when needed.
		 *
		 * The kernel modifies neither the indices array nor the entries
		 * array.
		 */
		u32			*sq_array;
		struct io_uring_sqe	*sq_sqes;
		unsigned		cached_sq_head;
		unsigned		sq_entries;
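
		/*
		 * Illustrative sketch, not part of the original header: the
		 * SQE picked up at a given ring head goes through the
		 * indirection above, roughly:
		 *
		 *	u32 idx = READ_ONCE(ctx->sq_array[head & (ctx->sq_entries - 1)]);
		 *	const struct io_uring_sqe *sqe = &ctx->sq_sqes[idx];
		 */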

		/*
		 * Fixed resources fast path, should be accessed only under
		 * uring_lock, and updated through io_uring_register(2)
		 */
		struct io_rsrc_node	*rsrc_node;
		atomic_t		cancel_seq;

		/*
		 * ->iopoll_list is protected by the ctx->uring_lock for
		 * io_uring instances that don't use IORING_SETUP_SQPOLL.
		 * For SQPOLL, only the single threaded io_sq_thread() will
		 * manipulate the list, hence no extra locking is needed there.
		 */
		bool			poll_multi_queue;
		struct io_wq_work_list	iopoll_list;

		struct io_file_table	file_table;
		struct io_mapped_ubuf	**user_bufs;
		unsigned		nr_user_files;
		unsigned		nr_user_bufs;

		struct io_submit_state	submit_state;

		struct io_buffer_list	*io_bl;
		struct xarray		io_bl_xa;

		struct io_hash_table	cancel_table_locked;
		struct io_alloc_cache	apoll_cache;
		struct io_alloc_cache	netmsg_cache;

		/*
		 * Any cancelable uring_cmd is added to this list in
		 * ->uring_cmd() by io_uring_cmd_insert_cancelable()
		 */
		struct hlist_head	cancelable_uring_cmd;
	} ____cacheline_aligned_in_smp;

	struct {
		/*
		 * We cache a range of free CQEs we can use, once exhausted it
		 * should go through a slower range setup, see __io_get_cqe()
		 */
		struct io_uring_cqe	*cqe_cached;
		struct io_uring_cqe	*cqe_sentinel;
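
		/*
		 * Illustrative sketch of the fast path implied above, not part
		 * of the original header: while cqe_cached hasn't reached
		 * cqe_sentinel, handing out a CQE is just a pointer bump:
		 *
		 *	if (ctx->cqe_cached < ctx->cqe_sentinel)
		 *		return ctx->cqe_cached++;
		 *	return __io_get_cqe(ctx);	// slow path, refill range
		 */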

		unsigned		cached_cq_tail;
		unsigned		cq_entries;
		struct io_ev_fd	__rcu	*io_ev_fd;
		unsigned		cq_extra;
	} ____cacheline_aligned_in_smp;

	/*
	 * task_work and async notification delivery cacheline. Expected to
	 * regularly bounce b/w CPUs.
	 */
	struct {
		struct llist_head	work_llist;
		unsigned long		check_cq;
		atomic_t		cq_wait_nr;
		atomic_t		cq_timeouts;
		struct wait_queue_head	cq_wait;
	} ____cacheline_aligned_in_smp;

	/* timeouts */
	struct {
		spinlock_t		timeout_lock;
		struct list_head	timeout_list;
		struct list_head	ltimeout_list;
		unsigned		cq_last_tm_flush;
	} ____cacheline_aligned_in_smp;

	struct io_uring_cqe	completion_cqes[16];

	spinlock_t		completion_lock;

	/* IRQ completion list, under ->completion_lock */
	unsigned int		locked_free_nr;
	struct io_wq_work_list	locked_free_list;

	struct list_head	io_buffers_comp;
	struct list_head	cq_overflow_list;
	struct io_hash_table	cancel_table;

	struct hlist_head	waitid_list;

#ifdef CONFIG_FUTEX
	struct hlist_head	futex_list;
	struct io_alloc_cache	futex_cache;
#endif

	const struct cred	*sq_creds;	/* cred used for __io_sq_thread() */
	struct io_sq_data	*sq_data;	/* if using sq thread polling */

	struct wait_queue_head	sqo_sq_wait;
	struct list_head	sqd_list;

	unsigned int		file_alloc_start;
	unsigned int		file_alloc_end;

	struct list_head	io_buffers_cache;

	/* deferred free list, protected by ->uring_lock */
	struct hlist_head	io_buf_list;

	/* Keep this last, we don't need it for the fast path */
	struct wait_queue_head	poll_wq;
	struct io_restriction	restrictions;

	/* slow path rsrc auxiliary data, used by update/register */
	struct io_mapped_ubuf	*dummy_ubuf;
	struct io_rsrc_data	*file_data;
	struct io_rsrc_data	*buf_data;

	/* protected by ->uring_lock */
	struct list_head	rsrc_ref_list;
	struct io_alloc_cache	rsrc_node_cache;
	struct wait_queue_head	rsrc_quiesce_wq;
	unsigned		rsrc_quiesce;

	u32			pers_next;
	struct xarray		personalities;

	/* hashed buffered write serialization */
	struct io_wq_hash	*hash_map;

	/* Only used for accounting purposes */
	struct user_struct	*user;
	struct mm_struct	*mm_account;

	/* ctx exit and cancelation */
	struct llist_head	fallback_llist;
	struct delayed_work	fallback_work;
	struct work_struct	exit_work;
	struct list_head	tctx_list;
	struct completion	ref_comp;

	/* io-wq management, e.g. thread count */
	u32			iowq_limits[2];

	struct callback_head	poll_wq_task_work;
	struct list_head	defer_list;

#ifdef CONFIG_NET_RX_BUSY_POLL
	struct list_head	napi_list;	/* track busy poll napi_id */
	spinlock_t		napi_lock;	/* napi_list lock */

	/* napi busy poll default timeout */
	unsigned int		napi_busy_poll_to;
	bool			napi_prefer_busy_poll;
	bool			napi_enabled;

	DECLARE_HASHTABLE(napi_ht, 4);
#endif

	/* protected by ->completion_lock */
	unsigned		evfd_last_cq_tail;

	/*
	 * If IORING_SETUP_NO_MMAP is used, then the below holds
	 * the gup'ed pages for the two rings, and the sqes.
	 */
	unsigned short		n_ring_pages;
	unsigned short		n_sqe_pages;
	struct page		**ring_pages;
	struct page		**sqe_pages;
};

struct io_tw_state {
	/* ->uring_lock is taken, callbacks can use io_tw_lock to lock it */
	bool locked;
};

enum {
	REQ_F_FIXED_FILE_BIT	= IOSQE_FIXED_FILE_BIT,
	REQ_F_IO_DRAIN_BIT	= IOSQE_IO_DRAIN_BIT,
	REQ_F_LINK_BIT		= IOSQE_IO_LINK_BIT,
	REQ_F_HARDLINK_BIT	= IOSQE_IO_HARDLINK_BIT,
	REQ_F_FORCE_ASYNC_BIT	= IOSQE_ASYNC_BIT,
	REQ_F_BUFFER_SELECT_BIT	= IOSQE_BUFFER_SELECT_BIT,
	REQ_F_CQE_SKIP_BIT	= IOSQE_CQE_SKIP_SUCCESS_BIT,

	/* first byte is taken by user flags, shift it to not overlap */
	REQ_F_FAIL_BIT		= 8,
	REQ_F_INFLIGHT_BIT,
	REQ_F_CUR_POS_BIT,
	REQ_F_NOWAIT_BIT,
	REQ_F_LINK_TIMEOUT_BIT,
	REQ_F_NEED_CLEANUP_BIT,
	REQ_F_POLLED_BIT,
	REQ_F_BUFFER_SELECTED_BIT,
	REQ_F_BUFFER_RING_BIT,
	REQ_F_REISSUE_BIT,
	REQ_F_CREDS_BIT,
	REQ_F_REFCOUNT_BIT,
	REQ_F_ARM_LTIMEOUT_BIT,
	REQ_F_ASYNC_DATA_BIT,
	REQ_F_SKIP_LINK_CQES_BIT,
	REQ_F_SINGLE_POLL_BIT,
	REQ_F_DOUBLE_POLL_BIT,
	REQ_F_PARTIAL_IO_BIT,
	REQ_F_APOLL_MULTISHOT_BIT,
	REQ_F_CLEAR_POLLIN_BIT,
	REQ_F_HASH_LOCKED_BIT,
	/* keep async read/write and isreg together and in order */
	REQ_F_SUPPORT_NOWAIT_BIT,
	REQ_F_ISREG_BIT,
	REQ_F_POLL_NO_LAZY_BIT,
	REQ_F_CANCEL_SEQ_BIT,
	REQ_F_CAN_POLL_BIT,
	REQ_F_BL_EMPTY_BIT,

	/* not a real bit, just to check we're not overflowing the space */
	__REQ_F_LAST_BIT,
};

typedef u64 __bitwise io_req_flags_t;
#define IO_REQ_FLAG(bitno)	((__force io_req_flags_t) BIT_ULL((bitno)))

enum {
	/* ctx owns file */
	REQ_F_FIXED_FILE	= IO_REQ_FLAG(REQ_F_FIXED_FILE_BIT),
	/* drain existing IO first */
	REQ_F_IO_DRAIN		= IO_REQ_FLAG(REQ_F_IO_DRAIN_BIT),
	/* linked sqes */
	REQ_F_LINK		= IO_REQ_FLAG(REQ_F_LINK_BIT),
	/* doesn't sever on completion < 0 */
	REQ_F_HARDLINK		= IO_REQ_FLAG(REQ_F_HARDLINK_BIT),
	/* IOSQE_ASYNC */
	REQ_F_FORCE_ASYNC	= IO_REQ_FLAG(REQ_F_FORCE_ASYNC_BIT),
	/* IOSQE_BUFFER_SELECT */
	REQ_F_BUFFER_SELECT	= IO_REQ_FLAG(REQ_F_BUFFER_SELECT_BIT),
	/* IOSQE_CQE_SKIP_SUCCESS */
	REQ_F_CQE_SKIP		= IO_REQ_FLAG(REQ_F_CQE_SKIP_BIT),

	/* fail rest of links */
	REQ_F_FAIL		= IO_REQ_FLAG(REQ_F_FAIL_BIT),
	/* on inflight list, should be cancelled and waited on exit reliably */
	REQ_F_INFLIGHT		= IO_REQ_FLAG(REQ_F_INFLIGHT_BIT),
	/* read/write uses file position */
	REQ_F_CUR_POS		= IO_REQ_FLAG(REQ_F_CUR_POS_BIT),
	/* must not punt to workers */
	REQ_F_NOWAIT		= IO_REQ_FLAG(REQ_F_NOWAIT_BIT),
	/* has or had linked timeout */
	REQ_F_LINK_TIMEOUT	= IO_REQ_FLAG(REQ_F_LINK_TIMEOUT_BIT),
	/* needs cleanup */
	REQ_F_NEED_CLEANUP	= IO_REQ_FLAG(REQ_F_NEED_CLEANUP_BIT),
	/* already went through poll handler */
	REQ_F_POLLED		= IO_REQ_FLAG(REQ_F_POLLED_BIT),
	/* buffer already selected */
	REQ_F_BUFFER_SELECTED	= IO_REQ_FLAG(REQ_F_BUFFER_SELECTED_BIT),
	/* buffer selected from ring, needs commit */
	REQ_F_BUFFER_RING	= IO_REQ_FLAG(REQ_F_BUFFER_RING_BIT),
	/* caller should reissue async */
	REQ_F_REISSUE		= IO_REQ_FLAG(REQ_F_REISSUE_BIT),
	/* supports async reads/writes */
	REQ_F_SUPPORT_NOWAIT	= IO_REQ_FLAG(REQ_F_SUPPORT_NOWAIT_BIT),
	/* regular file */
	REQ_F_ISREG		= IO_REQ_FLAG(REQ_F_ISREG_BIT),
	/* has creds assigned */
	REQ_F_CREDS		= IO_REQ_FLAG(REQ_F_CREDS_BIT),
	/* skip refcounting if not set */
	REQ_F_REFCOUNT		= IO_REQ_FLAG(REQ_F_REFCOUNT_BIT),
	/* there is a linked timeout that has to be armed */
	REQ_F_ARM_LTIMEOUT	= IO_REQ_FLAG(REQ_F_ARM_LTIMEOUT_BIT),
	/* ->async_data allocated */
	REQ_F_ASYNC_DATA	= IO_REQ_FLAG(REQ_F_ASYNC_DATA_BIT),
	/* don't post CQEs while failing linked requests */
	REQ_F_SKIP_LINK_CQES	= IO_REQ_FLAG(REQ_F_SKIP_LINK_CQES_BIT),
	/* single poll may be active */
	REQ_F_SINGLE_POLL	= IO_REQ_FLAG(REQ_F_SINGLE_POLL_BIT),
	/* double poll may be active */
	REQ_F_DOUBLE_POLL	= IO_REQ_FLAG(REQ_F_DOUBLE_POLL_BIT),
	/* request has already done partial IO */
	REQ_F_PARTIAL_IO	= IO_REQ_FLAG(REQ_F_PARTIAL_IO_BIT),
	/* fast poll multishot mode */
	REQ_F_APOLL_MULTISHOT	= IO_REQ_FLAG(REQ_F_APOLL_MULTISHOT_BIT),
	/* recvmsg special flag, clear EPOLLIN */
	REQ_F_CLEAR_POLLIN	= IO_REQ_FLAG(REQ_F_CLEAR_POLLIN_BIT),
	/* hashed into ->cancel_hash_locked, protected by ->uring_lock */
	REQ_F_HASH_LOCKED	= IO_REQ_FLAG(REQ_F_HASH_LOCKED_BIT),
	/* don't use lazy poll wake for this request */
	REQ_F_POLL_NO_LAZY	= IO_REQ_FLAG(REQ_F_POLL_NO_LAZY_BIT),
	/* cancel sequence is set and valid */
	REQ_F_CANCEL_SEQ	= IO_REQ_FLAG(REQ_F_CANCEL_SEQ_BIT),
	/* file is pollable */
	REQ_F_CAN_POLL		= IO_REQ_FLAG(REQ_F_CAN_POLL_BIT),
	/* buffer list was empty after selection of buffer */
	REQ_F_BL_EMPTY		= IO_REQ_FLAG(REQ_F_BL_EMPTY_BIT),
};

typedef void (*io_req_tw_func_t)(struct io_kiocb *req, struct io_tw_state *ts);

struct io_task_work {
	struct llist_node	node;
	io_req_tw_func_t	func;
};

struct io_cqe {
	__u64	user_data;
	__s32	res;
	/* fd initially, then cflags for completion */
	union {
		__u32	flags;
		int	fd;
	};
};

/*
 * Each request type overlays its private data structure on top of this one.
 * They must not exceed this one in size.
 */
struct io_cmd_data {
	struct file	*file;
	/* each command gets 56 bytes of data */
	__u8		data[56];
};

static inline void io_kiocb_cmd_sz_check(size_t cmd_sz)
{
	BUILD_BUG_ON(cmd_sz > sizeof(struct io_cmd_data));
}
#define io_kiocb_to_cmd(req, cmd_type) ( \
	io_kiocb_cmd_sz_check(sizeof(cmd_type)) , \
	((cmd_type *)&(req)->cmd) \
)
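
/*
 * Illustrative usage, not part of the original header: an opcode handler
 * views its per-request data through the macro, with the size check done
 * at compile time via BUILD_BUG_ON, e.g.:
 *
 *	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
 */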
#define cmd_to_io_kiocb(ptr)	((struct io_kiocb *) ptr)

struct io_kiocb {
	union {
		/*
		 * NOTE! Each of the io_kiocb union members has the file pointer
		 * as the first entry in their struct definition. So you can
		 * access the file pointer through any of the sub-structs,
		 * or directly as just 'file' in this struct.
		 */
		struct file		*file;
		struct io_cmd_data	cmd;
	};

	u8				opcode;
	/* polled IO has completed */
	u8				iopoll_completed;
	/*
	 * Can be either a fixed buffer index, or used with provided buffers.
	 * For the latter, before issue it points to the buffer group ID,
	 * and after selection it points to the buffer ID itself.
	 */
	u16				buf_index;

	unsigned			nr_tw;

	/* REQ_F_* flags */
	io_req_flags_t			flags;

	struct io_cqe			cqe;

	struct io_ring_ctx		*ctx;
	struct task_struct		*task;

	union {
		/* store used ubuf, so we can prevent reloading */
		struct io_mapped_ubuf	*imu;

		/* stores selected buf, valid IFF REQ_F_BUFFER_SELECTED is set */
		struct io_buffer	*kbuf;

		/*
		 * stores buffer ID for ring provided buffers, valid IFF
		 * REQ_F_BUFFER_RING is set.
		 */
		struct io_buffer_list	*buf_list;
	};

	union {
		/* used by request caches, completion batching and iopoll */
		struct io_wq_work_node	comp_list;
		/* cache ->apoll->events */
		__poll_t		apoll_events;
	};

	struct io_rsrc_node		*rsrc_node;

	atomic_t			refs;
	atomic_t			poll_refs;
	struct io_task_work		io_task_work;
	/* for polled requests, i.e. IORING_OP_POLL_ADD and async armed poll */
	struct hlist_node		hash_node;
	/* internal polling, see IORING_FEAT_FAST_POLL */
	struct async_poll		*apoll;
	/* opcode allocated if it needs to store data for async defer */
	void				*async_data;
	/* linked requests, IFF REQ_F_HARDLINK or REQ_F_LINK are set */
	struct io_kiocb			*link;
	/* custom credentials, valid IFF REQ_F_CREDS is set */
	const struct cred		*creds;
	struct io_wq_work		work;

	struct {
		u64			extra1;
		u64			extra2;
	} big_cqe;
};

struct io_overflow_cqe {
	struct list_head	list;
	struct io_uring_cqe	cqe;
};

#endif