#ifndef IO_URING_TYPES_H
#define IO_URING_TYPES_H

#include <linux/blkdev.h>
#include <linux/task_work.h>
#include <linux/bitmap.h>
#include <linux/llist.h>
#include <uapi/linux/io_uring.h>

struct io_wq_work_node {
	struct io_wq_work_node *next;
};

struct io_wq_work_list {
	struct io_wq_work_node *first;
	struct io_wq_work_node *last;
};

struct io_wq_work {
	struct io_wq_work_node list;
	unsigned flags;
	/* place it here instead of io_kiocb as it fills padding and saves 4B */
	int cancel_seq;
};

struct io_fixed_file {
	/* file * with additional FFS_* flags */
	unsigned long file_ptr;
};

struct io_file_table {
	struct io_fixed_file *files;
	unsigned long *bitmap;
	unsigned int alloc_hint;
};

struct io_hash_bucket {
	spinlock_t lock;
	struct hlist_head list;
} ____cacheline_aligned_in_smp;

struct io_hash_table {
	struct io_hash_bucket *hbs;
	unsigned hash_bits;
};
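
/*
 * Illustrative sketch, not part of this header: cancellation lookups
 * pick a bucket by hashing a request's user_data down to ->hash_bits
 * bits, roughly as below. hash_long() is from <linux/hash.h>;
 * `table` and `user_data` are assumed locals:
 *
 *	struct io_hash_bucket *hb =
 *		&table->hbs[hash_long(user_data, table->hash_bits)];
 *
 *	spin_lock(&hb->lock);
 *	... walk hb->list for a matching request ...
 *	spin_unlock(&hb->lock);
 */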

/*
 * Arbitrary limit, can be raised if need be
 */
#define IO_RINGFD_REG_MAX 16

struct io_uring_task {
	/* submission side */
	int cached_refs;
	const struct io_ring_ctx *last;
	struct io_wq *io_wq;
	struct file *registered_rings[IO_RINGFD_REG_MAX];

	struct xarray xa;
	struct wait_queue_head wait;
	atomic_t in_cancel;
	atomic_t inflight_tracked;
	struct percpu_counter inflight;

	struct { /* task_work */
		struct llist_head task_list;
		struct callback_head task_work;
	} ____cacheline_aligned_in_smp;
};

struct io_uring {
	u32 head;
	u32 tail;
};

/*
 * This data is shared with the application through the mmap at offsets
 * IORING_OFF_SQ_RING and IORING_OFF_CQ_RING.
 *
 * The offsets to the member fields are published through struct
 * io_sqring_offsets when calling io_uring_setup.
 */
struct io_rings {
	/*
	 * Head and tail offsets into the ring; the offsets need to be
	 * masked to get valid indices.
	 *
	 * The kernel controls head of the sq ring and the tail of the cq ring,
	 * and the application controls tail of the sq ring and the head of the
	 * cq ring.
	 */
	struct io_uring sq, cq;
	/*
	 * Bitmasks to apply to head and tail offsets (constant, equals
	 * ring_entries - 1)
	 */
	u32 sq_ring_mask, cq_ring_mask;
	/* Ring sizes (constant, power of 2) */
	u32 sq_ring_entries, cq_ring_entries;
	/*
	 * Number of invalid entries dropped by the kernel due to
	 * invalid index stored in array
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application (i.e. get number of "new events" by comparing to
	 * cached value).
	 *
	 * After a new SQ head value was read by the application this
	 * counter includes all submissions that were dropped reaching
	 * the new SQ head (and possibly more).
	 */
	u32 sq_dropped;
	/*
	 * Runtime SQ flags
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application.
	 *
	 * The application needs a full memory barrier before checking
	 * for IORING_SQ_NEED_WAKEUP after updating the sq tail.
	 */
	atomic_t sq_flags;
	/*
	 * Runtime CQ flags
	 *
	 * Written by the application, shouldn't be modified by the
	 * kernel.
	 */
	u32 cq_flags;
	/*
	 * Number of completion events lost because the queue was full;
	 * this should be avoided by the application by making sure
	 * there are not more requests pending than there is space in
	 * the completion queue.
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application (i.e. get number of "new events" by comparing to
	 * cached value).
	 *
	 * As completion events come in out of order this counter is not
	 * ordered with any other data.
	 */
	u32 cq_overflow;
	/*
	 * Ring buffer of completion events.
	 *
	 * The kernel writes completion events fresh every time they are
	 * produced, so the application is allowed to modify pending
	 * entries.
	 */
	struct io_uring_cqe cqes[] ____cacheline_aligned_in_smp;
};
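
/*
 * Illustrative sketch, not part of this header: a consumer-side CQ reap
 * using the masks above, in the style of what liburing does. `rings` is
 * assumed to point at the mmap'ed struct io_rings, and handle() is a
 * hypothetical application callback:
 *
 *	u32 head = rings->cq.head;
 *
 *	if (head != smp_load_acquire(&rings->cq.tail)) {
 *		struct io_uring_cqe *cqe;
 *
 *		cqe = &rings->cqes[head & rings->cq_ring_mask];
 *		handle(cqe);
 *		smp_store_release(&rings->cq.head, head + 1);
 *	}
 */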

struct io_restriction {
	DECLARE_BITMAP(register_op, IORING_REGISTER_LAST);
	DECLARE_BITMAP(sqe_op, IORING_OP_LAST);
	u8 sqe_flags_allowed;
	u8 sqe_flags_required;
	bool registered;
};

struct io_submit_link {
	struct io_kiocb *head;
	struct io_kiocb *last;
};

struct io_submit_state {
	/* inline/task_work completion list, under ->uring_lock */
	struct io_wq_work_node free_list;
	/* batch completion logic */
	struct io_wq_work_list compl_reqs;
	struct io_submit_link link;

	bool plug_started;
	bool need_plug;
	unsigned short submit_nr;
	unsigned int cqes_count;
	struct blk_plug plug;
};

struct io_ev_fd {
	struct eventfd_ctx *cq_ev_fd;
	unsigned int eventfd_async: 1;
	struct rcu_head rcu;
	atomic_t refs;
	atomic_t ops;
};

struct io_alloc_cache {
	struct io_wq_work_node list;
	unsigned int nr_cached;
	unsigned int max_cached;
	size_t elem_size;
};

struct io_ring_ctx {
	/* const or read-mostly hot data */
	struct {
		unsigned int flags;
		unsigned int drain_next: 1;
		unsigned int restricted: 1;
		unsigned int off_timeout_used: 1;
		unsigned int drain_active: 1;
		unsigned int has_evfd: 1;
		/* all CQEs should be posted only by the submitter task */
		unsigned int task_complete: 1;
		unsigned int lockless_cq: 1;
		unsigned int syscall_iopoll: 1;
		unsigned int poll_activated: 1;
		unsigned int drain_disabled: 1;
		unsigned int compat: 1;

		struct task_struct *submitter_task;
		struct io_rings *rings;
		struct percpu_ref refs;

		enum task_work_notify_mode notify_method;
	} ____cacheline_aligned_in_smp;

	/* submission data */
	struct {
		struct mutex uring_lock;

		/*
		 * Ring buffer of indices into array of io_uring_sqe, which is
		 * mmapped by the application using the IORING_OFF_SQES offset.
		 *
		 * This indirection could e.g. be used to assign fixed
		 * io_uring_sqe entries to operations and only submit them to
		 * the queue when needed.
		 *
		 * The kernel modifies neither the indices array nor the entries
		 * array.
		 */
		u32 *sq_array;
		struct io_uring_sqe *sq_sqes;
		unsigned cached_sq_head;
		unsigned sq_entries;
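
		/*
		 * Illustrative sketch, not part of this header: pulling the
		 * next SQE through the indirection above, roughly as the
		 * submission path does it (the bounds check of idx against
		 * sq_entries is omitted here):
		 *
		 *	u32 mask = ctx->sq_entries - 1;
		 *	u32 idx = READ_ONCE(ctx->sq_array[ctx->cached_sq_head++ & mask]);
		 *	struct io_uring_sqe *sqe = &ctx->sq_sqes[idx];
		 */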

		/*
		 * Fixed resources fast path, should be accessed only under
		 * uring_lock, and updated through io_uring_register(2)
		 */
		struct io_rsrc_node *rsrc_node;
		atomic_t cancel_seq;
		struct io_file_table file_table;
		unsigned nr_user_files;
		unsigned nr_user_bufs;
		struct io_mapped_ubuf **user_bufs;

		struct io_submit_state submit_state;

		struct io_buffer_list *io_bl;
		struct xarray io_bl_xa;

		struct io_hash_table cancel_table_locked;
		struct io_alloc_cache apoll_cache;
		struct io_alloc_cache netmsg_cache;

		/*
		 * ->iopoll_list is protected by the ctx->uring_lock for
		 * io_uring instances that don't use IORING_SETUP_SQPOLL.
		 * For SQPOLL, only the single threaded io_sq_thread() will
		 * manipulate the list, hence no extra locking is needed there.
		 */
		struct io_wq_work_list iopoll_list;
		bool poll_multi_queue;
	} ____cacheline_aligned_in_smp;

	struct {
		/*
		 * We cache a range of free CQEs we can use, once exhausted it
		 * should go through a slower range setup, see __io_get_cqe()
		 */
		struct io_uring_cqe *cqe_cached;
		struct io_uring_cqe *cqe_sentinel;

		unsigned cached_cq_tail;
		unsigned cq_entries;
		struct io_ev_fd __rcu *io_ev_fd;
		unsigned cq_extra;
	} ____cacheline_aligned_in_smp;
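
	/*
	 * Illustrative sketch, not part of this header: the CQE fast path
	 * hands out cached entries until the range is exhausted, along the
	 * lines of:
	 *
	 *	if (ctx->cqe_cached < ctx->cqe_sentinel) {
	 *		ctx->cached_cq_tail++;
	 *		return ctx->cqe_cached++;
	 *	}
	 *	return __io_get_cqe(ctx);
	 */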

	/*
	 * task_work and async notification delivery cacheline. Expected to
	 * regularly bounce b/w CPUs.
	 */
	struct {
		struct llist_head work_llist;
		unsigned long check_cq;
		atomic_t cq_wait_nr;
		atomic_t cq_timeouts;
		struct wait_queue_head cq_wait;
	} ____cacheline_aligned_in_smp;

	/* timeouts */
	struct {
		spinlock_t timeout_lock;
		struct list_head timeout_list;
		struct list_head ltimeout_list;
		unsigned cq_last_tm_flush;
	} ____cacheline_aligned_in_smp;

	struct io_uring_cqe completion_cqes[16];

	spinlock_t completion_lock;

	/* IRQ completion list, under ->completion_lock */
	struct io_wq_work_list locked_free_list;
	unsigned int locked_free_nr;

	struct list_head io_buffers_comp;
	struct list_head cq_overflow_list;
	struct io_hash_table cancel_table;

	const struct cred *sq_creds;	/* cred used for __io_sq_thread() */
	struct io_sq_data *sq_data;	/* if using sq thread polling */

	struct wait_queue_head sqo_sq_wait;
	struct list_head sqd_list;

	unsigned int file_alloc_start;
	unsigned int file_alloc_end;

	struct xarray personalities;
	u32 pers_next;

	struct list_head io_buffers_cache;

	/* Keep this last, we don't need it for the fast path */
	struct wait_queue_head poll_wq;
	struct io_restriction restrictions;

	/* slow path rsrc auxiliary data, used by update/register */
	struct io_mapped_ubuf *dummy_ubuf;
	struct io_rsrc_data *file_data;
	struct io_rsrc_data *buf_data;

	/* protected by ->uring_lock */
	struct list_head rsrc_ref_list;
	struct io_alloc_cache rsrc_node_cache;
	struct wait_queue_head rsrc_quiesce_wq;
	unsigned rsrc_quiesce;

	struct list_head io_buffers_pages;

#if defined(CONFIG_UNIX)
	struct socket *ring_sock;
#endif
	/* hashed buffered write serialization */
	struct io_wq_hash *hash_map;

	/* Only used for accounting purposes */
	struct user_struct *user;
	struct mm_struct *mm_account;

	/* ctx exit and cancelation */
	struct llist_head fallback_llist;
	struct delayed_work fallback_work;
	struct work_struct exit_work;
	struct list_head tctx_list;
	struct completion ref_comp;

	/* io-wq management, e.g. thread count */
	u32 iowq_limits[2];
	bool iowq_limits_set;

	struct callback_head poll_wq_task_work;
	struct list_head defer_list;
	unsigned sq_thread_idle;
	/* protected by ->completion_lock */
	unsigned evfd_last_cq_tail;

	/*
	 * If IORING_SETUP_NO_MMAP is used, then the below holds
	 * the gup'ed pages for the two rings, and the sqes.
	 */
	unsigned short n_ring_pages;
	unsigned short n_sqe_pages;
	struct page **ring_pages;
	struct page **sqe_pages;
};

struct io_tw_state {
	/* ->uring_lock is taken, callbacks can use io_tw_lock to lock it */
	bool locked;
};

enum {
	REQ_F_FIXED_FILE_BIT = IOSQE_FIXED_FILE_BIT,
	REQ_F_IO_DRAIN_BIT = IOSQE_IO_DRAIN_BIT,
	REQ_F_LINK_BIT = IOSQE_IO_LINK_BIT,
	REQ_F_HARDLINK_BIT = IOSQE_IO_HARDLINK_BIT,
	REQ_F_FORCE_ASYNC_BIT = IOSQE_ASYNC_BIT,
	REQ_F_BUFFER_SELECT_BIT = IOSQE_BUFFER_SELECT_BIT,
	REQ_F_CQE_SKIP_BIT = IOSQE_CQE_SKIP_SUCCESS_BIT,

	/* first byte is taken by user flags, shift it to not overlap */
	REQ_F_FAIL_BIT = 8,
	REQ_F_INFLIGHT_BIT,
	REQ_F_CUR_POS_BIT,
	REQ_F_NOWAIT_BIT,
	REQ_F_LINK_TIMEOUT_BIT,
	REQ_F_NEED_CLEANUP_BIT,
	REQ_F_POLLED_BIT,
	REQ_F_BUFFER_SELECTED_BIT,
	REQ_F_BUFFER_RING_BIT,
	REQ_F_REISSUE_BIT,
	REQ_F_CREDS_BIT,
	REQ_F_REFCOUNT_BIT,
	REQ_F_ARM_LTIMEOUT_BIT,
	REQ_F_ASYNC_DATA_BIT,
	REQ_F_SKIP_LINK_CQES_BIT,
	REQ_F_SINGLE_POLL_BIT,
	REQ_F_DOUBLE_POLL_BIT,
	REQ_F_PARTIAL_IO_BIT,
	REQ_F_APOLL_MULTISHOT_BIT,
	REQ_F_CLEAR_POLLIN_BIT,
	REQ_F_HASH_LOCKED_BIT,
	/* keep async read/write and isreg together and in order */
	REQ_F_SUPPORT_NOWAIT_BIT,
	REQ_F_ISREG_BIT,

	/* not a real bit, just to check we're not overflowing the space */
	__REQ_F_LAST_BIT,
};
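
/*
 * Worked example, not part of this header: the first block above aliases
 * the user-visible IOSQE_* bits, so e.g. REQ_F_LINK == BIT(IOSQE_IO_LINK_BIT)
 * == BIT(2) == 0x4, while kernel-private flags start at REQ_F_FAIL_BIT = 8,
 * i.e. REQ_F_FAIL == 0x100, safely above the byte reserved for user flags.
 */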
427 | ||
428 | enum { | |
429 | /* ctx owns file */ | |
430 | REQ_F_FIXED_FILE = BIT(REQ_F_FIXED_FILE_BIT), | |
431 | /* drain existing IO first */ | |
432 | REQ_F_IO_DRAIN = BIT(REQ_F_IO_DRAIN_BIT), | |
433 | /* linked sqes */ | |
434 | REQ_F_LINK = BIT(REQ_F_LINK_BIT), | |
435 | /* doesn't sever on completion < 0 */ | |
436 | REQ_F_HARDLINK = BIT(REQ_F_HARDLINK_BIT), | |
437 | /* IOSQE_ASYNC */ | |
438 | REQ_F_FORCE_ASYNC = BIT(REQ_F_FORCE_ASYNC_BIT), | |
439 | /* IOSQE_BUFFER_SELECT */ | |
440 | REQ_F_BUFFER_SELECT = BIT(REQ_F_BUFFER_SELECT_BIT), | |
441 | /* IOSQE_CQE_SKIP_SUCCESS */ | |
442 | REQ_F_CQE_SKIP = BIT(REQ_F_CQE_SKIP_BIT), | |
443 | ||
444 | /* fail rest of links */ | |
445 | REQ_F_FAIL = BIT(REQ_F_FAIL_BIT), | |
446 | /* on inflight list, should be cancelled and waited on exit reliably */ | |
447 | REQ_F_INFLIGHT = BIT(REQ_F_INFLIGHT_BIT), | |
448 | /* read/write uses file position */ | |
449 | REQ_F_CUR_POS = BIT(REQ_F_CUR_POS_BIT), | |
450 | /* must not punt to workers */ | |
451 | REQ_F_NOWAIT = BIT(REQ_F_NOWAIT_BIT), | |
452 | /* has or had linked timeout */ | |
453 | REQ_F_LINK_TIMEOUT = BIT(REQ_F_LINK_TIMEOUT_BIT), | |
454 | /* needs cleanup */ | |
455 | REQ_F_NEED_CLEANUP = BIT(REQ_F_NEED_CLEANUP_BIT), | |
456 | /* already went through poll handler */ | |
457 | REQ_F_POLLED = BIT(REQ_F_POLLED_BIT), | |
458 | /* buffer already selected */ | |
459 | REQ_F_BUFFER_SELECTED = BIT(REQ_F_BUFFER_SELECTED_BIT), | |
460 | /* buffer selected from ring, needs commit */ | |
461 | REQ_F_BUFFER_RING = BIT(REQ_F_BUFFER_RING_BIT), | |
e27f928e JA |
462 | /* caller should reissue async */ |
463 | REQ_F_REISSUE = BIT(REQ_F_REISSUE_BIT), | |
464 | /* supports async reads/writes */ | |
465 | REQ_F_SUPPORT_NOWAIT = BIT(REQ_F_SUPPORT_NOWAIT_BIT), | |
466 | /* regular file */ | |
467 | REQ_F_ISREG = BIT(REQ_F_ISREG_BIT), | |
468 | /* has creds assigned */ | |
469 | REQ_F_CREDS = BIT(REQ_F_CREDS_BIT), | |
470 | /* skip refcounting if not set */ | |
471 | REQ_F_REFCOUNT = BIT(REQ_F_REFCOUNT_BIT), | |
472 | /* there is a linked timeout that has to be armed */ | |
473 | REQ_F_ARM_LTIMEOUT = BIT(REQ_F_ARM_LTIMEOUT_BIT), | |
474 | /* ->async_data allocated */ | |
475 | REQ_F_ASYNC_DATA = BIT(REQ_F_ASYNC_DATA_BIT), | |
476 | /* don't post CQEs while failing linked requests */ | |
477 | REQ_F_SKIP_LINK_CQES = BIT(REQ_F_SKIP_LINK_CQES_BIT), | |
478 | /* single poll may be active */ | |
479 | REQ_F_SINGLE_POLL = BIT(REQ_F_SINGLE_POLL_BIT), | |
480 | /* double poll may active */ | |
481 | REQ_F_DOUBLE_POLL = BIT(REQ_F_DOUBLE_POLL_BIT), | |
482 | /* request has already done partial IO */ | |
483 | REQ_F_PARTIAL_IO = BIT(REQ_F_PARTIAL_IO_BIT), | |
484 | /* fast poll multishot mode */ | |
485 | REQ_F_APOLL_MULTISHOT = BIT(REQ_F_APOLL_MULTISHOT_BIT), | |
e27f928e JA |
486 | /* recvmsg special flag, clear EPOLLIN */ |
487 | REQ_F_CLEAR_POLLIN = BIT(REQ_F_CLEAR_POLLIN_BIT), | |
9ca9fb24 PB |
488 | /* hashed into ->cancel_hash_locked, protected by ->uring_lock */ |
489 | REQ_F_HASH_LOCKED = BIT(REQ_F_HASH_LOCKED_BIT), | |
e27f928e JA |
490 | }; |

typedef void (*io_req_tw_func_t)(struct io_kiocb *req, struct io_tw_state *ts);

struct io_task_work {
	struct llist_node node;
	io_req_tw_func_t func;
};

struct io_cqe {
	__u64 user_data;
	__s32 res;
	/* fd initially, then cflags for completion */
	union {
		__u32 flags;
		int fd;
	};
};

/*
 * Each request type overlays its private data structure on top of this one.
 * They must not exceed this one in size.
 */
struct io_cmd_data {
	struct file *file;
	/* each command gets 56 bytes of data */
	__u8 data[56];
};

static inline void io_kiocb_cmd_sz_check(size_t cmd_sz)
{
	BUILD_BUG_ON(cmd_sz > sizeof(struct io_cmd_data));
}
#define io_kiocb_to_cmd(req, cmd_type) ( \
	io_kiocb_cmd_sz_check(sizeof(cmd_type)) , \
	((cmd_type *)&(req)->cmd) \
)
#define cmd_to_io_kiocb(ptr) ((struct io_kiocb *) ptr)
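
/*
 * Illustrative sketch, not part of this header: an opcode handler overlays
 * its per-request struct on ->cmd via io_kiocb_to_cmd(), which also
 * build-checks the size. struct io_foo and io_foo_prep() are hypothetical:
 *
 *	struct io_foo {
 *		struct file *file;	// must be first, see io_kiocb below
 *		u32 len;
 *	};
 *
 *	int io_foo_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 *	{
 *		struct io_foo *foo = io_kiocb_to_cmd(req, struct io_foo);
 *
 *		foo->len = READ_ONCE(sqe->len);
 *		return 0;
 *	}
 */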

struct io_kiocb {
	union {
		/*
		 * NOTE! Each of the io_kiocb union members has the file pointer
		 * as the first entry in their struct definition. So you can
		 * access the file pointer through any of the sub-structs,
		 * or directly as just 'file' in this struct.
		 */
		struct file *file;
		struct io_cmd_data cmd;
	};

	u8 opcode;
	/* polled IO has completed */
	u8 iopoll_completed;
	/*
	 * Can be either a fixed buffer index, or used with provided buffers.
	 * For the latter, before issue it points to the buffer group ID,
	 * and after selection it points to the buffer ID itself.
	 */
	u16 buf_index;
	unsigned int flags;

	struct io_cqe cqe;

	struct io_ring_ctx *ctx;
	struct task_struct *task;

	struct io_rsrc_node *rsrc_node;

	union {
		/* store used ubuf, so we can prevent reloading */
		struct io_mapped_ubuf *imu;

		/* stores selected buf, valid IFF REQ_F_BUFFER_SELECTED is set */
		struct io_buffer *kbuf;

		/*
		 * stores buffer ID for ring provided buffers, valid IFF
		 * REQ_F_BUFFER_RING is set.
		 */
		struct io_buffer_list *buf_list;
	};

	union {
		/* used by request caches, completion batching and iopoll */
		struct io_wq_work_node comp_list;
		/* cache ->apoll->events */
		__poll_t apoll_events;
	};
	atomic_t refs;
	atomic_t poll_refs;
	struct io_task_work io_task_work;
	unsigned nr_tw;
	/* for polled requests, i.e. IORING_OP_POLL_ADD and async armed poll */
	struct hlist_node hash_node;
	/* internal polling, see IORING_FEAT_FAST_POLL */
	struct async_poll *apoll;
	/* opcode allocated if it needs to store data for async defer */
	void *async_data;
	/* linked requests, IFF REQ_F_HARDLINK or REQ_F_LINK are set */
	struct io_kiocb *link;
	/* custom credentials, valid IFF REQ_F_CREDS is set */
	const struct cred *creds;
	struct io_wq_work work;

	struct {
		u64 extra1;
		u64 extra2;
	} big_cqe;
};

struct io_overflow_cqe {
	struct list_head list;
	struct io_uring_cqe cqe;
};

#endif