#ifndef IO_URING_TYPES_H
#define IO_URING_TYPES_H

#include <linux/blkdev.h>
#include <linux/task_work.h>

#include "io-wq.h"
#include "filetable.h"

struct io_uring {
	u32 head ____cacheline_aligned_in_smp;
	u32 tail ____cacheline_aligned_in_smp;
};

/*
 * This data is shared with the application through the mmap at offsets
 * IORING_OFF_SQ_RING and IORING_OFF_CQ_RING.
 *
 * The offsets to the member fields are published through struct
 * io_sqring_offsets when calling io_uring_setup.
 */
struct io_rings {
	/*
	 * Head and tail offsets into the ring; the offsets need to be
	 * masked to get valid indices.
	 *
	 * The kernel controls the head of the sq ring and the tail of
	 * the cq ring, and the application controls the tail of the sq
	 * ring and the head of the cq ring.
	 */
	struct io_uring		sq, cq;
	/*
	 * Bitmasks to apply to head and tail offsets (constant, equals
	 * ring_entries - 1)
	 */
	u32			sq_ring_mask, cq_ring_mask;
	/* Ring sizes (constant, power of 2) */
	u32			sq_ring_entries, cq_ring_entries;
	/*
	 * Number of invalid entries dropped by the kernel due to an
	 * invalid index stored in the array.
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application (i.e. get the number of "new events" by comparing
	 * to a cached value).
	 *
	 * After a new SQ head value has been read by the application,
	 * this counter includes all submissions that were dropped
	 * reaching the new SQ head (and possibly more).
	 */
	u32			sq_dropped;
	/*
	 * Runtime SQ flags
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application.
	 *
	 * The application needs a full memory barrier before checking
	 * for IORING_SQ_NEED_WAKEUP after updating the sq tail.
	 */
	atomic_t		sq_flags;
	/*
	 * Runtime CQ flags
	 *
	 * Written by the application, shouldn't be modified by the
	 * kernel.
	 */
	u32			cq_flags;
	/*
	 * Number of completion events lost because the queue was full;
	 * this should be avoided by the application by making sure
	 * there are not more requests pending than there is space in
	 * the completion queue.
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application (i.e. get the number of "new events" by comparing
	 * to a cached value).
	 *
	 * As completion events come in out of order, this counter is
	 * not ordered with any other data.
	 */
	u32			cq_overflow;
	/*
	 * Ring buffer of completion events.
	 *
	 * The kernel writes completion events fresh every time they are
	 * produced, so the application is allowed to modify pending
	 * entries.
	 */
	struct io_uring_cqe	cqes[] ____cacheline_aligned_in_smp;
};
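
/*
 * Illustrative sketch, not part of this header: given the layout
 * above, an application reaps completions by masking the CQ head into
 * cqes[]. load_acquire(), store_release() and consume_cqe() are
 * hypothetical stand-ins for the barriers and callback a real consumer
 * (e.g. liburing) would use:
 *
 *	unsigned head = rings->cq.head;			(application-owned)
 *	unsigned tail = load_acquire(&rings->cq.tail);	(kernel-written)
 *
 *	while (head != tail)
 *		consume_cqe(&rings->cqes[head++ & rings->cq_ring_mask]);
 *	store_release(&rings->cq.head, head);		(publish consumption)
 *
 * Similarly, after bumping sq.tail the application needs a full memory
 * barrier before testing sq_flags for IORING_SQ_NEED_WAKEUP, per the
 * comment on that field.
 */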
92 | ||
93 | struct io_restriction { | |
94 | DECLARE_BITMAP(register_op, IORING_REGISTER_LAST); | |
95 | DECLARE_BITMAP(sqe_op, IORING_OP_LAST); | |
96 | u8 sqe_flags_allowed; | |
97 | u8 sqe_flags_required; | |
98 | bool registered; | |
99 | }; | |
100 | ||
101 | struct io_submit_link { | |
102 | struct io_kiocb *head; | |
103 | struct io_kiocb *last; | |
104 | }; | |
105 | ||
106 | struct io_submit_state { | |
107 | /* inline/task_work completion list, under ->uring_lock */ | |
108 | struct io_wq_work_node free_list; | |
109 | /* batch completion logic */ | |
110 | struct io_wq_work_list compl_reqs; | |
111 | struct io_submit_link link; | |
112 | ||
113 | bool plug_started; | |
114 | bool need_plug; | |
115 | bool flush_cqes; | |
116 | unsigned short submit_nr; | |
117 | struct blk_plug plug; | |
118 | }; | |
119 | ||
120 | struct io_ev_fd { | |
121 | struct eventfd_ctx *cq_ev_fd; | |
122 | unsigned int eventfd_async: 1; | |
123 | struct rcu_head rcu; | |
124 | }; | |
125 | ||
e27f928e JA |
struct io_ring_ctx {
	/* const or read-mostly hot data */
	struct {
		struct percpu_ref	refs;

		struct io_rings		*rings;
		unsigned int		flags;
		enum task_work_notify_mode	notify_method;
		unsigned int		compat: 1;
		unsigned int		drain_next: 1;
		unsigned int		restricted: 1;
		unsigned int		off_timeout_used: 1;
		unsigned int		drain_active: 1;
		unsigned int		drain_disabled: 1;
		unsigned int		has_evfd: 1;
		unsigned int		syscall_iopoll: 1;
	} ____cacheline_aligned_in_smp;

	/* submission data */
	struct {
		struct mutex		uring_lock;

		/*
		 * Ring buffer of indices into array of io_uring_sqe, which is
		 * mmapped by the application using the IORING_OFF_SQES offset.
		 *
		 * This indirection could e.g. be used to assign fixed
		 * io_uring_sqe entries to operations and only submit them to
		 * the queue when needed.
		 *
		 * The kernel modifies neither the indices array nor the entries
		 * array.
		 */
		u32			*sq_array;
		struct io_uring_sqe	*sq_sqes;
		unsigned		cached_sq_head;
		unsigned		sq_entries;
		struct list_head	defer_list;

		/*
		 * Fixed resources fast path, should be accessed only under
		 * uring_lock, and updated through io_uring_register(2)
		 */
		struct io_rsrc_node	*rsrc_node;
		int			rsrc_cached_refs;
		atomic_t		cancel_seq;
		struct io_file_table	file_table;
		unsigned		nr_user_files;
		unsigned		nr_user_bufs;
		struct io_mapped_ubuf	**user_bufs;

		struct io_submit_state	submit_state;

		struct io_buffer_list	*io_bl;
		struct xarray		io_bl_xa;
		struct list_head	io_buffers_cache;

		struct list_head	timeout_list;
		struct list_head	ltimeout_list;
		struct list_head	cq_overflow_list;
		struct list_head	apoll_cache;
		struct xarray		personalities;
		u32			pers_next;
		unsigned		sq_thread_idle;
	} ____cacheline_aligned_in_smp;

	/* IRQ completion list, under ->completion_lock */
	struct io_wq_work_list	locked_free_list;
	unsigned int		locked_free_nr;

	const struct cred	*sq_creds;	/* cred used for __io_sq_thread() */
	struct io_sq_data	*sq_data;	/* if using sq thread polling */

	struct wait_queue_head	sqo_sq_wait;
	struct list_head	sqd_list;

	unsigned long		check_cq;

	struct {
		/*
		 * We cache a range of free CQEs we can use; once exhausted it
		 * should go through a slower range setup, see __io_get_cqe()
		 */
		struct io_uring_cqe	*cqe_cached;
		struct io_uring_cqe	*cqe_sentinel;

		unsigned		cached_cq_tail;
		unsigned		cq_entries;
		struct io_ev_fd	__rcu	*io_ev_fd;
		struct wait_queue_head	cq_wait;
		unsigned		cq_extra;
		atomic_t		cq_timeouts;
		unsigned		cq_last_tm_flush;
	} ____cacheline_aligned_in_smp;

	struct {
		spinlock_t		completion_lock;

		spinlock_t		timeout_lock;

		/*
		 * ->iopoll_list is protected by the ctx->uring_lock for
		 * io_uring instances that don't use IORING_SETUP_SQPOLL.
		 * For SQPOLL, only the single threaded io_sq_thread() will
		 * manipulate the list, hence no extra locking is needed there.
		 */
		struct io_wq_work_list	iopoll_list;
		struct hlist_head	*cancel_hash;
		unsigned		cancel_hash_bits;
		bool			poll_multi_queue;

		struct list_head	io_buffers_comp;
	} ____cacheline_aligned_in_smp;

	struct io_restriction	restrictions;

	/* slow path rsrc auxiliary data, used by update/register */
	struct {
		struct io_rsrc_node	*rsrc_backup_node;
		struct io_mapped_ubuf	*dummy_ubuf;
		struct io_rsrc_data	*file_data;
		struct io_rsrc_data	*buf_data;

		struct delayed_work	rsrc_put_work;
		struct llist_head	rsrc_put_llist;
		struct list_head	rsrc_ref_list;
		spinlock_t		rsrc_ref_lock;

		struct list_head	io_buffers_pages;
	};

	/* Keep this last, we don't need it for the fast path */
	struct {
#if defined(CONFIG_UNIX)
		struct socket		*ring_sock;
#endif
		/* hashed buffered write serialization */
		struct io_wq_hash	*hash_map;

		/* Only used for accounting purposes */
		struct user_struct	*user;
		struct mm_struct	*mm_account;

		/* ctx exit and cancelation */
		struct llist_head	fallback_llist;
		struct delayed_work	fallback_work;
		struct work_struct	exit_work;
		struct list_head	tctx_list;
		struct completion	ref_comp;
		u32			iowq_limits[2];
		bool			iowq_limits_set;
	};
};
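
/*
 * Illustrative sketch, mirroring how submission consumes the fields
 * above (the real code lives in io_uring.c, not here): the sq_array
 * indirection means an SQE is fetched in two steps, and an
 * out-of-range index is accounted in rings->sq_dropped rather than
 * submitted:
 *
 *	unsigned mask = ctx->sq_entries - 1;
 *	unsigned sq_idx = ctx->cached_sq_head++ & mask;
 *	unsigned idx = READ_ONCE(ctx->sq_array[sq_idx]);
 *
 *	if (idx < ctx->sq_entries)
 *		sqe = &ctx->sq_sqes[idx];	(valid entry)
 *	else
 *		(drop the entry and bump sq_dropped)
 */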
279 | ||
280 | enum { | |
281 | REQ_F_FIXED_FILE_BIT = IOSQE_FIXED_FILE_BIT, | |
282 | REQ_F_IO_DRAIN_BIT = IOSQE_IO_DRAIN_BIT, | |
283 | REQ_F_LINK_BIT = IOSQE_IO_LINK_BIT, | |
284 | REQ_F_HARDLINK_BIT = IOSQE_IO_HARDLINK_BIT, | |
285 | REQ_F_FORCE_ASYNC_BIT = IOSQE_ASYNC_BIT, | |
286 | REQ_F_BUFFER_SELECT_BIT = IOSQE_BUFFER_SELECT_BIT, | |
287 | REQ_F_CQE_SKIP_BIT = IOSQE_CQE_SKIP_SUCCESS_BIT, | |
288 | ||
289 | /* first byte is taken by user flags, shift it to not overlap */ | |
290 | REQ_F_FAIL_BIT = 8, | |
291 | REQ_F_INFLIGHT_BIT, | |
292 | REQ_F_CUR_POS_BIT, | |
293 | REQ_F_NOWAIT_BIT, | |
294 | REQ_F_LINK_TIMEOUT_BIT, | |
295 | REQ_F_NEED_CLEANUP_BIT, | |
296 | REQ_F_POLLED_BIT, | |
297 | REQ_F_BUFFER_SELECTED_BIT, | |
298 | REQ_F_BUFFER_RING_BIT, | |
299 | REQ_F_COMPLETE_INLINE_BIT, | |
300 | REQ_F_REISSUE_BIT, | |
301 | REQ_F_CREDS_BIT, | |
302 | REQ_F_REFCOUNT_BIT, | |
303 | REQ_F_ARM_LTIMEOUT_BIT, | |
304 | REQ_F_ASYNC_DATA_BIT, | |
305 | REQ_F_SKIP_LINK_CQES_BIT, | |
306 | REQ_F_SINGLE_POLL_BIT, | |
307 | REQ_F_DOUBLE_POLL_BIT, | |
308 | REQ_F_PARTIAL_IO_BIT, | |
309 | REQ_F_CQE32_INIT_BIT, | |
310 | REQ_F_APOLL_MULTISHOT_BIT, | |
311 | REQ_F_CLEAR_POLLIN_BIT, | |
312 | /* keep async read/write and isreg together and in order */ | |
313 | REQ_F_SUPPORT_NOWAIT_BIT, | |
314 | REQ_F_ISREG_BIT, | |
315 | ||
316 | /* not a real bit, just to check we're not overflowing the space */ | |
317 | __REQ_F_LAST_BIT, | |
318 | }; | |
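
/*
 * Because the low byte of REQ_F_* mirrors the IOSQE_* flags bit for
 * bit, request setup can seed req->flags straight from the SQE; a
 * minimal sketch, assuming the usual READ_ONCE() access:
 *
 *	req->flags = (unsigned int) READ_ONCE(sqe->flags);
 *
 * Kernel-private flags start at bit 8, so they can never collide with
 * the user-visible ones.
 */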
319 | ||
320 | enum { | |
321 | /* ctx owns file */ | |
322 | REQ_F_FIXED_FILE = BIT(REQ_F_FIXED_FILE_BIT), | |
323 | /* drain existing IO first */ | |
324 | REQ_F_IO_DRAIN = BIT(REQ_F_IO_DRAIN_BIT), | |
325 | /* linked sqes */ | |
326 | REQ_F_LINK = BIT(REQ_F_LINK_BIT), | |
327 | /* doesn't sever on completion < 0 */ | |
328 | REQ_F_HARDLINK = BIT(REQ_F_HARDLINK_BIT), | |
329 | /* IOSQE_ASYNC */ | |
330 | REQ_F_FORCE_ASYNC = BIT(REQ_F_FORCE_ASYNC_BIT), | |
331 | /* IOSQE_BUFFER_SELECT */ | |
332 | REQ_F_BUFFER_SELECT = BIT(REQ_F_BUFFER_SELECT_BIT), | |
333 | /* IOSQE_CQE_SKIP_SUCCESS */ | |
334 | REQ_F_CQE_SKIP = BIT(REQ_F_CQE_SKIP_BIT), | |
335 | ||
336 | /* fail rest of links */ | |
337 | REQ_F_FAIL = BIT(REQ_F_FAIL_BIT), | |
338 | /* on inflight list, should be cancelled and waited on exit reliably */ | |
339 | REQ_F_INFLIGHT = BIT(REQ_F_INFLIGHT_BIT), | |
340 | /* read/write uses file position */ | |
341 | REQ_F_CUR_POS = BIT(REQ_F_CUR_POS_BIT), | |
342 | /* must not punt to workers */ | |
343 | REQ_F_NOWAIT = BIT(REQ_F_NOWAIT_BIT), | |
344 | /* has or had linked timeout */ | |
345 | REQ_F_LINK_TIMEOUT = BIT(REQ_F_LINK_TIMEOUT_BIT), | |
346 | /* needs cleanup */ | |
347 | REQ_F_NEED_CLEANUP = BIT(REQ_F_NEED_CLEANUP_BIT), | |
348 | /* already went through poll handler */ | |
349 | REQ_F_POLLED = BIT(REQ_F_POLLED_BIT), | |
350 | /* buffer already selected */ | |
351 | REQ_F_BUFFER_SELECTED = BIT(REQ_F_BUFFER_SELECTED_BIT), | |
352 | /* buffer selected from ring, needs commit */ | |
353 | REQ_F_BUFFER_RING = BIT(REQ_F_BUFFER_RING_BIT), | |
354 | /* completion is deferred through io_comp_state */ | |
355 | REQ_F_COMPLETE_INLINE = BIT(REQ_F_COMPLETE_INLINE_BIT), | |
356 | /* caller should reissue async */ | |
357 | REQ_F_REISSUE = BIT(REQ_F_REISSUE_BIT), | |
358 | /* supports async reads/writes */ | |
359 | REQ_F_SUPPORT_NOWAIT = BIT(REQ_F_SUPPORT_NOWAIT_BIT), | |
360 | /* regular file */ | |
361 | REQ_F_ISREG = BIT(REQ_F_ISREG_BIT), | |
362 | /* has creds assigned */ | |
363 | REQ_F_CREDS = BIT(REQ_F_CREDS_BIT), | |
364 | /* skip refcounting if not set */ | |
365 | REQ_F_REFCOUNT = BIT(REQ_F_REFCOUNT_BIT), | |
366 | /* there is a linked timeout that has to be armed */ | |
367 | REQ_F_ARM_LTIMEOUT = BIT(REQ_F_ARM_LTIMEOUT_BIT), | |
368 | /* ->async_data allocated */ | |
369 | REQ_F_ASYNC_DATA = BIT(REQ_F_ASYNC_DATA_BIT), | |
370 | /* don't post CQEs while failing linked requests */ | |
371 | REQ_F_SKIP_LINK_CQES = BIT(REQ_F_SKIP_LINK_CQES_BIT), | |
372 | /* single poll may be active */ | |
373 | REQ_F_SINGLE_POLL = BIT(REQ_F_SINGLE_POLL_BIT), | |
374 | /* double poll may active */ | |
375 | REQ_F_DOUBLE_POLL = BIT(REQ_F_DOUBLE_POLL_BIT), | |
376 | /* request has already done partial IO */ | |
377 | REQ_F_PARTIAL_IO = BIT(REQ_F_PARTIAL_IO_BIT), | |
378 | /* fast poll multishot mode */ | |
379 | REQ_F_APOLL_MULTISHOT = BIT(REQ_F_APOLL_MULTISHOT_BIT), | |
380 | /* ->extra1 and ->extra2 are initialised */ | |
381 | REQ_F_CQE32_INIT = BIT(REQ_F_CQE32_INIT_BIT), | |
382 | /* recvmsg special flag, clear EPOLLIN */ | |
383 | REQ_F_CLEAR_POLLIN = BIT(REQ_F_CLEAR_POLLIN_BIT), | |
384 | }; | |
385 | ||
386 | typedef void (*io_req_tw_func_t)(struct io_kiocb *req, bool *locked); | |
387 | ||
388 | struct io_task_work { | |
389 | union { | |
390 | struct io_wq_work_node node; | |
391 | struct llist_node fallback_node; | |
392 | }; | |
393 | io_req_tw_func_t func; | |
394 | }; | |
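
/*
 * A minimal sketch of a callback matching io_req_tw_func_t; the
 * function name is hypothetical. The handler runs in the owning task's
 * context, and *locked indicates whether the task_work runner already
 * holds ctx->uring_lock, so batches can complete under one lock grab:
 *
 *	static void hypothetical_req_tw(struct io_kiocb *req, bool *locked)
 *	{
 *		(complete or retry req, using *locked to pick the path)
 *	}
 *
 *	req->io_task_work.func = hypothetical_req_tw;
 */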
395 | ||
396 | struct io_cqe { | |
397 | __u64 user_data; | |
398 | __s32 res; | |
399 | /* fd initially, then cflags for completion */ | |
400 | union { | |
401 | __u32 flags; | |
402 | int fd; | |
403 | }; | |
404 | }; | |
405 | ||
406 | /* | |
407 | * Each request type overlays its private data structure on top of this one. | |
408 | * They must not exceed this one in size. | |
409 | */ | |
410 | struct io_cmd_data { | |
411 | struct file *file; | |
412 | /* each command gets 56 bytes of data */ | |
413 | __u8 data[56]; | |
414 | }; | |
415 | ||
416 | #define io_kiocb_to_cmd(req) ((void *) &(req)->cmd) | |
417 | #define cmd_to_io_kiocb(ptr) ((struct io_kiocb *) ptr) | |
418 | ||
struct io_kiocb {
	union {
		/*
		 * NOTE! Each of the io_kiocb union members has the file pointer
		 * as the first entry in their struct definition. So you can
		 * access the file pointer through any of the sub-structs,
		 * or directly as just 'file' in this struct.
		 */
		struct file		*file;
		struct io_cmd_data	cmd;
	};

	u8				opcode;
	/* polled IO has completed */
	u8				iopoll_completed;
	/*
	 * Can be either a fixed buffer index, or used with provided buffers.
	 * For the latter, before issue it points to the buffer group ID,
	 * and after selection it points to the buffer ID itself.
	 */
	u16				buf_index;
	unsigned int			flags;

	struct io_cqe			cqe;

	struct io_ring_ctx		*ctx;
	struct task_struct		*task;

	struct io_rsrc_node		*rsrc_node;

	union {
		/* store used ubuf, so we can prevent reloading */
		struct io_mapped_ubuf	*imu;

		/* stores selected buf, valid IFF REQ_F_BUFFER_SELECTED is set */
		struct io_buffer	*kbuf;

		/*
		 * stores buffer ID for ring provided buffers, valid IFF
		 * REQ_F_BUFFER_RING is set.
		 */
		struct io_buffer_list	*buf_list;
	};

	union {
		/* used by request caches, completion batching and iopoll */
		struct io_wq_work_node	comp_list;
		/* cache ->apoll->events */
		__poll_t		apoll_events;
	};
	atomic_t			refs;
	atomic_t			poll_refs;
	struct io_task_work		io_task_work;
	/* for polled requests, i.e. IORING_OP_POLL_ADD and async armed poll */
	union {
		struct hlist_node	hash_node;
		struct {
			u64		extra1;
			u64		extra2;
		};
	};
	/* internal polling, see IORING_FEAT_FAST_POLL */
	struct async_poll		*apoll;
	/* opcode allocated if it needs to store data for async defer */
	void				*async_data;
	/* linked requests, IFF REQ_F_HARDLINK or REQ_F_LINK are set */
	struct io_kiocb			*link;
	/* custom credentials, valid IFF REQ_F_CREDS is set */
	const struct cred		*creds;
	struct io_wq_work		work;
};

#endif