#ifndef IO_URING_TYPES_H
#define IO_URING_TYPES_H

#include <linux/blkdev.h>
#include <linux/task_work.h>
#include <linux/bitmap.h>
#include <linux/llist.h>
#include <uapi/linux/io_uring.h>
enum {
	/*
	 * A hint to not wake right away but delay until there are enough of
	 * tw's queued to match the number of CQEs the task is waiting for.
	 *
	 * Must not be used with requests generating more than one CQE.
	 * It's also ignored unless IORING_SETUP_DEFER_TASKRUN is set.
	 */
	IOU_F_TWQ_LAZY_WAKE		= 1,
};
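/*
 * Illustrative sketch (not from this header): a single-CQE completion path
 * can ask for a lazy wakeup when queueing its task_work, e.g. through the
 * internal helper (signature assumed here):
 *
 *	__io_req_task_work_add(req, IOU_F_TWQ_LAZY_WAKE);
 *
 * A multishot request must use a plain (non-lazy) add instead, since one
 * task_work item may post more than one CQE.
 */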
enum io_uring_cmd_flags {
	IO_URING_F_COMPLETE_DEFER	= 1,
	IO_URING_F_UNLOCKED		= 2,
	/* the request is executed from poll, it should not be freed */
	IO_URING_F_MULTISHOT		= 4,
	/* executed by io-wq */
	IO_URING_F_IOWQ			= 8,
	/* int's last bit, sign checks are usually faster than a bit test */
	IO_URING_F_NONBLOCK		= INT_MIN,

	/* ctx state flags, for URING_CMD */
	IO_URING_F_SQE128		= (1 << 8),
	IO_URING_F_CQE32		= (1 << 9),
	IO_URING_F_IOPOLL		= (1 << 10),

	/* set when uring wants to cancel a previously issued command */
	IO_URING_F_CANCEL		= (1 << 11),
	IO_URING_F_COMPAT		= (1 << 12),
};
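/*
 * Illustrative sketch (not from this header): since IO_URING_F_NONBLOCK sits
 * in the sign bit of an int, an issue path can use a signed comparison in
 * place of a bit test; the two checks below are equivalent:
 *
 *	if (issue_flags & IO_URING_F_NONBLOCK)
 *		return -EAGAIN;
 *
 *	if ((int) issue_flags < 0)
 *		return -EAGAIN;
 */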
struct io_wq_work_node {
	struct io_wq_work_node *next;
};

struct io_wq_work_list {
	struct io_wq_work_node *first;
	struct io_wq_work_node *last;
};

struct io_wq_work {
	struct io_wq_work_node list;
	unsigned flags;
	/* place it here instead of io_kiocb as it fills padding and saves 4B */
	int cancel_seq;
};
struct io_fixed_file {
	/* file * with additional FFS_* flags */
	unsigned long file_ptr;
};
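/*
 * Illustrative sketch (not from this header): the low, alignment-guaranteed
 * bits of file_ptr carry per-file flag bits. With a mask (FFS_MASK here is
 * an assumed name) that selects the pointer bits:
 *
 *	struct file *file = (struct file *)(ff->file_ptr & FFS_MASK);
 *	unsigned long flags = ff->file_ptr & ~FFS_MASK;
 */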
struct io_file_table {
	struct io_fixed_file *files;
	unsigned long *bitmap;
	unsigned int alloc_hint;
};
struct io_hash_bucket {
	spinlock_t		lock;
	struct hlist_head	list;
} ____cacheline_aligned_in_smp;

struct io_hash_table {
	struct io_hash_bucket	*hbs;
	unsigned		hash_bits;
};

/*
 * Arbitrary limit, can be raised if need be
 */
#define IO_RINGFD_REG_MAX 16
struct io_uring_task {
	const struct io_ring_ctx	*last;
	struct file			*registered_rings[IO_RINGFD_REG_MAX];

	struct wait_queue_head		wait;
	atomic_t			inflight_tracked;
	struct percpu_counter		inflight;

	struct { /* task_work */
		struct llist_head	task_list;
		struct callback_head	task_work;
	} ____cacheline_aligned_in_smp;
};
/*
 * This data is shared with the application through the mmap at offsets
 * IORING_OFF_SQ_RING and IORING_OFF_CQ_RING.
 *
 * The offsets to the member fields are published through struct
 * io_sqring_offsets when calling io_uring_setup.
 */
struct io_rings {
	/*
	 * Head and tail offsets into the ring; the offsets need to be
	 * masked to get valid indices.
	 *
	 * The kernel controls head of the sq ring and the tail of the cq ring,
	 * and the application controls tail of the sq ring and the head of the
	 * cq ring.
	 */
	struct io_uring		sq, cq;
	/*
	 * Bitmasks to apply to head and tail offsets (constant, equals
	 * ring_entries - 1)
	 */
	u32			sq_ring_mask, cq_ring_mask;
	/* Ring sizes (constant, power of 2) */
	u32			sq_ring_entries, cq_ring_entries;
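	/*
	 * Illustrative sketch (not from this header): head and tail are free
	 * running; only the masked low bits select a slot. E.g. the next SQ
	 * slot the application fills is:
	 *
	 *	unsigned index = tail & sq_ring_mask;
	 *
	 * and CQ indices are masked with cq_ring_mask in the same way.
	 */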
	/*
	 * Number of invalid entries dropped by the kernel due to
	 * invalid index stored in array
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application (i.e. get number of "new events" by comparing to
	 * cached value).
	 *
	 * After a new SQ head value was read by the application this
	 * counter includes all submissions that were dropped reaching
	 * the new SQ head (and possibly more).
	 */
	u32			sq_dropped;
	/*
	 * Written by the kernel, shouldn't be modified by the
	 * application.
	 *
	 * The application needs a full memory barrier before checking
	 * for IORING_SQ_NEED_WAKEUP after updating the sq tail.
	 */
	atomic_t		sq_flags;
	/*
	 * Written by the application, shouldn't be modified by the
	 * kernel.
	 */
	u32			cq_flags;
	/*
	 * Number of completion events lost because the queue was full;
	 * this should be avoided by the application by making sure
	 * there are not more requests pending than there is space in
	 * the completion queue.
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application (i.e. get number of "new events" by comparing to
	 * cached value).
	 *
	 * As completion events come in out of order this counter is not
	 * ordered with any other data.
	 */
	u32			cq_overflow;
	/*
	 * Ring buffer of completion events.
	 *
	 * The kernel writes completion events fresh every time they are
	 * produced, so the application is allowed to modify pending
	 * entries.
	 */
	struct io_uring_cqe	cqes[] ____cacheline_aligned_in_smp;
};
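/*
 * Illustrative userspace sketch (not from this header) of reaping CQEs, with
 * liburing-style pointers into the mapped CQ ring (khead, ktail, ring_mask,
 * cqes are assumed names; the acquire/release ordering is the part that
 * matters):
 *
 *	unsigned head = *cq->khead;
 *	unsigned tail = smp_load_acquire(cq->ktail);
 *
 *	while (head != tail) {
 *		struct io_uring_cqe *cqe = &cq->cqes[head & *cq->ring_mask];
 *		handle_completion(cqe);
 *		head++;
 *	}
 *	smp_store_release(cq->khead, head);
 */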
struct io_restriction {
	DECLARE_BITMAP(register_op, IORING_REGISTER_LAST);
	DECLARE_BITMAP(sqe_op, IORING_OP_LAST);
	u8 sqe_flags_allowed;
	u8 sqe_flags_required;
	bool registered;
};
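/*
 * Illustrative userspace sketch (not from this header): these bitmaps are
 * filled from io_uring_register(2) with IORING_REGISTER_RESTRICTIONS on a
 * ring created with IORING_SETUP_R_DISABLED, e.g. to allow only reads plus
 * the register op needed to enable the ring (io_uring_register() below
 * stands in for the raw syscall):
 *
 *	struct io_uring_restriction res[] = {
 *		{ .opcode = IORING_RESTRICTION_SQE_OP,
 *		  .sqe_op = IORING_OP_READ },
 *		{ .opcode = IORING_RESTRICTION_REGISTER_OP,
 *		  .register_op = IORING_REGISTER_ENABLE_RINGS },
 *	};
 *
 *	io_uring_register(ring_fd, IORING_REGISTER_RESTRICTIONS, res, 2);
 *	io_uring_register(ring_fd, IORING_REGISTER_ENABLE_RINGS, NULL, 0);
 */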
struct io_submit_link {
	struct io_kiocb *head;
	struct io_kiocb *last;
};
struct io_submit_state {
	/* inline/task_work completion list, under ->uring_lock */
	struct io_wq_work_node	free_list;
	/* batch completion logic */
	struct io_wq_work_list	compl_reqs;
	struct io_submit_link	link;

	unsigned short		submit_nr;
	unsigned int		cqes_count;
	struct blk_plug		plug;
};
struct io_ev_fd {
	struct eventfd_ctx	*cq_ev_fd;
	unsigned int		eventfd_async: 1;
};
struct io_alloc_cache {
	struct io_wq_work_node	list;
	unsigned int		nr_cached;
	unsigned int		max_cached;
};
struct io_ring_ctx {
	/* const or read-mostly hot data */
	struct {
		unsigned int		drain_next: 1;
		unsigned int		restricted: 1;
		unsigned int		off_timeout_used: 1;
		unsigned int		drain_active: 1;
		unsigned int		has_evfd: 1;
		/* all CQEs should be posted only by the submitter task */
		unsigned int		task_complete: 1;
		unsigned int		lockless_cq: 1;
		unsigned int		syscall_iopoll: 1;
		unsigned int		poll_activated: 1;
		unsigned int		drain_disabled: 1;
		unsigned int		compat: 1;

		struct task_struct	*submitter_task;
		struct io_rings		*rings;
		struct percpu_ref	refs;

		enum task_work_notify_mode	notify_method;
	} ____cacheline_aligned_in_smp;
	/* submission data */
	struct {
		struct mutex		uring_lock;

		/*
		 * Ring buffer of indices into array of io_uring_sqe, which is
		 * mmapped by the application using the IORING_OFF_SQES offset.
		 *
		 * This indirection could e.g. be used to assign fixed
		 * io_uring_sqe entries to operations and only submit them to
		 * the queue when needed.
		 *
		 * The kernel modifies neither the indices array nor the entries
		 * array.
		 */
		u32			*sq_array;
		struct io_uring_sqe	*sq_sqes;
		unsigned		cached_sq_head;
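		/*
		 * Illustrative userspace sketch (not from this header): with
		 * the indirection above, SQE slot i can be prepared once and
		 * submitted later by publishing its index at the ring tail
		 * (names follow the SQ ring fields above):
		 *
		 *	sq_array[tail & sq_ring_mask] = i;
		 *	smp_store_release(&sq->tail, tail + 1);
		 */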
		/*
		 * Fixed resources fast path, should be accessed only under
		 * uring_lock, and updated through io_uring_register(2)
		 */
		struct io_rsrc_node	*rsrc_node;
		struct io_file_table	file_table;
		unsigned		nr_user_files;
		unsigned		nr_user_bufs;
		struct io_mapped_ubuf	**user_bufs;

		struct io_submit_state	submit_state;

		struct io_buffer_list	*io_bl;
		struct xarray		io_bl_xa;

		struct io_hash_table	cancel_table_locked;
		struct io_alloc_cache	apoll_cache;
		struct io_alloc_cache	netmsg_cache;

		/*
		 * ->iopoll_list is protected by the ctx->uring_lock for
		 * io_uring instances that don't use IORING_SETUP_SQPOLL.
		 * For SQPOLL, only the single threaded io_sq_thread() will
		 * manipulate the list, hence no extra locking is needed there.
		 */
		struct io_wq_work_list	iopoll_list;
		bool			poll_multi_queue;

		/*
		 * Any cancelable uring_cmd is added to this list in
		 * ->uring_cmd() by io_uring_cmd_insert_cancelable()
		 */
		struct hlist_head	cancelable_uring_cmd;
	} ____cacheline_aligned_in_smp;
	struct {
		/*
		 * We cache a range of free CQEs we can use, once exhausted it
		 * should go through a slower range setup, see __io_get_cqe()
		 */
		struct io_uring_cqe	*cqe_cached;
		struct io_uring_cqe	*cqe_sentinel;

		unsigned		cached_cq_tail;
		struct io_ev_fd	__rcu	*io_ev_fd;
	} ____cacheline_aligned_in_smp;
	/*
	 * task_work and async notification delivery cacheline. Expected to
	 * regularly bounce b/w CPUs.
	 */
	struct {
		struct llist_head	work_llist;
		unsigned long		check_cq;
		atomic_t		cq_timeouts;
		struct wait_queue_head	cq_wait;
	} ____cacheline_aligned_in_smp;
	struct {
		spinlock_t		timeout_lock;
		struct list_head	timeout_list;
		struct list_head	ltimeout_list;
		unsigned		cq_last_tm_flush;
	} ____cacheline_aligned_in_smp;
	struct io_uring_cqe	completion_cqes[16];

	spinlock_t		completion_lock;

	/* IRQ completion list, under ->completion_lock */
	struct io_wq_work_list	locked_free_list;
	unsigned int		locked_free_nr;

	struct list_head	io_buffers_comp;
	struct list_head	cq_overflow_list;
	struct io_hash_table	cancel_table;

	struct hlist_head	waitid_list;

	struct hlist_head	futex_list;
	struct io_alloc_cache	futex_cache;
	const struct cred	*sq_creds;	/* cred used for __io_sq_thread() */
	struct io_sq_data	*sq_data;	/* if using sq thread polling */

	struct wait_queue_head	sqo_sq_wait;
	struct list_head	sqd_list;

	unsigned int		file_alloc_start;
	unsigned int		file_alloc_end;

	struct xarray		personalities;

	struct list_head	io_buffers_cache;

	/* deferred free list, protected by ->uring_lock */
	struct hlist_head	io_buf_list;

	/* Keep this last, we don't need it for the fast path */
	struct wait_queue_head	poll_wq;
	struct io_restriction	restrictions;
	/* slow path rsrc auxiliary data, used by update/register */
	struct io_mapped_ubuf	*dummy_ubuf;
	struct io_rsrc_data	*file_data;
	struct io_rsrc_data	*buf_data;

	/* protected by ->uring_lock */
	struct list_head	rsrc_ref_list;
	struct io_alloc_cache	rsrc_node_cache;
	struct wait_queue_head	rsrc_quiesce_wq;
	unsigned		rsrc_quiesce;

	/* hashed buffered write serialization */
	struct io_wq_hash	*hash_map;

	/* Only used for accounting purposes */
	struct user_struct	*user;
	struct mm_struct	*mm_account;
	/* ctx exit and cancelation */
	struct llist_head	fallback_llist;
	struct delayed_work	fallback_work;
	struct work_struct	exit_work;
	struct list_head	tctx_list;
	struct completion	ref_comp;

	/* io-wq management, e.g. thread count */
	u32			iowq_limits[2];
	bool			iowq_limits_set;

	struct callback_head	poll_wq_task_work;
	struct list_head	defer_list;
	unsigned		sq_thread_idle;
	/* protected by ->completion_lock */
	unsigned		evfd_last_cq_tail;
	/*
	 * If IORING_SETUP_NO_MMAP is used, then the below holds
	 * the gup'ed pages for the two rings, and the sqes.
	 */
	unsigned short		n_ring_pages;
	unsigned short		n_sqe_pages;
	struct page		**ring_pages;
	struct page		**sqe_pages;
};
struct io_tw_state {
	/* ->uring_lock is taken, callbacks can use io_tw_lock to lock it */
	bool locked;
};
enum {
	REQ_F_FIXED_FILE_BIT	= IOSQE_FIXED_FILE_BIT,
	REQ_F_IO_DRAIN_BIT	= IOSQE_IO_DRAIN_BIT,
	REQ_F_LINK_BIT		= IOSQE_IO_LINK_BIT,
	REQ_F_HARDLINK_BIT	= IOSQE_IO_HARDLINK_BIT,
	REQ_F_FORCE_ASYNC_BIT	= IOSQE_ASYNC_BIT,
	REQ_F_BUFFER_SELECT_BIT	= IOSQE_BUFFER_SELECT_BIT,
	REQ_F_CQE_SKIP_BIT	= IOSQE_CQE_SKIP_SUCCESS_BIT,

	/* first byte is taken by user flags, shift it to not overlap */
	REQ_F_FAIL_BIT		= 8,
	REQ_F_INFLIGHT_BIT,
	REQ_F_CUR_POS_BIT,
	REQ_F_NOWAIT_BIT,
	REQ_F_LINK_TIMEOUT_BIT,
	REQ_F_NEED_CLEANUP_BIT,
	REQ_F_POLLED_BIT,
	REQ_F_BUFFER_SELECTED_BIT,
	REQ_F_BUFFER_RING_BIT,
	REQ_F_REISSUE_BIT,
	REQ_F_CREDS_BIT,
	REQ_F_REFCOUNT_BIT,
	REQ_F_ARM_LTIMEOUT_BIT,
	REQ_F_ASYNC_DATA_BIT,
	REQ_F_SKIP_LINK_CQES_BIT,
	REQ_F_SINGLE_POLL_BIT,
	REQ_F_DOUBLE_POLL_BIT,
	REQ_F_PARTIAL_IO_BIT,
	REQ_F_APOLL_MULTISHOT_BIT,
	REQ_F_CLEAR_POLLIN_BIT,
	REQ_F_HASH_LOCKED_BIT,
	/* keep async read/write and isreg together and in order */
	REQ_F_SUPPORT_NOWAIT_BIT,
	REQ_F_ISREG_BIT,
	REQ_F_POLL_NO_LAZY_BIT,

	/* not a real bit, just to check we're not overflowing the space */
	__REQ_F_LAST_BIT,
};
enum {
	/* ctx owns file */
	REQ_F_FIXED_FILE	= BIT(REQ_F_FIXED_FILE_BIT),
	/* drain existing IO first */
	REQ_F_IO_DRAIN		= BIT(REQ_F_IO_DRAIN_BIT),
	/* linked sqes */
	REQ_F_LINK		= BIT(REQ_F_LINK_BIT),
	/* doesn't sever on completion < 0 */
	REQ_F_HARDLINK		= BIT(REQ_F_HARDLINK_BIT),
	/* IOSQE_ASYNC */
	REQ_F_FORCE_ASYNC	= BIT(REQ_F_FORCE_ASYNC_BIT),
	/* IOSQE_BUFFER_SELECT */
	REQ_F_BUFFER_SELECT	= BIT(REQ_F_BUFFER_SELECT_BIT),
	/* IOSQE_CQE_SKIP_SUCCESS */
	REQ_F_CQE_SKIP		= BIT(REQ_F_CQE_SKIP_BIT),

	/* fail rest of links */
	REQ_F_FAIL		= BIT(REQ_F_FAIL_BIT),
	/* on inflight list, should be cancelled and waited on exit reliably */
	REQ_F_INFLIGHT		= BIT(REQ_F_INFLIGHT_BIT),
	/* read/write uses file position */
	REQ_F_CUR_POS		= BIT(REQ_F_CUR_POS_BIT),
	/* must not punt to workers */
	REQ_F_NOWAIT		= BIT(REQ_F_NOWAIT_BIT),
	/* has or had linked timeout */
	REQ_F_LINK_TIMEOUT	= BIT(REQ_F_LINK_TIMEOUT_BIT),
	/* needs cleanup */
	REQ_F_NEED_CLEANUP	= BIT(REQ_F_NEED_CLEANUP_BIT),
	/* already went through poll handler */
	REQ_F_POLLED		= BIT(REQ_F_POLLED_BIT),
	/* buffer already selected */
	REQ_F_BUFFER_SELECTED	= BIT(REQ_F_BUFFER_SELECTED_BIT),
	/* buffer selected from ring, needs commit */
	REQ_F_BUFFER_RING	= BIT(REQ_F_BUFFER_RING_BIT),
	/* caller should reissue async */
	REQ_F_REISSUE		= BIT(REQ_F_REISSUE_BIT),
	/* supports async reads/writes */
	REQ_F_SUPPORT_NOWAIT	= BIT(REQ_F_SUPPORT_NOWAIT_BIT),
	/* regular file */
	REQ_F_ISREG		= BIT(REQ_F_ISREG_BIT),
	/* has creds assigned */
	REQ_F_CREDS		= BIT(REQ_F_CREDS_BIT),
	/* skip refcounting if not set */
	REQ_F_REFCOUNT		= BIT(REQ_F_REFCOUNT_BIT),
	/* there is a linked timeout that has to be armed */
	REQ_F_ARM_LTIMEOUT	= BIT(REQ_F_ARM_LTIMEOUT_BIT),
	/* ->async_data allocated */
	REQ_F_ASYNC_DATA	= BIT(REQ_F_ASYNC_DATA_BIT),
	/* don't post CQEs while failing linked requests */
	REQ_F_SKIP_LINK_CQES	= BIT(REQ_F_SKIP_LINK_CQES_BIT),
	/* single poll may be active */
	REQ_F_SINGLE_POLL	= BIT(REQ_F_SINGLE_POLL_BIT),
	/* double poll may be active */
	REQ_F_DOUBLE_POLL	= BIT(REQ_F_DOUBLE_POLL_BIT),
	/* request has already done partial IO */
	REQ_F_PARTIAL_IO	= BIT(REQ_F_PARTIAL_IO_BIT),
	/* fast poll multishot mode */
	REQ_F_APOLL_MULTISHOT	= BIT(REQ_F_APOLL_MULTISHOT_BIT),
	/* recvmsg special flag, clear EPOLLIN */
	REQ_F_CLEAR_POLLIN	= BIT(REQ_F_CLEAR_POLLIN_BIT),
	/* hashed into ->cancel_hash_locked, protected by ->uring_lock */
	REQ_F_HASH_LOCKED	= BIT(REQ_F_HASH_LOCKED_BIT),
	/* don't use lazy poll wake for this request */
	REQ_F_POLL_NO_LAZY	= BIT(REQ_F_POLL_NO_LAZY_BIT),
};
typedef void (*io_req_tw_func_t)(struct io_kiocb *req, struct io_tw_state *ts);

struct io_task_work {
	struct llist_node node;
	io_req_tw_func_t func;
};
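/*
 * Illustrative sketch (not from this header): task_work callbacks have the
 * io_req_tw_func_t shape and are invoked with the current uring_lock state
 * tracked in 'ts'; the handler name below is hypothetical:
 *
 *	static void io_foo_tw(struct io_kiocb *req, struct io_tw_state *ts)
 *	{
 *		io_tw_lock(req->ctx, ts);
 *		...
 *	}
 */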
struct io_cqe {
	__u64	user_data;
	__s32	res;
	/* fd initially, then cflags for completion */
	union {
		__u32	flags;
		int	fd;
	};
};
/*
 * Each request type overlays its private data structure on top of this one.
 * They must not exceed this one in size.
 */
struct io_cmd_data {
	struct file	*file;
	/* each command gets 56 bytes of data */
	__u8		data[56];
};

static inline void io_kiocb_cmd_sz_check(size_t cmd_sz)
{
	BUILD_BUG_ON(cmd_sz > sizeof(struct io_cmd_data));
}
#define io_kiocb_to_cmd(req, cmd_type) ( \
	io_kiocb_cmd_sz_check(sizeof(cmd_type)) , \
	((cmd_type *)&(req)->cmd) \
)
#define cmd_to_io_kiocb(ptr)	((struct io_kiocb *) ptr)
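/*
 * Illustrative sketch (not from this header): an opcode handler converts the
 * generic request into its per-op view (struct io_foo is hypothetical):
 *
 *	struct io_foo *foo = io_kiocb_to_cmd(req, struct io_foo);
 *
 * io_kiocb_cmd_sz_check() turns an oversized per-op struct into a build
 * failure rather than a silent overlay overflow.
 */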
struct io_kiocb {
	union {
		/*
		 * NOTE! Each of the io_kiocb union members has the file pointer
		 * as the first entry in their struct definition. So you can
		 * access the file pointer through any of the sub-structs,
		 * or directly as just 'file' in this struct.
		 */
		struct file		*file;
		struct io_cmd_data	cmd;
	};
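	/*
	 * Illustrative sketch (not from this header): per-op structs keep the
	 * file pointer first so the union overlay above stays valid, e.g. a
	 * hypothetical op could be laid out as:
	 *
	 *	struct io_foo {
	 *		struct file	*file;
	 *		u64		addr;
	 *		u32		len;
	 *	};
	 */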
	/* polled IO has completed */
	u8				iopoll_completed;
	/*
	 * Can be either a fixed buffer index, or used with provided buffers.
	 * For the latter, before issue it points to the buffer group ID,
	 * and after selection it points to the buffer ID itself.
	 */
	u16				buf_index;
	struct io_ring_ctx		*ctx;
	struct task_struct		*task;

	struct io_rsrc_node		*rsrc_node;

	union {
		/* store used ubuf, so we can prevent reloading */
		struct io_mapped_ubuf	*imu;

		/* stores selected buf, valid IFF REQ_F_BUFFER_SELECTED is set */
		struct io_buffer	*kbuf;

		/*
		 * stores buffer ID for ring provided buffers, valid IFF
		 * REQ_F_BUFFER_RING is set.
		 */
		struct io_buffer_list	*buf_list;
	};
	union {
		/* used by request caches, completion batching and iopoll */
		struct io_wq_work_node	comp_list;
		/* cache ->apoll->events */
		__poll_t		apoll_events;
	};
	struct io_task_work		io_task_work;
	/* for polled requests, i.e. IORING_OP_POLL_ADD and async armed poll */
	struct hlist_node		hash_node;
	/* internal polling, see IORING_FEAT_FAST_POLL */
	struct async_poll		*apoll;
	/* opcode allocated if it needs to store data for async defer */
	void				*async_data;
	/* linked requests, IFF REQ_F_HARDLINK or REQ_F_LINK are set */
	struct io_kiocb			*link;
	/* custom credentials, valid IFF REQ_F_CREDS is set */
	const struct cred		*creds;
	struct io_wq_work		work;
};
struct io_overflow_cqe {
	struct list_head	list;
	struct io_uring_cqe	cqe;
};

#endif