#ifndef IO_URING_TYPES_H
#define IO_URING_TYPES_H

#include <linux/blkdev.h>
#include <linux/task_work.h>
#include <linux/bitmap.h>
#include <uapi/linux/io_uring.h>

#include "filetable.h"

struct io_hash_bucket {
	spinlock_t		lock;
	struct hlist_head	list;
} ____cacheline_aligned_in_smp;

struct io_hash_table {
	struct io_hash_bucket	*hbs;
	unsigned		hash_bits;
};

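/*
 * Illustrative sketch, not part of the kernel sources: how a bucket is
 * picked from an io_hash_table. ->hbs is assumed to hold 1 << hash_bits
 * buckets, so any hash value reduces to a valid index with a simple
 * mask; the helper name is ours.
 */
static inline struct io_hash_bucket *
io_example_hash_bucket(struct io_hash_table *table, unsigned long hash)
{
	/* mask the hash down to a valid bucket index */
	return &table->hbs[hash & ((1UL << table->hash_bits) - 1)];
}
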
struct io_uring {
	u32 head ____cacheline_aligned_in_smp;
	u32 tail ____cacheline_aligned_in_smp;
};

/*
 * This data is shared with the application through the mmap at offsets
 * IORING_OFF_SQ_RING and IORING_OFF_CQ_RING.
 *
 * The offsets to the member fields are published through struct
 * io_sqring_offsets when calling io_uring_setup.
 */
struct io_rings {
	/*
	 * Head and tail offsets into the ring; the offsets need to be
	 * masked to get valid indices.
	 *
	 * The kernel controls head of the sq ring and the tail of the cq ring,
	 * and the application controls tail of the sq ring and the head of the
	 * cq ring.
	 */
	struct io_uring		sq, cq;
	/*
	 * Bitmasks to apply to head and tail offsets (constant, equals
	 * ring_entries - 1)
	 */
	u32			sq_ring_mask, cq_ring_mask;
	/* Ring sizes (constant, power of 2) */
	u32			sq_ring_entries, cq_ring_entries;
	/*
	 * Number of invalid entries dropped by the kernel due to an
	 * invalid index stored in the array.
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application (i.e. get number of "new events" by comparing to
	 * cached value).
	 *
	 * After a new SQ head value was read by the application this
	 * counter includes all submissions that were dropped reaching
	 * the new SQ head (and possibly more).
	 */
	u32			sq_dropped;
	/*
	 * Runtime SQ flags
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application.
	 *
	 * The application needs a full memory barrier before checking
	 * for IORING_SQ_NEED_WAKEUP after updating the sq tail.
	 */
	u32			sq_flags;
	/*
	 * Runtime CQ flags
	 *
	 * Written by the application, shouldn't be modified by the
	 * kernel.
	 */
	u32			cq_flags;
	/*
	 * Number of completion events lost because the queue was full;
	 * this should be avoided by the application by making sure
	 * there are not more requests pending than there is space in
	 * the completion queue.
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application (i.e. get number of "new events" by comparing to
	 * cached value).
	 *
	 * As completion events come in out of order this counter is not
	 * ordered with any other data.
	 */
	u32			cq_overflow;
	/*
	 * Ring buffer of completion events.
	 *
	 * The kernel writes completion events fresh every time they are
	 * produced, so the application is allowed to modify pending
	 * entries.
	 */
	struct io_uring_cqe	cqes[] ____cacheline_aligned_in_smp;
};

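/*
 * Illustrative sketch, not part of the kernel sources: how a free-running
 * head or tail value maps to a CQE slot, per the masking rule documented
 * above. This assumes the default 16-byte CQEs (no IORING_SETUP_CQE32);
 * the helper name is ours.
 */
static inline struct io_uring_cqe *
io_example_cqe_slot(struct io_rings *rings, u32 tail)
{
	/* head/tail are never wrapped by hand; only the mask yields an index */
	return &rings->cqes[tail & rings->cq_ring_mask];
}
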
struct io_restriction {
	DECLARE_BITMAP(register_op, IORING_REGISTER_LAST);
	DECLARE_BITMAP(sqe_op, IORING_OP_LAST);
	u8 sqe_flags_allowed;
	u8 sqe_flags_required;
	bool registered;
};

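/*
 * Illustrative sketch, not part of the kernel sources: the kind of check
 * the fields above imply, in the spirit of io_check_restriction() in
 * io_uring.c. The opcode must be whitelisted in sqe_op, all required
 * flags must be present, and no flag outside the allowed/required sets
 * may be set; the helper name is ours.
 */
static inline bool io_example_restriction_ok(struct io_restriction *r,
					     u8 opcode, u8 sqe_flags)
{
	if (!test_bit(opcode, r->sqe_op))
		return false;
	if ((sqe_flags & r->sqe_flags_required) != r->sqe_flags_required)
		return false;
	return !(sqe_flags & ~(r->sqe_flags_allowed | r->sqe_flags_required));
}
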
struct io_submit_link {
	struct io_kiocb		*head;
	struct io_kiocb		*last;
};

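/*
 * Illustrative sketch, not compilable at this point because struct
 * io_kiocb is only defined further down: during submission a link chain
 * grows by appending to ->last (->link is the per-request chain pointer
 * documented in struct io_kiocb below), roughly:
 *
 *	if (link->head)
 *		link->last->link = req;
 *	else
 *		link->head = req;
 *	link->last = req;
 */
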
struct io_submit_state {
	/* inline/task_work completion list, under ->uring_lock */
	struct io_wq_work_node	free_list;
	/* batch completion logic */
	struct io_wq_work_list	compl_reqs;
	struct io_submit_link	link;

	bool			plug_started;
	bool			need_plug;
	bool			flush_cqes;
	unsigned short		submit_nr;
	struct blk_plug		plug;
};

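/*
 * Illustrative sketch, not part of the kernel sources: the submission
 * path batches block-layer requests under the plug embedded above. The
 * helper name and the elided loop are ours; blk_start_plug() and
 * blk_finish_plug() are the real block-layer API from <linux/blkdev.h>.
 */
static inline void io_example_plugged_submit(struct io_submit_state *state)
{
	blk_start_plug(&state->plug);
	/* ... issue up to state->submit_nr requests here ... */
	blk_finish_plug(&state->plug);
}
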
struct io_ev_fd {
	struct eventfd_ctx	*cq_ev_fd;
	unsigned int		eventfd_async: 1;
	struct rcu_head		rcu;
};

struct io_ring_ctx {
	/* const or read-mostly hot data */
	struct {
		struct percpu_ref	refs;

		struct io_rings		*rings;
		unsigned int		flags;
		enum task_work_notify_mode	notify_method;
		unsigned int		compat: 1;
		unsigned int		drain_next: 1;
		unsigned int		restricted: 1;
		unsigned int		off_timeout_used: 1;
		unsigned int		drain_active: 1;
		unsigned int		drain_disabled: 1;
		unsigned int		has_evfd: 1;
		unsigned int		syscall_iopoll: 1;
	} ____cacheline_aligned_in_smp;

	/* submission data */
	struct {
		struct mutex		uring_lock;

		/*
		 * Ring buffer of indices into array of io_uring_sqe, which is
		 * mmapped by the application using the IORING_OFF_SQES offset.
		 *
		 * This indirection could e.g. be used to assign fixed
		 * io_uring_sqe entries to operations and only submit them to
		 * the queue when needed.
		 *
		 * The kernel modifies neither the indices array nor the entries
		 * array.
		 */
		u32			*sq_array;
		struct io_uring_sqe	*sq_sqes;
		unsigned		cached_sq_head;
		unsigned		sq_entries;

		/*
		 * Fixed resources fast path, should be accessed only under
		 * uring_lock, and updated through io_uring_register(2)
		 */
		struct io_rsrc_node	*rsrc_node;
		int			rsrc_cached_refs;
		atomic_t		cancel_seq;
		struct io_file_table	file_table;
		unsigned		nr_user_files;
		unsigned		nr_user_bufs;
		struct io_mapped_ubuf	**user_bufs;

		struct io_submit_state	submit_state;

		struct io_buffer_list	*io_bl;
		struct xarray		io_bl_xa;
		struct list_head	io_buffers_cache;

		struct list_head	cq_overflow_list;
		struct list_head	apoll_cache;
		struct xarray		personalities;
		u32			pers_next;
	} ____cacheline_aligned_in_smp;

	/* IRQ completion list, under ->completion_lock */
	struct io_wq_work_list	locked_free_list;
	unsigned int		locked_free_nr;

	const struct cred	*sq_creds;	/* cred used for __io_sq_thread() */
	struct io_sq_data	*sq_data;	/* if using sq thread polling */

	struct wait_queue_head	sqo_sq_wait;
	struct list_head	sqd_list;

	unsigned long		check_cq;

	struct {
		/*
		 * We cache a range of free CQEs we can use, once exhausted it
		 * should go through a slower range setup, see __io_get_cqe()
		 */
		struct io_uring_cqe	*cqe_cached;
		struct io_uring_cqe	*cqe_sentinel;

		unsigned		cached_cq_tail;
		unsigned		cq_entries;
		struct io_ev_fd	__rcu	*io_ev_fd;
		struct wait_queue_head	cq_wait;
		unsigned		cq_extra;
	} ____cacheline_aligned_in_smp;

	struct {
		spinlock_t		completion_lock;

		/*
		 * ->iopoll_list is protected by the ctx->uring_lock for
		 * io_uring instances that don't use IORING_SETUP_SQPOLL.
		 * For SQPOLL, only the single threaded io_sq_thread() will
		 * manipulate the list, hence no extra locking is needed there.
		 */
		struct io_wq_work_list	iopoll_list;
		struct io_hash_table	cancel_table;
		bool			poll_multi_queue;

		struct list_head	io_buffers_comp;
	} ____cacheline_aligned_in_smp;

	/* timeouts */
	struct {
		spinlock_t		timeout_lock;
		atomic_t		cq_timeouts;
		struct list_head	timeout_list;
		struct list_head	ltimeout_list;
		unsigned		cq_last_tm_flush;
	} ____cacheline_aligned_in_smp;

	/* Keep this last, we don't need it for the fast path */

	struct io_restriction		restrictions;
	struct task_struct		*submitter_task;

	/* slow path rsrc auxiliary data, used by update/register */
	struct io_rsrc_node		*rsrc_backup_node;
	struct io_mapped_ubuf		*dummy_ubuf;
	struct io_rsrc_data		*file_data;
	struct io_rsrc_data		*buf_data;

	struct delayed_work		rsrc_put_work;
	struct llist_head		rsrc_put_llist;
	struct list_head		rsrc_ref_list;
	spinlock_t			rsrc_ref_lock;

	struct list_head		io_buffers_pages;

#if defined(CONFIG_UNIX)
	struct socket			*ring_sock;
#endif
	/* hashed buffered write serialization */
	struct io_wq_hash		*hash_map;

	/* Only used for accounting purposes */
	struct user_struct		*user;
	struct mm_struct		*mm_account;

	/* ctx exit and cancellation */
	struct llist_head		fallback_llist;
	struct delayed_work		fallback_work;
	struct work_struct		exit_work;
	struct list_head		tctx_list;
	struct completion		ref_comp;

	/* io-wq management, e.g. thread count */
	u32				iowq_limits[2];
	bool				iowq_limits_set;

	struct list_head		defer_list;
	unsigned			sq_thread_idle;
};

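/*
 * Illustrative sketch, not part of the kernel sources: the CQE cache
 * fast path described above. While cqe_cached has not reached
 * cqe_sentinel, posting a completion is a bump of two cursors; the real
 * __io_get_cqe() refills the cached range (and handles ring wrap and
 * CQE32). The helper name is ours.
 */
static inline struct io_uring_cqe *io_example_get_cqe(struct io_ring_ctx *ctx)
{
	if (ctx->cqe_cached < ctx->cqe_sentinel) {
		ctx->cached_cq_tail++;
		return ctx->cqe_cached++;
	}
	return NULL;	/* exhausted: fall back to the slower range setup */
}
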
enum {
	REQ_F_FIXED_FILE_BIT	= IOSQE_FIXED_FILE_BIT,
	REQ_F_IO_DRAIN_BIT	= IOSQE_IO_DRAIN_BIT,
	REQ_F_LINK_BIT		= IOSQE_IO_LINK_BIT,
	REQ_F_HARDLINK_BIT	= IOSQE_IO_HARDLINK_BIT,
	REQ_F_FORCE_ASYNC_BIT	= IOSQE_ASYNC_BIT,
	REQ_F_BUFFER_SELECT_BIT	= IOSQE_BUFFER_SELECT_BIT,
	REQ_F_CQE_SKIP_BIT	= IOSQE_CQE_SKIP_SUCCESS_BIT,

	/* first byte is taken by user flags, shift it to not overlap */
	REQ_F_FAIL_BIT		= 8,
	REQ_F_INFLIGHT_BIT,
	REQ_F_CUR_POS_BIT,
	REQ_F_NOWAIT_BIT,
	REQ_F_LINK_TIMEOUT_BIT,
	REQ_F_NEED_CLEANUP_BIT,
	REQ_F_POLLED_BIT,
	REQ_F_BUFFER_SELECTED_BIT,
	REQ_F_BUFFER_RING_BIT,
	REQ_F_REISSUE_BIT,
	REQ_F_CREDS_BIT,
	REQ_F_REFCOUNT_BIT,
	REQ_F_ARM_LTIMEOUT_BIT,
	REQ_F_ASYNC_DATA_BIT,
	REQ_F_SKIP_LINK_CQES_BIT,
	REQ_F_SINGLE_POLL_BIT,
	REQ_F_DOUBLE_POLL_BIT,
	REQ_F_PARTIAL_IO_BIT,
	REQ_F_CQE32_INIT_BIT,
	REQ_F_APOLL_MULTISHOT_BIT,
	REQ_F_CLEAR_POLLIN_BIT,
	/* keep async read/write and isreg together and in order */
	REQ_F_SUPPORT_NOWAIT_BIT,
	REQ_F_ISREG_BIT,

	/* not a real bit, just to check we're not overflowing the space */
	__REQ_F_LAST_BIT,
};

enum {
	/* ctx owns file */
	REQ_F_FIXED_FILE	= BIT(REQ_F_FIXED_FILE_BIT),
	/* drain existing IO first */
	REQ_F_IO_DRAIN		= BIT(REQ_F_IO_DRAIN_BIT),
	/* linked sqes */
	REQ_F_LINK		= BIT(REQ_F_LINK_BIT),
	/* doesn't sever on completion < 0 */
	REQ_F_HARDLINK		= BIT(REQ_F_HARDLINK_BIT),
	/* IOSQE_ASYNC */
	REQ_F_FORCE_ASYNC	= BIT(REQ_F_FORCE_ASYNC_BIT),
	/* IOSQE_BUFFER_SELECT */
	REQ_F_BUFFER_SELECT	= BIT(REQ_F_BUFFER_SELECT_BIT),
	/* IOSQE_CQE_SKIP_SUCCESS */
	REQ_F_CQE_SKIP		= BIT(REQ_F_CQE_SKIP_BIT),

	/* fail rest of links */
	REQ_F_FAIL		= BIT(REQ_F_FAIL_BIT),
	/* on inflight list, should be cancelled and waited on exit reliably */
	REQ_F_INFLIGHT		= BIT(REQ_F_INFLIGHT_BIT),
	/* read/write uses file position */
	REQ_F_CUR_POS		= BIT(REQ_F_CUR_POS_BIT),
	/* must not punt to workers */
	REQ_F_NOWAIT		= BIT(REQ_F_NOWAIT_BIT),
	/* has or had linked timeout */
	REQ_F_LINK_TIMEOUT	= BIT(REQ_F_LINK_TIMEOUT_BIT),
	/* needs cleanup */
	REQ_F_NEED_CLEANUP	= BIT(REQ_F_NEED_CLEANUP_BIT),
	/* already went through poll handler */
	REQ_F_POLLED		= BIT(REQ_F_POLLED_BIT),
	/* buffer already selected */
	REQ_F_BUFFER_SELECTED	= BIT(REQ_F_BUFFER_SELECTED_BIT),
	/* buffer selected from ring, needs commit */
	REQ_F_BUFFER_RING	= BIT(REQ_F_BUFFER_RING_BIT),
	/* caller should reissue async */
	REQ_F_REISSUE		= BIT(REQ_F_REISSUE_BIT),
	/* supports async reads/writes */
	REQ_F_SUPPORT_NOWAIT	= BIT(REQ_F_SUPPORT_NOWAIT_BIT),
	/* regular file */
	REQ_F_ISREG		= BIT(REQ_F_ISREG_BIT),
	/* has creds assigned */
	REQ_F_CREDS		= BIT(REQ_F_CREDS_BIT),
	/* skip refcounting if not set */
	REQ_F_REFCOUNT		= BIT(REQ_F_REFCOUNT_BIT),
	/* there is a linked timeout that has to be armed */
	REQ_F_ARM_LTIMEOUT	= BIT(REQ_F_ARM_LTIMEOUT_BIT),
	/* ->async_data allocated */
	REQ_F_ASYNC_DATA	= BIT(REQ_F_ASYNC_DATA_BIT),
	/* don't post CQEs while failing linked requests */
	REQ_F_SKIP_LINK_CQES	= BIT(REQ_F_SKIP_LINK_CQES_BIT),
	/* single poll may be active */
	REQ_F_SINGLE_POLL	= BIT(REQ_F_SINGLE_POLL_BIT),
	/* double poll may be active */
	REQ_F_DOUBLE_POLL	= BIT(REQ_F_DOUBLE_POLL_BIT),
	/* request has already done partial IO */
	REQ_F_PARTIAL_IO	= BIT(REQ_F_PARTIAL_IO_BIT),
	/* fast poll multishot mode */
	REQ_F_APOLL_MULTISHOT	= BIT(REQ_F_APOLL_MULTISHOT_BIT),
	/* ->extra1 and ->extra2 are initialised */
	REQ_F_CQE32_INIT	= BIT(REQ_F_CQE32_INIT_BIT),
	/* recvmsg special flag, clear EPOLLIN */
	REQ_F_CLEAR_POLLIN	= BIT(REQ_F_CLEAR_POLLIN_BIT),
};

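/*
 * Illustrative sketch, not part of the kernel sources: REQ_F_* values
 * are plain bits in a request's ->flags word, so state checks are
 * simple mask tests. The helper name is ours.
 */
static inline bool io_example_req_has_buffer(unsigned int req_flags)
{
	/* a selected buffer comes from the legacy list or from a buffer ring */
	return req_flags & (REQ_F_BUFFER_SELECTED | REQ_F_BUFFER_RING);
}
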
typedef void (*io_req_tw_func_t)(struct io_kiocb *req, bool *locked);

struct io_task_work {
	union {
		struct io_wq_work_node	node;
		struct llist_node	fallback_node;
	};
	io_req_tw_func_t		func;
};

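/*
 * Illustrative sketch, not compilable at this point because struct
 * io_kiocb is only defined further down: a request schedules its
 * completion work by filling in io_task_work and queueing it to the
 * owning task, roughly:
 *
 *	req->io_task_work.func = some_tw_handler;
 *	io_req_task_work_add(req);
 *
 * ->node is used on the normal task_work path; ->fallback_node hangs
 * the request off ctx->fallback_llist when the task is exiting (see
 * fallback_work in struct io_ring_ctx above).
 */
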
struct io_cqe {
	__u64	user_data;
	__s32	res;
	/* fd initially, then cflags for completion */
	union {
		__u32	flags;
		int	fd;
	};
};

/*
 * Each request type overlays its private data structure on top of this one.
 * They must not exceed this one in size.
 */
struct io_cmd_data {
	struct file	*file;
	/* each command gets 56 bytes of data */
	__u8		data[56];
};

#define io_kiocb_to_cmd(req)	((void *) &(req)->cmd)
#define cmd_to_io_kiocb(ptr)	((struct io_kiocb *) ptr)

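/*
 * Illustrative sketch, not part of the kernel sources: a hypothetical
 * per-opcode structure overlaid on struct io_cmd_data via
 * io_kiocb_to_cmd(). The file pointer must stay first, see the NOTE in
 * struct io_kiocb below; the struct name and member are ours.
 */
struct io_example_cmd {
	struct file	*file;	/* must be first to alias io_kiocb->file */
	__u32		value;	/* private state, within the 56-byte data area */
};
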
struct io_kiocb {
	union {
		/*
		 * NOTE! Each of the io_kiocb union members has the file pointer
		 * as the first entry in their struct definition. So you can
		 * access the file pointer through any of the sub-structs,
		 * or directly as just 'file' in this struct.
		 */
		struct file		*file;
		struct io_cmd_data	cmd;
	};

	u8				opcode;
	/* polled IO has completed */
	u8				iopoll_completed;
	/*
	 * Can be either a fixed buffer index, or used with provided buffers.
	 * For the latter, before issue it points to the buffer group ID,
	 * and after selection it points to the buffer ID itself.
	 */
	u16				buf_index;
	unsigned int			flags;

	struct io_cqe			cqe;

	struct io_ring_ctx		*ctx;
	struct task_struct		*task;

	struct io_rsrc_node		*rsrc_node;

	union {
		/* store used ubuf, so we can prevent reloading */
		struct io_mapped_ubuf	*imu;

		/* stores selected buf, valid IFF REQ_F_BUFFER_SELECTED is set */
		struct io_buffer	*kbuf;

		/*
		 * stores buffer ID for ring provided buffers, valid IFF
		 * REQ_F_BUFFER_RING is set.
		 */
		struct io_buffer_list	*buf_list;
	};

	union {
		/* used by request caches, completion batching and iopoll */
		struct io_wq_work_node	comp_list;
		/* cache ->apoll->events */
		__poll_t		apoll_events;
	};
	atomic_t			refs;
	atomic_t			poll_refs;
	struct io_task_work		io_task_work;
	/* for polled requests, i.e. IORING_OP_POLL_ADD and async armed poll */
	union {
		struct hlist_node	hash_node;
		struct {
			u64		extra1;
			u64		extra2;
		};
	};
	/* internal polling, see IORING_FEAT_FAST_POLL */
	struct async_poll		*apoll;
	/* opcode allocated if it needs to store data for async defer */
	void				*async_data;
	/* linked requests, IFF REQ_F_HARDLINK or REQ_F_LINK are set */
	struct io_kiocb			*link;
	/* custom credentials, valid IFF REQ_F_CREDS is set */
	const struct cred		*creds;
	struct io_wq_work		work;
};

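/*
 * Illustrative sketch, not part of the kernel sources: with struct
 * io_kiocb now complete, io_kiocb_to_cmd() hands out the request's
 * private command area, here typed as the hypothetical struct
 * io_example_cmd from above. The helper name is ours.
 */
static inline void io_example_prep(struct io_kiocb *req)
{
	struct io_example_cmd *cmd = io_kiocb_to_cmd(req);

	/* the overlay aliases req->file through its leading file pointer */
	cmd->value = 0;
}
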
struct io_cancel_data {
	struct io_ring_ctx *ctx;
	union {
		u64 data;
		struct file *file;
	};
	u32 flags;
	int seq;
};

struct io_overflow_cqe {
	struct list_head	list;
	struct io_uring_cqe	cqe;
};

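/*
 * Illustrative sketch, not part of the kernel sources: when the CQ ring
 * is full, a completion can be parked on ctx->cq_overflow_list and
 * flushed into the ring later. Assumes the caller holds
 * ->completion_lock; the helper name is ours, and the real path also
 * records the overflow condition in ctx->check_cq.
 */
static inline void io_example_queue_overflow(struct io_ring_ctx *ctx,
					     struct io_overflow_cqe *ocqe)
{
	list_add_tail(&ocqe->list, &ctx->cq_overflow_list);
}
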
struct io_mapped_ubuf {
	u64		ubuf;
	u64		ubuf_end;
	unsigned int	nr_bvecs;
	unsigned long	acct_pages;
	struct bio_vec	bvec[];
};

#endif