#ifndef IO_URING_TYPES_H
#define IO_URING_TYPES_H

#include <linux/blkdev.h>
#include <linux/task_work.h>
#include <linux/bitmap.h>
#include <uapi/linux/io_uring.h>

#include "io-wq.h"
#include "filetable.h"

struct io_uring {
	u32 head ____cacheline_aligned_in_smp;
	u32 tail ____cacheline_aligned_in_smp;
};

/*
 * This data is shared with the application through the mmap at offsets
 * IORING_OFF_SQ_RING and IORING_OFF_CQ_RING.
 *
 * The offsets to the member fields are published through struct
 * io_sqring_offsets when calling io_uring_setup.
 */
struct io_rings {
	/*
	 * Head and tail offsets into the ring; the offsets need to be
	 * masked to get valid indices.
	 *
	 * The kernel controls head of the sq ring and the tail of the cq ring,
	 * and the application controls tail of the sq ring and the head of the
	 * cq ring.
	 */
	struct io_uring		sq, cq;
	/*
	 * Bitmasks to apply to head and tail offsets (constant, equals
	 * ring_entries - 1)
	 */
	u32			sq_ring_mask, cq_ring_mask;
	/* Ring sizes (constant, power of 2) */
	u32			sq_ring_entries, cq_ring_entries;
	/*
	 * Number of invalid entries dropped by the kernel due to
	 * invalid index stored in array
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application (i.e. get number of "new events" by comparing to
	 * cached value).
	 *
	 * After a new SQ head value was read by the application this
	 * counter includes all submissions that were dropped reaching
	 * the new SQ head (and possibly more).
	 */
	u32			sq_dropped;
	/*
	 * Runtime SQ flags
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application.
	 *
	 * The application needs a full memory barrier before checking
	 * for IORING_SQ_NEED_WAKEUP after updating the sq tail.
	 */
	atomic_t		sq_flags;
	/*
	 * Runtime CQ flags
	 *
	 * Written by the application, shouldn't be modified by the
	 * kernel.
	 */
	u32			cq_flags;
	/*
	 * Number of completion events lost because the queue was full;
	 * this should be avoided by the application by making sure
	 * there are not more requests pending than there is space in
	 * the completion queue.
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application (i.e. get number of "new events" by comparing to
	 * cached value).
	 *
	 * As completion events come in out of order this counter is not
	 * ordered with any other data.
	 */
	u32			cq_overflow;
	/*
	 * Ring buffer of completion events.
	 *
	 * The kernel writes completion events fresh every time they are
	 * produced, so the application is allowed to modify pending
	 * entries.
	 */
	struct io_uring_cqe	cqes[] ____cacheline_aligned_in_smp;
};
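
/*
 * Illustrative sketch only (the helper name is made up, this is not part of
 * this header): how a consumer of the shared rings above reaps CQEs. The
 * tail is read with acquire semantics, indices are masked with
 * cq_ring_mask, and the new head is published with release semantics so
 * the producer can reuse the consumed entries.
 */
static inline unsigned int io_example_reap_cqes(struct io_rings *rings)
{
	u32 head = rings->cq.head;
	u32 tail = smp_load_acquire(&rings->cq.tail);
	unsigned int seen = 0;

	while (head != tail) {
		struct io_uring_cqe *cqe = &rings->cqes[head & rings->cq_ring_mask];

		(void)cqe;	/* a real consumer would act on user_data/res */
		head++;
		seen++;
	}
	smp_store_release(&rings->cq.head, head);
	return seen;
}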

struct io_restriction {
	DECLARE_BITMAP(register_op, IORING_REGISTER_LAST);
	DECLARE_BITMAP(sqe_op, IORING_OP_LAST);
	u8 sqe_flags_allowed;
	u8 sqe_flags_required;
	bool registered;
};

struct io_submit_link {
	struct io_kiocb		*head;
	struct io_kiocb		*last;
};

struct io_submit_state {
	/* inline/task_work completion list, under ->uring_lock */
	struct io_wq_work_node	free_list;
	/* batch completion logic */
	struct io_wq_work_list	compl_reqs;
	struct io_submit_link	link;

	bool			plug_started;
	bool			need_plug;
	unsigned short		submit_nr;
	struct blk_plug		plug;
};

struct io_ev_fd {
	struct eventfd_ctx	*cq_ev_fd;
	unsigned int		eventfd_async: 1;
	struct rcu_head		rcu;
};

struct io_ring_ctx {
	/* const or read-mostly hot data */
	struct {
		struct percpu_ref	refs;

		struct io_rings		*rings;
		unsigned int		flags;
		enum task_work_notify_mode	notify_method;
		unsigned int		compat: 1;
		unsigned int		drain_next: 1;
		unsigned int		restricted: 1;
		unsigned int		off_timeout_used: 1;
		unsigned int		drain_active: 1;
		unsigned int		drain_disabled: 1;
		unsigned int		has_evfd: 1;
		unsigned int		syscall_iopoll: 1;
	} ____cacheline_aligned_in_smp;

	/* submission data */
	struct {
		struct mutex		uring_lock;

		/*
		 * Ring buffer of indices into array of io_uring_sqe, which is
		 * mmapped by the application using the IORING_OFF_SQES offset.
		 *
		 * This indirection could e.g. be used to assign fixed
		 * io_uring_sqe entries to operations and only submit them to
		 * the queue when needed.
		 *
		 * The kernel modifies neither the indices array nor the
		 * entries array.
		 */
		u32			*sq_array;
		struct io_uring_sqe	*sq_sqes;
		unsigned		cached_sq_head;
		unsigned		sq_entries;

		/*
		 * Fixed resources fast path, should be accessed only under
		 * uring_lock, and updated through io_uring_register(2)
		 */
		struct io_rsrc_node	*rsrc_node;
		int			rsrc_cached_refs;
		atomic_t		cancel_seq;
		struct io_file_table	file_table;
		unsigned		nr_user_files;
		unsigned		nr_user_bufs;
		struct io_mapped_ubuf	**user_bufs;

		struct io_submit_state	submit_state;

		struct io_buffer_list	*io_bl;
		struct xarray		io_bl_xa;
		struct list_head	io_buffers_cache;

		struct list_head	cq_overflow_list;
		struct list_head	apoll_cache;
		struct xarray		personalities;
		u32			pers_next;
	} ____cacheline_aligned_in_smp;

	/* IRQ completion list, under ->completion_lock */
	struct io_wq_work_list	locked_free_list;
	unsigned int		locked_free_nr;

	const struct cred	*sq_creds;	/* cred used for __io_sq_thread() */
	struct io_sq_data	*sq_data;	/* if using sq thread polling */

	struct wait_queue_head	sqo_sq_wait;
	struct list_head	sqd_list;

	unsigned long		check_cq;

	struct {
		/*
		 * We cache a range of free CQEs we can use, once exhausted it
		 * should go through a slower range setup, see __io_get_cqe()
		 */
		struct io_uring_cqe	*cqe_cached;
		struct io_uring_cqe	*cqe_sentinel;
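
		/*
		 * Sketch of the fast path implied by the comment above
		 * (illustrative only; see __io_get_cqe() for the real logic):
		 *
		 *	if (ctx->cqe_cached < ctx->cqe_sentinel)
		 *		return ctx->cqe_cached++;
		 *	return __io_get_cqe(ctx);
		 */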

		unsigned		cached_cq_tail;
		unsigned		cq_entries;
		struct io_ev_fd	__rcu	*io_ev_fd;
		struct wait_queue_head	cq_wait;
		unsigned		cq_extra;
	} ____cacheline_aligned_in_smp;

	struct {
		spinlock_t		completion_lock;

		/*
		 * ->iopoll_list is protected by the ctx->uring_lock for
		 * io_uring instances that don't use IORING_SETUP_SQPOLL.
		 * For SQPOLL, only the single threaded io_sq_thread() will
		 * manipulate the list, hence no extra locking is needed there.
		 */
		struct io_wq_work_list	iopoll_list;
		struct io_hash_bucket	*cancel_hash;
		unsigned		cancel_hash_bits;
		bool			poll_multi_queue;

		struct list_head	io_buffers_comp;
	} ____cacheline_aligned_in_smp;

	/* timeouts */
	struct {
		spinlock_t		timeout_lock;
		atomic_t		cq_timeouts;
		struct list_head	timeout_list;
		struct list_head	ltimeout_list;
		unsigned		cq_last_tm_flush;
	} ____cacheline_aligned_in_smp;

	/* Keep this last, we don't need it for the fast path */

	struct io_restriction		restrictions;

	/* slow path rsrc auxiliary data, used by update/register */
	struct io_rsrc_node		*rsrc_backup_node;
	struct io_mapped_ubuf		*dummy_ubuf;
	struct io_rsrc_data		*file_data;
	struct io_rsrc_data		*buf_data;

	struct delayed_work		rsrc_put_work;
	struct llist_head		rsrc_put_llist;
	struct list_head		rsrc_ref_list;
	spinlock_t			rsrc_ref_lock;

	struct list_head		io_buffers_pages;

#if defined(CONFIG_UNIX)
	struct socket			*ring_sock;
#endif
	/* hashed buffered write serialization */
	struct io_wq_hash		*hash_map;

	/* Only used for accounting purposes */
	struct user_struct		*user;
	struct mm_struct		*mm_account;

	/* ctx exit and cancelation */
	struct llist_head		fallback_llist;
	struct delayed_work		fallback_work;
	struct work_struct		exit_work;
	struct list_head		tctx_list;
	struct completion		ref_comp;

	/* io-wq management, e.g. thread count */
	u32				iowq_limits[2];
	bool				iowq_limits_set;

	struct list_head		defer_list;
	unsigned			sq_thread_idle;
};
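
/*
 * Illustrative sketch only (the helper name is made up): the submission-side
 * protocol described by the comments above. The application stores the SQE
 * index through the sq_array indirection at the masked tail slot, publishes
 * the new tail with release semantics, and then needs a full barrier before
 * checking for IORING_SQ_NEED_WAKEUP, as noted on struct io_rings.
 */
static inline bool io_example_submit_one(struct io_rings *rings,
					 u32 *sq_array, u32 sqe_idx)
{
	u32 tail = rings->sq.tail;

	/* publish the SQE index at the masked tail slot */
	sq_array[tail & rings->sq_ring_mask] = sqe_idx;
	/* make the SQE and index visible before moving the tail */
	smp_store_release(&rings->sq.tail, tail + 1);
	/* full barrier before reading sq_flags, per the io_rings comment */
	smp_mb();
	/* true if the caller needs to wake up an idle SQPOLL thread */
	return atomic_read(&rings->sq_flags) & IORING_SQ_NEED_WAKEUP;
}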

enum {
	REQ_F_FIXED_FILE_BIT	= IOSQE_FIXED_FILE_BIT,
	REQ_F_IO_DRAIN_BIT	= IOSQE_IO_DRAIN_BIT,
	REQ_F_LINK_BIT		= IOSQE_IO_LINK_BIT,
	REQ_F_HARDLINK_BIT	= IOSQE_IO_HARDLINK_BIT,
	REQ_F_FORCE_ASYNC_BIT	= IOSQE_ASYNC_BIT,
	REQ_F_BUFFER_SELECT_BIT	= IOSQE_BUFFER_SELECT_BIT,
	REQ_F_CQE_SKIP_BIT	= IOSQE_CQE_SKIP_SUCCESS_BIT,

	/* first byte is taken by user flags, shift it to not overlap */
	REQ_F_FAIL_BIT		= 8,
	REQ_F_INFLIGHT_BIT,
	REQ_F_CUR_POS_BIT,
	REQ_F_NOWAIT_BIT,
	REQ_F_LINK_TIMEOUT_BIT,
	REQ_F_NEED_CLEANUP_BIT,
	REQ_F_POLLED_BIT,
	REQ_F_BUFFER_SELECTED_BIT,
	REQ_F_BUFFER_RING_BIT,
	REQ_F_REISSUE_BIT,
	REQ_F_CREDS_BIT,
	REQ_F_REFCOUNT_BIT,
	REQ_F_ARM_LTIMEOUT_BIT,
	REQ_F_ASYNC_DATA_BIT,
	REQ_F_SKIP_LINK_CQES_BIT,
	REQ_F_SINGLE_POLL_BIT,
	REQ_F_DOUBLE_POLL_BIT,
	REQ_F_PARTIAL_IO_BIT,
	REQ_F_CQE32_INIT_BIT,
	REQ_F_APOLL_MULTISHOT_BIT,
	REQ_F_CLEAR_POLLIN_BIT,
	/* keep async read/write and isreg together and in order */
	REQ_F_SUPPORT_NOWAIT_BIT,
	REQ_F_ISREG_BIT,

	/* not a real bit, just to check we're not overflowing the space */
	__REQ_F_LAST_BIT,
};

enum {
	/* ctx owns file */
	REQ_F_FIXED_FILE	= BIT(REQ_F_FIXED_FILE_BIT),
	/* drain existing IO first */
	REQ_F_IO_DRAIN		= BIT(REQ_F_IO_DRAIN_BIT),
	/* linked sqes */
	REQ_F_LINK		= BIT(REQ_F_LINK_BIT),
	/* doesn't sever on completion < 0 */
	REQ_F_HARDLINK		= BIT(REQ_F_HARDLINK_BIT),
	/* IOSQE_ASYNC */
	REQ_F_FORCE_ASYNC	= BIT(REQ_F_FORCE_ASYNC_BIT),
	/* IOSQE_BUFFER_SELECT */
	REQ_F_BUFFER_SELECT	= BIT(REQ_F_BUFFER_SELECT_BIT),
	/* IOSQE_CQE_SKIP_SUCCESS */
	REQ_F_CQE_SKIP		= BIT(REQ_F_CQE_SKIP_BIT),

	/* fail rest of links */
	REQ_F_FAIL		= BIT(REQ_F_FAIL_BIT),
	/* on inflight list, should be cancelled and waited on exit reliably */
	REQ_F_INFLIGHT		= BIT(REQ_F_INFLIGHT_BIT),
	/* read/write uses file position */
	REQ_F_CUR_POS		= BIT(REQ_F_CUR_POS_BIT),
	/* must not punt to workers */
	REQ_F_NOWAIT		= BIT(REQ_F_NOWAIT_BIT),
	/* has or had linked timeout */
	REQ_F_LINK_TIMEOUT	= BIT(REQ_F_LINK_TIMEOUT_BIT),
	/* needs cleanup */
	REQ_F_NEED_CLEANUP	= BIT(REQ_F_NEED_CLEANUP_BIT),
	/* already went through poll handler */
	REQ_F_POLLED		= BIT(REQ_F_POLLED_BIT),
	/* buffer already selected */
	REQ_F_BUFFER_SELECTED	= BIT(REQ_F_BUFFER_SELECTED_BIT),
	/* buffer selected from ring, needs commit */
	REQ_F_BUFFER_RING	= BIT(REQ_F_BUFFER_RING_BIT),
	/* caller should reissue async */
	REQ_F_REISSUE		= BIT(REQ_F_REISSUE_BIT),
	/* supports async reads/writes */
	REQ_F_SUPPORT_NOWAIT	= BIT(REQ_F_SUPPORT_NOWAIT_BIT),
	/* regular file */
	REQ_F_ISREG		= BIT(REQ_F_ISREG_BIT),
	/* has creds assigned */
	REQ_F_CREDS		= BIT(REQ_F_CREDS_BIT),
	/* skip refcounting if not set */
	REQ_F_REFCOUNT		= BIT(REQ_F_REFCOUNT_BIT),
	/* there is a linked timeout that has to be armed */
	REQ_F_ARM_LTIMEOUT	= BIT(REQ_F_ARM_LTIMEOUT_BIT),
	/* ->async_data allocated */
	REQ_F_ASYNC_DATA	= BIT(REQ_F_ASYNC_DATA_BIT),
	/* don't post CQEs while failing linked requests */
	REQ_F_SKIP_LINK_CQES	= BIT(REQ_F_SKIP_LINK_CQES_BIT),
	/* single poll may be active */
	REQ_F_SINGLE_POLL	= BIT(REQ_F_SINGLE_POLL_BIT),
	/* double poll may be active */
	REQ_F_DOUBLE_POLL	= BIT(REQ_F_DOUBLE_POLL_BIT),
	/* request has already done partial IO */
	REQ_F_PARTIAL_IO	= BIT(REQ_F_PARTIAL_IO_BIT),
	/* fast poll multishot mode */
	REQ_F_APOLL_MULTISHOT	= BIT(REQ_F_APOLL_MULTISHOT_BIT),
	/* ->extra1 and ->extra2 are initialised */
	REQ_F_CQE32_INIT	= BIT(REQ_F_CQE32_INIT_BIT),
	/* recvmsg special flag, clear EPOLLIN */
	REQ_F_CLEAR_POLLIN	= BIT(REQ_F_CLEAR_POLLIN_BIT),
};
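
/*
 * Sketch (illustrative, not from this header): because the low byte of
 * REQ_F_* mirrors the IOSQE_* user flags bit-for-bit, e.g.
 *
 *	BUILD_BUG_ON(REQ_F_LINK != IOSQE_IO_LINK);
 *
 * SQE flags can be folded directly into request flags before the
 * kernel-only bits (REQ_F_FAIL and up) are set.
 */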

typedef void (*io_req_tw_func_t)(struct io_kiocb *req, bool *locked);

struct io_task_work {
	union {
		struct io_wq_work_node	node;
		struct llist_node	fallback_node;
	};
	io_req_tw_func_t		func;
};

struct io_cqe {
	__u64	user_data;
	__s32	res;
	/* fd initially, then cflags for completion */
	union {
		__u32	flags;
		int	fd;
	};
};

/*
 * Each request type overlays its private data structure on top of this one.
 * They must not exceed this one in size.
 */
struct io_cmd_data {
	struct file		*file;
	/* each command gets 56 bytes of data */
	__u8			data[56];
};

#define io_kiocb_to_cmd(req)	((void *) &(req)->cmd)
#define cmd_to_io_kiocb(ptr)	((struct io_kiocb *) ptr)

struct io_kiocb {
	union {
		/*
		 * NOTE! Each of the io_kiocb union members has the file pointer
		 * as the first entry in their struct definition. So you can
		 * access the file pointer through any of the sub-structs,
		 * or directly as just 'file' in this struct.
		 */
		struct file		*file;
		struct io_cmd_data	cmd;
	};

	u8				opcode;
	/* polled IO has completed */
	u8				iopoll_completed;
	/*
	 * Can be either a fixed buffer index, or used with provided buffers.
	 * For the latter, before issue it points to the buffer group ID,
	 * and after selection it points to the buffer ID itself.
	 */
	u16				buf_index;
	unsigned int			flags;

	struct io_cqe			cqe;

	struct io_ring_ctx		*ctx;
	struct task_struct		*task;

	struct io_rsrc_node		*rsrc_node;

	union {
		/* store used ubuf, so we can prevent reloading */
		struct io_mapped_ubuf	*imu;

		/* stores selected buf, valid IFF REQ_F_BUFFER_SELECTED is set */
		struct io_buffer	*kbuf;

		/*
		 * stores buffer ID for ring provided buffers, valid IFF
		 * REQ_F_BUFFER_RING is set.
		 */
		struct io_buffer_list	*buf_list;
	};

	union {
		/* used by request caches, completion batching and iopoll */
		struct io_wq_work_node	comp_list;
		/* cache ->apoll->events */
		__poll_t		apoll_events;
	};
	atomic_t			refs;
	atomic_t			poll_refs;
	struct io_task_work		io_task_work;
	/* for polled requests, i.e. IORING_OP_POLL_ADD and async armed poll */
	union {
		struct hlist_node	hash_node;
		struct {
			u64		extra1;
			u64		extra2;
		};
	};
	/* internal polling, see IORING_FEAT_FAST_POLL */
	struct async_poll		*apoll;
	/* opcode allocated if it needs to store data for async defer */
	void				*async_data;
	/* linked requests, IFF REQ_F_HARDLINK or REQ_F_LINK are set */
	struct io_kiocb			*link;
	/* custom credentials, valid IFF REQ_F_CREDS is set */
	const struct cred		*creds;
	struct io_wq_work		work;
};
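
/*
 * Illustrative sketch only (the op type and helper are made up): how a
 * request type overlays its private data on io_cmd_data via the macros
 * above. The file pointer must stay the first member, per the NOTE in
 * struct io_kiocb, and the overlay must fit within io_cmd_data.
 */
struct io_example_op {
	struct file	*file;	/* must be first to line up with io_kiocb */
	u64		user_value;
};

static inline struct io_example_op *io_example_op_from_req(struct io_kiocb *req)
{
	/* overlays must not exceed io_cmd_data in size */
	BUILD_BUG_ON(sizeof(struct io_example_op) > sizeof(struct io_cmd_data));
	return io_kiocb_to_cmd(req);
}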

struct io_cancel_data {
	struct io_ring_ctx *ctx;
	union {
		u64 data;
		struct file *file;
	};
	u32 flags;
	int seq;
};

struct io_overflow_cqe {
	struct list_head	list;
	struct io_uring_cqe	cqe;
};

struct io_mapped_ubuf {
	u64		ubuf;
	u64		ubuf_end;
	unsigned int	nr_bvecs;
	unsigned long	acct_pages;
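	/*
	 * Flexible array; a sketch (assumption, not from this header) of
	 * how an allocation must size it for nr_pages entries:
	 *
	 *	imu = kvmalloc(struct_size(imu, bvec, nr_pages), GFP_KERNEL);
	 */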
	struct bio_vec	bvec[];
};

#endif