/* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) OR MIT */
/*
 * Header file for the io_uring interface.
 *
 * Copyright (C) 2019 Jens Axboe
 * Copyright (C) 2019 Christoph Hellwig
 */
#ifndef LINUX_IO_URING_H
#define LINUX_IO_URING_H

#include <linux/fs.h>
#include <linux/types.h>
/*
 * This file is shared with liburing, which has to autodetect whether
 * linux/time_types.h is available. If it is not, liburing can define
 * UAPI_LINUX_IO_URING_H_SKIP_LINUX_TIME_TYPES_H to skip the include below.
 */
#ifndef UAPI_LINUX_IO_URING_H_SKIP_LINUX_TIME_TYPES_H
#include <linux/time_types.h>
#endif

#ifdef __cplusplus
extern "C" {
#endif

/*
 * IO submission data structure (Submission Queue Entry)
 */
struct io_uring_sqe {
	__u8	opcode;		/* type of operation for this sqe */
	__u8	flags;		/* IOSQE_ flags */
	__u16	ioprio;		/* ioprio for the request */
	__s32	fd;		/* file descriptor to do IO on */
	union {
		__u64	off;	/* offset into file */
		__u64	addr2;
		struct {
			__u32	cmd_op;
			__u32	__pad1;
		};
	};
	union {
		__u64	addr;	/* pointer to buffer or iovecs */
		__u64	splice_off_in;
		struct {
			__u32	level;
			__u32	optname;
		};
	};
	__u32	len;		/* buffer size or number of iovecs */
	union {
		__kernel_rwf_t	rw_flags;
		__u32		fsync_flags;
		__u16		poll_events;	/* compatibility */
		__u32		poll32_events;	/* word-reversed for BE */
		__u32		sync_range_flags;
		__u32		msg_flags;
		__u32		timeout_flags;
		__u32		accept_flags;
		__u32		cancel_flags;
		__u32		open_flags;
		__u32		statx_flags;
		__u32		fadvise_advice;
		__u32		splice_flags;
		__u32		rename_flags;
		__u32		unlink_flags;
		__u32		hardlink_flags;
		__u32		xattr_flags;
		__u32		msg_ring_flags;
		__u32		uring_cmd_flags;
		__u32		waitid_flags;
		__u32		futex_flags;
		__u32		install_fd_flags;
		__u32		nop_flags;
		__u32		pipe_flags;
	};
	__u64	user_data;	/* data to be passed back at completion time */
	/* pack this to avoid bogus arm OABI complaints */
	union {
		/* index into fixed buffers, if used */
		__u16	buf_index;
		/* for grouped buffer selection */
		__u16	buf_group;
	} __attribute__((packed));
	/* personality to use, if used */
	__u16	personality;
	union {
		__s32	splice_fd_in;
		__u32	file_index;
		__u32	zcrx_ifq_idx;
		__u32	optlen;
		struct {
			__u16	addr_len;
			__u16	__pad3[1];
		};
		struct {
			__u8	write_stream;
			__u8	__pad4[3];
		};
	};
	union {
		struct {
			__u64	addr3;
			__u64	__pad2[1];
		};
		struct {
			__u64	attr_ptr; /* pointer to attribute information */
			__u64	attr_type_mask; /* bit mask of attributes */
		};
		__u64	optval;
		/*
		 * If the ring is initialized with IORING_SETUP_SQE128, then
		 * this field is used for 80 bytes of arbitrary command data
		 */
		__u8	cmd[0];
	};
};
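
/*
 * Example (not part of the UAPI): a minimal sketch of filling in an SQE
 * for a read, assuming "sqe" points at a zeroed entry in the mapped SQ
 * ring and that fd, buf, len and offset are supplied by the caller.
 *
 *	sqe->opcode	= IORING_OP_READ;
 *	sqe->fd		= fd;
 *	sqe->addr	= (unsigned long) buf;
 *	sqe->len	= len;
 *	sqe->off	= offset;
 *	sqe->user_data	= 0xcafe;	// echoed back in cqe->user_data
 */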

/* sqe->attr_type_mask flags */
#define IORING_RW_ATTR_FLAG_PI	(1U << 0)
/* PI attribute information */
struct io_uring_attr_pi {
	__u16	flags;
	__u16	app_tag;
	__u32	len;
	__u64	addr;
	__u64	seed;
	__u64	rsvd;
};

/*
 * If sqe->file_index is set to this for opcodes that instantiate a new
 * direct descriptor (like openat/openat2/accept), then io_uring will allocate
 * an available direct descriptor instead of having the application pass one
 * in. The picked direct descriptor will be returned in cqe->res, or -ENFILE
 * if the space is full.
 */
#define IORING_FILE_INDEX_ALLOC		(~0U)
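
/*
 * Example (not part of the UAPI): a sketch of letting io_uring pick a free
 * direct-descriptor slot for an accept, assuming a fixed file table was
 * registered beforehand. The allocated slot comes back in cqe->res.
 *
 *	sqe->opcode	= IORING_OP_ACCEPT;
 *	sqe->fd		= listen_fd;		// listen_fd: caller-supplied
 *	sqe->file_index	= IORING_FILE_INDEX_ALLOC;
 */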

enum io_uring_sqe_flags_bit {
	IOSQE_FIXED_FILE_BIT,
	IOSQE_IO_DRAIN_BIT,
	IOSQE_IO_LINK_BIT,
	IOSQE_IO_HARDLINK_BIT,
	IOSQE_ASYNC_BIT,
	IOSQE_BUFFER_SELECT_BIT,
	IOSQE_CQE_SKIP_SUCCESS_BIT,
};

/*
 * sqe->flags
 */
/* use fixed fileset */
#define IOSQE_FIXED_FILE	(1U << IOSQE_FIXED_FILE_BIT)
/* issue after inflight IO */
#define IOSQE_IO_DRAIN		(1U << IOSQE_IO_DRAIN_BIT)
/* links next sqe */
#define IOSQE_IO_LINK		(1U << IOSQE_IO_LINK_BIT)
/* like LINK, but stronger */
#define IOSQE_IO_HARDLINK	(1U << IOSQE_IO_HARDLINK_BIT)
/* always go async */
#define IOSQE_ASYNC		(1U << IOSQE_ASYNC_BIT)
/* select buffer from sqe->buf_group */
#define IOSQE_BUFFER_SELECT	(1U << IOSQE_BUFFER_SELECT_BIT)
/* don't post CQE if request succeeded */
#define IOSQE_CQE_SKIP_SUCCESS	(1U << IOSQE_CQE_SKIP_SUCCESS_BIT)
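
/*
 * Example (not part of the UAPI): a sketch of chaining two SQEs so the
 * fsync is only issued after the write completes. Every SQE in a chain
 * except the last one carries IOSQE_IO_LINK.
 *
 *	write_sqe->opcode = IORING_OP_WRITE;
 *	write_sqe->flags |= IOSQE_IO_LINK;	// next SQE depends on this one
 *
 *	fsync_sqe->opcode = IORING_OP_FSYNC;	// runs only if the write succeeds
 */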

/*
 * io_uring_setup() flags
 */
#define IORING_SETUP_IOPOLL	(1U << 0)	/* io_context is polled */
#define IORING_SETUP_SQPOLL	(1U << 1)	/* SQ poll thread */
#define IORING_SETUP_SQ_AFF	(1U << 2)	/* sq_thread_cpu is valid */
#define IORING_SETUP_CQSIZE	(1U << 3)	/* app defines CQ size */
#define IORING_SETUP_CLAMP	(1U << 4)	/* clamp SQ/CQ ring sizes */
#define IORING_SETUP_ATTACH_WQ	(1U << 5)	/* attach to existing wq */
#define IORING_SETUP_R_DISABLED	(1U << 6)	/* start with ring disabled */
#define IORING_SETUP_SUBMIT_ALL	(1U << 7)	/* continue submit on error */
/*
 * Cooperative task running. When requests complete, they often require
 * forcing the submitter to transition to the kernel to complete. If this
 * flag is set, work will be done when the task transitions anyway, rather
 * than force an inter-processor interrupt reschedule. This avoids interrupting
 * a task running in userspace, and saves an IPI.
 */
#define IORING_SETUP_COOP_TASKRUN	(1U << 8)
/*
 * If COOP_TASKRUN is set, get notified if task work is available for
 * running and a kernel transition would be needed to run it. This sets
 * IORING_SQ_TASKRUN in the sq ring flags. Not valid with
 * IORING_SETUP_SQPOLL.
 */
#define IORING_SETUP_TASKRUN_FLAG	(1U << 9)
#define IORING_SETUP_SQE128		(1U << 10) /* SQEs are 128 byte */
#define IORING_SETUP_CQE32		(1U << 11) /* CQEs are 32 byte */
/*
 * Only one task is allowed to submit requests
 */
#define IORING_SETUP_SINGLE_ISSUER	(1U << 12)

/*
 * Defer running task work to get events.
 * Rather than running bits of task work whenever the task transitions,
 * try to do it just before it is needed.
 */
#define IORING_SETUP_DEFER_TASKRUN	(1U << 13)

/*
 * Application provides the memory for the rings
 */
#define IORING_SETUP_NO_MMAP		(1U << 14)

/*
 * Register the ring fd in itself for use with
 * IORING_REGISTER_USE_REGISTERED_RING; return a registered fd index rather
 * than an fd.
 */
#define IORING_SETUP_REGISTERED_FD_ONLY	(1U << 15)

/*
 * Removes indirection through the SQ index array.
 */
#define IORING_SETUP_NO_SQARRAY		(1U << 16)

/* Use hybrid poll in iopoll process */
#define IORING_SETUP_HYBRID_IOPOLL	(1U << 17)

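/*
 * Example (not part of the UAPI): a sketch of a commonly combined pair of
 * setup flags for a single-threaded submitter. Note that the kernel
 * rejects IORING_SETUP_DEFER_TASKRUN without IORING_SETUP_SINGLE_ISSUER.
 *
 *	struct io_uring_params p = { 0 };
 *
 *	p.flags = IORING_SETUP_SINGLE_ISSUER | IORING_SETUP_DEFER_TASKRUN;
 *	int ring_fd = syscall(__NR_io_uring_setup, 256, &p);
 */
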
enum io_uring_op {
	IORING_OP_NOP,
	IORING_OP_READV,
	IORING_OP_WRITEV,
	IORING_OP_FSYNC,
	IORING_OP_READ_FIXED,
	IORING_OP_WRITE_FIXED,
	IORING_OP_POLL_ADD,
	IORING_OP_POLL_REMOVE,
	IORING_OP_SYNC_FILE_RANGE,
	IORING_OP_SENDMSG,
	IORING_OP_RECVMSG,
	IORING_OP_TIMEOUT,
	IORING_OP_TIMEOUT_REMOVE,
	IORING_OP_ACCEPT,
	IORING_OP_ASYNC_CANCEL,
	IORING_OP_LINK_TIMEOUT,
	IORING_OP_CONNECT,
	IORING_OP_FALLOCATE,
	IORING_OP_OPENAT,
	IORING_OP_CLOSE,
	IORING_OP_FILES_UPDATE,
	IORING_OP_STATX,
	IORING_OP_READ,
	IORING_OP_WRITE,
	IORING_OP_FADVISE,
	IORING_OP_MADVISE,
	IORING_OP_SEND,
	IORING_OP_RECV,
	IORING_OP_OPENAT2,
	IORING_OP_EPOLL_CTL,
	IORING_OP_SPLICE,
	IORING_OP_PROVIDE_BUFFERS,
	IORING_OP_REMOVE_BUFFERS,
	IORING_OP_TEE,
	IORING_OP_SHUTDOWN,
	IORING_OP_RENAMEAT,
	IORING_OP_UNLINKAT,
	IORING_OP_MKDIRAT,
	IORING_OP_SYMLINKAT,
	IORING_OP_LINKAT,
	IORING_OP_MSG_RING,
	IORING_OP_FSETXATTR,
	IORING_OP_SETXATTR,
	IORING_OP_FGETXATTR,
	IORING_OP_GETXATTR,
	IORING_OP_SOCKET,
	IORING_OP_URING_CMD,
	IORING_OP_SEND_ZC,
	IORING_OP_SENDMSG_ZC,
	IORING_OP_READ_MULTISHOT,
	IORING_OP_WAITID,
	IORING_OP_FUTEX_WAIT,
	IORING_OP_FUTEX_WAKE,
	IORING_OP_FUTEX_WAITV,
	IORING_OP_FIXED_FD_INSTALL,
	IORING_OP_FTRUNCATE,
	IORING_OP_BIND,
	IORING_OP_LISTEN,
	IORING_OP_RECV_ZC,
	IORING_OP_EPOLL_WAIT,
	IORING_OP_READV_FIXED,
	IORING_OP_WRITEV_FIXED,
	IORING_OP_PIPE,

	/* this goes last, obviously */
	IORING_OP_LAST,
};

/*
 * sqe->uring_cmd_flags		top 8bits aren't available for userspace
 * IORING_URING_CMD_FIXED	use registered buffer; pass this flag
 *				along with setting sqe->buf_index.
 */
#define IORING_URING_CMD_FIXED	(1U << 0)
#define IORING_URING_CMD_MASK	IORING_URING_CMD_FIXED

/*
 * sqe->fsync_flags
 */
#define IORING_FSYNC_DATASYNC	(1U << 0)

/*
 * sqe->timeout_flags
 */
#define IORING_TIMEOUT_ABS		(1U << 0)
#define IORING_TIMEOUT_UPDATE		(1U << 1)
#define IORING_TIMEOUT_BOOTTIME		(1U << 2)
#define IORING_TIMEOUT_REALTIME		(1U << 3)
#define IORING_LINK_TIMEOUT_UPDATE	(1U << 4)
#define IORING_TIMEOUT_ETIME_SUCCESS	(1U << 5)
#define IORING_TIMEOUT_MULTISHOT	(1U << 6)
#define IORING_TIMEOUT_CLOCK_MASK	(IORING_TIMEOUT_BOOTTIME | IORING_TIMEOUT_REALTIME)
#define IORING_TIMEOUT_UPDATE_MASK	(IORING_TIMEOUT_UPDATE | IORING_LINK_TIMEOUT_UPDATE)
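
/*
 * Example (not part of the UAPI): a sketch of a relative 2.5 second
 * timeout request. sqe->addr points at a single struct __kernel_timespec
 * and sqe->len must be 1; OR in IORING_TIMEOUT_ABS for an absolute time.
 *
 *	struct __kernel_timespec ts = { .tv_sec = 2, .tv_nsec = 500000000 };
 *
 *	sqe->opcode	= IORING_OP_TIMEOUT;
 *	sqe->addr	= (unsigned long) &ts;
 *	sqe->len	= 1;
 */
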
/*
 * sqe->splice_flags
 * extends splice(2) flags
 */
#define SPLICE_F_FD_IN_FIXED	(1U << 31) /* the last bit of __u32 */

/*
 * POLL_ADD flags. Note that since sqe->poll_events is the flag space, the
 * command flags for POLL_ADD are stored in sqe->len.
 *
 * IORING_POLL_ADD_MULTI	Multishot poll. Sets IORING_CQE_F_MORE if
 *				the poll handler will continue to report
 *				CQEs on behalf of the same SQE.
 *
 * IORING_POLL_UPDATE		Update existing poll request, matching
 *				sqe->addr as the old user_data field.
 *
 * IORING_POLL_LEVEL		Level triggered poll.
 */
#define IORING_POLL_ADD_MULTI		(1U << 0)
#define IORING_POLL_UPDATE_EVENTS	(1U << 1)
#define IORING_POLL_UPDATE_USER_DATA	(1U << 2)
#define IORING_POLL_ADD_LEVEL		(1U << 3)

/*
 * ASYNC_CANCEL flags.
 *
 * IORING_ASYNC_CANCEL_ALL	Cancel all requests that match the given key
 * IORING_ASYNC_CANCEL_FD	Key off 'fd' for cancelation rather than the
 *				request 'user_data'
 * IORING_ASYNC_CANCEL_ANY	Match any request
 * IORING_ASYNC_CANCEL_FD_FIXED	'fd' passed in is a fixed descriptor
 * IORING_ASYNC_CANCEL_USERDATA	Match on user_data, default for no other key
 * IORING_ASYNC_CANCEL_OP	Match request based on opcode
 */
#define IORING_ASYNC_CANCEL_ALL		(1U << 0)
#define IORING_ASYNC_CANCEL_FD		(1U << 1)
#define IORING_ASYNC_CANCEL_ANY		(1U << 2)
#define IORING_ASYNC_CANCEL_FD_FIXED	(1U << 3)
#define IORING_ASYNC_CANCEL_USERDATA	(1U << 4)
#define IORING_ASYNC_CANCEL_OP		(1U << 5)

/*
 * send/sendmsg and recv/recvmsg flags (sqe->ioprio)
 *
 * IORING_RECVSEND_POLL_FIRST	If set, instead of first attempting to send
 *				or receive and arm poll if that yields an
 *				-EAGAIN result, arm poll upfront and skip
 *				the initial transfer attempt.
 *
 * IORING_RECV_MULTISHOT	Multishot recv. Sets IORING_CQE_F_MORE if
 *				the handler will continue to report
 *				CQEs on behalf of the same SQE.
 *
 * IORING_RECVSEND_FIXED_BUF	Use registered buffers, the index is stored in
 *				the buf_index field.
 *
 * IORING_SEND_ZC_REPORT_USAGE
 *				If set, SEND[MSG]_ZC should report
 *				the zerocopy usage in cqe.res
 *				for the IORING_CQE_F_NOTIF cqe.
 *				0 is reported if zerocopy was actually possible.
 *				IORING_NOTIF_USAGE_ZC_COPIED if data was copied
 *				(at least partially).
 *
 * IORING_RECVSEND_BUNDLE	Used with IOSQE_BUFFER_SELECT. If set, send or
 *				recv will grab as many buffers as are available
 *				from the given buffer group ID and send them
 *				all. The completion result will be the number
 *				of buffers sent, with the starting buffer ID in
 *				cqe->flags as per usual for provided buffer
 *				usage. The buffers will be contiguous from the
 *				starting buffer ID.
 */
#define IORING_RECVSEND_POLL_FIRST	(1U << 0)
#define IORING_RECV_MULTISHOT		(1U << 1)
#define IORING_RECVSEND_FIXED_BUF	(1U << 2)
#define IORING_SEND_ZC_REPORT_USAGE	(1U << 3)
#define IORING_RECVSEND_BUNDLE		(1U << 4)
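
/*
 * Example (not part of the UAPI): a sketch of a multishot receive using
 * provided buffers from buffer group 7. Each chunk of arriving data posts
 * a CQE with IORING_CQE_F_MORE set while the request stays armed.
 *
 *	sqe->opcode	 = IORING_OP_RECV;
 *	sqe->fd		 = sock_fd;		// sock_fd: caller-supplied
 *	sqe->flags	|= IOSQE_BUFFER_SELECT;
 *	sqe->buf_group	 = 7;
 *	sqe->ioprio	|= IORING_RECV_MULTISHOT;	// flag lives in sqe->ioprio
 */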

/*
 * cqe.res for IORING_CQE_F_NOTIF if
 * IORING_SEND_ZC_REPORT_USAGE was requested
 *
 * It should be treated as a flag, all other
 * bits of cqe.res should be treated as reserved!
 */
#define IORING_NOTIF_USAGE_ZC_COPIED	(1U << 31)

/*
 * accept flags stored in sqe->ioprio
 */
#define IORING_ACCEPT_MULTISHOT		(1U << 0)
#define IORING_ACCEPT_DONTWAIT		(1U << 1)
#define IORING_ACCEPT_POLL_FIRST	(1U << 2)

/*
 * IORING_OP_MSG_RING command types, stored in sqe->addr
 */
enum io_uring_msg_ring_flags {
	IORING_MSG_DATA,	/* pass sqe->len as 'res' and off as user_data */
	IORING_MSG_SEND_FD,	/* send a registered fd to another ring */
};

/*
 * IORING_OP_MSG_RING flags (sqe->msg_ring_flags)
 *
 * IORING_MSG_RING_CQE_SKIP	Don't post a CQE to the target ring. Not
 *				applicable for IORING_MSG_DATA, obviously.
 */
#define IORING_MSG_RING_CQE_SKIP	(1U << 0)
/* Pass through the flags from sqe->file_index to cqe->flags */
#define IORING_MSG_RING_FLAGS_PASS	(1U << 1)

/*
 * IORING_OP_FIXED_FD_INSTALL flags (sqe->install_fd_flags)
 *
 * IORING_FIXED_FD_NO_CLOEXEC	Don't mark the fd as O_CLOEXEC
 */
#define IORING_FIXED_FD_NO_CLOEXEC	(1U << 0)

/*
 * IORING_OP_NOP flags (sqe->nop_flags)
 *
 * IORING_NOP_INJECT_RESULT	Inject result from sqe->result
 */
#define IORING_NOP_INJECT_RESULT	(1U << 0)
#define IORING_NOP_FILE			(1U << 1)
#define IORING_NOP_FIXED_FILE		(1U << 2)
#define IORING_NOP_FIXED_BUFFER		(1U << 3)

/*
 * IO completion data structure (Completion Queue Entry)
 */
struct io_uring_cqe {
	__u64	user_data;	/* sqe->user_data value passed back */
	__s32	res;		/* result code for this event */
	__u32	flags;

	/*
	 * If the ring is initialized with IORING_SETUP_CQE32, then this field
	 * contains 16 bytes of padding, doubling the size of the CQE.
	 */
	__u64	big_cqe[];
};

/*
 * cqe->flags
 *
 * IORING_CQE_F_BUFFER	If set, the upper 16 bits are the buffer ID
 * IORING_CQE_F_MORE	If set, parent SQE will generate more CQE entries
 * IORING_CQE_F_SOCK_NONEMPTY	If set, more data to read after socket recv
 * IORING_CQE_F_NOTIF	Set for notification CQEs. Can be used to distinguish
 *			them from sends.
 * IORING_CQE_F_BUF_MORE If set, the buffer ID set in the completion will get
 *			more completions. In other words, the buffer is being
 *			partially consumed, and will be used by the kernel for
 *			more completions. This is only set for buffers used via
 *			the incremental buffer consumption, as provided by
 *			a ring buffer setup with IOU_PBUF_RING_INC. For any
 *			other provided buffer type, a buffer passed back in a
 *			completion is automatically returned to the
 *			application.
 */
#define IORING_CQE_F_BUFFER		(1U << 0)
#define IORING_CQE_F_MORE		(1U << 1)
#define IORING_CQE_F_SOCK_NONEMPTY	(1U << 2)
#define IORING_CQE_F_NOTIF		(1U << 3)
#define IORING_CQE_F_BUF_MORE		(1U << 4)

#define IORING_CQE_BUFFER_SHIFT		16
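
/*
 * Example (not part of the UAPI): a sketch of decoding a completion that
 * used a provided buffer. When IORING_CQE_F_BUFFER is set, the buffer ID
 * is carried in the upper 16 bits of cqe->flags.
 *
 *	if (cqe->flags & IORING_CQE_F_BUFFER) {
 *		unsigned int bid = cqe->flags >> IORING_CQE_BUFFER_SHIFT;
 *		// buffer 'bid' now belongs to the application again
 *	}
 */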

/*
 * Magic offsets for the application to mmap the data it needs
 */
#define IORING_OFF_SQ_RING		0ULL
#define IORING_OFF_CQ_RING		0x8000000ULL
#define IORING_OFF_SQES			0x10000000ULL
#define IORING_OFF_PBUF_RING		0x80000000ULL
#define IORING_OFF_PBUF_SHIFT		16
#define IORING_OFF_MMAP_MASK		0xf8000000ULL
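
/*
 * Example (not part of the UAPI): a sketch of mapping the SQ ring after
 * io_uring_setup(), assuming ring_fd and the copied-back params "p". With
 * IORING_FEAT_SINGLE_MMAP, the same mapping also covers the CQ ring.
 *
 *	size_t sq_sz = p.sq_off.array + p.sq_entries * sizeof(__u32);
 *
 *	void *sq_ptr = mmap(NULL, sq_sz, PROT_READ | PROT_WRITE,
 *			    MAP_SHARED | MAP_POPULATE, ring_fd,
 *			    IORING_OFF_SQ_RING);
 */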

/*
 * Filled with the offset for mmap(2)
 */
struct io_sqring_offsets {
	__u32	head;
	__u32	tail;
	__u32	ring_mask;
	__u32	ring_entries;
	__u32	flags;
	__u32	dropped;
	__u32	array;
	__u32	resv1;
	__u64	user_addr;
};

/*
 * sq_ring->flags
 */
#define IORING_SQ_NEED_WAKEUP	(1U << 0) /* needs io_uring_enter wakeup */
#define IORING_SQ_CQ_OVERFLOW	(1U << 1) /* CQ ring has overflowed */
#define IORING_SQ_TASKRUN	(1U << 2) /* task should enter the kernel */
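
/*
 * Example (not part of the UAPI): a sketch of the wakeup check an SQPOLL
 * application makes after queueing SQEs, assuming sq_flags points at the
 * mapped flags word (sq_off.flags); real code must use an atomic load.
 *
 *	if (*sq_flags & IORING_SQ_NEED_WAKEUP)	// poll thread went to sleep
 *		syscall(__NR_io_uring_enter, ring_fd, 0, 0,
 *			IORING_ENTER_SQ_WAKEUP, NULL, 0);
 */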

struct io_cqring_offsets {
	__u32	head;
	__u32	tail;
	__u32	ring_mask;
	__u32	ring_entries;
	__u32	overflow;
	__u32	cqes;
	__u32	flags;
	__u32	resv1;
	__u64	user_addr;
};

/*
 * cq_ring->flags
 */

/* disable eventfd notifications */
#define IORING_CQ_EVENTFD_DISABLED	(1U << 0)

/*
 * io_uring_enter(2) flags
 */
#define IORING_ENTER_GETEVENTS		(1U << 0)
#define IORING_ENTER_SQ_WAKEUP		(1U << 1)
#define IORING_ENTER_SQ_WAIT		(1U << 2)
#define IORING_ENTER_EXT_ARG		(1U << 3)
#define IORING_ENTER_REGISTERED_RING	(1U << 4)
#define IORING_ENTER_ABS_TIMER		(1U << 5)
#define IORING_ENTER_EXT_ARG_REG	(1U << 6)
#define IORING_ENTER_NO_IOWAIT		(1U << 7)
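
/*
 * Example (not part of the UAPI): io_uring_enter(2) has no glibc wrapper,
 * so a raw syscall sketch is shown. This submits "n" queued SQEs and
 * waits for at least one completion in the same call.
 *
 *	int ret = syscall(__NR_io_uring_enter, ring_fd, n, 1,
 *			  IORING_ENTER_GETEVENTS, NULL, 0);
 */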

/*
 * Passed in for io_uring_setup(2). Copied back with updated info on success
 */
struct io_uring_params {
	__u32 sq_entries;
	__u32 cq_entries;
	__u32 flags;
	__u32 sq_thread_cpu;
	__u32 sq_thread_idle;
	__u32 features;
	__u32 wq_fd;
	__u32 resv[3];
	struct io_sqring_offsets sq_off;
	struct io_cqring_offsets cq_off;
};
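
/*
 * Example (not part of the UAPI): a sketch of ring creation via the raw
 * syscall. Only flags and the sizing fields are inputs; the kernel copies
 * back sq_off/cq_off and the IORING_FEAT_* bits it supports.
 *
 *	struct io_uring_params p = { 0 };
 *
 *	int ring_fd = syscall(__NR_io_uring_setup, 256, &p);
 *	// on success, p.sq_off/p.cq_off describe the mmap layout
 */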

/*
 * io_uring_params->features flags
 */
#define IORING_FEAT_SINGLE_MMAP		(1U << 0)
#define IORING_FEAT_NODROP		(1U << 1)
#define IORING_FEAT_SUBMIT_STABLE	(1U << 2)
#define IORING_FEAT_RW_CUR_POS		(1U << 3)
#define IORING_FEAT_CUR_PERSONALITY	(1U << 4)
#define IORING_FEAT_FAST_POLL		(1U << 5)
#define IORING_FEAT_POLL_32BITS		(1U << 6)
#define IORING_FEAT_SQPOLL_NONFIXED	(1U << 7)
#define IORING_FEAT_EXT_ARG		(1U << 8)
#define IORING_FEAT_NATIVE_WORKERS	(1U << 9)
#define IORING_FEAT_RSRC_TAGS		(1U << 10)
#define IORING_FEAT_CQE_SKIP		(1U << 11)
#define IORING_FEAT_LINKED_FILE		(1U << 12)
#define IORING_FEAT_REG_REG_RING	(1U << 13)
#define IORING_FEAT_RECVSEND_BUNDLE	(1U << 14)
#define IORING_FEAT_MIN_TIMEOUT		(1U << 15)
#define IORING_FEAT_RW_ATTR		(1U << 16)
#define IORING_FEAT_NO_IOWAIT		(1U << 17)

/*
 * io_uring_register(2) opcodes and arguments
 */
enum io_uring_register_op {
	IORING_REGISTER_BUFFERS			= 0,
	IORING_UNREGISTER_BUFFERS		= 1,
	IORING_REGISTER_FILES			= 2,
	IORING_UNREGISTER_FILES			= 3,
	IORING_REGISTER_EVENTFD			= 4,
	IORING_UNREGISTER_EVENTFD		= 5,
	IORING_REGISTER_FILES_UPDATE		= 6,
	IORING_REGISTER_EVENTFD_ASYNC		= 7,
	IORING_REGISTER_PROBE			= 8,
	IORING_REGISTER_PERSONALITY		= 9,
	IORING_UNREGISTER_PERSONALITY		= 10,
	IORING_REGISTER_RESTRICTIONS		= 11,
	IORING_REGISTER_ENABLE_RINGS		= 12,

	/* extended with tagging */
	IORING_REGISTER_FILES2			= 13,
	IORING_REGISTER_FILES_UPDATE2		= 14,
	IORING_REGISTER_BUFFERS2		= 15,
	IORING_REGISTER_BUFFERS_UPDATE		= 16,

	/* set/clear io-wq thread affinities */
	IORING_REGISTER_IOWQ_AFF		= 17,
	IORING_UNREGISTER_IOWQ_AFF		= 18,

	/* set/get max number of io-wq workers */
	IORING_REGISTER_IOWQ_MAX_WORKERS	= 19,

	/* register/unregister io_uring fd with the ring */
	IORING_REGISTER_RING_FDS		= 20,
	IORING_UNREGISTER_RING_FDS		= 21,

	/* register ring based provide buffer group */
	IORING_REGISTER_PBUF_RING		= 22,
	IORING_UNREGISTER_PBUF_RING		= 23,

	/* sync cancelation API */
	IORING_REGISTER_SYNC_CANCEL		= 24,

	/* register a range of fixed file slots for automatic slot allocation */
	IORING_REGISTER_FILE_ALLOC_RANGE	= 25,

	/* return status information for a buffer group */
	IORING_REGISTER_PBUF_STATUS		= 26,

	/* set/clear busy poll settings */
	IORING_REGISTER_NAPI			= 27,
	IORING_UNREGISTER_NAPI			= 28,

	IORING_REGISTER_CLOCK			= 29,

	/* clone registered buffers from source ring to current ring */
	IORING_REGISTER_CLONE_BUFFERS		= 30,

	/* send MSG_RING without having a ring */
	IORING_REGISTER_SEND_MSG_RING		= 31,

	/* register a netdev hw rx queue for zerocopy */
	IORING_REGISTER_ZCRX_IFQ		= 32,

	/* resize CQ ring */
	IORING_REGISTER_RESIZE_RINGS		= 33,

	IORING_REGISTER_MEM_REGION		= 34,

	/* this goes last */
	IORING_REGISTER_LAST,

	/* flag added to the opcode to use a registered ring fd */
	IORING_REGISTER_USE_REGISTERED_RING	= 1U << 31
};
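
/*
 * Example (not part of the UAPI): a sketch of registering two fixed
 * buffers for IORING_OP_READ_FIXED/IORING_OP_WRITE_FIXED, assuming iov[]
 * holds valid base/length pairs. io_uring_register(2) has no glibc
 * wrapper, so the raw syscall is used.
 *
 *	struct iovec iov[2] = { { buf0, len0 }, { buf1, len1 } };
 *
 *	int ret = syscall(__NR_io_uring_register, ring_fd,
 *			  IORING_REGISTER_BUFFERS, iov, 2);
 */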

/* io-wq worker categories */
enum io_wq_type {
	IO_WQ_BOUND,
	IO_WQ_UNBOUND,
};

/* deprecated, see struct io_uring_rsrc_update */
struct io_uring_files_update {
	__u32 offset;
	__u32 resv;
	__aligned_u64 /* __s32 * */ fds;
};

enum {
	/* initialise with user provided memory pointed to by user_addr */
	IORING_MEM_REGION_TYPE_USER		= 1,
};

struct io_uring_region_desc {
	__u64 user_addr;
	__u64 size;
	__u32 flags;
	__u32 id;
	__u64 mmap_offset;
	__u64 __resv[4];
};

enum {
	/* expose the region as registered wait arguments */
	IORING_MEM_REGION_REG_WAIT_ARG		= 1,
};

struct io_uring_mem_region_reg {
	__u64 region_uptr;	/* struct io_uring_region_desc * */
	__u64 flags;
	__u64 __resv[2];
};

/*
 * Register a fully sparse file space, rather than pass in an array of all
 * -1 file descriptors.
 */
#define IORING_RSRC_REGISTER_SPARSE	(1U << 0)

struct io_uring_rsrc_register {
	__u32 nr;
	__u32 flags;
	__u64 resv2;
	__aligned_u64 data;
	__aligned_u64 tags;
};

struct io_uring_rsrc_update {
	__u32 offset;
	__u32 resv;
	__aligned_u64 data;
};

struct io_uring_rsrc_update2 {
	__u32 offset;
	__u32 resv;
	__aligned_u64 data;
	__aligned_u64 tags;
	__u32 nr;
	__u32 resv2;
};

/* Skip updating fd indexes set to this value in the fd table */
#define IORING_REGISTER_FILES_SKIP	(-2)

#define IO_URING_OP_SUPPORTED	(1U << 0)

struct io_uring_probe_op {
	__u8 op;
	__u8 resv;
	__u16 flags;	/* IO_URING_OP_* flags */
	__u32 resv2;
};

struct io_uring_probe {
	__u8 last_op;	/* last opcode supported */
	__u8 ops_len;	/* length of ops[] array below */
	__u16 resv;
	__u32 resv2[3];
	struct io_uring_probe_op ops[];
};
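
/*
 * Example (not part of the UAPI): a sketch of probing opcode support.
 * struct io_uring_probe is followed by ops_len entries, so it is
 * allocated with room for every opcode up to IORING_OP_LAST.
 *
 *	size_t sz = sizeof(struct io_uring_probe) +
 *		    IORING_OP_LAST * sizeof(struct io_uring_probe_op);
 *	struct io_uring_probe *probe = calloc(1, sz);
 *
 *	syscall(__NR_io_uring_register, ring_fd, IORING_REGISTER_PROBE,
 *		probe, IORING_OP_LAST);
 *	if (probe->ops[IORING_OP_SEND_ZC].flags & IO_URING_OP_SUPPORTED)
 *		... // opcode is available on this kernel
 */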

struct io_uring_restriction {
	__u16 opcode;
	union {
		__u8 register_op; /* IORING_RESTRICTION_REGISTER_OP */
		__u8 sqe_op;	  /* IORING_RESTRICTION_SQE_OP */
		__u8 sqe_flags;	  /* IORING_RESTRICTION_SQE_FLAGS_* */
	};
	__u8 resv;
	__u32 resv2[3];
};

struct io_uring_clock_register {
	__u32	clockid;
	__u32	__resv[3];
};

enum {
	IORING_REGISTER_SRC_REGISTERED	= (1U << 0),
	IORING_REGISTER_DST_REPLACE	= (1U << 1),
};

struct io_uring_clone_buffers {
	__u32	src_fd;
	__u32	flags;
	__u32	src_off;
	__u32	dst_off;
	__u32	nr;
	__u32	pad[3];
};

struct io_uring_buf {
	__u64	addr;
	__u32	len;
	__u16	bid;
	__u16	resv;
};

struct io_uring_buf_ring {
	union {
		/*
		 * To avoid spilling into more pages than we need to, the
		 * ring tail is overlaid with the io_uring_buf->resv field.
		 */
		struct {
			__u64	resv1;
			__u32	resv2;
			__u16	resv3;
			__u16	tail;
		};
		__DECLARE_FLEX_ARRAY(struct io_uring_buf, bufs);
	};
};
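
/*
 * Example (not part of the UAPI): a sketch of publishing one buffer into
 * a mapped io_uring_buf_ring. The entry must be written before the new
 * tail is made visible; real code publishes the tail with a store-release.
 *
 *	unsigned short idx = tail & (ring_entries - 1);
 *
 *	br->bufs[idx].addr = (unsigned long) buf;
 *	br->bufs[idx].len  = buf_len;
 *	br->bufs[idx].bid  = bid;
 *	br->tail = tail + 1;	// store-release in real code
 */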

/*
 * Flags for IORING_REGISTER_PBUF_RING.
 *
 * IOU_PBUF_RING_MMAP:	If set, kernel will allocate the memory for the ring.
 *			The application must not set a ring_addr in struct
 *			io_uring_buf_reg, instead it must subsequently call
 *			mmap(2) with the offset set as:
 *			IORING_OFF_PBUF_RING | (bgid << IORING_OFF_PBUF_SHIFT)
 *			to get a virtual mapping for the ring.
 * IOU_PBUF_RING_INC:	If set, buffers consumed from this buffer ring can be
 *			consumed incrementally. Normally one (or more) buffers
 *			are fully consumed. With incremental consumptions, it's
 *			feasible to register big ranges of buffers, and each
 *			use of it will consume only as much as it needs. This
 *			requires that both the kernel and application keep
 *			track of where the current read/recv index is at.
 */
enum io_uring_register_pbuf_ring_flags {
	IOU_PBUF_RING_MMAP	= 1,
	IOU_PBUF_RING_INC	= 2,
};

/* argument for IORING_(UN)REGISTER_PBUF_RING */
struct io_uring_buf_reg {
	__u64	ring_addr;
	__u32	ring_entries;
	__u16	bgid;
	__u16	flags;
	__u64	resv[3];
};
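
/*
 * Example (not part of the UAPI): a sketch of registering an
 * application-allocated buffer ring for buffer group ID 7, assuming "br"
 * is suitably aligned ring memory and ring_entries is a power of two.
 *
 *	struct io_uring_buf_reg reg = { 0 };
 *
 *	reg.ring_addr	 = (unsigned long) br;
 *	reg.ring_entries = 64;
 *	reg.bgid	 = 7;
 *	syscall(__NR_io_uring_register, ring_fd, IORING_REGISTER_PBUF_RING,
 *		&reg, 1);
 */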

/* argument for IORING_REGISTER_PBUF_STATUS */
struct io_uring_buf_status {
	__u32	buf_group;	/* input */
	__u32	head;		/* output */
	__u32	resv[8];
};

enum io_uring_napi_op {
	/* register/unregister backward compatible opcode */
	IO_URING_NAPI_REGISTER_OP = 0,

	/* opcodes to update napi_list when static tracking is used */
	IO_URING_NAPI_STATIC_ADD_ID = 1,
	IO_URING_NAPI_STATIC_DEL_ID = 2
};

enum io_uring_napi_tracking_strategy {
	/* value must be 0 for backward compatibility */
	IO_URING_NAPI_TRACKING_DYNAMIC = 0,
	IO_URING_NAPI_TRACKING_STATIC = 1,
	IO_URING_NAPI_TRACKING_INACTIVE = 255
};

/* argument for IORING_(UN)REGISTER_NAPI */
struct io_uring_napi {
	__u32	busy_poll_to;
	__u8	prefer_busy_poll;

	/* an io_uring_napi_op value */
	__u8	opcode;
	__u8	pad[2];

	/*
	 * for IO_URING_NAPI_REGISTER_OP, it is an
	 * io_uring_napi_tracking_strategy value.
	 *
	 * for IO_URING_NAPI_STATIC_ADD_ID/IO_URING_NAPI_STATIC_DEL_ID
	 * it is the napi id to add/del from napi_list.
	 */
	__u32	op_param;
	__u32	resv;
};

/*
 * io_uring_restriction->opcode values
 */
enum io_uring_register_restriction_op {
	/* Allow an io_uring_register(2) opcode */
	IORING_RESTRICTION_REGISTER_OP		= 0,

	/* Allow an sqe opcode */
	IORING_RESTRICTION_SQE_OP		= 1,

	/* Allow sqe flags */
	IORING_RESTRICTION_SQE_FLAGS_ALLOWED	= 2,

	/* Require sqe flags (these flags must be set on each submission) */
	IORING_RESTRICTION_SQE_FLAGS_REQUIRED	= 3,

	IORING_RESTRICTION_LAST
};

enum {
	IORING_REG_WAIT_TS		= (1U << 0),
};

/*
 * Argument for io_uring_enter(2) with
 * IORING_GETEVENTS | IORING_ENTER_EXT_ARG_REG set, where the actual argument
 * is an index into a previously registered fixed wait region described by
 * the below structure.
 */
struct io_uring_reg_wait {
	struct __kernel_timespec	ts;
	__u32				min_wait_usec;
	__u32				flags;
	__u64				sigmask;
	__u32				sigmask_sz;
	__u32				pad[3];
	__u64				pad2[2];
};

/*
 * Argument for io_uring_enter(2) with IORING_GETEVENTS | IORING_ENTER_EXT_ARG
 */
struct io_uring_getevents_arg {
	__u64	sigmask;
	__u32	sigmask_sz;
	__u32	min_wait_usec;
	__u64	ts;
};
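
/*
 * Example (not part of the UAPI): a sketch of waiting with a timeout via
 * IORING_ENTER_EXT_ARG. The final two syscall arguments become a pointer
 * to this struct and its size, instead of a sigset and its size.
 *
 *	struct __kernel_timespec ts = { .tv_sec = 1 };
 *	struct io_uring_getevents_arg arg = { 0 };
 *
 *	arg.ts = (unsigned long) &ts;
 *	syscall(__NR_io_uring_enter, ring_fd, 0, 1,
 *		IORING_ENTER_GETEVENTS | IORING_ENTER_EXT_ARG, &arg,
 *		sizeof(arg));
 */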

/*
 * Argument for IORING_REGISTER_SYNC_CANCEL
 */
struct io_uring_sync_cancel_reg {
	__u64				addr;
	__s32				fd;
	__u32				flags;
	struct __kernel_timespec	timeout;
	__u8				opcode;
	__u8				pad[7];
	__u64				pad2[3];
};

/*
 * Argument for IORING_REGISTER_FILE_ALLOC_RANGE
 * The range is specified as [off, off + len)
 */
struct io_uring_file_index_range {
	__u32	off;
	__u32	len;
	__u64	resv;
};

struct io_uring_recvmsg_out {
	__u32 namelen;
	__u32 controllen;
	__u32 payloadlen;
	__u32 flags;
};

/*
 * Argument for IORING_OP_URING_CMD when file is a socket
 */
enum io_uring_socket_op {
	SOCKET_URING_OP_SIOCINQ		= 0,
	SOCKET_URING_OP_SIOCOUTQ,
	SOCKET_URING_OP_GETSOCKOPT,
	SOCKET_URING_OP_SETSOCKOPT,
};

/* Zero copy receive refill queue entry */
struct io_uring_zcrx_rqe {
	__u64	off;
	__u32	len;
	__u32	__pad;
};

struct io_uring_zcrx_cqe {
	__u64	off;
	__u64	__pad;
};

/* The bit from which area id is encoded into offsets */
#define IORING_ZCRX_AREA_SHIFT	48
#define IORING_ZCRX_AREA_MASK	(~(((__u64)1 << IORING_ZCRX_AREA_SHIFT) - 1))

struct io_uring_zcrx_offsets {
	__u32	head;
	__u32	tail;
	__u32	rqes;
	__u32	__resv2;
	__u64	__resv[2];
};

enum io_uring_zcrx_area_flags {
	IORING_ZCRX_AREA_DMABUF	= 1,
};

struct io_uring_zcrx_area_reg {
	__u64	addr;
	__u64	len;
	__u64	rq_area_token;
	__u32	flags;
	__u32	dmabuf_fd;
	__u64	__resv2[2];
};

/*
 * Argument for IORING_REGISTER_ZCRX_IFQ
 */
struct io_uring_zcrx_ifq_reg {
	__u32	if_idx;
	__u32	if_rxq;
	__u32	rq_entries;
	__u32	flags;

	__u64	area_ptr;	/* pointer to struct io_uring_zcrx_area_reg */
	__u64	region_ptr;	/* struct io_uring_region_desc * */

	struct io_uring_zcrx_offsets offsets;
	__u32	zcrx_id;
	__u32	__resv2;
	__u64	__resv[3];
};

#ifdef __cplusplus
}
#endif

#endif