io_uring: add support for passing fixed file descriptors
[linux-block.git] / include / uapi / linux / io_uring.h
CommitLineData
9f5834c8 1/* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) OR MIT */
2b188cc1
JA
2/*
3 * Header file for the io_uring interface.
4 *
5 * Copyright (C) 2019 Jens Axboe
6 * Copyright (C) 2019 Christoph Hellwig
7 */
8#ifndef LINUX_IO_URING_H
9#define LINUX_IO_URING_H
10
11#include <linux/fs.h>
12#include <linux/types.h>
78a861b9 13#include <linux/time_types.h>
2b188cc1
JA
14
15/*
16 * IO submission data structure (Submission Queue Entry)
17 */
18struct io_uring_sqe {
19 __u8 opcode; /* type of operation for this sqe */
6b06314c 20 __u8 flags; /* IOSQE_ flags */
2b188cc1
JA
21 __u16 ioprio; /* ioprio for the request */
22 __s32 fd; /* file descriptor to do IO on */
17f2fe35
JA
23 union {
24 __u64 off; /* offset into file */
25 __u64 addr2;
bdb2c48e
PB
26 struct {
27 __u32 cmd_op;
28 __u32 __pad1;
29 };
17f2fe35 30 };
7d67af2c
PB
31 union {
32 __u64 addr; /* pointer to buffer or iovecs */
33 __u64 splice_off_in;
34 };
2b188cc1
JA
35 __u32 len; /* buffer size or number of iovecs */
36 union {
37 __kernel_rwf_t rw_flags;
c992fe29 38 __u32 fsync_flags;
5769a351
JX
39 __u16 poll_events; /* compatibility */
40 __u32 poll32_events; /* word-reversed for BE */
5d17b4a4 41 __u32 sync_range_flags;
0fa03c62 42 __u32 msg_flags;
5262f567 43 __u32 timeout_flags;
17f2fe35 44 __u32 accept_flags;
62755e35 45 __u32 cancel_flags;
15b71abe 46 __u32 open_flags;
eddc7ef5 47 __u32 statx_flags;
4840e418 48 __u32 fadvise_advice;
7d67af2c 49 __u32 splice_flags;
80a261fd 50 __u32 rename_flags;
14a1143b 51 __u32 unlink_flags;
cf30da90 52 __u32 hardlink_flags;
e9621e2b 53 __u32 xattr_flags;
e6130eba 54 __u32 msg_ring_flags;
2b188cc1
JA
55 };
56 __u64 user_data; /* data to be passed back at completion time */
9ba6a1c0 57 /* pack this to avoid bogus arm OABI complaints */
edafccee 58 union {
9ba6a1c0
PB
59 /* index into fixed buffers, if used */
60 __u16 buf_index;
61 /* for grouped buffer selection */
62 __u16 buf_group;
63 } __attribute__((packed));
64 /* personality to use, if used */
65 __u16 personality;
b9445598
PB
66 union {
67 __s32 splice_fd_in;
68 __u32 file_index;
69 };
ee692a21
JA
70 union {
71 struct {
72 __u64 addr3;
73 __u64 __pad2[1];
74 };
75 /*
76 * If the ring is initialized with IORING_SETUP_SQE128, then
77 * this field is used for 80 bytes of arbitrary command data
78 */
79 __u8 cmd[0];
80 };
2b188cc1
JA
81};
82
1339f24b
JA
83/*
84 * If sqe->file_index is set to this for opcodes that instantiate a new
85 * direct descriptor (like openat/openat2/accept), then io_uring will allocate
86 * an available direct descriptor instead of having the application pass one
87 * in. The picked direct descriptor will be returned in cqe->res, or -ENFILE
88 * if the space is full.
89 */
90#define IORING_FILE_INDEX_ALLOC (~0U)
91
6b47ee6e
PB
92enum {
93 IOSQE_FIXED_FILE_BIT,
94 IOSQE_IO_DRAIN_BIT,
95 IOSQE_IO_LINK_BIT,
96 IOSQE_IO_HARDLINK_BIT,
97 IOSQE_ASYNC_BIT,
bcda7baa 98 IOSQE_BUFFER_SELECT_BIT,
04c76b41 99 IOSQE_CQE_SKIP_SUCCESS_BIT,
6b47ee6e
PB
100};
101
6b06314c
JA
102/*
103 * sqe->flags
104 */
6b47ee6e
PB
105/* use fixed fileset */
106#define IOSQE_FIXED_FILE (1U << IOSQE_FIXED_FILE_BIT)
107/* issue after inflight IO */
108#define IOSQE_IO_DRAIN (1U << IOSQE_IO_DRAIN_BIT)
109/* links next sqe */
110#define IOSQE_IO_LINK (1U << IOSQE_IO_LINK_BIT)
111/* like LINK, but stronger */
112#define IOSQE_IO_HARDLINK (1U << IOSQE_IO_HARDLINK_BIT)
113/* always go async */
114#define IOSQE_ASYNC (1U << IOSQE_ASYNC_BIT)
bcda7baa
JA
115/* select buffer from sqe->buf_group */
116#define IOSQE_BUFFER_SELECT (1U << IOSQE_BUFFER_SELECT_BIT)
04c76b41
PB
117/* don't post CQE if request succeeded */
118#define IOSQE_CQE_SKIP_SUCCESS (1U << IOSQE_CQE_SKIP_SUCCESS_BIT)
6b06314c 119
def596e9
JA
120/*
121 * io_uring_setup() flags
122 */
123#define IORING_SETUP_IOPOLL (1U << 0) /* io_context is polled */
6c271ce2
JA
124#define IORING_SETUP_SQPOLL (1U << 1) /* SQ poll thread */
125#define IORING_SETUP_SQ_AFF (1U << 2) /* sq_thread_cpu is valid */
33a107f0 126#define IORING_SETUP_CQSIZE (1U << 3) /* app defines CQ size */
8110c1a6 127#define IORING_SETUP_CLAMP (1U << 4) /* clamp SQ/CQ ring sizes */
24369c2e 128#define IORING_SETUP_ATTACH_WQ (1U << 5) /* attach to existing wq */
7e84e1c7 129#define IORING_SETUP_R_DISABLED (1U << 6) /* start with ring disabled */
bcbb7bf6 130#define IORING_SETUP_SUBMIT_ALL (1U << 7) /* continue submit on error */
e1169f06
JA
131/*
132 * Cooperative task running. When requests complete, they often require
133 * forcing the submitter to transition to the kernel to complete. If this
134 * flag is set, work will be done when the task transitions anyway, rather
135 * than force an inter-processor interrupt reschedule. This avoids interrupting
136 * a task running in userspace, and saves an IPI.
137 */
138#define IORING_SETUP_COOP_TASKRUN (1U << 8)
ef060ea9
JA
139/*
140 * If COOP_TASKRUN is set, get notified if task work is available for
141 * running and a kernel transition would be needed to run it. This sets
142 * IORING_SQ_TASKRUN in the sq ring flags. Not valid with COOP_TASKRUN.
143 */
144#define IORING_SETUP_TASKRUN_FLAG (1U << 9)
ebdeb7c0 145#define IORING_SETUP_SQE128 (1U << 10) /* SQEs are 128 byte */
7a51e5b4 146#define IORING_SETUP_CQE32 (1U << 11) /* CQEs are 32 byte */
97bbdc06
PB
147/*
148 * Only one task is allowed to submit requests
149 */
150#define IORING_SETUP_SINGLE_ISSUER (1U << 12)
ebdeb7c0 151
cc51eaa8 152enum io_uring_op {
9e3aa61a
JA
153 IORING_OP_NOP,
154 IORING_OP_READV,
155 IORING_OP_WRITEV,
156 IORING_OP_FSYNC,
157 IORING_OP_READ_FIXED,
158 IORING_OP_WRITE_FIXED,
159 IORING_OP_POLL_ADD,
160 IORING_OP_POLL_REMOVE,
161 IORING_OP_SYNC_FILE_RANGE,
162 IORING_OP_SENDMSG,
163 IORING_OP_RECVMSG,
164 IORING_OP_TIMEOUT,
165 IORING_OP_TIMEOUT_REMOVE,
166 IORING_OP_ACCEPT,
167 IORING_OP_ASYNC_CANCEL,
168 IORING_OP_LINK_TIMEOUT,
169 IORING_OP_CONNECT,
d63d1b5e 170 IORING_OP_FALLOCATE,
15b71abe 171 IORING_OP_OPENAT,
b5dba59e 172 IORING_OP_CLOSE,
05f3fb3c 173 IORING_OP_FILES_UPDATE,
eddc7ef5 174 IORING_OP_STATX,
3a6820f2
JA
175 IORING_OP_READ,
176 IORING_OP_WRITE,
4840e418 177 IORING_OP_FADVISE,
c1ca757b 178 IORING_OP_MADVISE,
fddaface
JA
179 IORING_OP_SEND,
180 IORING_OP_RECV,
cebdb986 181 IORING_OP_OPENAT2,
3e4827b0 182 IORING_OP_EPOLL_CTL,
7d67af2c 183 IORING_OP_SPLICE,
ddf0322d 184 IORING_OP_PROVIDE_BUFFERS,
067524e9 185 IORING_OP_REMOVE_BUFFERS,
f2a8d5c7 186 IORING_OP_TEE,
36f4fa68 187 IORING_OP_SHUTDOWN,
80a261fd 188 IORING_OP_RENAMEAT,
14a1143b 189 IORING_OP_UNLINKAT,
e34a02dc 190 IORING_OP_MKDIRAT,
7a8721f8 191 IORING_OP_SYMLINKAT,
cf30da90 192 IORING_OP_LINKAT,
4f57f06c 193 IORING_OP_MSG_RING,
e9621e2b
SR
194 IORING_OP_FSETXATTR,
195 IORING_OP_SETXATTR,
a56834e0
SR
196 IORING_OP_FGETXATTR,
197 IORING_OP_GETXATTR,
1374e08e 198 IORING_OP_SOCKET,
ee692a21 199 IORING_OP_URING_CMD,
9e3aa61a
JA
200
201 /* this goes last, obviously */
202 IORING_OP_LAST,
203};
c992fe29
CH
204
205/*
206 * sqe->fsync_flags
207 */
208#define IORING_FSYNC_DATASYNC (1U << 0)
2b188cc1 209
a41525ab
JA
210/*
211 * sqe->timeout_flags
212 */
f1042b6c
PB
213#define IORING_TIMEOUT_ABS (1U << 0)
214#define IORING_TIMEOUT_UPDATE (1U << 1)
215#define IORING_TIMEOUT_BOOTTIME (1U << 2)
216#define IORING_TIMEOUT_REALTIME (1U << 3)
217#define IORING_LINK_TIMEOUT_UPDATE (1U << 4)
6224590d 218#define IORING_TIMEOUT_ETIME_SUCCESS (1U << 5)
50c1df2b 219#define IORING_TIMEOUT_CLOCK_MASK (IORING_TIMEOUT_BOOTTIME | IORING_TIMEOUT_REALTIME)
f1042b6c 220#define IORING_TIMEOUT_UPDATE_MASK (IORING_TIMEOUT_UPDATE | IORING_LINK_TIMEOUT_UPDATE)
7d67af2c
PB
221/*
222 * sqe->splice_flags
223 * extends splice(2) flags
224 */
225#define SPLICE_F_FD_IN_FIXED (1U << 31) /* the last bit of __u32 */
226
88e41cf9
JA
227/*
228 * POLL_ADD flags. Note that since sqe->poll_events is the flag space, the
229 * command flags for POLL_ADD are stored in sqe->len.
230 *
231 * IORING_POLL_ADD_MULTI Multishot poll. Sets IORING_CQE_F_MORE if
232 * the poll handler will continue to report
233 * CQEs on behalf of the same SQE.
b69de288
JA
234 *
235 * IORING_POLL_UPDATE Update existing poll request, matching
236 * sqe->addr as the old user_data field.
b9ba8a44
JA
237 *
238 * IORING_POLL_LEVEL Level triggered poll.
88e41cf9
JA
239 */
240#define IORING_POLL_ADD_MULTI (1U << 0)
b69de288
JA
241#define IORING_POLL_UPDATE_EVENTS (1U << 1)
242#define IORING_POLL_UPDATE_USER_DATA (1U << 2)
b9ba8a44 243#define IORING_POLL_ADD_LEVEL (1U << 3)
88e41cf9 244
8e29da69
JA
245/*
246 * ASYNC_CANCEL flags.
247 *
248 * IORING_ASYNC_CANCEL_ALL Cancel all requests that match the given key
4bf94615
JA
249 * IORING_ASYNC_CANCEL_FD Key off 'fd' for cancelation rather than the
250 * request 'user_data'
970f256e 251 * IORING_ASYNC_CANCEL_ANY Match any request
7d8ca725 252 * IORING_ASYNC_CANCEL_FD_FIXED 'fd' passed in is a fixed descriptor
8e29da69
JA
253 */
254#define IORING_ASYNC_CANCEL_ALL (1U << 0)
4bf94615 255#define IORING_ASYNC_CANCEL_FD (1U << 1)
970f256e 256#define IORING_ASYNC_CANCEL_ANY (1U << 2)
7d8ca725 257#define IORING_ASYNC_CANCEL_FD_FIXED (1U << 3)
8e29da69 258
0455d4cc 259/*
29c1ac23 260 * send/sendmsg and recv/recvmsg flags (sqe->ioprio)
0455d4cc
JA
261 *
262 * IORING_RECVSEND_POLL_FIRST If set, instead of first attempting to send
263 * or receive and arm poll if that yields an
264 * -EAGAIN result, arm poll upfront and skip
265 * the initial transfer attempt.
266 */
267#define IORING_RECVSEND_POLL_FIRST (1U << 0)
268
390ed29b
HX
269/*
270 * accept flags stored in sqe->ioprio
271 */
272#define IORING_ACCEPT_MULTISHOT (1U << 0)
273
e6130eba
JA
274/*
275 * IORING_OP_MSG_RING command types, stored in sqe->addr
276 */
277enum {
278 IORING_MSG_DATA, /* pass sqe->len as 'res' and off as user_data */
279 IORING_MSG_SEND_FD, /* send a registered fd to another ring */
280};
281
282/*
283 * IORING_OP_MSG_RING flags (sqe->msg_ring_flags)
284 *
285 * IORING_MSG_RING_CQE_SKIP Don't post a CQE to the target ring. Not
286 * applicable for IORING_MSG_DATA, obviously.
287 */
288#define IORING_MSG_RING_CQE_SKIP (1U << 0)
289
2b188cc1
JA
290/*
291 * IO completion data structure (Completion Queue Entry)
292 */
293struct io_uring_cqe {
294 __u64 user_data; /* sqe->data submission passed back */
295 __s32 res; /* result code for this event */
296 __u32 flags;
7a51e5b4
SR
297
298 /*
299 * If the ring is initialized with IORING_SETUP_CQE32, then this field
300 * contains 16-bytes of padding, doubling the size of the CQE.
301 */
302 __u64 big_cqe[];
2b188cc1
JA
303};
304
bcda7baa
JA
305/*
306 * cqe->flags
307 *
308 * IORING_CQE_F_BUFFER If set, the upper 16 bits are the buffer ID
88e41cf9 309 * IORING_CQE_F_MORE If set, parent SQE will generate more CQE entries
f548a12e 310 * IORING_CQE_F_SOCK_NONEMPTY If set, more data to read after socket recv
bcda7baa
JA
311 */
312#define IORING_CQE_F_BUFFER (1U << 0)
88e41cf9 313#define IORING_CQE_F_MORE (1U << 1)
f548a12e 314#define IORING_CQE_F_SOCK_NONEMPTY (1U << 2)
bcda7baa
JA
315
316enum {
317 IORING_CQE_BUFFER_SHIFT = 16,
318};
319
2b188cc1
JA
320/*
321 * Magic offsets for the application to mmap the data it needs
322 */
323#define IORING_OFF_SQ_RING 0ULL
324#define IORING_OFF_CQ_RING 0x8000000ULL
325#define IORING_OFF_SQES 0x10000000ULL
326
327/*
328 * Filled with the offset for mmap(2)
329 */
330struct io_sqring_offsets {
331 __u32 head;
332 __u32 tail;
333 __u32 ring_mask;
334 __u32 ring_entries;
335 __u32 flags;
336 __u32 dropped;
337 __u32 array;
338 __u32 resv1;
339 __u64 resv2;
340};
341
6c271ce2
JA
342/*
343 * sq_ring->flags
344 */
345#define IORING_SQ_NEED_WAKEUP (1U << 0) /* needs io_uring_enter wakeup */
6d5f9049 346#define IORING_SQ_CQ_OVERFLOW (1U << 1) /* CQ ring is overflown */
ef060ea9 347#define IORING_SQ_TASKRUN (1U << 2) /* task should enter the kernel */
6c271ce2 348
2b188cc1
JA
349struct io_cqring_offsets {
350 __u32 head;
351 __u32 tail;
352 __u32 ring_mask;
353 __u32 ring_entries;
354 __u32 overflow;
355 __u32 cqes;
0d9b5b3a
SG
356 __u32 flags;
357 __u32 resv1;
358 __u64 resv2;
2b188cc1
JA
359};
360
7e55a19c
SG
361/*
362 * cq_ring->flags
363 */
364
365/* disable eventfd notifications */
366#define IORING_CQ_EVENTFD_DISABLED (1U << 0)
367
2b188cc1
JA
368/*
369 * io_uring_enter(2) flags
370 */
e7a6c00d
JA
371#define IORING_ENTER_GETEVENTS (1U << 0)
372#define IORING_ENTER_SQ_WAKEUP (1U << 1)
373#define IORING_ENTER_SQ_WAIT (1U << 2)
374#define IORING_ENTER_EXT_ARG (1U << 3)
375#define IORING_ENTER_REGISTERED_RING (1U << 4)
2b188cc1
JA
376
377/*
378 * Passed in for io_uring_setup(2). Copied back with updated info on success
379 */
380struct io_uring_params {
381 __u32 sq_entries;
382 __u32 cq_entries;
383 __u32 flags;
6c271ce2
JA
384 __u32 sq_thread_cpu;
385 __u32 sq_thread_idle;
ac90f249 386 __u32 features;
24369c2e
PB
387 __u32 wq_fd;
388 __u32 resv[3];
2b188cc1
JA
389 struct io_sqring_offsets sq_off;
390 struct io_cqring_offsets cq_off;
391};
392
ac90f249
JA
393/*
394 * io_uring_params->features flags
395 */
396#define IORING_FEAT_SINGLE_MMAP (1U << 0)
1d7bb1d5 397#define IORING_FEAT_NODROP (1U << 1)
da8c9690 398#define IORING_FEAT_SUBMIT_STABLE (1U << 2)
ba04291e 399#define IORING_FEAT_RW_CUR_POS (1U << 3)
cccf0ee8 400#define IORING_FEAT_CUR_PERSONALITY (1U << 4)
d7718a9d 401#define IORING_FEAT_FAST_POLL (1U << 5)
5769a351 402#define IORING_FEAT_POLL_32BITS (1U << 6)
28cea78a 403#define IORING_FEAT_SQPOLL_NONFIXED (1U << 7)
c73ebb68 404#define IORING_FEAT_EXT_ARG (1U << 8)
1c0aa1fa 405#define IORING_FEAT_NATIVE_WORKERS (1U << 9)
9690557e 406#define IORING_FEAT_RSRC_TAGS (1U << 10)
04c76b41 407#define IORING_FEAT_CQE_SKIP (1U << 11)
c4212f3e 408#define IORING_FEAT_LINKED_FILE (1U << 12)
ac90f249 409
edafccee
JA
410/*
411 * io_uring_register(2) opcodes and arguments
412 */
9d4a75ef
SG
413enum {
414 IORING_REGISTER_BUFFERS = 0,
415 IORING_UNREGISTER_BUFFERS = 1,
416 IORING_REGISTER_FILES = 2,
417 IORING_UNREGISTER_FILES = 3,
418 IORING_REGISTER_EVENTFD = 4,
419 IORING_UNREGISTER_EVENTFD = 5,
420 IORING_REGISTER_FILES_UPDATE = 6,
421 IORING_REGISTER_EVENTFD_ASYNC = 7,
422 IORING_REGISTER_PROBE = 8,
423 IORING_REGISTER_PERSONALITY = 9,
424 IORING_UNREGISTER_PERSONALITY = 10,
21b55dbc 425 IORING_REGISTER_RESTRICTIONS = 11,
7e84e1c7 426 IORING_REGISTER_ENABLE_RINGS = 12,
992da01a
PB
427
428 /* extended with tagging */
429 IORING_REGISTER_FILES2 = 13,
430 IORING_REGISTER_FILES_UPDATE2 = 14,
431 IORING_REGISTER_BUFFERS2 = 15,
432 IORING_REGISTER_BUFFERS_UPDATE = 16,
9d4a75ef 433
fe76421d
JA
434 /* set/clear io-wq thread affinities */
435 IORING_REGISTER_IOWQ_AFF = 17,
436 IORING_UNREGISTER_IOWQ_AFF = 18,
437
dd47c104 438 /* set/get max number of io-wq workers */
2e480058
JA
439 IORING_REGISTER_IOWQ_MAX_WORKERS = 19,
440
e7a6c00d
JA
441 /* register/unregister io_uring fd with the ring */
442 IORING_REGISTER_RING_FDS = 20,
443 IORING_UNREGISTER_RING_FDS = 21,
444
c7fb1942
JA
445 /* register ring based provide buffer group */
446 IORING_REGISTER_PBUF_RING = 22,
447 IORING_UNREGISTER_PBUF_RING = 23,
448
78a861b9
JA
449 /* sync cancelation API */
450 IORING_REGISTER_SYNC_CANCEL = 24,
451
9d4a75ef
SG
452 /* this goes last */
453 IORING_REGISTER_LAST
454};
c3a31e60 455
dd47c104
ES
456/* io-wq worker categories */
457enum {
458 IO_WQ_BOUND,
459 IO_WQ_UNBOUND,
460};
461
269bbe5f 462/* deprecated, see struct io_uring_rsrc_update */
c3a31e60
JA
463struct io_uring_files_update {
464 __u32 offset;
1292e972
ES
465 __u32 resv;
466 __aligned_u64 /* __s32 * */ fds;
c3a31e60 467};
edafccee 468
a8da73a3
JA
469/*
470 * Register a fully sparse file space, rather than pass in an array of all
471 * -1 file descriptors.
472 */
473#define IORING_RSRC_REGISTER_SPARSE (1U << 0)
474
792e3582 475struct io_uring_rsrc_register {
792e3582 476 __u32 nr;
a8da73a3 477 __u32 flags;
992da01a 478 __u64 resv2;
792e3582
PB
479 __aligned_u64 data;
480 __aligned_u64 tags;
481};
482
c3bdad02
PB
483struct io_uring_rsrc_update {
484 __u32 offset;
485 __u32 resv;
486 __aligned_u64 data;
487};
488
489struct io_uring_rsrc_update2 {
490 __u32 offset;
491 __u32 resv;
492 __aligned_u64 data;
493 __aligned_u64 tags;
c3bdad02 494 __u32 nr;
992da01a 495 __u32 resv2;
c3bdad02
PB
496};
497
4e0377a1 498/* Skip updating fd indexes set to this value in the fd table */
499#define IORING_REGISTER_FILES_SKIP (-2)
500
66f4af93
JA
501#define IO_URING_OP_SUPPORTED (1U << 0)
502
503struct io_uring_probe_op {
504 __u8 op;
505 __u8 resv;
506 __u16 flags; /* IO_URING_OP_* flags */
507 __u32 resv2;
508};
509
510struct io_uring_probe {
511 __u8 last_op; /* last opcode supported */
512 __u8 ops_len; /* length of ops[] array below */
513 __u16 resv;
514 __u32 resv2[3];
8fcf4c48 515 struct io_uring_probe_op ops[];
66f4af93
JA
516};
517
21b55dbc
SG
518struct io_uring_restriction {
519 __u16 opcode;
520 union {
521 __u8 register_op; /* IORING_RESTRICTION_REGISTER_OP */
522 __u8 sqe_op; /* IORING_RESTRICTION_SQE_OP */
523 __u8 sqe_flags; /* IORING_RESTRICTION_SQE_FLAGS_* */
524 };
525 __u8 resv;
526 __u32 resv2[3];
527};
528
c7fb1942
JA
529struct io_uring_buf {
530 __u64 addr;
531 __u32 len;
532 __u16 bid;
533 __u16 resv;
534};
535
536struct io_uring_buf_ring {
537 union {
538 /*
539 * To avoid spilling into more pages than we need to, the
540 * ring tail is overlaid with the io_uring_buf->resv field.
541 */
542 struct {
543 __u64 resv1;
544 __u32 resv2;
545 __u16 resv3;
546 __u16 tail;
547 };
548 struct io_uring_buf bufs[0];
549 };
550};
551
552/* argument for IORING_(UN)REGISTER_PBUF_RING */
553struct io_uring_buf_reg {
554 __u64 ring_addr;
555 __u32 ring_entries;
556 __u16 bgid;
557 __u16 pad;
558 __u64 resv[3];
559};
560
21b55dbc
SG
561/*
562 * io_uring_restriction->opcode values
563 */
564enum {
565 /* Allow an io_uring_register(2) opcode */
566 IORING_RESTRICTION_REGISTER_OP = 0,
567
568 /* Allow an sqe opcode */
569 IORING_RESTRICTION_SQE_OP = 1,
570
571 /* Allow sqe flags */
572 IORING_RESTRICTION_SQE_FLAGS_ALLOWED = 2,
573
574 /* Require sqe flags (these flags must be set on each submission) */
575 IORING_RESTRICTION_SQE_FLAGS_REQUIRED = 3,
576
577 IORING_RESTRICTION_LAST
578};
579
c73ebb68
HX
580struct io_uring_getevents_arg {
581 __u64 sigmask;
582 __u32 sigmask_sz;
583 __u32 pad;
584 __u64 ts;
585};
586
78a861b9
JA
587/*
588 * Argument for IORING_REGISTER_SYNC_CANCEL
589 */
590struct io_uring_sync_cancel_reg {
591 __u64 addr;
592 __s32 fd;
593 __u32 flags;
594 struct __kernel_timespec timeout;
595 __u64 pad[4];
596};
597
2b188cc1 598#endif