io_uring: move timeout opcodes and handling into its own file
[linux-2.6-block.git] / io_uring / io_uring.h

#ifndef IOU_CORE_H
#define IOU_CORE_H

#include <linux/errno.h>
#include <linux/lockdep.h>
#include "io_uring_types.h"

enum {
	IOU_OK			= 0,
	IOU_ISSUE_SKIP_COMPLETE	= -EIOCBQUEUED,
};

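/*
 * Illustrative sketch (not part of the original header): opcode handlers
 * return one of the IOU_* codes above. Assuming a hypothetical handler
 * io_foo() that completes inline:
 *
 *	static int io_foo(struct io_kiocb *req, unsigned int issue_flags)
 *	{
 *		io_req_set_res(req, 0, 0);
 *		return IOU_OK;
 *	}
 *
 * Returning IOU_ISSUE_SKIP_COMPLETE instead tells the core not to post a
 * completion here, e.g. because the handler has arranged for the request
 * to complete elsewhere.
 */
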
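/*
 * Mark a request as failed. A failed request always posts its CQE, so if
 * CQE_SKIP was requested, drop it here and skip the CQEs of the request's
 * links instead.
 */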
static inline void req_set_fail(struct io_kiocb *req)
{
	req->flags |= REQ_F_FAIL;
	if (req->flags & REQ_F_CQE_SKIP) {
		req->flags &= ~REQ_F_CQE_SKIP;
		req->flags |= REQ_F_SKIP_LINK_CQES;
	}
}

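/* Stage the result and flags that will form this request's CQE. */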
static inline void io_req_set_res(struct io_kiocb *req, s32 res, u32 cflags)
{
	req->cqe.res = res;
	req->cqe.flags = cflags;
}

static inline bool req_has_async_data(struct io_kiocb *req)
{
	return req->flags & REQ_F_ASYNC_DATA;
}

static inline void io_put_file(struct file *file)
{
	if (file)
		fput(file);
}

static inline void io_ring_submit_unlock(struct io_ring_ctx *ctx,
					 unsigned issue_flags)
{
	lockdep_assert_held(&ctx->uring_lock);
	if (issue_flags & IO_URING_F_UNLOCKED)
		mutex_unlock(&ctx->uring_lock);
}

static inline void io_ring_submit_lock(struct io_ring_ctx *ctx,
				       unsigned issue_flags)
{
	/*
	 * "Normal" inline submissions always hold the uring_lock, since we
	 * grab it from the system call. The same is true for the SQPOLL
	 * offload. The only exception is when we've detached the request
	 * and issue it from an async worker thread; grab the lock for that
	 * case.
	 */
	if (issue_flags & IO_URING_F_UNLOCKED)
		mutex_lock(&ctx->uring_lock);
	lockdep_assert_held(&ctx->uring_lock);
}

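/*
 * Typical pairing in an opcode handler that mutates ctx state (illustrative
 * sketch; io_foo_update() and do_foo_update() are hypothetical, not part of
 * io_uring):
 *
 *	static int io_foo_update(struct io_kiocb *req, unsigned int issue_flags)
 *	{
 *		struct io_ring_ctx *ctx = req->ctx;
 *		int ret;
 *
 *		io_ring_submit_lock(ctx, issue_flags);
 *		ret = do_foo_update(ctx);
 *		io_ring_submit_unlock(ctx, issue_flags);
 *		return ret;
 *	}
 */
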
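/*
 * Publish pending CQEs: the release store on cq.tail pairs with the acquire
 * load the CQ reader side does on the tail, ensuring the CQE contents are
 * visible before the updated tail value.
 */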
static inline void io_commit_cqring(struct io_ring_ctx *ctx)
{
	/* order cqe stores with ring update */
	smp_store_release(&ctx->rings->cq.tail, ctx->cached_cq_tail);
}

void __io_req_complete(struct io_kiocb *req, unsigned issue_flags);
void io_req_complete_post(struct io_kiocb *req);
void __io_req_complete_post(struct io_kiocb *req);
bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data, s32 res,
		     u32 cflags);
void io_cqring_ev_posted(struct io_ring_ctx *ctx);
void __user *io_buffer_select(struct io_kiocb *req, size_t *len,
			      unsigned int issue_flags);
unsigned int io_put_kbuf(struct io_kiocb *req, unsigned issue_flags);

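/*
 * A request still needs a buffer chosen for it if buffer selection was
 * requested and neither a classic selected buffer nor a ring-provided
 * buffer has been attached yet.
 */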
static inline bool io_do_buffer_select(struct io_kiocb *req)
{
	if (!(req->flags & REQ_F_BUFFER_SELECT))
		return false;
	return !(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING));
}

struct file *io_file_get_normal(struct io_kiocb *req, int fd);
struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
			       unsigned issue_flags);
int io_fixed_fd_install(struct io_kiocb *req, unsigned int issue_flags,
			struct file *file, unsigned int file_slot);
int io_install_fixed_file(struct io_kiocb *req, struct file *file,
			  unsigned int issue_flags, u32 slot_index);

int io_rsrc_node_switch_start(struct io_ring_ctx *ctx);
int io_queue_rsrc_removal(struct io_rsrc_data *data, unsigned idx,
			  struct io_rsrc_node *node, void *rsrc);
void io_rsrc_node_switch(struct io_ring_ctx *ctx,
			 struct io_rsrc_data *data_to_kill);
bool io_is_uring_fops(struct file *file);
bool io_alloc_async_data(struct io_kiocb *req);
void io_req_task_work_add(struct io_kiocb *req);
void io_req_tw_post_queue(struct io_kiocb *req, s32 res, u32 cflags);
void io_req_task_complete(struct io_kiocb *req, bool *locked);
void io_req_task_queue_fail(struct io_kiocb *req, int ret);
int io_try_cancel(struct io_kiocb *req, struct io_cancel_data *cd);

void io_free_req(struct io_kiocb *req);
void io_queue_next(struct io_kiocb *req);

#define io_for_each_link(pos, head) \
	for (pos = (head); pos; pos = pos->link)
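
/*
 * Illustrative use of io_for_each_link() (sketch; io_link_len() is a
 * hypothetical helper, not part of io_uring): walk a submission link
 * chain from its head.
 *
 *	static int io_link_len(struct io_kiocb *head)
 *	{
 *		struct io_kiocb *pos;
 *		int len = 0;
 *
 *		io_for_each_link(pos, head)
 *			len++;
 *		return len;
 *	}
 */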
#endif