[linux-block.git] / io_uring / kbuf.h
// SPDX-License-Identifier: GPL-2.0
#ifndef IOU_KBUF_H
#define IOU_KBUF_H

#include <uapi/linux/io_uring.h>

struct io_buffer_list {
	/*
	 * If ->buf_nr_pages is set, then buf_pages/buf_ring are used. If not,
	 * then these are classic provided buffers and ->buf_list is used.
	 */
	union {
		struct list_head buf_list;
		struct {
			struct page **buf_pages;
			struct io_uring_buf_ring *buf_ring;
		};
	};
	__u16 bgid;

	/* below is for ring provided buffers; see the indexing sketch that
	 * follows this struct */
	__u16 buf_nr_pages;
	__u16 nr_entries;
	__u16 head;
	__u16 mask;
};
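
/*
 * Indexing sketch (illustrative, not part of this header): ring sizes are
 * a power of two, so ->mask is stored as nr_entries - 1 and the next
 * ring-provided buffer can be picked without a modulo:
 *
 *	struct io_uring_buf *buf = &bl->buf_ring->bufs[bl->head & bl->mask];
 */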

struct io_buffer {
	struct list_head list;
	__u64 addr;
	__u32 len;
	__u16 bid;
	__u16 bgid;
};
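
/*
 * Userspace counterpart (illustrative sketch assuming liburing, not part
 * of this header): classic provided buffers reach the kernel via
 * IORING_OP_PROVIDE_BUFFERS, e.g.
 *
 *	io_uring_prep_provide_buffers(sqe, base, buf_len, nbufs, bgid, bid);
 *
 * and each one is then tracked as a struct io_buffer recording its
 * (addr, len, bid, bgid) tuple.
 */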

void __user *io_buffer_select(struct io_kiocb *req, size_t *len,
			      unsigned int issue_flags);
void __io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags);
void io_destroy_buffers(struct io_ring_ctx *ctx);

int io_remove_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags);

int io_provide_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags);

int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg);
int io_unregister_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg);

unsigned int __io_put_kbuf(struct io_kiocb *req, unsigned issue_flags);

static inline bool io_do_buffer_select(struct io_kiocb *req)
{
	if (!(req->flags & REQ_F_BUFFER_SELECT))
		return false;
	return !(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING));
}
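
/*
 * Typical caller pattern (simplified sketch of how the opcode handlers,
 * e.g. in net.c, use the pair of helpers above):
 *
 *	if (io_do_buffer_select(req)) {
 *		void __user *buf;
 *
 *		buf = io_buffer_select(req, &len, issue_flags);
 *		if (!buf)
 *			return -ENOBUFS;
 *	}
 */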

static inline void io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags)
{
	if (!(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)))
		return;
	/*
	 * For legacy provided buffer mode, don't recycle if we already did
	 * IO to this buffer. For ring-mapped provided buffer mode, we should
	 * increment ring->head to explicitly monopolize the buffer to avoid
	 * multiple use.
	 */
	if ((req->flags & REQ_F_BUFFER_SELECTED) &&
	    (req->flags & REQ_F_PARTIAL_IO))
		return;

	/*
	 * READV uses fields in `struct io_rw` (len/addr) to stash the selected
	 * buffer data. However, if that buffer is recycled, the original
	 * request data stored in addr is lost. Therefore forbid recycling for
	 * now.
	 */
	if (req->opcode == IORING_OP_READV)
		return;

	__io_kbuf_recycle(req, issue_flags);
}
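
/*
 * Worked example of the REQ_F_PARTIAL_IO check above (illustrative): a
 * recv that copies some bytes into a legacy provided buffer and then hits
 * -EAGAIN must keep that buffer for its retry; recycling it would let
 * another request select the partially filled buffer. Rough shape of what
 * __io_kbuf_recycle does in kbuf.c (an assumption, simplified):
 *
 *	if (req->flags & REQ_F_BUFFER_RING)
 *		req->flags &= ~REQ_F_BUFFER_RING;	// ring entry not consumed
 *	else
 *		list_add(&req->kbuf->list, &bl->buf_list);
 */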

static inline unsigned int __io_put_kbuf_list(struct io_kiocb *req,
					      struct list_head *list)
{
	if (req->flags & REQ_F_BUFFER_RING) {
		if (req->buf_list)
			req->buf_list->head++;
		req->flags &= ~REQ_F_BUFFER_RING;
	} else {
		list_add(&req->kbuf->list, list);
		req->flags &= ~REQ_F_BUFFER_SELECTED;
	}

	return IORING_CQE_F_BUFFER | (req->buf_index << IORING_CQE_BUFFER_SHIFT);
}
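
/*
 * Consumer-side decode of the CQE flags produced above (illustrative
 * userspace snippet, not part of this header):
 *
 *	if (cqe->flags & IORING_CQE_F_BUFFER) {
 *		unsigned int bid = cqe->flags >> IORING_CQE_BUFFER_SHIFT;
 *		// bid is the ID of the provided buffer holding the data
 *	}
 */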

static inline unsigned int io_put_kbuf_comp(struct io_kiocb *req)
{
	lockdep_assert_held(&req->ctx->completion_lock);

	if (!(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)))
		return 0;
	return __io_put_kbuf_list(req, &req->ctx->io_buffers_comp);
}

static inline unsigned int io_put_kbuf(struct io_kiocb *req,
				       unsigned issue_flags)
{
	if (!(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)))
		return 0;
	return __io_put_kbuf(req, issue_flags);
}
#endif