io_uring/kbuf: prune deferred locked cache when tearing down
[linux-block.git] / io_uring / kbuf.h

// SPDX-License-Identifier: GPL-2.0
#ifndef IOU_KBUF_H
#define IOU_KBUF_H

#include <uapi/linux/io_uring.h>

struct io_buffer_list {
	/*
	 * If ->buf_nr_pages is set, then buf_pages/buf_ring are used. If not,
	 * then these are classic provided buffers and ->buf_list is used.
	 */
	union {
		struct list_head buf_list;
		struct {
			struct page **buf_pages;
			struct io_uring_buf_ring *buf_ring;
		};
	};
	__u16 bgid;

	/* below is for ring provided buffers */
	__u16 buf_nr_pages;
	__u16 nr_entries;
	__u16 head;
	__u16 mask;

	/* ring mapped provided buffers */
	__u8 is_mapped;
	/* ring mapped provided buffers, but mmap'ed by application */
	__u8 is_mmap;
};
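
/*
 * Illustrative sketch (not part of the original header): consumers pick
 * the active union member by mode, simplified from what
 * io_buffer_select() in kbuf.c does. For a ring mapped group the next
 * slot to hand out is
 *
 *	&bl->buf_ring->bufs[bl->head & bl->mask]
 *
 * while a classic provided-buffer group pops
 *
 *	list_first_entry(&bl->buf_list, struct io_buffer, list)
 */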

struct io_buffer {
	struct list_head list;
	__u64 addr;
	__u32 len;
	__u16 bid;
	__u16 bgid;
};

void __user *io_buffer_select(struct io_kiocb *req, size_t *len,
			      unsigned int issue_flags);
void io_destroy_buffers(struct io_ring_ctx *ctx);

int io_remove_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags);

int io_provide_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags);

int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg);
int io_unregister_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg);
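
/*
 * For both register and unregister, @arg points at a user-supplied
 * struct io_uring_buf_reg. A simplified userspace sketch of registering
 * a ring for buffer group 0 (group id and entry count are illustrative;
 * ring_entries must be a power of two and ring_mem page-aligned):
 *
 *	struct io_uring_buf_reg reg = {
 *		.ring_addr	= (unsigned long) ring_mem,
 *		.ring_entries	= 8,
 *		.bgid		= 0,
 *	};
 *	io_uring_register(ring_fd, IORING_REGISTER_PBUF_RING, &reg, 1);
 */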

void io_kbuf_mmap_list_free(struct io_ring_ctx *ctx);

unsigned int __io_put_kbuf(struct io_kiocb *req, unsigned issue_flags);

bool io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags);

void *io_pbuf_get_address(struct io_ring_ctx *ctx, unsigned long bgid);

static inline bool io_kbuf_recycle_ring(struct io_kiocb *req)
{
	/*
	 * We don't need to recycle for REQ_F_BUFFER_RING, we can just clear
	 * the flag and hence ensure that bl->head doesn't get incremented.
	 * If the tail has already been incremented, hang on to it.
	 * The exception is partial io; in that case we should increment
	 * bl->head to monopolize the buffer.
	 */
	if (req->buf_list) {
		if (req->flags & REQ_F_PARTIAL_IO) {
			/*
			 * If we end up here, then the io_uring_lock has
			 * been kept held since we retrieved the buffer.
			 * For the io-wq case, we already cleared
			 * req->buf_list when the buffer was retrieved,
			 * hence it cannot be set here for that case.
			 */
			req->buf_list->head++;
			req->buf_list = NULL;
		} else {
			req->buf_index = req->buf_list->bgid;
			req->flags &= ~REQ_F_BUFFER_RING;
			return true;
		}
	}
	return false;
}

static inline bool io_do_buffer_select(struct io_kiocb *req)
{
	if (!(req->flags & REQ_F_BUFFER_SELECT))
		return false;
	return !(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING));
}

static inline bool io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags)
{
	if (req->flags & REQ_F_BUFFER_SELECTED)
		return io_kbuf_recycle_legacy(req, issue_flags);
	if (req->flags & REQ_F_BUFFER_RING)
		return io_kbuf_recycle_ring(req);
	return false;
}

static inline unsigned int __io_put_kbuf_list(struct io_kiocb *req,
					      struct list_head *list)
{
	unsigned int ret = IORING_CQE_F_BUFFER | (req->buf_index << IORING_CQE_BUFFER_SHIFT);

	if (req->flags & REQ_F_BUFFER_RING) {
		if (req->buf_list) {
			req->buf_index = req->buf_list->bgid;
			req->buf_list->head++;
		}
		req->flags &= ~REQ_F_BUFFER_RING;
	} else {
		req->buf_index = req->kbuf->bgid;
		list_add(&req->kbuf->list, list);
		req->flags &= ~REQ_F_BUFFER_SELECTED;
	}

	return ret;
}
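
/*
 * The return value above is what lands in cqe->flags. A short
 * userspace sketch of decoding it ('bid' is an illustrative name):
 *
 *	if (cqe->flags & IORING_CQE_F_BUFFER) {
 *		unsigned bid = cqe->flags >> IORING_CQE_BUFFER_SHIFT;
 *		// 'bid' is the id of the buffer the kernel consumed
 *	}
 */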

static inline unsigned int io_put_kbuf_comp(struct io_kiocb *req)
{
	lockdep_assert_held(&req->ctx->completion_lock);

	if (!(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)))
		return 0;
	return __io_put_kbuf_list(req, &req->ctx->io_buffers_comp);
}

static inline unsigned int io_put_kbuf(struct io_kiocb *req,
				       unsigned issue_flags)
{
	if (!(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)))
		return 0;
	return __io_put_kbuf(req, issue_flags);
}
#endif
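
For context, a minimal userspace sketch of the provided-buffer-ring flow
these helpers back, written against liburing (assuming liburing >= 2.4
for io_uring_setup_buf_ring(); the group id, buffer count, and sizes
below are illustrative, not mandated by the kernel side):

#include <liburing.h>

#define BGID		0	/* buffer group id, matches ->bgid above */
#define NR_BUFS		8	/* must be a power of two */
#define BUF_SIZE	4096

static int setup_pbuf_ring(struct io_uring *ring, char *bufs)
{
	struct io_uring_buf_ring *br;
	int i, ret;

	/* allocate and register the ring for buffer group BGID */
	br = io_uring_setup_buf_ring(ring, NR_BUFS, BGID, 0, &ret);
	if (!br)
		return ret;

	/* hand every buffer to the kernel, then publish the new tail */
	for (i = 0; i < NR_BUFS; i++)
		io_uring_buf_ring_add(br, bufs + i * BUF_SIZE, BUF_SIZE, i,
				      io_uring_buf_ring_mask(NR_BUFS), i);
	io_uring_buf_ring_advance(br, NR_BUFS);
	return 0;
}

When a completion arrives with IORING_CQE_F_BUFFER set, the buffer id
recovered from cqe->flags identifies the slot the kernel consumed (by
bumping bl->head, as in __io_put_kbuf_list() above); the application
replenishes it with another io_uring_buf_ring_add() plus
io_uring_buf_ring_advance().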