io_uring/kbuf.c
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/namei.h>
#include <linux/poll.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring_types.h"
#include "io_uring.h"
#include "opdef.h"
#include "kbuf.h"

#define IO_BUFFER_LIST_BUF_PER_PAGE (PAGE_SIZE / sizeof(struct io_uring_buf))

#define BGID_ARRAY	64

struct io_provide_buf {
	struct file	*file;
	__u64		addr;
	__u32		len;
	__u32		bgid;
	__u16		nbufs;
	__u16		bid;
};

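/*
 * Look up the buffer list for a buffer group ID. Low group IDs live in the
 * flat ctx->io_bl array; anything at or above BGID_ARRAY is tracked in the
 * io_bl_xa xarray instead.
 */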
static inline struct io_buffer_list *io_buffer_get_list(struct io_ring_ctx *ctx,
							 unsigned int bgid)
{
	if (ctx->io_bl && bgid < BGID_ARRAY)
		return &ctx->io_bl[bgid];

	return xa_load(&ctx->io_bl_xa, bgid);
}

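/*
 * Return a selected buffer to its group so it can be picked again. For ring
 * provided buffers this only adjusts flag/head state; for classic provided
 * buffers the io_buffer is put back on the group's buf_list under the
 * submit lock.
 */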
void __io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_buffer_list *bl;
	struct io_buffer *buf;

	/*
	 * We don't need to recycle for REQ_F_BUFFER_RING, we can just clear
	 * the flag and hence ensure that bl->head doesn't get incremented.
	 * If the tail has already been incremented, hang on to it.
	 */
	if (req->flags & REQ_F_BUFFER_RING) {
		if (req->buf_list) {
			if (req->flags & REQ_F_PARTIAL_IO) {
				req->buf_list->head++;
				req->buf_list = NULL;
			} else {
				req->buf_index = req->buf_list->bgid;
				req->flags &= ~REQ_F_BUFFER_RING;
			}
		}
		return;
	}

	io_ring_submit_lock(ctx, issue_flags);

	buf = req->kbuf;
	bl = io_buffer_get_list(ctx, buf->bgid);
	list_add(&buf->list, &bl->buf_list);
	req->flags &= ~REQ_F_BUFFER_SELECTED;
	req->buf_index = buf->bgid;

	io_ring_submit_unlock(ctx, issue_flags);
}

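/*
 * Register a buffer list under its group ID: array-indexed groups need no
 * extra bookkeeping, higher group IDs are stored in the xarray.
 */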
static int io_buffer_add_list(struct io_ring_ctx *ctx,
			      struct io_buffer_list *bl, unsigned int bgid)
{
	bl->bgid = bgid;
	if (bgid < BGID_ARRAY)
		return 0;

	return xa_err(xa_store(&ctx->io_bl_xa, bgid, bl, GFP_KERNEL));
}

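/*
 * Drop the request's reference to its selected buffer and return the CQE
 * flags that report the buffer ID back to userspace.
 */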
unsigned int __io_put_kbuf(struct io_kiocb *req, unsigned issue_flags)
{
	unsigned int cflags;

	/*
	 * We can add this buffer back to two lists:
	 *
	 * 1) The io_buffers_cache list. This one is protected by the
	 *    ctx->uring_lock. If we already hold this lock, add back to this
	 *    list as we can grab it from issue as well.
	 * 2) The io_buffers_comp list. This one is protected by the
	 *    ctx->completion_lock.
	 *
	 * We migrate buffers from the comp_list to the issue cache list
	 * when we need one.
	 */
	if (req->flags & REQ_F_BUFFER_RING) {
		/* no buffers to recycle for this case */
		cflags = __io_put_kbuf_list(req, NULL);
	} else if (issue_flags & IO_URING_F_UNLOCKED) {
		struct io_ring_ctx *ctx = req->ctx;

		spin_lock(&ctx->completion_lock);
		cflags = __io_put_kbuf_list(req, &ctx->io_buffers_comp);
		spin_unlock(&ctx->completion_lock);
	} else {
		lockdep_assert_held(&req->ctx->uring_lock);

		cflags = __io_put_kbuf_list(req, &req->ctx->io_buffers_cache);
	}
	return cflags;
}

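/*
 * Pick a classic provided buffer: take the first io_buffer off the group's
 * list, clamp *len to the buffer size, and hand back its user address.
 */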
static void __user *io_provided_buffer_select(struct io_kiocb *req, size_t *len,
					      struct io_buffer_list *bl)
{
	if (!list_empty(&bl->buf_list)) {
		struct io_buffer *kbuf;

		kbuf = list_first_entry(&bl->buf_list, struct io_buffer, list);
		list_del(&kbuf->list);
		if (*len > kbuf->len)
			*len = kbuf->len;
		req->flags |= REQ_F_BUFFER_SELECTED;
		req->kbuf = kbuf;
		req->buf_index = kbuf->bid;
		return u64_to_user_ptr(kbuf->addr);
	}
	return NULL;
}

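/*
 * Pick a buffer from a mapped provided-buffer ring. The tail is read with
 * acquire semantics since userspace updates it; entries beyond the first
 * page of the ring are reached through the pinned buf_pages array.
 */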
static void __user *io_ring_buffer_select(struct io_kiocb *req, size_t *len,
					  struct io_buffer_list *bl,
					  unsigned int issue_flags)
{
	struct io_uring_buf_ring *br = bl->buf_ring;
	struct io_uring_buf *buf;
	__u16 head = bl->head;

	if (unlikely(smp_load_acquire(&br->tail) == head))
		return NULL;

	head &= bl->mask;
	if (head < IO_BUFFER_LIST_BUF_PER_PAGE) {
		buf = &br->bufs[head];
	} else {
		int off = head & (IO_BUFFER_LIST_BUF_PER_PAGE - 1);
		int index = head / IO_BUFFER_LIST_BUF_PER_PAGE;
		buf = page_address(bl->buf_pages[index]);
		buf += off;
	}
	if (*len > buf->len)
		*len = buf->len;
	req->flags |= REQ_F_BUFFER_RING;
	req->buf_list = bl;
	req->buf_index = buf->bid;

	if (issue_flags & IO_URING_F_UNLOCKED || !file_can_poll(req->file)) {
		/*
		 * If we came in unlocked, we have no choice but to consume the
		 * buffer here. This does mean it'll be pinned until the IO
		 * completes. But coming in unlocked means we're in io-wq
		 * context, hence there should be no further retry. For the
		 * locked case, the caller must ensure to call the commit when
		 * the transfer completes (or if we get -EAGAIN and must poll
		 * or retry).
		 */
		req->buf_list = NULL;
		bl->head++;
	}
	return u64_to_user_ptr(buf->addr);
}

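/*
 * Select a buffer for a request from its buffer group, using either the
 * mapped ring or the classic provided-buffer list, under the submit lock.
 */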
void __user *io_buffer_select(struct io_kiocb *req, size_t *len,
			      unsigned int issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_buffer_list *bl;
	void __user *ret = NULL;

	io_ring_submit_lock(req->ctx, issue_flags);

	bl = io_buffer_get_list(ctx, req->buf_index);
	if (likely(bl)) {
		if (bl->buf_nr_pages)
			ret = io_ring_buffer_select(req, len, bl, issue_flags);
		else
			ret = io_provided_buffer_select(req, len, bl);
	}
	io_ring_submit_unlock(req->ctx, issue_flags);
	return ret;
}

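/* Allocate and initialize the flat array used for the first BGID_ARRAY groups. */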
static __cold int io_init_bl_list(struct io_ring_ctx *ctx)
{
	int i;

	ctx->io_bl = kcalloc(BGID_ARRAY, sizeof(struct io_buffer_list),
			     GFP_KERNEL);
	if (!ctx->io_bl)
		return -ENOMEM;

	for (i = 0; i < BGID_ARRAY; i++) {
		INIT_LIST_HEAD(&ctx->io_bl[i].buf_list);
		ctx->io_bl[i].bgid = i;
	}

	return 0;
}

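/*
 * Remove up to nbufs buffers from a group and return how many were removed.
 * A mapped ring is torn down wholesale: its pages are unpinned and the count
 * is taken from the ring's tail/head difference.
 */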
static int __io_remove_buffers(struct io_ring_ctx *ctx,
			       struct io_buffer_list *bl, unsigned nbufs)
{
	unsigned i = 0;

	/* shouldn't happen */
	if (!nbufs)
		return 0;

	if (bl->buf_nr_pages) {
		int j;

		i = bl->buf_ring->tail - bl->head;
		for (j = 0; j < bl->buf_nr_pages; j++)
			unpin_user_page(bl->buf_pages[j]);
		kvfree(bl->buf_pages);
		bl->buf_pages = NULL;
		bl->buf_nr_pages = 0;
		/* make sure it's seen as empty */
		INIT_LIST_HEAD(&bl->buf_list);
		return i;
	}

	/* the head kbuf is the list itself */
	while (!list_empty(&bl->buf_list)) {
		struct io_buffer *nxt;

		nxt = list_first_entry(&bl->buf_list, struct io_buffer, list);
		list_del(&nxt->list);
		if (++i == nbufs)
			return i;
		cond_resched();
	}
	i++;

	return i;
}

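/* Free all buffer groups and the backing pages at ring teardown time. */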
void io_destroy_buffers(struct io_ring_ctx *ctx)
{
	struct io_buffer_list *bl;
	unsigned long index;
	int i;

	for (i = 0; i < BGID_ARRAY; i++) {
		if (!ctx->io_bl)
			break;
		__io_remove_buffers(ctx, &ctx->io_bl[i], -1U);
	}

	xa_for_each(&ctx->io_bl_xa, index, bl) {
		xa_erase(&ctx->io_bl_xa, bl->bgid);
		__io_remove_buffers(ctx, bl, -1U);
		kfree(bl);
	}

	while (!list_empty(&ctx->io_buffers_pages)) {
		struct page *page;

		page = list_first_entry(&ctx->io_buffers_pages, struct page, lru);
		list_del_init(&page->lru);
		__free_page(page);
	}
}

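/* Prepare IORING_OP_REMOVE_BUFFERS: validate unused SQE fields and the count. */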
int io_remove_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_provide_buf *p = io_kiocb_to_cmd(req);
	u64 tmp;

	if (sqe->rw_flags || sqe->addr || sqe->len || sqe->off ||
	    sqe->splice_fd_in)
		return -EINVAL;

	tmp = READ_ONCE(sqe->fd);
	if (!tmp || tmp > USHRT_MAX)
		return -EINVAL;

	memset(p, 0, sizeof(*p));
	p->nbufs = tmp;
	p->bgid = READ_ONCE(sqe->buf_group);
	return 0;
}

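/*
 * Remove buffers from a group. Fails with -EINVAL for groups backed by a
 * mapped buffer ring, which must be unregistered instead.
 */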
int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_provide_buf *p = io_kiocb_to_cmd(req);
	struct io_ring_ctx *ctx = req->ctx;
	struct io_buffer_list *bl;
	int ret = 0;

	io_ring_submit_lock(ctx, issue_flags);

	ret = -ENOENT;
	bl = io_buffer_get_list(ctx, p->bgid);
	if (bl) {
		ret = -EINVAL;
		/* can't use provide/remove buffers command on mapped buffers */
		if (!bl->buf_nr_pages)
			ret = __io_remove_buffers(ctx, bl, p->nbufs);
	}
	if (ret < 0)
		req_set_fail(req);

	/* complete before unlock, IOPOLL may need the lock */
	io_req_set_res(req, ret, 0);
	__io_req_complete(req, issue_flags);
	io_ring_submit_unlock(ctx, issue_flags);
	return IOU_ISSUE_SKIP_COMPLETE;
}

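/*
 * Prepare IORING_OP_PROVIDE_BUFFERS: validate the buffer count, the address
 * range (checking for overflow), the group ID and the starting buffer ID.
 */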
int io_provide_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	unsigned long size, tmp_check;
	struct io_provide_buf *p = io_kiocb_to_cmd(req);
	u64 tmp;

	if (sqe->rw_flags || sqe->splice_fd_in)
		return -EINVAL;

	tmp = READ_ONCE(sqe->fd);
	if (!tmp || tmp > USHRT_MAX)
		return -E2BIG;
	p->nbufs = tmp;
	p->addr = READ_ONCE(sqe->addr);
	p->len = READ_ONCE(sqe->len);

	if (check_mul_overflow((unsigned long)p->len, (unsigned long)p->nbufs,
				&size))
		return -EOVERFLOW;
	if (check_add_overflow((unsigned long)p->addr, size, &tmp_check))
		return -EOVERFLOW;

	size = (unsigned long)p->len * p->nbufs;
	if (!access_ok(u64_to_user_ptr(p->addr), size))
		return -EFAULT;

	p->bgid = READ_ONCE(sqe->buf_group);
	tmp = READ_ONCE(sqe->off);
	if (tmp > USHRT_MAX)
		return -E2BIG;
	p->bid = tmp;
	return 0;
}

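/*
 * Refill the io_buffer cache, first by stealing entries that completions
 * parked on io_buffers_comp, otherwise by carving a fresh page into
 * io_buffer entries.
 */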
static int io_refill_buffer_cache(struct io_ring_ctx *ctx)
{
	struct io_buffer *buf;
	struct page *page;
	int bufs_in_page;

	/*
	 * Completions that don't happen inline (eg not under uring_lock) will
	 * add to ->io_buffers_comp. If we don't have any free buffers, check
	 * the completion list and splice those entries first.
	 */
	if (!list_empty_careful(&ctx->io_buffers_comp)) {
		spin_lock(&ctx->completion_lock);
		if (!list_empty(&ctx->io_buffers_comp)) {
			list_splice_init(&ctx->io_buffers_comp,
					 &ctx->io_buffers_cache);
			spin_unlock(&ctx->completion_lock);
			return 0;
		}
		spin_unlock(&ctx->completion_lock);
	}

	/*
	 * No free buffers and no completion entries either. Allocate a new
	 * page worth of buffer entries and add those to our freelist.
	 */
	page = alloc_page(GFP_KERNEL_ACCOUNT);
	if (!page)
		return -ENOMEM;

	list_add(&page->lru, &ctx->io_buffers_pages);

	buf = page_address(page);
	bufs_in_page = PAGE_SIZE / sizeof(*buf);
	while (bufs_in_page) {
		list_add_tail(&buf->list, &ctx->io_buffers_cache);
		buf++;
		bufs_in_page--;
	}

	return 0;
}

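/*
 * Populate a group with nbufs buffers described by the provide request,
 * pulling io_buffer entries from the per-ring cache.
 */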
static int io_add_buffers(struct io_ring_ctx *ctx, struct io_provide_buf *pbuf,
			  struct io_buffer_list *bl)
{
	struct io_buffer *buf;
	u64 addr = pbuf->addr;
	int i, bid = pbuf->bid;

	for (i = 0; i < pbuf->nbufs; i++) {
		if (list_empty(&ctx->io_buffers_cache) &&
		    io_refill_buffer_cache(ctx))
			break;
		buf = list_first_entry(&ctx->io_buffers_cache, struct io_buffer,
				       list);
		list_move_tail(&buf->list, &bl->buf_list);
		buf->addr = addr;
		buf->len = min_t(__u32, pbuf->len, MAX_RW_COUNT);
		buf->bid = bid;
		buf->bgid = pbuf->bgid;
		addr += pbuf->len;
		bid++;
		cond_resched();
	}

	return i ? 0 : -ENOMEM;
}

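/*
 * IORING_OP_PROVIDE_BUFFERS: look up (or create) the buffer group and add
 * the described buffers to it. Not allowed for mapped buffer rings.
 */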
int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_provide_buf *p = io_kiocb_to_cmd(req);
	struct io_ring_ctx *ctx = req->ctx;
	struct io_buffer_list *bl;
	int ret = 0;

	io_ring_submit_lock(ctx, issue_flags);

	if (unlikely(p->bgid < BGID_ARRAY && !ctx->io_bl)) {
		ret = io_init_bl_list(ctx);
		if (ret)
			goto err;
	}

	bl = io_buffer_get_list(ctx, p->bgid);
	if (unlikely(!bl)) {
		bl = kzalloc(sizeof(*bl), GFP_KERNEL);
		if (!bl) {
			ret = -ENOMEM;
			goto err;
		}
		INIT_LIST_HEAD(&bl->buf_list);
		ret = io_buffer_add_list(ctx, bl, p->bgid);
		if (ret) {
			kfree(bl);
			goto err;
		}
	}
	/* can't add buffers via this command for a mapped buffer ring */
	if (bl->buf_nr_pages) {
		ret = -EINVAL;
		goto err;
	}

	ret = io_add_buffers(ctx, p, bl);
err:
	if (ret < 0)
		req_set_fail(req);
	/* complete before unlock, IOPOLL may need the lock */
	io_req_set_res(req, ret, 0);
	__io_req_complete(req, issue_flags);
	io_ring_submit_unlock(ctx, issue_flags);
	return IOU_ISSUE_SKIP_COMPLETE;
}

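/*
 * IORING_REGISTER_PBUF_RING: validate the registration, pin the userspace
 * ring pages and attach the ring to the requested buffer group.
 */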
int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
{
	struct io_uring_buf_ring *br;
	struct io_uring_buf_reg reg;
	struct io_buffer_list *bl, *free_bl = NULL;
	struct page **pages;
	int nr_pages;

	if (copy_from_user(&reg, arg, sizeof(reg)))
		return -EFAULT;

	if (reg.pad || reg.resv[0] || reg.resv[1] || reg.resv[2])
		return -EINVAL;
	if (!reg.ring_addr)
		return -EFAULT;
	if (reg.ring_addr & ~PAGE_MASK)
		return -EINVAL;
	if (!is_power_of_2(reg.ring_entries))
		return -EINVAL;

	/* cannot disambiguate full vs empty due to head/tail size */
	if (reg.ring_entries >= 65536)
		return -EINVAL;

	if (unlikely(reg.bgid < BGID_ARRAY && !ctx->io_bl)) {
		int ret = io_init_bl_list(ctx);
		if (ret)
			return ret;
	}

	bl = io_buffer_get_list(ctx, reg.bgid);
	if (bl) {
		/* if mapped buffer ring OR classic exists, don't allow */
		if (bl->buf_nr_pages || !list_empty(&bl->buf_list))
			return -EEXIST;
	} else {
		free_bl = bl = kzalloc(sizeof(*bl), GFP_KERNEL);
		if (!bl)
			return -ENOMEM;
	}

	pages = io_pin_pages(reg.ring_addr,
			     struct_size(br, bufs, reg.ring_entries),
			     &nr_pages);
	if (IS_ERR(pages)) {
		kfree(free_bl);
		return PTR_ERR(pages);
	}

	br = page_address(pages[0]);
	bl->buf_pages = pages;
	bl->buf_nr_pages = nr_pages;
	bl->nr_entries = reg.ring_entries;
	bl->buf_ring = br;
	bl->mask = reg.ring_entries - 1;
	io_buffer_add_list(ctx, bl, reg.bgid);
	return 0;
}

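/*
 * IORING_UNREGISTER_PBUF_RING: unpin a previously registered buffer ring
 * and drop the group if it lives in the xarray.
 */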
int io_unregister_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
{
	struct io_uring_buf_reg reg;
	struct io_buffer_list *bl;

	if (copy_from_user(&reg, arg, sizeof(reg)))
		return -EFAULT;
	if (reg.pad || reg.resv[0] || reg.resv[1] || reg.resv[2])
		return -EINVAL;

	bl = io_buffer_get_list(ctx, reg.bgid);
	if (!bl)
		return -ENOENT;
	if (!bl->buf_nr_pages)
		return -EINVAL;

	__io_remove_buffers(ctx, bl, -1U);
	if (bl->bgid >= BGID_ARRAY) {
		xa_erase(&ctx->io_bl_xa, bl->bgid);
		kfree(bl);
	}
	return 0;
}