// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/namei.h>
#include <linux/poll.h>
#include <linux/vmalloc.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "opdef.h"
#include "kbuf.h"
#include "memmap.h"

/* BIDs are addressed by a 16-bit field in a CQE */
#define MAX_BIDS_PER_BGID (1 << 16)

struct kmem_cache *io_buf_cachep;

struct io_provide_buf {
	struct file			*file;
	__u64				addr;
	__u32				len;
	__u32				bgid;
	__u32				nbufs;
	__u16				bid;
};

static inline struct io_buffer_list *io_buffer_get_list(struct io_ring_ctx *ctx,
							unsigned int bgid)
{
	lockdep_assert_held(&ctx->uring_lock);

	return xa_load(&ctx->io_bl_xa, bgid);
}

static int io_buffer_add_list(struct io_ring_ctx *ctx,
			      struct io_buffer_list *bl, unsigned int bgid)
{
	/*
	 * Store buffer group ID and finally mark the list as visible.
	 * The normal lookup doesn't care about the visibility as we're
	 * always under the ->uring_lock, but the RCU lookup from mmap does.
	 */
	bl->bgid = bgid;
	atomic_set(&bl->refs, 1);
	return xa_err(xa_store(&ctx->io_bl_xa, bgid, bl, GFP_KERNEL));
}

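/*
 * Return a legacy provided buffer to its group's free list so it can be
 * selected again, e.g. when a request picked a buffer but ended up not
 * consuming it and must be retried.
 */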
bool io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_buffer_list *bl;
	struct io_buffer *buf;

	io_ring_submit_lock(ctx, issue_flags);

	buf = req->kbuf;
	bl = io_buffer_get_list(ctx, buf->bgid);
	list_add(&buf->list, &bl->buf_list);
	req->flags &= ~REQ_F_BUFFER_SELECTED;
	req->buf_index = buf->bgid;

	io_ring_submit_unlock(ctx, issue_flags);
	return true;
}

void __io_put_kbuf(struct io_kiocb *req, unsigned issue_flags)
{
	/*
	 * We can add this buffer back to two lists:
	 *
	 * 1) The io_buffers_cache list. This one is protected by the
	 *    ctx->uring_lock. If we already hold this lock, add back to this
	 *    list as we can grab it from issue as well.
	 * 2) The io_buffers_comp list. This one is protected by the
	 *    ctx->completion_lock.
	 *
	 * We migrate buffers from the comp_list to the issue cache list
	 * when we need one.
	 */
	if (issue_flags & IO_URING_F_UNLOCKED) {
		struct io_ring_ctx *ctx = req->ctx;

		spin_lock(&ctx->completion_lock);
		__io_put_kbuf_list(req, &ctx->io_buffers_comp);
		spin_unlock(&ctx->completion_lock);
	} else {
		lockdep_assert_held(&req->ctx->uring_lock);

		__io_put_kbuf_list(req, &req->ctx->io_buffers_cache);
	}
}

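/*
 * Select the first buffer from a legacy provided buffer group. The buffer is
 * taken off the free list and owned by the request until it's put again via
 * __io_put_kbuf(); *len is clamped to the size of the chosen buffer.
 */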
static void __user *io_provided_buffer_select(struct io_kiocb *req, size_t *len,
					      struct io_buffer_list *bl)
{
	if (!list_empty(&bl->buf_list)) {
		struct io_buffer *kbuf;

		kbuf = list_first_entry(&bl->buf_list, struct io_buffer, list);
		list_del(&kbuf->list);
		if (*len == 0 || *len > kbuf->len)
			*len = kbuf->len;
		if (list_empty(&bl->buf_list))
			req->flags |= REQ_F_BL_EMPTY;
		req->flags |= REQ_F_BUFFER_SELECTED;
		req->kbuf = kbuf;
		req->buf_index = kbuf->bid;
		return u64_to_user_ptr(kbuf->addr);
	}
	return NULL;
}

static int io_provided_buffers_select(struct io_kiocb *req, size_t *len,
				      struct io_buffer_list *bl,
				      struct iovec *iov)
{
	void __user *buf;

	buf = io_provided_buffer_select(req, len, bl);
	if (unlikely(!buf))
		return -ENOBUFS;

	iov[0].iov_base = buf;
	iov[0].iov_len = *len;
	/* a single buffer was mapped, return the iovec count */
	return 1;
}

static struct io_uring_buf *io_ring_head_to_buf(struct io_uring_buf_ring *br,
						__u16 head, __u16 mask)
{
	return &br->bufs[head & mask];
}

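/*
 * Ring-mapped provided buffers: the kernel consumes entries at the head
 * (kept kernel-private in bl->head), while the application produces entries
 * and publishes them with a release store to br->tail. The smp_load_acquire()
 * on the tail below pairs with that store. Illustrative userspace producer
 * sketch (liburing-style helpers assumed, not part of this file):
 *
 *	io_uring_buf_ring_add(br, addr, len, bid,
 *			      io_uring_buf_ring_mask(nentries), 0);
 *	io_uring_buf_ring_advance(br, 1);
 */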
static void __user *io_ring_buffer_select(struct io_kiocb *req, size_t *len,
					  struct io_buffer_list *bl,
					  unsigned int issue_flags)
{
	struct io_uring_buf_ring *br = bl->buf_ring;
	__u16 tail, head = bl->head;
	struct io_uring_buf *buf;

	tail = smp_load_acquire(&br->tail);
	if (unlikely(tail == head))
		return NULL;

	if (head + 1 == tail)
		req->flags |= REQ_F_BL_EMPTY;

	buf = io_ring_head_to_buf(br, head, bl->mask);
	if (*len == 0 || *len > buf->len)
		*len = buf->len;
	req->flags |= REQ_F_BUFFER_RING | REQ_F_BUFFERS_COMMIT;
	req->buf_list = bl;
	req->buf_index = buf->bid;

	if (issue_flags & IO_URING_F_UNLOCKED || !io_file_can_poll(req)) {
		/*
		 * If we came in unlocked, we have no choice but to consume the
		 * buffer here, otherwise nothing ensures that the buffer won't
		 * get used by others. This does mean it'll be pinned until the
		 * IO completes, coming in unlocked means we're being called from
		 * io-wq context and there may be further retries in async hybrid
		 * mode. For the locked case, the caller must call commit when
		 * the transfer completes (or if we get -EAGAIN and must poll or
		 * retry).
		 */
		req->flags &= ~REQ_F_BUFFERS_COMMIT;
		req->buf_list = NULL;
		bl->head++;
	}
	return u64_to_user_ptr(buf->addr);
}

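/*
 * Select a buffer for a request that has IOSQE_BUFFER_SELECT set. On entry
 * req->buf_index holds the buffer group ID; once a buffer is picked, it is
 * overwritten with the chosen buffer ID.
 */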
void __user *io_buffer_select(struct io_kiocb *req, size_t *len,
			      unsigned int issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_buffer_list *bl;
	void __user *ret = NULL;

	io_ring_submit_lock(req->ctx, issue_flags);

	bl = io_buffer_get_list(ctx, req->buf_index);
	if (likely(bl)) {
		if (bl->is_buf_ring)
			ret = io_ring_buffer_select(req, len, bl, issue_flags);
		else
			ret = io_provided_buffer_select(req, len, bl);
	}
	io_ring_submit_unlock(req->ctx, issue_flags);
	return ret;
}

/* cap it at a reasonable 256, will be one page even for 4K */
#define PEEK_MAX_IMPORT		256

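/*
 * Peek as many ring buffers as are needed to satisfy arg->max_len, filling
 * arg->iovs. The ring head is not advanced here; committing the buffers is
 * left to the callers below.
 */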
static int io_ring_buffers_peek(struct io_kiocb *req, struct buf_sel_arg *arg,
				struct io_buffer_list *bl)
{
	struct io_uring_buf_ring *br = bl->buf_ring;
	struct iovec *iov = arg->iovs;
	int nr_iovs = arg->nr_iovs;
	__u16 nr_avail, tail, head;
	struct io_uring_buf *buf;

	tail = smp_load_acquire(&br->tail);
	head = bl->head;
	nr_avail = min_t(__u16, tail - head, UIO_MAXIOV);
	if (unlikely(!nr_avail))
		return -ENOBUFS;

	buf = io_ring_head_to_buf(br, head, bl->mask);
	if (arg->max_len) {
		int needed;

		needed = (arg->max_len + buf->len - 1) / buf->len;
		needed = min(needed, PEEK_MAX_IMPORT);
		if (nr_avail > needed)
			nr_avail = needed;
	}

	/*
	 * only alloc a bigger array if we know we have data to map, eg not
	 * a speculative peek operation.
	 */
	if (arg->mode & KBUF_MODE_EXPAND && nr_avail > nr_iovs && arg->max_len) {
		iov = kmalloc_array(nr_avail, sizeof(struct iovec), GFP_KERNEL);
		if (unlikely(!iov))
			return -ENOMEM;
		if (arg->mode & KBUF_MODE_FREE)
			kfree(arg->iovs);
		arg->iovs = iov;
		nr_iovs = nr_avail;
	} else if (nr_avail < nr_iovs) {
		nr_iovs = nr_avail;
	}

	/* set it to max, if not set, so we can use it unconditionally */
	if (!arg->max_len)
		arg->max_len = INT_MAX;

	req->buf_index = buf->bid;
	do {
		/* truncate end piece, if needed */
		if (buf->len > arg->max_len)
			buf->len = arg->max_len;

		iov->iov_base = u64_to_user_ptr(buf->addr);
		iov->iov_len = buf->len;
		iov++;

		arg->out_len += buf->len;
		arg->max_len -= buf->len;
		if (!arg->max_len)
			break;

		buf = io_ring_head_to_buf(br, ++head, bl->mask);
	} while (--nr_iovs);

	if (head == tail)
		req->flags |= REQ_F_BL_EMPTY;

	req->flags |= REQ_F_BUFFER_RING;
	req->buf_list = bl;
	return iov - arg->iovs;
}

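/*
 * Select one or more ring buffers and commit them immediately: the ring head
 * is advanced by the number of iovecs mapped, so the same buffers cannot be
 * handed out twice.
 */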
int io_buffers_select(struct io_kiocb *req, struct buf_sel_arg *arg,
		      unsigned int issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_buffer_list *bl;
	int ret = -ENOENT;

	io_ring_submit_lock(ctx, issue_flags);
	bl = io_buffer_get_list(ctx, req->buf_index);
	if (unlikely(!bl))
		goto out_unlock;

	if (bl->is_buf_ring) {
		ret = io_ring_buffers_peek(req, arg, bl);
		/*
		 * Don't recycle these buffers if we need to go through poll.
		 * Nobody else can use them anyway, and holding on to provided
		 * buffers for a send/write operation would happen on the app
		 * side anyway with normal buffers. Besides, we already
		 * committed them, they cannot be put back in the queue.
		 */
		if (ret > 0) {
			req->flags |= REQ_F_BL_NO_RECYCLE;
			req->buf_list->head += ret;
		}
	} else {
		ret = io_provided_buffers_select(req, &arg->out_len, bl, arg->iovs);
	}
out_unlock:
	io_ring_submit_unlock(ctx, issue_flags);
	return ret;
}

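/*
 * Like io_buffers_select(), but nothing is consumed yet: REQ_F_BUFFERS_COMMIT
 * is set instead, and the ring head only moves once the buffers are committed
 * later. The caller must already hold the uring_lock.
 */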
int io_buffers_peek(struct io_kiocb *req, struct buf_sel_arg *arg)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_buffer_list *bl;
	int ret;

	lockdep_assert_held(&ctx->uring_lock);

	bl = io_buffer_get_list(ctx, req->buf_index);
	if (unlikely(!bl))
		return -ENOENT;

	if (bl->is_buf_ring) {
		ret = io_ring_buffers_peek(req, arg, bl);
		if (ret > 0)
			req->flags |= REQ_F_BUFFERS_COMMIT;
		return ret;
	}

	/* don't support multiple buffer selections for legacy */
	return io_provided_buffers_select(req, &arg->max_len, bl, arg->iovs);
}

static int __io_remove_buffers(struct io_ring_ctx *ctx,
			       struct io_buffer_list *bl, unsigned nbufs)
{
	unsigned i = 0;

	/* shouldn't happen */
	if (!nbufs)
		return 0;

	if (bl->is_buf_ring) {
		i = bl->buf_ring->tail - bl->head;
		if (bl->buf_nr_pages) {
			int j;

			if (!bl->is_mmap) {
				for (j = 0; j < bl->buf_nr_pages; j++)
					unpin_user_page(bl->buf_pages[j]);
			}
			io_pages_unmap(bl->buf_ring, &bl->buf_pages,
					&bl->buf_nr_pages, bl->is_mmap);
			bl->is_mmap = 0;
		}
		/* make sure it's seen as empty */
		INIT_LIST_HEAD(&bl->buf_list);
		bl->is_buf_ring = 0;
		return i;
	}

	/* protects io_buffers_cache */
	lockdep_assert_held(&ctx->uring_lock);

	while (!list_empty(&bl->buf_list)) {
		struct io_buffer *nxt;

		nxt = list_first_entry(&bl->buf_list, struct io_buffer, list);
		list_move(&nxt->list, &ctx->io_buffers_cache);
		if (++i == nbufs)
			return i;
		cond_resched();
	}

	return i;
}

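/*
 * Drop a reference to a buffer list. The final put removes any remaining
 * buffers and frees the list itself via RCU, as the mmap side may still be
 * looking it up under rcu_read_lock().
 */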
void io_put_bl(struct io_ring_ctx *ctx, struct io_buffer_list *bl)
{
	if (atomic_dec_and_test(&bl->refs)) {
		__io_remove_buffers(ctx, bl, -1U);
		kfree_rcu(bl, rcu);
	}
}

void io_destroy_buffers(struct io_ring_ctx *ctx)
{
	struct io_buffer_list *bl;
	struct list_head *item, *tmp;
	struct io_buffer *buf;
	unsigned long index;

	xa_for_each(&ctx->io_bl_xa, index, bl) {
		xa_erase(&ctx->io_bl_xa, bl->bgid);
		io_put_bl(ctx, bl);
	}

	/*
	 * Move deferred locked entries to cache before pruning
	 */
	spin_lock(&ctx->completion_lock);
	if (!list_empty(&ctx->io_buffers_comp))
		list_splice_init(&ctx->io_buffers_comp, &ctx->io_buffers_cache);
	spin_unlock(&ctx->completion_lock);

	list_for_each_safe(item, tmp, &ctx->io_buffers_cache) {
		buf = list_entry(item, struct io_buffer, list);
		kmem_cache_free(io_buf_cachep, buf);
	}
}

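/*
 * IORING_OP_REMOVE_BUFFERS reuses SQE fields: sqe->fd carries the number of
 * buffers to remove and sqe->buf_group the group to remove them from; the
 * remaining fields must be zero.
 */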
int io_remove_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf);
	u64 tmp;

	if (sqe->rw_flags || sqe->addr || sqe->len || sqe->off ||
	    sqe->splice_fd_in)
		return -EINVAL;

	tmp = READ_ONCE(sqe->fd);
	if (!tmp || tmp > MAX_BIDS_PER_BGID)
		return -EINVAL;

	memset(p, 0, sizeof(*p));
	p->nbufs = tmp;
	p->bgid = READ_ONCE(sqe->buf_group);
	return 0;
}

int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf);
	struct io_ring_ctx *ctx = req->ctx;
	struct io_buffer_list *bl;
	int ret;

	io_ring_submit_lock(ctx, issue_flags);

	ret = -ENOENT;
	bl = io_buffer_get_list(ctx, p->bgid);
	if (bl) {
		ret = -EINVAL;
		/* can't use provide/remove buffers command on mapped buffers */
		if (!bl->is_buf_ring)
			ret = __io_remove_buffers(ctx, bl, p->nbufs);
	}
	io_ring_submit_unlock(ctx, issue_flags);
	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

int io_provide_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	unsigned long size, tmp_check;
	struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf);
	u64 tmp;

	if (sqe->rw_flags || sqe->splice_fd_in)
		return -EINVAL;

	tmp = READ_ONCE(sqe->fd);
	if (!tmp || tmp > MAX_BIDS_PER_BGID)
		return -E2BIG;
	p->nbufs = tmp;
	p->addr = READ_ONCE(sqe->addr);
	p->len = READ_ONCE(sqe->len);

	if (check_mul_overflow((unsigned long)p->len, (unsigned long)p->nbufs,
				&size))
		return -EOVERFLOW;
	if (check_add_overflow((unsigned long)p->addr, size, &tmp_check))
		return -EOVERFLOW;

	size = (unsigned long)p->len * p->nbufs;
	if (!access_ok(u64_to_user_ptr(p->addr), size))
		return -EFAULT;

	p->bgid = READ_ONCE(sqe->buf_group);
	tmp = READ_ONCE(sqe->off);
	if (tmp > USHRT_MAX)
		return -E2BIG;
	if (tmp + p->nbufs > MAX_BIDS_PER_BGID)
		return -EINVAL;
	p->bid = tmp;
	return 0;
}

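/*
 * Illustrative userspace counterpart for the prep above (a sketch using the
 * liburing helper, not part of this file): provide eight 4K buffers with
 * IDs 0..7 in buffer group 1. The arguments land in the SQE fields that are
 * validated above (addr/len/nbufs/bgid/bid).
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_provide_buffers(sqe, base, 4096, 8, 1, 0);
 *	io_uring_submit(&ring);
 */
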
#define IO_BUFFER_ALLOC_BATCH 64

static int io_refill_buffer_cache(struct io_ring_ctx *ctx)
{
	struct io_buffer *bufs[IO_BUFFER_ALLOC_BATCH];
	int allocated;

	/*
	 * Completions that don't happen inline (eg not under uring_lock) will
	 * add to ->io_buffers_comp. If we don't have any free buffers, check
	 * the completion list and splice those entries first.
	 */
	if (!list_empty_careful(&ctx->io_buffers_comp)) {
		spin_lock(&ctx->completion_lock);
		if (!list_empty(&ctx->io_buffers_comp)) {
			list_splice_init(&ctx->io_buffers_comp,
						&ctx->io_buffers_cache);
			spin_unlock(&ctx->completion_lock);
			return 0;
		}
		spin_unlock(&ctx->completion_lock);
	}

	/*
	 * No free buffers and no completion entries either. Allocate a new
	 * batch of buffer entries and add those to our freelist.
	 */
	allocated = kmem_cache_alloc_bulk(io_buf_cachep, GFP_KERNEL_ACCOUNT,
					  ARRAY_SIZE(bufs), (void **) bufs);
	if (unlikely(!allocated)) {
		/*
		 * Bulk alloc is all-or-nothing. If we fail to get a batch,
		 * retry single alloc to be on the safe side.
		 */
		bufs[0] = kmem_cache_alloc(io_buf_cachep, GFP_KERNEL);
		if (!bufs[0])
			return -ENOMEM;
		allocated = 1;
	}

	while (allocated)
		list_add_tail(&bufs[--allocated]->list, &ctx->io_buffers_cache);

	return 0;
}

static int io_add_buffers(struct io_ring_ctx *ctx, struct io_provide_buf *pbuf,
			  struct io_buffer_list *bl)
{
	struct io_buffer *buf;
	u64 addr = pbuf->addr;
	int i, bid = pbuf->bid;

	for (i = 0; i < pbuf->nbufs; i++) {
		if (list_empty(&ctx->io_buffers_cache) &&
		    io_refill_buffer_cache(ctx))
			break;
		buf = list_first_entry(&ctx->io_buffers_cache, struct io_buffer,
					list);
		list_move_tail(&buf->list, &bl->buf_list);
		buf->addr = addr;
		buf->len = min_t(__u32, pbuf->len, MAX_RW_COUNT);
		buf->bid = bid;
		buf->bgid = pbuf->bgid;
		addr += pbuf->len;
		bid++;
		cond_resched();
	}

	return i ? 0 : -ENOMEM;
}

int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf);
	struct io_ring_ctx *ctx = req->ctx;
	struct io_buffer_list *bl;
	int ret = 0;

	io_ring_submit_lock(ctx, issue_flags);

	bl = io_buffer_get_list(ctx, p->bgid);
	if (unlikely(!bl)) {
		bl = kzalloc(sizeof(*bl), GFP_KERNEL_ACCOUNT);
		if (!bl) {
			ret = -ENOMEM;
			goto err;
		}
		INIT_LIST_HEAD(&bl->buf_list);
		ret = io_buffer_add_list(ctx, bl, p->bgid);
		if (ret) {
			/*
			 * Doesn't need rcu free as it was never visible, but
			 * let's keep it consistent throughout.
			 */
			kfree_rcu(bl, rcu);
			goto err;
		}
	}
	/* can't add buffers via this command for a mapped buffer ring */
	if (bl->is_buf_ring) {
		ret = -EINVAL;
		goto err;
	}

	ret = io_add_buffers(ctx, p, bl);
err:
	io_ring_submit_unlock(ctx, issue_flags);

	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

static int io_pin_pbuf_ring(struct io_uring_buf_reg *reg,
			    struct io_buffer_list *bl)
{
	struct io_uring_buf_ring *br = NULL;
	struct page **pages;
	int nr_pages, ret;

	pages = io_pin_pages(reg->ring_addr,
			     flex_array_size(br, bufs, reg->ring_entries),
			     &nr_pages);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	br = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
	if (!br) {
		ret = -ENOMEM;
		goto error_unpin;
	}

#ifdef SHM_COLOUR
	/*
	 * On platforms that have specific aliasing requirements, SHM_COLOUR
	 * is set and we must guarantee that the kernel and user side align
	 * nicely. We cannot do that if IOU_PBUF_RING_MMAP isn't set and
	 * the application mmap's the provided ring buffer. Fail the request
	 * if we, by chance, don't end up with aligned addresses. The app
	 * should use IOU_PBUF_RING_MMAP instead, and liburing will handle
	 * this transparently.
	 */
	if ((reg->ring_addr | (unsigned long) br) & (SHM_COLOUR - 1)) {
		ret = -EINVAL;
		goto error_unpin;
	}
#endif
	bl->buf_pages = pages;
	bl->buf_nr_pages = nr_pages;
	bl->buf_ring = br;
	bl->is_buf_ring = 1;
	bl->is_mmap = 0;
	return 0;
error_unpin:
	unpin_user_pages(pages, nr_pages);
	kvfree(pages);
	vunmap(br);
	return ret;
}

static int io_alloc_pbuf_ring(struct io_ring_ctx *ctx,
			      struct io_uring_buf_reg *reg,
			      struct io_buffer_list *bl)
{
	size_t ring_size;

	ring_size = reg->ring_entries * sizeof(struct io_uring_buf_ring);

	bl->buf_ring = io_pages_map(&bl->buf_pages, &bl->buf_nr_pages, ring_size);
	if (IS_ERR(bl->buf_ring)) {
		bl->buf_ring = NULL;
		return -ENOMEM;
	}

	bl->is_buf_ring = 1;
	bl->is_mmap = 1;
	return 0;
}

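/*
 * Register a ring-mapped provided buffer group (IORING_REGISTER_PBUF_RING).
 * The ring memory is either pinned from userspace, or kernel-allocated and
 * later mmap'ed by the application when IOU_PBUF_RING_MMAP is set.
 * Illustrative userspace sketch (liburing helper assumed, not part of this
 * file): set up a 256-entry ring for buffer group 1:
 *
 *	int err;
 *	struct io_uring_buf_ring *br;
 *
 *	br = io_uring_setup_buf_ring(&ring, 256, 1, 0, &err);
 */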
int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
{
	struct io_uring_buf_reg reg;
	struct io_buffer_list *bl, *free_bl = NULL;
	int ret;

	lockdep_assert_held(&ctx->uring_lock);

	if (copy_from_user(&reg, arg, sizeof(reg)))
		return -EFAULT;

	if (reg.resv[0] || reg.resv[1] || reg.resv[2])
		return -EINVAL;
	if (reg.flags & ~IOU_PBUF_RING_MMAP)
		return -EINVAL;
	if (!(reg.flags & IOU_PBUF_RING_MMAP)) {
		if (!reg.ring_addr)
			return -EFAULT;
		if (reg.ring_addr & ~PAGE_MASK)
			return -EINVAL;
	} else {
		if (reg.ring_addr)
			return -EINVAL;
	}

	if (!is_power_of_2(reg.ring_entries))
		return -EINVAL;

	/* cannot disambiguate full vs empty due to head/tail size */
	if (reg.ring_entries >= 65536)
		return -EINVAL;

	bl = io_buffer_get_list(ctx, reg.bgid);
	if (bl) {
		/* if mapped buffer ring OR classic exists, don't allow */
		if (bl->is_buf_ring || !list_empty(&bl->buf_list))
			return -EEXIST;
	} else {
		free_bl = bl = kzalloc(sizeof(*bl), GFP_KERNEL);
		if (!bl)
			return -ENOMEM;
	}

	if (!(reg.flags & IOU_PBUF_RING_MMAP))
		ret = io_pin_pbuf_ring(&reg, bl);
	else
		ret = io_alloc_pbuf_ring(ctx, &reg, bl);

	if (!ret) {
		bl->nr_entries = reg.ring_entries;
		bl->mask = reg.ring_entries - 1;

		io_buffer_add_list(ctx, bl, reg.bgid);
		return 0;
	}

	kfree_rcu(free_bl, rcu);
	return ret;
}

int io_unregister_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
{
	struct io_uring_buf_reg reg;
	struct io_buffer_list *bl;

	lockdep_assert_held(&ctx->uring_lock);

	if (copy_from_user(&reg, arg, sizeof(reg)))
		return -EFAULT;
	if (reg.resv[0] || reg.resv[1] || reg.resv[2])
		return -EINVAL;
	if (reg.flags)
		return -EINVAL;

	bl = io_buffer_get_list(ctx, reg.bgid);
	if (!bl)
		return -ENOENT;
	if (!bl->is_buf_ring)
		return -EINVAL;

	xa_erase(&ctx->io_bl_xa, bl->bgid);
	io_put_bl(ctx, bl);
	return 0;
}

int io_register_pbuf_status(struct io_ring_ctx *ctx, void __user *arg)
{
	struct io_uring_buf_status buf_status;
	struct io_buffer_list *bl;
	int i;

	if (copy_from_user(&buf_status, arg, sizeof(buf_status)))
		return -EFAULT;

	for (i = 0; i < ARRAY_SIZE(buf_status.resv); i++)
		if (buf_status.resv[i])
			return -EINVAL;

	bl = io_buffer_get_list(ctx, buf_status.buf_group);
	if (!bl)
		return -ENOENT;
	if (!bl->is_buf_ring)
		return -EINVAL;

	buf_status.head = bl->head;
	if (copy_to_user(arg, &buf_status, sizeof(buf_status)))
		return -EFAULT;

	return 0;
}

struct io_buffer_list *io_pbuf_get_bl(struct io_ring_ctx *ctx,
				      unsigned long bgid)
{
	struct io_buffer_list *bl;
	bool ret;

	/*
	 * We have to be a bit careful here - we're inside mmap and cannot grab
	 * the uring_lock. This means the buffer_list could be simultaneously
	 * going away, if someone is trying to be sneaky. Look it up under rcu
	 * so we know it's not going away, and attempt to grab a reference to
	 * it. If the ref is already zero, then fail the mapping. If successful,
	 * the caller will call io_put_bl() to drop the reference at the end.
	 * This may then safely free the buffer_list (and drop the pages) at
	 * that point, vm_insert_pages() would've already grabbed the
	 * necessary vma references.
	 */
	rcu_read_lock();
	bl = xa_load(&ctx->io_bl_xa, bgid);
	/* must be a mmap'able buffer ring and have pages */
	ret = false;
	if (bl && bl->is_mmap)
		ret = atomic_inc_not_zero(&bl->refs);
	rcu_read_unlock();

	if (ret)
		return bl;

	return ERR_PTR(-EINVAL);
}

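/*
 * mmap() of a ring-mapped buffer group. The buffer group ID is encoded in
 * the mmap offset (see IORING_OFF_PBUF_SHIFT); the reference grabbed in
 * io_pbuf_get_bl() is dropped again once the pages have been mapped.
 */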
int io_pbuf_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct io_ring_ctx *ctx = file->private_data;
	loff_t pgoff = vma->vm_pgoff << PAGE_SHIFT;
	struct io_buffer_list *bl;
	int bgid, ret;

	bgid = (pgoff & ~IORING_OFF_MMAP_MASK) >> IORING_OFF_PBUF_SHIFT;
	bl = io_pbuf_get_bl(ctx, bgid);
	if (IS_ERR(bl))
		return PTR_ERR(bl);

	ret = io_uring_mmap_pages(ctx, vma, bl->buf_pages, bl->buf_nr_pages);
	io_put_bl(ctx, bl);
	return ret;
}