#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/err.h>
#include <linux/irq_work.h>
#include <linux/slab.h>
#include <linux/filter.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include <linux/poll.h>
#include <linux/kmemleak.h>
#include <uapi/linux/btf.h>
#include <linux/btf_ids.h>

#define RINGBUF_CREATE_FLAG_MASK (BPF_F_NUMA_NODE)

/* non-mmap()'able part of bpf_ringbuf (everything up to consumer page) */
#define RINGBUF_PGOFF \
	(offsetof(struct bpf_ringbuf, consumer_pos) >> PAGE_SHIFT)
/* consumer page and producer page */
#define RINGBUF_POS_PAGES 2
#define RINGBUF_NR_META_PAGES (RINGBUF_PGOFF + RINGBUF_POS_PAGES)

#define RINGBUF_MAX_RECORD_SZ (UINT_MAX/4)

/* Maximum size of ring buffer area is limited by 32-bit page offset within
 * record header, counted in pages. Reserve 8 bits for extensibility, and take
 * into account few extra pages for consumer/producer pages and
 * non-mmap()'able parts. This gives 64GB limit, which seems plenty for single
 * ring buffer.
 */
#define RINGBUF_MAX_DATA_SZ \
	(((1ULL << 24) - RINGBUF_POS_PAGES - RINGBUF_PGOFF) * PAGE_SIZE)

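/* Worked example, assuming 4KB pages and that everything preceding
 * consumer_pos fits in a single page (RINGBUF_PGOFF == 1): the limit is
 * (2^24 - 2 - 1) * 4096 bytes, i.e. just under 64GB.
 */
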
struct bpf_ringbuf {
	wait_queue_head_t waitq;
	struct irq_work work;
	u64 mask;
	struct page **pages;
	int nr_pages;
	spinlock_t spinlock ____cacheline_aligned_in_smp;
	/* For user-space producer ring buffers, an atomic_t busy bit is used
	 * to synchronize access to the ring buffers in the kernel, rather than
	 * the spinlock that is used for kernel-producer ring buffers. This is
	 * done because the ring buffer must hold a lock across a BPF program's
	 * callback:
	 *
	 *    __bpf_user_ringbuf_peek() // lock acquired
	 * -> program callback_fn()
	 * -> __bpf_user_ringbuf_sample_release() // lock released
	 *
	 * It is unsafe and incorrect to hold an IRQ spinlock across what could
	 * be a long execution window, so we instead simply disallow concurrent
	 * access to the ring buffer by kernel consumers, and return -EBUSY from
	 * __bpf_user_ringbuf_peek() if the busy bit is held by another task.
	 */
	atomic_t busy ____cacheline_aligned_in_smp;
	/* Consumer and producer counters are put into separate pages to
	 * allow each position to be mapped with different permissions.
	 * This prevents a user-space application from modifying the
	 * position and ruining in-kernel tracking. The permissions of the
	 * pages depend on who is producing samples: user-space or the
	 * kernel.
	 *
	 * Kernel-producer
	 * ---------------
	 * The producer position and data pages are mapped as r/o in
	 * userspace. For this approach, bits in the header of samples are
	 * used to signal to user-space, and to other producers, whether a
	 * sample is currently being written.
	 *
	 * User-space producer
	 * -------------------
	 * Only the page containing the consumer position is mapped r/o in
	 * user-space. User-space producers also use bits of the header to
	 * communicate to the kernel, but the kernel must carefully check and
	 * validate each sample to ensure that they're correctly formatted, and
	 * fully contained within the ring buffer.
	 */
	unsigned long consumer_pos __aligned(PAGE_SIZE);
	unsigned long producer_pos __aligned(PAGE_SIZE);
	char data[] __aligned(PAGE_SIZE);
};

struct bpf_ringbuf_map {
	struct bpf_map map;
	struct bpf_ringbuf *rb;
};

/* 8-byte ring buffer record header structure */
struct bpf_ringbuf_hdr {
	u32 len;
	u32 pg_off;
};

static struct bpf_ringbuf *bpf_ringbuf_area_alloc(size_t data_sz, int numa_node)
{
	const gfp_t flags = GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL |
			    __GFP_NOWARN | __GFP_ZERO;
	int nr_meta_pages = RINGBUF_NR_META_PAGES;
	int nr_data_pages = data_sz >> PAGE_SHIFT;
	int nr_pages = nr_meta_pages + nr_data_pages;
	struct page **pages, *page;
	struct bpf_ringbuf *rb;
	size_t array_size;
	int i;

	/* Each data page is mapped twice to allow "virtual"
	 * continuous read of samples wrapping around the end of ring
	 * buffer area:
	 * ------------------------------------------------------
	 * | meta pages |  real data pages  |  same data pages  |
	 * ------------------------------------------------------
	 * |            | 1 2 3 4 5 6 7 8 9 | 1 2 3 4 5 6 7 8 9 |
	 * ------------------------------------------------------
	 * |            | TA             DA | TA             DA |
	 * ------------------------------------------------------
	 *                               ^^^^^^^
	 *                                  |
	 * Here, no need to worry about special handling of wrapped-around
	 * data due to double-mapped data pages. This works both in kernel and
	 * when mmap()'ed in user-space, simplifying both kernel and
	 * user-space implementations significantly.
	 */
	array_size = (nr_meta_pages + 2 * nr_data_pages) * sizeof(*pages);
	pages = bpf_map_area_alloc(array_size, numa_node);
	if (!pages)
		return NULL;

	for (i = 0; i < nr_pages; i++) {
		page = alloc_pages_node(numa_node, flags, 0);
		if (!page) {
			nr_pages = i;
			goto err_free_pages;
		}
		pages[i] = page;
		if (i >= nr_meta_pages)
			pages[nr_data_pages + i] = page;
	}

	rb = vmap(pages, nr_meta_pages + 2 * nr_data_pages,
		  VM_MAP | VM_USERMAP, PAGE_KERNEL);
	if (rb) {
		kmemleak_not_leak(pages);
		rb->pages = pages;
		rb->nr_pages = nr_pages;
		return rb;
	}

err_free_pages:
	for (i = 0; i < nr_pages; i++)
		__free_page(pages[i]);
	bpf_map_area_free(pages);
	return NULL;
}

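/* Concrete illustration of the double mapping above: with 9 data pages, a
 * record that begins near the end of page 9 and wraps around is read through
 * virtual pages 10, 11, ... which alias physical data pages 1, 2, ..., so
 * both the kernel and user-space see it as one contiguous byte range.
 */
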
static void bpf_ringbuf_notify(struct irq_work *work)
{
	struct bpf_ringbuf *rb = container_of(work, struct bpf_ringbuf, work);

	wake_up_all(&rb->waitq);
}

static struct bpf_ringbuf *bpf_ringbuf_alloc(size_t data_sz, int numa_node)
{
	struct bpf_ringbuf *rb;

	rb = bpf_ringbuf_area_alloc(data_sz, numa_node);
	if (!rb)
		return NULL;

	spin_lock_init(&rb->spinlock);
	atomic_set(&rb->busy, 0);
	init_waitqueue_head(&rb->waitq);
	init_irq_work(&rb->work, bpf_ringbuf_notify);

	rb->mask = data_sz - 1;
	rb->consumer_pos = 0;
	rb->producer_pos = 0;

	return rb;
}

static struct bpf_map *ringbuf_map_alloc(union bpf_attr *attr)
{
	struct bpf_ringbuf_map *rb_map;

	if (attr->map_flags & ~RINGBUF_CREATE_FLAG_MASK)
		return ERR_PTR(-EINVAL);

	if (attr->key_size || attr->value_size ||
	    !is_power_of_2(attr->max_entries) ||
	    !PAGE_ALIGNED(attr->max_entries))
		return ERR_PTR(-EINVAL);

#ifdef CONFIG_64BIT
	/* on 32-bit arch, it's impossible to overflow record's hdr->pgoff */
	if (attr->max_entries > RINGBUF_MAX_DATA_SZ)
		return ERR_PTR(-E2BIG);
#endif

	rb_map = bpf_map_area_alloc(sizeof(*rb_map), NUMA_NO_NODE);
	if (!rb_map)
		return ERR_PTR(-ENOMEM);

	bpf_map_init_from_attr(&rb_map->map, attr);

	rb_map->rb = bpf_ringbuf_alloc(attr->max_entries, rb_map->map.numa_node);
	if (!rb_map->rb) {
		bpf_map_area_free(rb_map);
		return ERR_PTR(-ENOMEM);
	}

	return &rb_map->map;
}

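/* Sketch of how user-space creates this map so it passes the checks above
 * (using libbpf's bpf_map_create(); the 4MB size is an arbitrary example,
 * page-aligned and a power of two):
 *
 *	int rb_fd = bpf_map_create(BPF_MAP_TYPE_RINGBUF, "rb", 0, 0,
 *				   4096 * 1024, NULL);
 *
 * key_size and value_size must be 0; max_entries is the data area size in
 * bytes, not a record count.
 */
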
static void bpf_ringbuf_free(struct bpf_ringbuf *rb)
{
	/* copy pages pointer and nr_pages to local variable, as we are going
	 * to unmap rb itself with vunmap() below
	 */
	struct page **pages = rb->pages;
	int i, nr_pages = rb->nr_pages;

	vunmap(rb);
	for (i = 0; i < nr_pages; i++)
		__free_page(pages[i]);
	bpf_map_area_free(pages);
}

static void ringbuf_map_free(struct bpf_map *map)
{
	struct bpf_ringbuf_map *rb_map;

	rb_map = container_of(map, struct bpf_ringbuf_map, map);
	bpf_ringbuf_free(rb_map->rb);
	bpf_map_area_free(rb_map);
}

static void *ringbuf_map_lookup_elem(struct bpf_map *map, void *key)
{
	return ERR_PTR(-ENOTSUPP);
}

static long ringbuf_map_update_elem(struct bpf_map *map, void *key, void *value,
				    u64 flags)
{
	return -ENOTSUPP;
}

static long ringbuf_map_delete_elem(struct bpf_map *map, void *key)
{
	return -ENOTSUPP;
}

static int ringbuf_map_get_next_key(struct bpf_map *map, void *key,
				    void *next_key)
{
	return -ENOTSUPP;
}

static int ringbuf_map_mmap_kern(struct bpf_map *map, struct vm_area_struct *vma)
{
	struct bpf_ringbuf_map *rb_map;

	rb_map = container_of(map, struct bpf_ringbuf_map, map);

	if (vma->vm_flags & VM_WRITE) {
		/* allow writable mapping for the consumer_pos only */
		if (vma->vm_pgoff != 0 || vma->vm_end - vma->vm_start != PAGE_SIZE)
			return -EPERM;
	} else {
		vm_flags_clear(vma, VM_MAYWRITE);
	}
	/* remap_vmalloc_range() checks size and offset constraints */
	return remap_vmalloc_range(vma, rb_map->rb,
				   vma->vm_pgoff + RINGBUF_PGOFF);
}

static int ringbuf_map_mmap_user(struct bpf_map *map, struct vm_area_struct *vma)
{
	struct bpf_ringbuf_map *rb_map;

	rb_map = container_of(map, struct bpf_ringbuf_map, map);

	if (vma->vm_flags & VM_WRITE) {
		if (vma->vm_pgoff == 0)
			/* Disallow writable mappings to the consumer pointer,
			 * and allow writable mappings to both the producer
			 * position, and the ring buffer data itself.
			 */
			return -EPERM;
	} else {
		vm_flags_clear(vma, VM_MAYWRITE);
	}
	/* remap_vmalloc_range() checks size and offset constraints */
	return remap_vmalloc_range(vma, rb_map->rb, vma->vm_pgoff + RINGBUF_PGOFF);
}

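/* Sketch of the mmap() calls a user-space consumer of a kernel-producer ring
 * buffer is expected to make (page_sz and data_sz are illustrative names for
 * the system page size and the map's max_entries):
 *
 *	// consumer_pos page, writable so the consumer can advance it
 *	cons = mmap(NULL, page_sz, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		    rb_fd, 0);
 *	// producer_pos page plus double-mapped data, read-only
 *	prod = mmap(NULL, page_sz + 2 * data_sz, PROT_READ, MAP_SHARED,
 *		    rb_fd, page_sz);
 *
 * Any writable mapping that is not exactly the consumer_pos page is rejected
 * above with -EPERM.
 */
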
static unsigned long ringbuf_avail_data_sz(struct bpf_ringbuf *rb)
{
	unsigned long cons_pos, prod_pos;

	cons_pos = smp_load_acquire(&rb->consumer_pos);
	prod_pos = smp_load_acquire(&rb->producer_pos);
	return prod_pos - cons_pos;
}

static u32 ringbuf_total_data_sz(const struct bpf_ringbuf *rb)
{
	return rb->mask + 1;
}

static __poll_t ringbuf_map_poll_kern(struct bpf_map *map, struct file *filp,
				      struct poll_table_struct *pts)
{
	struct bpf_ringbuf_map *rb_map;

	rb_map = container_of(map, struct bpf_ringbuf_map, map);
	poll_wait(filp, &rb_map->rb->waitq, pts);

	if (ringbuf_avail_data_sz(rb_map->rb))
		return EPOLLIN | EPOLLRDNORM;
	return 0;
}

static __poll_t ringbuf_map_poll_user(struct bpf_map *map, struct file *filp,
				      struct poll_table_struct *pts)
{
	struct bpf_ringbuf_map *rb_map;

	rb_map = container_of(map, struct bpf_ringbuf_map, map);
	poll_wait(filp, &rb_map->rb->waitq, pts);

	if (ringbuf_avail_data_sz(rb_map->rb) < ringbuf_total_data_sz(rb_map->rb))
		return EPOLLOUT | EPOLLWRNORM;
	return 0;
}

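/* Sketch of how user-space typically waits on the map FD (kernel-producer
 * case; epfd setup elided):
 *
 *	struct epoll_event ev = { .events = EPOLLIN };
 *
 *	epoll_ctl(epfd, EPOLL_CTL_ADD, rb_fd, &ev);
 *	epoll_wait(epfd, &ev, 1, timeout_ms);	// woken via rb->work above
 *
 * For user-producer ring buffers the relevant event is EPOLLOUT instead:
 * readiness to produce, i.e. the ring buffer is not completely full.
 */
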
static u64 ringbuf_map_mem_usage(const struct bpf_map *map)
{
	struct bpf_ringbuf *rb;
	int nr_data_pages;
	int nr_meta_pages;
	u64 usage = sizeof(struct bpf_ringbuf_map);

	rb = container_of(map, struct bpf_ringbuf_map, map)->rb;
	usage += (u64)rb->nr_pages << PAGE_SHIFT;
	nr_meta_pages = RINGBUF_NR_META_PAGES;
	nr_data_pages = map->max_entries >> PAGE_SHIFT;
	usage += (nr_meta_pages + 2 * nr_data_pages) * sizeof(struct page *);
	return usage;
}

BTF_ID_LIST_SINGLE(ringbuf_map_btf_ids, struct, bpf_ringbuf_map)
const struct bpf_map_ops ringbuf_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc = ringbuf_map_alloc,
	.map_free = ringbuf_map_free,
	.map_mmap = ringbuf_map_mmap_kern,
	.map_poll = ringbuf_map_poll_kern,
	.map_lookup_elem = ringbuf_map_lookup_elem,
	.map_update_elem = ringbuf_map_update_elem,
	.map_delete_elem = ringbuf_map_delete_elem,
	.map_get_next_key = ringbuf_map_get_next_key,
	.map_mem_usage = ringbuf_map_mem_usage,
	.map_btf_id = &ringbuf_map_btf_ids[0],
};

BTF_ID_LIST_SINGLE(user_ringbuf_map_btf_ids, struct, bpf_ringbuf_map)
const struct bpf_map_ops user_ringbuf_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc = ringbuf_map_alloc,
	.map_free = ringbuf_map_free,
	.map_mmap = ringbuf_map_mmap_user,
	.map_poll = ringbuf_map_poll_user,
	.map_lookup_elem = ringbuf_map_lookup_elem,
	.map_update_elem = ringbuf_map_update_elem,
	.map_delete_elem = ringbuf_map_delete_elem,
	.map_get_next_key = ringbuf_map_get_next_key,
	.map_mem_usage = ringbuf_map_mem_usage,
	.map_btf_id = &user_ringbuf_map_btf_ids[0],
};

/* Given pointer to ring buffer record metadata and struct bpf_ringbuf itself,
 * calculate offset from record metadata to ring buffer in pages, rounded
 * down. This page offset is stored as part of record metadata and allows to
 * restore struct bpf_ringbuf * from record pointer. This page offset is
 * stored at offset 4 of record metadata header.
 */
static size_t bpf_ringbuf_rec_pg_off(struct bpf_ringbuf *rb,
				     struct bpf_ringbuf_hdr *hdr)
{
	return ((void *)hdr - (void *)rb) >> PAGE_SHIFT;
}

/* Given pointer to ring buffer record header, restore pointer to struct
 * bpf_ringbuf itself by using page offset stored at offset 4
 */
static struct bpf_ringbuf *
bpf_ringbuf_restore_from_rec(struct bpf_ringbuf_hdr *hdr)
{
	unsigned long addr = (unsigned long)(void *)hdr;
	unsigned long off = (unsigned long)hdr->pg_off << PAGE_SHIFT;

	return (void*)((addr & PAGE_MASK) - off);
}

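/* Worked round-trip, assuming 4KB pages: for a header at rb + 3 * 4096 + 16,
 * pg_off is stored as 3; masking the header address down to its page yields
 * rb + 3 * 4096, and subtracting 3 pages recovers rb. This relies on rb
 * being page-aligned, which vmap() guarantees.
 */
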
static void *__bpf_ringbuf_reserve(struct bpf_ringbuf *rb, u64 size)
{
	unsigned long cons_pos, prod_pos, new_prod_pos, flags;
	u32 len, pg_off;
	struct bpf_ringbuf_hdr *hdr;

	if (unlikely(size > RINGBUF_MAX_RECORD_SZ))
		return NULL;

	len = round_up(size + BPF_RINGBUF_HDR_SZ, 8);
	if (len > ringbuf_total_data_sz(rb))
		return NULL;

	cons_pos = smp_load_acquire(&rb->consumer_pos);

	if (in_nmi()) {
		if (!spin_trylock_irqsave(&rb->spinlock, flags))
			return NULL;
	} else {
		spin_lock_irqsave(&rb->spinlock, flags);
	}

	prod_pos = rb->producer_pos;
	new_prod_pos = prod_pos + len;

	/* check for out of ringbuf space by ensuring producer position
	 * doesn't advance more than (ringbuf_size - 1) ahead
	 */
	if (new_prod_pos - cons_pos > rb->mask) {
		spin_unlock_irqrestore(&rb->spinlock, flags);
		return NULL;
	}

	hdr = (void *)rb->data + (prod_pos & rb->mask);
	pg_off = bpf_ringbuf_rec_pg_off(rb, hdr);
	hdr->len = size | BPF_RINGBUF_BUSY_BIT;
	hdr->pg_off = pg_off;

	/* pairs with consumer's smp_load_acquire() */
	smp_store_release(&rb->producer_pos, new_prod_pos);

	spin_unlock_irqrestore(&rb->spinlock, flags);

	return (void *)hdr + BPF_RINGBUF_HDR_SZ;
}

BPF_CALL_3(bpf_ringbuf_reserve, struct bpf_map *, map, u64, size, u64, flags)
{
	struct bpf_ringbuf_map *rb_map;

	if (unlikely(flags))
		return 0;

	rb_map = container_of(map, struct bpf_ringbuf_map, map);
	return (unsigned long)__bpf_ringbuf_reserve(rb_map->rb, size);
}

const struct bpf_func_proto bpf_ringbuf_reserve_proto = {
	.func = bpf_ringbuf_reserve,
	.ret_type = RET_PTR_TO_RINGBUF_MEM_OR_NULL,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_CONST_ALLOC_SIZE_OR_ZERO,
	.arg3_type = ARG_ANYTHING,
};

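/* Sketch of the intended BPF-program-side usage (the map "rb" and struct
 * event are hypothetical):
 *
 *	struct event *e = bpf_ringbuf_reserve(&rb, sizeof(*e), 0);
 *
 *	if (!e)
 *		return 0;	// buffer full: sample is dropped
 *	e->pid = bpf_get_current_pid_tgid() >> 32;
 *	bpf_ringbuf_submit(e, 0);	// or bpf_ringbuf_discard(e, 0)
 *
 * Between reserve and submit/discard the record header carries
 * BPF_RINGBUF_BUSY_BIT, so consumers will not read past it.
 */
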
static void bpf_ringbuf_commit(void *sample, u64 flags, bool discard)
{
	unsigned long rec_pos, cons_pos;
	struct bpf_ringbuf_hdr *hdr;
	struct bpf_ringbuf *rb;
	u32 new_len;

	hdr = sample - BPF_RINGBUF_HDR_SZ;
	rb = bpf_ringbuf_restore_from_rec(hdr);
	new_len = hdr->len ^ BPF_RINGBUF_BUSY_BIT;
	if (discard)
		new_len |= BPF_RINGBUF_DISCARD_BIT;

	/* update record header with correct final size prefix */
	xchg(&hdr->len, new_len);

	/* if consumer caught up and is waiting for our record, notify about
	 * new data availability
	 */
	rec_pos = (void *)hdr - (void *)rb->data;
	cons_pos = smp_load_acquire(&rb->consumer_pos) & rb->mask;

	if (flags & BPF_RB_FORCE_WAKEUP)
		irq_work_queue(&rb->work);
	else if (cons_pos == rec_pos && !(flags & BPF_RB_NO_WAKEUP))
		irq_work_queue(&rb->work);
}

BPF_CALL_2(bpf_ringbuf_submit, void *, sample, u64, flags)
{
	bpf_ringbuf_commit(sample, flags, false /* discard */);
	return 0;
}

const struct bpf_func_proto bpf_ringbuf_submit_proto = {
	.func = bpf_ringbuf_submit,
	.ret_type = RET_VOID,
	.arg1_type = ARG_PTR_TO_RINGBUF_MEM | OBJ_RELEASE,
	.arg2_type = ARG_ANYTHING,
};

BPF_CALL_2(bpf_ringbuf_discard, void *, sample, u64, flags)
{
	bpf_ringbuf_commit(sample, flags, true /* discard */);
	return 0;
}

const struct bpf_func_proto bpf_ringbuf_discard_proto = {
	.func = bpf_ringbuf_discard,
	.ret_type = RET_VOID,
	.arg1_type = ARG_PTR_TO_RINGBUF_MEM | OBJ_RELEASE,
	.arg2_type = ARG_ANYTHING,
};

BPF_CALL_4(bpf_ringbuf_output, struct bpf_map *, map, void *, data, u64, size,
	   u64, flags)
{
	struct bpf_ringbuf_map *rb_map;
	void *rec;

	if (unlikely(flags & ~(BPF_RB_NO_WAKEUP | BPF_RB_FORCE_WAKEUP)))
		return -EINVAL;

	rb_map = container_of(map, struct bpf_ringbuf_map, map);
	rec = __bpf_ringbuf_reserve(rb_map->rb, size);
	if (!rec)
		return -EAGAIN;

	memcpy(rec, data, size);
	bpf_ringbuf_commit(rec, flags, false /* discard */);
	return 0;
}

const struct bpf_func_proto bpf_ringbuf_output_proto = {
	.func = bpf_ringbuf_output,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg3_type = ARG_CONST_SIZE_OR_ZERO,
	.arg4_type = ARG_ANYTHING,
};

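/* Unlike the zero-copy reserve/submit pair, bpf_ringbuf_output() copies an
 * already-built buffer, which also allows sizes the verifier cannot prove
 * constant. Sketch (map "rb" and struct event are hypothetical):
 *
 *	struct event e = { .pid = bpf_get_current_pid_tgid() >> 32 };
 *
 *	bpf_ringbuf_output(&rb, &e, sizeof(e), 0);	// 0, -EINVAL or -EAGAIN
 *
 * The extra memcpy() is the cost relative to reserve/submit.
 */
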
BPF_CALL_2(bpf_ringbuf_query, struct bpf_map *, map, u64, flags)
{
	struct bpf_ringbuf *rb;

	rb = container_of(map, struct bpf_ringbuf_map, map)->rb;

	switch (flags) {
	case BPF_RB_AVAIL_DATA:
		return ringbuf_avail_data_sz(rb);
	case BPF_RB_RING_SIZE:
		return ringbuf_total_data_sz(rb);
	case BPF_RB_CONS_POS:
		return smp_load_acquire(&rb->consumer_pos);
	case BPF_RB_PROD_POS:
		return smp_load_acquire(&rb->producer_pos);
	default:
		return 0;
	}
}

const struct bpf_func_proto bpf_ringbuf_query_proto = {
	.func = bpf_ringbuf_query,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_ANYTHING,
};

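/* Sketch of program-side use (map "rb" is hypothetical):
 *
 *	u64 avail = bpf_ringbuf_query(&rb, BPF_RB_AVAIL_DATA);
 *
 * The returned values are momentary snapshots; producers and consumers may
 * have moved the positions by the time the program acts on them.
 */
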
BPF_CALL_4(bpf_ringbuf_reserve_dynptr, struct bpf_map *, map, u32, size, u64, flags,
	   struct bpf_dynptr_kern *, ptr)
{
	struct bpf_ringbuf_map *rb_map;
	void *sample;
	int err;

	if (unlikely(flags)) {
		bpf_dynptr_set_null(ptr);
		return -EINVAL;
	}

	err = bpf_dynptr_check_size(size);
	if (err) {
		bpf_dynptr_set_null(ptr);
		return err;
	}

	rb_map = container_of(map, struct bpf_ringbuf_map, map);

	sample = __bpf_ringbuf_reserve(rb_map->rb, size);
	if (!sample) {
		bpf_dynptr_set_null(ptr);
		return -EINVAL;
	}

	bpf_dynptr_init(ptr, sample, BPF_DYNPTR_TYPE_RINGBUF, 0, size);

	return 0;
}

const struct bpf_func_proto bpf_ringbuf_reserve_dynptr_proto = {
	.func = bpf_ringbuf_reserve_dynptr,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_ANYTHING,
	.arg3_type = ARG_ANYTHING,
	.arg4_type = ARG_PTR_TO_DYNPTR | DYNPTR_TYPE_RINGBUF | MEM_UNINIT,
};

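/* Sketch of the dynptr variant (map "rb", size and val are hypothetical);
 * the verifier requires the dynptr to be submitted or discarded on every
 * path, including the failure path:
 *
 *	struct bpf_dynptr ptr;
 *
 *	if (bpf_ringbuf_reserve_dynptr(&rb, size, 0, &ptr)) {
 *		bpf_ringbuf_discard_dynptr(&ptr, 0);
 *		return 0;
 *	}
 *	bpf_dynptr_write(&ptr, 0, &val, sizeof(val), 0);
 *	bpf_ringbuf_submit_dynptr(&ptr, 0);
 */
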
BPF_CALL_2(bpf_ringbuf_submit_dynptr, struct bpf_dynptr_kern *, ptr, u64, flags)
{
	if (!ptr->data)
		return 0;

	bpf_ringbuf_commit(ptr->data, flags, false /* discard */);

	bpf_dynptr_set_null(ptr);

	return 0;
}

const struct bpf_func_proto bpf_ringbuf_submit_dynptr_proto = {
	.func = bpf_ringbuf_submit_dynptr,
	.ret_type = RET_VOID,
	.arg1_type = ARG_PTR_TO_DYNPTR | DYNPTR_TYPE_RINGBUF | OBJ_RELEASE,
	.arg2_type = ARG_ANYTHING,
};

BPF_CALL_2(bpf_ringbuf_discard_dynptr, struct bpf_dynptr_kern *, ptr, u64, flags)
{
	if (!ptr->data)
		return 0;

	bpf_ringbuf_commit(ptr->data, flags, true /* discard */);

	bpf_dynptr_set_null(ptr);

	return 0;
}

const struct bpf_func_proto bpf_ringbuf_discard_dynptr_proto = {
	.func = bpf_ringbuf_discard_dynptr,
	.ret_type = RET_VOID,
	.arg1_type = ARG_PTR_TO_DYNPTR | DYNPTR_TYPE_RINGBUF | OBJ_RELEASE,
	.arg2_type = ARG_ANYTHING,
};

static int __bpf_user_ringbuf_peek(struct bpf_ringbuf *rb, void **sample, u32 *size)
{
	int err;
	u32 hdr_len, sample_len, total_len, flags, *hdr;
	u64 cons_pos, prod_pos;

	/* Synchronizes with smp_store_release() in user-space producer. */
	prod_pos = smp_load_acquire(&rb->producer_pos);
	if (prod_pos % 8)
		return -EINVAL;

	/* Synchronizes with smp_store_release() in __bpf_user_ringbuf_sample_release() */
	cons_pos = smp_load_acquire(&rb->consumer_pos);
	if (cons_pos >= prod_pos)
		return -ENODATA;

	hdr = (u32 *)((uintptr_t)rb->data + (uintptr_t)(cons_pos & rb->mask));
	/* Synchronizes with smp_store_release() in user-space producer. */
	hdr_len = smp_load_acquire(hdr);
	flags = hdr_len & (BPF_RINGBUF_BUSY_BIT | BPF_RINGBUF_DISCARD_BIT);
	sample_len = hdr_len & ~flags;
	total_len = round_up(sample_len + BPF_RINGBUF_HDR_SZ, 8);

	/* The sample must fit within the region advertised by the producer position. */
	if (total_len > prod_pos - cons_pos)
		return -EINVAL;

	/* The sample must fit within the data region of the ring buffer. */
	if (total_len > ringbuf_total_data_sz(rb))
		return -E2BIG;

	/* The sample must fit into a struct bpf_dynptr. */
	err = bpf_dynptr_check_size(sample_len);
	if (err)
		return -E2BIG;

	if (flags & BPF_RINGBUF_DISCARD_BIT) {
		/* If the discard bit is set, the sample should be skipped.
		 *
		 * Update the consumer pos, and return -EAGAIN so the caller
		 * knows to skip this sample and try to read the next one.
		 */
		smp_store_release(&rb->consumer_pos, cons_pos + total_len);
		return -EAGAIN;
	}

	if (flags & BPF_RINGBUF_BUSY_BIT)
		return -ENODATA;

	*sample = (void *)((uintptr_t)rb->data +
			   (uintptr_t)((cons_pos + BPF_RINGBUF_HDR_SZ) & rb->mask));
	*size = sample_len;
	return 0;
}

static void __bpf_user_ringbuf_sample_release(struct bpf_ringbuf *rb, size_t size, u64 flags)
{
	u64 consumer_pos;
	u32 rounded_size = round_up(size + BPF_RINGBUF_HDR_SZ, 8);

	/* Using smp_load_acquire() is unnecessary here, as the busy-bit
	 * prevents another task from writing to consumer_pos after it was read
	 * by this task with smp_load_acquire() in __bpf_user_ringbuf_peek().
	 */
	consumer_pos = rb->consumer_pos;
	/* Synchronizes with smp_load_acquire() in user-space producer. */
	smp_store_release(&rb->consumer_pos, consumer_pos + rounded_size);
}

BPF_CALL_4(bpf_user_ringbuf_drain, struct bpf_map *, map,
	   void *, callback_fn, void *, callback_ctx, u64, flags)
{
	struct bpf_ringbuf *rb;
	long samples, discarded_samples = 0, ret = 0;
	bpf_callback_t callback = (bpf_callback_t)callback_fn;
	u64 wakeup_flags = BPF_RB_NO_WAKEUP | BPF_RB_FORCE_WAKEUP;
	int busy = 0;

	if (unlikely(flags & ~wakeup_flags))
		return -EINVAL;

	rb = container_of(map, struct bpf_ringbuf_map, map)->rb;

	/* If another consumer is already consuming a sample, wait for them to finish. */
	if (!atomic_try_cmpxchg(&rb->busy, &busy, 1))
		return -EBUSY;

	for (samples = 0; samples < BPF_MAX_USER_RINGBUF_SAMPLES && ret == 0; samples++) {
		int err;
		u32 size;
		void *sample;
		struct bpf_dynptr_kern dynptr;

		err = __bpf_user_ringbuf_peek(rb, &sample, &size);
		if (err) {
			if (err == -ENODATA) {
				break;
			} else if (err == -EAGAIN) {
				discarded_samples++;
				continue;
			} else {
				ret = err;
				goto schedule_work_return;
			}
		}

		bpf_dynptr_init(&dynptr, sample, BPF_DYNPTR_TYPE_LOCAL, 0, size);
		ret = callback((uintptr_t)&dynptr, (uintptr_t)callback_ctx, 0, 0, 0);
		__bpf_user_ringbuf_sample_release(rb, size, flags);
	}
	ret = samples - discarded_samples;

schedule_work_return:
	/* Prevent the clearing of the busy-bit from being reordered before the
	 * storing of any rb consumer or producer positions.
	 */
	smp_mb__before_atomic();
	atomic_set(&rb->busy, 0);

	if (flags & BPF_RB_FORCE_WAKEUP)
		irq_work_queue(&rb->work);
	else if (!(flags & BPF_RB_NO_WAKEUP) && samples > 0)
		irq_work_queue(&rb->work);

	return ret;
}

const struct bpf_func_proto bpf_user_ringbuf_drain_proto = {
	.func = bpf_user_ringbuf_drain,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_PTR_TO_FUNC,
	.arg3_type = ARG_PTR_TO_STACK_OR_NULL,
	.arg4_type = ARG_ANYTHING,
};

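/* Sketch of draining a user-space-producer ring buffer from a BPF program
 * (map "user_rb", struct msg and the callback body are hypothetical):
 *
 *	static long handle_sample(struct bpf_dynptr *dynptr, void *ctx)
 *	{
 *		struct msg *m = bpf_dynptr_data(dynptr, 0, sizeof(*m));
 *
 *		if (!m)
 *			return 0;
 *		// consume *m; return 1 instead to stop draining early
 *		return 0;
 *	}
 *
 *	long n = bpf_user_ringbuf_drain(&user_rb, handle_sample, NULL, 0);
 *
 * A non-negative return is the number of samples consumed; -EBUSY means
 * another consumer holds the busy bit above.
 */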