kernel/bpf/ringbuf.c
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/err.h>
#include <linux/irq_work.h>
#include <linux/slab.h>
#include <linux/filter.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include <linux/poll.h>
#include <linux/kmemleak.h>
#include <uapi/linux/btf.h>
#include <linux/btf_ids.h>

#define RINGBUF_CREATE_FLAG_MASK (BPF_F_NUMA_NODE)

/* non-mmap()'able part of bpf_ringbuf (everything up to consumer page) */
#define RINGBUF_PGOFF \
	(offsetof(struct bpf_ringbuf, consumer_pos) >> PAGE_SHIFT)
/* consumer page and producer page */
#define RINGBUF_POS_PAGES 2
#define RINGBUF_NR_META_PAGES (RINGBUF_PGOFF + RINGBUF_POS_PAGES)

#define RINGBUF_MAX_RECORD_SZ (UINT_MAX/4)

/* The maximum size of the ring buffer area is limited by the 32-bit page
 * offset within the record header, counted in pages. Reserve 8 bits for
 * extensibility, and account for a few extra pages for the consumer/producer
 * pages and the non-mmap()'able parts. This gives a 64GB limit, which seems
 * plenty for a single ring buffer.
 */
#define RINGBUF_MAX_DATA_SZ \
	(((1ULL << 24) - RINGBUF_POS_PAGES - RINGBUF_PGOFF) * PAGE_SIZE)

struct bpf_ringbuf {
	wait_queue_head_t waitq;
	struct irq_work work;
	u64 mask;
	struct page **pages;
	int nr_pages;
	spinlock_t spinlock ____cacheline_aligned_in_smp;
	/* For user-space producer ring buffers, an atomic_t busy bit is used
	 * to synchronize access to the ring buffers in the kernel, rather than
	 * the spinlock that is used for kernel-producer ring buffers. This is
	 * done because the ring buffer must hold a lock across a BPF program's
	 * callback:
	 *
	 *    __bpf_user_ringbuf_peek() // lock acquired
	 * -> program callback_fn()
	 * -> __bpf_user_ringbuf_sample_release() // lock released
	 *
	 * It is unsafe and incorrect to hold an IRQ spinlock across what could
	 * be a long execution window, so we instead simply disallow concurrent
	 * access to the ring buffer by kernel consumers, and return -EBUSY from
	 * __bpf_user_ringbuf_peek() if the busy bit is held by another task.
	 */
	atomic_t busy ____cacheline_aligned_in_smp;
	/* Consumer and producer counters are put into separate pages to
	 * allow each position to be mapped with different permissions.
	 * This prevents a user-space application from modifying the
	 * position and ruining in-kernel tracking. The permissions of the
	 * pages depend on who is producing samples: user-space or the
	 * kernel.
	 *
	 * Kernel-producer
	 * ---------------
	 * The producer position and data pages are mapped as r/o in
	 * userspace. For this approach, bits in the header of samples are
	 * used to signal to user-space, and to other producers, whether a
	 * sample is currently being written.
	 *
	 * User-space producer
	 * -------------------
	 * Only the page containing the consumer position is mapped r/o in
	 * user-space. User-space producers also use bits of the header to
	 * communicate to the kernel, but the kernel must carefully check and
	 * validate each sample to ensure that they're correctly formatted, and
	 * fully contained within the ring buffer.
	 */
	unsigned long consumer_pos __aligned(PAGE_SIZE);
	unsigned long producer_pos __aligned(PAGE_SIZE);
	char data[] __aligned(PAGE_SIZE);
};

struct bpf_ringbuf_map {
	struct bpf_map map;
	struct bpf_ringbuf *rb;
};

/* 8-byte ring buffer record header structure */
struct bpf_ringbuf_hdr {
	u32 len;
	u32 pg_off;
};
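
/* Illustrative sketch (not part of the original file): how a consumer
 * decodes the len word of the header above. BPF_RINGBUF_BUSY_BIT and
 * BPF_RINGBUF_DISCARD_BIT come from the uapi <linux/bpf.h>; the remaining
 * low bits hold the sample length. The helper name is hypothetical.
 */
static inline int example_hdr_decode(u32 len, u32 *sample_len)
{
	if (len & BPF_RINGBUF_BUSY_BIT)
		return -EBUSY;	/* producer is still writing this record */

	*sample_len = len & ~(BPF_RINGBUF_BUSY_BIT | BPF_RINGBUF_DISCARD_BIT);

	if (len & BPF_RINGBUF_DISCARD_BIT)
		return -EAGAIN;	/* skip record, but still advance consumer_pos */

	return 0;	/* a complete, committed sample of *sample_len bytes */
}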

static struct bpf_ringbuf *bpf_ringbuf_area_alloc(size_t data_sz, int numa_node)
{
	const gfp_t flags = GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL |
			    __GFP_NOWARN | __GFP_ZERO;
	int nr_meta_pages = RINGBUF_NR_META_PAGES;
	int nr_data_pages = data_sz >> PAGE_SHIFT;
	int nr_pages = nr_meta_pages + nr_data_pages;
	struct page **pages, *page;
	struct bpf_ringbuf *rb;
	size_t array_size;
	int i;

	/* Each data page is mapped twice to allow "virtual"
	 * contiguous reads of samples wrapping around the end of the ring
	 * buffer area:
	 * ------------------------------------------------------
	 * | meta pages |  real data pages  |  same data pages  |
	 * ------------------------------------------------------
	 * |            | 1 2 3 4 5 6 7 8 9 | 1 2 3 4 5 6 7 8 9 |
	 * ------------------------------------------------------
	 * |            | TA             DA | TA             DA |
	 * ------------------------------------------------------
	 *                               ^^^^^^^
	 *                                  |
	 * Here, there is no need to worry about special handling of
	 * wrapped-around data due to the double-mapped data pages. This
	 * works both in the kernel and when mmap()'ed in user-space,
	 * simplifying both kernel and user-space implementations
	 * significantly.
	 */
	array_size = (nr_meta_pages + 2 * nr_data_pages) * sizeof(*pages);
	pages = bpf_map_area_alloc(array_size, numa_node);
	if (!pages)
		return NULL;

	for (i = 0; i < nr_pages; i++) {
		page = alloc_pages_node(numa_node, flags, 0);
		if (!page) {
			nr_pages = i;
			goto err_free_pages;
		}
		pages[i] = page;
		if (i >= nr_meta_pages)
			pages[nr_data_pages + i] = page;
	}

	rb = vmap(pages, nr_meta_pages + 2 * nr_data_pages,
		  VM_MAP | VM_USERMAP, PAGE_KERNEL);
	if (rb) {
		kmemleak_not_leak(pages);
		rb->pages = pages;
		rb->nr_pages = nr_pages;
		return rb;
	}

err_free_pages:
	for (i = 0; i < nr_pages; i++)
		__free_page(pages[i]);
	bpf_map_area_free(pages);
	return NULL;
}
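
/* Illustrative sketch (not part of the original file): the double mapping
 * set up above is what lets any reader treat a record that wraps past the
 * end of the data area as one contiguous range. Even when
 * (pos & rb->mask) + len exceeds the data size, the copy below stays within
 * the second mapping of the same pages, so no split-copy logic is needed.
 */
static inline void example_read_wrapped(struct bpf_ringbuf *rb,
					unsigned long pos, void *dst,
					size_t len)
{
	memcpy(dst, rb->data + (pos & rb->mask), len);
}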

static void bpf_ringbuf_notify(struct irq_work *work)
{
	struct bpf_ringbuf *rb = container_of(work, struct bpf_ringbuf, work);

	wake_up_all(&rb->waitq);
}

static struct bpf_ringbuf *bpf_ringbuf_alloc(size_t data_sz, int numa_node)
{
	struct bpf_ringbuf *rb;

	rb = bpf_ringbuf_area_alloc(data_sz, numa_node);
	if (!rb)
		return NULL;

	spin_lock_init(&rb->spinlock);
	atomic_set(&rb->busy, 0);
	init_waitqueue_head(&rb->waitq);
	init_irq_work(&rb->work, bpf_ringbuf_notify);

	rb->mask = data_sz - 1;
	rb->consumer_pos = 0;
	rb->producer_pos = 0;

	return rb;
}

static struct bpf_map *ringbuf_map_alloc(union bpf_attr *attr)
{
	struct bpf_ringbuf_map *rb_map;

	if (attr->map_flags & ~RINGBUF_CREATE_FLAG_MASK)
		return ERR_PTR(-EINVAL);

	if (attr->key_size || attr->value_size ||
	    !is_power_of_2(attr->max_entries) ||
	    !PAGE_ALIGNED(attr->max_entries))
		return ERR_PTR(-EINVAL);

#ifdef CONFIG_64BIT
	/* on a 32-bit arch, it's impossible to overflow the record's hdr->pg_off */
	if (attr->max_entries > RINGBUF_MAX_DATA_SZ)
		return ERR_PTR(-E2BIG);
#endif

	rb_map = bpf_map_area_alloc(sizeof(*rb_map), NUMA_NO_NODE);
	if (!rb_map)
		return ERR_PTR(-ENOMEM);

	bpf_map_init_from_attr(&rb_map->map, attr);

	rb_map->rb = bpf_ringbuf_alloc(attr->max_entries, rb_map->map.numa_node);
	if (!rb_map->rb) {
		bpf_map_area_free(rb_map);
		return ERR_PTR(-ENOMEM);
	}

	return &rb_map->map;
}
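
/* Illustrative user-space sketch (not part of the original file; assumes
 * libbpf's bpf_map_create()): the checks in ringbuf_map_alloc() above mean
 * a ring buffer map must be created with zero key/value sizes and a
 * max_entries that is both a power of two and a multiple of the page size:
 *
 *	#include <bpf/bpf.h>
 *
 *	int fd = bpf_map_create(BPF_MAP_TYPE_RINGBUF, "example_rb",
 *				0, 0,		// key_size, value_size
 *				512 * 1024,	// data area: 512 KiB
 *				NULL);
 */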

static void bpf_ringbuf_free(struct bpf_ringbuf *rb)
{
	/* copy the pages pointer and nr_pages to local variables, as we are
	 * going to unmap rb itself with vunmap() below
	 */
	struct page **pages = rb->pages;
	int i, nr_pages = rb->nr_pages;

	vunmap(rb);
	for (i = 0; i < nr_pages; i++)
		__free_page(pages[i]);
	bpf_map_area_free(pages);
}

static void ringbuf_map_free(struct bpf_map *map)
{
	struct bpf_ringbuf_map *rb_map;

	rb_map = container_of(map, struct bpf_ringbuf_map, map);
	bpf_ringbuf_free(rb_map->rb);
	bpf_map_area_free(rb_map);
}

static void *ringbuf_map_lookup_elem(struct bpf_map *map, void *key)
{
	return ERR_PTR(-ENOTSUPP);
}

static long ringbuf_map_update_elem(struct bpf_map *map, void *key, void *value,
				    u64 flags)
{
	return -ENOTSUPP;
}

static long ringbuf_map_delete_elem(struct bpf_map *map, void *key)
{
	return -ENOTSUPP;
}

static int ringbuf_map_get_next_key(struct bpf_map *map, void *key,
				    void *next_key)
{
	return -ENOTSUPP;
}

static int ringbuf_map_mmap_kern(struct bpf_map *map, struct vm_area_struct *vma)
{
	struct bpf_ringbuf_map *rb_map;

	rb_map = container_of(map, struct bpf_ringbuf_map, map);

	if (vma->vm_flags & VM_WRITE) {
		/* allow a writable mapping for the consumer_pos page only */
		if (vma->vm_pgoff != 0 || vma->vm_end - vma->vm_start != PAGE_SIZE)
			return -EPERM;
	} else {
		vm_flags_clear(vma, VM_MAYWRITE);
	}
	/* remap_vmalloc_range() checks size and offset constraints */
	return remap_vmalloc_range(vma, rb_map->rb,
				   vma->vm_pgoff + RINGBUF_PGOFF);
}

static int ringbuf_map_mmap_user(struct bpf_map *map, struct vm_area_struct *vma)
{
	struct bpf_ringbuf_map *rb_map;

	rb_map = container_of(map, struct bpf_ringbuf_map, map);

	if (vma->vm_flags & VM_WRITE) {
		/* Disallow writable mappings of the consumer pointer, but
		 * allow writable mappings of both the producer position and
		 * the ring buffer data itself.
		 */
		if (vma->vm_pgoff == 0)
			return -EPERM;
	} else {
		vm_flags_clear(vma, VM_MAYWRITE);
	}
	/* remap_vmalloc_range() checks size and offset constraints */
	return remap_vmalloc_range(vma, rb_map->rb, vma->vm_pgoff + RINGBUF_PGOFF);
}
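
/* Illustrative user-space sketch (not part of the original file): for a
 * kernel-producer ring buffer, a consumer maps the pieces with exactly the
 * permissions ringbuf_map_mmap_kern() allows, mirroring what libbpf's
 * ring_buffer does. page_size below is sysconf(_SC_PAGESIZE) and data_sz is
 * the map's max_entries.
 *
 *	// consumer_pos page: the only writable mapping, at page offset 0
 *	cons = mmap(NULL, page_size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		    map_fd, 0);
 *
 *	// producer_pos page plus the double-mapped data area: read-only
 *	prod = mmap(NULL, page_size + 2 * data_sz, PROT_READ, MAP_SHARED,
 *		    map_fd, page_size);
 */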

static unsigned long ringbuf_avail_data_sz(struct bpf_ringbuf *rb)
{
	unsigned long cons_pos, prod_pos;

	cons_pos = smp_load_acquire(&rb->consumer_pos);
	prod_pos = smp_load_acquire(&rb->producer_pos);
	return prod_pos - cons_pos;
}

static u32 ringbuf_total_data_sz(const struct bpf_ringbuf *rb)
{
	return rb->mask + 1;
}

static __poll_t ringbuf_map_poll_kern(struct bpf_map *map, struct file *filp,
				      struct poll_table_struct *pts)
{
	struct bpf_ringbuf_map *rb_map;

	rb_map = container_of(map, struct bpf_ringbuf_map, map);
	poll_wait(filp, &rb_map->rb->waitq, pts);

	if (ringbuf_avail_data_sz(rb_map->rb))
		return EPOLLIN | EPOLLRDNORM;
	return 0;
}

static __poll_t ringbuf_map_poll_user(struct bpf_map *map, struct file *filp,
				      struct poll_table_struct *pts)
{
	struct bpf_ringbuf_map *rb_map;

	rb_map = container_of(map, struct bpf_ringbuf_map, map);
	poll_wait(filp, &rb_map->rb->waitq, pts);

	if (ringbuf_avail_data_sz(rb_map->rb) < ringbuf_total_data_sz(rb_map->rb))
		return EPOLLOUT | EPOLLWRNORM;
	return 0;
}

static u64 ringbuf_map_mem_usage(const struct bpf_map *map)
{
	struct bpf_ringbuf *rb;
	int nr_data_pages;
	int nr_meta_pages;
	u64 usage = sizeof(struct bpf_ringbuf_map);

	rb = container_of(map, struct bpf_ringbuf_map, map)->rb;
	usage += (u64)rb->nr_pages << PAGE_SHIFT;
	nr_meta_pages = RINGBUF_NR_META_PAGES;
	nr_data_pages = map->max_entries >> PAGE_SHIFT;
	usage += (nr_meta_pages + 2 * nr_data_pages) * sizeof(struct page *);
	return usage;
}

BTF_ID_LIST_SINGLE(ringbuf_map_btf_ids, struct, bpf_ringbuf_map)
const struct bpf_map_ops ringbuf_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc = ringbuf_map_alloc,
	.map_free = ringbuf_map_free,
	.map_mmap = ringbuf_map_mmap_kern,
	.map_poll = ringbuf_map_poll_kern,
	.map_lookup_elem = ringbuf_map_lookup_elem,
	.map_update_elem = ringbuf_map_update_elem,
	.map_delete_elem = ringbuf_map_delete_elem,
	.map_get_next_key = ringbuf_map_get_next_key,
	.map_mem_usage = ringbuf_map_mem_usage,
	.map_btf_id = &ringbuf_map_btf_ids[0],
};

BTF_ID_LIST_SINGLE(user_ringbuf_map_btf_ids, struct, bpf_ringbuf_map)
const struct bpf_map_ops user_ringbuf_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc = ringbuf_map_alloc,
	.map_free = ringbuf_map_free,
	.map_mmap = ringbuf_map_mmap_user,
	.map_poll = ringbuf_map_poll_user,
	.map_lookup_elem = ringbuf_map_lookup_elem,
	.map_update_elem = ringbuf_map_update_elem,
	.map_delete_elem = ringbuf_map_delete_elem,
	.map_get_next_key = ringbuf_map_get_next_key,
	.map_mem_usage = ringbuf_map_mem_usage,
	.map_btf_id = &user_ringbuf_map_btf_ids[0],
};

/* Given a pointer to a ring buffer record's metadata header and the struct
 * bpf_ringbuf itself, calculate the record's page offset from the start of
 * the ring buffer, rounded down. This page offset is stored as part of the
 * record metadata and makes it possible to restore the struct bpf_ringbuf *
 * from a record pointer. It is stored at offset 4 of the record metadata
 * header.
 */
static size_t bpf_ringbuf_rec_pg_off(struct bpf_ringbuf *rb,
				     struct bpf_ringbuf_hdr *hdr)
{
	return ((void *)hdr - (void *)rb) >> PAGE_SHIFT;
}

/* Given a pointer to a ring buffer record header, restore the pointer to
 * the struct bpf_ringbuf itself by using the page offset stored at offset 4.
 */
static struct bpf_ringbuf *
bpf_ringbuf_restore_from_rec(struct bpf_ringbuf_hdr *hdr)
{
	unsigned long addr = (unsigned long)(void *)hdr;
	unsigned long off = (unsigned long)hdr->pg_off << PAGE_SHIFT;

	return (void *)((addr & PAGE_MASK) - off);
}

static void *__bpf_ringbuf_reserve(struct bpf_ringbuf *rb, u64 size)
{
	unsigned long cons_pos, prod_pos, new_prod_pos, flags;
	u32 len, pg_off;
	struct bpf_ringbuf_hdr *hdr;

	if (unlikely(size > RINGBUF_MAX_RECORD_SZ))
		return NULL;

	len = round_up(size + BPF_RINGBUF_HDR_SZ, 8);
	if (len > ringbuf_total_data_sz(rb))
		return NULL;

	cons_pos = smp_load_acquire(&rb->consumer_pos);

	if (in_nmi()) {
		if (!spin_trylock_irqsave(&rb->spinlock, flags))
			return NULL;
	} else {
		spin_lock_irqsave(&rb->spinlock, flags);
	}

	prod_pos = rb->producer_pos;
	new_prod_pos = prod_pos + len;

	/* check for out of ringbuf space by ensuring producer position
	 * doesn't advance more than (ringbuf_size - 1) ahead
	 */
	if (new_prod_pos - cons_pos > rb->mask) {
		spin_unlock_irqrestore(&rb->spinlock, flags);
		return NULL;
	}

	hdr = (void *)rb->data + (prod_pos & rb->mask);
	pg_off = bpf_ringbuf_rec_pg_off(rb, hdr);
	hdr->len = size | BPF_RINGBUF_BUSY_BIT;
	hdr->pg_off = pg_off;

	/* pairs with consumer's smp_load_acquire() */
	smp_store_release(&rb->producer_pos, new_prod_pos);

	spin_unlock_irqrestore(&rb->spinlock, flags);

	return (void *)hdr + BPF_RINGBUF_HDR_SZ;
}

BPF_CALL_3(bpf_ringbuf_reserve, struct bpf_map *, map, u64, size, u64, flags)
{
	struct bpf_ringbuf_map *rb_map;

	if (unlikely(flags))
		return 0;

	rb_map = container_of(map, struct bpf_ringbuf_map, map);
	return (unsigned long)__bpf_ringbuf_reserve(rb_map->rb, size);
}

const struct bpf_func_proto bpf_ringbuf_reserve_proto = {
	.func		= bpf_ringbuf_reserve,
	.ret_type	= RET_PTR_TO_RINGBUF_MEM_OR_NULL,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_CONST_ALLOC_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};
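
/* Illustrative BPF-program sketch (not part of the original file; assumes
 * <bpf/bpf_helpers.h> and a hypothetical BPF_MAP_TYPE_RINGBUF map "rb"):
 * the reserve/commit pattern this helper enables. The returned pointer is
 * tracked by the verifier and must be submitted or discarded on every path.
 *
 *	struct event *e;
 *
 *	e = bpf_ringbuf_reserve(&rb, sizeof(*e), 0);
 *	if (!e)
 *		return 0;	// buffer full: reservation failed
 *	e->pid = bpf_get_current_pid_tgid() >> 32;
 *	bpf_ringbuf_submit(e, 0);	// or bpf_ringbuf_discard(e, 0)
 */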

static void bpf_ringbuf_commit(void *sample, u64 flags, bool discard)
{
	unsigned long rec_pos, cons_pos;
	struct bpf_ringbuf_hdr *hdr;
	struct bpf_ringbuf *rb;
	u32 new_len;

	hdr = sample - BPF_RINGBUF_HDR_SZ;
	rb = bpf_ringbuf_restore_from_rec(hdr);
	new_len = hdr->len ^ BPF_RINGBUF_BUSY_BIT;
	if (discard)
		new_len |= BPF_RINGBUF_DISCARD_BIT;

	/* update record header with correct final size prefix */
	xchg(&hdr->len, new_len);

	/* if consumer caught up and is waiting for our record, notify about
	 * new data availability
	 */
	rec_pos = (void *)hdr - (void *)rb->data;
	cons_pos = smp_load_acquire(&rb->consumer_pos) & rb->mask;

	if (flags & BPF_RB_FORCE_WAKEUP)
		irq_work_queue(&rb->work);
	else if (cons_pos == rec_pos && !(flags & BPF_RB_NO_WAKEUP))
		irq_work_queue(&rb->work);
}

BPF_CALL_2(bpf_ringbuf_submit, void *, sample, u64, flags)
{
	bpf_ringbuf_commit(sample, flags, false /* discard */);
	return 0;
}

const struct bpf_func_proto bpf_ringbuf_submit_proto = {
	.func		= bpf_ringbuf_submit,
	.ret_type	= RET_VOID,
	.arg1_type	= ARG_PTR_TO_RINGBUF_MEM | OBJ_RELEASE,
	.arg2_type	= ARG_ANYTHING,
};

BPF_CALL_2(bpf_ringbuf_discard, void *, sample, u64, flags)
{
	bpf_ringbuf_commit(sample, flags, true /* discard */);
	return 0;
}

const struct bpf_func_proto bpf_ringbuf_discard_proto = {
	.func		= bpf_ringbuf_discard,
	.ret_type	= RET_VOID,
	.arg1_type	= ARG_PTR_TO_RINGBUF_MEM | OBJ_RELEASE,
	.arg2_type	= ARG_ANYTHING,
};

BPF_CALL_4(bpf_ringbuf_output, struct bpf_map *, map, void *, data, u64, size,
	   u64, flags)
{
	struct bpf_ringbuf_map *rb_map;
	void *rec;

	if (unlikely(flags & ~(BPF_RB_NO_WAKEUP | BPF_RB_FORCE_WAKEUP)))
		return -EINVAL;

	rb_map = container_of(map, struct bpf_ringbuf_map, map);
	rec = __bpf_ringbuf_reserve(rb_map->rb, size);
	if (!rec)
		return -EAGAIN;

	memcpy(rec, data, size);
	bpf_ringbuf_commit(rec, flags, false /* discard */);
	return 0;
}

const struct bpf_func_proto bpf_ringbuf_output_proto = {
	.func		= bpf_ringbuf_output,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg4_type	= ARG_ANYTHING,
};

BPF_CALL_2(bpf_ringbuf_query, struct bpf_map *, map, u64, flags)
{
	struct bpf_ringbuf *rb;

	rb = container_of(map, struct bpf_ringbuf_map, map)->rb;

	switch (flags) {
	case BPF_RB_AVAIL_DATA:
		return ringbuf_avail_data_sz(rb);
	case BPF_RB_RING_SIZE:
		return ringbuf_total_data_sz(rb);
	case BPF_RB_CONS_POS:
		return smp_load_acquire(&rb->consumer_pos);
	case BPF_RB_PROD_POS:
		return smp_load_acquire(&rb->producer_pos);
	default:
		return 0;
	}
}

const struct bpf_func_proto bpf_ringbuf_query_proto = {
	.func		= bpf_ringbuf_query,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_ANYTHING,
};
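
/* Illustrative BPF-program sketch (not part of the original file): one use
 * of bpf_ringbuf_query() is cheap back-pressure, e.g. dropping new samples
 * once the buffer is more than half full. Note that the returned values are
 * momentary snapshots and may be stale by the time they are compared.
 *
 *	__u64 avail = bpf_ringbuf_query(&rb, BPF_RB_AVAIL_DATA);
 *	__u64 size = bpf_ringbuf_query(&rb, BPF_RB_RING_SIZE);
 *
 *	if (avail > size / 2)
 *		return 0;	// drop this sample
 */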

BPF_CALL_4(bpf_ringbuf_reserve_dynptr, struct bpf_map *, map, u32, size, u64, flags,
	   struct bpf_dynptr_kern *, ptr)
{
	struct bpf_ringbuf_map *rb_map;
	void *sample;
	int err;

	if (unlikely(flags)) {
		bpf_dynptr_set_null(ptr);
		return -EINVAL;
	}

	err = bpf_dynptr_check_size(size);
	if (err) {
		bpf_dynptr_set_null(ptr);
		return err;
	}

	rb_map = container_of(map, struct bpf_ringbuf_map, map);

	sample = __bpf_ringbuf_reserve(rb_map->rb, size);
	if (!sample) {
		bpf_dynptr_set_null(ptr);
		return -EINVAL;
	}

	bpf_dynptr_init(ptr, sample, BPF_DYNPTR_TYPE_RINGBUF, 0, size);

	return 0;
}

const struct bpf_func_proto bpf_ringbuf_reserve_dynptr_proto = {
	.func		= bpf_ringbuf_reserve_dynptr,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_DYNPTR | DYNPTR_TYPE_RINGBUF | MEM_UNINIT,
};

BPF_CALL_2(bpf_ringbuf_submit_dynptr, struct bpf_dynptr_kern *, ptr, u64, flags)
{
	if (!ptr->data)
		return 0;

	bpf_ringbuf_commit(ptr->data, flags, false /* discard */);

	bpf_dynptr_set_null(ptr);

	return 0;
}

const struct bpf_func_proto bpf_ringbuf_submit_dynptr_proto = {
	.func		= bpf_ringbuf_submit_dynptr,
	.ret_type	= RET_VOID,
	.arg1_type	= ARG_PTR_TO_DYNPTR | DYNPTR_TYPE_RINGBUF | OBJ_RELEASE,
	.arg2_type	= ARG_ANYTHING,
};

BPF_CALL_2(bpf_ringbuf_discard_dynptr, struct bpf_dynptr_kern *, ptr, u64, flags)
{
	if (!ptr->data)
		return 0;

	bpf_ringbuf_commit(ptr->data, flags, true /* discard */);

	bpf_dynptr_set_null(ptr);

	return 0;
}

const struct bpf_func_proto bpf_ringbuf_discard_dynptr_proto = {
	.func		= bpf_ringbuf_discard_dynptr,
	.ret_type	= RET_VOID,
	.arg1_type	= ARG_PTR_TO_DYNPTR | DYNPTR_TYPE_RINGBUF | OBJ_RELEASE,
	.arg2_type	= ARG_ANYTHING,
};
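
/* Illustrative BPF-program sketch (not part of the original file): the
 * dynptr variants mirror reserve/submit but hand out a bpf_dynptr, letting
 * the verifier track reservations whose size is not a build-time constant.
 * On a failed reserve the dynptr is set to NULL, so the discard on the
 * failure path below is a safe no-op.
 *
 *	struct bpf_dynptr ptr;
 *
 *	if (bpf_ringbuf_reserve_dynptr(&rb, size, 0, &ptr)) {
 *		bpf_ringbuf_discard_dynptr(&ptr, 0);
 *		return 0;
 *	}
 *	bpf_dynptr_write(&ptr, 0, data, size, 0);
 *	bpf_ringbuf_submit_dynptr(&ptr, 0);
 */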

static int __bpf_user_ringbuf_peek(struct bpf_ringbuf *rb, void **sample, u32 *size)
{
	int err;
	u32 hdr_len, sample_len, total_len, flags, *hdr;
	u64 cons_pos, prod_pos;

	/* Synchronizes with smp_store_release() in user-space producer. */
	prod_pos = smp_load_acquire(&rb->producer_pos);
	if (prod_pos % 8)
		return -EINVAL;

	/* Synchronizes with smp_store_release() in __bpf_user_ringbuf_sample_release() */
	cons_pos = smp_load_acquire(&rb->consumer_pos);
	if (cons_pos >= prod_pos)
		return -ENODATA;

	hdr = (u32 *)((uintptr_t)rb->data + (uintptr_t)(cons_pos & rb->mask));
	/* Synchronizes with smp_store_release() in user-space producer. */
	hdr_len = smp_load_acquire(hdr);
	flags = hdr_len & (BPF_RINGBUF_BUSY_BIT | BPF_RINGBUF_DISCARD_BIT);
	sample_len = hdr_len & ~flags;
	total_len = round_up(sample_len + BPF_RINGBUF_HDR_SZ, 8);

	/* The sample must fit within the region advertised by the producer position. */
	if (total_len > prod_pos - cons_pos)
		return -EINVAL;

	/* The sample must fit within the data region of the ring buffer. */
	if (total_len > ringbuf_total_data_sz(rb))
		return -E2BIG;

	/* The sample must fit into a struct bpf_dynptr. */
	err = bpf_dynptr_check_size(sample_len);
	if (err)
		return -E2BIG;

	if (flags & BPF_RINGBUF_DISCARD_BIT) {
		/* If the discard bit is set, the sample should be skipped.
		 *
		 * Update the consumer pos, and return -EAGAIN so the caller
		 * knows to skip this sample and try to read the next one.
		 */
		smp_store_release(&rb->consumer_pos, cons_pos + total_len);
		return -EAGAIN;
	}

	if (flags & BPF_RINGBUF_BUSY_BIT)
		return -ENODATA;

	*sample = (void *)((uintptr_t)rb->data +
			   (uintptr_t)((cons_pos + BPF_RINGBUF_HDR_SZ) & rb->mask));
	*size = sample_len;
	return 0;
}

static void __bpf_user_ringbuf_sample_release(struct bpf_ringbuf *rb, size_t size, u64 flags)
{
	u64 consumer_pos;
	u32 rounded_size = round_up(size + BPF_RINGBUF_HDR_SZ, 8);

	/* Using smp_load_acquire() is unnecessary here, as the busy-bit
	 * prevents another task from writing to consumer_pos after it was read
	 * by this task with smp_load_acquire() in __bpf_user_ringbuf_peek().
	 */
	consumer_pos = rb->consumer_pos;
	/* Synchronizes with smp_load_acquire() in user-space producer. */
	smp_store_release(&rb->consumer_pos, consumer_pos + rounded_size);
}
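
/* Illustrative user-space sketch (not part of the original file; assumes
 * libbpf's user_ring_buffer API): the producer-side counterpart of the
 * peek/release protocol above. libbpf publishes the header with the busy
 * bit set, advances producer_pos with a release store, and clears the busy
 * bit only once the sample is fully written.
 *
 *	struct user_ring_buffer *urb = user_ring_buffer__new(map_fd, NULL);
 *	void *sample = user_ring_buffer__reserve(urb, size);
 *
 *	if (sample) {
 *		memcpy(sample, data, size);
 *		user_ring_buffer__submit(urb, sample);	// or __discard()
 *	}
 */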

BPF_CALL_4(bpf_user_ringbuf_drain, struct bpf_map *, map,
	   void *, callback_fn, void *, callback_ctx, u64, flags)
{
	struct bpf_ringbuf *rb;
	long samples, discarded_samples = 0, ret = 0;
	bpf_callback_t callback = (bpf_callback_t)callback_fn;
	u64 wakeup_flags = BPF_RB_NO_WAKEUP | BPF_RB_FORCE_WAKEUP;
	int busy = 0;

	if (unlikely(flags & ~wakeup_flags))
		return -EINVAL;

	rb = container_of(map, struct bpf_ringbuf_map, map)->rb;

	/* If another consumer is already consuming a sample, wait for them to finish. */
	if (!atomic_try_cmpxchg(&rb->busy, &busy, 1))
		return -EBUSY;

	for (samples = 0; samples < BPF_MAX_USER_RINGBUF_SAMPLES && ret == 0; samples++) {
		int err;
		u32 size;
		void *sample;
		struct bpf_dynptr_kern dynptr;

		err = __bpf_user_ringbuf_peek(rb, &sample, &size);
		if (err) {
			if (err == -ENODATA) {
				break;
			} else if (err == -EAGAIN) {
				discarded_samples++;
				continue;
			} else {
				ret = err;
				goto schedule_work_return;
			}
		}

		bpf_dynptr_init(&dynptr, sample, BPF_DYNPTR_TYPE_LOCAL, 0, size);
		ret = callback((uintptr_t)&dynptr, (uintptr_t)callback_ctx, 0, 0, 0);
		__bpf_user_ringbuf_sample_release(rb, size, flags);
	}
	ret = samples - discarded_samples;

schedule_work_return:
	/* Prevent the clearing of the busy-bit from being reordered before the
	 * storing of any rb consumer or producer positions.
	 */
	smp_mb__before_atomic();
	atomic_set(&rb->busy, 0);

	if (flags & BPF_RB_FORCE_WAKEUP)
		irq_work_queue(&rb->work);
	else if (!(flags & BPF_RB_NO_WAKEUP) && samples > 0)
		irq_work_queue(&rb->work);
	return ret;
}

const struct bpf_func_proto bpf_user_ringbuf_drain_proto = {
	.func		= bpf_user_ringbuf_drain,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_FUNC,
	.arg3_type	= ARG_PTR_TO_STACK_OR_NULL,
	.arg4_type	= ARG_ANYTHING,
};
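
/* Illustrative BPF-program sketch (not part of the original file): draining
 * a BPF_MAP_TYPE_USER_RINGBUF map "user_rb" (hypothetical). Each sample is
 * passed to the callback as a read-only local dynptr; returning 1 from the
 * callback stops the drain early.
 *
 *	static long handle_sample(struct bpf_dynptr *dynptr, void *ctx)
 *	{
 *		const struct event *e = bpf_dynptr_data(dynptr, 0, sizeof(*e));
 *
 *		if (!e)
 *			return 0;	// sample smaller than expected; skip
 *		// ... consume *e ...
 *		return 0;		// keep draining
 *	}
 *
 *	long n = bpf_user_ringbuf_drain(&user_rb, handle_sample, NULL, 0);
 */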