// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2017 Facebook
 */
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/btf_ids.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/etherdevice.h>
#include <linux/filter.h>
#include <linux/rcupdate_trace.h>
#include <linux/sched/signal.h>
#include <net/bpf_sk_storage.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <net/net_namespace.h>
#include <linux/error-injection.h>
#include <linux/smp.h>
#include <linux/sock_diag.h>
#include <net/xdp.h>

#define CREATE_TRACE_POINTS
#include <trace/events/bpf_test_run.h>
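
/* Helper that times repeated test runs. NO_PREEMPT disables preemption
 * for the duration of the run loop, NO_MIGRATE only disables migration;
 * both are entered under rcu_read_lock().
 */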
struct bpf_test_timer {
	enum { NO_PREEMPT, NO_MIGRATE } mode;
	u32 i;
	u64 time_start, time_spent;
};

static void bpf_test_timer_enter(struct bpf_test_timer *t)
	__acquires(rcu)
{
	rcu_read_lock();
	if (t->mode == NO_PREEMPT)
		preempt_disable();
	else
		migrate_disable();

	t->time_start = ktime_get_ns();
}

static void bpf_test_timer_leave(struct bpf_test_timer *t)
	__releases(rcu)
{
	t->time_start = 0;

	if (t->mode == NO_PREEMPT)
		preempt_enable();
	else
		migrate_enable();

	rcu_read_unlock();
}

static bool bpf_test_timer_continue(struct bpf_test_timer *t, u32 repeat, int *err, u32 *duration)
	__must_hold(rcu)
{
	t->i++;
	if (t->i >= repeat) {
		/* We're done. */
		t->time_spent += ktime_get_ns() - t->time_start;
		do_div(t->time_spent, t->i);
		*duration = t->time_spent > U32_MAX ? U32_MAX : (u32)t->time_spent;
		*err = 0;
		goto reset;
	}

	if (signal_pending(current)) {
		/* During iteration: we've been cancelled, abort. */
		*err = -EINTR;
		goto reset;
	}

	if (need_resched()) {
		/* During iteration: we need to reschedule between runs. */
		t->time_spent += ktime_get_ns() - t->time_start;
		bpf_test_timer_leave(t);
		cond_resched();
		bpf_test_timer_enter(t);
	}

	/* Do another round. */
	return true;

reset:
	t->i = 0;
	return false;
}
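
/* Run @prog against @ctx @repeat times, reporting the average run time
 * in @time and the last return code in @retval. cgroup local storage is
 * allocated up front so programs that use it can be exercised as-is.
 */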
static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat,
			u32 *retval, u32 *time, bool xdp)
{
	struct bpf_prog_array_item item = {.prog = prog};
	struct bpf_run_ctx *old_ctx;
	struct bpf_cg_run_ctx run_ctx;
	struct bpf_test_timer t = { NO_MIGRATE };
	enum bpf_cgroup_storage_type stype;
	int ret;

	for_each_cgroup_storage_type(stype) {
		item.cgroup_storage[stype] = bpf_cgroup_storage_alloc(prog, stype);
		if (IS_ERR(item.cgroup_storage[stype])) {
			item.cgroup_storage[stype] = NULL;
			for_each_cgroup_storage_type(stype)
				bpf_cgroup_storage_free(item.cgroup_storage[stype]);
			return -ENOMEM;
		}
	}

	if (!repeat)
		repeat = 1;

	bpf_test_timer_enter(&t);
	old_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
	do {
		run_ctx.prog_item = &item;
		if (xdp)
			*retval = bpf_prog_run_xdp(prog, ctx);
		else
			*retval = bpf_prog_run(prog, ctx);
	} while (bpf_test_timer_continue(&t, repeat, &ret, time));
	bpf_reset_run_ctx(old_ctx);
	bpf_test_timer_leave(&t);

	for_each_cgroup_storage_type(stype)
		bpf_cgroup_storage_free(item.cgroup_storage[stype]);

	return ret;
}
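
/* Copy test output (packet data, frags, retval and duration) back to
 * user space. -ENOSPC is reported when the user buffer was too small
 * but everything that fit was copied.
 */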
static int bpf_test_finish(const union bpf_attr *kattr,
			   union bpf_attr __user *uattr, const void *data,
			   struct skb_shared_info *sinfo, u32 size,
			   u32 retval, u32 duration)
{
	void __user *data_out = u64_to_user_ptr(kattr->test.data_out);
	int err = -EFAULT;
	u32 copy_size = size;

	/* Clamp copy if the user has provided a size hint, but copy the full
	 * buffer if not to retain old behaviour.
	 */
	if (kattr->test.data_size_out &&
	    copy_size > kattr->test.data_size_out) {
		copy_size = kattr->test.data_size_out;
		err = -ENOSPC;
	}

	if (data_out) {
		int len = sinfo ? copy_size - sinfo->xdp_frags_size : copy_size;

		if (len < 0) {
			err = -ENOSPC;
			goto out;
		}

		if (copy_to_user(data_out, data, len))
			goto out;

		if (sinfo) {
			int i, offset = len;
			u32 data_len;

			for (i = 0; i < sinfo->nr_frags; i++) {
				skb_frag_t *frag = &sinfo->frags[i];

				if (offset >= copy_size) {
					err = -ENOSPC;
					break;
				}

				data_len = min_t(u32, copy_size - offset,
						 skb_frag_size(frag));

				if (copy_to_user(data_out + offset,
						 skb_frag_address(frag),
						 data_len))
					goto out;

				offset += data_len;
			}
		}
	}

	if (copy_to_user(&uattr->test.data_size_out, &size, sizeof(size)))
		goto out;
	if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval)))
		goto out;
	if (copy_to_user(&uattr->test.duration, &duration, sizeof(duration)))
		goto out;
	if (err != -ENOSPC)
		err = 0;
out:
	trace_bpf_test_finish(&err);
	return err;
}

/* Integer types of various sizes and pointer combinations cover variety of
 * architecture dependent calling conventions. 7+ can be supported in the
 * future.
 */
__diag_push();
__diag_ignore_all("-Wmissing-prototypes",
		  "Global functions as their definitions will be in vmlinux BTF");
int noinline bpf_fentry_test1(int a)
{
	return a + 1;
}
EXPORT_SYMBOL_GPL(bpf_fentry_test1);
ALLOW_ERROR_INJECTION(bpf_fentry_test1, ERRNO);

int noinline bpf_fentry_test2(int a, u64 b)
{
	return a + b;
}

int noinline bpf_fentry_test3(char a, int b, u64 c)
{
	return a + b + c;
}

int noinline bpf_fentry_test4(void *a, char b, int c, u64 d)
{
	return (long)a + b + c + d;
}

int noinline bpf_fentry_test5(u64 a, void *b, short c, int d, u64 e)
{
	return a + (long)b + c + d + e;
}

int noinline bpf_fentry_test6(u64 a, void *b, short c, int d, void *e, u64 f)
{
	return a + (long)b + c + d + (long)e + f;
}

struct bpf_fentry_test_t {
	struct bpf_fentry_test_t *a;
};

int noinline bpf_fentry_test7(struct bpf_fentry_test_t *arg)
{
	return (long)arg;
}

int noinline bpf_fentry_test8(struct bpf_fentry_test_t *arg)
{
	return (long)arg->a;
}

int noinline bpf_modify_return_test(int a, int *b)
{
	*b += 1;
	return a + *b;
}

u64 noinline bpf_kfunc_call_test1(struct sock *sk, u32 a, u64 b, u32 c, u64 d)
{
	return a + b + c + d;
}

int noinline bpf_kfunc_call_test2(struct sock *sk, u32 a, u32 b)
{
	return a + b;
}

struct sock * noinline bpf_kfunc_call_test3(struct sock *sk)
{
	return sk;
}

struct prog_test_ref_kfunc {
	int a;
	int b;
	struct prog_test_ref_kfunc *next;
};

static struct prog_test_ref_kfunc prog_test_struct = {
	.a = 42,
	.b = 108,
	.next = &prog_test_struct,
};

noinline struct prog_test_ref_kfunc *
bpf_kfunc_call_test_acquire(unsigned long *scalar_ptr)
{
	/* randomly return NULL */
	if (get_jiffies_64() % 2)
		return NULL;
	return &prog_test_struct;
}

noinline void bpf_kfunc_call_test_release(struct prog_test_ref_kfunc *p)
{
}

struct prog_test_pass1 {
	int x0;
	struct {
		int x1;
		struct {
			int x2;
			struct {
				int x3;
			};
		};
	};
};

struct prog_test_pass2 {
	int len;
	short arr1[4];
	struct {
		char arr2[4];
		unsigned long arr3[8];
	} x;
};

struct prog_test_fail1 {
	void *p;
	int x;
};

struct prog_test_fail2 {
	int x8;
	struct prog_test_pass1 x;
};

struct prog_test_fail3 {
	int len;
	char arr1[2];
	char arr2[];
};

noinline void bpf_kfunc_call_test_pass_ctx(struct __sk_buff *skb)
{
}

noinline void bpf_kfunc_call_test_pass1(struct prog_test_pass1 *p)
{
}

noinline void bpf_kfunc_call_test_pass2(struct prog_test_pass2 *p)
{
}

noinline void bpf_kfunc_call_test_fail1(struct prog_test_fail1 *p)
{
}

noinline void bpf_kfunc_call_test_fail2(struct prog_test_fail2 *p)
{
}

noinline void bpf_kfunc_call_test_fail3(struct prog_test_fail3 *p)
{
}

noinline void bpf_kfunc_call_test_mem_len_pass1(void *mem, int mem__sz)
{
}

noinline void bpf_kfunc_call_test_mem_len_fail1(void *mem, int len)
{
}

noinline void bpf_kfunc_call_test_mem_len_fail2(u64 *mem, int len)
{
}

__diag_pop();

ALLOW_ERROR_INJECTION(bpf_modify_return_test, ERRNO);

BTF_SET_START(test_sk_check_kfunc_ids)
BTF_ID(func, bpf_kfunc_call_test1)
BTF_ID(func, bpf_kfunc_call_test2)
BTF_ID(func, bpf_kfunc_call_test3)
BTF_ID(func, bpf_kfunc_call_test_acquire)
BTF_ID(func, bpf_kfunc_call_test_release)
BTF_ID(func, bpf_kfunc_call_test_pass_ctx)
BTF_ID(func, bpf_kfunc_call_test_pass1)
BTF_ID(func, bpf_kfunc_call_test_pass2)
BTF_ID(func, bpf_kfunc_call_test_fail1)
BTF_ID(func, bpf_kfunc_call_test_fail2)
BTF_ID(func, bpf_kfunc_call_test_fail3)
BTF_ID(func, bpf_kfunc_call_test_mem_len_pass1)
BTF_ID(func, bpf_kfunc_call_test_mem_len_fail1)
BTF_ID(func, bpf_kfunc_call_test_mem_len_fail2)
BTF_SET_END(test_sk_check_kfunc_ids)

BTF_SET_START(test_sk_acquire_kfunc_ids)
BTF_ID(func, bpf_kfunc_call_test_acquire)
BTF_SET_END(test_sk_acquire_kfunc_ids)

BTF_SET_START(test_sk_release_kfunc_ids)
BTF_ID(func, bpf_kfunc_call_test_release)
BTF_SET_END(test_sk_release_kfunc_ids)

BTF_SET_START(test_sk_ret_null_kfunc_ids)
BTF_ID(func, bpf_kfunc_call_test_acquire)
BTF_SET_END(test_sk_ret_null_kfunc_ids)
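
/* Allocate and populate the packet data buffer for a test run:
 * @user_size bytes are copied from user space into a zeroed buffer of
 * @size bytes, leaving @headroom before and @tailroom after the data.
 */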
static void *bpf_test_init(const union bpf_attr *kattr, u32 user_size,
			   u32 size, u32 headroom, u32 tailroom)
{
	void __user *data_in = u64_to_user_ptr(kattr->test.data_in);
	void *data;

	if (size < ETH_HLEN || size > PAGE_SIZE - headroom - tailroom)
		return ERR_PTR(-EINVAL);

	if (user_size > size)
		return ERR_PTR(-EMSGSIZE);

	data = kzalloc(size + headroom + tailroom, GFP_USER);
	if (!data)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(data + headroom, data_in, user_size)) {
		kfree(data);
		return ERR_PTR(-EFAULT);
	}

	return data;
}
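
/* Test runner for fentry/fexit/fmod_ret programs: the bpf_fentry_test*
 * and bpf_modify_return_test functions above are invoked with known
 * arguments and the combined result is reported via test.retval.
 */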
int bpf_prog_test_run_tracing(struct bpf_prog *prog,
			      const union bpf_attr *kattr,
			      union bpf_attr __user *uattr)
{
	struct bpf_fentry_test_t arg = {};
	u16 side_effect = 0, ret = 0;
	int b = 2, err = -EFAULT;
	u32 retval = 0;

	if (kattr->test.flags || kattr->test.cpu)
		return -EINVAL;

	switch (prog->expected_attach_type) {
	case BPF_TRACE_FENTRY:
	case BPF_TRACE_FEXIT:
		if (bpf_fentry_test1(1) != 2 ||
		    bpf_fentry_test2(2, 3) != 5 ||
		    bpf_fentry_test3(4, 5, 6) != 15 ||
		    bpf_fentry_test4((void *)7, 8, 9, 10) != 34 ||
		    bpf_fentry_test5(11, (void *)12, 13, 14, 15) != 65 ||
		    bpf_fentry_test6(16, (void *)17, 18, 19, (void *)20, 21) != 111 ||
		    bpf_fentry_test7((struct bpf_fentry_test_t *)0) != 0 ||
		    bpf_fentry_test8(&arg) != 0)
			goto out;
		break;
	case BPF_MODIFY_RETURN:
		ret = bpf_modify_return_test(1, &b);
		if (b != 2)
			side_effect = 1;
		break;
	default:
		goto out;
	}

	retval = ((u32)side_effect << 16) | ret;
	if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval)))
		goto out;

	err = 0;
out:
	trace_bpf_test_finish(&err);
	return err;
}
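
/* Raw tracepoint test runs take the user-supplied buffer as the
 * tracepoint argument context and can optionally be pinned to a given
 * CPU with BPF_F_TEST_RUN_ON_CPU.
 */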
struct bpf_raw_tp_test_run_info {
	struct bpf_prog *prog;
	void *ctx;
	u32 retval;
};

static void
__bpf_prog_test_run_raw_tp(void *data)
{
	struct bpf_raw_tp_test_run_info *info = data;

	rcu_read_lock();
	info->retval = bpf_prog_run(info->prog, info->ctx);
	rcu_read_unlock();
}

int bpf_prog_test_run_raw_tp(struct bpf_prog *prog,
			     const union bpf_attr *kattr,
			     union bpf_attr __user *uattr)
{
	void __user *ctx_in = u64_to_user_ptr(kattr->test.ctx_in);
	__u32 ctx_size_in = kattr->test.ctx_size_in;
	struct bpf_raw_tp_test_run_info info;
	int cpu = kattr->test.cpu, err = 0;
	int current_cpu;

	/* doesn't support data_in/out, ctx_out, duration, or repeat */
	if (kattr->test.data_in || kattr->test.data_out ||
	    kattr->test.ctx_out || kattr->test.duration ||
	    kattr->test.repeat)
		return -EINVAL;

	if (ctx_size_in < prog->aux->max_ctx_offset ||
	    ctx_size_in > MAX_BPF_FUNC_ARGS * sizeof(u64))
		return -EINVAL;

	if ((kattr->test.flags & BPF_F_TEST_RUN_ON_CPU) == 0 && cpu != 0)
		return -EINVAL;

	if (ctx_size_in) {
		info.ctx = memdup_user(ctx_in, ctx_size_in);
		if (IS_ERR(info.ctx))
			return PTR_ERR(info.ctx);
	} else {
		info.ctx = NULL;
	}

	info.prog = prog;

	current_cpu = get_cpu();
	if ((kattr->test.flags & BPF_F_TEST_RUN_ON_CPU) == 0 ||
	    cpu == current_cpu) {
		__bpf_prog_test_run_raw_tp(&info);
	} else if (cpu >= nr_cpu_ids || !cpu_online(cpu)) {
		/* smp_call_function_single() also checks cpu_online()
		 * after csd_lock(). However, since cpu is from user
		 * space, let's do an extra quick check to filter out
		 * invalid value before smp_call_function_single().
		 */
		err = -ENXIO;
	} else {
		err = smp_call_function_single(cpu, __bpf_prog_test_run_raw_tp,
					       &info, 1);
	}
	put_cpu();

	if (!err &&
	    copy_to_user(&uattr->test.retval, &info.retval, sizeof(u32)))
		err = -EFAULT;

	kfree(info.ctx);
	return err;
}
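
/* Copy in the user-supplied context object, verifying that any bytes
 * beyond what the kernel understands (@max_size) are zero.
 */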
static void *bpf_ctx_init(const union bpf_attr *kattr, u32 max_size)
{
	void __user *data_in = u64_to_user_ptr(kattr->test.ctx_in);
	void __user *data_out = u64_to_user_ptr(kattr->test.ctx_out);
	u32 size = kattr->test.ctx_size_in;
	void *data;
	int err;

	if (!data_in && !data_out)
		return NULL;

	data = kzalloc(max_size, GFP_USER);
	if (!data)
		return ERR_PTR(-ENOMEM);

	if (data_in) {
		err = bpf_check_uarg_tail_zero(USER_BPFPTR(data_in), max_size, size);
		if (err) {
			kfree(data);
			return ERR_PTR(err);
		}

		size = min_t(u32, max_size, size);
		if (copy_from_user(data, data_in, size)) {
			kfree(data);
			return ERR_PTR(-EFAULT);
		}
	}
	return data;
}

static int bpf_ctx_finish(const union bpf_attr *kattr,
			  union bpf_attr __user *uattr, const void *data,
			  u32 size)
{
	void __user *data_out = u64_to_user_ptr(kattr->test.ctx_out);
	int err = -EFAULT;
	u32 copy_size = size;

	if (!data || !data_out)
		return 0;

	if (copy_size > kattr->test.ctx_size_out) {
		copy_size = kattr->test.ctx_size_out;
		err = -ENOSPC;
	}

	if (copy_to_user(data_out, data, copy_size))
		goto out;
	if (copy_to_user(&uattr->test.ctx_size_out, &size, sizeof(size)))
		goto out;
	if (err != -ENOSPC)
		err = 0;
out:
	return err;
}

/**
 * range_is_zero - test whether buffer is initialized
 * @buf: buffer to check
 * @from: check from this position
 * @to: check up until (excluding) this position
 *
 * This function returns true if there is no non-zero byte
 * in the buf in the range [from, to), i.e. the range is all zeroes.
 */
static inline bool range_is_zero(void *buf, size_t from, size_t to)
{
	return !memchr_inv((u8 *)buf + from, 0, to - from);
}
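
/* Populate a real skb from the user-visible struct __sk_buff. Only a
 * whitelist of fields may be set; every gap between allowed fields must
 * be zero, which is what the range_is_zero() checks below enforce.
 */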
static int convert___skb_to_skb(struct sk_buff *skb, struct __sk_buff *__skb)
{
	struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;

	if (!__skb)
		return 0;

	/* make sure the fields we don't use are zeroed */
	if (!range_is_zero(__skb, 0, offsetof(struct __sk_buff, mark)))
		return -EINVAL;

	/* mark is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, mark),
			   offsetof(struct __sk_buff, priority)))
		return -EINVAL;

	/* priority is allowed */
	/* ingress_ifindex is allowed */
	/* ifindex is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, ifindex),
			   offsetof(struct __sk_buff, cb)))
		return -EINVAL;

	/* cb is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, cb),
			   offsetof(struct __sk_buff, tstamp)))
		return -EINVAL;

	/* tstamp is allowed */
	/* wire_len is allowed */
	/* gso_segs is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, gso_segs),
			   offsetof(struct __sk_buff, gso_size)))
		return -EINVAL;

	/* gso_size is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, gso_size),
			   offsetof(struct __sk_buff, hwtstamp)))
		return -EINVAL;

	/* hwtstamp is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, hwtstamp),
			   sizeof(struct __sk_buff)))
		return -EINVAL;

	skb->mark = __skb->mark;
	skb->priority = __skb->priority;
	skb->skb_iif = __skb->ingress_ifindex;
	skb->tstamp = __skb->tstamp;
	memcpy(&cb->data, __skb->cb, QDISC_CB_PRIV_LEN);

	if (__skb->wire_len == 0) {
		cb->pkt_len = skb->len;
	} else {
		if (__skb->wire_len < skb->len ||
		    __skb->wire_len > GSO_MAX_SIZE)
			return -EINVAL;
		cb->pkt_len = __skb->wire_len;
	}

	if (__skb->gso_segs > GSO_MAX_SEGS)
		return -EINVAL;
	skb_shinfo(skb)->gso_segs = __skb->gso_segs;
	skb_shinfo(skb)->gso_size = __skb->gso_size;
	skb_shinfo(skb)->hwtstamps.hwtstamp = __skb->hwtstamp;

	return 0;
}

static void convert_skb_to___skb(struct sk_buff *skb, struct __sk_buff *__skb)
{
	struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;

	if (!__skb)
		return;

	__skb->mark = skb->mark;
	__skb->priority = skb->priority;
	__skb->ingress_ifindex = skb->skb_iif;
	__skb->ifindex = skb->dev->ifindex;
	__skb->tstamp = skb->tstamp;
	memcpy(__skb->cb, &cb->data, QDISC_CB_PRIV_LEN);
	__skb->wire_len = cb->pkt_len;
	__skb->gso_segs = skb_shinfo(skb)->gso_segs;
	__skb->hwtstamp = skb_shinfo(skb)->hwtstamps.hwtstamp;
}

static struct proto bpf_dummy_proto = {
	.name = "bpf_dummy",
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct sock),
};
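
/* BPF_PROG_TEST_RUN for skb-based program types: builds an skb around
 * the user data, attaches a dummy socket, runs the program, then copies
 * the (possibly modified) skb and context back out.
 *
 * A minimal user-space usage sketch, assuming libbpf's
 * bpf_prog_test_run_opts() (names of local variables illustrative):
 *
 *	LIBBPF_OPTS(bpf_test_run_opts, opts,
 *		    .data_in = pkt,
 *		    .data_size_in = sizeof(pkt),
 *		    .repeat = 1000);
 *	err = bpf_prog_test_run_opts(prog_fd, &opts);
 *	// on success, opts.retval and opts.duration hold the results
 */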
int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr)
{
	bool is_l2 = false, is_direct_pkt_access = false;
	struct net *net = current->nsproxy->net_ns;
	struct net_device *dev = net->loopback_dev;
	u32 size = kattr->test.data_size_in;
	u32 repeat = kattr->test.repeat;
	struct __sk_buff *ctx = NULL;
	u32 retval, duration;
	int hh_len = ETH_HLEN;
	struct sk_buff *skb;
	struct sock *sk;
	void *data;
	int ret;

	if (kattr->test.flags || kattr->test.cpu)
		return -EINVAL;

	data = bpf_test_init(kattr, kattr->test.data_size_in,
			     size, NET_SKB_PAD + NET_IP_ALIGN,
			     SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
	if (IS_ERR(data))
		return PTR_ERR(data);

	ctx = bpf_ctx_init(kattr, sizeof(struct __sk_buff));
	if (IS_ERR(ctx)) {
		kfree(data);
		return PTR_ERR(ctx);
	}

	switch (prog->type) {
	case BPF_PROG_TYPE_SCHED_CLS:
	case BPF_PROG_TYPE_SCHED_ACT:
		is_l2 = true;
		fallthrough;
	case BPF_PROG_TYPE_LWT_IN:
	case BPF_PROG_TYPE_LWT_OUT:
	case BPF_PROG_TYPE_LWT_XMIT:
		is_direct_pkt_access = true;
		break;
	default:
		break;
	}

	sk = sk_alloc(net, AF_UNSPEC, GFP_USER, &bpf_dummy_proto, 1);
	if (!sk) {
		kfree(data);
		kfree(ctx);
		return -ENOMEM;
	}
	sock_init_data(NULL, sk);

	skb = build_skb(data, 0);
	if (!skb) {
		kfree(data);
		kfree(ctx);
		sk_free(sk);
		return -ENOMEM;
	}
	skb->sk = sk;

	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
	__skb_put(skb, size);
	if (ctx && ctx->ifindex > 1) {
		dev = dev_get_by_index(net, ctx->ifindex);
		if (!dev) {
			ret = -ENODEV;
			goto out;
		}
	}
	skb->protocol = eth_type_trans(skb, dev);
	skb_reset_network_header(skb);

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		sk->sk_family = AF_INET;
		if (sizeof(struct iphdr) <= skb_headlen(skb)) {
			sk->sk_rcv_saddr = ip_hdr(skb)->saddr;
			sk->sk_daddr = ip_hdr(skb)->daddr;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		sk->sk_family = AF_INET6;
		if (sizeof(struct ipv6hdr) <= skb_headlen(skb)) {
			sk->sk_v6_rcv_saddr = ipv6_hdr(skb)->saddr;
			sk->sk_v6_daddr = ipv6_hdr(skb)->daddr;
		}
		break;
#endif
	default:
		break;
	}

	if (is_l2)
		__skb_push(skb, hh_len);
	if (is_direct_pkt_access)
		bpf_compute_data_pointers(skb);
	ret = convert___skb_to_skb(skb, ctx);
	if (ret)
		goto out;
	ret = bpf_test_run(prog, skb, repeat, &retval, &duration, false);
	if (ret)
		goto out;
	if (!is_l2) {
		if (skb_headroom(skb) < hh_len) {
			int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb));

			if (pskb_expand_head(skb, nhead, 0, GFP_USER)) {
				ret = -ENOMEM;
				goto out;
			}
		}
		memset(__skb_push(skb, hh_len), 0, hh_len);
	}
	convert_skb_to___skb(skb, ctx);

	size = skb->len;
	/* bpf program can never convert linear skb to non-linear */
	if (WARN_ON_ONCE(skb_is_nonlinear(skb)))
		size = skb_headlen(skb);
	ret = bpf_test_finish(kattr, uattr, skb->data, NULL, size, retval,
			      duration);
	if (!ret)
		ret = bpf_ctx_finish(kattr, uattr, ctx,
				     sizeof(struct __sk_buff));
out:
	if (dev && dev != net->loopback_dev)
		dev_put(dev);
	kfree_skb(skb);
	sk_free(sk);
	kfree(ctx);
	return ret;
}
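
/* Apply a user-supplied xdp_md to an xdp_buff. A non-zero
 * ingress_ifindex selects a real device/RX queue; the reference taken
 * here is dropped again in xdp_convert_buff_to_md().
 */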
static int xdp_convert_md_to_buff(struct xdp_md *xdp_md, struct xdp_buff *xdp)
{
	unsigned int ingress_ifindex, rx_queue_index;
	struct netdev_rx_queue *rxqueue;
	struct net_device *device;

	if (!xdp_md)
		return 0;

	if (xdp_md->egress_ifindex != 0)
		return -EINVAL;

	ingress_ifindex = xdp_md->ingress_ifindex;
	rx_queue_index = xdp_md->rx_queue_index;

	if (!ingress_ifindex && rx_queue_index)
		return -EINVAL;

	if (ingress_ifindex) {
		device = dev_get_by_index(current->nsproxy->net_ns,
					  ingress_ifindex);
		if (!device)
			return -ENODEV;

		if (rx_queue_index >= device->real_num_rx_queues)
			goto free_dev;

		rxqueue = __netif_get_rx_queue(device, rx_queue_index);

		if (!xdp_rxq_info_is_reg(&rxqueue->xdp_rxq))
			goto free_dev;

		xdp->rxq = &rxqueue->xdp_rxq;
		/* The device is now tracked in the xdp->rxq for later
		 * dev_put()
		 */
	}

	xdp->data = xdp->data_meta + xdp_md->data;
	return 0;

free_dev:
	dev_put(device);
	return -EINVAL;
}

static void xdp_convert_buff_to_md(struct xdp_buff *xdp, struct xdp_md *xdp_md)
{
	if (!xdp_md)
		return;

	xdp_md->data = xdp->data - xdp->data_meta;
	xdp_md->data_end = xdp->data_end - xdp->data_meta;

	if (xdp_md->ingress_ifindex)
		dev_put(xdp->rxq->dev);
}
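
/* BPF_PROG_TEST_RUN for XDP: data beyond the linear area is spilled
 * into page fragments so multi-buffer (frags) programs can be tested.
 */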
int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr)
{
	u32 tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	u32 size = kattr->test.data_size_in;
	u32 headroom = XDP_PACKET_HEADROOM;
	u32 retval, duration, max_data_sz;
	u32 repeat = kattr->test.repeat;
	struct netdev_rx_queue *rxqueue;
	struct skb_shared_info *sinfo;
	struct xdp_buff xdp = {};
	int i, ret = -EINVAL;
	struct xdp_md *ctx;
	void *data;

	if (prog->expected_attach_type == BPF_XDP_DEVMAP ||
	    prog->expected_attach_type == BPF_XDP_CPUMAP)
		return -EINVAL;

	ctx = bpf_ctx_init(kattr, sizeof(struct xdp_md));
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	if (ctx) {
		/* There can't be user provided data before the meta data */
		if (ctx->data_meta || ctx->data_end != size ||
		    ctx->data > ctx->data_end ||
		    unlikely(xdp_metalen_invalid(ctx->data)))
			goto free_ctx;
		/* Meta data is allocated from the headroom */
		headroom -= ctx->data;
	}

	max_data_sz = 4096 - headroom - tailroom;
	size = min_t(u32, size, max_data_sz);

	data = bpf_test_init(kattr, size, max_data_sz, headroom, tailroom);
	if (IS_ERR(data)) {
		ret = PTR_ERR(data);
		goto free_ctx;
	}

	rxqueue = __netif_get_rx_queue(current->nsproxy->net_ns->loopback_dev, 0);
	rxqueue->xdp_rxq.frag_size = headroom + max_data_sz + tailroom;
	xdp_init_buff(&xdp, rxqueue->xdp_rxq.frag_size, &rxqueue->xdp_rxq);
	xdp_prepare_buff(&xdp, data, headroom, size, true);
	sinfo = xdp_get_shared_info_from_buff(&xdp);

	ret = xdp_convert_md_to_buff(ctx, &xdp);
	if (ret)
		goto free_data;

	if (unlikely(kattr->test.data_size_in > size)) {
		void __user *data_in = u64_to_user_ptr(kattr->test.data_in);

		while (size < kattr->test.data_size_in) {
			struct page *page;
			skb_frag_t *frag;
			u32 data_len;

			if (sinfo->nr_frags == MAX_SKB_FRAGS) {
				ret = -ENOMEM;
				goto out;
			}

			page = alloc_page(GFP_KERNEL);
			if (!page) {
				ret = -ENOMEM;
				goto out;
			}

			frag = &sinfo->frags[sinfo->nr_frags++];
			__skb_frag_set_page(frag, page);

			data_len = min_t(u32, kattr->test.data_size_in - size,
					 PAGE_SIZE);
			skb_frag_size_set(frag, data_len);

			if (copy_from_user(page_address(page), data_in + size,
					   data_len)) {
				ret = -EFAULT;
				goto out;
			}
			sinfo->xdp_frags_size += data_len;
			size += data_len;
		}
		xdp_buff_set_frags_flag(&xdp);
	}

	if (repeat > 1)
		bpf_prog_change_xdp(NULL, prog);

	ret = bpf_test_run(prog, &xdp, repeat, &retval, &duration, true);
	/* We convert the xdp_buff back to an xdp_md before checking the return
	 * code so the reference count of any held netdevice will be decremented
	 * even if the test run failed.
	 */
	xdp_convert_buff_to_md(&xdp, ctx);
	if (ret)
		goto out;

	size = xdp.data_end - xdp.data_meta + sinfo->xdp_frags_size;
	ret = bpf_test_finish(kattr, uattr, xdp.data_meta, sinfo, size,
			      retval, duration);
	if (!ret)
		ret = bpf_ctx_finish(kattr, uattr, ctx,
				     sizeof(struct xdp_md));

out:
	if (repeat > 1)
		bpf_prog_change_xdp(prog, NULL);
free_data:
	for (i = 0; i < sinfo->nr_frags; i++)
		__free_page(skb_frag_page(&sinfo->frags[i]));
	kfree(data);
free_ctx:
	kfree(ctx);
	return ret;
}
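
/* Flow dissector test runs reuse the generic packet buffer but feed it
 * through bpf_flow_dissect() rather than a direct program run.
 */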
static int verify_user_bpf_flow_keys(struct bpf_flow_keys *ctx)
{
	/* make sure the fields we don't use are zeroed */
	if (!range_is_zero(ctx, 0, offsetof(struct bpf_flow_keys, flags)))
		return -EINVAL;

	/* flags is allowed */

	if (!range_is_zero(ctx, offsetofend(struct bpf_flow_keys, flags),
			   sizeof(struct bpf_flow_keys)))
		return -EINVAL;

	return 0;
}

int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
				     const union bpf_attr *kattr,
				     union bpf_attr __user *uattr)
{
	struct bpf_test_timer t = { NO_PREEMPT };
	u32 size = kattr->test.data_size_in;
	struct bpf_flow_dissector ctx = {};
	u32 repeat = kattr->test.repeat;
	struct bpf_flow_keys *user_ctx;
	struct bpf_flow_keys flow_keys;
	const struct ethhdr *eth;
	unsigned int flags = 0;
	u32 retval, duration;
	void *data;
	int ret;

	if (prog->type != BPF_PROG_TYPE_FLOW_DISSECTOR)
		return -EINVAL;

	if (kattr->test.flags || kattr->test.cpu)
		return -EINVAL;

	if (size < ETH_HLEN)
		return -EINVAL;

	data = bpf_test_init(kattr, kattr->test.data_size_in, size, 0, 0);
	if (IS_ERR(data))
		return PTR_ERR(data);

	eth = (struct ethhdr *)data;

	if (!repeat)
		repeat = 1;

	user_ctx = bpf_ctx_init(kattr, sizeof(struct bpf_flow_keys));
	if (IS_ERR(user_ctx)) {
		kfree(data);
		return PTR_ERR(user_ctx);
	}
	if (user_ctx) {
		ret = verify_user_bpf_flow_keys(user_ctx);
		if (ret)
			goto out;
		flags = user_ctx->flags;
	}

	ctx.flow_keys = &flow_keys;
	ctx.data = data;
	ctx.data_end = (__u8 *)data + size;

	bpf_test_timer_enter(&t);
	do {
		retval = bpf_flow_dissect(prog, &ctx, eth->h_proto, ETH_HLEN,
					  size, flags);
	} while (bpf_test_timer_continue(&t, repeat, &ret, &duration));
	bpf_test_timer_leave(&t);

	if (ret < 0)
		goto out;

	ret = bpf_test_finish(kattr, uattr, &flow_keys, NULL,
			      sizeof(flow_keys), retval, duration);
	if (!ret)
		ret = bpf_ctx_finish(kattr, uattr, user_ctx,
				     sizeof(struct bpf_flow_keys));

out:
	kfree(user_ctx);
	kfree(data);
	return ret;
}
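
/* SK_LOOKUP test runs take no packet data, only a bpf_sk_lookup
 * context; the selected socket, if any, is reported via its cookie.
 */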
int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog, const union bpf_attr *kattr,
				union bpf_attr __user *uattr)
{
	struct bpf_test_timer t = { NO_PREEMPT };
	struct bpf_prog_array *progs = NULL;
	struct bpf_sk_lookup_kern ctx = {};
	u32 repeat = kattr->test.repeat;
	struct bpf_sk_lookup *user_ctx;
	u32 retval, duration;
	int ret = -EINVAL;

	if (prog->type != BPF_PROG_TYPE_SK_LOOKUP)
		return -EINVAL;

	if (kattr->test.flags || kattr->test.cpu)
		return -EINVAL;

	if (kattr->test.data_in || kattr->test.data_size_in || kattr->test.data_out ||
	    kattr->test.data_size_out)
		return -EINVAL;

	if (!repeat)
		repeat = 1;

	user_ctx = bpf_ctx_init(kattr, sizeof(*user_ctx));
	if (IS_ERR(user_ctx))
		return PTR_ERR(user_ctx);

	if (!user_ctx)
		return -EINVAL;

	if (user_ctx->sk)
		goto out;

	if (!range_is_zero(user_ctx, offsetofend(typeof(*user_ctx), local_port), sizeof(*user_ctx)))
		goto out;

	if (user_ctx->local_port > U16_MAX) {
		ret = -ERANGE;
		goto out;
	}

	ctx.family = (u16)user_ctx->family;
	ctx.protocol = (u16)user_ctx->protocol;
	ctx.dport = (u16)user_ctx->local_port;
	ctx.sport = user_ctx->remote_port;

	switch (ctx.family) {
	case AF_INET:
		ctx.v4.daddr = (__force __be32)user_ctx->local_ip4;
		ctx.v4.saddr = (__force __be32)user_ctx->remote_ip4;
		break;

#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		ctx.v6.daddr = (struct in6_addr *)user_ctx->local_ip6;
		ctx.v6.saddr = (struct in6_addr *)user_ctx->remote_ip6;
		break;
#endif

	default:
		ret = -EAFNOSUPPORT;
		goto out;
	}

	progs = bpf_prog_array_alloc(1, GFP_KERNEL);
	if (!progs) {
		ret = -ENOMEM;
		goto out;
	}

	progs->items[0].prog = prog;

	bpf_test_timer_enter(&t);
	do {
		ctx.selected_sk = NULL;
		retval = BPF_PROG_SK_LOOKUP_RUN_ARRAY(progs, ctx, bpf_prog_run);
	} while (bpf_test_timer_continue(&t, repeat, &ret, &duration));
	bpf_test_timer_leave(&t);

	if (ret < 0)
		goto out;

	user_ctx->cookie = 0;
	if (ctx.selected_sk) {
		if (ctx.selected_sk->sk_reuseport && !ctx.no_reuseport) {
			ret = -EOPNOTSUPP;
			goto out;
		}

		user_ctx->cookie = sock_gen_cookie(ctx.selected_sk);
	}

	ret = bpf_test_finish(kattr, uattr, NULL, NULL, 0, retval, duration);
	if (!ret)
		ret = bpf_ctx_finish(kattr, uattr, user_ctx, sizeof(*user_ctx));

out:
	bpf_prog_array_free(progs);
	kfree(user_ctx);
	return ret;
}
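
/* Syscall program test runs execute once, under rcu_read_lock_trace(),
 * and write the possibly-modified context back to user space.
 */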
int bpf_prog_test_run_syscall(struct bpf_prog *prog,
			      const union bpf_attr *kattr,
			      union bpf_attr __user *uattr)
{
	void __user *ctx_in = u64_to_user_ptr(kattr->test.ctx_in);
	__u32 ctx_size_in = kattr->test.ctx_size_in;
	void *ctx = NULL;
	u32 retval;
	int err = 0;

	/* doesn't support data_in/out, ctx_out, duration, or repeat or flags */
	if (kattr->test.data_in || kattr->test.data_out ||
	    kattr->test.ctx_out || kattr->test.duration ||
	    kattr->test.repeat || kattr->test.flags)
		return -EINVAL;

	if (ctx_size_in < prog->aux->max_ctx_offset ||
	    ctx_size_in > U16_MAX)
		return -EINVAL;

	if (ctx_size_in) {
		ctx = memdup_user(ctx_in, ctx_size_in);
		if (IS_ERR(ctx))
			return PTR_ERR(ctx);
	}

	rcu_read_lock_trace();
	retval = bpf_prog_run_pin_on_cpu(prog, ctx);
	rcu_read_unlock_trace();

	if (copy_to_user(&uattr->test.retval, &retval, sizeof(u32))) {
		err = -EFAULT;
		goto out;
	}
	if (ctx_size_in)
		if (copy_to_user(ctx_in, ctx, ctx_size_in))
			err = -EFAULT;
out:
	kfree(ctx);
	return err;
}
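
/* Register the test kfuncs above for BPF_PROG_TYPE_SCHED_CLS so
 * selftest programs of that type can call them.
 */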
static const struct btf_kfunc_id_set bpf_prog_test_kfunc_set = {
	.owner        = THIS_MODULE,
	.check_set    = &test_sk_check_kfunc_ids,
	.acquire_set  = &test_sk_acquire_kfunc_ids,
	.release_set  = &test_sk_release_kfunc_ids,
	.ret_null_set = &test_sk_ret_null_kfunc_ids,
};

static int __init bpf_prog_test_run_init(void)
{
	return register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &bpf_prog_test_kfunc_set);
}
late_initcall(bpf_prog_test_run_init);