// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2017 Facebook
 */
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/btf_ids.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/etherdevice.h>
#include <linux/filter.h>
#include <linux/rcupdate_trace.h>
#include <linux/sched/signal.h>
#include <net/bpf_sk_storage.h>
#include <net/hotdata.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <net/net_namespace.h>
#include <net/page_pool/helpers.h>
#include <linux/error-injection.h>
#include <linux/smp.h>
#include <linux/sock_diag.h>
#include <linux/netfilter.h>
#include <net/netdev_rx_queue.h>
#include <net/xdp.h>
#include <net/netfilter/nf_bpf_link.h>

#define CREATE_TRACE_POINTS
#include <trace/events/bpf_test_run.h>

struct bpf_test_timer {
        enum { NO_PREEMPT, NO_MIGRATE } mode;
        u32 i;
        u64 time_start, time_spent;
};

static void bpf_test_timer_enter(struct bpf_test_timer *t)
        __acquires(rcu)
{
        rcu_read_lock();
        if (t->mode == NO_PREEMPT)
                preempt_disable();
        else
                migrate_disable();

        t->time_start = ktime_get_ns();
}

static void bpf_test_timer_leave(struct bpf_test_timer *t)
        __releases(rcu)
{
        t->time_start = 0;

        if (t->mode == NO_PREEMPT)
                preempt_enable();
        else
                migrate_enable();
        rcu_read_unlock();
}

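/* Callers below drive the timer with a loop of this shape (illustrative
 * sketch; run_once() stands in for the per-iteration body):
 *
 *        struct bpf_test_timer t = { .mode = NO_MIGRATE };
 *        u32 duration = 0;
 *        int ret;
 *
 *        bpf_test_timer_enter(&t);
 *        do {
 *                ret = run_once();
 *        } while (bpf_test_timer_continue(&t, 1, repeat, &ret, &duration));
 *        bpf_test_timer_leave(&t);
 *
 * The flow-dissector and sk_lookup runners use NO_PREEMPT to also keep
 * preemption off while measuring; everything else only disables migration.
 */
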
static bool bpf_test_timer_continue(struct bpf_test_timer *t, int iterations,
                                    u32 repeat, int *err, u32 *duration)
        __must_hold(rcu)
{
        t->i += iterations;
        if (t->i >= repeat) {
                /* We're done. */
                t->time_spent += ktime_get_ns() - t->time_start;
                do_div(t->time_spent, t->i);
                *duration = t->time_spent > U32_MAX ? U32_MAX : (u32)t->time_spent;
                *err = 0;
                goto reset;
        }

        if (signal_pending(current)) {
                /* During iteration: we've been cancelled, abort. */
                *err = -EINTR;
                goto reset;
        }

        if (need_resched()) {
                /* During iteration: we need to reschedule between runs. */
                t->time_spent += ktime_get_ns() - t->time_start;
                bpf_test_timer_leave(t);
                cond_resched();
                bpf_test_timer_enter(t);
        }

        /* Do another round. */
        return true;

reset:
        t->i = 0;
        return false;
}

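/* Worked example of the accounting above (illustrative numbers): with
 * repeat = 1000 and a cumulative time_spent of 5,000,000 ns by the time
 * t->i reaches the repeat count, do_div() yields *duration = 5000, i.e.
 * the mean nanoseconds per run, clamped to U32_MAX on overflow.
 */
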
/* We put this struct at the head of each page with a context and frame
 * initialised when the page is allocated, so we don't have to do this on each
 * repetition of the test run.
 */
struct xdp_page_head {
        struct xdp_buff orig_ctx;
        struct xdp_buff ctx;
        union {
                /* ::data_hard_start starts here */
                DECLARE_FLEX_ARRAY(struct xdp_frame, frame);
                DECLARE_FLEX_ARRAY(u8, data);
        };
};

struct xdp_test_data {
        struct xdp_buff *orig_ctx;
        struct xdp_rxq_info rxq;
        struct net_device *dev;
        struct page_pool *pp;
        struct xdp_frame **frames;
        struct sk_buff **skbs;
        struct xdp_mem_info mem;
        u32 batch_size;
        u32 frame_cnt;
};

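/* Resulting per-page layout (sketch; exact offsets depend on struct sizes):
 *
 *        +---------------------+  page start
 *        | orig_ctx            |  pristine xdp_buff, used to reset after a run
 *        | ctx                 |  live xdp_buff handed to the program
 *        +---------------------+  ::data_hard_start
 *        | frame / data        |  xdp_frame overlaps the packet headroom
 *        |   ... packet ...    |
 *        +---------------------+  page end
 */
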
/* tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c:%MAX_PKT_SIZE
 * must be updated accordingly when this gets changed, otherwise BPF selftests
 * will fail.
 */
#define TEST_XDP_FRAME_SIZE (PAGE_SIZE - sizeof(struct xdp_page_head))
#define TEST_XDP_MAX_BATCH 256

static void xdp_test_run_init_page(struct page *page, void *arg)
{
        struct xdp_page_head *head = phys_to_virt(page_to_phys(page));
        struct xdp_buff *new_ctx, *orig_ctx;
        u32 headroom = XDP_PACKET_HEADROOM;
        struct xdp_test_data *xdp = arg;
        size_t frm_len, meta_len;
        struct xdp_frame *frm;
        void *data;

        orig_ctx = xdp->orig_ctx;
        frm_len = orig_ctx->data_end - orig_ctx->data_meta;
        meta_len = orig_ctx->data - orig_ctx->data_meta;
        headroom -= meta_len;

        new_ctx = &head->ctx;
        frm = head->frame;
        data = head->data;
        memcpy(data + headroom, orig_ctx->data_meta, frm_len);

        xdp_init_buff(new_ctx, TEST_XDP_FRAME_SIZE, &xdp->rxq);
        xdp_prepare_buff(new_ctx, data, headroom, frm_len, true);
        new_ctx->data = new_ctx->data_meta + meta_len;

        xdp_update_frame_from_buff(new_ctx, frm);
        frm->mem = new_ctx->rxq->mem;

        memcpy(&head->orig_ctx, new_ctx, sizeof(head->orig_ctx));
}

static int xdp_test_run_setup(struct xdp_test_data *xdp, struct xdp_buff *orig_ctx)
{
        struct page_pool *pp;
        int err = -ENOMEM;
        struct page_pool_params pp_params = {
                .order = 0,
                .flags = 0,
                .pool_size = xdp->batch_size,
                .nid = NUMA_NO_NODE,
                .init_callback = xdp_test_run_init_page,
                .init_arg = xdp,
        };

        xdp->frames = kvmalloc_array(xdp->batch_size, sizeof(void *), GFP_KERNEL);
        if (!xdp->frames)
                return -ENOMEM;

        xdp->skbs = kvmalloc_array(xdp->batch_size, sizeof(void *), GFP_KERNEL);
        if (!xdp->skbs)
                goto err_skbs;

        pp = page_pool_create(&pp_params);
        if (IS_ERR(pp)) {
                err = PTR_ERR(pp);
                goto err_pp;
        }

        /* will copy 'mem.id' into pp->xdp_mem_id */
        err = xdp_reg_mem_model(&xdp->mem, MEM_TYPE_PAGE_POOL, pp);
        if (err)
                goto err_mmodel;

        xdp->pp = pp;

        /* We create a 'fake' RXQ referencing the original dev, but with an
         * xdp_mem_info pointing to our page_pool
         */
        xdp_rxq_info_reg(&xdp->rxq, orig_ctx->rxq->dev, 0, 0);
        xdp->rxq.mem.type = MEM_TYPE_PAGE_POOL;
        xdp->rxq.mem.id = pp->xdp_mem_id;
        xdp->dev = orig_ctx->rxq->dev;
        xdp->orig_ctx = orig_ctx;

        return 0;

err_mmodel:
        page_pool_destroy(pp);
err_pp:
        kvfree(xdp->skbs);
err_skbs:
        kvfree(xdp->frames);
        return err;
}

static void xdp_test_run_teardown(struct xdp_test_data *xdp)
{
        xdp_unreg_mem_model(&xdp->mem);
        page_pool_destroy(xdp->pp);
        kvfree(xdp->frames);
        kvfree(xdp->skbs);
}

static bool frame_was_changed(const struct xdp_page_head *head)
{
        /* xdp_scrub_frame() zeroes the data pointer, flags is the last field,
         * i.e. has the highest chances to be overwritten. If those two are
         * untouched, it's most likely safe to skip the context reset.
         */
        return head->frame->data != head->orig_ctx.data ||
               head->frame->flags != head->orig_ctx.flags;
}

static bool ctx_was_changed(struct xdp_page_head *head)
{
        return head->orig_ctx.data != head->ctx.data ||
               head->orig_ctx.data_meta != head->ctx.data_meta ||
               head->orig_ctx.data_end != head->ctx.data_end;
}

static void reset_ctx(struct xdp_page_head *head)
{
        if (likely(!frame_was_changed(head) && !ctx_was_changed(head)))
                return;

        head->ctx.data = head->orig_ctx.data;
        head->ctx.data_meta = head->orig_ctx.data_meta;
        head->ctx.data_end = head->orig_ctx.data_end;
        xdp_update_frame_from_buff(&head->ctx, head->frame);
}

static int xdp_recv_frames(struct xdp_frame **frames, int nframes,
                           struct sk_buff **skbs,
                           struct net_device *dev)
{
        gfp_t gfp = __GFP_ZERO | GFP_ATOMIC;
        int i, n;
        LIST_HEAD(list);

        n = kmem_cache_alloc_bulk(net_hotdata.skbuff_cache, gfp, nframes,
                                  (void **)skbs);
        if (unlikely(n == 0)) {
                for (i = 0; i < nframes; i++)
                        xdp_return_frame(frames[i]);
                return -ENOMEM;
        }

        for (i = 0; i < nframes; i++) {
                struct xdp_frame *xdpf = frames[i];
                struct sk_buff *skb = skbs[i];

                skb = __xdp_build_skb_from_frame(xdpf, skb, dev);
                if (!skb) {
                        xdp_return_frame(xdpf);
                        continue;
                }

                list_add_tail(&skb->list, &list);
        }
        netif_receive_skb_list(&list);

        return 0;
}

static int xdp_test_run_batch(struct xdp_test_data *xdp, struct bpf_prog *prog,
                              u32 repeat)
{
        struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
        int err = 0, act, ret, i, nframes = 0, batch_sz;
        struct xdp_frame **frames = xdp->frames;
        struct xdp_page_head *head;
        struct xdp_frame *frm;
        bool redirect = false;
        struct xdp_buff *ctx;
        struct page *page;

        batch_sz = min_t(u32, repeat, xdp->batch_size);

        local_bh_disable();
        xdp_set_return_frame_no_direct();

        for (i = 0; i < batch_sz; i++) {
                page = page_pool_dev_alloc_pages(xdp->pp);
                if (!page) {
                        err = -ENOMEM;
                        goto out;
                }

                head = phys_to_virt(page_to_phys(page));
                reset_ctx(head);
                ctx = &head->ctx;
                frm = head->frame;
                xdp->frame_cnt++;

                act = bpf_prog_run_xdp(prog, ctx);

                /* if program changed pkt bounds we need to update the xdp_frame */
                if (unlikely(ctx_was_changed(head))) {
                        ret = xdp_update_frame_from_buff(ctx, frm);
                        if (ret) {
                                xdp_return_buff(ctx);
                                continue;
                        }
                }

                switch (act) {
                case XDP_TX:
                        /* we can't do a real XDP_TX since we're not in the
                         * driver, so turn it into a REDIRECT back to the same
                         * index
                         */
                        ri->tgt_index = xdp->dev->ifindex;
                        ri->map_id = INT_MAX;
                        ri->map_type = BPF_MAP_TYPE_UNSPEC;
                        fallthrough;
                case XDP_REDIRECT:
                        redirect = true;
                        ret = xdp_do_redirect_frame(xdp->dev, ctx, frm, prog);
                        if (ret)
                                xdp_return_buff(ctx);
                        break;
                case XDP_PASS:
                        frames[nframes++] = frm;
                        break;
                default:
                        bpf_warn_invalid_xdp_action(NULL, prog, act);
                        fallthrough;
                case XDP_DROP:
                        xdp_return_buff(ctx);
                        break;
                }
        }

out:
        if (redirect)
                xdp_do_flush();
        if (nframes) {
                ret = xdp_recv_frames(frames, nframes, xdp->skbs, xdp->dev);
                if (ret)
                        err = ret;
        }

        xdp_clear_return_frame_no_direct();
        local_bh_enable();
        return err;
}

static int bpf_test_run_xdp_live(struct bpf_prog *prog, struct xdp_buff *ctx,
                                 u32 repeat, u32 batch_size, u32 *time)
{
        struct xdp_test_data xdp = { .batch_size = batch_size };
        struct bpf_test_timer t = { .mode = NO_MIGRATE };
        int ret;

        if (!repeat)
                repeat = 1;

        ret = xdp_test_run_setup(&xdp, ctx);
        if (ret)
                return ret;

        bpf_test_timer_enter(&t);
        do {
                xdp.frame_cnt = 0;
                ret = xdp_test_run_batch(&xdp, prog, repeat - t.i);
                if (unlikely(ret < 0))
                        break;
        } while (bpf_test_timer_continue(&t, xdp.frame_cnt, repeat, &ret, time));
        bpf_test_timer_leave(&t);

        xdp_test_run_teardown(&xdp);
        return ret;
}

static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat,
                        u32 *retval, u32 *time, bool xdp)
{
        struct bpf_prog_array_item item = {.prog = prog};
        struct bpf_run_ctx *old_ctx;
        struct bpf_cg_run_ctx run_ctx;
        struct bpf_test_timer t = { NO_MIGRATE };
        enum bpf_cgroup_storage_type stype;
        int ret;

        for_each_cgroup_storage_type(stype) {
                item.cgroup_storage[stype] = bpf_cgroup_storage_alloc(prog, stype);
                if (IS_ERR(item.cgroup_storage[stype])) {
                        item.cgroup_storage[stype] = NULL;
                        for_each_cgroup_storage_type(stype)
                                bpf_cgroup_storage_free(item.cgroup_storage[stype]);
                        return -ENOMEM;
                }
        }

        if (!repeat)
                repeat = 1;

        bpf_test_timer_enter(&t);
        old_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
        do {
                run_ctx.prog_item = &item;
                local_bh_disable();
                if (xdp)
                        *retval = bpf_prog_run_xdp(prog, ctx);
                else
                        *retval = bpf_prog_run(prog, ctx);
                local_bh_enable();
        } while (bpf_test_timer_continue(&t, 1, repeat, &ret, time));
        bpf_reset_run_ctx(old_ctx);
        bpf_test_timer_leave(&t);

        for_each_cgroup_storage_type(stype)
                bpf_cgroup_storage_free(item.cgroup_storage[stype]);

        return ret;
}

static int bpf_test_finish(const union bpf_attr *kattr,
                           union bpf_attr __user *uattr, const void *data,
                           struct skb_shared_info *sinfo, u32 size,
                           u32 retval, u32 duration)
{
        void __user *data_out = u64_to_user_ptr(kattr->test.data_out);
        int err = -EFAULT;
        u32 copy_size = size;

        /* Clamp copy if the user has provided a size hint, but copy the full
         * buffer if not to retain old behaviour.
         */
        if (kattr->test.data_size_out &&
            copy_size > kattr->test.data_size_out) {
                copy_size = kattr->test.data_size_out;
                err = -ENOSPC;
        }

        if (data_out) {
                int len = sinfo ? copy_size - sinfo->xdp_frags_size : copy_size;

                if (len < 0) {
                        err = -ENOSPC;
                        goto out;
                }

                if (copy_to_user(data_out, data, len))
                        goto out;

                if (sinfo) {
                        int i, offset = len;
                        u32 data_len;

                        for (i = 0; i < sinfo->nr_frags; i++) {
                                skb_frag_t *frag = &sinfo->frags[i];

                                if (offset >= copy_size) {
                                        err = -ENOSPC;
                                        break;
                                }

                                data_len = min_t(u32, copy_size - offset,
                                                 skb_frag_size(frag));

                                if (copy_to_user(data_out + offset,
                                                 skb_frag_address(frag),
                                                 data_len))
                                        goto out;

                                offset += data_len;
                        }
                }
        }

        if (copy_to_user(&uattr->test.data_size_out, &size, sizeof(size)))
                goto out;
        if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval)))
                goto out;
        if (copy_to_user(&uattr->test.duration, &duration, sizeof(duration)))
                goto out;
        if (err != -ENOSPC)
                err = 0;
out:
        trace_bpf_test_finish(&err);
        return err;
}

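/* From userspace this path is typically driven through libbpf; a rough
 * sketch (buffer names are made up, error handling trimmed):
 *
 *        LIBBPF_OPTS(bpf_test_run_opts, opts,
 *                    .data_in = pkt,
 *                    .data_size_in = sizeof(pkt),
 *                    .data_out = out,
 *                    .data_size_out = sizeof(out), // size hint; clamps the copy
 *                    .repeat = 100,
 *        );
 *        err = bpf_prog_test_run_opts(prog_fd, &opts);
 *
 * On return opts.retval, opts.duration (mean ns per run) and
 * opts.data_size_out (the full output size, even when truncated) are filled
 * in; a too-small size hint surfaces as -ENOSPC, per the clamping above.
 */
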
/* Integer types of various sizes and pointer combinations cover a variety of
 * architecture-dependent calling conventions. 7+ can be supported in the
 * future.
 */
__bpf_kfunc_start_defs();

__bpf_kfunc int bpf_fentry_test1(int a)
{
        return a + 1;
}
EXPORT_SYMBOL_GPL(bpf_fentry_test1);

int noinline bpf_fentry_test2(int a, u64 b)
{
        return a + b;
}

int noinline bpf_fentry_test3(char a, int b, u64 c)
{
        return a + b + c;
}

int noinline bpf_fentry_test4(void *a, char b, int c, u64 d)
{
        return (long)a + b + c + d;
}

int noinline bpf_fentry_test5(u64 a, void *b, short c, int d, u64 e)
{
        return a + (long)b + c + d + e;
}

int noinline bpf_fentry_test6(u64 a, void *b, short c, int d, void *e, u64 f)
{
        return a + (long)b + c + d + (long)e + f;
}

struct bpf_fentry_test_t {
        struct bpf_fentry_test_t *a;
};

int noinline bpf_fentry_test7(struct bpf_fentry_test_t *arg)
{
        asm volatile ("": "+r"(arg));
        return (long)arg;
}

int noinline bpf_fentry_test8(struct bpf_fentry_test_t *arg)
{
        return (long)arg->a;
}

__bpf_kfunc u32 bpf_fentry_test9(u32 *a)
{
        return *a;
}

void noinline bpf_fentry_test_sinfo(struct skb_shared_info *sinfo)
{
}

__bpf_kfunc int bpf_modify_return_test(int a, int *b)
{
        *b += 1;
        return a + *b;
}

__bpf_kfunc int bpf_modify_return_test2(int a, int *b, short c, int d,
                                        void *e, char f, int g)
{
        *b += 1;
        return a + *b + c + d + (long)e + f + g;
}

int noinline bpf_fentry_shadow_test(int a)
{
        return a + 1;
}

struct prog_test_member1 {
        int a;
};

struct prog_test_member {
        struct prog_test_member1 m;
        int c;
};

struct prog_test_ref_kfunc {
        int a;
        int b;
        struct prog_test_member memb;
        struct prog_test_ref_kfunc *next;
        refcount_t cnt;
};

__bpf_kfunc void bpf_kfunc_call_test_release(struct prog_test_ref_kfunc *p)
{
        refcount_dec(&p->cnt);
}

__bpf_kfunc void bpf_kfunc_call_test_release_dtor(void *p)
{
        bpf_kfunc_call_test_release(p);
}
CFI_NOSEAL(bpf_kfunc_call_test_release_dtor);

__bpf_kfunc void bpf_kfunc_call_memb_release(struct prog_test_member *p)
{
}

__bpf_kfunc void bpf_kfunc_call_memb_release_dtor(void *p)
{
}
CFI_NOSEAL(bpf_kfunc_call_memb_release_dtor);

__bpf_kfunc_end_defs();

BTF_KFUNCS_START(bpf_test_modify_return_ids)
BTF_ID_FLAGS(func, bpf_modify_return_test)
BTF_ID_FLAGS(func, bpf_modify_return_test2)
BTF_ID_FLAGS(func, bpf_fentry_test1, KF_SLEEPABLE)
BTF_KFUNCS_END(bpf_test_modify_return_ids)

static const struct btf_kfunc_id_set bpf_test_modify_return_set = {
        .owner = THIS_MODULE,
        .set = &bpf_test_modify_return_ids,
};

BTF_KFUNCS_START(test_sk_check_kfunc_ids)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_release, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_kfunc_call_memb_release, KF_RELEASE)
BTF_KFUNCS_END(test_sk_check_kfunc_ids)

static void *bpf_test_init(const union bpf_attr *kattr, u32 user_size,
                           u32 size, u32 headroom, u32 tailroom)
{
        void __user *data_in = u64_to_user_ptr(kattr->test.data_in);
        void *data;

        if (size < ETH_HLEN || size > PAGE_SIZE - headroom - tailroom)
                return ERR_PTR(-EINVAL);

        if (user_size > size)
                return ERR_PTR(-EMSGSIZE);

        size = SKB_DATA_ALIGN(size);
        data = kzalloc(size + headroom + tailroom, GFP_USER);
        if (!data)
                return ERR_PTR(-ENOMEM);

        if (copy_from_user(data + headroom, data_in, user_size)) {
                kfree(data);
                return ERR_PTR(-EFAULT);
        }

        return data;
}

int bpf_prog_test_run_tracing(struct bpf_prog *prog,
                              const union bpf_attr *kattr,
                              union bpf_attr __user *uattr)
{
        struct bpf_fentry_test_t arg = {};
        u16 side_effect = 0, ret = 0;
        int b = 2, err = -EFAULT;
        u32 retval = 0;

        if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size)
                return -EINVAL;

        switch (prog->expected_attach_type) {
        case BPF_TRACE_FENTRY:
        case BPF_TRACE_FEXIT:
                if (bpf_fentry_test1(1) != 2 ||
                    bpf_fentry_test2(2, 3) != 5 ||
                    bpf_fentry_test3(4, 5, 6) != 15 ||
                    bpf_fentry_test4((void *)7, 8, 9, 10) != 34 ||
                    bpf_fentry_test5(11, (void *)12, 13, 14, 15) != 65 ||
                    bpf_fentry_test6(16, (void *)17, 18, 19, (void *)20, 21) != 111 ||
                    bpf_fentry_test7((struct bpf_fentry_test_t *)0) != 0 ||
                    bpf_fentry_test8(&arg) != 0 ||
                    bpf_fentry_test9(&retval) != 0)
                        goto out;
                break;
        case BPF_MODIFY_RETURN:
                ret = bpf_modify_return_test(1, &b);
                if (b != 2)
                        side_effect++;
                b = 2;
                ret += bpf_modify_return_test2(1, &b, 3, 4, (void *)5, 6, 7);
                if (b != 2)
                        side_effect++;
                break;
        default:
                goto out;
        }

        retval = ((u32)side_effect << 16) | ret;
        if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval)))
                goto out;

        err = 0;
out:
        trace_bpf_test_finish(&err);
        return err;
}

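/* A test harness reading uattr->test.retval can split the packed value
 * back out like so (sketch):
 *
 *        u16 side_effect = retval >> 16;   // times *b was seen modified
 *        u16 ret = retval & 0xffff;        // sum of the test functions' returns
 */
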
struct bpf_raw_tp_test_run_info {
        struct bpf_prog *prog;
        void *ctx;
        u32 retval;
};

static void
__bpf_prog_test_run_raw_tp(void *data)
{
        struct bpf_raw_tp_test_run_info *info = data;

        rcu_read_lock();
        info->retval = bpf_prog_run(info->prog, info->ctx);
        rcu_read_unlock();
}

int bpf_prog_test_run_raw_tp(struct bpf_prog *prog,
                             const union bpf_attr *kattr,
                             union bpf_attr __user *uattr)
{
        void __user *ctx_in = u64_to_user_ptr(kattr->test.ctx_in);
        __u32 ctx_size_in = kattr->test.ctx_size_in;
        struct bpf_raw_tp_test_run_info info;
        int cpu = kattr->test.cpu, err = 0;
        int current_cpu;

        /* doesn't support data_in/out, ctx_out, duration, or repeat */
        if (kattr->test.data_in || kattr->test.data_out ||
            kattr->test.ctx_out || kattr->test.duration ||
            kattr->test.repeat || kattr->test.batch_size)
                return -EINVAL;

        if (ctx_size_in < prog->aux->max_ctx_offset ||
            ctx_size_in > MAX_BPF_FUNC_ARGS * sizeof(u64))
                return -EINVAL;

        if ((kattr->test.flags & BPF_F_TEST_RUN_ON_CPU) == 0 && cpu != 0)
                return -EINVAL;

        if (ctx_size_in) {
                info.ctx = memdup_user(ctx_in, ctx_size_in);
                if (IS_ERR(info.ctx))
                        return PTR_ERR(info.ctx);
        } else {
                info.ctx = NULL;
        }

        info.prog = prog;

        current_cpu = get_cpu();
        if ((kattr->test.flags & BPF_F_TEST_RUN_ON_CPU) == 0 ||
            cpu == current_cpu) {
                __bpf_prog_test_run_raw_tp(&info);
        } else if (cpu >= nr_cpu_ids || !cpu_online(cpu)) {
                /* smp_call_function_single() also checks cpu_online()
                 * after csd_lock(). However, since cpu is from user
                 * space, let's do an extra quick check to filter out
                 * invalid value before smp_call_function_single().
                 */
                err = -ENXIO;
        } else {
                err = smp_call_function_single(cpu, __bpf_prog_test_run_raw_tp,
                                               &info, 1);
        }
        put_cpu();

        if (!err &&
            copy_to_user(&uattr->test.retval, &info.retval, sizeof(u32)))
                err = -EFAULT;

        kfree(info.ctx);
        return err;
}

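/* Pinning the run to a specific CPU from userspace looks roughly like this
 * with libbpf (illustrative; the ctx layout depends on the tracepoint's
 * argument list):
 *
 *        LIBBPF_OPTS(bpf_test_run_opts, opts,
 *                    .ctx_in = args,
 *                    .ctx_size_in = sizeof(args),
 *                    .flags = BPF_F_TEST_RUN_ON_CPU,
 *                    .cpu = 3,
 *        );
 *        err = bpf_prog_test_run_opts(raw_tp_prog_fd, &opts);
 */
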
static void *bpf_ctx_init(const union bpf_attr *kattr, u32 max_size)
{
        void __user *data_in = u64_to_user_ptr(kattr->test.ctx_in);
        void __user *data_out = u64_to_user_ptr(kattr->test.ctx_out);
        u32 size = kattr->test.ctx_size_in;
        void *data;
        int err;

        if (!data_in && !data_out)
                return NULL;

        data = kzalloc(max_size, GFP_USER);
        if (!data)
                return ERR_PTR(-ENOMEM);

        if (data_in) {
                err = bpf_check_uarg_tail_zero(USER_BPFPTR(data_in), max_size, size);
                if (err) {
                        kfree(data);
                        return ERR_PTR(err);
                }

                size = min_t(u32, max_size, size);
                if (copy_from_user(data, data_in, size)) {
                        kfree(data);
                        return ERR_PTR(-EFAULT);
                }
        }
        return data;
}

static int bpf_ctx_finish(const union bpf_attr *kattr,
                          union bpf_attr __user *uattr, const void *data,
                          u32 size)
{
        void __user *data_out = u64_to_user_ptr(kattr->test.ctx_out);
        int err = -EFAULT;
        u32 copy_size = size;

        if (!data || !data_out)
                return 0;

        if (copy_size > kattr->test.ctx_size_out) {
                copy_size = kattr->test.ctx_size_out;
                err = -ENOSPC;
        }

        if (copy_to_user(data_out, data, copy_size))
                goto out;
        if (copy_to_user(&uattr->test.ctx_size_out, &size, sizeof(size)))
                goto out;
        if (err != -ENOSPC)
                err = 0;
out:
        return err;
}

/**
 * range_is_zero - test whether buffer is initialized
 * @buf: buffer to check
 * @from: check from this position
 * @to: check up until (excluding) this position
 *
 * This function returns true if all the bytes in the buf in the range
 * [from, to) are zero.
 */
static inline bool range_is_zero(void *buf, size_t from, size_t to)
{
        return !memchr_inv((u8 *)buf + from, 0, to - from);
}

static int convert___skb_to_skb(struct sk_buff *skb, struct __sk_buff *__skb)
{
        struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;

        if (!__skb)
                return 0;

        /* make sure the fields we don't use are zeroed */
        if (!range_is_zero(__skb, 0, offsetof(struct __sk_buff, mark)))
                return -EINVAL;

        /* mark is allowed */

        if (!range_is_zero(__skb, offsetofend(struct __sk_buff, mark),
                           offsetof(struct __sk_buff, priority)))
                return -EINVAL;

        /* priority is allowed */
        /* ingress_ifindex is allowed */
        /* ifindex is allowed */

        if (!range_is_zero(__skb, offsetofend(struct __sk_buff, ifindex),
                           offsetof(struct __sk_buff, cb)))
                return -EINVAL;

        /* cb is allowed */

        if (!range_is_zero(__skb, offsetofend(struct __sk_buff, cb),
                           offsetof(struct __sk_buff, tstamp)))
                return -EINVAL;

        /* tstamp is allowed */
        /* wire_len is allowed */
        /* gso_segs is allowed */

        if (!range_is_zero(__skb, offsetofend(struct __sk_buff, gso_segs),
                           offsetof(struct __sk_buff, gso_size)))
                return -EINVAL;

        /* gso_size is allowed */

        if (!range_is_zero(__skb, offsetofend(struct __sk_buff, gso_size),
                           offsetof(struct __sk_buff, hwtstamp)))
                return -EINVAL;

        /* hwtstamp is allowed */

        if (!range_is_zero(__skb, offsetofend(struct __sk_buff, hwtstamp),
                           sizeof(struct __sk_buff)))
                return -EINVAL;

        skb->mark = __skb->mark;
        skb->priority = __skb->priority;
        skb->skb_iif = __skb->ingress_ifindex;
        skb->tstamp = __skb->tstamp;
        memcpy(&cb->data, __skb->cb, QDISC_CB_PRIV_LEN);

        if (__skb->wire_len == 0) {
                cb->pkt_len = skb->len;
        } else {
                if (__skb->wire_len < skb->len ||
                    __skb->wire_len > GSO_LEGACY_MAX_SIZE)
                        return -EINVAL;
                cb->pkt_len = __skb->wire_len;
        }

        if (__skb->gso_segs > GSO_MAX_SEGS)
                return -EINVAL;
        skb_shinfo(skb)->gso_segs = __skb->gso_segs;
        skb_shinfo(skb)->gso_size = __skb->gso_size;
        skb_shinfo(skb)->hwtstamps.hwtstamp = __skb->hwtstamp;

        return 0;
}

static void convert_skb_to___skb(struct sk_buff *skb, struct __sk_buff *__skb)
{
        struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;

        if (!__skb)
                return;

        __skb->mark = skb->mark;
        __skb->priority = skb->priority;
        __skb->ingress_ifindex = skb->skb_iif;
        __skb->ifindex = skb->dev->ifindex;
        __skb->tstamp = skb->tstamp;
        memcpy(__skb->cb, &cb->data, QDISC_CB_PRIV_LEN);
        __skb->wire_len = cb->pkt_len;
        __skb->gso_segs = skb_shinfo(skb)->gso_segs;
        __skb->hwtstamp = skb_shinfo(skb)->hwtstamps.hwtstamp;
}

static struct proto bpf_dummy_proto = {
        .name = "bpf_dummy",
        .owner = THIS_MODULE,
        .obj_size = sizeof(struct sock),
};

int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
                          union bpf_attr __user *uattr)
{
        bool is_l2 = false, is_direct_pkt_access = false;
        struct net *net = current->nsproxy->net_ns;
        struct net_device *dev = net->loopback_dev;
        u32 size = kattr->test.data_size_in;
        u32 repeat = kattr->test.repeat;
        struct __sk_buff *ctx = NULL;
        u32 retval, duration;
        int hh_len = ETH_HLEN;
        struct sk_buff *skb;
        struct sock *sk;
        void *data;
        int ret;

        if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size)
                return -EINVAL;

        data = bpf_test_init(kattr, kattr->test.data_size_in,
                             size, NET_SKB_PAD + NET_IP_ALIGN,
                             SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
        if (IS_ERR(data))
                return PTR_ERR(data);

        ctx = bpf_ctx_init(kattr, sizeof(struct __sk_buff));
        if (IS_ERR(ctx)) {
                kfree(data);
                return PTR_ERR(ctx);
        }

        switch (prog->type) {
        case BPF_PROG_TYPE_SCHED_CLS:
        case BPF_PROG_TYPE_SCHED_ACT:
                is_l2 = true;
                fallthrough;
        case BPF_PROG_TYPE_LWT_IN:
        case BPF_PROG_TYPE_LWT_OUT:
        case BPF_PROG_TYPE_LWT_XMIT:
                is_direct_pkt_access = true;
                break;
        default:
                break;
        }

        sk = sk_alloc(net, AF_UNSPEC, GFP_USER, &bpf_dummy_proto, 1);
        if (!sk) {
                kfree(data);
                kfree(ctx);
                return -ENOMEM;
        }
        sock_init_data(NULL, sk);

        skb = slab_build_skb(data);
        if (!skb) {
                kfree(data);
                kfree(ctx);
                sk_free(sk);
                return -ENOMEM;
        }
        skb->sk = sk;

        skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
        __skb_put(skb, size);
        if (ctx && ctx->ifindex > 1) {
                dev = dev_get_by_index(net, ctx->ifindex);
                if (!dev) {
                        ret = -ENODEV;
                        goto out;
                }
        }
        skb->protocol = eth_type_trans(skb, dev);
        skb_reset_network_header(skb);

        switch (skb->protocol) {
        case htons(ETH_P_IP):
                sk->sk_family = AF_INET;
                if (sizeof(struct iphdr) <= skb_headlen(skb)) {
                        sk->sk_rcv_saddr = ip_hdr(skb)->saddr;
                        sk->sk_daddr = ip_hdr(skb)->daddr;
                }
                break;
#if IS_ENABLED(CONFIG_IPV6)
        case htons(ETH_P_IPV6):
                sk->sk_family = AF_INET6;
                if (sizeof(struct ipv6hdr) <= skb_headlen(skb)) {
                        sk->sk_v6_rcv_saddr = ipv6_hdr(skb)->saddr;
                        sk->sk_v6_daddr = ipv6_hdr(skb)->daddr;
                }
                break;
#endif
        default:
                break;
        }

        if (is_l2)
                __skb_push(skb, hh_len);
        if (is_direct_pkt_access)
                bpf_compute_data_pointers(skb);
        ret = convert___skb_to_skb(skb, ctx);
        if (ret)
                goto out;
        ret = bpf_test_run(prog, skb, repeat, &retval, &duration, false);
        if (ret)
                goto out;
        if (!is_l2) {
                if (skb_headroom(skb) < hh_len) {
                        int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb));

                        if (pskb_expand_head(skb, nhead, 0, GFP_USER)) {
                                ret = -ENOMEM;
                                goto out;
                        }
                }
                memset(__skb_push(skb, hh_len), 0, hh_len);
        }
        convert_skb_to___skb(skb, ctx);

        size = skb->len;
        /* bpf program can never convert linear skb to non-linear */
        if (WARN_ON_ONCE(skb_is_nonlinear(skb)))
                size = skb_headlen(skb);
        ret = bpf_test_finish(kattr, uattr, skb->data, NULL, size, retval,
                              duration);
        if (!ret)
                ret = bpf_ctx_finish(kattr, uattr, ctx,
                                     sizeof(struct __sk_buff));
out:
        if (dev && dev != net->loopback_dev)
                dev_put(dev);
        kfree_skb(skb);
        sk_free(sk);
        kfree(ctx);
        return ret;
}

static int xdp_convert_md_to_buff(struct xdp_md *xdp_md, struct xdp_buff *xdp)
{
        unsigned int ingress_ifindex, rx_queue_index;
        struct netdev_rx_queue *rxqueue;
        struct net_device *device;

        if (!xdp_md)
                return 0;

        if (xdp_md->egress_ifindex != 0)
                return -EINVAL;

        ingress_ifindex = xdp_md->ingress_ifindex;
        rx_queue_index = xdp_md->rx_queue_index;

        if (!ingress_ifindex && rx_queue_index)
                return -EINVAL;

        if (ingress_ifindex) {
                device = dev_get_by_index(current->nsproxy->net_ns,
                                          ingress_ifindex);
                if (!device)
                        return -ENODEV;

                if (rx_queue_index >= device->real_num_rx_queues)
                        goto free_dev;

                rxqueue = __netif_get_rx_queue(device, rx_queue_index);

                if (!xdp_rxq_info_is_reg(&rxqueue->xdp_rxq))
                        goto free_dev;

                xdp->rxq = &rxqueue->xdp_rxq;
                /* The device is now tracked in the xdp->rxq for later
                 * dev_put()
                 */
        }

        xdp->data = xdp->data_meta + xdp_md->data;
        return 0;

free_dev:
        dev_put(device);
        return -EINVAL;
}

static void xdp_convert_buff_to_md(struct xdp_buff *xdp, struct xdp_md *xdp_md)
{
        if (!xdp_md)
                return;

        xdp_md->data = xdp->data - xdp->data_meta;
        xdp_md->data_end = xdp->data_end - xdp->data_meta;

        if (xdp_md->ingress_ifindex)
                dev_put(xdp->rxq->dev);
}

int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
                          union bpf_attr __user *uattr)
{
        bool do_live = (kattr->test.flags & BPF_F_TEST_XDP_LIVE_FRAMES);
        u32 tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
        u32 batch_size = kattr->test.batch_size;
        u32 retval = 0, duration, max_data_sz;
        u32 size = kattr->test.data_size_in;
        u32 headroom = XDP_PACKET_HEADROOM;
        u32 repeat = kattr->test.repeat;
        struct netdev_rx_queue *rxqueue;
        struct skb_shared_info *sinfo;
        struct xdp_buff xdp = {};
        int i, ret = -EINVAL;
        struct xdp_md *ctx;
        void *data;

        if (prog->expected_attach_type == BPF_XDP_DEVMAP ||
            prog->expected_attach_type == BPF_XDP_CPUMAP)
                return -EINVAL;

        if (kattr->test.flags & ~BPF_F_TEST_XDP_LIVE_FRAMES)
                return -EINVAL;

        if (bpf_prog_is_dev_bound(prog->aux))
                return -EINVAL;

        if (do_live) {
                if (!batch_size)
                        batch_size = NAPI_POLL_WEIGHT;
                else if (batch_size > TEST_XDP_MAX_BATCH)
                        return -E2BIG;

                headroom += sizeof(struct xdp_page_head);
        } else if (batch_size) {
                return -EINVAL;
        }

        ctx = bpf_ctx_init(kattr, sizeof(struct xdp_md));
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        if (ctx) {
                /* There can't be user provided data before the meta data */
                if (ctx->data_meta || ctx->data_end != size ||
                    ctx->data > ctx->data_end ||
                    unlikely(xdp_metalen_invalid(ctx->data)) ||
                    (do_live && (kattr->test.data_out || kattr->test.ctx_out)))
                        goto free_ctx;
                /* Meta data is allocated from the headroom */
                headroom -= ctx->data;
        }

        max_data_sz = 4096 - headroom - tailroom;
        if (size > max_data_sz) {
                /* disallow live data mode for jumbo frames */
                if (do_live)
                        goto free_ctx;
                size = max_data_sz;
        }

        data = bpf_test_init(kattr, size, max_data_sz, headroom, tailroom);
        if (IS_ERR(data)) {
                ret = PTR_ERR(data);
                goto free_ctx;
        }

        rxqueue = __netif_get_rx_queue(current->nsproxy->net_ns->loopback_dev, 0);
        rxqueue->xdp_rxq.frag_size = headroom + max_data_sz + tailroom;
        xdp_init_buff(&xdp, rxqueue->xdp_rxq.frag_size, &rxqueue->xdp_rxq);
        xdp_prepare_buff(&xdp, data, headroom, size, true);
        sinfo = xdp_get_shared_info_from_buff(&xdp);

        ret = xdp_convert_md_to_buff(ctx, &xdp);
        if (ret)
                goto free_data;

        if (unlikely(kattr->test.data_size_in > size)) {
                void __user *data_in = u64_to_user_ptr(kattr->test.data_in);

                while (size < kattr->test.data_size_in) {
                        struct page *page;
                        skb_frag_t *frag;
                        u32 data_len;

                        if (sinfo->nr_frags == MAX_SKB_FRAGS) {
                                ret = -ENOMEM;
                                goto out;
                        }

                        page = alloc_page(GFP_KERNEL);
                        if (!page) {
                                ret = -ENOMEM;
                                goto out;
                        }

                        frag = &sinfo->frags[sinfo->nr_frags++];

                        data_len = min_t(u32, kattr->test.data_size_in - size,
                                         PAGE_SIZE);
                        skb_frag_fill_page_desc(frag, page, 0, data_len);

                        if (copy_from_user(page_address(page), data_in + size,
                                           data_len)) {
                                ret = -EFAULT;
                                goto out;
                        }
                        sinfo->xdp_frags_size += data_len;
                        size += data_len;
                }
                xdp_buff_set_frags_flag(&xdp);
        }

        if (repeat > 1)
                bpf_prog_change_xdp(NULL, prog);

        if (do_live)
                ret = bpf_test_run_xdp_live(prog, &xdp, repeat, batch_size, &duration);
        else
                ret = bpf_test_run(prog, &xdp, repeat, &retval, &duration, true);
        /* We convert the xdp_buff back to an xdp_md before checking the return
         * code so the reference count of any held netdevice will be decremented
         * even if the test run failed.
         */
        xdp_convert_buff_to_md(&xdp, ctx);
        if (ret)
                goto out;

        size = xdp.data_end - xdp.data_meta + sinfo->xdp_frags_size;
        ret = bpf_test_finish(kattr, uattr, xdp.data_meta, sinfo, size,
                              retval, duration);
        if (!ret)
                ret = bpf_ctx_finish(kattr, uattr, ctx,
                                     sizeof(struct xdp_md));

out:
        if (repeat > 1)
                bpf_prog_change_xdp(prog, NULL);
free_data:
        for (i = 0; i < sinfo->nr_frags; i++)
                __free_page(skb_frag_page(&sinfo->frags[i]));
        kfree(data);
free_ctx:
        kfree(ctx);
        return ret;
}

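/* Driving the live-frames path from userspace (sketch via libbpf; a program
 * under test here would typically XDP_REDIRECT or XDP_TX the frames):
 *
 *        LIBBPF_OPTS(bpf_test_run_opts, opts,
 *                    .data_in = pkt,
 *                    .data_size_in = sizeof(pkt),
 *                    .flags = BPF_F_TEST_XDP_LIVE_FRAMES,
 *                    .batch_size = 64, // 0 defaults to NAPI_POLL_WEIGHT
 *                    .repeat = 1 << 20,
 *        );
 *        err = bpf_prog_test_run_opts(xdp_prog_fd, &opts);
 *
 * Note that data_out/ctx_out are rejected in this mode, per the checks above.
 */
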
static int verify_user_bpf_flow_keys(struct bpf_flow_keys *ctx)
{
        /* make sure the fields we don't use are zeroed */
        if (!range_is_zero(ctx, 0, offsetof(struct bpf_flow_keys, flags)))
                return -EINVAL;

        /* flags is allowed */

        if (!range_is_zero(ctx, offsetofend(struct bpf_flow_keys, flags),
                           sizeof(struct bpf_flow_keys)))
                return -EINVAL;

        return 0;
}

int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
                                     const union bpf_attr *kattr,
                                     union bpf_attr __user *uattr)
{
        struct bpf_test_timer t = { NO_PREEMPT };
        u32 size = kattr->test.data_size_in;
        struct bpf_flow_dissector ctx = {};
        u32 repeat = kattr->test.repeat;
        struct bpf_flow_keys *user_ctx;
        struct bpf_flow_keys flow_keys;
        const struct ethhdr *eth;
        unsigned int flags = 0;
        u32 retval, duration;
        void *data;
        int ret;

        if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size)
                return -EINVAL;

        if (size < ETH_HLEN)
                return -EINVAL;

        data = bpf_test_init(kattr, kattr->test.data_size_in, size, 0, 0);
        if (IS_ERR(data))
                return PTR_ERR(data);

        eth = (struct ethhdr *)data;

        if (!repeat)
                repeat = 1;

        user_ctx = bpf_ctx_init(kattr, sizeof(struct bpf_flow_keys));
        if (IS_ERR(user_ctx)) {
                kfree(data);
                return PTR_ERR(user_ctx);
        }
        if (user_ctx) {
                ret = verify_user_bpf_flow_keys(user_ctx);
                if (ret)
                        goto out;
                flags = user_ctx->flags;
        }

        ctx.flow_keys = &flow_keys;
        ctx.data = data;
        ctx.data_end = (__u8 *)data + size;

        bpf_test_timer_enter(&t);
        do {
                retval = bpf_flow_dissect(prog, &ctx, eth->h_proto, ETH_HLEN,
                                          size, flags);
        } while (bpf_test_timer_continue(&t, 1, repeat, &ret, &duration));
        bpf_test_timer_leave(&t);

        if (ret < 0)
                goto out;

        ret = bpf_test_finish(kattr, uattr, &flow_keys, NULL,
                              sizeof(flow_keys), retval, duration);
        if (!ret)
                ret = bpf_ctx_finish(kattr, uattr, user_ctx,
                                     sizeof(struct bpf_flow_keys));

out:
        kfree(user_ctx);
        kfree(data);
        return ret;
}

int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog, const union bpf_attr *kattr,
                                union bpf_attr __user *uattr)
{
        struct bpf_test_timer t = { NO_PREEMPT };
        struct bpf_prog_array *progs = NULL;
        struct bpf_sk_lookup_kern ctx = {};
        u32 repeat = kattr->test.repeat;
        struct bpf_sk_lookup *user_ctx;
        u32 retval, duration;
        int ret = -EINVAL;

        if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size)
                return -EINVAL;

        if (kattr->test.data_in || kattr->test.data_size_in || kattr->test.data_out ||
            kattr->test.data_size_out)
                return -EINVAL;

        if (!repeat)
                repeat = 1;

        user_ctx = bpf_ctx_init(kattr, sizeof(*user_ctx));
        if (IS_ERR(user_ctx))
                return PTR_ERR(user_ctx);

        if (!user_ctx)
                return -EINVAL;

        if (user_ctx->sk)
                goto out;

        if (!range_is_zero(user_ctx, offsetofend(typeof(*user_ctx), local_port), sizeof(*user_ctx)))
                goto out;

        if (user_ctx->local_port > U16_MAX) {
                ret = -ERANGE;
                goto out;
        }

        ctx.family = (u16)user_ctx->family;
        ctx.protocol = (u16)user_ctx->protocol;
        ctx.dport = (u16)user_ctx->local_port;
        ctx.sport = user_ctx->remote_port;

        switch (ctx.family) {
        case AF_INET:
                ctx.v4.daddr = (__force __be32)user_ctx->local_ip4;
                ctx.v4.saddr = (__force __be32)user_ctx->remote_ip4;
                break;

#if IS_ENABLED(CONFIG_IPV6)
        case AF_INET6:
                ctx.v6.daddr = (struct in6_addr *)user_ctx->local_ip6;
                ctx.v6.saddr = (struct in6_addr *)user_ctx->remote_ip6;
                break;
#endif

        default:
                ret = -EAFNOSUPPORT;
                goto out;
        }

        progs = bpf_prog_array_alloc(1, GFP_KERNEL);
        if (!progs) {
                ret = -ENOMEM;
                goto out;
        }

        progs->items[0].prog = prog;

        bpf_test_timer_enter(&t);
        do {
                ctx.selected_sk = NULL;
                retval = BPF_PROG_SK_LOOKUP_RUN_ARRAY(progs, ctx, bpf_prog_run);
        } while (bpf_test_timer_continue(&t, 1, repeat, &ret, &duration));
        bpf_test_timer_leave(&t);

        if (ret < 0)
                goto out;

        user_ctx->cookie = 0;
        if (ctx.selected_sk) {
                if (ctx.selected_sk->sk_reuseport && !ctx.no_reuseport) {
                        ret = -EOPNOTSUPP;
                        goto out;
                }

                user_ctx->cookie = sock_gen_cookie(ctx.selected_sk);
        }

        ret = bpf_test_finish(kattr, uattr, NULL, NULL, 0, retval, duration);
        if (!ret)
                ret = bpf_ctx_finish(kattr, uattr, user_ctx, sizeof(*user_ctx));

out:
        bpf_prog_array_free(progs);
        kfree(user_ctx);
        return ret;
}

int bpf_prog_test_run_syscall(struct bpf_prog *prog,
                              const union bpf_attr *kattr,
                              union bpf_attr __user *uattr)
{
        void __user *ctx_in = u64_to_user_ptr(kattr->test.ctx_in);
        __u32 ctx_size_in = kattr->test.ctx_size_in;
        void *ctx = NULL;
        u32 retval;
        int err = 0;

        /* doesn't support data_in/out, ctx_out, duration, repeat, or flags */
        if (kattr->test.data_in || kattr->test.data_out ||
            kattr->test.ctx_out || kattr->test.duration ||
            kattr->test.repeat || kattr->test.flags ||
            kattr->test.batch_size)
                return -EINVAL;

        if (ctx_size_in < prog->aux->max_ctx_offset ||
            ctx_size_in > U16_MAX)
                return -EINVAL;

        if (ctx_size_in) {
                ctx = memdup_user(ctx_in, ctx_size_in);
                if (IS_ERR(ctx))
                        return PTR_ERR(ctx);
        }

        rcu_read_lock_trace();
        retval = bpf_prog_run_pin_on_cpu(prog, ctx);
        rcu_read_unlock_trace();

        if (copy_to_user(&uattr->test.retval, &retval, sizeof(u32))) {
                err = -EFAULT;
                goto out;
        }
        if (ctx_size_in)
                if (copy_to_user(ctx_in, ctx, ctx_size_in))
                        err = -EFAULT;
out:
        kfree(ctx);
        return err;
}

static int verify_and_copy_hook_state(struct nf_hook_state *state,
                                      const struct nf_hook_state *user,
                                      struct net_device *dev)
{
        if (user->in || user->out)
                return -EINVAL;

        if (user->net || user->sk || user->okfn)
                return -EINVAL;

        switch (user->pf) {
        case NFPROTO_IPV4:
        case NFPROTO_IPV6:
                switch (state->hook) {
                case NF_INET_PRE_ROUTING:
                        state->in = dev;
                        break;
                case NF_INET_LOCAL_IN:
                        state->in = dev;
                        break;
                case NF_INET_FORWARD:
                        state->in = dev;
                        state->out = dev;
                        break;
                case NF_INET_LOCAL_OUT:
                        state->out = dev;
                        break;
                case NF_INET_POST_ROUTING:
                        state->out = dev;
                        break;
                }

                break;
        default:
                return -EINVAL;
        }

        state->pf = user->pf;
        state->hook = user->hook;

        return 0;
}

static __be16 nfproto_eth(int nfproto)
{
        switch (nfproto) {
        case NFPROTO_IPV4:
                return htons(ETH_P_IP);
        case NFPROTO_IPV6:
                break;
        }

        return htons(ETH_P_IPV6);
}

int bpf_prog_test_run_nf(struct bpf_prog *prog,
                         const union bpf_attr *kattr,
                         union bpf_attr __user *uattr)
{
        struct net *net = current->nsproxy->net_ns;
        struct net_device *dev = net->loopback_dev;
        struct nf_hook_state *user_ctx, hook_state = {
                .pf = NFPROTO_IPV4,
                .hook = NF_INET_LOCAL_OUT,
        };
        u32 size = kattr->test.data_size_in;
        u32 repeat = kattr->test.repeat;
        struct bpf_nf_ctx ctx = {
                .state = &hook_state,
        };
        struct sk_buff *skb = NULL;
        u32 retval, duration;
        void *data;
        int ret;

        if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size)
                return -EINVAL;

        if (size < sizeof(struct iphdr))
                return -EINVAL;

        data = bpf_test_init(kattr, kattr->test.data_size_in, size,
                             NET_SKB_PAD + NET_IP_ALIGN,
                             SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
        if (IS_ERR(data))
                return PTR_ERR(data);

        if (!repeat)
                repeat = 1;

        user_ctx = bpf_ctx_init(kattr, sizeof(struct nf_hook_state));
        if (IS_ERR(user_ctx)) {
                kfree(data);
                return PTR_ERR(user_ctx);
        }

        if (user_ctx) {
                ret = verify_and_copy_hook_state(&hook_state, user_ctx, dev);
                if (ret)
                        goto out;
        }

        skb = slab_build_skb(data);
        if (!skb) {
                ret = -ENOMEM;
                goto out;
        }

        data = NULL; /* data released via kfree_skb */

        skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
        __skb_put(skb, size);

        ret = -EINVAL;

        if (hook_state.hook != NF_INET_LOCAL_OUT) {
                if (size < ETH_HLEN + sizeof(struct iphdr))
                        goto out;

                skb->protocol = eth_type_trans(skb, dev);
                switch (skb->protocol) {
                case htons(ETH_P_IP):
                        if (hook_state.pf == NFPROTO_IPV4)
                                break;
                        goto out;
                case htons(ETH_P_IPV6):
                        if (size < ETH_HLEN + sizeof(struct ipv6hdr))
                                goto out;
                        if (hook_state.pf == NFPROTO_IPV6)
                                break;
                        goto out;
                default:
                        ret = -EPROTO;
                        goto out;
                }

                skb_reset_network_header(skb);
        } else {
                skb->protocol = nfproto_eth(hook_state.pf);
        }

        ctx.skb = skb;

        ret = bpf_test_run(prog, &ctx, repeat, &retval, &duration, false);
        if (ret)
                goto out;

        ret = bpf_test_finish(kattr, uattr, NULL, NULL, 0, retval, duration);

out:
        kfree(user_ctx);
        kfree_skb(skb);
        kfree(data);
        return ret;
}

static const struct btf_kfunc_id_set bpf_prog_test_kfunc_set = {
        .owner = THIS_MODULE,
        .set = &test_sk_check_kfunc_ids,
};

BTF_ID_LIST(bpf_prog_test_dtor_kfunc_ids)
BTF_ID(struct, prog_test_ref_kfunc)
BTF_ID(func, bpf_kfunc_call_test_release_dtor)
BTF_ID(struct, prog_test_member)
BTF_ID(func, bpf_kfunc_call_memb_release_dtor)

static int __init bpf_prog_test_run_init(void)
{
        const struct btf_id_dtor_kfunc bpf_prog_test_dtor_kfunc[] = {
                {
                        .btf_id = bpf_prog_test_dtor_kfunc_ids[0],
                        .kfunc_btf_id = bpf_prog_test_dtor_kfunc_ids[1]
                },
                {
                        .btf_id = bpf_prog_test_dtor_kfunc_ids[2],
                        .kfunc_btf_id = bpf_prog_test_dtor_kfunc_ids[3],
                },
        };
        int ret;

        ret = register_btf_fmodret_id_set(&bpf_test_modify_return_set);
        ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &bpf_prog_test_kfunc_set);
        ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &bpf_prog_test_kfunc_set);
        ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, &bpf_prog_test_kfunc_set);
        return ret ?: register_btf_id_dtor_kfuncs(bpf_prog_test_dtor_kfunc,
                                                  ARRAY_SIZE(bpf_prog_test_dtor_kfunc),
                                                  THIS_MODULE);
}
late_initcall(bpf_prog_test_run_init);