1 // SPDX-License-Identifier: GPL-2.0
2 #include <test_progs.h>
5 #include <linux/if_tun.h>
/* Compare a dissected 'got' struct bpf_flow_keys against 'expected' with a
 * single memcmp over the whole struct; on mismatch, log every got/expected
 * field pair (offsets, protocols, frag/encap flags, flow label, ports) so
 * the first differing field is visible in the test output.
 * NOTE(review): several format-string lines of this macro are elided in
 * this chunk; comments cannot be placed inside the backslash continuation.
 */
12 #define CHECK_FLOW_KEYS(desc, got, expected) \
13 CHECK_ATTR(memcmp(&got, &expected, sizeof(got)) != 0, \
17 "addr_proto=0x%x/0x%x " \
19 "is_first_frag=%u/%u " \
21 "ip_proto=0x%x/0x%x " \
22 "n_proto=0x%x/0x%x " \
23 "flow_label=0x%x/0x%x " \
26 got.nhoff, expected.nhoff, \
27 got.thoff, expected.thoff, \
28 got.addr_proto, expected.addr_proto, \
29 got.is_frag, expected.is_frag, \
30 got.is_first_frag, expected.is_first_frag, \
31 got.is_encap, expected.is_encap, \
32 got.ip_proto, expected.ip_proto, \
33 got.n_proto, expected.n_proto, \
34 got.flow_label, expected.flow_label, \
35 got.sport, expected.sport, \
36 got.dport, expected.dport)
/* Raw packet layout templates used to build the test frames below.
 * NOTE(review): only fragments of these definitions are visible in this
 * chunk — enclosing struct headers, members, and closing braces are elided.
 */
47 struct iphdr iph_inner;
/* IPv4 packet carried inside a single VLAN tag (used by "802.1q-ipv4"). */
51 struct svlan_ipv4_pkt {
/* IPv6 packet carrying a fragment extension header. */
65 struct ipv6_frag_pkt {
72 __be32 identification;
/* IPv6 packet inside two stacked VLAN tags (used by "802.1ad-ipv6"). */
77 struct dvlan_ipv6_pkt {
/* Per-test-case members: one packet template per encapsulation variant,
 * presumably alternatives of a union — TODO confirm against the elided
 * struct header.
 */
91 struct svlan_ipv4_pkt svlan_ipv4;
94 struct ipv6_frag_pkt ipv6_frag;
95 struct dvlan_ipv6_pkt dvlan_ipv6;
/* Expected dissector output, compared via CHECK_FLOW_KEYS(). */
97 struct bpf_flow_keys keys;
/* Table of flow-dissector test cases.  Each entry pairs an input packet
 * template with the struct bpf_flow_keys the BPF dissector is expected to
 * produce; optional 'flags' are passed as run-time context where supported.
 * NOTE(review): many initializer lines (names, braces, ports) are elided in
 * this chunk.
 */
103 struct test tests[] = {
/* Plain IPv4/TCP: transport header directly after the IP header. */
107 .eth.h_proto = __bpf_constant_htons(ETH_P_IP),
109 .iph.protocol = IPPROTO_TCP,
110 .iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
117 .thoff = ETH_HLEN + sizeof(struct iphdr),
118 .addr_proto = ETH_P_IP,
119 .ip_proto = IPPROTO_TCP,
120 .n_proto = __bpf_constant_htons(ETH_P_IP),
/* Plain IPv6/TCP. */
128 .eth.h_proto = __bpf_constant_htons(ETH_P_IPV6),
129 .iph.nexthdr = IPPROTO_TCP,
130 .iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
137 .thoff = ETH_HLEN + sizeof(struct ipv6hdr),
138 .addr_proto = ETH_P_IPV6,
139 .ip_proto = IPPROTO_TCP,
140 .n_proto = __bpf_constant_htons(ETH_P_IPV6),
/* IPv4 behind one 802.1Q VLAN tag: offsets shift by VLAN_HLEN. */
146 .name = "802.1q-ipv4",
148 .eth.h_proto = __bpf_constant_htons(ETH_P_8021Q),
149 .vlan_proto = __bpf_constant_htons(ETH_P_IP),
151 .iph.protocol = IPPROTO_TCP,
152 .iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
158 .nhoff = ETH_HLEN + VLAN_HLEN,
159 .thoff = ETH_HLEN + VLAN_HLEN + sizeof(struct iphdr),
160 .addr_proto = ETH_P_IP,
161 .ip_proto = IPPROTO_TCP,
162 .n_proto = __bpf_constant_htons(ETH_P_IP),
/* IPv6 behind two stacked VLAN tags (802.1ad outer, 802.1Q inner). */
168 .name = "802.1ad-ipv6",
170 .eth.h_proto = __bpf_constant_htons(ETH_P_8021AD),
171 .vlan_proto = __bpf_constant_htons(ETH_P_8021Q),
172 .vlan_proto2 = __bpf_constant_htons(ETH_P_IPV6),
173 .iph.nexthdr = IPPROTO_TCP,
174 .iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
180 .nhoff = ETH_HLEN + VLAN_HLEN * 2,
181 .thoff = ETH_HLEN + VLAN_HLEN * 2 +
182 sizeof(struct ipv6hdr),
183 .addr_proto = ETH_P_IPV6,
184 .ip_proto = IPPROTO_TCP,
185 .n_proto = __bpf_constant_htons(ETH_P_IPV6),
/* IPv4 first fragment (IP_MF set), dissected with PARSE_1ST_FRAG so the
 * transport header of the first fragment is still parsed.
 */
193 .eth.h_proto = __bpf_constant_htons(ETH_P_IP),
195 .iph.protocol = IPPROTO_TCP,
196 .iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
197 .iph.frag_off = __bpf_constant_htons(IP_MF),
203 .flags = BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG,
205 .thoff = ETH_HLEN + sizeof(struct iphdr),
206 .addr_proto = ETH_P_IP,
207 .ip_proto = IPPROTO_TCP,
208 .n_proto = __bpf_constant_htons(ETH_P_IP),
210 .is_first_frag = true,
214 .flags = BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG,
/* Same IPv4 fragment but without PARSE_1ST_FRAG (flags elided here). */
217 .name = "ipv4-no-frag",
219 .eth.h_proto = __bpf_constant_htons(ETH_P_IP),
221 .iph.protocol = IPPROTO_TCP,
222 .iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
223 .iph.frag_off = __bpf_constant_htons(IP_MF),
230 .thoff = ETH_HLEN + sizeof(struct iphdr),
231 .addr_proto = ETH_P_IP,
232 .ip_proto = IPPROTO_TCP,
233 .n_proto = __bpf_constant_htons(ETH_P_IP),
235 .is_first_frag = true,
/* IPv6 first fragment: fragment extension header precedes TCP, so thoff
 * additionally skips sizeof(struct frag_hdr).
 */
241 .eth.h_proto = __bpf_constant_htons(ETH_P_IPV6),
242 .iph.nexthdr = IPPROTO_FRAGMENT,
243 .iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
244 .ipf.nexthdr = IPPROTO_TCP,
250 .flags = BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG,
252 .thoff = ETH_HLEN + sizeof(struct ipv6hdr) +
253 sizeof(struct frag_hdr),
254 .addr_proto = ETH_P_IPV6,
255 .ip_proto = IPPROTO_TCP,
256 .n_proto = __bpf_constant_htons(ETH_P_IPV6),
258 .is_first_frag = true,
262 .flags = BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG,
/* IPv6 fragment without PARSE_1ST_FRAG. */
265 .name = "ipv6-no-frag",
267 .eth.h_proto = __bpf_constant_htons(ETH_P_IPV6),
268 .iph.nexthdr = IPPROTO_FRAGMENT,
269 .iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
270 .ipf.nexthdr = IPPROTO_TCP,
277 .thoff = ETH_HLEN + sizeof(struct ipv6hdr) +
278 sizeof(struct frag_hdr),
279 .addr_proto = ETH_P_IPV6,
280 .ip_proto = IPPROTO_TCP,
281 .n_proto = __bpf_constant_htons(ETH_P_IPV6),
283 .is_first_frag = true,
/* IPv6 with a non-zero flow label (0xbeeef spread over flow_lbl[3]);
 * expected keys carry the label back.
 */
287 .name = "ipv6-flow-label",
289 .eth.h_proto = __bpf_constant_htons(ETH_P_IPV6),
290 .iph.nexthdr = IPPROTO_TCP,
291 .iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
292 .iph.flow_lbl = { 0xb, 0xee, 0xef },
299 .thoff = ETH_HLEN + sizeof(struct ipv6hdr),
300 .addr_proto = ETH_P_IPV6,
301 .ip_proto = IPPROTO_TCP,
302 .n_proto = __bpf_constant_htons(ETH_P_IPV6),
305 .flow_label = __bpf_constant_htonl(0xbeeef),
/* Same packet but dissection stops at the flow label
 * (STOP_AT_FLOW_LABEL), so no port lines appear in the expected keys.
 */
309 .name = "ipv6-no-flow-label",
311 .eth.h_proto = __bpf_constant_htons(ETH_P_IPV6),
312 .iph.nexthdr = IPPROTO_TCP,
313 .iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
314 .iph.flow_lbl = { 0xb, 0xee, 0xef },
320 .flags = BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL,
322 .thoff = ETH_HLEN + sizeof(struct ipv6hdr),
323 .addr_proto = ETH_P_IPV6,
324 .ip_proto = IPPROTO_TCP,
325 .n_proto = __bpf_constant_htons(ETH_P_IPV6),
326 .flow_label = __bpf_constant_htonl(0xbeeef),
328 .flags = BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL,
/* IPIP tunnel, dissected into the inner IPv4/TCP header: thoff skips two
 * IP headers and the reported ip_proto is the inner TCP.
 */
331 .name = "ipip-encap",
333 .eth.h_proto = __bpf_constant_htons(ETH_P_IP),
335 .iph.protocol = IPPROTO_IPIP,
336 .iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
338 .iph_inner.protocol = IPPROTO_TCP,
340 __bpf_constant_htons(MAGIC_BYTES) -
341 sizeof(struct iphdr),
349 .thoff = ETH_HLEN + sizeof(struct iphdr) +
350 sizeof(struct iphdr),
351 .addr_proto = ETH_P_IP,
352 .ip_proto = IPPROTO_TCP,
353 .n_proto = __bpf_constant_htons(ETH_P_IP),
/* Same IPIP packet with STOP_AT_ENCAP: dissection halts at the outer
 * header, so ip_proto stays IPPROTO_IPIP and thoff covers one IP header.
 */
360 .name = "ipip-no-encap",
362 .eth.h_proto = __bpf_constant_htons(ETH_P_IP),
364 .iph.protocol = IPPROTO_IPIP,
365 .iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
367 .iph_inner.protocol = IPPROTO_TCP,
369 __bpf_constant_htons(MAGIC_BYTES) -
370 sizeof(struct iphdr),
376 .flags = BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP,
378 .thoff = ETH_HLEN + sizeof(struct iphdr),
379 .addr_proto = ETH_P_IP,
380 .ip_proto = IPPROTO_IPIP,
381 .n_proto = __bpf_constant_htons(ETH_P_IP),
384 .flags = BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP,
/* Create a TAP device named 'ifname' by opening /dev/net/tun and issuing
 * TUNSETIFF.  IFF_NAPI | IFF_NAPI_FRAGS presumably routes received frames
 * through the tun driver path that calls eth_get_headlen() (see the comment
 * in test_flow_dissector below) — TODO confirm against the driver.
 * Returns the tun fd on success; error/return paths are elided in this
 * chunk.  NOTE(review): strncpy may leave ifr_name unterminated for
 * max-length names — acceptable for the fixed "tap0" used by the caller.
 */
388 static int create_tap(const char *ifname)
391 .ifr_flags = IFF_TAP | IFF_NO_PI | IFF_NAPI | IFF_NAPI_FRAGS,
395 strncpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name));
397 fd = open("/dev/net/tun", O_RDWR);
401 ret = ioctl(fd, TUNSETIFF, &ifr);
/* Inject one packet of 'len' bytes into the TAP device via writev().
 * Returns writev()'s result: bytes written, or -1 with errno set.
 * NOTE(review): the iovec initializer body is elided in this chunk.
 */
408 static int tx_tap(int fd, void *pkt, size_t len)
410 struct iovec iov[] = {
416 return writev(fd, iov, ARRAY_SIZE(iov));
/* Bring interface 'ifname' up: read its current flags with SIOCGIFFLAGS on
 * a throwaway PF_INET datagram socket, OR in IFF_UP, and write the flags
 * back with SIOCSIFFLAGS.  Return value and error/cleanup paths (closing
 * 'sk') are elided in this chunk — presumably 0 on success, negative on
 * failure; verify against the full source.
 */
419 static int ifup(const char *ifname)
421 struct ifreq ifr = {};
424 strncpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name));
426 sk = socket(PF_INET, SOCK_DGRAM, 0);
430 ret = ioctl(sk, SIOCGIFFLAGS, &ifr);
436 ifr.ifr_flags |= IFF_UP;
437 ret = ioctl(sk, SIOCSIFFLAGS, &ifr);
/* Test entry point.  Phase 1 runs every entry of tests[] through the
 * BPF flow-dissector program with BPF_PROG_TEST_RUN and compares the
 * produced bpf_flow_keys against the expected keys.  Phase 2 re-runs the
 * same packets through the skb-less dissector by attaching the program as
 * BPF_FLOW_DISSECTOR and transmitting the packets via a TAP device, then
 * reads the dissected keys back out of a BPF map.
 * NOTE(review): many statements (error checks, braces, test setup) are
 * elided in this chunk.
 */
447 void test_flow_dissector(void)
449 int i, err, prog_fd, keys_fd = -1, tap_fd;
450 struct bpf_object *obj;
/* Load bpf_flow.o and resolve the program fd plus the "last_dissection"
 * map fd used by the skb-less phase.
 */
453 err = bpf_flow_load(&obj, "./bpf_flow.o", "flow_dissector",
454 "jmp_table", "last_dissection", &prog_fd, &keys_fd);
/* Phase 1: BPF_PROG_TEST_RUN over each test packet. */
460 for (i = 0; i < ARRAY_SIZE(tests); i++) {
461 struct bpf_flow_keys flow_keys;
462 struct bpf_prog_test_run_attr tattr = {
464 .data_in = &tests[i].pkt,
465 .data_size_in = sizeof(tests[i].pkt),
466 .data_out = &flow_keys,
/* Optional run-time context: tests that set 'flags' pass them in via a
 * bpf_flow_keys-shaped ctx.
 */
468 static struct bpf_flow_keys ctx = {};
470 if (tests[i].flags) {
472 tattr.ctx_size_in = sizeof(ctx);
473 ctx.flags = tests[i].flags;
476 err = bpf_prog_test_run_xattr(&tattr);
/* The program must return 1 (matched) and emit exactly one flow_keys. */
477 CHECK_ATTR(tattr.data_size_out != sizeof(flow_keys) ||
478 err || tattr.retval != 1,
480 "err %d errno %d retval %d duration %d size %u/%lu\n",
481 err, errno, tattr.retval, tattr.duration,
482 tattr.data_size_out, sizeof(flow_keys));
483 CHECK_FLOW_KEYS(tests[i].name, flow_keys, tests[i].keys);
486 /* Do the same tests but for skb-less flow dissector.
487 * We use a known path in the net/tun driver that calls
488 * eth_get_headlen and we manually export bpf_flow_keys
489 * via BPF map in this case.
*/
/* Phase 2 setup: attach the dissector and bring up a TAP device. */
492 err = bpf_prog_attach(prog_fd, 0, BPF_FLOW_DISSECTOR, 0);
493 CHECK(err, "bpf_prog_attach", "err %d errno %d\n", err, errno);
495 tap_fd = create_tap("tap0");
496 CHECK(tap_fd < 0, "create_tap", "tap_fd %d errno %d\n", tap_fd, errno);
498 CHECK(err, "ifup", "err %d errno %d\n", err, errno);
500 for (i = 0; i < ARRAY_SIZE(tests); i++) {
501 /* Keep in sync with 'flags' from eth_get_headlen. */
502 __u32 eth_get_headlen_flags =
503 BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG;
504 struct bpf_prog_test_run_attr tattr = {};
505 struct bpf_flow_keys flow_keys = {};
/* Map key: sport in the high 16 bits — low half elided, presumably
 * dport; verify against the full source.
 */
506 __u32 key = (__u32)(tests[i].keys.sport) << 16 |
509 /* For skb-less case we can't pass input flags; run
510 * only the tests that have a matching set of flags.
*/
513 if (tests[i].flags != eth_get_headlen_flags)
/* Transmit the raw packet and fetch what the dissector recorded. */
516 err = tx_tap(tap_fd, &tests[i].pkt, sizeof(tests[i].pkt));
517 CHECK(err < 0, "tx_tap", "err %d errno %d\n", err, errno);
519 err = bpf_map_lookup_elem(keys_fd, &key, &flow_keys);
520 CHECK_ATTR(err, tests[i].name, "bpf_map_lookup_elem %d\n", err);
522 CHECK_ATTR(err, tests[i].name, "skb-less err %d\n", err);
523 CHECK_FLOW_KEYS(tests[i].name, flow_keys, tests[i].keys);
/* Delete the entry so a later test cannot match stale data. */
525 err = bpf_map_delete_elem(keys_fd, &key);
526 CHECK_ATTR(err, tests[i].name, "bpf_map_delete_elem %d\n", err);
/* Teardown: detach the dissector and release the BPF object. */
529 bpf_prog_detach(prog_fd, BPF_FLOW_DISSECTOR);
530 bpf_object__close(obj);