selftests/bpf: Add test cases to test narrower ctx field loads
author Yonghong Song <yhs@fb.com>
Tue, 13 Jun 2017 22:52:14 +0000 (15:52 -0700)
committer David S. Miller <davem@davemloft.net>
Wed, 14 Jun 2017 18:56:25 +0000 (14:56 -0400)
Add test cases to both test_verifier and test_progs; negative tests
are included in test_verifier as well.
The test_progs test compares the result of a narrower ctx field load
against the masked result of a normal full-field load, and fails if
the two values differ.
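
For illustration, a minimal user-space sketch (not part of this patch,
and assuming a little-endian host) of the identity being verified: a
narrow load from the start of a 32-bit field must equal the full load
masked down to the narrow width.

    /* Sketch only: mirrors the TEST_FIELD check from
     * test_pkt_md_access.c in plain user-space C; assumes a
     * little-endian host, where the low-order bytes of a field
     * sit at its lowest address.
     */
    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            volatile uint32_t field = 0x12345678;

            assert(*(volatile uint8_t  *)&field == (field & 0xFF));
            assert(*(volatile uint16_t *)&field == (field & 0xFFFF));
            assert(*(volatile uint32_t *)&field == (field & 0xFFFFFFFF));
            return 0;
    }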

Acked-by: Daniel Borkmann <daniel@iogearbox.net>
Signed-off-by: Yonghong Song <yhs@fb.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
tools/testing/selftests/bpf/Makefile
tools/testing/selftests/bpf/test_pkt_md_access.c [new file with mode: 0644]
tools/testing/selftests/bpf/test_progs.c
tools/testing/selftests/bpf/test_verifier.c

index 9f0e07ba5334b1412668e2fe4a43779d5e857060..2ca51a8a588c49b14f62dfbaafba253e01cf8bde 100644 (file)
@@ -14,7 +14,8 @@ LDLIBS += -lcap -lelf
 TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test_progs \
        test_align
 
-TEST_GEN_FILES = test_pkt_access.o test_xdp.o test_l4lb.o test_tcp_estats.o test_obj_id.o
+TEST_GEN_FILES = test_pkt_access.o test_xdp.o test_l4lb.o test_tcp_estats.o test_obj_id.o \
+       test_pkt_md_access.o
 
 TEST_PROGS := test_kmod.sh
 
diff --git a/tools/testing/selftests/bpf/test_pkt_md_access.c b/tools/testing/selftests/bpf/test_pkt_md_access.c
new file mode 100644 (file)
index 0000000..71729d4
--- /dev/null
@@ -0,0 +1,35 @@
+/* Copyright (c) 2017 Facebook
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+#include <stddef.h>
+#include <string.h>
+#include <linux/bpf.h>
+#include <linux/pkt_cls.h>
+#include "bpf_helpers.h"
+
+int _version SEC("version") = 1;
+
+#define TEST_FIELD(TYPE, FIELD, MASK)                                  \
+       {                                                               \
+               TYPE tmp = *(volatile TYPE *)&skb->FIELD;               \
+               if (tmp != ((*(volatile __u32 *)&skb->FIELD) & MASK))   \
+                       return TC_ACT_SHOT;                             \
+       }
+
+SEC("test1")
+int process(struct __sk_buff *skb)
+{
+       TEST_FIELD(__u8,  len, 0xFF);
+       TEST_FIELD(__u16, len, 0xFFFF);
+       TEST_FIELD(__u32, len, 0xFFFFFFFF);
+       TEST_FIELD(__u16, protocol, 0xFFFF);
+       TEST_FIELD(__u32, protocol, 0xFFFFFFFF);
+       TEST_FIELD(__u8,  hash, 0xFF);
+       TEST_FIELD(__u16, hash, 0xFFFF);
+       TEST_FIELD(__u32, hash, 0xFFFFFFFF);
+
+       return TC_ACT_OK;
+}
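
A note on the TEST_FIELD macro above: the volatile casts look intended
to keep the compiler from merging or widening the two accesses, so the
compiled object really contains a narrow load (BPF_B or BPF_H) next to
the full BPF_W load, and the kernel's narrow ctx-load handling is what
gets exercised.
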
index f10493d4c37cd7682e34ce72ca3263c32988b73d..5855cd3d3d45cbd1ee967263c622d4e9d2e7b417 100644 (file)
@@ -484,6 +484,26 @@ done:
                bpf_object__close(objs[i]);
 }
 
+static void test_pkt_md_access(void)
+{
+       const char *file = "./test_pkt_md_access.o";
+       struct bpf_object *obj;
+       __u32 duration, retval;
+       int err, prog_fd;
+
+       err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
+       if (err)
+               return;
+
+       err = bpf_prog_test_run(prog_fd, 10, &pkt_v4, sizeof(pkt_v4),
+                               NULL, NULL, &retval, &duration);
+       CHECK(err || retval, "",
+             "err %d errno %d retval %d duration %d\n",
+             err, errno, retval, duration);
+
+       bpf_object__close(obj);
+}
+
 int main(void)
 {
        struct rlimit rinf = { RLIM_INFINITY, RLIM_INFINITY };
@@ -495,6 +515,7 @@ int main(void)
        test_l4lb();
        test_tcp_estats();
        test_bpf_obj_id();
+       test_pkt_md_access();
 
        printf("Summary: %d PASSED, %d FAILED\n", pass_cnt, error_cnt);
        return error_cnt ? EXIT_FAILURE : EXIT_SUCCESS;
index 13341700930c489cb6bb0d4e4c9d539ce8975209..c0af0195432f5a3ee52df7f8c739c5499b3d687a 100644 (file)
@@ -1094,6 +1094,59 @@ static struct bpf_test tests[] = {
                .errstr = "invalid bpf_context access",
                .result = REJECT,
        },
+       {
+               "check skb->hash byte load permitted",
+               .insns = {
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+#ifdef __LITTLE_ENDIAN
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+                                   offsetof(struct __sk_buff, hash)),
+#else
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+                                   offsetof(struct __sk_buff, hash) + 3),
+#endif
+                       BPF_EXIT_INSN(),
+               },
+               .result = ACCEPT,
+       },
+       {
+               "check skb->hash byte load not permitted 1",
+               .insns = {
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+                                   offsetof(struct __sk_buff, hash) + 1),
+                       BPF_EXIT_INSN(),
+               },
+               .errstr = "invalid bpf_context access",
+               .result = REJECT,
+       },
+       {
+               "check skb->hash byte load not permitted 2",
+               .insns = {
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+                                   offsetof(struct __sk_buff, hash) + 2),
+                       BPF_EXIT_INSN(),
+               },
+               .errstr = "invalid bpf_context access",
+               .result = REJECT,
+       },
+       {
+               "check skb->hash byte load not permitted 3",
+               .insns = {
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+#ifdef __LITTLE_ENDIAN
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+                                   offsetof(struct __sk_buff, hash) + 3),
+#else
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+                                   offsetof(struct __sk_buff, hash)),
+#endif
+                       BPF_EXIT_INSN(),
+               },
+               .errstr = "invalid bpf_context access",
+               .result = REJECT,
+       },
        {
                "check cb access: byte, wrong type",
                .insns = {
@@ -1187,6 +1240,37 @@ static struct bpf_test tests[] = {
                .errstr = "invalid bpf_context access",
                .result = REJECT,
        },
+       {
+               "check skb->hash half load permitted",
+               .insns = {
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+#ifdef __LITTLE_ENDIAN
+                       BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+                                   offsetof(struct __sk_buff, hash)),
+#else
+                       BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+                                   offsetof(struct __sk_buff, hash) + 2),
+#endif
+                       BPF_EXIT_INSN(),
+               },
+               .result = ACCEPT,
+       },
+       {
+               "check skb->hash half load not permitted",
+               .insns = {
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+#ifdef __LITTLE_ENDIAN
+                       BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+                                   offsetof(struct __sk_buff, hash) + 2),
+#else
+                       BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+                                   offsetof(struct __sk_buff, hash)),
+#endif
+                       BPF_EXIT_INSN(),
+               },
+               .errstr = "invalid bpf_context access",
+               .result = REJECT,
+       },
        {
                "check cb access: half, wrong type",
                .insns = {
@@ -5103,6 +5187,98 @@ static struct bpf_test tests[] = {
                },
                .result = ACCEPT,
        },
+       {
+               "check bpf_perf_event_data->sample_period byte load permitted",
+               .insns = {
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+#ifdef __LITTLE_ENDIAN
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+                                   offsetof(struct bpf_perf_event_data, sample_period)),
+#else
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+                                   offsetof(struct bpf_perf_event_data, sample_period) + 7),
+#endif
+                       BPF_EXIT_INSN(),
+               },
+               .result = ACCEPT,
+               .prog_type = BPF_PROG_TYPE_PERF_EVENT,
+       },
+       {
+               "check bpf_perf_event_data->sample_period half load permitted",
+               .insns = {
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+#ifdef __LITTLE_ENDIAN
+                       BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+                                   offsetof(struct bpf_perf_event_data, sample_period)),
+#else
+                       BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+                                   offsetof(struct bpf_perf_event_data, sample_period) + 6),
+#endif
+                       BPF_EXIT_INSN(),
+               },
+               .result = ACCEPT,
+               .prog_type = BPF_PROG_TYPE_PERF_EVENT,
+       },
+       {
+               "check bpf_perf_event_data->sample_period word load permitted",
+               .insns = {
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+#ifdef __LITTLE_ENDIAN
+                       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+                                   offsetof(struct bpf_perf_event_data, sample_period)),
+#else
+                       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+                                   offsetof(struct bpf_perf_event_data, sample_period) + 4),
+#endif
+                       BPF_EXIT_INSN(),
+               },
+               .result = ACCEPT,
+               .prog_type = BPF_PROG_TYPE_PERF_EVENT,
+       },
+       {
+               "check bpf_perf_event_data->sample_period dword load permitted",
+               .insns = {
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
+                                   offsetof(struct bpf_perf_event_data, sample_period)),
+                       BPF_EXIT_INSN(),
+               },
+               .result = ACCEPT,
+               .prog_type = BPF_PROG_TYPE_PERF_EVENT,
+       },
+       {
+               "check skb->data half load not permitted",
+               .insns = {
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+#ifdef __LITTLE_ENDIAN
+                       BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+                                   offsetof(struct __sk_buff, data)),
+#else
+                       BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+                                   offsetof(struct __sk_buff, data) + 2),
+#endif
+                       BPF_EXIT_INSN(),
+               },
+               .result = REJECT,
+               .errstr = "invalid bpf_context access",
+       },
+       {
+               "check skb->tc_classid half load not permitted for lwt prog",
+               .insns = {
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+#ifdef __LITTLE_ENDIAN
+                       BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+                                   offsetof(struct __sk_buff, tc_classid)),
+#else
+                       BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+                                   offsetof(struct __sk_buff, tc_classid) + 2),
+#endif
+                       BPF_EXIT_INSN(),
+               },
+               .result = REJECT,
+               .errstr = "invalid bpf_context access",
+               .prog_type = BPF_PROG_TYPE_LWT_IN,
+       },
 };
 
 static int probe_filter_length(const struct bpf_insn *fp)
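
A closing note on the #ifdef __LITTLE_ENDIAN guards in the verifier
tests above: at this point the verifier only permits narrow loads that
cover the low-order bytes of a field, and those bytes sit at in-field
offset 0 on a little-endian host but at the high end on a big-endian
one. A hypothetical helper (not part of the patch) that makes the
hand-coded offsets explicit:

    /* Hypothetical helper (not in the patch): in-field byte offset
     * of the low-order load_size bytes of a field_size-byte field,
     * using the same endianness guard as the tests above.
     */
    #include <stddef.h>

    static size_t low_bytes_offset(size_t field_size, size_t load_size)
    {
    #ifdef __LITTLE_ENDIAN
            return 0;                      /* low-order bytes come first */
    #else
            return field_size - load_size; /* low-order bytes come last */
    #endif
    }

For example, a byte load from the 8-byte sample_period field lands at
+0 on little-endian and at +7 on big-endian, matching the offsets the
tests spell out by hand.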