selftests/bpf: Add big-endian support to the ldsx test
author Ilya Leoshkevich <iii@linux.ibm.com>
Tue, 19 Sep 2023 10:09:05 +0000 (12:09 +0200)
committer Alexei Starovoitov <ast@kernel.org>
Thu, 21 Sep 2023 21:21:59 +0000 (14:21 -0700)
Prepare the ldsx test to run on big-endian systems by adding the
necessary endianness checks around narrow memory accesses.
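
For reference, an illustrative user-space sketch (not part of the patch) of the
layout difference the new ifdefs account for: a u64 stored at fp-8 keeps its
least significant byte at fp-8 on little-endian but at fp-1 on big-endian, so a
sign-extending narrow load has to target a different offset to read the same
value. The same reasoning gives offsetof(struct __sk_buff, mark) +
sizeof(skb->mark) - 1 for the one-byte read of skb->mark.

  #include <stdint.h>
  #include <stdio.h>
  #include <string.h>

  int main(void)
  {
          /* Mirror of "*(u64 *)(r10 - 8) = r1" with r1 = 0x3fe:
           * the low byte 0xfe, i.e. (s8)-2, lands at a different
           * stack offset depending on the host byte order.
           */
          uint64_t v = 0x3fe;
          unsigned char slot[8];          /* stands in for fp-8..fp-1 */

          memcpy(slot, &v, sizeof(v));

  #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
          int8_t lo = (int8_t)slot[0];    /* fp-8 holds the LSB */
  #else
          int8_t lo = (int8_t)slot[7];    /* fp-1 holds the LSB */
  #endif
          printf("%d\n", lo);             /* prints -2 on either byte order */
          return 0;
  }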

Signed-off-by: Ilya Leoshkevich <iii@linux.ibm.com>
Link: https://lore.kernel.org/r/20230919101336.2223655-4-iii@linux.ibm.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
tools/testing/selftests/bpf/progs/test_ldsx_insn.c
tools/testing/selftests/bpf/progs/verifier_ldsx.c

index 67c14ba1e87b4960629013bfd97ba4e2f8834ab6..3709e5eb7dd03118cb4e535715c7f44e9e7a8165 100644
--- a/tools/testing/selftests/bpf/progs/test_ldsx_insn.c
+++ b/tools/testing/selftests/bpf/progs/test_ldsx_insn.c
@@ -104,7 +104,11 @@ int _tc(volatile struct __sk_buff *skb)
                      "%[tmp_mark] = r1"
                      : [tmp_mark]"=r"(tmp_mark)
                      : [ctx]"r"(skb),
-                       [off_mark]"i"(offsetof(struct __sk_buff, mark))
+                       [off_mark]"i"(offsetof(struct __sk_buff, mark)
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+                       + sizeof(skb->mark) - 1
+#endif
+                       )
                      : "r1");
 #else
        tmp_mark = (char)skb->mark;
index 1e1bc379c44fc3d4410da778be276824947bca39..97d35bc1c943789fd13bc09119c6f878f11a83f9 100644
--- a/tools/testing/selftests/bpf/progs/verifier_ldsx.c
+++ b/tools/testing/selftests/bpf/progs/verifier_ldsx.c
@@ -13,12 +13,16 @@ __description("LDSX, S8")
 __success __success_unpriv __retval(-2)
 __naked void ldsx_s8(void)
 {
-       asm volatile ("                                 \
-       r1 = 0x3fe;                                     \
-       *(u64 *)(r10 - 8) = r1;                         \
-       r0 = *(s8 *)(r10 - 8);                          \
-       exit;                                           \
-"      ::: __clobber_all);
+       asm volatile (
+       "r1 = 0x3fe;"
+       "*(u64 *)(r10 - 8) = r1;"
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+       "r0 = *(s8 *)(r10 - 8);"
+#else
+       "r0 = *(s8 *)(r10 - 1);"
+#endif
+       "exit;"
+       ::: __clobber_all);
 }
 
 SEC("socket")
@@ -26,12 +30,16 @@ __description("LDSX, S16")
 __success __success_unpriv __retval(-2)
 __naked void ldsx_s16(void)
 {
-       asm volatile ("                                 \
-       r1 = 0x3fffe;                                   \
-       *(u64 *)(r10 - 8) = r1;                         \
-       r0 = *(s16 *)(r10 - 8);                         \
-       exit;                                           \
-"      ::: __clobber_all);
+       asm volatile (
+       "r1 = 0x3fffe;"
+       "*(u64 *)(r10 - 8) = r1;"
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+       "r0 = *(s16 *)(r10 - 8);"
+#else
+       "r0 = *(s16 *)(r10 - 2);"
+#endif
+       "exit;"
+       ::: __clobber_all);
 }
 
 SEC("socket")
@@ -39,13 +47,17 @@ __description("LDSX, S32")
 __success __success_unpriv __retval(-1)
 __naked void ldsx_s32(void)
 {
-       asm volatile ("                                 \
-       r1 = 0xfffffffe;                                \
-       *(u64 *)(r10 - 8) = r1;                         \
-       r0 = *(s32 *)(r10 - 8);                         \
-       r0 >>= 1;                                       \
-       exit;                                           \
-"      ::: __clobber_all);
+       asm volatile (
+       "r1 = 0xfffffffe;"
+       "*(u64 *)(r10 - 8) = r1;"
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+       "r0 = *(s32 *)(r10 - 8);"
+#else
+       "r0 = *(s32 *)(r10 - 4);"
+#endif
+       "r0 >>= 1;"
+       "exit;"
+       ::: __clobber_all);
 }
 
 SEC("socket")
@@ -54,20 +66,24 @@ __log_level(2) __success __retval(1)
 __msg("R1_w=scalar(smin=-128,smax=127)")
 __naked void ldsx_s8_range_priv(void)
 {
-       asm volatile ("                                 \
-       call %[bpf_get_prandom_u32];                    \
-       *(u64 *)(r10 - 8) = r0;                         \
-       r1 = *(s8 *)(r10 - 8);                          \
-       /* r1 with s8 range */                          \
-       if r1 s> 0x7f goto l0_%=;                       \
-       if r1 s< -0x80 goto l0_%=;                      \
-       r0 = 1;                                         \
-l1_%=:                                                 \
-       exit;                                           \
-l0_%=:                                                 \
-       r0 = 2;                                         \
-       goto l1_%=;                                     \
-"      :
+       asm volatile (
+       "call %[bpf_get_prandom_u32];"
+       "*(u64 *)(r10 - 8) = r0;"
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+       "r1 = *(s8 *)(r10 - 8);"
+#else
+       "r1 = *(s8 *)(r10 - 1);"
+#endif
+       /* r1 with s8 range */
+       "if r1 s> 0x7f goto l0_%=;"
+       "if r1 s< -0x80 goto l0_%=;"
+       "r0 = 1;"
+"l1_%=:"
+       "exit;"
+"l0_%=:"
+       "r0 = 2;"
+       "goto l1_%=;"
+       :
        : __imm(bpf_get_prandom_u32)
        : __clobber_all);
 }
@@ -77,20 +93,24 @@ __description("LDSX, S16 range checking")
 __success __success_unpriv __retval(1)
 __naked void ldsx_s16_range(void)
 {
-       asm volatile ("                                 \
-       call %[bpf_get_prandom_u32];                    \
-       *(u64 *)(r10 - 8) = r0;                         \
-       r1 = *(s16 *)(r10 - 8);                         \
-       /* r1 with s16 range */                         \
-       if r1 s> 0x7fff goto l0_%=;                     \
-       if r1 s< -0x8000 goto l0_%=;                    \
-       r0 = 1;                                         \
-l1_%=:                                                 \
-       exit;                                           \
-l0_%=:                                                 \
-       r0 = 2;                                         \
-       goto l1_%=;                                     \
-"      :
+       asm volatile (
+       "call %[bpf_get_prandom_u32];"
+       "*(u64 *)(r10 - 8) = r0;"
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+       "r1 = *(s16 *)(r10 - 8);"
+#else
+       "r1 = *(s16 *)(r10 - 2);"
+#endif
+       /* r1 with s16 range */
+       "if r1 s> 0x7fff goto l0_%=;"
+       "if r1 s< -0x8000 goto l0_%=;"
+       "r0 = 1;"
+"l1_%=:"
+       "exit;"
+"l0_%=:"
+       "r0 = 2;"
+       "goto l1_%=;"
+       :
        : __imm(bpf_get_prandom_u32)
        : __clobber_all);
 }
@@ -100,20 +120,24 @@ __description("LDSX, S32 range checking")
 __success __success_unpriv __retval(1)
 __naked void ldsx_s32_range(void)
 {
-       asm volatile ("                                 \
-       call %[bpf_get_prandom_u32];                    \
-       *(u64 *)(r10 - 8) = r0;                         \
-       r1 = *(s32 *)(r10 - 8);                         \
-       /* r1 with s16 range */                         \
-       if r1 s> 0x7fffFFFF goto l0_%=;                 \
-       if r1 s< -0x80000000 goto l0_%=;                \
-       r0 = 1;                                         \
-l1_%=:                                                 \
-       exit;                                           \
-l0_%=:                                                 \
-       r0 = 2;                                         \
-       goto l1_%=;                                     \
-"      :
+       asm volatile (
+       "call %[bpf_get_prandom_u32];"
+       "*(u64 *)(r10 - 8) = r0;"
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+       "r1 = *(s32 *)(r10 - 8);"
+#else
+       "r1 = *(s32 *)(r10 - 4);"
+#endif
+       /* r1 with s16 range */
+       "if r1 s> 0x7fffFFFF goto l0_%=;"
+       "if r1 s< -0x80000000 goto l0_%=;"
+       "r0 = 1;"
+"l1_%=:"
+       "exit;"
+"l0_%=:"
+       "r0 = 2;"
+       "goto l1_%=;"
+       :
        : __imm(bpf_get_prandom_u32)
        : __clobber_all);
 }