SEC("raw_tp")
__arch_x86_64
__xlated("1: r0 = &(void __percpu *)(r0)")
+__xlated("...")
__xlated("3: exit")
__success
__naked void canary_zero_spills(void)
__arch_x86_64
__log_level(4) __msg("stack depth 16")
__xlated("1: *(u64 *)(r10 -16) = r1")
+__xlated("...")
__xlated("3: r0 = &(void __percpu *)(r0)")
+__xlated("...")
__xlated("5: r2 = *(u64 *)(r10 -16)")
__success
__naked void wrong_reg_in_pattern1(void)
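The asm bodies of these tests are not shown in this excerpt. For orientation, a body matching the expectations above would look roughly as follows (a reconstruction for illustration only, using the usual __imm/__clobber_all conventions of these selftests; the helper is assumed to be bpf_get_smp_processor_id, whose inlined per-cpu access is what the "r0 = &(void __percpu *)(r0)" lines match). Here the spill is done from r1 but the fill goes into r2, so the verifier must not treat it as a removable spill/fill pair around the call, and the __xlated lines above check that both the spill and the fill survive in the translated program:

__naked void wrong_reg_in_pattern1(void)
{
	/* body reconstructed for illustration, not taken from this patch */
	asm volatile (
	"r1 = 1;"
	/* spill r1 before the call ... */
	"*(u64 *)(r10 - 16) = r1;"
	"call %[bpf_get_smp_processor_id];"
	/* ... but fill into a different register, so the spill/fill
	 * pair around the call has to be left in place
	 */
	"r2 = *(u64 *)(r10 - 16);"
	"exit;"
	:
	: __imm(bpf_get_smp_processor_id)
	: __clobber_all);
}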
SEC("raw_tp")
__arch_x86_64
__xlated("1: *(u64 *)(r10 -16) = r6")
+__xlated("...")
__xlated("3: r0 = &(void __percpu *)(r0)")
+__xlated("...")
__xlated("5: r6 = *(u64 *)(r10 -16)")
__success
__naked void wrong_reg_in_pattern2(void)
SEC("raw_tp")
__arch_x86_64
__xlated("1: *(u64 *)(r10 -16) = r0")
+__xlated("...")
__xlated("3: r0 = &(void __percpu *)(r0)")
+__xlated("...")
__xlated("5: r0 = *(u64 *)(r10 -16)")
__success
__naked void wrong_reg_in_pattern3(void)
SEC("raw_tp")
__arch_x86_64
__xlated("2: *(u64 *)(r2 -16) = r1")
+__xlated("...")
__xlated("4: r0 = &(void __percpu *)(r0)")
+__xlated("...")
__xlated("6: r1 = *(u64 *)(r10 -16)")
__success
__naked void wrong_base_in_pattern(void)
SEC("raw_tp")
__arch_x86_64
__xlated("1: *(u64 *)(r10 -16) = r1")
+__xlated("...")
__xlated("3: r0 = &(void __percpu *)(r0)")
+__xlated("...")
__xlated("5: r2 = 1")
__success
__naked void wrong_insn_in_pattern(void)
SEC("raw_tp")
__arch_x86_64
__xlated("2: *(u64 *)(r10 -16) = r1")
+__xlated("...")
__xlated("4: r0 = &(void __percpu *)(r0)")
+__xlated("...")
__xlated("6: r1 = *(u64 *)(r10 -8)")
__success
__naked void wrong_off_in_pattern1(void)
SEC("raw_tp")
__arch_x86_64
__xlated("1: *(u32 *)(r10 -4) = r1")
+__xlated("...")
__xlated("3: r0 = &(void __percpu *)(r0)")
+__xlated("...")
__xlated("5: r1 = *(u32 *)(r10 -4)")
__success
__naked void wrong_off_in_pattern2(void)
SEC("raw_tp")
__arch_x86_64
__xlated("1: *(u32 *)(r10 -16) = r1")
+__xlated("...")
__xlated("3: r0 = &(void __percpu *)(r0)")
+__xlated("...")
__xlated("5: r1 = *(u32 *)(r10 -16)")
__success
__naked void wrong_size_in_pattern(void)
SEC("raw_tp")
__arch_x86_64
__xlated("2: *(u32 *)(r10 -8) = r1")
+__xlated("...")
__xlated("4: r0 = &(void __percpu *)(r0)")
+__xlated("...")
__xlated("6: r1 = *(u32 *)(r10 -8)")
__success
__naked void partial_pattern(void)
/* not patched, spills for -8, -16 not removed */
__xlated("2: *(u64 *)(r10 -8) = r1")
__xlated("3: *(u64 *)(r10 -16) = r2")
+__xlated("...")
__xlated("5: r0 = &(void __percpu *)(r0)")
+__xlated("...")
__xlated("7: r2 = *(u64 *)(r10 -16)")
__xlated("8: r1 = *(u64 *)(r10 -8)")
/* patched, spills for -24, -32 removed */
+__xlated("...")
__xlated("10: r0 = &(void __percpu *)(r0)")
+__xlated("...")
__xlated("12: exit")
__success
__naked void min_stack_offset(void)
SEC("raw_tp")
__arch_x86_64
__xlated("1: *(u64 *)(r10 -8) = r1")
+__xlated("...")
__xlated("3: r0 = &(void __percpu *)(r0)")
+__xlated("...")
__xlated("5: r1 = *(u64 *)(r10 -8)")
__success
__naked void bad_fixed_read(void)
SEC("raw_tp")
__arch_x86_64
__xlated("1: *(u64 *)(r10 -8) = r1")
+__xlated("...")
__xlated("3: r0 = &(void __percpu *)(r0)")
+__xlated("...")
__xlated("5: r1 = *(u64 *)(r10 -8)")
__success
__naked void bad_fixed_write(void)
SEC("raw_tp")
__arch_x86_64
__xlated("6: *(u64 *)(r10 -16) = r1")
+__xlated("...")
__xlated("8: r0 = &(void __percpu *)(r0)")
+__xlated("...")
__xlated("10: r1 = *(u64 *)(r10 -16)")
__success
__naked void bad_varying_read(void)
SEC("raw_tp")
__arch_x86_64
__xlated("6: *(u64 *)(r10 -16) = r1")
+__xlated("...")
__xlated("8: r0 = &(void __percpu *)(r0)")
+__xlated("...")
__xlated("10: r1 = *(u64 *)(r10 -16)")
__success
__naked void bad_varying_write(void)
SEC("raw_tp")
__arch_x86_64
__xlated("1: *(u64 *)(r10 -8) = r1")
+__xlated("...")
__xlated("3: r0 = &(void __percpu *)(r0)")
+__xlated("...")
__xlated("5: r1 = *(u64 *)(r10 -8)")
__success
__naked void bad_write_in_subprog(void)
SEC("raw_tp")
__arch_x86_64
__xlated("1: *(u64 *)(r10 -8) = r1")
+__xlated("...")
__xlated("3: r0 = &(void __percpu *)(r0)")
+__xlated("...")
__xlated("5: r1 = *(u64 *)(r10 -8)")
__success
__naked void bad_helper_write(void)
__arch_x86_64
/* main, not patched */
__xlated("1: *(u64 *)(r10 -8) = r1")
+__xlated("...")
__xlated("3: r0 = &(void __percpu *)(r0)")
+__xlated("...")
__xlated("5: r1 = *(u64 *)(r10 -8)")
+__xlated("...")
__xlated("9: call pc+1")
+__xlated("...")
__xlated("10: exit")
/* subprogram, patched */
__xlated("11: r1 = 1")
+__xlated("...")
__xlated("13: r0 = &(void __percpu *)(r0)")
+__xlated("...")
__xlated("15: exit")
__success
__naked void invalidate_one_subprog(void)
__arch_x86_64
/* main */
__xlated("0: r1 = 1")
+__xlated("...")
__xlated("2: r0 = &(void __percpu *)(r0)")
+__xlated("...")
__xlated("4: call pc+1")
__xlated("5: exit")
/* subprogram */
__xlated("6: r1 = 1")
+__xlated("...")
__xlated("8: r0 = &(void __percpu *)(r0)")
+__xlated("...")
__xlated("10: *(u64 *)(r10 -16) = r1")
__xlated("11: exit")
__success
/* may_goto counter at -16 */
__xlated("0: *(u64 *)(r10 -16) =")
__xlated("1: r1 = 1")
+__xlated("...")
__xlated("3: r0 = &(void __percpu *)(r0)")
+__xlated("...")
/* may_goto expansion starts */
__xlated("5: r11 = *(u64 *)(r10 -16)")
__xlated("6: if r11 == 0x0 goto pc+3")
__xlated("6: r2 =")
__xlated("7: r3 = 0")
__xlated("8: r4 = 0")
+__xlated("...")
/* ... part of the inlined bpf_loop */
__xlated("12: *(u64 *)(r10 -32) = r6")
__xlated("13: *(u64 *)(r10 -24) = r7")
__xlated("14: *(u64 *)(r10 -16) = r8")
-/* ... */
+__xlated("...")
__xlated("21: call pc+8") /* dummy_loop_callback */
/* ... last insns of the bpf_loop_interaction1 */
+__xlated("...")
__xlated("28: r0 = 0")
__xlated("29: exit")
/* dummy_loop_callback */
__xlated("6: *(u64 *)(r10 -16) = r1")
__xlated("7: call")
__xlated("8: r1 = *(u64 *)(r10 -16)")
-/* ... */
+__xlated("...")
/* ... part of the inlined bpf_loop */
__xlated("15: *(u64 *)(r10 -40) = r6")
__xlated("16: *(u64 *)(r10 -32) = r7")
	const char *description = NULL;
	bool has_unpriv_result = false;
	bool has_unpriv_retval = false;
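+	/* when true, the next __xlated/__xlated_unpriv pattern is expected to
+	 * match the line right after the previous match; a "..." entry clears
+	 * the flag so any number of lines may be skipped before the next match
+	 */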
+	bool unpriv_xlated_on_next_line = true;
+	bool xlated_on_next_line = true;
	bool unpriv_jit_on_next_line;
	bool jit_on_next_line;
	bool collect_jit = false;
				spec->mode_mask |= UNPRIV;
			}
		} else if ((msg = skip_dynamic_pfx(s, TEST_TAG_EXPECT_XLATED_PFX))) {
-			err = push_msg(msg, &spec->priv.expect_xlated);
+			err = push_disasm_msg(msg, &xlated_on_next_line,
+					      &spec->priv.expect_xlated);
			if (err)
				goto cleanup;
			spec->mode_mask |= PRIV;
		} else if ((msg = skip_dynamic_pfx(s, TEST_TAG_EXPECT_XLATED_PFX_UNPRIV))) {
-			err = push_msg(msg, &spec->unpriv.expect_xlated);
+			err = push_disasm_msg(msg, &unpriv_xlated_on_next_line,
+					      &spec->unpriv.expect_xlated);
			if (err)
				goto cleanup;
			spec->mode_mask |= UNPRIV;
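The body of the new push_disasm_msg() used above is not part of this excerpt. A minimal self-contained sketch of the behaviour it needs, assuming an expect_msg layout with substr and on_next_line fields (the real structures in the test loader may differ): a "..." entry pushes nothing and only clears the caller's flag, while every real pattern records whether it has to match the line immediately after the previous match and then re-arms the flag.

#include <errno.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

struct expect_msg {
	const char *substr;	/* pattern text or regular expression */
	bool on_next_line;	/* expected right after the previous pattern's line */
};

struct expected_msgs {
	struct expect_msg *patterns;
	size_t cnt;
};

static int push_disasm_msg(const char *regex_str, bool *on_next_line,
			   struct expected_msgs *msgs)
{
	struct expect_msg *msg;
	void *tmp;

	if (strcmp(regex_str, "...") == 0) {
		/* "..." is not a pattern itself, it only lets the following
		 * pattern match anywhere further down the disassembly
		 */
		*on_next_line = false;
		return 0;
	}
	tmp = realloc(msgs->patterns, (msgs->cnt + 1) * sizeof(*msgs->patterns));
	if (!tmp)
		return -ENOMEM;
	msgs->patterns = tmp;
	msg = &msgs->patterns[msgs->cnt++];
	msg->substr = regex_str;
	msg->on_next_line = *on_next_line;
	/* by default the next pattern is again expected on the very next line */
	*on_next_line = true;
	return 0;
}

This is how consecutive __xlated() annotations end up matched on consecutive disassembly lines, while an __xlated("...") entry in between allows any number of lines to be skipped, as seen in the test updates above.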