2 "BPF_ATOMIC_AND without fetch",
5 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0x110),
6 /* atomic_and(&val, 0x011); */
7 BPF_MOV64_IMM(BPF_REG_1, 0x011),
8 BPF_ATOMIC_OP(BPF_DW, BPF_AND, BPF_REG_10, BPF_REG_1, -8),
9 /* if (val != 0x010) exit(2); */
10 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
11 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0x010, 2),
12 BPF_MOV64_IMM(BPF_REG_0, 2),
14 /* r1 should not be clobbered, no BPF_FETCH flag */
15 BPF_MOV64_IMM(BPF_REG_0, 0),
16 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0x011, 1),
17 BPF_MOV64_IMM(BPF_REG_0, 1),
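
/* For reference: 0x110 & 0x011 == 0x010. A rough C-level sketch of the
 * sequence above (whether clang encodes this with or without BPF_FETCH
 * depends on the LLVM version and on whether the result is used):
 *
 *	__u64 val = 0x110;
 *	__sync_fetch_and_and(&val, 0x011);
 */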
23 "BPF_ATOMIC_AND with fetch",
25 BPF_MOV64_IMM(BPF_REG_0, 123),
27 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0x110),
28 /* old = atomic_fetch_and(&val, 0x011); */
29 BPF_MOV64_IMM(BPF_REG_1, 0x011),
30 BPF_ATOMIC_OP(BPF_DW, BPF_AND | BPF_FETCH, BPF_REG_10, BPF_REG_1, -8),
31 /* if (old != 0x110) exit(3); */
32 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0x110, 2),
33 BPF_MOV64_IMM(BPF_REG_0, 3),
35 /* if (val != 0x010) exit(2); */
36 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -8),
37 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0x010, 2),
38 BPF_MOV64_IMM(BPF_REG_1, 2),
40 /* Check R0 wasn't clobbered (for fear of x86 JIT bug) */
41 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 123, 2),
42 BPF_MOV64_IMM(BPF_REG_0, 1),
45 BPF_MOV64_IMM(BPF_REG_0, 0),
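
/* C-level sketch of the fetch variant above (illustrative only):
 *
 *	__u64 val = 0x110;
 *	__u64 old = __sync_fetch_and_and(&val, 0x011);
 *
 * old == 0x110 and val == 0x010 afterwards; with BPF_FETCH the old
 * value is written back into the source register (R1 here).
 */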
51 "BPF_ATOMIC_AND with fetch 32bit",
54 BPF_MOV64_IMM(BPF_REG_0, 0),
55 BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 1),
57 BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0x110),
58 /* old = atomic_fetch_and(&val, 0x011); */
59 BPF_MOV32_IMM(BPF_REG_1, 0x011),
60 BPF_ATOMIC_OP(BPF_W, BPF_AND | BPF_FETCH, BPF_REG_10, BPF_REG_1, -4),
61 /* if (old != 0x110) exit(3); */
62 BPF_JMP32_IMM(BPF_JEQ, BPF_REG_1, 0x110, 2),
63 BPF_MOV32_IMM(BPF_REG_0, 3),
65 /* if (val != 0x010) exit(2); */
66 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_10, -4),
67 BPF_JMP32_IMM(BPF_JEQ, BPF_REG_1, 0x010, 2),
68 BPF_MOV32_IMM(BPF_REG_1, 2),
70 /* Check R0 wasn't clobbered (for fear of x86 JIT bug)
71 * It should be -1 so add 1 to get exit code.
73 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
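
/* The 32-bit variant uses BPF_W, so the stack slot, the AND mask and
 * the fetched old value are all 4 bytes wide. R0 is preseeded with -1
 * so that a JIT that clobbers R0 during the BPF_W atomic changes the
 * exit code; adding 1 should yield the expected exit code of 0.
 */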