2 "check valid spill/fill",
	/* spill R1(ctx) into stack */
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
	/* fill it back into R2 */
	BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
	/* should be able to access R0 = *(R2 + 8) */
	/* BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 8), */
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	BPF_EXIT_INSN(),
	},
	.errstr_unpriv = "R0 leaks addr",
	.result = ACCEPT,
	.result_unpriv = REJECT,
	.retval = POINTER_VALUE,
},
19 "check valid spill/fill, skb mark",
	BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
		    offsetof(struct __sk_buff, mark)),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.result_unpriv = ACCEPT,
},
32 "check valid spill/fill, ptr to mem",
	/* reserve 8 byte ringbuf memory */
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_MOV64_IMM(BPF_REG_2, 8),
	BPF_MOV64_IMM(BPF_REG_3, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ringbuf_reserve),
	/* store a pointer to the reserved memory in R6 */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
	/* check whether the reservation was successful */
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
	/* spill R6(mem) into the stack */
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
	/* fill it back in R7 */
	BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_10, -8),
	/* should be able to access *(R7) = 0 */
	BPF_ST_MEM(BPF_DW, BPF_REG_7, 0, 0),
	/* submit the reserved ringbuf memory */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
	BPF_MOV64_IMM(BPF_REG_2, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ringbuf_submit),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_ringbuf = { 1 },
	.result = ACCEPT,
	.result_unpriv = ACCEPT,
},
62 "check corrupted spill/fill",
	/* spill R1(ctx) into stack */
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
	/* mess up with R1 pointer on stack */
	BPF_ST_MEM(BPF_B, BPF_REG_10, -7, 0x23),
	/* fill back into R0 is fine for priv.
	 * R0 now becomes SCALAR_VALUE.
	 */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
	/* Load from R0 should fail. */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 8),
	BPF_EXIT_INSN(),
	},
	.errstr_unpriv = "attempt to corrupt spilled",
	.errstr = "R0 invalid mem access 'inv",
	.result = REJECT,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
82 "check corrupted spill/fill, LSB",
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
	BPF_ST_MEM(BPF_H, BPF_REG_10, -8, 0xcafe),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
	BPF_EXIT_INSN(),
	},
	.errstr_unpriv = "attempt to corrupt spilled",
	.result_unpriv = REJECT,
	.result = ACCEPT,
	.retval = POINTER_VALUE,
},
95 "check corrupted spill/fill, MSB",
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
	BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0x12345678),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
	BPF_EXIT_INSN(),
	},
	.errstr_unpriv = "attempt to corrupt spilled",
	.result_unpriv = REJECT,
	.result = ACCEPT,
	.retval = POINTER_VALUE,
},
108 "Spill and refill a u32 const scalar. Offset to skb->data",
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	/* r4 = 20 */
	BPF_MOV32_IMM(BPF_REG_4, 20),
	/* *(u32 *)(r10 -8) = r4 */
	BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_4, -8),
	/* r4 = *(u32 *)(r10 -8) */
	BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_10, -8),
	/* r0 = r2 */
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	/* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=inv20 */
	BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
	/* if (r0 > r3) R0=pkt,off=20 R2=pkt R3=pkt_end R4=inv20 */
	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
	/* r0 = *(u32 *)r2 R0=pkt,off=20,r=20 R2=pkt,r=20 R3=pkt_end R4=inv20 */
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
135 "Spill a u32 const, refill from another half of the uninit u32 from the stack",
	/* r4 = 20 */
	BPF_MOV32_IMM(BPF_REG_4, 20),
	/* *(u32 *)(r10 -8) = r4 */
	BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_4, -8),
	/* r4 = *(u32 *)(r10 -4) fp-8=????rrrr */
	BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_10, -4),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.errstr = "invalid read from stack off -4+0 size 4",
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
151 "Spill a u32 const scalar. Refill as u16. Offset to skb->data",
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	/* r4 = 20 */
	BPF_MOV32_IMM(BPF_REG_4, 20),
	/* *(u32 *)(r10 -8) = r4 */
	BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_4, -8),
	/* r4 = *(u16 *)(r10 -8) */
	BPF_LDX_MEM(BPF_H, BPF_REG_4, BPF_REG_10, -8),
	/* r0 = r2 */
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	/* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=inv,umax=65535 */
	BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
	/* if (r0 > r3) R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=inv,umax=65535 */
	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
	/* r0 = *(u32 *)r2 R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=inv,umax=65535 */
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.errstr = "invalid access to packet",
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
179 "Spill u32 const scalars. Refill as u64. Offset to skb->data",
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	/* r6 = 0 */
	BPF_MOV32_IMM(BPF_REG_6, 0),
	/* r7 = 20 */
	BPF_MOV32_IMM(BPF_REG_7, 20),
	/* *(u32 *)(r10 -4) = r6 */
	BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_6, -4),
	/* *(u32 *)(r10 -8) = r7 */
	BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_7, -8),
	/* r4 = *(u64 *)(r10 -8) */
	BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
	/* r0 = r2 */
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	/* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=inv */
	BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
	/* if (r0 > r3) */
	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
	/* r0 = *(u32 *)r2 */
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.errstr = "math between pkt pointer and register with unbounded min value is not allowed",
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
211 "Spill a u32 const scalar. Refill as u16 from fp-6. Offset to skb->data",
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	/* r4 = 20 */
	BPF_MOV32_IMM(BPF_REG_4, 20),
	/* *(u32 *)(r10 -8) = r4 */
	BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_4, -8),
	/* r4 = *(u16 *)(r10 -6) */
	BPF_LDX_MEM(BPF_H, BPF_REG_4, BPF_REG_10, -6),
	/* r0 = r2 */
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	/* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=inv,umax=65535 */
	BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
	/* if (r0 > r3) R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=inv,umax=65535 */
	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
	/* r0 = *(u32 *)r2 R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=inv,umax=65535 */
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.errstr = "invalid access to packet",
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
239 "Spill and refill a u32 const scalar at non 8byte aligned stack addr. Offset to skb->data",
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	/* r4 = 20 */
	BPF_MOV32_IMM(BPF_REG_4, 20),
	/* *(u32 *)(r10 -8) = r4 */
	BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_4, -8),
	/* *(u32 *)(r10 -4) = r4 */
	BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_4, -4),
	/* r4 = *(u32 *)(r10 -4) */
	BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_10, -4),
	/* r0 = r2 */
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	/* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=inv,umax=U32_MAX */
	BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
	/* if (r0 > r3) R0=pkt,umax=U32_MAX R2=pkt R3=pkt_end R4=inv */
	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
	/* r0 = *(u32 *)r2 R0=pkt,umax=U32_MAX R2=pkt R3=pkt_end R4=inv */
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.errstr = "invalid access to packet",
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
269 "Spill and refill a umax=40 bounded scalar. Offset to skb->data",
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_1,
		    offsetof(struct __sk_buff, tstamp)),
	BPF_JMP_IMM(BPF_JLE, BPF_REG_4, 40, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	/* *(u32 *)(r10 -8) = r4 R4=inv,umax=40 */
	BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_4, -8),
	/* r4 = *(u32 *)(r10 -8) */
	BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_10, -8),
	/* r2 += r4 R2=pkt R4=inv,umax=40 */
	BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_4),
	/* r0 = r2 R2=pkt,umax=40 R4=inv,umax=40 */
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	/* r2 += 20 R0=pkt,umax=40 R2=pkt,umax=40 */
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 20),
	/* if (r2 > r3) R0=pkt,umax=40 R2=pkt,off=20,umax=40 */
	BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_3, 1),
	/* r0 = *(u32 *)r0 R0=pkt,r=20,umax=40 R2=pkt,off=20,r=20,umax=40 */
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
301 "Spill a u32 scalar at fp-4 and then at fp-8",
	/* r4 = 4321 */
	BPF_MOV32_IMM(BPF_REG_4, 4321),
	/* *(u32 *)(r10 -4) = r4 */
	BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_4, -4),
	/* *(u32 *)(r10 -8) = r4 */
	BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_4, -8),
	/* r4 = *(u64 *)(r10 -8) */
	BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},