tools/include/linux/filter.h
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Linux Socket Filter Data Structures
 */
#ifndef __TOOLS_LINUX_FILTER_H
#define __TOOLS_LINUX_FILTER_H

#include <linux/bpf.h>

/* ArgX, context and stack frame pointer register positions. Note that
 * Arg1, Arg2, Arg3, etc. are used as argument mappings for function
 * calls in the BPF_CALL instruction.
 */
#define BPF_REG_ARG1	BPF_REG_1
#define BPF_REG_ARG2	BPF_REG_2
#define BPF_REG_ARG3	BPF_REG_3
#define BPF_REG_ARG4	BPF_REG_4
#define BPF_REG_ARG5	BPF_REG_5
#define BPF_REG_CTX	BPF_REG_6
#define BPF_REG_FP	BPF_REG_10

/* Additional register mappings for converted user programs. */
#define BPF_REG_A	BPF_REG_0
#define BPF_REG_X	BPF_REG_7
#define BPF_REG_TMP	BPF_REG_8

/* BPF program can access up to 512 bytes of stack space. */
#define MAX_BPF_STACK	512

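/*
 * Usage sketch (editor's illustration, not part of the original header):
 * with these conventions, the second argument of a helper call built from
 * the initializer macros below can be a pointer into the program's own
 * stack, derived from the read-only frame pointer:
 *
 *	BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP)		r2 = r10 (fp)
 *	BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -8)	r2 = fp - 8
 *
 * The -8 offset stays within the MAX_BPF_STACK limit defined above.
 */
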
/* Helper macros for filter block array initializers. */

/* ALU ops on registers, bpf_add|sub|...: dst_reg += src_reg */

#define BPF_ALU64_REG(OP, DST, SRC) \
	((struct bpf_insn) { \
		.code = BPF_ALU64 | BPF_OP(OP) | BPF_X, \
		.dst_reg = DST, \
		.src_reg = SRC, \
		.off = 0, \
		.imm = 0 })

#define BPF_ALU32_REG(OP, DST, SRC) \
	((struct bpf_insn) { \
		.code = BPF_ALU | BPF_OP(OP) | BPF_X, \
		.dst_reg = DST, \
		.src_reg = SRC, \
		.off = 0, \
		.imm = 0 })

/* ALU ops on immediates, bpf_add|sub|...: dst_reg += imm32 */

#define BPF_ALU64_IMM(OP, DST, IMM) \
	((struct bpf_insn) { \
		.code = BPF_ALU64 | BPF_OP(OP) | BPF_K, \
		.dst_reg = DST, \
		.src_reg = 0, \
		.off = 0, \
		.imm = IMM })

#define BPF_ALU32_IMM(OP, DST, IMM) \
	((struct bpf_insn) { \
		.code = BPF_ALU | BPF_OP(OP) | BPF_K, \
		.dst_reg = DST, \
		.src_reg = 0, \
		.off = 0, \
		.imm = IMM })

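/*
 * Usage sketch (editor's illustration, not part of the original header):
 * inside a struct bpf_insn array, each macro expands to one instruction, e.g.
 *
 *	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 4)		r2 += 4
 *	BPF_ALU64_REG(BPF_MUL, BPF_REG_2, BPF_REG_3)	r2 *= r3
 *	BPF_ALU32_IMM(BPF_AND, BPF_REG_2, 0xff)		w2 &= 0xff (32-bit op,
 *							upper 32 bits zeroed)
 */
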
/* Endianness conversion, cpu_to_{l,b}e(), {l,b}e_to_cpu() */

#define BPF_ENDIAN(TYPE, DST, LEN) \
	((struct bpf_insn) { \
		.code = BPF_ALU | BPF_END | BPF_SRC(TYPE), \
		.dst_reg = DST, \
		.src_reg = 0, \
		.off = 0, \
		.imm = LEN })

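/*
 * Usage sketch (editor's illustration, not part of the original header):
 * TYPE is one of the BPF_TO_LE/BPF_TO_BE flags from <linux/bpf.h> and LEN
 * selects the operand width in bits, e.g.
 *
 *	BPF_ENDIAN(BPF_TO_BE, BPF_REG_0, 16)	r0 = htobe16(r0)
 *
 * a typical step after loading a 16-bit port number.
 */
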
/* Short form of mov, dst_reg = src_reg */

#define BPF_MOV64_REG(DST, SRC) \
	((struct bpf_insn) { \
		.code = BPF_ALU64 | BPF_MOV | BPF_X, \
		.dst_reg = DST, \
		.src_reg = SRC, \
		.off = 0, \
		.imm = 0 })

#define BPF_MOV32_REG(DST, SRC) \
	((struct bpf_insn) { \
		.code = BPF_ALU | BPF_MOV | BPF_X, \
		.dst_reg = DST, \
		.src_reg = SRC, \
		.off = 0, \
		.imm = 0 })

/* Short form of mov, dst_reg = imm32 */

#define BPF_MOV64_IMM(DST, IMM) \
	((struct bpf_insn) { \
		.code = BPF_ALU64 | BPF_MOV | BPF_K, \
		.dst_reg = DST, \
		.src_reg = 0, \
		.off = 0, \
		.imm = IMM })

#define BPF_MOV32_IMM(DST, IMM) \
	((struct bpf_insn) { \
		.code = BPF_ALU | BPF_MOV | BPF_K, \
		.dst_reg = DST, \
		.src_reg = 0, \
		.off = 0, \
		.imm = IMM })

/* Short form of mov based on type, BPF_X: dst_reg = src_reg, BPF_K: dst_reg = imm32 */

#define BPF_MOV64_RAW(TYPE, DST, SRC, IMM) \
	((struct bpf_insn) { \
		.code = BPF_ALU64 | BPF_MOV | BPF_SRC(TYPE), \
		.dst_reg = DST, \
		.src_reg = SRC, \
		.off = 0, \
		.imm = IMM })

#define BPF_MOV32_RAW(TYPE, DST, SRC, IMM) \
	((struct bpf_insn) { \
		.code = BPF_ALU | BPF_MOV | BPF_SRC(TYPE), \
		.dst_reg = DST, \
		.src_reg = SRC, \
		.off = 0, \
		.imm = IMM })

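/*
 * Usage sketch (editor's illustration, not part of the original header):
 *
 *	BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_1)	tmp = r1
 *	BPF_MOV32_IMM(BPF_REG_2, 0)		w2 = 0, upper 32 bits cleared
 *
 * The _RAW variants take the BPF_X/BPF_K source type explicitly, which is
 * handy when the operand kind is only known at emit time.
 */
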
/* Direct packet access, R0 = *(uint *) (skb->data + imm32) */

#define BPF_LD_ABS(SIZE, IMM) \
	((struct bpf_insn) { \
		.code = BPF_LD | BPF_SIZE(SIZE) | BPF_ABS, \
		.dst_reg = 0, \
		.src_reg = 0, \
		.off = 0, \
		.imm = IMM })

/* Indirect packet access, R0 = *(uint *) (skb->data + src_reg + imm32) */

#define BPF_LD_IND(SIZE, SRC, IMM) \
	((struct bpf_insn) { \
		.code = BPF_LD | BPF_SIZE(SIZE) | BPF_IND, \
		.dst_reg = 0, \
		.src_reg = SRC, \
		.off = 0, \
		.imm = IMM })

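/*
 * Usage sketch (editor's illustration, not part of the original header):
 * these legacy packet loads implicitly use the skb held in BPF_REG_CTX and
 * leave the result, converted from network to host byte order, in BPF_REG_0:
 *
 *	BPF_LD_ABS(BPF_H, 12)			r0 = EtherType halfword
 *	BPF_LD_IND(BPF_B, BPF_REG_X, 14)	r0 = byte at offset X + 14
 */
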
/* Memory load, dst_reg = *(uint *) (src_reg + off16) */

#define BPF_LDX_MEM(SIZE, DST, SRC, OFF) \
	((struct bpf_insn) { \
		.code = BPF_LDX | BPF_SIZE(SIZE) | BPF_MEM, \
		.dst_reg = DST, \
		.src_reg = SRC, \
		.off = OFF, \
		.imm = 0 })

/* Memory store, *(uint *) (dst_reg + off16) = src_reg */

#define BPF_STX_MEM(SIZE, DST, SRC, OFF) \
	((struct bpf_insn) { \
		.code = BPF_STX | BPF_SIZE(SIZE) | BPF_MEM, \
		.dst_reg = DST, \
		.src_reg = SRC, \
		.off = OFF, \
		.imm = 0 })

/* Atomic memory add, *(uint *)(dst_reg + off16) += src_reg */

#define BPF_STX_XADD(SIZE, DST, SRC, OFF) \
	((struct bpf_insn) { \
		.code = BPF_STX | BPF_SIZE(SIZE) | BPF_XADD, \
		.dst_reg = DST, \
		.src_reg = SRC, \
		.off = OFF, \
		.imm = 0 })

/* Memory store, *(uint *) (dst_reg + off16) = imm32 */

#define BPF_ST_MEM(SIZE, DST, OFF, IMM) \
	((struct bpf_insn) { \
		.code = BPF_ST | BPF_SIZE(SIZE) | BPF_MEM, \
		.dst_reg = DST, \
		.src_reg = 0, \
		.off = OFF, \
		.imm = IMM })

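/*
 * Usage sketch (editor's illustration, not part of the original header):
 * a stack slot that is zero-initialized, bumped atomically and read back:
 *
 *	BPF_ST_MEM(BPF_DW, BPF_REG_FP, -8, 0)			*(fp - 8) = 0
 *	BPF_STX_XADD(BPF_DW, BPF_REG_FP, BPF_REG_1, -8)		*(fp - 8) += r1
 *	BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_FP, -8)		r2 = *(fp - 8)
 */
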
/* Conditional jumps against registers, if (dst_reg 'op' src_reg) goto pc + off16 */

#define BPF_JMP_REG(OP, DST, SRC, OFF) \
	((struct bpf_insn) { \
		.code = BPF_JMP | BPF_OP(OP) | BPF_X, \
		.dst_reg = DST, \
		.src_reg = SRC, \
		.off = OFF, \
		.imm = 0 })

/* Conditional jumps against immediates, if (dst_reg 'op' imm32) goto pc + off16 */

#define BPF_JMP_IMM(OP, DST, IMM, OFF) \
	((struct bpf_insn) { \
		.code = BPF_JMP | BPF_OP(OP) | BPF_K, \
		.dst_reg = DST, \
		.src_reg = 0, \
		.off = OFF, \
		.imm = IMM })

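/*
 * Usage sketch (editor's illustration, not part of the original header):
 * the offset counts instructions relative to the one after the jump:
 *
 *	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2)		if r0 == 0 skip next 2 insns
 *	BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_3, 1)	if r2 > r3 skip next insn
 */
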
/* Unconditional jumps, goto pc + off16 */

#define BPF_JMP_A(OFF) \
	((struct bpf_insn) { \
		.code = BPF_JMP | BPF_JA, \
		.dst_reg = 0, \
		.src_reg = 0, \
		.off = OFF, \
		.imm = 0 })

/* Function call */

#define BPF_EMIT_CALL(FUNC) \
	((struct bpf_insn) { \
		.code = BPF_JMP | BPF_CALL, \
		.dst_reg = 0, \
		.src_reg = 0, \
		.off = 0, \
		.imm = ((FUNC) - BPF_FUNC_unspec) })

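/*
 * Usage sketch (editor's illustration, not part of the original header):
 * helper arguments go in BPF_REG_ARG1..ARG5 and the return value comes back
 * in BPF_REG_0. For example, a map lookup with the key at fp - 8 (the map
 * reference itself would go into ARG1 via BPF_LD_MAP_FD() below):
 *
 *	BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP)
 *	BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -8)
 *	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem)
 */
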
/* Raw code statement block */

#define BPF_RAW_INSN(CODE, DST, SRC, OFF, IMM) \
	((struct bpf_insn) { \
		.code = CODE, \
		.dst_reg = DST, \
		.src_reg = SRC, \
		.off = OFF, \
		.imm = IMM })

/* BPF_LD_IMM64 macro encodes single 'load 64-bit immediate' insn */

#define BPF_LD_IMM64(DST, IMM) \
	BPF_LD_IMM64_RAW(DST, 0, IMM)

#define BPF_LD_IMM64_RAW(DST, SRC, IMM) \
	((struct bpf_insn) { \
		.code = BPF_LD | BPF_DW | BPF_IMM, \
		.dst_reg = DST, \
		.src_reg = SRC, \
		.off = 0, \
		.imm = (__u32) (IMM) }), \
	((struct bpf_insn) { \
		.code = 0, /* zero is reserved opcode */ \
		.dst_reg = 0, \
		.src_reg = 0, \
		.off = 0, \
		.imm = ((__u64) (IMM)) >> 32 })

/* pseudo BPF_LD_IMM64 insn used to refer to process-local map_fd */

#define BPF_LD_MAP_FD(DST, MAP_FD) \
	BPF_LD_IMM64_RAW(DST, BPF_PSEUDO_MAP_FD, MAP_FD)

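/*
 * Usage sketch (editor's illustration, not part of the original header):
 * unlike the other macros, BPF_LD_IMM64()/BPF_LD_MAP_FD() expand to two
 * struct bpf_insn entries and therefore occupy two slots in an insn array.
 * Here map_fd stands for a file descriptor the loading process obtained from
 * bpf(BPF_MAP_CREATE, ...); the kernel rewrites it into a map pointer at
 * program load time.
 *
 *	BPF_LD_MAP_FD(BPF_REG_ARG1, map_fd)
 *	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem)
 */
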
/* Program exit */

#define BPF_EXIT_INSN() \
	((struct bpf_insn) { \
		.code = BPF_JMP | BPF_EXIT, \
		.dst_reg = 0, \
		.src_reg = 0, \
		.off = 0, \
		.imm = 0 })

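/*
 * Usage sketch (editor's illustration, not part of the original header):
 * a complete, minimal "return 0" program built only from these macros:
 *
 *	struct bpf_insn prog[] = {
 *		BPF_MOV64_IMM(BPF_REG_0, 0),
 *		BPF_EXIT_INSN(),
 *	};
 *
 * BPF_MOV64_IMM() sets the return value in r0 and BPF_EXIT_INSN() leaves
 * the program.
 */
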
#endif /* __TOOLS_LINUX_FILTER_H */