/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
#ifndef _UAPI__LINUX_BPF_H__
#define _UAPI__LINUX_BPF_H__

#include <linux/types.h>
#include <linux/bpf_common.h>

/* Extended instruction set based on top of classic BPF */

/* instruction classes */
#define BPF_ALU64	0x07	/* alu mode in double word width */

/* ld/ldx fields */
#define BPF_DW		0x18	/* double word (64-bit) */
#define BPF_XADD	0xc0	/* exclusive add */

/* alu/jmp fields */
#define BPF_MOV		0xb0	/* mov reg to reg */
#define BPF_ARSH	0xc0	/* sign extending arithmetic shift right */

/* change endianness of a register */
#define BPF_END		0xd0	/* flags for endianness conversion: */
#define BPF_TO_LE	0x00	/* convert to little-endian */
#define BPF_TO_BE	0x08	/* convert to big-endian */
#define BPF_FROM_LE	BPF_TO_LE
#define BPF_FROM_BE	BPF_TO_BE

/* jmp encodings */
#define BPF_JNE		0x50	/* jump != */
#define BPF_JLT		0xa0	/* LT is unsigned, '<' */
#define BPF_JLE		0xb0	/* LE is unsigned, '<=' */
#define BPF_JSGT	0x60	/* SGT is signed '>', GT in x86 */
#define BPF_JSGE	0x70	/* SGE is signed '>=', GE in x86 */
#define BPF_JSLT	0xc0	/* SLT is signed, '<' */
#define BPF_JSLE	0xd0	/* SLE is signed, '<=' */
#define BPF_CALL	0x80	/* function call */
#define BPF_EXIT	0x90	/* function return */

/* Register numbers */
enum {
	BPF_REG_0 = 0,
	BPF_REG_1,
	BPF_REG_2,
	BPF_REG_3,
	BPF_REG_4,
	BPF_REG_5,
	BPF_REG_6,
	BPF_REG_7,
	BPF_REG_8,
	BPF_REG_9,
	BPF_REG_10,
	__MAX_BPF_REG,
};

/* BPF has 10 general purpose 64-bit registers and stack frame. */
#define MAX_BPF_REG	__MAX_BPF_REG

struct bpf_insn {
	__u8	code;		/* opcode */
	__u8	dst_reg:4;	/* dest register */
	__u8	src_reg:4;	/* source register */
	__s16	off;		/* signed offset */
	__s32	imm;		/* signed immediate constant */
};

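/* Illustrative sketch (not part of the UAPI): together with BPF_X from
 * linux/bpf_common.h, the fields above encode the 64-bit register move
 * "r1 = r2" as:
 *
 *	struct bpf_insn insn = {
 *		.code    = BPF_ALU64 | BPF_MOV | BPF_X,
 *		.dst_reg = BPF_REG_1,
 *		.src_reg = BPF_REG_2,
 *		.off     = 0,
 *		.imm     = 0,
 *	};
 */
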
/* Key of a BPF_MAP_TYPE_LPM_TRIE entry */
struct bpf_lpm_trie_key {
	__u32	prefixlen;	/* up to 32 for AF_INET, 128 for AF_INET6 */
	__u8	data[0];	/* Arbitrary size */
};

/* BPF syscall commands, see bpf(2) man-page for details. */
enum bpf_cmd {
	BPF_MAP_CREATE,
	BPF_MAP_LOOKUP_ELEM,
	BPF_MAP_UPDATE_ELEM,
	BPF_MAP_DELETE_ELEM,
	BPF_MAP_GET_NEXT_KEY,
	BPF_PROG_LOAD,
	BPF_OBJ_PIN,
	BPF_OBJ_GET,
	BPF_PROG_ATTACH,
	BPF_PROG_DETACH,
	BPF_PROG_TEST_RUN,
	BPF_PROG_GET_NEXT_ID,
	BPF_MAP_GET_NEXT_ID,
	BPF_PROG_GET_FD_BY_ID,
	BPF_MAP_GET_FD_BY_ID,
	BPF_OBJ_GET_INFO_BY_FD,
	BPF_PROG_QUERY,
	BPF_RAW_TRACEPOINT_OPEN,
	BPF_BTF_LOAD,
};

enum bpf_map_type {
	BPF_MAP_TYPE_UNSPEC,
	BPF_MAP_TYPE_HASH,
	BPF_MAP_TYPE_ARRAY,
	BPF_MAP_TYPE_PROG_ARRAY,
	BPF_MAP_TYPE_PERF_EVENT_ARRAY,
	BPF_MAP_TYPE_PERCPU_HASH,
	BPF_MAP_TYPE_PERCPU_ARRAY,
	BPF_MAP_TYPE_STACK_TRACE,
	BPF_MAP_TYPE_CGROUP_ARRAY,
	BPF_MAP_TYPE_LRU_HASH,
	BPF_MAP_TYPE_LRU_PERCPU_HASH,
	BPF_MAP_TYPE_LPM_TRIE,
	BPF_MAP_TYPE_ARRAY_OF_MAPS,
	BPF_MAP_TYPE_HASH_OF_MAPS,
	BPF_MAP_TYPE_DEVMAP,
	BPF_MAP_TYPE_SOCKMAP,
	BPF_MAP_TYPE_CPUMAP,
};

enum bpf_prog_type {
	BPF_PROG_TYPE_UNSPEC,
	BPF_PROG_TYPE_SOCKET_FILTER,
	BPF_PROG_TYPE_KPROBE,
	BPF_PROG_TYPE_SCHED_CLS,
	BPF_PROG_TYPE_SCHED_ACT,
	BPF_PROG_TYPE_TRACEPOINT,
	BPF_PROG_TYPE_XDP,
	BPF_PROG_TYPE_PERF_EVENT,
	BPF_PROG_TYPE_CGROUP_SKB,
	BPF_PROG_TYPE_CGROUP_SOCK,
	BPF_PROG_TYPE_LWT_IN,
	BPF_PROG_TYPE_LWT_OUT,
	BPF_PROG_TYPE_LWT_XMIT,
	BPF_PROG_TYPE_SOCK_OPS,
	BPF_PROG_TYPE_SK_SKB,
	BPF_PROG_TYPE_CGROUP_DEVICE,
	BPF_PROG_TYPE_SK_MSG,
	BPF_PROG_TYPE_RAW_TRACEPOINT,
	BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
};

enum bpf_attach_type {
	BPF_CGROUP_INET_INGRESS,
	BPF_CGROUP_INET_EGRESS,
	BPF_CGROUP_INET_SOCK_CREATE,
	BPF_CGROUP_SOCK_OPS,
	BPF_SK_SKB_STREAM_PARSER,
	BPF_SK_SKB_STREAM_VERDICT,
	BPF_CGROUP_DEVICE,
	BPF_SK_MSG_VERDICT,
	BPF_CGROUP_INET4_BIND,
	BPF_CGROUP_INET6_BIND,
	BPF_CGROUP_INET4_CONNECT,
	BPF_CGROUP_INET6_CONNECT,
	BPF_CGROUP_INET4_POST_BIND,
	BPF_CGROUP_INET6_POST_BIND,
	__MAX_BPF_ATTACH_TYPE
};

#define MAX_BPF_ATTACH_TYPE __MAX_BPF_ATTACH_TYPE

/* cgroup-bpf attach flags used in BPF_PROG_ATTACH command
 *
 * NONE (default): No further bpf programs allowed in the subtree.
 *
 * BPF_F_ALLOW_OVERRIDE: If a sub-cgroup installs some bpf program,
 * the program in this cgroup yields to sub-cgroup program.
 *
 * BPF_F_ALLOW_MULTI: If a sub-cgroup installs some bpf program,
 * that cgroup program gets run in addition to the program in this cgroup.
 *
 * Only one program is allowed to be attached to a cgroup with
 * NONE or BPF_F_ALLOW_OVERRIDE flag.
 * Attaching another program on top of NONE or BPF_F_ALLOW_OVERRIDE will
 * release the old program and attach the new one. Attach flags have to match.
 *
 * Multiple programs are allowed to be attached to a cgroup with
 * BPF_F_ALLOW_MULTI flag. They are executed in FIFO order
 * (those that were attached first, run first).
 * The programs of sub-cgroup are executed first, then programs of
 * this cgroup and then programs of parent cgroup.
 * When a child program makes a decision (like picking TCP CA or sock bind),
 * the parent program has a chance to override it.
 *
 * A cgroup with MULTI or OVERRIDE flag allows any attach flags in sub-cgroups.
 * A cgroup with NONE doesn't allow any programs in sub-cgroups.
 * Ex1:
 * cgrp1 (MULTI progs A, B) ->
 *    cgrp2 (OVERRIDE prog C) ->
 *      cgrp3 (MULTI prog D) ->
 *        cgrp4 (OVERRIDE prog E) ->
 *          cgrp5 (NONE prog F)
 * the event in cgrp5 triggers execution of F,D,A,B in that order.
 * if prog F is detached, the execution is E,D,A,B
 * if prog F and D are detached, the execution is E,A,B
 * if prog F, E and D are detached, the execution is C,A,B
 *
 * All eligible programs are executed regardless of return code from
 * earlier programs.
 */
#define BPF_F_ALLOW_OVERRIDE	(1U << 0)
#define BPF_F_ALLOW_MULTI	(1U << 1)

/* If BPF_F_STRICT_ALIGNMENT is used in BPF_PROG_LOAD command, the
 * verifier will perform strict alignment checking as if the kernel
 * has been built with CONFIG_EFFICIENT_UNALIGNED_ACCESS not set,
 * and NET_IP_ALIGN defined to 2.
 */
#define BPF_F_STRICT_ALIGNMENT	(1U << 0)

/* when bpf_ldimm64->src_reg == BPF_PSEUDO_MAP_FD, bpf_ldimm64->imm == fd */
#define BPF_PSEUDO_MAP_FD	1

/* when bpf_call->src_reg == BPF_PSEUDO_CALL, bpf_call->imm == pc-relative
 * offset to another bpf function
 */
#define BPF_PSEUDO_CALL		1

/* flags for BPF_MAP_UPDATE_ELEM command */
#define BPF_ANY		0 /* create new element or update existing */
#define BPF_NOEXIST	1 /* create new element if it didn't exist */
#define BPF_EXIST	2 /* update existing element */

/* flags for BPF_MAP_CREATE command */
#define BPF_F_NO_PREALLOC	(1U << 0)
/* Instead of having one common LRU list in the
 * BPF_MAP_TYPE_LRU_[PERCPU_]HASH map, use a percpu LRU list
 * which can scale and perform better.
 * Note, the LRU nodes (including free nodes) cannot be moved
 * across different LRU lists.
 */
#define BPF_F_NO_COMMON_LRU	(1U << 1)
/* Specify numa node during map creation */
#define BPF_F_NUMA_NODE		(1U << 2)

/* flags for BPF_PROG_QUERY */
#define BPF_F_QUERY_EFFECTIVE	(1U << 0)

#define BPF_OBJ_NAME_LEN 16U

/* Flags for accessing BPF object */
#define BPF_F_RDONLY		(1U << 3)
#define BPF_F_WRONLY		(1U << 4)

/* Flag for stack_map, store build_id+offset instead of pointer */
#define BPF_F_STACK_BUILD_ID	(1U << 5)

enum bpf_stack_build_id_status {
	/* user space needs an empty entry to identify end of a trace */
	BPF_STACK_BUILD_ID_EMPTY = 0,
	/* with valid build_id and offset */
	BPF_STACK_BUILD_ID_VALID = 1,
	/* couldn't get build_id, fallback to ip */
	BPF_STACK_BUILD_ID_IP = 2,
};

#define BPF_BUILD_ID_SIZE 20
struct bpf_stack_build_id {
	__s32		status;
	unsigned char	build_id[BPF_BUILD_ID_SIZE];
	union {
		__u64	offset;
		__u64	ip;
	};
};

union bpf_attr {
	struct { /* anonymous struct used by BPF_MAP_CREATE command */
		__u32	map_type;	/* one of enum bpf_map_type */
		__u32	key_size;	/* size of key in bytes */
		__u32	value_size;	/* size of value in bytes */
		__u32	max_entries;	/* max number of entries in a map */
		__u32	map_flags;	/* BPF_MAP_CREATE related
					 * flags defined above.
					 */
		__u32	inner_map_fd;	/* fd pointing to the inner map */
		__u32	numa_node;	/* numa node (effective only if
					 * BPF_F_NUMA_NODE is set).
					 */
		char	map_name[BPF_OBJ_NAME_LEN];
		__u32	map_ifindex;	/* ifindex of netdev to create on */
		__u32	btf_fd;		/* fd pointing to a BTF type data */
		__u32	btf_key_id;	/* BTF type_id of the key */
		__u32	btf_value_id;	/* BTF type_id of the value */
	};

	struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */
		__u32		map_fd;
		__aligned_u64	key;
		union {
			__aligned_u64 value;
			__aligned_u64 next_key;
		};
		__u64		flags;
	};

	struct { /* anonymous struct used by BPF_PROG_LOAD command */
		__u32		prog_type;	/* one of enum bpf_prog_type */
		__u32		insn_cnt;
		__aligned_u64	insns;
		__aligned_u64	license;
		__u32		log_level;	/* verbosity level of verifier */
		__u32		log_size;	/* size of user buffer */
		__aligned_u64	log_buf;	/* user supplied buffer */
		__u32		kern_version;	/* checked when prog_type=kprobe */
		__u32		prog_flags;
		char		prog_name[BPF_OBJ_NAME_LEN];
		__u32		prog_ifindex;	/* ifindex of netdev to prep for */
		/* For some prog types expected attach type must be known at
		 * load time to verify attach type specific parts of prog
		 * (context accesses, allowed helpers, etc).
		 */
		__u32		expected_attach_type;
	};

	struct { /* anonymous struct used by BPF_OBJ_* commands */
		__aligned_u64	pathname;
		__u32		bpf_fd;
		__u32		file_flags;
	};

	struct { /* anonymous struct used by BPF_PROG_ATTACH/DETACH commands */
		__u32		target_fd;	/* container object to attach to */
		__u32		attach_bpf_fd;	/* eBPF program to attach */
		__u32		attach_type;
		__u32		attach_flags;
	};

	struct { /* anonymous struct used by BPF_PROG_TEST_RUN command */
		__u32		prog_fd;
		__u32		retval;
		__u32		data_size_in;
		__u32		data_size_out;
		__aligned_u64	data_in;
		__aligned_u64	data_out;
		__u32		repeat;
		__u32		duration;
	} test;

	struct { /* anonymous struct used by BPF_*_GET_*_ID */
		union {
			__u32		start_id;
			__u32		prog_id;
			__u32		map_id;
		};
		__u32		next_id;
		__u32		open_flags;
	};

	struct { /* anonymous struct used by BPF_OBJ_GET_INFO_BY_FD */
		__u32		bpf_fd;
		__u32		info_len;
		__aligned_u64	info;
	} info;

	struct { /* anonymous struct used by BPF_PROG_QUERY command */
		__u32		target_fd;	/* container object to query */
		__u32		attach_type;
		__u32		query_flags;
		__u32		attach_flags;
		__aligned_u64	prog_ids;
		__u32		prog_cnt;
	} query;

	struct {
		__u64 name;
		__u32 prog_fd;
	} raw_tracepoint;

	struct { /* anonymous struct for BPF_BTF_LOAD */
		__aligned_u64	btf;
		__aligned_u64	btf_log_buf;
		__u32		btf_size;
		__u32		btf_log_size;
		__u32		btf_log_level;
	};
} __attribute__((aligned(8)));

/* The description below is an attempt at providing documentation to eBPF
 * developers about the multiple available eBPF helper functions. It can be
 * parsed and used to produce a manual page. The workflow is the following,
 * and requires the rst2man utility:
 *
 *     $ ./scripts/bpf_helpers_doc.py \
 *             --filename include/uapi/linux/bpf.h > /tmp/bpf-helpers.rst
 *     $ rst2man /tmp/bpf-helpers.rst > /tmp/bpf-helpers.7
 *     $ man /tmp/bpf-helpers.7
 *
 * Note that in order to produce this external documentation, some RST
 * formatting is used in the descriptions to get "bold" and "italics" in
 * manual pages. Also note that the few trailing white spaces are
 * intentional, removing them would break paragraphs for rst2man.
 *
 * Start of BPF helper function descriptions:
 *
 * void *bpf_map_lookup_elem(struct bpf_map *map, const void *key)
 * 	Description
 * 		Perform a lookup in *map* for an entry associated to *key*.
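 *
 * 		A minimal usage sketch (illustrative only; *my_map* and *key*
 * 		are hypothetical names chosen by the program):
 *
 * 		::
 *
 * 			long *count = bpf_map_lookup_elem(&my_map, &key);
 * 			if (count) // entry found, safe to dereference
 * 				__sync_fetch_and_add(count, 1);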
 * 	Return
 * 		Map value associated to *key*, or **NULL** if no entry was
 * 		found.
 *
 * int bpf_map_update_elem(struct bpf_map *map, const void *key, const void *value, u64 flags)
 * 	Description
 * 		Add or update the value of the entry associated to *key* in
 * 		*map* with *value*. *flags* is one of:
 *
 * 		**BPF_NOEXIST**
 * 			The entry for *key* must not exist in the map.
 * 		**BPF_EXIST**
 * 			The entry for *key* must already exist in the map.
 * 		**BPF_ANY**
 * 			No condition on the existence of the entry for *key*.
 *
 * 		Flag value **BPF_NOEXIST** cannot be used for maps of types
 * 		**BPF_MAP_TYPE_ARRAY** or **BPF_MAP_TYPE_PERCPU_ARRAY** (all
 * 		elements always exist); the helper would return an error.
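 *
 * 		For instance (an illustrative sketch, with *my_map* being a
 * 		hypothetical hash map), inserting a fresh entry only if none
 * 		exists yet could look like:
 *
 * 		::
 *
 * 			long init_val = 1;
 *
 * 			if (bpf_map_update_elem(&my_map, &key, &init_val,
 * 						BPF_NOEXIST) < 0) {
 * 				// entry already present (-EEXIST) or error
 * 			}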
 * 	Return
 * 		0 on success, or a negative error in case of failure.
 *
 * int bpf_map_delete_elem(struct bpf_map *map, const void *key)
 * 	Description
 * 		Delete entry with *key* from *map*.
 * 	Return
 * 		0 on success, or a negative error in case of failure.
 *
 * int bpf_probe_read(void *dst, u32 size, const void *src)
 * 	Description
 * 		For tracing programs, safely attempt to read *size* bytes from
 * 		address *src* and store the data in *dst*.
 * 	Return
 * 		0 on success, or a negative error in case of failure.
 *
 * u64 bpf_ktime_get_ns(void)
 * 	Description
 * 		Return the time elapsed since system boot, in nanoseconds.
 * 	Return
 * 		Current *ktime*.
 *
 * int bpf_trace_printk(const char *fmt, u32 fmt_size, ...)
 * 	Description
 * 		This helper is a "printk()-like" facility for debugging. It
 * 		prints a message defined by format *fmt* (of size *fmt_size*)
 * 		to file *\/sys/kernel/debug/tracing/trace* from DebugFS, if
 * 		available. It can take up to three additional **u64**
 * 		arguments (as with eBPF helpers in general, the total number
 * 		of arguments is limited to five).
 *
 * 		Each time the helper is called, it appends a line to the trace.
 * 		The format of the trace is customizable, and the exact output
 * 		one will get depends on the options set in
 * 		*\/sys/kernel/debug/tracing/trace_options* (see also the
 * 		*README* file under the same directory). However, it usually
 * 		defaults to something like:
 *
 * 		::
 *
 * 			telnet-470   [001] .N.. 419421.045894: 0x00000001: <formatted msg>
 *
 * 		In the above:
 *
 * 			* ``telnet`` is the name of the current task.
 * 			* ``470`` is the PID of the current task.
 * 			* ``001`` is the CPU number on which the task is
 * 			  running.
 * 			* In ``.N..``, each character refers to a set of
 * 			  options (whether irqs are enabled, scheduling
 * 			  options, whether hard/softirqs are running, level of
 * 			  preempt_disabled respectively). **N** means that
 * 			  **TIF_NEED_RESCHED** and **PREEMPT_NEED_RESCHED**
 * 			  are set.
 * 			* ``419421.045894`` is a timestamp.
 * 			* ``0x00000001`` is a fake value used by BPF for the
 * 			  instruction pointer register.
 * 			* ``<formatted msg>`` is the message formatted with
 * 			  *fmt*.
 *
 * 		The conversion specifiers supported by *fmt* are similar, but
 * 		more limited than for printk(). They are **%d**, **%i**,
 * 		**%u**, **%x**, **%ld**, **%li**, **%lu**, **%lx**, **%lld**,
 * 		**%lli**, **%llu**, **%llx**, **%p**, **%s**. No modifier (size
 * 		of field, padding with zeroes, etc.) is available, and the
 * 		helper will return **-EINVAL** (but print nothing) if it
 * 		encounters an unknown specifier.
 *
 * 		Also, note that **bpf_trace_printk**\ () is slow, and should
 * 		only be used for debugging purposes. For this reason, a notice
 * 		block (spanning several lines) is printed to kernel logs and
 * 		states that the helper should not be used "for production use"
 * 		the first time this helper is used (or more precisely, when
 * 		**trace_printk**\ () buffers are allocated). For passing values
 * 		to user space, perf events should be preferred.
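 *
 * 		A minimal debugging sketch (illustrative only; the format
 * 		string must live on the eBPF stack):
 *
 * 		::
 *
 * 			char fmt[] = "map value: %d\n";
 *
 * 			bpf_trace_printk(fmt, sizeof(fmt), val);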
 * 	Return
 * 		The number of bytes written to the buffer, or a negative error
 * 		in case of failure.
 *
 * u32 bpf_get_prandom_u32(void)
 * 	Description
 * 		Get a pseudo-random number.
 *
 * 		From a security point of view, this helper uses its own
 * 		pseudo-random internal state, and cannot be used to infer the
 * 		seed of other random functions in the kernel. However, it is
 * 		essential to note that the generator used by the helper is not
 * 		cryptographically secure.
 * 	Return
 * 		A random 32-bit unsigned value.
 *
 * u32 bpf_get_smp_processor_id(void)
 * 	Description
 * 		Get the SMP (symmetric multiprocessing) processor id. Note that
 * 		all programs run with preemption disabled, which means that the
 * 		SMP processor id is stable during all the execution of the
 * 		program.
 * 	Return
 * 		The SMP id of the processor running the program.
 *
 * int bpf_skb_store_bytes(struct sk_buff *skb, u32 offset, const void *from, u32 len, u64 flags)
 * 	Description
 * 		Store *len* bytes from address *from* into the packet
 * 		associated to *skb*, at *offset*. *flags* are a combination of
 * 		**BPF_F_RECOMPUTE_CSUM** (automatically recompute the
 * 		checksum for the packet after storing the bytes) and
 * 		**BPF_F_INVALIDATE_HASH** (set *skb*\ **->hash**, *skb*\
 * 		**->swhash** and *skb*\ **->l4hash** to 0).
 *
 * 		A call to this helper is susceptible to change the underlying
 * 		packet buffer. Therefore, at load time, all checks on pointers
 * 		previously done by the verifier are invalidated and must be
 * 		performed again, if the helper is used in combination with
 * 		direct packet access.
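 *
 * 		For example (a sketch only; **ETH_ALEN** comes from
 * 		linux/if_ether.h and *new_mac* would be filled by the
 * 		program), rewriting the destination MAC address at the start
 * 		of the frame might look like:
 *
 * 		::
 *
 * 			__u8 new_mac[ETH_ALEN] = { 0 }; // filled by the program
 *
 * 			bpf_skb_store_bytes(skb, 0, new_mac, ETH_ALEN,
 * 					    BPF_F_RECOMPUTE_CSUM);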
 * 	Return
 * 		0 on success, or a negative error in case of failure.
 *
 * int bpf_l3_csum_replace(struct sk_buff *skb, u32 offset, u64 from, u64 to, u64 size)
 * 	Description
 * 		Recompute the layer 3 (e.g. IP) checksum for the packet
 * 		associated to *skb*. Computation is incremental, so the helper
 * 		must know the former value of the header field that was
 * 		modified (*from*), the new value of this field (*to*), and the
 * 		number of bytes (2 or 4) for this field, stored in *size*.
 * 		Alternatively, it is possible to store the difference between
 * 		the previous and the new values of the header field in *to*, by
 * 		setting *from* and *size* to 0. For both methods, *offset*
 * 		indicates the location of the IP checksum within the packet.
 *
 * 		This helper works in combination with **bpf_csum_diff**\ (),
 * 		which does not update the checksum in-place, but offers more
 * 		flexibility and can handle sizes larger than 2 or 4 for the
 * 		checksum to update.
 *
 * 		A call to this helper is susceptible to change the underlying
 * 		packet buffer. Therefore, at load time, all checks on pointers
 * 		previously done by the verifier are invalidated and must be
 * 		performed again, if the helper is used in combination with
 * 		direct packet access.
 * 	Return
 * 		0 on success, or a negative error in case of failure.
 *
 * int bpf_l4_csum_replace(struct sk_buff *skb, u32 offset, u64 from, u64 to, u64 flags)
 * 	Description
 * 		Recompute the layer 4 (e.g. TCP, UDP or ICMP) checksum for the
 * 		packet associated to *skb*. Computation is incremental, so the
 * 		helper must know the former value of the header field that was
 * 		modified (*from*), the new value of this field (*to*), and the
 * 		number of bytes (2 or 4) for this field, stored on the lowest
 * 		four bits of *flags*. Alternatively, it is possible to store
 * 		the difference between the previous and the new values of the
 * 		header field in *to*, by setting *from* and the four lowest
 * 		bits of *flags* to 0. For both methods, *offset* indicates the
 * 		location of the IP checksum within the packet. In addition to
 * 		the size of the field, *flags* can be added (bitwise OR) actual
 * 		flags. With **BPF_F_MARK_MANGLED_0**, a null checksum is left
 * 		untouched (unless **BPF_F_MARK_ENFORCE** is added as well), and
 * 		for updates resulting in a null checksum the value is set to
 * 		**CSUM_MANGLED_0** instead. Flag **BPF_F_PSEUDO_HDR** indicates
 * 		the checksum is to be computed against a pseudo-header.
 *
 * 		This helper works in combination with **bpf_csum_diff**\ (),
 * 		which does not update the checksum in-place, but offers more
 * 		flexibility and can handle sizes larger than 2 or 4 for the
 * 		checksum to update.
 *
 * 		A call to this helper is susceptible to change the underlying
 * 		packet buffer. Therefore, at load time, all checks on pointers
 * 		previously done by the verifier are invalidated and must be
 * 		performed again, if the helper is used in combination with
 * 		direct packet access.
 * 	Return
 * 		0 on success, or a negative error in case of failure.
 *
 * int bpf_tail_call(void *ctx, struct bpf_map *prog_array_map, u32 index)
 * 	Description
 * 		This special helper is used to trigger a "tail call", or in
 * 		other words, to jump into another eBPF program. The same stack
 * 		frame is used (but values on stack and in registers for the
 * 		caller are not accessible to the callee). This mechanism allows
 * 		for program chaining, either for raising the maximum number of
 * 		available eBPF instructions, or to execute given programs in
 * 		conditional blocks. For security reasons, there is an upper
 * 		limit to the number of successive tail calls that can be
 * 		performed.
 *
 * 		Upon call of this helper, the program attempts to jump into a
 * 		program referenced at index *index* in *prog_array_map*, a
 * 		special map of type **BPF_MAP_TYPE_PROG_ARRAY**, and passes
 * 		*ctx*, a pointer to the context.
 *
 * 		If the call succeeds, the kernel immediately runs the first
 * 		instruction of the new program. This is not a function call,
 * 		and it never returns to the previous program. If the call
 * 		fails, then the helper has no effect, and the caller continues
 * 		to run its subsequent instructions. A call can fail if the
 * 		destination program for the jump does not exist (i.e. *index*
 * 		is greater than or equal to the number of entries in
 * 		*prog_array_map*), or if the maximum number of tail calls has
 * 		been reached for this chain of programs. This limit is defined
 * 		in the kernel by the macro **MAX_TAIL_CALL_CNT** (not
 * 		accessible to user space), which is currently set to 32.
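 *
 * 		A typical pattern (sketch only; *jmp_table* is a hypothetical
 * 		**BPF_MAP_TYPE_PROG_ARRAY** map populated from user space):
 *
 * 		::
 *
 * 			bpf_tail_call(ctx, &jmp_table, index);
 *
 * 			// Only reached if the tail call failed; fall back.
 * 			return 0;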
 * 	Return
 * 		0 on success, or a negative error in case of failure.
 *
 * int bpf_clone_redirect(struct sk_buff *skb, u32 ifindex, u64 flags)
 * 	Description
 * 		Clone and redirect the packet associated to *skb* to another
 * 		net device of index *ifindex*. Both ingress and egress
 * 		interfaces can be used for redirection. The **BPF_F_INGRESS**
 * 		value in *flags* is used to make the distinction (ingress path
 * 		is selected if the flag is present, egress path otherwise).
 * 		This is the only flag supported for now.
 *
 * 		In comparison with **bpf_redirect**\ () helper,
 * 		**bpf_clone_redirect**\ () has the associated cost of
 * 		duplicating the packet buffer, but this can be executed out of
 * 		the eBPF program. Conversely, **bpf_redirect**\ () is more
 * 		efficient, but it is handled through an action code where the
 * 		redirection happens only after the eBPF program has returned.
 *
 * 		A call to this helper is susceptible to change the underlying
 * 		packet buffer. Therefore, at load time, all checks on pointers
 * 		previously done by the verifier are invalidated and must be
 * 		performed again, if the helper is used in combination with
 * 		direct packet access.
 * 	Return
 * 		0 on success, or a negative error in case of failure.
 *
 * u64 bpf_get_current_pid_tgid(void)
 * 	Return
 * 		A 64-bit integer containing the current tgid and pid, and
 * 		created as such:
 * 		*current_task*\ **->tgid << 32 \|**
 * 		*current_task*\ **->pid**.
 *
 * u64 bpf_get_current_uid_gid(void)
 * 	Return
 * 		A 64-bit integer containing the current GID and UID, and
 * 		created as such: *current_gid* **<< 32 \|** *current_uid*.
 *
 * int bpf_get_current_comm(char *buf, u32 size_of_buf)
 * 	Description
 * 		Copy the **comm** attribute of the current task into *buf* of
 * 		*size_of_buf*. The **comm** attribute contains the name of
 * 		the executable (excluding the path) for the current task. The
 * 		*size_of_buf* must be strictly positive. On success, the
 * 		helper makes sure that the *buf* is NUL-terminated. On failure,
 * 		it is filled with zeroes.
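 *
 * 		For example (sketch; a literal 16, matching the kernel's
 * 		**TASK_COMM_LEN**, keeps the snippet self-contained):
 *
 * 		::
 *
 * 			char comm[16];
 *
 * 			if (bpf_get_current_comm(comm, sizeof(comm)) == 0) {
 * 				// comm now holds the task name, NUL-terminated
 * 			}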
 * 	Return
 * 		0 on success, or a negative error in case of failure.
 *
 * u32 bpf_get_cgroup_classid(struct sk_buff *skb)
 * 	Description
 * 		Retrieve the classid for the current task, i.e. for the net_cls
 * 		cgroup to which *skb* belongs.
 *
 * 		This helper can be used on TC egress path, but not on ingress.
 *
 * 		The net_cls cgroup provides an interface to tag network packets
 * 		based on a user-provided identifier for all traffic coming from
 * 		the tasks belonging to the related cgroup. See also the related
 * 		kernel documentation, available from the Linux sources in file
 * 		*Documentation/cgroup-v1/net_cls.txt*.
 *
 * 		The Linux kernel has two versions for cgroups: there are
 * 		cgroups v1 and cgroups v2. Both are available to users, who can
 * 		use a mixture of them, but note that the net_cls cgroup is for
 * 		cgroup v1 only. This makes it incompatible with BPF programs
 * 		run on cgroups, which is a cgroup-v2-only feature (a socket can
 * 		only hold data for one version of cgroups at a time).
 *
 * 		This helper is only available if the kernel was compiled with
 * 		the **CONFIG_CGROUP_NET_CLASSID** configuration option set to
 * 		"**y**" or to "**m**".
 * 	Return
 * 		The classid, or 0 for the default unconfigured classid.
 *
 * int bpf_skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci)
 * 	Description
 * 		Push a *vlan_tci* (VLAN tag control information) of protocol
 * 		*vlan_proto* to the packet associated to *skb*, then update
 * 		the checksum. Note that if *vlan_proto* is different from
 * 		**ETH_P_8021Q** and **ETH_P_8021AD**, it is considered to
 * 		be **ETH_P_8021Q**.
 *
 * 		A call to this helper is susceptible to change the underlying
 * 		packet buffer. Therefore, at load time, all checks on pointers
 * 		previously done by the verifier are invalidated and must be
 * 		performed again, if the helper is used in combination with
 * 		direct packet access.
 * 	Return
 * 		0 on success, or a negative error in case of failure.
 *
 * int bpf_skb_vlan_pop(struct sk_buff *skb)
 * 	Description
 * 		Pop a VLAN header from the packet associated to *skb*.
 *
 * 		A call to this helper is susceptible to change the underlying
 * 		packet buffer. Therefore, at load time, all checks on pointers
 * 		previously done by the verifier are invalidated and must be
 * 		performed again, if the helper is used in combination with
 * 		direct packet access.
 * 	Return
 * 		0 on success, or a negative error in case of failure.
 *
 * int bpf_skb_get_tunnel_key(struct sk_buff *skb, struct bpf_tunnel_key *key, u32 size, u64 flags)
 * 	Description
 * 		Get tunnel metadata. This helper takes a pointer *key* to an
 * 		empty **struct bpf_tunnel_key** of **size**, that will be
 * 		filled with tunnel metadata for the packet associated to *skb*.
 * 		The *flags* can be set to **BPF_F_TUNINFO_IPV6**, which
 * 		indicates that the tunnel is based on IPv6 protocol instead of
 * 		IPv4.
 *
 * 		The **struct bpf_tunnel_key** is an object that generalizes the
 * 		principal parameters used by various tunneling protocols into a
 * 		single struct. This way, it can be used to easily make a
 * 		decision based on the contents of the encapsulation header,
 * 		"summarized" in this struct. In particular, it holds the IP
 * 		address of the remote end (IPv4 or IPv6, depending on the case)
 * 		in *key*\ **->remote_ipv4** or *key*\ **->remote_ipv6**. Also,
 * 		this struct exposes the *key*\ **->tunnel_id**, which is
 * 		generally mapped to a VNI (Virtual Network Identifier), making
 * 		it programmable together with the **bpf_skb_set_tunnel_key**\
 * 		() helper.
 *
 * 		Let's imagine that the following code is part of a program
 * 		attached to the TC ingress interface, on one end of a GRE
 * 		tunnel, and is supposed to filter out all messages coming from
 * 		remote ends with IPv4 address other than 10.0.0.1:
 *
 * 		::
 *
 * 			int ret;
 * 			struct bpf_tunnel_key key = {};
 *
 * 			ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key), 0);
 * 			if (ret < 0)
 * 				return TC_ACT_SHOT;	// drop packet
 *
 * 			if (key.remote_ipv4 != 0x0a000001)
 * 				return TC_ACT_SHOT;	// drop packet
 *
 * 			return TC_ACT_OK;		// accept packet
 *
 * 		This interface can also be used with all encapsulation devices
 * 		that can operate in "collect metadata" mode: instead of having
 * 		one network device per specific configuration, the "collect
 * 		metadata" mode only requires a single device where the
 * 		configuration can be extracted from this helper.
 *
 * 		This can be used together with various tunnels such as VXLan,
 * 		Geneve, GRE or IP in IP (IPIP).
 * 	Return
 * 		0 on success, or a negative error in case of failure.
 *
 * int bpf_skb_set_tunnel_key(struct sk_buff *skb, struct bpf_tunnel_key *key, u32 size, u64 flags)
 * 	Description
 * 		Populate tunnel metadata for packet associated to *skb*. The
 * 		tunnel metadata is set to the contents of *key*, of *size*. The
 * 		*flags* can be set to a combination of the following values:
 *
 * 		**BPF_F_TUNINFO_IPV6**
 * 			Indicate that the tunnel is based on IPv6 protocol
 * 			instead of IPv4.
 * 		**BPF_F_ZERO_CSUM_TX**
 * 			For IPv4 packets, add a flag to tunnel metadata
 * 			indicating that checksum computation should be skipped
 * 			and checksum set to zeroes.
 * 		**BPF_F_DONT_FRAGMENT**
 * 			Add a flag to tunnel metadata indicating that the
 * 			packet should not be fragmented.
 * 		**BPF_F_SEQ_NUMBER**
 * 			Add a flag to tunnel metadata indicating that a
 * 			sequence number should be added to tunnel header before
 * 			sending the packet. This flag was added for GRE
 * 			encapsulation, but might be used with other protocols
 * 			as well in the future.
 *
 * 		Here is a typical usage on the transmit path:
 *
 * 		::
 *
 * 			struct bpf_tunnel_key key;
 * 			     populate key ...
 * 			bpf_skb_set_tunnel_key(skb, &key, sizeof(key), 0);
 * 			bpf_clone_redirect(skb, vxlan_dev_ifindex, 0);
 *
 * 		See also the description of the **bpf_skb_get_tunnel_key**\ ()
 * 		helper for additional information.
 * 	Return
 * 		0 on success, or a negative error in case of failure.
 *
 * u64 bpf_perf_event_read(struct bpf_map *map, u64 flags)
 * 	Description
 * 		Read the value of a perf event counter. This helper relies on a
 * 		*map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. The nature of
 * 		the perf event counter is selected when *map* is updated with
 * 		perf event file descriptors. The *map* is an array whose size
 * 		is the number of available CPUs, and each cell contains a value
 * 		relative to one CPU. The value to retrieve is indicated by
 * 		*flags*, that contains the index of the CPU to look up, masked
 * 		with **BPF_F_INDEX_MASK**. Alternatively, *flags* can be set to
 * 		**BPF_F_CURRENT_CPU** to indicate that the value for the
 * 		current CPU should be retrieved.
 *
 * 		Note that before Linux 4.13, only hardware perf events can be
 * 		retrieved.
 *
 * 		Also, be aware that the newer helper
 * 		**bpf_perf_event_read_value**\ () is recommended over
 * 		**bpf_perf_event_read**\ () in general. The latter has some ABI
 * 		quirks where error and counter value are used as a return code
 * 		(which is wrong to do since ranges may overlap). This issue is
 * 		fixed with **bpf_perf_event_read_value**\ (), which at the same
 * 		time provides more features over the **bpf_perf_event_read**\
 * 		() interface. Please refer to the description of
 * 		**bpf_perf_event_read_value**\ () for details.
 * 	Return
 * 		The value of the perf event counter read from the map, or a
 * 		negative error code in case of failure.
 *
 * int bpf_redirect(u32 ifindex, u64 flags)
 * 	Description
 * 		Redirect the packet to another net device of index *ifindex*.
 * 		This helper is somewhat similar to **bpf_clone_redirect**\
 * 		(), except that the packet is not cloned, which provides
 * 		increased performance.
 *
 * 		Except for XDP, both ingress and egress interfaces can be used
 * 		for redirection. The **BPF_F_INGRESS** value in *flags* is used
 * 		to make the distinction (ingress path is selected if the flag
 * 		is present, egress path otherwise). Currently, XDP only
 * 		supports redirection to the egress interface, and accepts no
 * 		flag at all.
 *
 * 		The same effect can be attained with the more generic
 * 		**bpf_redirect_map**\ (), which requires specific maps to be
 * 		used but offers better performance.
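 *
 * 		For instance, an XDP program might hand the frame to another
 * 		interface as follows (sketch; *out_ifindex* is assumed to be
 * 		resolved by the program):
 *
 * 		::
 *
 * 			return bpf_redirect(out_ifindex, 0);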
 * 	Return
 * 		For XDP, the helper returns **XDP_REDIRECT** on success or
 * 		**XDP_ABORTED** on error. For other program types, the values
 * 		are **TC_ACT_REDIRECT** on success or **TC_ACT_SHOT** on
 * 		error.
 *
 * u32 bpf_get_route_realm(struct sk_buff *skb)
 * 	Description
 * 		Retrieve the realm of the route, that is to say the
 * 		**tclassid** field of the destination for the *skb*. The
 * 		identifier retrieved is a user-provided tag, similar to the
 * 		one used with the net_cls cgroup (see description for
 * 		**bpf_get_cgroup_classid**\ () helper), but here this tag is
 * 		held by a route (a destination entry), not by a task.
 *
 * 		Retrieving this identifier works with the clsact TC egress hook
 * 		(see also **tc-bpf(8)**), or alternatively on conventional
 * 		classful egress qdiscs, but not on TC ingress path. In case of
 * 		clsact TC egress hook, this has the advantage that, internally,
 * 		the destination entry has not been dropped yet in the transmit
 * 		path. Therefore, the destination entry does not need to be
 * 		artificially held via **netif_keep_dst**\ () for a classful
 * 		qdisc until the *skb* is freed.
 *
 * 		This helper is available only if the kernel was compiled with
 * 		**CONFIG_IP_ROUTE_CLASSID** configuration option.
 * 	Return
 * 		The realm of the route for the packet associated to *skb*, or 0
 * 		if none was found.
889 * int bpf_perf_event_output(struct pt_reg *ctx, struct bpf_map *map, u64 flags, void *data, u64 size)
890 * Description
891 * Write raw *data* blob into a special BPF perf event held by
892 * *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf
893 * event must have the following attributes: **PERF_SAMPLE_RAW**
894 * as **sample_type**, **PERF_TYPE_SOFTWARE** as **type**, and
895 * **PERF_COUNT_SW_BPF_OUTPUT** as **config**.
896 *
897 * The *flags* are used to indicate the index in *map* for which
898 * the value must be put, masked with **BPF_F_INDEX_MASK**.
899 * Alternatively, *flags* can be set to **BPF_F_CURRENT_CPU**
900 * to indicate that the index of the current CPU core should be
901 * used.
902 *
903 * The value to write, of *size*, is passed through eBPF stack and
904 * pointed by *data*.
905 *
906 * The context of the program *ctx* needs also be passed to the
907 * helper.
908 *
909 * On user space, a program willing to read the values needs to
910 * call **perf_event_open**\ () on the perf event (either for
911 * one or for all CPUs) and to store the file descriptor into the
912 * *map*. This must be done before the eBPF program can send data
913 * into it. An example is available in file
914 * *samples/bpf/trace_output_user.c* in the Linux kernel source
915 * tree (the eBPF program counterpart is in
916 * *samples/bpf/trace_output_kern.c*).
917 *
918 * **bpf_perf_event_output**\ () achieves better performance
919 * than **bpf_trace_printk**\ () for sharing data with user
920 * space, and is much better suitable for streaming data from eBPF
921 * programs.
922 *
923 * Note that this helper is not restricted to tracing use cases
924 * and can be used with programs attached to TC or XDP as well,
925 * where it allows for passing data to user space listeners. Data
926 * can be:
927 *
928 * * Only custom structs,
929 * * Only the packet payload, or
930 * * A combination of both.
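 *
 * 		A minimal sketch of the call itself (with *events* being a
 * 		hypothetical **BPF_MAP_TYPE_PERF_EVENT_ARRAY** map and *e* a
 * 		struct built on the eBPF stack):
 *
 * 		::
 *
 * 			bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
 * 					      &e, sizeof(e));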
 * 	Return
 * 		0 on success, or a negative error in case of failure.
 *
 * int bpf_skb_load_bytes(const struct sk_buff *skb, u32 offset, void *to, u32 len)
 * 	Description
 * 		This helper was provided as an easy way to load data from a
 * 		packet. It can be used to load *len* bytes from *offset* from
 * 		the packet associated to *skb*, into the buffer pointed by
 * 		*to*.
 *
 * 		Since Linux 4.7, usage of this helper has mostly been replaced
 * 		by "direct packet access", enabling packet data to be
 * 		manipulated with *skb*\ **->data** and *skb*\ **->data_end**
 * 		pointing respectively to the first byte of packet data and to
 * 		the byte after the last byte of packet data. However, it
 * 		remains useful if one wishes to read large quantities of data
 * 		at once from a packet into the eBPF stack.
 * 	Return
 * 		0 on success, or a negative error in case of failure.
 *
 * int bpf_get_stackid(struct pt_reg *ctx, struct bpf_map *map, u64 flags)
 * 	Description
 * 		Walk a user or a kernel stack and return its id. To achieve
 * 		this, the helper needs *ctx*, which is a pointer to the context
 * 		on which the tracing program is executed, and a pointer to a
 * 		*map* of type **BPF_MAP_TYPE_STACK_TRACE**.
 *
 * 		The last argument, *flags*, holds the number of stack frames to
 * 		skip (from 0 to 255), masked with
 * 		**BPF_F_SKIP_FIELD_MASK**. The next bits can be used to set
 * 		a combination of the following flags:
 *
 * 		**BPF_F_USER_STACK**
 * 			Collect a user space stack instead of a kernel stack.
 * 		**BPF_F_FAST_STACK_CMP**
 * 			Compare stacks by hash only.
 * 		**BPF_F_REUSE_STACKID**
 * 			If two different stacks hash into the same *stackid*,
 * 			discard the old one.
 *
 * 		The stack id retrieved is a 32-bit integer handle which
 * 		can be further combined with other data (including other stack
 * 		ids) and used as a key into maps. This can be useful for
 * 		generating a variety of graphs (such as flame graphs or off-cpu
 * 		graphs).
 *
 * 		For walking a stack, this helper is an improvement over
 * 		**bpf_probe_read**\ (), which can be used with unrolled loops
 * 		but is not efficient and consumes a lot of eBPF instructions.
 * 		Instead, **bpf_get_stackid**\ () can collect up to
 * 		**PERF_MAX_STACK_DEPTH** both kernel and user frames. Note that
 * 		this limit can be controlled with the **sysctl** program, and
 * 		that it should be manually increased in order to profile long
 * 		user stacks (such as stacks for Java programs). To do so, use:
 *
 * 		::
 *
 * 			# sysctl kernel.perf_event_max_stack=<new value>
 *
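 * 		As an illustrative sketch (with *stack_traces* being a
 * 		hypothetical **BPF_MAP_TYPE_STACK_TRACE** map), collecting the
 * 		current user stack could look like:
 *
 * 		::
 *
 * 			int stackid = bpf_get_stackid(ctx, &stack_traces,
 * 						      BPF_F_USER_STACK);
 * 			if (stackid >= 0) {
 * 				// use stackid as a key into another map
 * 			}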
 * 	Return
 * 		The positive or null stack id on success, or a negative error
 * 		in case of failure.
 *
 * s64 bpf_csum_diff(__be32 *from, u32 from_size, __be32 *to, u32 to_size, __wsum seed)
 * 	Description
 * 		Compute a checksum difference, from the raw buffer pointed by
 * 		*from*, of length *from_size* (that must be a multiple of 4),
 * 		towards the raw buffer pointed by *to*, of size *to_size*
 * 		(same remark). An optional *seed* can be added to the value
 * 		(this can be cascaded, the seed may come from a previous call
 * 		to the helper).
 *
 * 		This is flexible enough to be used in several ways:
 *
 * 		* With *from_size* == 0, *to_size* > 0 and *seed* set to
 * 		  checksum, it can be used when pushing new data.
 * 		* With *from_size* > 0, *to_size* == 0 and *seed* set to
 * 		  checksum, it can be used when removing data from a packet.
 * 		* With *from_size* > 0, *to_size* > 0 and *seed* set to 0, it
 * 		  can be used to compute a diff. Note that *from_size* and
 * 		  *to_size* do not need to be equal.
 *
 * 		This helper can be used in combination with
 * 		**bpf_l3_csum_replace**\ () and **bpf_l4_csum_replace**\ (), to
 * 		which one can feed in the difference computed with
 * 		**bpf_csum_diff**\ ().
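 *
 * 		For example (sketch only; assumes the rewritten 4-byte field
 * 		belongs to the pseudo-header, e.g. an IP address, and that
 * 		*csum_off* points at the L4 checksum):
 *
 * 		::
 *
 * 			__wsum diff = bpf_csum_diff(&old_word, 4,
 * 						    &new_word, 4, 0);
 *
 * 			bpf_l4_csum_replace(skb, csum_off, 0, diff,
 * 					    BPF_F_PSEUDO_HDR);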
 * 	Return
 * 		The checksum result, or a negative error code in case of
 * 		failure.
 *
 * int bpf_skb_get_tunnel_opt(struct sk_buff *skb, u8 *opt, u32 size)
 * 	Description
 * 		Retrieve tunnel options metadata for the packet associated to
 * 		*skb*, and store the raw tunnel option data to the buffer *opt*
 * 		of *size*.
 *
 * 		This helper can be used with encapsulation devices that can
 * 		operate in "collect metadata" mode (please refer to the related
 * 		note in the description of **bpf_skb_get_tunnel_key**\ () for
 * 		more details). A particular example where this can be used is
 * 		in combination with the Geneve encapsulation protocol, where it
 * 		allows for pushing (with **bpf_skb_set_tunnel_opt**\ () helper)
 * 		and retrieving arbitrary TLVs (Type-Length-Value headers) from
 * 		the eBPF program. This allows for full customization of these
 * 		headers.
 * 	Return
 * 		The size of the option data retrieved.
 *
 * int bpf_skb_set_tunnel_opt(struct sk_buff *skb, u8 *opt, u32 size)
 * 	Description
 * 		Set tunnel options metadata for the packet associated to *skb*
 * 		to the option data contained in the raw buffer *opt* of *size*.
 *
 * 		See also the description of the **bpf_skb_get_tunnel_opt**\ ()
 * 		helper for additional information.
 * 	Return
 * 		0 on success, or a negative error in case of failure.
 *
 * int bpf_skb_change_proto(struct sk_buff *skb, __be16 proto, u64 flags)
 * 	Description
 * 		Change the protocol of the *skb* to *proto*. Currently
 * 		supported are transition from IPv4 to IPv6, and from IPv6 to
 * 		IPv4. The helper takes care of the groundwork for the
 * 		transition, including resizing the socket buffer. The eBPF
 * 		program is expected to fill the new headers, if any, via
 * 		**skb_store_bytes**\ () and to recompute the checksums with
 * 		**bpf_l3_csum_replace**\ () and **bpf_l4_csum_replace**\
 * 		(). The main case for this helper is to perform NAT64
 * 		operations out of an eBPF program.
 *
 * 		Internally, the GSO type is marked as dodgy so that headers are
 * 		checked and segments are recalculated by the GSO/GRO engine.
 * 		The size for GSO target is adapted as well.
 *
 * 		All values for *flags* are reserved for future usage, and must
 * 		be left at zero.
 *
 * 		A call to this helper is susceptible to change the underlying
 * 		packet buffer. Therefore, at load time, all checks on pointers
 * 		previously done by the verifier are invalidated and must be
 * 		performed again, if the helper is used in combination with
 * 		direct packet access.
 * 	Return
 * 		0 on success, or a negative error in case of failure.
 *
 * int bpf_skb_change_type(struct sk_buff *skb, u32 type)
 * 	Description
 * 		Change the packet type for the packet associated to *skb*. This
 * 		comes down to setting *skb*\ **->pkt_type** to *type*, except
 * 		the eBPF program does not have a write access to *skb*\
 * 		**->pkt_type** beside this helper. Using a helper here allows
 * 		for graceful handling of errors.
 *
 * 		The major use case is to change incoming *skb*s to
 * 		**PACKET_HOST** in a programmatic way instead of having to
 * 		recirculate via **redirect**\ (..., **BPF_F_INGRESS**), for
 * 		example.
 *
 * 		Note that *type* only allows certain values. At this time, they
 * 		are:
 *
 * 		**PACKET_HOST**
 * 			Packet is for us.
 * 		**PACKET_BROADCAST**
 * 			Send packet to all.
 * 		**PACKET_MULTICAST**
 * 			Send packet to group.
 * 		**PACKET_OTHERHOST**
 * 			Send packet to someone else.
 * 	Return
 * 		0 on success, or a negative error in case of failure.
 *
 * int bpf_skb_under_cgroup(struct sk_buff *skb, struct bpf_map *map, u32 index)
 * 	Description
 * 		Check whether *skb* is a descendant of the cgroup2 held by
 * 		*map* of type **BPF_MAP_TYPE_CGROUP_ARRAY**, at *index*.
 * 	Return
 * 		The return value depends on the result of the test, and can be:
 *
 * 		* 0, if the *skb* failed the cgroup2 descendant test.
 * 		* 1, if the *skb* succeeded the cgroup2 descendant test.
 * 		* A negative error code, if an error occurred.
 *
 * u32 bpf_get_hash_recalc(struct sk_buff *skb)
 * 	Description
 * 		Retrieve the hash of the packet, *skb*\ **->hash**. If it is
 * 		not set, in particular if the hash was cleared due to mangling,
 * 		recompute this hash. Later accesses to the hash can be done
 * 		directly with *skb*\ **->hash**.
 *
 * 		Calling **bpf_set_hash_invalid**\ (), changing a packet
 * 		prototype with **bpf_skb_change_proto**\ (), or calling
 * 		**bpf_skb_store_bytes**\ () with the
 * 		**BPF_F_INVALIDATE_HASH** are actions susceptible to clear
 * 		the hash and to trigger a new computation for the next call to
 * 		**bpf_get_hash_recalc**\ ().
 * 	Return
 * 		The 32-bit hash.
 *
 * u64 bpf_get_current_task(void)
 * 	Return
 * 		A pointer to the current task struct.
 *
 * int bpf_probe_write_user(void *dst, const void *src, u32 len)
 * 	Description
 * 		Attempt in a safe way to write *len* bytes from the buffer
 * 		*src* to *dst* in memory. It only works for threads that are in
 * 		user context, and *dst* must be a valid user space address.
 *
 * 		This helper should not be used to implement any kind of
 * 		security mechanism because of TOC-TOU attacks, but rather to
 * 		debug, divert, and manipulate execution of semi-cooperative
 * 		processes.
 *
 * 		Keep in mind that this feature is meant for experiments, and it
 * 		has a risk of crashing the system and running programs.
 * 		Therefore, when an eBPF program using this helper is attached,
 * 		a warning including PID and process name is printed to kernel
 * 		logs.
 * 	Return
 * 		0 on success, or a negative error in case of failure.
 *
 * int bpf_current_task_under_cgroup(struct bpf_map *map, u32 index)
 * 	Description
 * 		Check whether the probe is being run in the context of a given
 * 		subset of the cgroup2 hierarchy. The cgroup2 to test is held by
 * 		*map* of type **BPF_MAP_TYPE_CGROUP_ARRAY**, at *index*.
 * 	Return
 * 		The return value depends on the result of the test, and can be:
 *
 * 		* 0, if the current task belongs to the cgroup2.
 * 		* 1, if the current task does not belong to the cgroup2.
 * 		* A negative error code, if an error occurred.
 *
 * int bpf_skb_change_tail(struct sk_buff *skb, u32 len, u64 flags)
 * 	Description
 * 		Resize (trim or grow) the packet associated to *skb* to the
 * 		new *len*. The *flags* are reserved for future usage, and must
 * 		be left at zero.
 *
 * 		The basic idea is that the helper performs the needed work to
 * 		change the size of the packet, then the eBPF program rewrites
 * 		the rest via helpers like **bpf_skb_store_bytes**\ (),
 * 		**bpf_l3_csum_replace**\ (), **bpf_l4_csum_replace**\ ()
 * 		and others. This helper is a slow path utility intended for
 * 		replies with control messages. And because it is targeted for
 * 		slow path, the helper itself can afford to be slow: it
 * 		implicitly linearizes, unclones and drops offloads from the
 * 		*skb*.
 *
 * 		A call to this helper is susceptible to change the underlying
 * 		packet buffer. Therefore, at load time, all checks on pointers
 * 		previously done by the verifier are invalidated and must be
 * 		performed again, if the helper is used in combination with
 * 		direct packet access.
 * 	Return
 * 		0 on success, or a negative error in case of failure.
 *
 * int bpf_skb_pull_data(struct sk_buff *skb, u32 len)
 * 	Description
 * 		Pull in non-linear data in case the *skb* is non-linear and not
 * 		all of *len* are part of the linear section. Make *len* bytes
 * 		from *skb* readable and writable. If a zero value is passed for
 * 		*len*, then the whole length of the *skb* is pulled.
 *
 * 		This helper is only needed for reading and writing with direct
 * 		packet access.
 *
 * 		For direct packet access, testing that offsets to access
 * 		are within packet boundaries (test on *skb*\ **->data_end**) is
 * 		susceptible to fail if offsets are invalid, or if the requested
 * 		data is in non-linear parts of the *skb*. On failure the
 * 		program can just bail out, or in the case of a non-linear
 * 		buffer, use a helper to make the data available. The
 * 		**bpf_skb_load_bytes**\ () helper is a first solution to access
 * 		the data. Another one consists in using **bpf_skb_pull_data**
 * 		to pull in the non-linear parts once, then retesting and
 * 		eventually accessing the data.
 *
 * 		At the same time, this also makes sure the *skb* is uncloned,
 * 		which is a necessary condition for direct write. As this needs
 * 		to be an invariant for the write part only, the verifier
 * 		detects writes and adds a prologue that is calling
 * 		**bpf_skb_pull_data()** to effectively unclone the *skb* from
 * 		the very beginning in case it is indeed cloned.
 *
 * 		A call to this helper is susceptible to change the underlying
 * 		packet buffer. Therefore, at load time, all checks on pointers
 * 		previously done by the verifier are invalidated and must be
 * 		performed again, if the helper is used in combination with
 * 		direct packet access.
 * 	Return
 * 		0 on success, or a negative error in case of failure.
 *
 * s64 bpf_csum_update(struct sk_buff *skb, __wsum csum)
 * 	Description
 * 		Add the checksum *csum* into *skb*\ **->csum** in case the
 * 		driver has supplied a checksum for the entire packet into that
 * 		field. Return an error otherwise. This helper is intended to be
 * 		used in combination with **bpf_csum_diff**\ (), in particular
 * 		when the checksum needs to be updated after data has been
 * 		written into the packet through direct packet access.
 * 	Return
 * 		The checksum on success, or a negative error code in case of
 * 		failure.
 *
 * void bpf_set_hash_invalid(struct sk_buff *skb)
 * 	Description
 * 		Invalidate the current *skb*\ **->hash**. It can be used after
 * 		mangling on headers through direct packet access, in order to
 * 		indicate that the hash is outdated and to trigger a
 * 		recalculation the next time the kernel tries to access this
 * 		hash or when the **bpf_get_hash_recalc**\ () helper is called.
 *
 * int bpf_get_numa_node_id(void)
 * 	Description
 * 		Return the id of the current NUMA node. The primary use case
 * 		for this helper is the selection of sockets for the local NUMA
 * 		node, when the program is attached to sockets using the
 * 		**SO_ATTACH_REUSEPORT_EBPF** option (see also **socket(7)**),
 * 		but the helper is also available to other eBPF program types,
 * 		similarly to **bpf_get_smp_processor_id**\ ().
 * 	Return
 * 		The id of current NUMA node.
 *
 * int bpf_skb_change_head(struct sk_buff *skb, u32 len, u64 flags)
 * 	Description
 * 		Grows headroom of packet associated to *skb* and adjusts the
 * 		offset of the MAC header accordingly, adding *len* bytes of
 * 		space. It automatically extends and reallocates memory as
 * 		required.
 *
 * 		This helper can be used on a layer 3 *skb* to push a MAC header
 * 		for redirection into a layer 2 device.
 *
 * 		All values for *flags* are reserved for future usage, and must
 * 		be left at zero.
 *
 * 		A call to this helper is susceptible to change the underlying
 * 		packet buffer. Therefore, at load time, all checks on pointers
 * 		previously done by the verifier are invalidated and must be
 * 		performed again, if the helper is used in combination with
 * 		direct packet access.
 * 	Return
 * 		0 on success, or a negative error in case of failure.
 *
 * int bpf_xdp_adjust_head(struct xdp_buff *xdp_md, int delta)
 * 	Description
 * 		Adjust (move) *xdp_md*\ **->data** by *delta* bytes. Note that
 * 		it is possible to use a negative value for *delta*. This helper
 * 		can be used to prepare the packet for pushing or popping
 * 		headers.
 *
 * 		A call to this helper is susceptible to change the underlying
 * 		packet buffer. Therefore, at load time, all checks on pointers
 * 		previously done by the verifier are invalidated and must be
 * 		performed again, if the helper is used in combination with
 * 		direct packet access.
 * 	Return
 * 		0 on success, or a negative error in case of failure.
 *
 * int bpf_probe_read_str(void *dst, int size, const void *unsafe_ptr)
 * 	Description
 * 		Copy a NUL terminated string from an unsafe address
 * 		*unsafe_ptr* to *dst*. The *size* should include the
 * 		terminating NUL byte. In case the string length is smaller than
 * 		*size*, the target is not padded with further NUL bytes. If the
 * 		string length is larger than *size*, just *size*-1 bytes are
 * 		copied and the last byte is set to NUL.
 *
 * 		On success, the length of the copied string is returned. This
 * 		makes this helper useful in tracing programs for reading
 * 		strings, and more importantly to get its length at runtime. See
 * 		the following snippet:
 *
 * 		::
 *
 * 			SEC("kprobe/sys_open")
 * 			void bpf_sys_open(struct pt_regs *ctx)
 * 			{
 * 				char buf[PATHLEN]; // PATHLEN is defined to 256
 * 				int res = bpf_probe_read_str(buf, sizeof(buf),
 * 				                             ctx->di);
 *
 * 				// Consume buf, for example push it to
 * 				// userspace via bpf_perf_event_output(); we
 * 				// can use res (the string length) as event
 * 				// size, after checking its boundaries.
 * 			}
 *
 * 		In comparison, using the **bpf_probe_read()** helper here
 * 		instead to read the string would require estimating the length
 * 		at compile time, and would often result in copying more memory
 * 		than necessary.
 *
 * 		Another useful use case is when parsing individual process
 * 		arguments or individual environment variables, navigating
 * 		*current*\ **->mm->arg_start** and *current*\
 * 		**->mm->env_start**: using this helper and the return value,
 * 		one can quickly iterate at the right offset of the memory area.
 * 	Return
 * 		On success, the strictly positive length of the string,
 * 		including the trailing NUL character. On error, a negative
 * 		value.
 *
1336 * u64 bpf_get_socket_cookie(struct sk_buff *skb)
1337 * Description
1338 * If the **struct sk_buff** pointed by *skb* has a known socket,
1339 * retrieve the cookie (generated by the kernel) of this socket.
1340 * If no cookie has been set yet, generate a new cookie. Once
1341 * generated, the socket cookie remains stable for the life of the
1342 * socket. This helper can be useful for monitoring per socket
1343 * networking traffic statistics as it provides a unique socket
1344 * identifier per namespace.
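 *
 * For instance, a sketch of per-socket byte accounting, assuming a
 * hypothetical hash map *cookie_stats* keyed by the cookie:
 *
 * ::
 *
 *	__u64 cookie = bpf_get_socket_cookie(skb);
 *	__u64 *bytes = bpf_map_lookup_elem(&cookie_stats, &cookie);
 *
 *	if (bytes)
 *		__sync_fetch_and_add(bytes, skb->len);
 *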
1345 * Return
1346 * An 8-byte long non-decreasing number on success, or 0 if the
1347 * socket field is missing inside *skb*.
1348 *
1349 * u32 bpf_get_socket_uid(struct sk_buff *skb)
1350 * Return
1351 * The owner UID of the socket associated to *skb*. If the socket
1352 * is **NULL**, or if it is not a full socket (i.e. if it is a
1353 * time-wait or a request socket instead), **overflowuid** value
1354 * is returned (note that **overflowuid** might also be the actual
1355 * UID value for the socket).
1356 *
1357 * u32 bpf_set_hash(struct sk_buff *skb, u32 hash)
1358 * Description
1359 * Set the full hash for *skb* (set the field *skb*\ **->hash**)
1360 * to value *hash*.
1361 * Return
1362 * 0
1363 *
1364 * int bpf_setsockopt(struct bpf_sock_ops *bpf_socket, int level, int optname, char *optval, int optlen)
1365 * Description
1366 * Emulate a call to **setsockopt()** on the socket associated to
1367 * *bpf_socket*, which must be a full socket. The *level* at
1368 * which the option resides and the name *optname* of the option
1369 * must be specified, see **setsockopt(2)** for more information.
1370 * The option value of length *optlen* is pointed by *optval*.
1371 *
1372 * This helper actually implements a subset of **setsockopt()**.
1373 * It supports the following *level*\ s:
1374 *
1375 * * **SOL_SOCKET**, which supports the following *optname*\ s:
1376 * **SO_RCVBUF**, **SO_SNDBUF**, **SO_MAX_PACING_RATE**,
1377 * **SO_PRIORITY**, **SO_RCVLOWAT**, **SO_MARK**.
1378 * * **IPPROTO_TCP**, which supports the following *optname*\ s:
1379 * **TCP_CONGESTION**, **TCP_BPF_IW**,
1380 * **TCP_BPF_SNDCWND_CLAMP**.
1381 * * **IPPROTO_IP**, which supports *optname* **IP_TOS**.
1382 * * **IPPROTO_IPV6**, which supports *optname* **IPV6_TCLASS**.
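 *
 * As an illustration only, a **BPF_PROG_TYPE_SOCK_OPS** sketch that
 * switches an established connection to the built-in "reno"
 * congestion control algorithm:
 *
 * ::
 *
 *	char cc[] = "reno";
 *
 *	if (skops->op == BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB)
 *		bpf_setsockopt(skops, IPPROTO_TCP, TCP_CONGESTION,
 *			       cc, sizeof(cc));
 *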
1383 * Return
1384 * 0 on success, or a negative error in case of failure.
1385 *
1386 * int bpf_skb_adjust_room(struct sk_buff *skb, u32 len_diff, u32 mode, u64 flags)
1387 * Description
1388 * Grow or shrink the room for data in the packet associated to
1389 * *skb* by *len_diff*, and according to the selected *mode*.
1390 *
1391 * There is a single supported mode at this time:
1392 *
1393 * * **BPF_ADJ_ROOM_NET**: Adjust room at the network layer
1394 * (room space is added or removed below the layer 3 header).
1395 *
1396 * All values for *flags* are reserved for future usage, and must
1397 * be left at zero.
1398 *
1399 * A call to this helper may change the underlying
1400 * packet buffer. Therefore, at load time, all checks on pointers
1401 * previously done by the verifier are invalidated and must be
1402 * performed again, if the helper is used in combination with
1403 * direct packet access.
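 *
 * As a sketch, reserving room below the IP header for an assumed
 * encapsulation header of ENCAP_LEN bytes:
 *
 * ::
 *
 *	if (bpf_skb_adjust_room(skb, ENCAP_LEN, BPF_ADJ_ROOM_NET, 0))
 *		return TC_ACT_SHOT;
 *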
1404 * Return
1405 * 0 on success, or a negative error in case of failure.
1406 *
1407 * int bpf_redirect_map(struct bpf_map *map, u32 key, u64 flags)
1408 * Description
1409 * Redirect the packet to the endpoint referenced by *map* at
1410 * index *key*. Depending on its type, this *map* can contain
1411 * references to net devices (for forwarding packets through other
1412 * ports), or to CPUs (for redirecting XDP frames to another CPU;
1413 * but this is only implemented for native XDP (with driver
1414 * support) as of this writing).
1415 *
1416 * All values for *flags* are reserved for future usage, and must
1417 * be left at zero.
1418 *
1419 * When used to redirect packets to net devices, this helper
1420 * provides a significant performance increase over **bpf_redirect**\ ().
1421 * This is due to various implementation details of the underlying
1422 * mechanisms, one of which is the fact that **bpf_redirect_map**\
1423 * () tries to send packets as a "bulk" to the device.
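 *
 * A typical XDP sketch, assuming a **BPF_MAP_TYPE_DEVMAP** named
 * *tx_ports* that user space has populated with egress devices:
 *
 * ::
 *
 *	SEC("xdp")
 *	int xdp_redirect_example(struct xdp_md *ctx)
 *	{
 *		// Transmit through the device stored at index 0.
 *		return bpf_redirect_map(&tx_ports, 0, 0);
 *	}
 *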
1424 * Return
1425 * **XDP_REDIRECT** on success, or **XDP_ABORTED** on error.
1426 *
1427 * int bpf_sk_redirect_map(struct bpf_map *map, u32 key, u64 flags)
1428 * Description
1429 * Redirect the packet to the socket referenced by *map* (of type
1430 * **BPF_MAP_TYPE_SOCKMAP**) at index *key*. Both ingress and
1431 * egress interfaces can be used for redirection. The
1432 * **BPF_F_INGRESS** value in *flags* is used to make the
1433 * distinction (ingress path is selected if the flag is present,
1434 * egress path otherwise). This is the only flag supported for now.
1435 * Return
1436 * **SK_PASS** on success, or **SK_DROP** on error.
1437 *
1438 * int bpf_sock_map_update(struct bpf_sock_ops *skops, struct bpf_map *map, void *key, u64 flags)
1439 * Description
1440 * Add an entry to, or update a *map* referencing sockets. The
1441 * *skops* is used as a new value for the entry associated to
1442 * *key*. *flags* is one of:
1443 *
1444 * **BPF_NOEXIST**
1445 * The entry for *key* must not exist in the map.
1446 * **BPF_EXIST**
1447 * The entry for *key* must already exist in the map.
1448 * **BPF_ANY**
1449 * No condition on the existence of the entry for *key*.
1450 *
1451 * If the *map* has eBPF programs (parser and verdict), those will
1452 * be inherited by the socket being added. If the socket is
1453 * already attached to eBPF programs, this results in an error.
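 *
 * A sketch of a **BPF_PROG_TYPE_SOCK_OPS** program inserting newly
 * established passive connections into an assumed
 * **BPF_MAP_TYPE_SOCKMAP** named *sock_map*, keyed by local port:
 *
 * ::
 *
 *	if (skops->op == BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB) {
 *		__u32 key = skops->local_port;
 *
 *		bpf_sock_map_update(skops, &sock_map, &key, BPF_NOEXIST);
 *	}
 *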
1454 * Return
1455 * 0 on success, or a negative error in case of failure.
1456 *
1457 * int bpf_xdp_adjust_meta(struct xdp_buff *xdp_md, int delta)
1458 * Description
1459 * Adjust the address pointed by *xdp_md*\ **->data_meta** by
1460 * *delta* (which can be positive or negative). Note that this
1461 * operation modifies the address stored in *xdp_md*\ **->data**,
1462 * so the latter must be loaded only after the helper has been
1463 * called.
1464 *
1465 * The use of *xdp_md*\ **->data_meta** is optional and programs
1466 * are not required to use it. The rationale is that when the
1467 * packet is processed with XDP (e.g. as DoS filter), it is
1468 * possible to push further meta data along with it before passing
1469 * to the stack, and to give the guarantee that an ingress eBPF
1470 * program attached as a TC classifier on the same device can pick
1471 * this up for further post-processing. Since TC works with socket
1472 * buffers, it remains possible to set from XDP the **mark** or
1473 * **priority** fields, or other fields of the socket buffer.
1474 * Having this scratch space generic and programmable allows for
1475 * more flexibility as the user is free to store whatever meta
1476 * data they need.
1477 *
1478 * A call to this helper may change the underlying
1479 * packet buffer. Therefore, at load time, all checks on pointers
1480 * previously done by the verifier are invalidated and must be
1481 * performed again, if the helper is used in combination with
1482 * direct packet access.
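 *
 * A sketch of passing a single (hypothetical) 32-bit mark from XDP to
 * a TC classifier through the metadata area:
 *
 * ::
 *
 *	__u32 *meta;
 *
 *	if (bpf_xdp_adjust_meta(xdp_md, -(int)sizeof(*meta)))
 *		return XDP_PASS;
 *	meta = (void *)(long)xdp_md->data_meta;
 *	if ((void *)(meta + 1) > (void *)(long)xdp_md->data)
 *		return XDP_PASS;
 *	*meta = 0x42;	// consumed later from __sk_buff->data_meta
 *	return XDP_PASS;
 *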
1483 * Return
1484 * 0 on success, or a negative error in case of failure.
1485 *
1486 * int bpf_perf_event_read_value(struct bpf_map *map, u64 flags, struct bpf_perf_event_value *buf, u32 buf_size)
1487 * Description
1488 * Read the value of a perf event counter, and store it into *buf*
1489 * of size *buf_size*. This helper relies on a *map* of type
1490 * **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. The nature of the perf event
1491 * counter is selected when *map* is updated with perf event file
1492 * descriptors. The *map* is an array whose size is the number of
1493 * available CPUs, and each cell contains a value relative to one
1494 * CPU. The value to retrieve is indicated by *flags*, that
1495 * contains the index of the CPU to look up, masked with
1496 * **BPF_F_INDEX_MASK**. Alternatively, *flags* can be set to
1497 * **BPF_F_CURRENT_CPU** to indicate that the value for the
1498 * current CPU should be retrieved.
1499 *
1500 * This helper behaves in a way close to
1501 * **bpf_perf_event_read**\ () helper, save that instead of
1502 * just returning the value observed, it fills the *buf*
1503 * structure. This allows for additional data to be retrieved: in
1504 * particular, the enabled and running times (in *buf*\
1505 * **->enabled** and *buf*\ **->running**, respectively) are
1506 * copied. In general, **bpf_perf_event_read_value**\ () is
1507 * recommended over **bpf_perf_event_read**\ (), which has some
1508 * ABI issues and provides fewer functionalities.
1509 *
1510 * These values are interesting, because hardware PMU (Performance
1511 * Monitoring Unit) counters are limited resources. When there are
1512 * more PMU-based perf events opened than available counters,
1513 * the kernel multiplexes these events so each event gets a certain
1514 * percentage (but not all) of the PMU time. When such
1515 * multiplexing happens, the number of samples or the counter value
1516 * will not reflect the true count that would be observed without
1517 * multiplexing. This makes comparison between different runs difficult.
1518 * Typically, the counter value should be normalized before
1519 * comparing to other experiments. The usual normalization is done
1520 * as follows.
1521 *
1522 * ::
1523 *
1524 * normalized_counter = counter * t_enabled / t_running
1525 *
1526 * Where t_enabled is the time enabled for the event and t_running is
1527 * the time running for the event since the last normalization. The
1528 * enabled and running times are accumulated since the perf event
1529 * open. To achieve scaling factor between two invocations of an
1530 * eBPF program, users can use the CPU id as the key (which is
1531 * typical for perf array usage model) to remember the previous
1532 * value and do the calculation inside the eBPF program.
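 *
 * A sketch of the normalization above, assuming a
 * **BPF_MAP_TYPE_PERF_EVENT_ARRAY** named *counters*:
 *
 * ::
 *
 *	struct bpf_perf_event_value val = {};
 *	__u64 normalized = 0;
 *	int err;
 *
 *	err = bpf_perf_event_read_value(&counters, BPF_F_CURRENT_CPU,
 *					&val, sizeof(val));
 *	if (!err && val.running)
 *		normalized = val.counter * val.enabled / val.running;
 *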
1533 * Return
1534 * 0 on success, or a negative error in case of failure.
1535 *
1536 * int bpf_perf_prog_read_value(struct bpf_perf_event_data *ctx, struct bpf_perf_event_value *buf, u32 buf_size)
1537 * Description
1538 * For an eBPF program attached to a perf event, retrieve the
1539 * value of the event counter associated to *ctx* and store it in
1540 * the structure pointed by *buf* and of size *buf_size*. Enabled
1541 * and running times are also stored in the structure (see
1542 * description of helper **bpf_perf_event_read_value**\ () for
1543 * more details).
1544 * Return
1545 * 0 on success, or a negative error in case of failure.
1546 *
1547 * int bpf_getsockopt(struct bpf_sock_ops *bpf_socket, int level, int optname, char *optval, int optlen)
1548 * Description
1549 * Emulate a call to **getsockopt()** on the socket associated to
1550 * *bpf_socket*, which must be a full socket. The *level* at
1551 * which the option resides and the name *optname* of the option
1552 * must be specified, see **getsockopt(2)** for more information.
1553 * The retrieved value is stored in the structure pointed by
1554 * *optval* and of length *optlen*.
1555 *
1556 * This helper actually implements a subset of **getsockopt()**.
1557 * It supports the following *level*\ s:
1558 *
1559 * * **IPPROTO_TCP**, which supports *optname*
1560 * **TCP_CONGESTION**.
1561 * * **IPPROTO_IP**, which supports *optname* **IP_TOS**.
1562 * * **IPPROTO_IPV6**, which supports *optname* **IPV6_TCLASS**.
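 *
 * A sketch of reading back the current congestion control name:
 *
 * ::
 *
 *	char cc[16] = {};
 *	int err;
 *
 *	err = bpf_getsockopt(skops, IPPROTO_TCP, TCP_CONGESTION,
 *			     cc, sizeof(cc));
 *	// On success, cc now holds e.g. "cubic".
 *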
1563 * Return
1564 * 0 on success, or a negative error in case of failure.
1565 *
1566 * int bpf_override_return(struct pt_regs *regs, u64 rc)
1567 * Description
1568 * Used for error injection, this helper uses kprobes to override
1569 * the return value of the probed function, and to set it to *rc*.
1570 * The first argument is the context *regs* on which the kprobe
1571 * works.
1572 *
1573 * This helper works by setting the PC (program counter)
1574 * to an override function which is run in place of the original
1575 * probed function. This means the probed function is not run at
1576 * all. The replacement function just returns with the required
1577 * value.
1578 *
1579 * This helper has security implications, and thus is subject to
1580 * restrictions. It is only available if the kernel was compiled
1581 * with the **CONFIG_BPF_KPROBE_OVERRIDE** configuration
1582 * option, and in this case it only works on functions tagged with
1583 * **ALLOW_ERROR_INJECTION** in the kernel code.
1584 *
1585 * Also, the helper is only available for the architectures having
1586 * the CONFIG_FUNCTION_ERROR_INJECTION option. As of this writing,
1587 * x86 architecture is the only one to support this feature.
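 *
 * As an illustration only, a kprobe sketch that forces an assumed
 * error-injectable function to fail with **-ENOMEM**:
 *
 * ::
 *
 *	SEC("kprobe/some_injectable_func")
 *	int force_enomem(struct pt_regs *regs)
 *	{
 *		bpf_override_return(regs, -ENOMEM);
 *		return 0;
 *	}
 *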
1588 * Return
1589 * 0
1590 *
1591 * int bpf_sock_ops_cb_flags_set(struct bpf_sock_ops *bpf_sock, int argval)
1592 * Description
1593 * Attempt to set the value of the **bpf_sock_ops_cb_flags** field
1594 * for the full TCP socket associated to *bpf_sock* to
1595 * *argval*.
1596 *
1597 * The primary use of this field is to determine if there should
1598 * be calls to eBPF programs of type
1599 * **BPF_PROG_TYPE_SOCK_OPS** at various points in the TCP
1600 * code. A program of the same type can change its value, per
1601 * connection and as necessary, when the connection is
1602 * established. This field is directly accessible for reading, but
1603 * this helper must be used for updates in order to return an
1604 * error if an eBPF program tries to set a callback that is not
1605 * supported in the current kernel.
1606 *
1607 * The supported callback values that *argval* can combine are:
1608 *
1609 * * **BPF_SOCK_OPS_RTO_CB_FLAG** (retransmission time out)
1610 * * **BPF_SOCK_OPS_RETRANS_CB_FLAG** (retransmission)
1611 * * **BPF_SOCK_OPS_STATE_CB_FLAG** (TCP state change)
1612 *
1613 * Here are some examples of where one could call such eBPF
1614 * program:
1615 *
1616 * * When RTO fires.
1617 * * When a packet is retransmitted.
1618 * * When the connection terminates.
1619 * * When a packet is sent.
1620 * * When a packet is received.
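 *
 * For example (a sketch), enabling RTO and state-change callbacks
 * once a connection is established:
 *
 * ::
 *
 *	if (skops->op == BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB)
 *		bpf_sock_ops_cb_flags_set(skops,
 *					  BPF_SOCK_OPS_RTO_CB_FLAG |
 *					  BPF_SOCK_OPS_STATE_CB_FLAG);
 *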
1621 * Return
1622 * Code **-EINVAL** if the socket is not a full TCP socket;
1623 * otherwise, a positive number containing the bits that could not
1624 * be set is returned (which comes down to 0 if all bits were set
1625 * as required).
1626 *
1627 * int bpf_msg_redirect_map(struct sk_msg_buff *msg, struct bpf_map *map, u32 key, u64 flags)
1628 * Description
1629 * This helper is used in programs implementing policies at the
1630 * socket level. If the message *msg* is allowed to pass (i.e. if
1631 * the verdict eBPF program returns **SK_PASS**), redirect it to
1632 * the socket referenced by *map* (of type
1633 * **BPF_MAP_TYPE_SOCKMAP**) at index *key*. Both ingress and
1634 * egress interfaces can be used for redirection. The
1635 * **BPF_F_INGRESS** value in *flags* is used to make the
1636 * distinction (ingress path is selected if the flag is present,
1637 * egress path otherwise). This is the only flag supported for now.
1638 * Return
1639 * **SK_PASS** on success, or **SK_DROP** on error.
1640 *
1641 * int bpf_msg_apply_bytes(struct sk_msg_buff *msg, u32 bytes)
1642 * Description
1643 * For socket policies, apply the verdict of the eBPF program to
1644 * the next *bytes* (number of bytes) of message *msg*.
1645 *
1646 * For example, this helper can be used in the following cases:
1647 *
1648 * * A single **sendmsg**\ () or **sendfile**\ () system call
1649 * contains multiple logical messages that the eBPF program is
1650 * supposed to read and for which it should apply a verdict.
1651 * An eBPF program only cares about reading the first *bytes* of a
1652 * *msg*. If the message has a large payload, then setting up
1653 * and calling the eBPF program repeatedly for all bytes, even
1654 * though the verdict is already known, would create unnecessary
1655 * overhead.
1656 *
1657 * When called from within an eBPF program, the helper sets a
1658 * counter internal to the BPF infrastructure, that is used to
1659 * apply the last verdict to the next *bytes*. If *bytes* is
1660 * smaller than the current data being processed from a
1661 * **sendmsg**\ () or **sendfile**\ () system call, the first
1662 * *bytes* will be sent and the eBPF program will be re-run with
1663 * the pointer for start of data pointing to byte number *bytes*
1664 * **+ 1**. If *bytes* is larger than the current data being
1665 * processed, then the eBPF verdict will be applied to multiple
1666 * **sendmsg**\ () or **sendfile**\ () calls until *bytes* are
1667 * consumed.
1668 *
1669 * Note that if a socket closes with the internal counter holding
1670 * a non-zero value, this is not a problem because data is not
1671 * being buffered for *bytes* and is sent as it is received.
1672 * Return
1673 * 0
1674 *
1675 * int bpf_msg_cork_bytes(struct sk_msg_buff *msg, u32 bytes)
1676 * Description
1677 * For socket policies, prevent the execution of the verdict eBPF
1678 * program for message *msg* until *bytes* (byte number) have been
1679 * accumulated.
1680 *
1681 * This can be used when one needs a specific number of bytes
1682 * before a verdict can be assigned, even if the data spans
1683 * multiple **sendmsg**\ () or **sendfile**\ () calls. The extreme
1684 * case would be a user calling **sendmsg**\ () repeatedly with
1685 * 1-byte long message segments. Obviously, this is bad for
1686 * performance, but it is still valid. If the eBPF program needs
1687 * *bytes* bytes to validate a header, this helper can be used to
1688 * prevent the eBPF program from being called again until *bytes* have
1689 * been accumulated.
1690 * Return
1691 * 0
1692 *
1693 * int bpf_msg_pull_data(struct sk_msg_buff *msg, u32 start, u32 end, u64 flags)
1694 * Description
1695 * For socket policies, pull in non-linear data from user space
1696 * for *msg* and set pointers *msg*\ **->data** and *msg*\
1697 * **->data_end** to *start* and *end* bytes offsets into *msg*,
1698 * respectively.
1699 *
1700 * If a program of type **BPF_PROG_TYPE_SK_MSG** is run on a
1701 * *msg* it can only parse data that the (**data**, **data_end**)
1702 * pointers have already consumed. For **sendmsg**\ () hooks this
1703 * is likely the first scatterlist element. But for calls relying
1704 * on the **sendpage** handler (e.g. **sendfile**\ ()) this will
1705 * be the range (**0**, **0**) because the data is shared with
1706 * user space and by default the objective is to avoid allowing
1707 * user space to modify data while (or after) eBPF verdict is
1708 * being decided. This helper can be used to pull in data and to
1709 * set the start and end pointer to given values. Data will be
1710 * copied if necessary (i.e. if data was not linear and if start
1711 * and end pointers do not point to the same chunk).
1712 *
1713 * A call to this helper may change the underlying
1714 * packet buffer. Therefore, at load time, all checks on pointers
1715 * previously done by the verifier are invalidated and must be
1716 * performed again, if the helper is used in combination with
1717 * direct packet access.
1718 *
1719 * All values for *flags* are reserved for future usage, and must
1720 * be left at zero.
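 *
 * A sketch of making the first 20 bytes of a message readable before
 * parsing them (the length is an assumption for illustration):
 *
 * ::
 *
 *	void *data = msg->data;
 *	void *data_end = msg->data_end;
 *
 *	if (data + 20 > data_end &&
 *	    bpf_msg_pull_data(msg, 0, 20, 0))
 *		return SK_DROP;
 *	// Reload msg->data and msg->data_end after a successful pull.
 *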
1721 * Return
1722 * 0 on success, or a negative error in case of failure.
1723 *
1724 * int bpf_bind(struct bpf_sock_addr *ctx, struct sockaddr *addr, int addr_len)
1725 * Description
1726 * Bind the socket associated to *ctx* to the address pointed by
1727 * *addr*, of length *addr_len*. This allows for making outgoing
1728 * connection from the desired IP address, which can be useful for
1729 * example when all processes inside a cgroup should use one
1730 * single IP address on a host that has multiple IP addresses configured.
1731 *
1732 * This helper works for IPv4 and IPv6, TCP and UDP sockets. The
1733 * domain (*addr*\ **->sa_family**) must be **AF_INET** (or
1734 * **AF_INET6**). Looking for a free port to bind to can be
1735 * expensive, therefore binding to port is not permitted by the
1736 * helper: *addr*\ **->sin_port** (or **sin6_port**, respectively)
1737 * must be set to zero.
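 *
 * A sketch for a program attached at the connect hook, where SRC_IP4
 * stands for an assumed IPv4 address in network byte order:
 *
 * ::
 *
 *	struct sockaddr_in addr = {
 *		.sin_family = AF_INET,
 *		.sin_addr.s_addr = SRC_IP4,
 *		.sin_port = 0,	// binding to a port is not permitted
 *	};
 *
 *	bpf_bind(ctx, (struct sockaddr *)&addr, sizeof(addr));
 *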
1738 * Return
1739 * 0 on success, or a negative error in case of failure.
1740 *
1741 * int bpf_xdp_adjust_tail(struct xdp_buff *xdp_md, int delta)
1742 * Description
1743 * Adjust (move) *xdp_md*\ **->data_end** by *delta* bytes. It is
1744 * only possible to shrink the packet as of this writing,
1745 * therefore *delta* must be a negative integer.
1746 *
1747 * A call to this helper may change the underlying
1748 * packet buffer. Therefore, at load time, all checks on pointers
1749 * previously done by the verifier are invalidated and must be
1750 * performed again, if the helper is used in combination with
1751 * direct packet access.
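 *
 * For instance, a sketch that trims every packet to at most CAP_LEN
 * bytes (an assumed capture length), e.g. before sampling it to user
 * space:
 *
 * ::
 *
 *	int len = (long)xdp_md->data_end - (long)xdp_md->data;
 *
 *	if (len > CAP_LEN)
 *		bpf_xdp_adjust_tail(xdp_md, CAP_LEN - len);
 *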
1752 * Return
1753 * 0 on success, or a negative error in case of failure.
1754 *
1755 * int bpf_skb_get_xfrm_state(struct sk_buff *skb, u32 index, struct bpf_xfrm_state *xfrm_state, u32 size, u64 flags)
1756 * Description
1757 * Retrieve the XFRM state (IP transform framework, see also
1758 * **ip-xfrm(8)**) at *index* in XFRM "security path" for *skb*.
1759 *
1760 * The retrieved value is stored in the **struct bpf_xfrm_state**
1761 * pointed by *xfrm_state* and of length *size*.
1762 *
1763 * All values for *flags* are reserved for future usage, and must
1764 * be left at zero.
1765 *
1766 * This helper is available only if the kernel was compiled with
1767 * **CONFIG_XFRM** configuration option.
1768 * Return
1769 * 0 on success, or a negative error in case of failure.
1770 *
1771 * int bpf_get_stack(struct pt_regs *regs, void *buf, u32 size, u64 flags)
1772 * Description
1773 * Return a user or a kernel stack in a BPF-program-provided buffer.
1774 * To achieve this, the helper needs *regs*, which is a pointer
1775 * to the context on which the tracing program is executed.
1776 * To store the stacktrace, the BPF program provides *buf* with
1777 * a non-negative *size*.
1778 *
1779 * The last argument, *flags*, holds the number of stack frames to
1780 * skip (from 0 to 255), masked with
1781 * **BPF_F_SKIP_FIELD_MASK**. The next bits can be used to set
1782 * the following flags:
1783 *
1784 * **BPF_F_USER_STACK**
1785 * Collect a user space stack instead of a kernel stack.
1786 * **BPF_F_USER_BUILD_ID**
1787 * Collect buildid+offset instead of ips for user stack,
1788 * only valid if **BPF_F_USER_STACK** is also specified.
1789 *
1790 * **bpf_get_stack**\ () can collect up to
1791 * **PERF_MAX_STACK_DEPTH** kernel and user frames, provided
1792 * the buffer size is sufficiently large. Note that
1793 * this limit can be controlled with the **sysctl** program, and
1794 * that it should be manually increased in order to profile long
1795 * user stacks (such as stacks for Java programs). To do so, use:
1796 *
1797 * ::
1798 *
1799 * # sysctl kernel.perf_event_max_stack=<new value>
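 *
 * A sketch of grabbing a user-space stack and forwarding it through
 * an assumed **BPF_MAP_TYPE_PERF_EVENT_ARRAY** named *events*:
 *
 * ::
 *
 *	char stack[256];
 *	int len = bpf_get_stack(regs, stack, sizeof(stack),
 *				BPF_F_USER_STACK);
 *
 *	if (len > 0)	// bound len as the verifier requires
 *		bpf_perf_event_output(regs, &events, BPF_F_CURRENT_CPU,
 *				      stack, len);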
1800 *
1801 * Return
1802 * A non-negative value equal to or less than *size* on success,
1803 * or a negative error in case of failure.
1804 */
1805#define __BPF_FUNC_MAPPER(FN) \
1806 FN(unspec), \
1807 FN(map_lookup_elem), \
1808 FN(map_update_elem), \
1809 FN(map_delete_elem), \
1810 FN(probe_read), \
1811 FN(ktime_get_ns), \
1812 FN(trace_printk), \
1813 FN(get_prandom_u32), \
1814 FN(get_smp_processor_id), \
1815 FN(skb_store_bytes), \
1816 FN(l3_csum_replace), \
1817 FN(l4_csum_replace), \
1818 FN(tail_call), \
1819 FN(clone_redirect), \
1820 FN(get_current_pid_tgid), \
1821 FN(get_current_uid_gid), \
1822 FN(get_current_comm), \
1823 FN(get_cgroup_classid), \
1824 FN(skb_vlan_push), \
1825 FN(skb_vlan_pop), \
1826 FN(skb_get_tunnel_key), \
1827 FN(skb_set_tunnel_key), \
1828 FN(perf_event_read), \
1829 FN(redirect), \
1830 FN(get_route_realm), \
1831 FN(perf_event_output), \
1832 FN(skb_load_bytes), \
1833 FN(get_stackid), \
1834 FN(csum_diff), \
1835 FN(skb_get_tunnel_opt), \
1836 FN(skb_set_tunnel_opt), \
1837 FN(skb_change_proto), \
1838 FN(skb_change_type), \
1839 FN(skb_under_cgroup), \
1840 FN(get_hash_recalc), \
1841 FN(get_current_task), \
1842 FN(probe_write_user), \
1843 FN(current_task_under_cgroup), \
1844 FN(skb_change_tail), \
1845 FN(skb_pull_data), \
1846 FN(csum_update), \
1847 FN(set_hash_invalid), \
1848 FN(get_numa_node_id), \
1849 FN(skb_change_head), \
1850 FN(xdp_adjust_head), \
1851 FN(probe_read_str), \
1852 FN(get_socket_cookie), \
1853 FN(get_socket_uid), \
1854 FN(set_hash), \
1855 FN(setsockopt), \
1856 FN(skb_adjust_room), \
1857 FN(redirect_map), \
1858 FN(sk_redirect_map), \
1859 FN(sock_map_update), \
1860 FN(xdp_adjust_meta), \
1861 FN(perf_event_read_value), \
1862 FN(perf_prog_read_value), \
1863 FN(getsockopt), \
1864 FN(override_return), \
1865 FN(sock_ops_cb_flags_set), \
1866 FN(msg_redirect_map), \
1867 FN(msg_apply_bytes), \
1868 FN(msg_cork_bytes), \
1869 FN(msg_pull_data), \
1870 FN(bind), \
1871 FN(xdp_adjust_tail), \
1872 FN(skb_get_xfrm_state), \
1873 FN(get_stack),
1874
1875/* integer value in 'imm' field of BPF_CALL instruction selects which helper
1876 * function eBPF program intends to call
1877 */
1878#define __BPF_ENUM_FN(x) BPF_FUNC_ ## x
1879enum bpf_func_id {
1880 __BPF_FUNC_MAPPER(__BPF_ENUM_FN)
1881 __BPF_FUNC_MAX_ID,
1882};
1883#undef __BPF_ENUM_FN
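
/* The mapper above is an "X macro": each consumer defines FN to extract
 * what it needs from the single list. As a sketch (not part of this
 * header), a helper-name table for tooling could be generated from the
 * same list:
 *
 *	#define __BPF_STR_FN(x) [BPF_FUNC_ ## x] = "bpf_" #x
 *	static const char *bpf_func_names[] = {
 *		__BPF_FUNC_MAPPER(__BPF_STR_FN)
 *	};
 *	#undef __BPF_STR_FN
 */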
1884
1885/* All flags used by eBPF helper functions, placed here. */
1886
1887/* BPF_FUNC_skb_store_bytes flags. */
1888#define BPF_F_RECOMPUTE_CSUM (1ULL << 0)
1889#define BPF_F_INVALIDATE_HASH (1ULL << 1)
1890
1891/* BPF_FUNC_l3_csum_replace and BPF_FUNC_l4_csum_replace flags.
1892 * First 4 bits are for passing the header field size.
1893 */
1894#define BPF_F_HDR_FIELD_MASK 0xfULL
1895
1896/* BPF_FUNC_l4_csum_replace flags. */
1897#define BPF_F_PSEUDO_HDR (1ULL << 4)
1898#define BPF_F_MARK_MANGLED_0 (1ULL << 5)
1899#define BPF_F_MARK_ENFORCE (1ULL << 6)
1900
1901/* BPF_FUNC_clone_redirect and BPF_FUNC_redirect flags. */
1902#define BPF_F_INGRESS (1ULL << 0)
1903
1904/* BPF_FUNC_skb_set_tunnel_key and BPF_FUNC_skb_get_tunnel_key flags. */
1905#define BPF_F_TUNINFO_IPV6 (1ULL << 0)
1906
1907/* flags for both BPF_FUNC_get_stackid and BPF_FUNC_get_stack. */
1908#define BPF_F_SKIP_FIELD_MASK 0xffULL
1909#define BPF_F_USER_STACK (1ULL << 8)
1910/* flags used by BPF_FUNC_get_stackid only. */
1911#define BPF_F_FAST_STACK_CMP (1ULL << 9)
1912#define BPF_F_REUSE_STACKID (1ULL << 10)
1913/* flags used by BPF_FUNC_get_stack only. */
1914#define BPF_F_USER_BUILD_ID (1ULL << 11)
1915
1916/* BPF_FUNC_skb_set_tunnel_key flags. */
1917#define BPF_F_ZERO_CSUM_TX (1ULL << 1)
1918#define BPF_F_DONT_FRAGMENT (1ULL << 2)
1919#define BPF_F_SEQ_NUMBER (1ULL << 3)
1920
1921/* BPF_FUNC_perf_event_output, BPF_FUNC_perf_event_read and
1922 * BPF_FUNC_perf_event_read_value flags.
1923 */
1924#define BPF_F_INDEX_MASK 0xffffffffULL
1925#define BPF_F_CURRENT_CPU BPF_F_INDEX_MASK
1926/* BPF_FUNC_perf_event_output for sk_buff input context. */
1927#define BPF_F_CTXLEN_MASK (0xfffffULL << 32)
1928
1929/* Mode for BPF_FUNC_skb_adjust_room helper. */
1930enum bpf_adj_room_mode {
1931 BPF_ADJ_ROOM_NET,
1932};
1933
1934/* user accessible mirror of in-kernel sk_buff.
1935 * new fields can only be added to the end of this structure
1936 */
1937struct __sk_buff {
1938 __u32 len;
1939 __u32 pkt_type;
1940 __u32 mark;
1941 __u32 queue_mapping;
1942 __u32 protocol;
1943 __u32 vlan_present;
1944 __u32 vlan_tci;
1945 __u32 vlan_proto;
1946 __u32 priority;
1947 __u32 ingress_ifindex;
1948 __u32 ifindex;
1949 __u32 tc_index;
1950 __u32 cb[5];
1951 __u32 hash;
1952 __u32 tc_classid;
1953 __u32 data;
1954 __u32 data_end;
1955 __u32 napi_id;
1956
1957 /* Accessed by BPF_PROG_TYPE_SK_SKB types from here to ... */
1958 __u32 family;
1959 __u32 remote_ip4; /* Stored in network byte order */
1960 __u32 local_ip4; /* Stored in network byte order */
1961 __u32 remote_ip6[4]; /* Stored in network byte order */
1962 __u32 local_ip6[4]; /* Stored in network byte order */
1963 __u32 remote_port; /* Stored in network byte order */
1964 __u32 local_port; /* stored in host byte order */
1965 /* ... here. */
1966
1967 __u32 data_meta;
1968};
1969
1970struct bpf_tunnel_key {
1971 __u32 tunnel_id;
1972 union {
1973 __u32 remote_ipv4;
1974 __u32 remote_ipv6[4];
1975 };
1976 __u8 tunnel_tos;
1977 __u8 tunnel_ttl;
1978 __u16 tunnel_ext;
1979 __u32 tunnel_label;
1980};
1981
1982/* user accessible mirror of in-kernel xfrm_state.
1983 * new fields can only be added to the end of this structure
1984 */
1985struct bpf_xfrm_state {
1986 __u32 reqid;
1987 __u32 spi; /* Stored in network byte order */
1988 __u16 family;
1989 union {
1990 __u32 remote_ipv4; /* Stored in network byte order */
1991 __u32 remote_ipv6[4]; /* Stored in network byte order */
1992 };
1993};
1994
1995/* Generic BPF return codes which all BPF program types may support.
1996 * The values are binary compatible with their TC_ACT_* counter-part to
1997 * provide backwards compatibility with existing SCHED_CLS and SCHED_ACT
1998 * programs.
1999 *
2000 * XDP is handled separately, see XDP_*.
2001 */
2002enum bpf_ret_code {
2003 BPF_OK = 0,
2004 /* 1 reserved */
2005 BPF_DROP = 2,
2006 /* 3-6 reserved */
2007 BPF_REDIRECT = 7,
2008 /* >127 are reserved for prog type specific return codes */
2009};
2010
2011struct bpf_sock {
2012 __u32 bound_dev_if;
2013 __u32 family;
2014 __u32 type;
2015 __u32 protocol;
2016 __u32 mark;
2017 __u32 priority;
2018 __u32 src_ip4; /* Allows 1,2,4-byte read.
2019 * Stored in network byte order.
2020 */
2021 __u32 src_ip6[4]; /* Allows 1,2,4-byte read.
2022 * Stored in network byte order.
2023 */
2024 __u32 src_port; /* Allows 4-byte read.
2025 * Stored in host byte order
2026 */
2027};
2028
2029#define XDP_PACKET_HEADROOM 256
2030
2031/* User return codes for XDP prog type.
2032 * A valid XDP program must return one of these defined values. All other
2033 * return codes are reserved for future use. Unknown return codes will
2034 * result in packet drops and a warning via bpf_warn_invalid_xdp_action().
2035 */
2036enum xdp_action {
2037 XDP_ABORTED = 0,
2038 XDP_DROP,
2039 XDP_PASS,
2040 XDP_TX,
2041 XDP_REDIRECT,
2042};
2043
2044/* user accessible metadata for XDP packet hook
2045 * new fields must be added to the end of this structure
2046 */
2047struct xdp_md {
2048 __u32 data;
2049 __u32 data_end;
2050 __u32 data_meta;
2051 /* Accesses below go through struct xdp_rxq_info */
2052 __u32 ingress_ifindex; /* rxq->dev->ifindex */
2053 __u32 rx_queue_index; /* rxq->queue_index */
2054};
2055
2056enum sk_action {
2057 SK_DROP = 0,
2058 SK_PASS,
2059};
2060
2061/* user accessible metadata for SK_MSG packet hook, new fields must
2062 * be added to the end of this structure
2063 */
2064struct sk_msg_md {
2065 void *data;
2066 void *data_end;
2067};
2068
2069#define BPF_TAG_SIZE 8
2070
2071struct bpf_prog_info {
2072 __u32 type;
2073 __u32 id;
2074 __u8 tag[BPF_TAG_SIZE];
2075 __u32 jited_prog_len;
2076 __u32 xlated_prog_len;
2077 __aligned_u64 jited_prog_insns;
2078 __aligned_u64 xlated_prog_insns;
2079 __u64 load_time; /* ns since boottime */
2080 __u32 created_by_uid;
2081 __u32 nr_map_ids;
2082 __aligned_u64 map_ids;
2083 char name[BPF_OBJ_NAME_LEN];
2084 __u32 ifindex;
2085 __u32 gpl_compatible:1;
2086 __u64 netns_dev;
2087 __u64 netns_ino;
2088} __attribute__((aligned(8)));
2089
2090struct bpf_map_info {
2091 __u32 type;
2092 __u32 id;
2093 __u32 key_size;
2094 __u32 value_size;
2095 __u32 max_entries;
2096 __u32 map_flags;
2097 char name[BPF_OBJ_NAME_LEN];
2098 __u32 ifindex;
2099 __u64 netns_dev;
2100 __u64 netns_ino;
2101} __attribute__((aligned(8)));
2102
2103/* User bpf_sock_addr struct to access socket fields and sockaddr struct passed
2104 * by user and intended to be used by socket (e.g. to bind to, depends on
2105 * attach type).
2106 */
2107struct bpf_sock_addr {
2108 __u32 user_family; /* Allows 4-byte read, but no write. */
2109 __u32 user_ip4; /* Allows 1,2,4-byte read and 4-byte write.
2110 * Stored in network byte order.
2111 */
2112 __u32 user_ip6[4]; /* Allows 1,2,4-byte read and 4-byte write.
2113 * Stored in network byte order.
2114 */
2115 __u32 user_port; /* Allows 4-byte read and write.
2116 * Stored in network byte order
2117 */
2118 __u32 family; /* Allows 4-byte read, but no write */
2119 __u32 type; /* Allows 4-byte read, but no write */
2120 __u32 protocol; /* Allows 4-byte read, but no write */
2121};
2122
2123/* User bpf_sock_ops struct to access socket values and specify request ops
2124 * and their replies.
2125 * Some of these fields are in network (big-endian) byte order and may need
2126 * to be converted before use (bpf_ntohl() defined in samples/bpf/bpf_endian.h).
2127 * New fields can only be added at the end of this structure
2128 */
2129struct bpf_sock_ops {
2130 __u32 op;
2131 union {
2132 __u32 args[4]; /* Optionally passed to bpf program */
2133 __u32 reply; /* Returned by bpf program */
2134 __u32 replylong[4]; /* Optionally returned by bpf prog */
2135 };
2136 __u32 family;
2137 __u32 remote_ip4; /* Stored in network byte order */
2138 __u32 local_ip4; /* Stored in network byte order */
2139 __u32 remote_ip6[4]; /* Stored in network byte order */
2140 __u32 local_ip6[4]; /* Stored in network byte order */
2141 __u32 remote_port; /* Stored in network byte order */
2142 __u32 local_port; /* stored in host byte order */
2143 __u32 is_fullsock; /* Some TCP fields are only valid if
2144 * there is a full socket. If not, the
2145 * fields read as zero.
2146 */
2147 __u32 snd_cwnd;
2148 __u32 srtt_us; /* Averaged RTT << 3 in usecs */
2149 __u32 bpf_sock_ops_cb_flags; /* flags defined in uapi/linux/tcp.h */
2150 __u32 state;
2151 __u32 rtt_min;
2152 __u32 snd_ssthresh;
2153 __u32 rcv_nxt;
2154 __u32 snd_nxt;
2155 __u32 snd_una;
2156 __u32 mss_cache;
2157 __u32 ecn_flags;
2158 __u32 rate_delivered;
2159 __u32 rate_interval_us;
2160 __u32 packets_out;
2161 __u32 retrans_out;
2162 __u32 total_retrans;
2163 __u32 segs_in;
2164 __u32 data_segs_in;
2165 __u32 segs_out;
2166 __u32 data_segs_out;
2167 __u32 lost_out;
2168 __u32 sacked_out;
2169 __u32 sk_txhash;
2170 __u64 bytes_received;
2171 __u64 bytes_acked;
2172};
2173
2174/* Definitions for bpf_sock_ops_cb_flags */
2175#define BPF_SOCK_OPS_RTO_CB_FLAG (1<<0)
2176#define BPF_SOCK_OPS_RETRANS_CB_FLAG (1<<1)
2177#define BPF_SOCK_OPS_STATE_CB_FLAG (1<<2)
2178#define BPF_SOCK_OPS_ALL_CB_FLAGS 0x7 /* Mask of all currently
2179 * supported cb flags
2180 */
2181
2182/* List of known BPF sock_ops operators.
2183 * New entries can only be added at the end
2184 */
2185enum {
2186 BPF_SOCK_OPS_VOID,
2187 BPF_SOCK_OPS_TIMEOUT_INIT, /* Should return SYN-RTO value to use or
2188 * -1 if default value should be used
2189 */
2190 BPF_SOCK_OPS_RWND_INIT, /* Should return initial advertised
2191 * window (in packets) or -1 if default
2192 * value should be used
2193 */
2194 BPF_SOCK_OPS_TCP_CONNECT_CB, /* Calls BPF program right before an
2195 * active connection is initialized
2196 */
2197 BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB, /* Calls BPF program when an
2198 * active connection is
2199 * established
2200 */
2201 BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB, /* Calls BPF program when a
2202 * passive connection is
2203 * established
2204 */
2205 BPF_SOCK_OPS_NEEDS_ECN, /* If connection's congestion control
2206 * needs ECN
2207 */
2208 BPF_SOCK_OPS_BASE_RTT, /* Get base RTT. The correct value is
2209 * based on the path and may be
2210 * dependent on the congestion control
2211 * algorithm. In general it indicates
2212 * a congestion threshold. RTTs above
2213 * this indicate congestion
2214 */
2215 BPF_SOCK_OPS_RTO_CB, /* Called when an RTO has triggered.
2216 * Arg1: value of icsk_retransmits
2217 * Arg2: value of icsk_rto
2218 * Arg3: whether RTO has expired
2219 */
2220 BPF_SOCK_OPS_RETRANS_CB, /* Called when skb is retransmitted.
2221 * Arg1: sequence number of 1st byte
2222 * Arg2: # segments
2223 * Arg3: return value of
2224 * tcp_transmit_skb (0 => success)
2225 */
2226 BPF_SOCK_OPS_STATE_CB, /* Called when TCP changes state.
2227 * Arg1: old_state
2228 * Arg2: new_state
2229 */
2230};
2231
2232/* List of TCP states. There is a build check in net/ipv4/tcp.c to detect
2233 * changes between the TCP and BPF versions. Ideally this should never happen.
2234 * If it does, we need to add code to convert them before calling
2235 * the BPF sock_ops function.
2236 */
2237enum {
2238 BPF_TCP_ESTABLISHED = 1,
2239 BPF_TCP_SYN_SENT,
2240 BPF_TCP_SYN_RECV,
2241 BPF_TCP_FIN_WAIT1,
2242 BPF_TCP_FIN_WAIT2,
2243 BPF_TCP_TIME_WAIT,
2244 BPF_TCP_CLOSE,
2245 BPF_TCP_CLOSE_WAIT,
2246 BPF_TCP_LAST_ACK,
2247 BPF_TCP_LISTEN,
2248 BPF_TCP_CLOSING, /* Now a valid state */
2249 BPF_TCP_NEW_SYN_RECV,
2250
2251 BPF_TCP_MAX_STATES /* Leave at the end! */
2252};
2253
2254#define TCP_BPF_IW 1001 /* Set TCP initial congestion window */
2255#define TCP_BPF_SNDCWND_CLAMP 1002 /* Set sndcwnd_clamp */
2256
2257struct bpf_perf_event_value {
2258 __u64 counter;
2259 __u64 enabled;
2260 __u64 running;
2261};
2262
2263#define BPF_DEVCG_ACC_MKNOD (1ULL << 0)
2264#define BPF_DEVCG_ACC_READ (1ULL << 1)
2265#define BPF_DEVCG_ACC_WRITE (1ULL << 2)
2266
2267#define BPF_DEVCG_DEV_BLOCK (1ULL << 0)
2268#define BPF_DEVCG_DEV_CHAR (1ULL << 1)
2269
2270struct bpf_cgroup_dev_ctx {
2271 /* access_type encoded as (BPF_DEVCG_ACC_* << 16) | BPF_DEVCG_DEV_* */
2272 __u32 access_type;
2273 __u32 major;
2274 __u32 minor;
2275};
2276
2277struct bpf_raw_tracepoint_args {
2278 __u64 args[0];
2279};
2280
2281#endif /* _UAPI__LINUX_BPF_H__ */