/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
#ifndef _UAPI__LINUX_BPF_H__
#define _UAPI__LINUX_BPF_H__

#include <linux/types.h>
#include <linux/bpf_common.h>

/* Extended instruction set based on top of classic BPF */

/* instruction classes */
#define BPF_JMP32	0x06	/* jmp mode in word width */
#define BPF_ALU64	0x07	/* alu mode in double word width */

/* ld/ldx fields */
#define BPF_DW		0x18	/* double word (64-bit) */
#define BPF_ATOMIC	0xc0	/* atomic memory ops - op type in immediate */
#define BPF_XADD	0xc0	/* exclusive add - legacy name */

/* alu/jmp fields */
#define BPF_MOV		0xb0	/* mov reg to reg */
#define BPF_ARSH	0xc0	/* sign extending arithmetic shift right */

/* change endianness of a register */
#define BPF_END		0xd0	/* flags for endianness conversion: */
#define BPF_TO_LE	0x00	/* convert to little-endian */
#define BPF_TO_BE	0x08	/* convert to big-endian */
#define BPF_FROM_LE	BPF_TO_LE
#define BPF_FROM_BE	BPF_TO_BE

/* jmp encodings */
#define BPF_JNE		0x50	/* jump != */
#define BPF_JLT		0xa0	/* LT is unsigned, '<' */
#define BPF_JLE		0xb0	/* LE is unsigned, '<=' */
#define BPF_JSGT	0x60	/* SGT is signed '>', GT in x86 */
#define BPF_JSGE	0x70	/* SGE is signed '>=', GE in x86 */
#define BPF_JSLT	0xc0	/* SLT is signed, '<' */
#define BPF_JSLE	0xd0	/* SLE is signed, '<=' */
#define BPF_CALL	0x80	/* function call */
#define BPF_EXIT	0x90	/* function return */

/* atomic op type fields (stored in immediate) */
#define BPF_FETCH	0x01	/* not an opcode on its own, used to build others */
#define BPF_XCHG	(0xe0 | BPF_FETCH)	/* atomic exchange */
#define BPF_CMPXCHG	(0xf0 | BPF_FETCH)	/* atomic compare-and-write */

/* Register numbers */
enum {
	BPF_REG_0 = 0,
	BPF_REG_1,
	BPF_REG_2,
	BPF_REG_3,
	BPF_REG_4,
	BPF_REG_5,
	BPF_REG_6,
	BPF_REG_7,
	BPF_REG_8,
	BPF_REG_9,
	BPF_REG_10,
	__MAX_BPF_REG,
};

/* BPF has 10 general purpose 64-bit registers and a stack frame. */
#define MAX_BPF_REG	__MAX_BPF_REG

struct bpf_insn {
	__u8	code;		/* opcode */
	__u8	dst_reg:4;	/* dest register */
	__u8	src_reg:4;	/* source register */
	__s16	off;		/* signed offset */
	__s32	imm;		/* signed immediate constant */
};

/* Key of a BPF_MAP_TYPE_LPM_TRIE entry */
struct bpf_lpm_trie_key {
	__u32	prefixlen;	/* up to 32 for AF_INET, 128 for AF_INET6 */
	__u8	data[0];	/* Arbitrary size */
};

struct bpf_cgroup_storage_key {
	__u64	cgroup_inode_id;	/* cgroup inode id */
	__u32	attach_type;		/* program attach type (enum bpf_attach_type) */
};

enum bpf_cgroup_iter_order {
	BPF_CGROUP_ITER_ORDER_UNSPEC = 0,
	BPF_CGROUP_ITER_SELF_ONLY,		/* process only a single object. */
	BPF_CGROUP_ITER_DESCENDANTS_PRE,	/* walk descendants in pre-order. */
	BPF_CGROUP_ITER_DESCENDANTS_POST,	/* walk descendants in post-order. */
	BPF_CGROUP_ITER_ANCESTORS_UP,		/* walk ancestors upward. */
};

union bpf_iter_link_info {
	struct {
		__u32	map_fd;
	} map;
	struct {
		enum bpf_cgroup_iter_order order;

		/* At most one of cgroup_fd and cgroup_id can be non-zero. If
		 * both are zero, the walk starts from the default cgroup v2
		 * root. For walking a v1 hierarchy, one should always
		 * explicitly specify cgroup_fd.
		 */
		__u32	cgroup_fd;
		__u64	cgroup_id;
	} cgroup;
	/* Parameters of task iterators. */
	struct {
		__u32	tid;
		__u32	pid;
		__u32	pid_fd;
	} task;
};
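
/* An editor's illustration (not part of the UAPI): one eBPF instruction
 * encoded with the constants above. BPF_ALU64 | BPF_MOV | BPF_X (BPF_X, from
 * <linux/bpf_common.h>, selects the register operand) yields opcode 0xbf,
 * i.e. "r0 = r1":
 *
 * ::
 *
 *	struct bpf_insn mov64 = {
 *		.code    = BPF_ALU64 | BPF_MOV | BPF_X,
 *		.dst_reg = BPF_REG_0,
 *		.src_reg = BPF_REG_1,
 *		.off     = 0,
 *		.imm     = 0,
 *	};
 */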

/* BPF syscall commands, see bpf(2) man-page for more details. */
/**
 * DOC: eBPF Syscall Preamble
 *
 * The operation to be performed by the **bpf**\ () system call is determined
 * by the *cmd* argument. Each operation takes an accompanying argument,
 * provided via *attr*, which is a pointer to a union of type *bpf_attr* (see
 * below). The size argument is the size of the union pointed to by *attr*.
 */
/**
 * DOC: eBPF Syscall Commands
 *
 * BPF_MAP_CREATE
 *	Description
 *		Create a map and return a file descriptor that refers to the
 *		map. The close-on-exec file descriptor flag (see **fcntl**\ (2))
 *		is automatically enabled for the new file descriptor.
 *
 *		Applying **close**\ (2) to the file descriptor returned by
 *		**BPF_MAP_CREATE** will delete the map (but see NOTES).
 *
 *	Return
 *		A new file descriptor (a nonnegative integer), or -1 if an
 *		error occurred (in which case, *errno* is set appropriately).
 *
 * BPF_MAP_LOOKUP_ELEM
 *	Description
 *		Look up an element with a given *key* in the map referred to
 *		by the file descriptor *map_fd*.
 *
 *		The *flags* argument may be specified as one of the
 *		following:
 *
 *		**BPF_F_LOCK**
 *			Look up the value of a spin-locked map without
 *			returning the lock. This must be specified if the
 *			elements contain a spinlock.
 *
 *	Return
 *		Returns zero on success. On error, -1 is returned and *errno*
 *		is set appropriately.
 *
 * BPF_MAP_UPDATE_ELEM
 *	Description
 *		Create or update an element (key/value pair) in a specified map.
 *
 *		The *flags* argument should be specified as one of the
 *		following:
 *
 *		**BPF_ANY**
 *			Create a new element or update an existing element.
 *		**BPF_NOEXIST**
 *			Create a new element only if it did not exist.
 *		**BPF_EXIST**
 *			Update an existing element.
 *		**BPF_F_LOCK**
 *			Update a spin_lock-ed map element.
 *
 *	Return
 *		Returns zero on success. On error, -1 is returned and *errno*
 *		is set appropriately.
 *
 *		May set *errno* to **EINVAL**, **EPERM**, **ENOMEM**,
 *		**E2BIG**, **EEXIST**, or **ENOENT**.
 *
 *		**E2BIG**
 *			The number of elements in the map reached the
 *			*max_entries* limit specified at map creation time.
 *		**EEXIST**
 *			If *flags* specifies **BPF_NOEXIST** and the element
 *			with *key* already exists in the map.
 *		**ENOENT**
 *			If *flags* specifies **BPF_EXIST** and the element with
 *			*key* does not exist in the map.
 *
 * BPF_MAP_DELETE_ELEM
 *	Description
 *		Look up and delete an element by key in a specified map.
 *
 *	Return
 *		Returns zero on success. On error, -1 is returned and *errno*
 *		is set appropriately.
 *
 * BPF_MAP_GET_NEXT_KEY
 *	Description
 *		Look up an element by key in a specified map and return the key
 *		of the next element. Can be used to iterate over all elements
 *		in the map.
 *
 *	Return
 *		Returns zero on success. On error, -1 is returned and *errno*
 *		is set appropriately.
 *
 *		The following cases can be used to iterate over all elements of
 *		the map:
 *
 *		* If *key* is not found, the operation returns zero and sets
 *		  the *next_key* pointer to the key of the first element.
 *		* If *key* is found, the operation returns zero and sets the
 *		  *next_key* pointer to the key of the next element.
 *		* If *key* is the last element, returns -1 and *errno* is set
 *		  to **ENOENT**.
 *
 *		May set *errno* to **ENOMEM**, **EFAULT**, **EPERM**, or
 *		**EINVAL** on error.
 *
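 *		As an illustration (an editor's sketch, not original man-page
 *		text), user space can walk a map with 4-byte keys like this,
 *		assuming a valid *map_fd* and the usual **syscall**\ (2)
 *		wrapper; a zero *key* (i.e. a NULL key pointer) retrieves the
 *		first key:
 *
 *		::
 *
 *			__u32 key, next_key;
 *			union bpf_attr attr = {};
 *
 *			attr.map_fd = map_fd;
 *			attr.next_key = (__u64)(unsigned long)&next_key;
 *			while (!syscall(__NR_bpf, BPF_MAP_GET_NEXT_KEY,
 *					&attr, sizeof(attr))) {
 *				key = next_key;
 *				attr.key = (__u64)(unsigned long)&key;
 *			}
 *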
 * BPF_PROG_LOAD
 *	Description
 *		Verify and load an eBPF program, returning a new file
 *		descriptor associated with the program.
 *
 *		Applying **close**\ (2) to the file descriptor returned by
 *		**BPF_PROG_LOAD** will unload the eBPF program (but see NOTES).
 *
 *		The close-on-exec file descriptor flag (see **fcntl**\ (2)) is
 *		automatically enabled for the new file descriptor.
 *
 *	Return
 *		A new file descriptor (a nonnegative integer), or -1 if an
 *		error occurred (in which case, *errno* is set appropriately).
 *
 * BPF_OBJ_PIN
 *	Description
 *		Pin an eBPF program or map referred to by the specified
 *		*bpf_fd* to the provided *pathname* on the filesystem.
 *
 *		The *pathname* argument must not contain a dot (".").
 *
 *		On success, *pathname* retains a reference to the eBPF object,
 *		preventing deallocation of the object when the original
 *		*bpf_fd* is closed. This allows the eBPF object to live beyond
 *		**close**\ (\ *bpf_fd*\ ), and hence the lifetime of the parent
 *		process.
 *
 *		Applying **unlink**\ (2) or similar calls to the *pathname*
 *		unpins the object from the filesystem, removing the reference.
 *		If no other file descriptors or filesystem nodes refer to the
 *		same object, it will be deallocated (see NOTES).
 *
 *		The filesystem type for the parent directory of *pathname* must
 *		be **BPF_FS_MAGIC**.
 *
 *	Return
 *		Returns zero on success. On error, -1 is returned and *errno*
 *		is set appropriately.
 *
 * BPF_OBJ_GET
 *	Description
 *		Open a file descriptor for the eBPF object pinned to the
 *		specified *pathname*.
 *
 *	Return
 *		A new file descriptor (a nonnegative integer), or -1 if an
 *		error occurred (in which case, *errno* is set appropriately).
 *
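 *	As an illustration (an editor's sketch, not original man-page text),
 *	pinning a map and re-opening it later might look like the following;
 *	the pathname is hypothetical:
 *
 *	::
 *
 *		union bpf_attr attr = {};
 *
 *		attr.pathname = (__u64)(unsigned long)"/sys/fs/bpf/my_map";
 *		attr.bpf_fd = map_fd;
 *		syscall(__NR_bpf, BPF_OBJ_PIN, &attr, sizeof(attr));
 *
 *		attr.bpf_fd = 0;
 *		new_fd = syscall(__NR_bpf, BPF_OBJ_GET, &attr, sizeof(attr));
 *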
 * BPF_PROG_ATTACH
 *	Description
 *		Attach an eBPF program to a *target_fd* at the specified
 *		*attach_type* hook.
 *
 *		The *attach_type* specifies the eBPF attachment point to
 *		attach the program to, and must be one of *bpf_attach_type*
 *		(see below).
 *
 *		The *attach_bpf_fd* must be a valid file descriptor for a
 *		loaded eBPF program of a cgroup, flow dissector, LIRC, sockmap
 *		or sock_ops type corresponding to the specified *attach_type*.
 *
 *		The *target_fd* must be a valid file descriptor for a kernel
 *		object which depends on the attach type of *attach_bpf_fd*:
 *
 *		**BPF_PROG_TYPE_CGROUP_DEVICE**,
 *		**BPF_PROG_TYPE_CGROUP_SKB**,
 *		**BPF_PROG_TYPE_CGROUP_SOCK**,
 *		**BPF_PROG_TYPE_CGROUP_SOCK_ADDR**,
 *		**BPF_PROG_TYPE_CGROUP_SOCKOPT**,
 *		**BPF_PROG_TYPE_CGROUP_SYSCTL**,
 *		**BPF_PROG_TYPE_SOCK_OPS**
 *
 *			Control Group v2 hierarchy with the eBPF controller
 *			enabled. Requires the kernel to be compiled with
 *			**CONFIG_CGROUP_BPF**.
 *
 *		**BPF_PROG_TYPE_FLOW_DISSECTOR**
 *
 *			Network namespace (e.g. /proc/self/ns/net).
 *
 *		**BPF_PROG_TYPE_LIRC_MODE2**
 *
 *			LIRC device path (e.g. /dev/lircN). Requires the kernel
 *			to be compiled with **CONFIG_BPF_LIRC_MODE2**.
 *
 *		**BPF_PROG_TYPE_SK_SKB**,
 *		**BPF_PROG_TYPE_SK_MSG**
 *
 *			eBPF map of socket type (e.g. **BPF_MAP_TYPE_SOCKHASH**).
 *
 *	Return
 *		Returns zero on success. On error, -1 is returned and *errno*
 *		is set appropriately.
 *
 * BPF_PROG_DETACH
 *	Description
 *		Detach the eBPF program associated with the *target_fd* at the
 *		hook specified by *attach_type*. The program must have been
 *		previously attached using **BPF_PROG_ATTACH**.
 *
 *	Return
 *		Returns zero on success. On error, -1 is returned and *errno*
 *		is set appropriately.
 *
 * BPF_PROG_TEST_RUN
 *	Description
 *		Run the eBPF program associated with the *prog_fd* a *repeat*
 *		number of times against a provided program context *ctx_in* and
 *		data *data_in*, and return the modified program context
 *		*ctx_out*, *data_out* (for example, packet data), result of the
 *		execution *retval*, and *duration* of the test run.
 *
 *		The sizes of the buffers provided as input and output
 *		parameters *ctx_in*, *ctx_out*, *data_in*, and *data_out* must
 *		be provided in the corresponding variables *ctx_size_in*,
 *		*ctx_size_out*, *data_size_in*, and/or *data_size_out*. If any
 *		of these parameters are not provided (i.e. set to NULL), the
 *		corresponding size field must be zero.
 *
 *		Some program types have particular requirements:
 *
 *		**BPF_PROG_TYPE_SK_LOOKUP**
 *			*data_in* and *data_out* must be NULL.
 *
 *		**BPF_PROG_TYPE_RAW_TRACEPOINT**,
 *		**BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE**
 *
 *			*ctx_out*, *data_in* and *data_out* must be NULL.
 *			*repeat* must be zero.
 *
 *		BPF_PROG_RUN is an alias for BPF_PROG_TEST_RUN.
 *
 *	Return
 *		Returns zero on success. On error, -1 is returned and *errno*
 *		is set appropriately.
 *
 *		**ENOSPC**
 *			Either *data_size_out* or *ctx_size_out* is too small.
 *		**ENOTSUPP**
 *			This command is not supported by the program type of
 *			the program referred to by *prog_fd*.
 *
 * BPF_PROG_GET_NEXT_ID
 *	Description
 *		Fetch the next eBPF program currently loaded into the kernel.
 *
 *		Looks for the eBPF program with an id greater than *start_id*
 *		and updates *next_id* on success. If no other eBPF programs
 *		remain with ids higher than *start_id*, returns -1 and sets
 *		*errno* to **ENOENT**.
 *
 *	Return
 *		Returns zero on success. On error, or when no id remains, -1
 *		is returned and *errno* is set appropriately.
 *
 * BPF_MAP_GET_NEXT_ID
 *	Description
 *		Fetch the next eBPF map currently loaded into the kernel.
 *
 *		Looks for the eBPF map with an id greater than *start_id*
 *		and updates *next_id* on success. If no other eBPF maps
 *		remain with ids higher than *start_id*, returns -1 and sets
 *		*errno* to **ENOENT**.
 *
 *	Return
 *		Returns zero on success. On error, or when no id remains, -1
 *		is returned and *errno* is set appropriately.
 *
 * BPF_PROG_GET_FD_BY_ID
 *	Description
 *		Open a file descriptor for the eBPF program corresponding to
 *		*prog_id*.
 *
 *	Return
 *		A new file descriptor (a nonnegative integer), or -1 if an
 *		error occurred (in which case, *errno* is set appropriately).
 *
 * BPF_MAP_GET_FD_BY_ID
 *	Description
 *		Open a file descriptor for the eBPF map corresponding to
 *		*map_id*.
 *
 *	Return
 *		A new file descriptor (a nonnegative integer), or -1 if an
 *		error occurred (in which case, *errno* is set appropriately).
 *
 * BPF_OBJ_GET_INFO_BY_FD
 *	Description
 *		Obtain information about the eBPF object corresponding to
 *		*bpf_fd*.
 *
 *		Populates up to *info_len* bytes of *info*, which will be in
 *		one of the following formats depending on the eBPF object type
 *		of *bpf_fd*:
 *
 *		* **struct bpf_prog_info**
 *		* **struct bpf_map_info**
 *		* **struct bpf_btf_info**
 *		* **struct bpf_link_info**
 *
 *	Return
 *		Returns zero on success. On error, -1 is returned and *errno*
 *		is set appropriately.
 *
 * BPF_PROG_QUERY
 *	Description
 *		Obtain information about eBPF programs associated with the
 *		specified *attach_type* hook.
 *
 *		The *target_fd* must be a valid file descriptor for a kernel
 *		object which depends on the attach type of *attach_bpf_fd*:
 *
 *		**BPF_PROG_TYPE_CGROUP_DEVICE**,
 *		**BPF_PROG_TYPE_CGROUP_SKB**,
 *		**BPF_PROG_TYPE_CGROUP_SOCK**,
 *		**BPF_PROG_TYPE_CGROUP_SOCK_ADDR**,
 *		**BPF_PROG_TYPE_CGROUP_SOCKOPT**,
 *		**BPF_PROG_TYPE_CGROUP_SYSCTL**,
 *		**BPF_PROG_TYPE_SOCK_OPS**
 *
 *			Control Group v2 hierarchy with the eBPF controller
 *			enabled. Requires the kernel to be compiled with
 *			**CONFIG_CGROUP_BPF**.
 *
 *		**BPF_PROG_TYPE_FLOW_DISSECTOR**
 *
 *			Network namespace (e.g. /proc/self/ns/net).
 *
 *		**BPF_PROG_TYPE_LIRC_MODE2**
 *
 *			LIRC device path (e.g. /dev/lircN). Requires the kernel
 *			to be compiled with **CONFIG_BPF_LIRC_MODE2**.
 *
 *		**BPF_PROG_QUERY** always fetches the number of programs
 *		attached and the *attach_flags* which were used to attach those
 *		programs. Additionally, if *prog_ids* is nonzero and the number
 *		of attached programs is less than *prog_cnt*, populates
 *		*prog_ids* with the eBPF program ids of the programs attached
 *		at *target_fd*.
 *
 *		The following flags may alter the result:
 *
 *		**BPF_F_QUERY_EFFECTIVE**
 *			Only return information regarding programs which are
 *			currently effective at the specified *target_fd*.
 *
 *	Return
 *		Returns zero on success. On error, -1 is returned and *errno*
 *		is set appropriately.
 *
 * BPF_RAW_TRACEPOINT_OPEN
 *	Description
 *		Attach an eBPF program to a tracepoint *name* to access kernel
 *		internal arguments of the tracepoint in their raw form.
 *
 *		The *prog_fd* must be a valid file descriptor associated with
 *		a loaded eBPF program of type **BPF_PROG_TYPE_RAW_TRACEPOINT**.
 *
 *		No ABI guarantees are made about the content of tracepoint
 *		arguments exposed to the corresponding eBPF program.
 *
 *		Applying **close**\ (2) to the file descriptor returned by
 *		**BPF_RAW_TRACEPOINT_OPEN** will detach the eBPF program from
 *		the tracepoint (but see NOTES).
 *
 *	Return
 *		A new file descriptor (a nonnegative integer), or -1 if an
 *		error occurred (in which case, *errno* is set appropriately).
 *
 * BPF_BTF_LOAD
 *	Description
 *		Verify and load BPF Type Format (BTF) metadata into the kernel,
 *		returning a new file descriptor associated with the metadata.
 *		BTF is described in more detail at
 *		https://www.kernel.org/doc/html/latest/bpf/btf.html.
 *
 *		The *btf* parameter must point to valid memory providing
 *		*btf_size* bytes of BTF binary metadata.
 *
 *		The returned file descriptor can be passed to other **bpf**\ ()
 *		subcommands such as **BPF_PROG_LOAD** or **BPF_MAP_CREATE** to
 *		associate the BTF with those objects.
 *
 *		Similar to **BPF_PROG_LOAD**, **BPF_BTF_LOAD** has optional
 *		parameters to specify a *btf_log_buf*, *btf_log_size* and
 *		*btf_log_level* which allow the kernel to return freeform log
 *		output regarding the BTF verification process.
 *
 *	Return
 *		A new file descriptor (a nonnegative integer), or -1 if an
 *		error occurred (in which case, *errno* is set appropriately).
 *
 * BPF_BTF_GET_FD_BY_ID
 *	Description
 *		Open a file descriptor for the BPF Type Format (BTF)
 *		corresponding to *btf_id*.
 *
 *	Return
 *		A new file descriptor (a nonnegative integer), or -1 if an
 *		error occurred (in which case, *errno* is set appropriately).
 *
 * BPF_TASK_FD_QUERY
 *	Description
 *		Obtain information about eBPF programs associated with the
 *		target process identified by *pid* and *fd*.
 *
 *		If the *pid* and *fd* are associated with a tracepoint, kprobe
 *		or uprobe perf event, then the *prog_id* and *fd_type* will
 *		be populated with the eBPF program id and file descriptor type
 *		of type **bpf_task_fd_type**. If associated with a kprobe or
 *		uprobe, the *probe_offset* and *probe_addr* will also be
 *		populated. Optionally, if *buf* is provided, then up to
 *		*buf_len* bytes of *buf* will be populated with the name of
 *		the tracepoint, kprobe or uprobe.
 *
 *		The resulting *prog_id* may be introspected in deeper detail
 *		using **BPF_PROG_GET_FD_BY_ID** and **BPF_OBJ_GET_INFO_BY_FD**.
 *
 *	Return
 *		Returns zero on success. On error, -1 is returned and *errno*
 *		is set appropriately.
 *
 * BPF_MAP_LOOKUP_AND_DELETE_ELEM
 *	Description
 *		Look up an element with the given *key* in the map referred to
 *		by the file descriptor *fd*, and if found, delete the element.
 *
 *		For **BPF_MAP_TYPE_QUEUE** and **BPF_MAP_TYPE_STACK** map
 *		types, the *flags* argument needs to be set to 0, but for other
 *		map types, it may be specified as:
 *
 *		**BPF_F_LOCK**
 *			Look up and delete the value of a spin-locked map
 *			without returning the lock. This must be specified if
 *			the elements contain a spinlock.
 *
 *		The **BPF_MAP_TYPE_QUEUE** and **BPF_MAP_TYPE_STACK** map types
 *		implement this command as a "pop" operation, deleting the top
 *		element rather than one corresponding to *key*.
 *		The *key* and *key_len* parameters should be zeroed when
 *		issuing this operation for these map types.
 *
 *		This command is only valid for the following map types:
 *
 *		* **BPF_MAP_TYPE_QUEUE**
 *		* **BPF_MAP_TYPE_STACK**
 *		* **BPF_MAP_TYPE_HASH**
 *		* **BPF_MAP_TYPE_PERCPU_HASH**
 *		* **BPF_MAP_TYPE_LRU_HASH**
 *		* **BPF_MAP_TYPE_LRU_PERCPU_HASH**
 *
 *	Return
 *		Returns zero on success. On error, -1 is returned and *errno*
 *		is set appropriately.
 *
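 *	As an illustration (an editor's sketch, not original man-page text),
 *	popping one 8-byte value from a **BPF_MAP_TYPE_QUEUE** map leaves the
 *	key fields zeroed, as described above:
 *
 *	::
 *
 *		__u64 value;
 *		union bpf_attr attr = {};
 *
 *		attr.map_fd = queue_fd;
 *		attr.value = (__u64)(unsigned long)&value;
 *		syscall(__NR_bpf, BPF_MAP_LOOKUP_AND_DELETE_ELEM,
 *			&attr, sizeof(attr));
 *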
 * BPF_MAP_FREEZE
 *	Description
 *		Freeze the permissions of the specified map.
 *
 *		Write permissions may be frozen by passing zero *flags*.
 *		Upon success, no future syscall invocations may alter the
 *		map state of *map_fd*. Write operations from eBPF programs
 *		are still possible for a frozen map.
 *
 *		Not supported for maps of type **BPF_MAP_TYPE_STRUCT_OPS**.
 *
 *	Return
 *		Returns zero on success. On error, -1 is returned and *errno*
 *		is set appropriately.
 *
 * BPF_BTF_GET_NEXT_ID
 *	Description
 *		Fetch the next BPF Type Format (BTF) object currently loaded
 *		into the kernel.
 *
 *		Looks for the BTF object with an id greater than *start_id*
 *		and updates *next_id* on success. If no other BTF objects
 *		remain with ids higher than *start_id*, returns -1 and sets
 *		*errno* to **ENOENT**.
 *
 *	Return
 *		Returns zero on success. On error, or when no id remains, -1
 *		is returned and *errno* is set appropriately.
 *
 * BPF_MAP_LOOKUP_BATCH
 *	Description
 *		Iterate and fetch multiple elements in a map.
 *
 *		Two opaque values are used to manage batch operations,
 *		*in_batch* and *out_batch*. Initially, *in_batch* must be set
 *		to NULL to begin the batched operation. After each subsequent
 *		**BPF_MAP_LOOKUP_BATCH**, the caller should pass the resultant
 *		*out_batch* as the *in_batch* for the next operation to
 *		continue iteration from the current point.
 *
 *		The *keys* and *values* are output parameters which must point
 *		to memory large enough to hold *count* items based on the key
 *		and value size of the map *map_fd*. The *keys* buffer must be
 *		of *key_size* * *count*. The *values* buffer must be of
 *		*value_size* * *count*.
 *
 *		The *elem_flags* argument may be specified as one of the
 *		following:
 *
 *		**BPF_F_LOCK**
 *			Look up the value of a spin-locked map without
 *			returning the lock. This must be specified if the
 *			elements contain a spinlock.
 *
 *		On success, *count* elements from the map are copied into the
 *		user buffer, with the keys copied into *keys* and the values
 *		copied into the corresponding indices in *values*.
 *
 *		If an error is returned and *errno* is not **EFAULT**, *count*
 *		is set to the number of successfully processed elements.
 *
 *	Return
 *		Returns zero on success. On error, -1 is returned and *errno*
 *		is set appropriately.
 *
 *		May set *errno* to **ENOSPC** to indicate that *keys* or
 *		*values* is too small to dump an entire bucket during
 *		iteration of a hash-based map type.
 *
 * BPF_MAP_LOOKUP_AND_DELETE_BATCH
 *	Description
 *		Iterate and delete all elements in a map.
 *
 *		This operation has the same behavior as
 *		**BPF_MAP_LOOKUP_BATCH** with two exceptions:
 *
 *		* Every element that is successfully returned is also deleted
 *		  from the map. This is at least *count* elements. Note that
 *		  *count* is both an input and an output parameter.
 *		* Upon returning with *errno* set to **EFAULT**, up to
 *		  *count* elements may be deleted without returning the keys
 *		  and values of the deleted elements.
 *
 *	Return
 *		Returns zero on success. On error, -1 is returned and *errno*
 *		is set appropriately.
 *
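 *	As an illustration (an editor's sketch, not original man-page text),
 *	one **BPF_MAP_LOOKUP_BATCH** step over a map with 4-byte keys and
 *	8-byte values; the zeroed *in_batch* starts the iteration and *out*
 *	receives the opaque continuation token:
 *
 *	::
 *
 *		__u32 keys[64];
 *		__u64 values[64], out = 0;
 *		union bpf_attr attr = {};
 *
 *		attr.batch.map_fd = map_fd;
 *		attr.batch.out_batch = (__u64)(unsigned long)&out;
 *		attr.batch.keys = (__u64)(unsigned long)keys;
 *		attr.batch.values = (__u64)(unsigned long)values;
 *		attr.batch.count = 64;
 *		syscall(__NR_bpf, BPF_MAP_LOOKUP_BATCH, &attr, sizeof(attr));
 *
 *	A following step would pass *out* back through *in_batch* to continue
 *	from where the previous call stopped.
 *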
 * BPF_MAP_UPDATE_BATCH
 *	Description
 *		Update multiple elements in a map by *key*.
 *
 *		The *keys* and *values* are input parameters which must point
 *		to memory large enough to hold *count* items based on the key
 *		and value size of the map *map_fd*. The *keys* buffer must be
 *		of *key_size* * *count*. The *values* buffer must be of
 *		*value_size* * *count*.
 *
 *		Each element specified in *keys* is sequentially updated to the
 *		value in the corresponding index in *values*. The *in_batch*
 *		and *out_batch* parameters are ignored and should be zeroed.
 *
 *		The *elem_flags* argument should be specified as one of the
 *		following:
 *
 *		**BPF_ANY**
 *			Create new elements or update existing elements.
 *		**BPF_NOEXIST**
 *			Create new elements only if they do not exist.
 *		**BPF_EXIST**
 *			Update existing elements.
 *		**BPF_F_LOCK**
 *			Update spin_lock-ed map elements. This must be
 *			specified if the map value contains a spinlock.
 *
 *		On success, *count* elements from the map are updated.
 *
 *		If an error is returned and *errno* is not **EFAULT**, *count*
 *		is set to the number of successfully processed elements.
 *
 *	Return
 *		Returns zero on success. On error, -1 is returned and *errno*
 *		is set appropriately.
 *
 *		May set *errno* to **EINVAL**, **EPERM**, **ENOMEM**, or
 *		**E2BIG**. **E2BIG** indicates that the number of elements in
 *		the map reached the *max_entries* limit specified at map
 *		creation time.
 *
 *		May set *errno* to one of the following error codes under
 *		specific circumstances:
 *
 *		**EEXIST**
 *			If *flags* specifies **BPF_NOEXIST** and the element
 *			with *key* already exists in the map.
 *		**ENOENT**
 *			If *flags* specifies **BPF_EXIST** and the element with
 *			*key* does not exist in the map.
 *
 * BPF_MAP_DELETE_BATCH
 *	Description
 *		Delete multiple elements in a map by *key*.
 *
 *		The *keys* parameter is an input parameter which must point
 *		to memory large enough to hold *count* items based on the key
 *		size of the map *map_fd*, that is, *key_size* * *count*.
 *
 *		Each element specified in *keys* is sequentially deleted. The
 *		*in_batch*, *out_batch*, and *values* parameters are ignored
 *		and should be zeroed.
 *
 *		The *elem_flags* argument may be specified as one of the
 *		following:
 *
 *		**BPF_F_LOCK**
 *			Look up the value of a spin-locked map without
 *			returning the lock. This must be specified if the
 *			elements contain a spinlock.
 *
 *		On success, *count* elements from the map are deleted.
 *
 *		If an error is returned and *errno* is not **EFAULT**, *count*
 *		is set to the number of successfully processed elements. If
 *		*errno* is **EFAULT**, up to *count* elements may have been
 *		deleted.
 *
 *	Return
 *		Returns zero on success. On error, -1 is returned and *errno*
 *		is set appropriately.
 *
 * BPF_LINK_CREATE
 *	Description
 *		Attach an eBPF program to a *target_fd* at the specified
 *		*attach_type* hook and return a file descriptor handle for
 *		managing the link.
 *
 *	Return
 *		A new file descriptor (a nonnegative integer), or -1 if an
 *		error occurred (in which case, *errno* is set appropriately).
 *
 * BPF_LINK_UPDATE
 *	Description
 *		Update the eBPF program in the specified *link_fd* to
 *		*new_prog_fd*.
 *
 *	Return
 *		Returns zero on success. On error, -1 is returned and *errno*
 *		is set appropriately.
 *
 * BPF_LINK_GET_FD_BY_ID
 *	Description
 *		Open a file descriptor for the eBPF Link corresponding to
 *		*link_id*.
 *
 *	Return
 *		A new file descriptor (a nonnegative integer), or -1 if an
 *		error occurred (in which case, *errno* is set appropriately).
 *
 * BPF_LINK_GET_NEXT_ID
 *	Description
 *		Fetch the next eBPF link currently loaded into the kernel.
 *
 *		Looks for the eBPF link with an id greater than *start_id*
 *		and updates *next_id* on success. If no other eBPF links
 *		remain with ids higher than *start_id*, returns -1 and sets
 *		*errno* to **ENOENT**.
 *
 *	Return
 *		Returns zero on success. On error, or when no id remains, -1
 *		is returned and *errno* is set appropriately.
 *
 * BPF_ENABLE_STATS
 *	Description
 *		Enable eBPF runtime statistics gathering.
 *
 *		Runtime statistics gathering for the eBPF runtime is disabled
 *		by default to minimize the corresponding performance overhead.
 *		This command enables statistics globally.
 *
 *		Multiple programs may independently enable statistics.
 *		After gathering the desired statistics, eBPF runtime statistics
 *		may be disabled again by calling **close**\ (2) for the file
 *		descriptor returned by this function. Statistics will only be
 *		disabled system-wide when all outstanding file descriptors
 *		returned by prior calls for this subcommand are closed.
 *
 *	Return
 *		A new file descriptor (a nonnegative integer), or -1 if an
 *		error occurred (in which case, *errno* is set appropriately).
 *
 * BPF_ITER_CREATE
 *	Description
 *		Create an iterator on top of the specified *link_fd* (as
 *		previously created using **BPF_LINK_CREATE**) and return a
 *		file descriptor that can be used to trigger the iteration.
 *
 *		If the resulting file descriptor is pinned to the filesystem
 *		using **BPF_OBJ_PIN**, then subsequent **read**\ (2) syscalls
 *		for that path will trigger the iterator to read kernel state
 *		using the eBPF program attached to *link_fd*.
 *
 *	Return
 *		A new file descriptor (a nonnegative integer), or -1 if an
 *		error occurred (in which case, *errno* is set appropriately).
 *
 * BPF_LINK_DETACH
 *	Description
 *		Forcefully detach the specified *link_fd* from its
 *		corresponding attachment point.
 *
 *	Return
 *		Returns zero on success. On error, -1 is returned and *errno*
 *		is set appropriately.
 *
 * BPF_PROG_BIND_MAP
 *	Description
 *		Bind a map to the lifetime of an eBPF program.
 *
 *		The map identified by *map_fd* is bound to the program
 *		identified by *prog_fd* and only released when *prog_fd* is
 *		released. This may be used in cases where metadata should be
 *		associated with a program which otherwise does not contain any
 *		references to the map (for example, embedded in the eBPF
 *		program instructions).
 *
 *	Return
 *		Returns zero on success. On error, -1 is returned and *errno*
 *		is set appropriately.
 *
 * NOTES
 *	eBPF objects (maps and programs) can be shared between processes.
 *
 *	* After **fork**\ (2), the child inherits file descriptors
 *	  referring to the same eBPF objects.
 *	* File descriptors referring to eBPF objects can be transferred over
 *	  **unix**\ (7) domain sockets.
 *	* File descriptors referring to eBPF objects can be duplicated in the
 *	  usual way, using **dup**\ (2) and similar calls.
 *	* File descriptors referring to eBPF objects can be pinned to the
 *	  filesystem using the **BPF_OBJ_PIN** command of **bpf**\ (2).
 *
 *	An eBPF object is deallocated only after all file descriptors referring
 *	to the object have been closed and no references remain pinned to the
 *	filesystem or attached (for example, bound to a program or device).
 */
enum bpf_cmd {
	BPF_MAP_CREATE,
	BPF_MAP_LOOKUP_ELEM,
	BPF_MAP_UPDATE_ELEM,
	BPF_MAP_DELETE_ELEM,
	BPF_MAP_GET_NEXT_KEY,
	BPF_PROG_LOAD,
	BPF_OBJ_PIN,
	BPF_OBJ_GET,
	BPF_PROG_ATTACH,
	BPF_PROG_DETACH,
	BPF_PROG_TEST_RUN,
	BPF_PROG_RUN = BPF_PROG_TEST_RUN,
	BPF_PROG_GET_NEXT_ID,
	BPF_MAP_GET_NEXT_ID,
	BPF_PROG_GET_FD_BY_ID,
	BPF_MAP_GET_FD_BY_ID,
	BPF_OBJ_GET_INFO_BY_FD,
	BPF_PROG_QUERY,
	BPF_RAW_TRACEPOINT_OPEN,
	BPF_BTF_LOAD,
	BPF_BTF_GET_FD_BY_ID,
	BPF_TASK_FD_QUERY,
	BPF_MAP_LOOKUP_AND_DELETE_ELEM,
	BPF_MAP_FREEZE,
	BPF_BTF_GET_NEXT_ID,
	BPF_MAP_LOOKUP_BATCH,
	BPF_MAP_LOOKUP_AND_DELETE_BATCH,
	BPF_MAP_UPDATE_BATCH,
	BPF_MAP_DELETE_BATCH,
	BPF_LINK_CREATE,
	BPF_LINK_UPDATE,
	BPF_LINK_GET_FD_BY_ID,
	BPF_LINK_GET_NEXT_ID,
	BPF_ENABLE_STATS,
	BPF_ITER_CREATE,
	BPF_LINK_DETACH,
	BPF_PROG_BIND_MAP,
};

enum bpf_map_type {
	BPF_MAP_TYPE_UNSPEC,
	BPF_MAP_TYPE_HASH,
	BPF_MAP_TYPE_ARRAY,
	BPF_MAP_TYPE_PROG_ARRAY,
	BPF_MAP_TYPE_PERF_EVENT_ARRAY,
	BPF_MAP_TYPE_PERCPU_HASH,
	BPF_MAP_TYPE_PERCPU_ARRAY,
	BPF_MAP_TYPE_STACK_TRACE,
	BPF_MAP_TYPE_CGROUP_ARRAY,
	BPF_MAP_TYPE_LRU_HASH,
	BPF_MAP_TYPE_LRU_PERCPU_HASH,
	BPF_MAP_TYPE_LPM_TRIE,
	BPF_MAP_TYPE_ARRAY_OF_MAPS,
	BPF_MAP_TYPE_HASH_OF_MAPS,
	BPF_MAP_TYPE_DEVMAP,
	BPF_MAP_TYPE_SOCKMAP,
	BPF_MAP_TYPE_CPUMAP,
	BPF_MAP_TYPE_XSKMAP,
	BPF_MAP_TYPE_SOCKHASH,
	BPF_MAP_TYPE_CGROUP_STORAGE,
	BPF_MAP_TYPE_REUSEPORT_SOCKARRAY,
	BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE,
	BPF_MAP_TYPE_QUEUE,
	BPF_MAP_TYPE_STACK,
	BPF_MAP_TYPE_SK_STORAGE,
	BPF_MAP_TYPE_DEVMAP_HASH,
	BPF_MAP_TYPE_STRUCT_OPS,
	BPF_MAP_TYPE_RINGBUF,
	BPF_MAP_TYPE_INODE_STORAGE,
	BPF_MAP_TYPE_TASK_STORAGE,
	BPF_MAP_TYPE_BLOOM_FILTER,
	BPF_MAP_TYPE_USER_RINGBUF,
};

/* Note that tracing related programs such as
 * BPF_PROG_TYPE_{KPROBE,TRACEPOINT,PERF_EVENT,RAW_TRACEPOINT}
 * are not subject to a stable API since kernel internal data
 * structures can change from release to release and may
 * therefore break existing tracing BPF programs. Tracing BPF
 * programs correspond to /a/ specific kernel which is to be
 * analyzed, and not /a/ specific kernel /and/ all future ones.
 */
enum bpf_prog_type {
	BPF_PROG_TYPE_UNSPEC,
	BPF_PROG_TYPE_SOCKET_FILTER,
	BPF_PROG_TYPE_KPROBE,
	BPF_PROG_TYPE_SCHED_CLS,
	BPF_PROG_TYPE_SCHED_ACT,
	BPF_PROG_TYPE_TRACEPOINT,
	BPF_PROG_TYPE_XDP,
	BPF_PROG_TYPE_PERF_EVENT,
	BPF_PROG_TYPE_CGROUP_SKB,
	BPF_PROG_TYPE_CGROUP_SOCK,
	BPF_PROG_TYPE_LWT_IN,
	BPF_PROG_TYPE_LWT_OUT,
	BPF_PROG_TYPE_LWT_XMIT,
	BPF_PROG_TYPE_SOCK_OPS,
	BPF_PROG_TYPE_SK_SKB,
	BPF_PROG_TYPE_CGROUP_DEVICE,
	BPF_PROG_TYPE_SK_MSG,
	BPF_PROG_TYPE_RAW_TRACEPOINT,
	BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
	BPF_PROG_TYPE_LWT_SEG6LOCAL,
	BPF_PROG_TYPE_LIRC_MODE2,
	BPF_PROG_TYPE_SK_REUSEPORT,
	BPF_PROG_TYPE_FLOW_DISSECTOR,
	BPF_PROG_TYPE_CGROUP_SYSCTL,
	BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE,
	BPF_PROG_TYPE_CGROUP_SOCKOPT,
	BPF_PROG_TYPE_TRACING,
	BPF_PROG_TYPE_STRUCT_OPS,
	BPF_PROG_TYPE_EXT,
	BPF_PROG_TYPE_LSM,
	BPF_PROG_TYPE_SK_LOOKUP,
	BPF_PROG_TYPE_SYSCALL, /* a program that can execute syscalls */
};

enum bpf_attach_type {
	BPF_CGROUP_INET_INGRESS,
	BPF_CGROUP_INET_EGRESS,
	BPF_CGROUP_INET_SOCK_CREATE,
	BPF_CGROUP_SOCK_OPS,
	BPF_SK_SKB_STREAM_PARSER,
	BPF_SK_SKB_STREAM_VERDICT,
	BPF_CGROUP_DEVICE,
	BPF_SK_MSG_VERDICT,
	BPF_CGROUP_INET4_BIND,
	BPF_CGROUP_INET6_BIND,
	BPF_CGROUP_INET4_CONNECT,
	BPF_CGROUP_INET6_CONNECT,
	BPF_CGROUP_INET4_POST_BIND,
	BPF_CGROUP_INET6_POST_BIND,
	BPF_CGROUP_UDP4_SENDMSG,
	BPF_CGROUP_UDP6_SENDMSG,
	BPF_LIRC_MODE2,
	BPF_FLOW_DISSECTOR,
	BPF_CGROUP_SYSCTL,
	BPF_CGROUP_UDP4_RECVMSG,
	BPF_CGROUP_UDP6_RECVMSG,
	BPF_CGROUP_GETSOCKOPT,
	BPF_CGROUP_SETSOCKOPT,
	BPF_TRACE_RAW_TP,
	BPF_TRACE_FENTRY,
	BPF_TRACE_FEXIT,
	BPF_MODIFY_RETURN,
	BPF_LSM_MAC,
	BPF_TRACE_ITER,
	BPF_CGROUP_INET4_GETPEERNAME,
	BPF_CGROUP_INET6_GETPEERNAME,
	BPF_CGROUP_INET4_GETSOCKNAME,
	BPF_CGROUP_INET6_GETSOCKNAME,
	BPF_XDP_DEVMAP,
	BPF_CGROUP_INET_SOCK_RELEASE,
	BPF_XDP_CPUMAP,
	BPF_SK_LOOKUP,
	BPF_XDP,
	BPF_SK_SKB_VERDICT,
	BPF_SK_REUSEPORT_SELECT,
	BPF_SK_REUSEPORT_SELECT_OR_MIGRATE,
	BPF_PERF_EVENT,
	BPF_TRACE_KPROBE_MULTI,
	BPF_LSM_CGROUP,
	__MAX_BPF_ATTACH_TYPE
};

#define MAX_BPF_ATTACH_TYPE __MAX_BPF_ATTACH_TYPE

enum bpf_link_type {
	BPF_LINK_TYPE_UNSPEC = 0,
	BPF_LINK_TYPE_RAW_TRACEPOINT = 1,
	BPF_LINK_TYPE_TRACING = 2,
	BPF_LINK_TYPE_CGROUP = 3,
	BPF_LINK_TYPE_ITER = 4,
	BPF_LINK_TYPE_NETNS = 5,
	BPF_LINK_TYPE_XDP = 6,
	BPF_LINK_TYPE_PERF_EVENT = 7,
	BPF_LINK_TYPE_KPROBE_MULTI = 8,
	BPF_LINK_TYPE_STRUCT_OPS = 9,

	MAX_BPF_LINK_TYPE,
};

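/* An editor's illustration (not part of the UAPI) of a BPF_PROG_ATTACH call
 * combining an attach type from above with the cgroup attach flags defined
 * below:
 *
 * ::
 *
 *	union bpf_attr attr = {};
 *
 *	attr.target_fd = cgroup_fd;
 *	attr.attach_bpf_fd = prog_fd;
 *	attr.attach_type = BPF_CGROUP_INET_INGRESS;
 *	attr.attach_flags = BPF_F_ALLOW_MULTI;
 *	syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr));
 */
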
/* cgroup-bpf attach flags used in BPF_PROG_ATTACH command
 *
 * NONE(default): No further bpf programs allowed in the subtree.
 *
 * BPF_F_ALLOW_OVERRIDE: If a sub-cgroup installs some bpf program,
 * the program in this cgroup yields to the sub-cgroup program.
 *
 * BPF_F_ALLOW_MULTI: If a sub-cgroup installs some bpf program,
 * that cgroup program gets run in addition to the program in this cgroup.
 *
 * Only one program is allowed to be attached to a cgroup with
 * the NONE or BPF_F_ALLOW_OVERRIDE flag.
 * Attaching another program on top of NONE or BPF_F_ALLOW_OVERRIDE will
 * release the old program and attach the new one. Attach flags have to match.
 *
 * Multiple programs are allowed to be attached to a cgroup with the
 * BPF_F_ALLOW_MULTI flag. They are executed in FIFO order
 * (those that were attached first, run first).
 * The programs of a sub-cgroup are executed first, then the programs of
 * this cgroup, and then the programs of the parent cgroup.
 * When a child program makes a decision (like picking a TCP CA or sock bind),
 * the parent program has a chance to override it.
 *
 * With BPF_F_ALLOW_MULTI a new program is added to the end of the list of
 * programs for a cgroup. Though it's possible to replace an old program at
 * any position by also specifying the BPF_F_REPLACE flag and the position
 * itself in the replace_bpf_fd attribute. The old program at this position
 * will be released.
 *
 * A cgroup with MULTI or OVERRIDE flag allows any attach flags in sub-cgroups.
 * A cgroup with NONE doesn't allow any programs in sub-cgroups.
 * Ex1:
 * cgrp1 (MULTI progs A, B) ->
 *    cgrp2 (OVERRIDE prog C) ->
 *      cgrp3 (MULTI prog D) ->
 *        cgrp4 (OVERRIDE prog E) ->
 *          cgrp5 (NONE prog F)
 * the event in cgrp5 triggers execution of F,D,A,B in that order.
 * if prog F is detached, the execution is E,D,A,B
 * if prog F and D are detached, the execution is E,A,B
 * if prog F, E and D are detached, the execution is C,A,B
 *
 * All eligible programs are executed regardless of return code from
 * earlier programs.
 */
#define BPF_F_ALLOW_OVERRIDE	(1U << 0)
#define BPF_F_ALLOW_MULTI	(1U << 1)
#define BPF_F_REPLACE		(1U << 2)

/* If BPF_F_STRICT_ALIGNMENT is used in BPF_PROG_LOAD command, the
 * verifier will perform strict alignment checking as if the kernel
 * has been built with CONFIG_EFFICIENT_UNALIGNED_ACCESS not set,
 * and NET_IP_ALIGN defined to 2.
 */
#define BPF_F_STRICT_ALIGNMENT	(1U << 0)

/* If BPF_F_ANY_ALIGNMENT is used in BPF_PROG_LOAD command, the
 * verifier will allow any alignment whatsoever. On platforms
 * with strict alignment requirements for loads and stores (such
 * as sparc and mips) the verifier validates that all loads and
 * stores provably follow this requirement. This flag turns that
 * checking and enforcement off.
 *
 * It is mostly used for testing when we want to validate the
 * context and memory access aspects of the verifier, but because
 * of an unaligned access the alignment check would trigger before
 * the one we are interested in.
 */
#define BPF_F_ANY_ALIGNMENT	(1U << 1)

/* BPF_F_TEST_RND_HI32 is used in BPF_PROG_LOAD command for testing purposes.
 * The verifier does sub-register def/use analysis and identifies instructions
 * whose def only matters for the low 32 bits; the high 32 bits are never
 * referenced later through an implicit zero extension. The verifier therefore
 * notifies JIT back-ends that it is safe to skip clearing the high 32 bits
 * for these instructions, which saves some back-ends a lot of code-gen.
 * However, such an optimization is not necessary on some arches, for example
 * x86_64 and arm64, whose JIT back-ends hence do not use the verifier's
 * analysis result. But we really want a way to verify the correctness of the
 * described optimization on x86_64, on which test suites are frequently
 * exercised.
 *
 * So, this flag is introduced. Once it is set, the verifier will randomize
 * the high 32 bits for those instructions that have been identified as safe
 * to leave uncleared. Then, if the verifier is not doing a correct analysis,
 * the randomization will make tests regress and expose bugs.
 */
#define BPF_F_TEST_RND_HI32	(1U << 2)

/* A verifier-internal test flag. Behavior is undefined. */
#define BPF_F_TEST_STATE_FREQ	(1U << 3)

/* If BPF_F_SLEEPABLE is used in BPF_PROG_LOAD command, the verifier will
 * restrict map and helper usage for such programs. Sleepable BPF programs can
 * only be attached to hooks where the kernel execution context allows
 * sleeping. Such programs are allowed to use helpers that may sleep like
 * bpf_copy_from_user().
 */
#define BPF_F_SLEEPABLE		(1U << 4)

/* If BPF_F_XDP_HAS_FRAGS is used in BPF_PROG_LOAD command, the loaded program
 * fully supports XDP frags.
 */
#define BPF_F_XDP_HAS_FRAGS	(1U << 5)

/* link_create.kprobe_multi.flags used in LINK_CREATE command for
 * BPF_TRACE_KPROBE_MULTI attach type to create a return probe.
 */
#define BPF_F_KPROBE_MULTI_RETURN	(1U << 0)

/* When BPF ldimm64's insn[0].src_reg != 0 then this can have
 * the following extensions:
 *
 * insn[0].src_reg:  BPF_PSEUDO_MAP_[FD|IDX]
 * insn[0].imm:      map fd or fd_idx
 * insn[1].imm:      0
 * insn[0].off:      0
 * insn[1].off:      0
 * ldimm64 rewrite:  address of map
 * verifier type:    CONST_PTR_TO_MAP
 */
#define BPF_PSEUDO_MAP_FD	1
#define BPF_PSEUDO_MAP_IDX	5

/* insn[0].src_reg:  BPF_PSEUDO_MAP_[IDX_]VALUE
 * insn[0].imm:      map fd or fd_idx
 * insn[1].imm:      offset into value
 * insn[0].off:      0
 * insn[1].off:      0
 * ldimm64 rewrite:  address of map[0]+offset
 * verifier type:    PTR_TO_MAP_VALUE
 */
#define BPF_PSEUDO_MAP_VALUE		2
#define BPF_PSEUDO_MAP_IDX_VALUE	6

/* insn[0].src_reg:  BPF_PSEUDO_BTF_ID
 * insn[0].imm:      kernel btf id of VAR
 * insn[1].imm:      0
 * insn[0].off:      0
 * insn[1].off:      0
 * ldimm64 rewrite:  address of the kernel variable
 * verifier type:    PTR_TO_BTF_ID or PTR_TO_MEM, depending on whether the var
 *                   is struct/union.
 */
#define BPF_PSEUDO_BTF_ID	3
/* insn[0].src_reg:  BPF_PSEUDO_FUNC
 * insn[0].imm:      insn offset to the func
 * insn[1].imm:      0
 * insn[0].off:      0
 * insn[1].off:      0
 * ldimm64 rewrite:  address of the function
 * verifier type:    PTR_TO_FUNC.
 */
#define BPF_PSEUDO_FUNC		4

/* when bpf_call->src_reg == BPF_PSEUDO_CALL, bpf_call->imm == pc-relative
 * offset to another bpf function
 */
#define BPF_PSEUDO_CALL		1
/* when bpf_call->src_reg == BPF_PSEUDO_KFUNC_CALL,
 * bpf_call->imm == btf_id of a BTF_KIND_FUNC in the running kernel
 */
#define BPF_PSEUDO_KFUNC_CALL	2
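
/* An editor's illustration (not part of the UAPI): the two instruction slots
 * of an ldimm64 that the verifier rewrites into the address of the map whose
 * file descriptor is map_fd. BPF_LD and BPF_IMM come from
 * <linux/bpf_common.h>; the second slot only carries the upper half of the
 * 64-bit immediate, which is zero here:
 *
 * ::
 *
 *	struct bpf_insn ld_map[2] = {
 *		{ .code    = BPF_LD | BPF_DW | BPF_IMM,
 *		  .dst_reg = BPF_REG_1,
 *		  .src_reg = BPF_PSEUDO_MAP_FD,
 *		  .imm     = map_fd },
 *		{ .imm = 0 },
 *	};
 */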

/* flags for BPF_MAP_UPDATE_ELEM command */
enum {
	BPF_ANY		= 0, /* create new element or update existing */
	BPF_NOEXIST	= 1, /* create new element if it didn't exist */
	BPF_EXIST	= 2, /* update existing element */
	BPF_F_LOCK	= 4, /* spin_lock-ed map_lookup/map_update */
};

/* flags for BPF_MAP_CREATE command */
enum {
	BPF_F_NO_PREALLOC	= (1U << 0),
/* Instead of having one common LRU list in the
 * BPF_MAP_TYPE_LRU_[PERCPU_]HASH map, use a percpu LRU list
 * which can scale and perform better.
 * Note, the LRU nodes (including free nodes) cannot be moved
 * across different LRU lists.
 */
	BPF_F_NO_COMMON_LRU	= (1U << 1),
/* Specify numa node during map creation */
	BPF_F_NUMA_NODE		= (1U << 2),

/* Flags for accessing BPF object from syscall side. */
	BPF_F_RDONLY		= (1U << 3),
	BPF_F_WRONLY		= (1U << 4),

/* Flag for stack_map, store build_id+offset instead of pointer */
	BPF_F_STACK_BUILD_ID	= (1U << 5),

/* Zero-initialize hash function seed. This should only be used for testing. */
	BPF_F_ZERO_SEED		= (1U << 6),

/* Flags for accessing BPF object from program side. */
	BPF_F_RDONLY_PROG	= (1U << 7),
	BPF_F_WRONLY_PROG	= (1U << 8),

/* Clone map from listener for newly accepted socket */
	BPF_F_CLONE		= (1U << 9),

/* Enable memory-mapping BPF map */
	BPF_F_MMAPABLE		= (1U << 10),

/* Share perf_event among processes */
	BPF_F_PRESERVE_ELEMS	= (1U << 11),

/* Create a map that is suitable to be an inner map with dynamic max entries */
	BPF_F_INNER_MAP		= (1U << 12),
};

/* Flags for BPF_PROG_QUERY. */

/* Query effective (directly attached + inherited from ancestor cgroups)
 * programs that will be executed for events within a cgroup.
 * attach_flags with this flag are always returned as 0.
 */
#define BPF_F_QUERY_EFFECTIVE	(1U << 0)

/* Flags for BPF_PROG_TEST_RUN */

/* If set, run the test on the cpu specified by bpf_attr.test.cpu */
#define BPF_F_TEST_RUN_ON_CPU	(1U << 0)
/* If set, XDP frames will be transmitted after processing */
#define BPF_F_TEST_XDP_LIVE_FRAMES	(1U << 1)

/* type for BPF_ENABLE_STATS */
enum bpf_stats_type {
	/* enables run_time_ns and run_cnt */
	BPF_STATS_RUN_TIME = 0,
};

enum bpf_stack_build_id_status {
	/* user space needs an empty entry to identify the end of a trace */
	BPF_STACK_BUILD_ID_EMPTY = 0,
	/* with valid build_id and offset */
	BPF_STACK_BUILD_ID_VALID = 1,
	/* couldn't get build_id, fallback to ip */
	BPF_STACK_BUILD_ID_IP = 2,
};

#define BPF_BUILD_ID_SIZE 20
struct bpf_stack_build_id {
	__s32		status;
	unsigned char	build_id[BPF_BUILD_ID_SIZE];
	union {
		__u64	offset;
		__u64	ip;
	};
};

#define BPF_OBJ_NAME_LEN 16U
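
/* An editor's illustration (not part of the UAPI) of a BPF_MAP_CREATE call
 * filling the first anonymous struct of union bpf_attr, defined below:
 *
 * ::
 *
 *	union bpf_attr attr = {};
 *
 *	attr.map_type = BPF_MAP_TYPE_HASH;
 *	attr.key_size = sizeof(__u32);
 *	attr.value_size = sizeof(__u64);
 *	attr.max_entries = 1024;
 *	attr.map_flags = BPF_F_NO_PREALLOC;
 *	map_fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
 */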

union bpf_attr {
	struct { /* anonymous struct used by BPF_MAP_CREATE command */
		__u32	map_type;	/* one of enum bpf_map_type */
		__u32	key_size;	/* size of key in bytes */
		__u32	value_size;	/* size of value in bytes */
		__u32	max_entries;	/* max number of entries in a map */
		__u32	map_flags;	/* BPF_MAP_CREATE related
					 * flags defined above.
					 */
		__u32	inner_map_fd;	/* fd pointing to the inner map */
		__u32	numa_node;	/* numa node (effective only if
					 * BPF_F_NUMA_NODE is set).
					 */
		char	map_name[BPF_OBJ_NAME_LEN];
		__u32	map_ifindex;	/* ifindex of netdev to create on */
		__u32	btf_fd;		/* fd pointing to a BTF type data */
		__u32	btf_key_type_id;	/* BTF type_id of the key */
		__u32	btf_value_type_id;	/* BTF type_id of the value */
		__u32	btf_vmlinux_value_type_id;/* BTF type_id of a kernel-
						   * struct stored as the
						   * map value
						   */
		/* Any per-map-type extra fields
		 *
		 * BPF_MAP_TYPE_BLOOM_FILTER - the lowest 4 bits indicate the
		 * number of hash functions (if 0, the bloom filter will default
		 * to using 5 hash functions).
		 */
		__u64	map_extra;
	};

	struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */
		__u32		map_fd;
		__aligned_u64	key;
		union {
			__aligned_u64 value;
			__aligned_u64 next_key;
		};
		__u64		flags;
	};

	struct { /* struct used by BPF_MAP_*_BATCH commands */
		__aligned_u64	in_batch;	/* start batch,
						 * NULL to start from beginning
						 */
		__aligned_u64	out_batch;	/* output: next start batch */
		__aligned_u64	keys;
		__aligned_u64	values;
		__u32		count;		/* input/output:
						 * input: # of key/value
						 * elements
						 * output: # of filled elements
						 */
		__u32		map_fd;
		__u64		elem_flags;
		__u64		flags;
	} batch;

	struct { /* anonymous struct used by BPF_PROG_LOAD command */
		__u32		prog_type;	/* one of enum bpf_prog_type */
		__u32		insn_cnt;
		__aligned_u64	insns;
		__aligned_u64	license;
		__u32		log_level;	/* verbosity level of verifier */
		__u32		log_size;	/* size of user buffer */
		__aligned_u64	log_buf;	/* user supplied buffer */
		__u32		kern_version;	/* not used */
		__u32		prog_flags;
		char		prog_name[BPF_OBJ_NAME_LEN];
		__u32		prog_ifindex;	/* ifindex of netdev to prep for */
		/* For some prog types expected attach type must be known at
		 * load time to verify attach type specific parts of prog
		 * (context accesses, allowed helpers, etc).
		 */
		__u32		expected_attach_type;
		__u32		prog_btf_fd;	/* fd pointing to BTF type data */
		__u32		func_info_rec_size;	/* userspace bpf_func_info size */
		__aligned_u64	func_info;	/* func info */
		__u32		func_info_cnt;	/* number of bpf_func_info records */
		__u32		line_info_rec_size;	/* userspace bpf_line_info size */
		__aligned_u64	line_info;	/* line info */
		__u32		line_info_cnt;	/* number of bpf_line_info records */
		__u32		attach_btf_id;	/* in-kernel BTF type id to attach to */
		union {
			/* valid prog_fd to attach to bpf prog */
			__u32		attach_prog_fd;
			/* or valid module BTF object fd or 0 to attach to vmlinux */
			__u32		attach_btf_obj_fd;
		};
		__u32		core_relo_cnt;	/* number of bpf_core_relo */
		__aligned_u64	fd_array;	/* array of FDs */
		__aligned_u64	core_relos;
		__u32		core_relo_rec_size; /* sizeof(struct bpf_core_relo) */
	};

	struct { /* anonymous struct used by BPF_OBJ_* commands */
		__aligned_u64	pathname;
		__u32		bpf_fd;
		__u32		file_flags;
	};

	struct { /* anonymous struct used by BPF_PROG_ATTACH/DETACH commands */
		__u32		target_fd;	/* container object to attach to */
		__u32		attach_bpf_fd;	/* eBPF program to attach */
		__u32		attach_type;
		__u32		attach_flags;
		__u32		replace_bpf_fd;	/* previously attached eBPF
						 * program to replace if
						 * BPF_F_REPLACE is used
						 */
	};

	struct { /* anonymous struct used by BPF_PROG_TEST_RUN command */
		__u32		prog_fd;
		__u32		retval;
		__u32		data_size_in;	/* input: len of data_in */
		__u32		data_size_out;	/* input/output: len of data_out
						 * returns ENOSPC if data_out
						 * is too small.
						 */
		__aligned_u64	data_in;
		__aligned_u64	data_out;
		__u32		repeat;
		__u32		duration;
		__u32		ctx_size_in;	/* input: len of ctx_in */
		__u32		ctx_size_out;	/* input/output: len of ctx_out
						 * returns ENOSPC if ctx_out
						 * is too small.
						 */
		__aligned_u64	ctx_in;
		__aligned_u64	ctx_out;
		__u32		flags;
		__u32		cpu;
		__u32		batch_size;
	} test;

	struct { /* anonymous struct used by BPF_*_GET_*_ID */
		union {
			__u32		start_id;
			__u32		prog_id;
			__u32		map_id;
			__u32		btf_id;
			__u32		link_id;
		};
		__u32		next_id;
		__u32		open_flags;
	};

	struct { /* anonymous struct used by BPF_OBJ_GET_INFO_BY_FD */
		__u32		bpf_fd;
		__u32		info_len;
		__aligned_u64	info;
	} info;
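
	/* An editor's illustration (not part of the UAPI) of using the info
	 * struct above; struct bpf_prog_info is defined further down in the
	 * full header:
	 *
	 * ::
	 *
	 *	struct bpf_prog_info pinfo = {};
	 *	union bpf_attr attr = {};
	 *
	 *	attr.info.bpf_fd = prog_fd;
	 *	attr.info.info_len = sizeof(pinfo);
	 *	attr.info.info = (__u64)(unsigned long)&pinfo;
	 *	syscall(__NR_bpf, BPF_OBJ_GET_INFO_BY_FD, &attr, sizeof(attr));
	 */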

	struct { /* anonymous struct used by BPF_PROG_QUERY command */
		__u32		target_fd;	/* container object to query */
		__u32		attach_type;
		__u32		query_flags;
		__u32		attach_flags;
		__aligned_u64	prog_ids;
		__u32		prog_cnt;
		/* output: per-program attach_flags.
		 * not allowed to be set during effective query.
		 */
		__aligned_u64	prog_attach_flags;
	} query;

	struct { /* anonymous struct used by BPF_RAW_TRACEPOINT_OPEN command */
		__u64		name;
		__u32		prog_fd;
	} raw_tracepoint;

	struct { /* anonymous struct for BPF_BTF_LOAD */
		__aligned_u64	btf;
		__aligned_u64	btf_log_buf;
		__u32		btf_size;
		__u32		btf_log_size;
		__u32		btf_log_level;
	};

	struct {
		__u32		pid;		/* input: pid */
		__u32		fd;		/* input: fd */
		__u32		flags;		/* input: flags */
		__u32		buf_len;	/* input/output: buf len */
		__aligned_u64	buf;		/* input/output:
						 * tp_name for tracepoint
						 * symbol for kprobe
						 * filename for uprobe
						 */
		__u32		prog_id;	/* output: prog_id */
		__u32		fd_type;	/* output: BPF_FD_TYPE_* */
		__u64		probe_offset;	/* output: probe_offset */
		__u64		probe_addr;	/* output: probe_addr */
	} task_fd_query;

	struct { /* struct used by BPF_LINK_CREATE command */
		__u32		prog_fd;	/* eBPF program to attach */
		union {
			__u32	target_fd;	/* object to attach to */
			__u32	target_ifindex;	/* target ifindex */
		};
		__u32		attach_type;	/* attach type */
		__u32		flags;		/* extra flags */
		union {
			__u32	target_btf_id;	/* btf_id of target to attach to */
			struct {
				__aligned_u64	iter_info;	/* extra bpf_iter_link_info */
				__u32		iter_info_len;	/* iter_info length */
			};
			struct {
				/* black box user-provided value passed through
				 * to BPF program at the execution time and
				 * accessible through bpf_get_attach_cookie() BPF helper
				 */
				__u64		bpf_cookie;
			} perf_event;
			struct {
				__u32		flags;
				__u32		cnt;
				__aligned_u64	syms;
				__aligned_u64	addrs;
				__aligned_u64	cookies;
			} kprobe_multi;
			struct {
				/* this is overlaid with the target_btf_id above. */
				__u32		target_btf_id;
				/* black box user-provided value passed through
				 * to BPF program at the execution time and
				 * accessible through bpf_get_attach_cookie() BPF helper
				 */
				__u64		cookie;
			} tracing;
		};
	} link_create;

	struct { /* struct used by BPF_LINK_UPDATE command */
		__u32		link_fd;	/* link fd */
		/* new program fd to update link with */
		__u32		new_prog_fd;
		__u32		flags;		/* extra flags */
		/* expected link's program fd; is specified only if
		 * BPF_F_REPLACE flag is set in flags
		 */
		__u32		old_prog_fd;
	} link_update;

	struct {
		__u32		link_fd;
	} link_detach;

	struct { /* struct used by BPF_ENABLE_STATS command */
		__u32		type;
	} enable_stats;

	struct { /* struct used by BPF_ITER_CREATE command */
		__u32		link_fd;
		__u32		flags;
	} iter_create;

	struct { /* struct used by BPF_PROG_BIND_MAP command */
		__u32		prog_fd;
		__u32		map_fd;
		__u32		flags;		/* extra flags */
	} prog_bind_map;

} __attribute__((aligned(8)));
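
/* An editor's illustration (not part of the UAPI) of a BPF_LINK_CREATE call
 * for a cgroup program, using the link_create struct above:
 *
 * ::
 *
 *	union bpf_attr attr = {};
 *
 *	attr.link_create.prog_fd = prog_fd;
 *	attr.link_create.target_fd = cgroup_fd;
 *	attr.link_create.attach_type = BPF_CGROUP_INET_EGRESS;
 *	link_fd = syscall(__NR_bpf, BPF_LINK_CREATE, &attr, sizeof(attr));
 */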

/* The description below is an attempt at providing documentation to eBPF
 * developers about the multiple available eBPF helper functions. It can be
 * parsed and used to produce a manual page. The workflow is the following,
 * and requires the rst2man utility:
 *
 *     $ ./scripts/bpf_doc.py \
 *             --filename include/uapi/linux/bpf.h > /tmp/bpf-helpers.rst
 *     $ rst2man /tmp/bpf-helpers.rst > /tmp/bpf-helpers.7
 *     $ man /tmp/bpf-helpers.7
 *
 * Note that in order to produce this external documentation, some RST
 * formatting is used in the descriptions to get "bold" and "italics" in
 * manual pages. Also note that the few trailing white spaces are
 * intentional, removing them would break paragraphs for rst2man.
 *
 * Start of BPF helper function descriptions:
 *
 * void *bpf_map_lookup_elem(struct bpf_map *map, const void *key)
 *	Description
 *		Perform a lookup in *map* for an entry associated to *key*.
 *	Return
 *		Map value associated to *key*, or **NULL** if no entry was
 *		found.
 *
 * long bpf_map_update_elem(struct bpf_map *map, const void *key, const void *value, u64 flags)
 *	Description
 *		Add or update the value of the entry associated to *key* in
 *		*map* with *value*. *flags* is one of:
 *
 *		**BPF_NOEXIST**
 *			The entry for *key* must not exist in the map.
 *		**BPF_EXIST**
 *			The entry for *key* must already exist in the map.
 *		**BPF_ANY**
 *			No condition on the existence of the entry for *key*.
 *
 *		Flag value **BPF_NOEXIST** cannot be used for maps of types
 *		**BPF_MAP_TYPE_ARRAY** or **BPF_MAP_TYPE_PERCPU_ARRAY** (all
 *		elements always exist); the helper would return an error.
 *	Return
 *		0 on success, or a negative error in case of failure.
 *
 * long bpf_map_delete_elem(struct bpf_map *map, const void *key)
 *	Description
 *		Delete entry with *key* from *map*.
 *	Return
 *		0 on success, or a negative error in case of failure.
 *
 * long bpf_probe_read(void *dst, u32 size, const void *unsafe_ptr)
 *	Description
 *		For tracing programs, safely attempt to read *size* bytes from
 *		kernel space address *unsafe_ptr* and store the data in *dst*.
 *
 *		Generally, use **bpf_probe_read_user**\ () or
 *		**bpf_probe_read_kernel**\ () instead.
 *	Return
 *		0 on success, or a negative error in case of failure.
 *
 * u64 bpf_ktime_get_ns(void)
 *	Description
 *		Return the time elapsed since system boot, in nanoseconds.
 *		Does not include time the system was suspended.
 *		See: **clock_gettime**\ (**CLOCK_MONOTONIC**)
 *	Return
 *		Current *ktime*.
 *
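 *	As an illustration (an editor's sketch, not original helper text),
 *	the map helpers above are typically combined like this inside an
 *	eBPF program, for a hypothetical map *my_map* with 4-byte keys and
 *	8-byte values:
 *
 *	::
 *
 *		__u32 key = 0;
 *		__u64 one = 1, *val;
 *
 *		val = bpf_map_lookup_elem(&my_map, &key);
 *		if (val)
 *			__sync_fetch_and_add(val, 1);
 *		else
 *			bpf_map_update_elem(&my_map, &key, &one, BPF_NOEXIST);
 *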
 *		However, it usually
 *		defaults to something like:
 *
 *		::
 *
 *			telnet-470   [001] .N.. 419421.045894: 0x00000001: <formatted msg>
 *
 *		In the above:
 *
 *		* ``telnet`` is the name of the current task.
 *		* ``470`` is the PID of the current task.
 *		* ``001`` is the CPU number on which the task is
 *		  running.
 *		* In ``.N..``, each character refers to a set of
 *		  options (whether irqs are enabled, scheduling
 *		  options, whether hard/softirqs are running, level of
 *		  preempt_disabled respectively). **N** means that
 *		  **TIF_NEED_RESCHED** and **PREEMPT_NEED_RESCHED**
 *		  are set.
 *		* ``419421.045894`` is a timestamp.
 *		* ``0x00000001`` is a fake value used by BPF for the
 *		  instruction pointer register.
 *		* ``<formatted msg>`` is the message formatted with
 *		  *fmt*.
 *
 *		The conversion specifiers supported by *fmt* are similar to,
 *		but more limited than, those of printk(). They are **%d**,
 *		**%i**, **%u**, **%x**, **%ld**, **%li**, **%lu**, **%lx**,
 *		**%lld**, **%lli**, **%llu**, **%llx**, **%p**, **%s**. No
 *		modifier (size of field, padding with zeroes, etc.) is
 *		available, and the helper will return **-EINVAL** (but print
 *		nothing) if it encounters an unknown specifier.
 *
 *		Also, note that **bpf_trace_printk**\ () is slow, and should
 *		only be used for debugging purposes. For this reason, a notice
 *		block (spanning several lines) is printed to kernel logs and
 *		states that the helper should not be used "for production use"
 *		the first time this helper is used (or more precisely, when
 *		**trace_printk**\ () buffers are allocated). For passing values
 *		to user space, perf events should be preferred.
 *	Return
 *		The number of bytes written to the buffer, or a negative error
 *		in case of failure.
 *
 * u32 bpf_get_prandom_u32(void)
 *	Description
 *		Get a pseudo-random number.
 *
 *		From a security point of view, this helper uses its own
 *		pseudo-random internal state, and cannot be used to infer the
 *		seed of other random functions in the kernel. However, it is
 *		essential to note that the generator used by the helper is not
 *		cryptographically secure.
 *	Return
 *		A random 32-bit unsigned value.
 *
 * u32 bpf_get_smp_processor_id(void)
 *	Description
 *		Get the SMP (symmetric multiprocessing) processor id. Note that
 *		all programs run with migration disabled, which means that the
 *		SMP processor id is stable during all the execution of the
 *		program.
 *	Return
 *		The SMP id of the processor running the program.
 *
 * long bpf_skb_store_bytes(struct sk_buff *skb, u32 offset, const void *from, u32 len, u64 flags)
 *	Description
 *		Store *len* bytes from address *from* into the packet
 *		associated to *skb*, at *offset*. *flags* are a combination of
 *		**BPF_F_RECOMPUTE_CSUM** (automatically recompute the
 *		checksum for the packet after storing the bytes) and
 *		**BPF_F_INVALIDATE_HASH** (set *skb*\ **->hash**, *skb*\
 *		**->swhash** and *skb*\ **->l4hash** to 0).
 *
 *		A call to this helper is susceptible to change the underlying
 *		packet buffer. Therefore, at load time, all checks on pointers
 *		previously done by the verifier are invalidated and must be
 *		performed again, if the helper is used in combination with
 *		direct packet access.
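 *
 *		As an illustration, a TC program could rewrite the destination
 *		MAC address at the start of an Ethernet frame. This is a
 *		minimal sketch in the style of the other examples in this
 *		file; the constant address is a made-up value:
 *
 *		::
 *
 *			// Overwrite the six destination MAC bytes at offset 0.
 *			// No flags: layer 3/4 checksums do not cover the MAC
 *			// header, so nothing needs to be recomputed here.
 *			char new_dst[6] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };
 *
 *			if (bpf_skb_store_bytes(skb, 0, new_dst, 6, 0) < 0)
 *				return TC_ACT_SHOT;	// drop packet
 *
 *			return TC_ACT_OK;		// accept packet
 *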
 *	Return
 *		0 on success, or a negative error in case of failure.
 *
 * long bpf_l3_csum_replace(struct sk_buff *skb, u32 offset, u64 from, u64 to, u64 size)
 *	Description
 *		Recompute the layer 3 (e.g. IP) checksum for the packet
 *		associated to *skb*. Computation is incremental, so the helper
 *		must know the former value of the header field that was
 *		modified (*from*), the new value of this field (*to*), and the
 *		number of bytes (2 or 4) for this field, stored in *size*.
 *		Alternatively, it is possible to store the difference between
 *		the previous and the new values of the header field in *to*, by
 *		setting *from* and *size* to 0. For both methods, *offset*
 *		indicates the location of the IP checksum within the packet.
 *
 *		This helper works in combination with **bpf_csum_diff**\ (),
 *		which does not update the checksum in-place, but offers more
 *		flexibility and can handle sizes larger than 2 or 4 for the
 *		checksum to update.
 *
 *		A call to this helper is susceptible to change the underlying
 *		packet buffer. Therefore, at load time, all checks on pointers
 *		previously done by the verifier are invalidated and must be
 *		performed again, if the helper is used in combination with
 *		direct packet access.
 *	Return
 *		0 on success, or a negative error in case of failure.
 *
 * long bpf_l4_csum_replace(struct sk_buff *skb, u32 offset, u64 from, u64 to, u64 flags)
 *	Description
 *		Recompute the layer 4 (e.g. TCP, UDP or ICMP) checksum for the
 *		packet associated to *skb*. Computation is incremental, so the
 *		helper must know the former value of the header field that was
 *		modified (*from*), the new value of this field (*to*), and the
 *		number of bytes (2 or 4) for this field, stored on the lowest
 *		four bits of *flags*. Alternatively, it is possible to store
 *		the difference between the previous and the new values of the
 *		header field in *to*, by setting *from* and the four lowest
 *		bits of *flags* to 0. For both methods, *offset* indicates the
 *		location of the layer 4 checksum within the packet. In
 *		addition to the size of the field, actual flags can be
 *		combined into *flags* (with a bitwise OR). With
 *		**BPF_F_MARK_MANGLED_0**, a null checksum is left untouched
 *		(unless **BPF_F_MARK_ENFORCE** is added as well), and for
 *		updates resulting in a null checksum the value is set to
 *		**CSUM_MANGLED_0** instead. Flag **BPF_F_PSEUDO_HDR** indicates
 *		the checksum is to be computed against a pseudo-header.
 *
 *		This helper works in combination with **bpf_csum_diff**\ (),
 *		which does not update the checksum in-place, but offers more
 *		flexibility and can handle sizes larger than 2 or 4 for the
 *		checksum to update.
 *
 *		A call to this helper is susceptible to change the underlying
 *		packet buffer. Therefore, at load time, all checks on pointers
 *		previously done by the verifier are invalidated and must be
 *		performed again, if the helper is used in combination with
 *		direct packet access.
 *	Return
 *		0 on success, or a negative error in case of failure.
 *
 * long bpf_tail_call(void *ctx, struct bpf_map *prog_array_map, u32 index)
 *	Description
 *		This special helper is used to trigger a "tail call", or in
 *		other words, to jump into another eBPF program.
 *		The same stack frame is used (but values on stack and in
 *		registers for the caller are not accessible to the callee).
 *		This mechanism allows for program chaining, either for raising
 *		the maximum number of available eBPF instructions, or to
 *		execute given programs in conditional blocks. For security
 *		reasons, there is an upper limit to the number of successive
 *		tail calls that can be performed.
 *
 *		Upon call of this helper, the program attempts to jump into a
 *		program referenced at index *index* in *prog_array_map*, a
 *		special map of type **BPF_MAP_TYPE_PROG_ARRAY**, and passes
 *		*ctx*, a pointer to the context.
 *
 *		If the call succeeds, the kernel immediately runs the first
 *		instruction of the new program. This is not a function call,
 *		and it never returns to the previous program. If the call
 *		fails, then the helper has no effect, and the caller continues
 *		to run its subsequent instructions. A call can fail if the
 *		destination program for the jump does not exist (i.e. *index*
 *		is greater than or equal to the number of entries in
 *		*prog_array_map*), or if the maximum number of tail calls has
 *		been reached for this chain of programs. This limit is defined
 *		in the kernel by the macro **MAX_TAIL_CALL_CNT** (not
 *		accessible to user space), which is currently set to 33.
 *	Return
 *		0 on success, or a negative error in case of failure.
 *
 * long bpf_clone_redirect(struct sk_buff *skb, u32 ifindex, u64 flags)
 *	Description
 *		Clone and redirect the packet associated to *skb* to another
 *		net device of index *ifindex*. Both ingress and egress
 *		interfaces can be used for redirection. The **BPF_F_INGRESS**
 *		value in *flags* is used to make the distinction (ingress path
 *		is selected if the flag is present, egress path otherwise).
 *		This is the only flag supported for now.
 *
 *		In comparison with **bpf_redirect**\ () helper,
 *		**bpf_clone_redirect**\ () has the associated cost of
 *		duplicating the packet buffer, but this can be executed out of
 *		the eBPF program. Conversely, **bpf_redirect**\ () is more
 *		efficient, but it is handled through an action code where the
 *		redirection happens only after the eBPF program has returned.
 *
 *		A call to this helper is susceptible to change the underlying
 *		packet buffer. Therefore, at load time, all checks on pointers
 *		previously done by the verifier are invalidated and must be
 *		performed again, if the helper is used in combination with
 *		direct packet access.
 *	Return
 *		0 on success, or a negative error in case of failure.
 *
 * u64 bpf_get_current_pid_tgid(void)
 *	Description
 *		Get the current pid and tgid.
 *	Return
 *		A 64-bit integer containing the current tgid and pid, and
 *		created as such:
 *		*current_task*\ **->tgid << 32 \|**
 *		*current_task*\ **->pid**.
 *
 * u64 bpf_get_current_uid_gid(void)
 *	Description
 *		Get the current uid and gid.
 *	Return
 *		A 64-bit integer containing the current GID and UID, and
 *		created as such: *current_gid* **<< 32 \|** *current_uid*.
 *
 * long bpf_get_current_comm(void *buf, u32 size_of_buf)
 *	Description
 *		Copy the **comm** attribute of the current task into *buf* of
 *		*size_of_buf*.
 *		The **comm** attribute contains the name of
 *		the executable (excluding the path) for the current task. The
 *		*size_of_buf* must be strictly positive. On success, the
 *		helper makes sure that the *buf* is NUL-terminated. On failure,
 *		it is filled with zeroes.
 *	Return
 *		0 on success, or a negative error in case of failure.
 *
 * u32 bpf_get_cgroup_classid(struct sk_buff *skb)
 *	Description
 *		Retrieve the classid for the current task, i.e. for the net_cls
 *		cgroup to which *skb* belongs.
 *
 *		This helper can be used on TC egress path, but not on ingress.
 *
 *		The net_cls cgroup provides an interface to tag network packets
 *		based on a user-provided identifier for all traffic coming from
 *		the tasks belonging to the related cgroup. See also the related
 *		kernel documentation, available from the Linux sources in file
 *		*Documentation/admin-guide/cgroup-v1/net_cls.rst*.
 *
 *		The Linux kernel has two versions for cgroups: there are
 *		cgroups v1 and cgroups v2. Both are available to users, who can
 *		use a mixture of them, but note that the net_cls cgroup is for
 *		cgroup v1 only. This makes it incompatible with BPF programs
 *		run on cgroups, which is a cgroup-v2-only feature (a socket can
 *		only hold data for one version of cgroups at a time).
 *
 *		This helper is only available if the kernel was compiled with
 *		the **CONFIG_CGROUP_NET_CLASSID** configuration option set to
 *		"**y**" or to "**m**".
 *	Return
 *		The classid, or 0 for the default unconfigured classid.
 *
 * long bpf_skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci)
 *	Description
 *		Push a *vlan_tci* (VLAN tag control information) of protocol
 *		*vlan_proto* to the packet associated to *skb*, then update
 *		the checksum. Note that if *vlan_proto* is different from
 *		**ETH_P_8021Q** and **ETH_P_8021AD**, it is considered to
 *		be **ETH_P_8021Q**.
 *
 *		A call to this helper is susceptible to change the underlying
 *		packet buffer. Therefore, at load time, all checks on pointers
 *		previously done by the verifier are invalidated and must be
 *		performed again, if the helper is used in combination with
 *		direct packet access.
 *	Return
 *		0 on success, or a negative error in case of failure.
 *
 * long bpf_skb_vlan_pop(struct sk_buff *skb)
 *	Description
 *		Pop a VLAN header from the packet associated to *skb*.
 *
 *		A call to this helper is susceptible to change the underlying
 *		packet buffer. Therefore, at load time, all checks on pointers
 *		previously done by the verifier are invalidated and must be
 *		performed again, if the helper is used in combination with
 *		direct packet access.
 *	Return
 *		0 on success, or a negative error in case of failure.
 *
 * long bpf_skb_get_tunnel_key(struct sk_buff *skb, struct bpf_tunnel_key *key, u32 size, u64 flags)
 *	Description
 *		Get tunnel metadata. This helper takes a pointer *key* to an
 *		empty **struct bpf_tunnel_key** of **size**, that will be
 *		filled with tunnel metadata for the packet associated to *skb*.
 *		The *flags* can be set to **BPF_F_TUNINFO_IPV6**, which
 *		indicates that the tunnel is based on IPv6 protocol instead of
 *		IPv4.
 *
 *		The **struct bpf_tunnel_key** is an object that generalizes the
 *		principal parameters used by various tunneling protocols into a
 *		single struct. This way, it can be used to easily make a
 *		decision based on the contents of the encapsulation header,
 *		"summarized" in this struct. In particular, it holds the IP
 *		address of the remote end (IPv4 or IPv6, depending on the case)
 *		in *key*\ **->remote_ipv4** or *key*\ **->remote_ipv6**. Also,
 *		this struct exposes the *key*\ **->tunnel_id**, which is
 *		generally mapped to a VNI (Virtual Network Identifier), making
 *		it programmable together with the **bpf_skb_set_tunnel_key**\
 *		() helper.
 *
 *		Let's imagine that the following code is part of a program
 *		attached to the TC ingress interface, on one end of a GRE
 *		tunnel, and is supposed to filter out all messages coming from
 *		remote ends with IPv4 address other than 10.0.0.1:
 *
 *		::
 *
 *			int ret;
 *			struct bpf_tunnel_key key = {};
 *
 *			ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key), 0);
 *			if (ret < 0)
 *				return TC_ACT_SHOT;	// drop packet
 *
 *			if (key.remote_ipv4 != 0x0a000001)
 *				return TC_ACT_SHOT;	// drop packet
 *
 *			return TC_ACT_OK;		// accept packet
 *
 *		This interface can also be used with all encapsulation devices
 *		that can operate in "collect metadata" mode: instead of having
 *		one network device per specific configuration, the "collect
 *		metadata" mode only requires a single device where the
 *		configuration can be extracted from this helper.
 *
 *		This can be used together with various tunnels such as VXLAN,
 *		Geneve, GRE or IP in IP (IPIP).
 *	Return
 *		0 on success, or a negative error in case of failure.
 *
 * long bpf_skb_set_tunnel_key(struct sk_buff *skb, struct bpf_tunnel_key *key, u32 size, u64 flags)
 *	Description
 *		Populate tunnel metadata for packet associated to *skb*. The
 *		tunnel metadata is set to the contents of *key*, of *size*. The
 *		*flags* can be set to a combination of the following values:
 *
 *		**BPF_F_TUNINFO_IPV6**
 *			Indicate that the tunnel is based on IPv6 protocol
 *			instead of IPv4.
 *		**BPF_F_ZERO_CSUM_TX**
 *			For IPv4 packets, add a flag to tunnel metadata
 *			indicating that checksum computation should be skipped
 *			and checksum set to zeroes.
 *		**BPF_F_DONT_FRAGMENT**
 *			Add a flag to tunnel metadata indicating that the
 *			packet should not be fragmented.
 *		**BPF_F_SEQ_NUMBER**
 *			Add a flag to tunnel metadata indicating that a
 *			sequence number should be added to tunnel header before
 *			sending the packet. This flag was added for GRE
 *			encapsulation, but might be used with other protocols
 *			as well in the future.
 *
 *		Here is a typical usage on the transmit path:
 *
 *		::
 *
 *			struct bpf_tunnel_key key;
 *			     populate key ...
 *			bpf_skb_set_tunnel_key(skb, &key, sizeof(key), 0);
 *			bpf_clone_redirect(skb, vxlan_dev_ifindex, 0);
 *
 *		See also the description of the **bpf_skb_get_tunnel_key**\ ()
 *		helper for additional information.
 *	Return
 *		0 on success, or a negative error in case of failure.
 *
 * u64 bpf_perf_event_read(struct bpf_map *map, u64 flags)
 *	Description
 *		Read the value of a perf event counter.
 *		This helper relies on a
 *		*map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. The nature of
 *		the perf event counter is selected when *map* is updated with
 *		perf event file descriptors. The *map* is an array whose size
 *		is the number of available CPUs, and each cell contains a value
 *		relative to one CPU. The value to retrieve is indicated by
 *		*flags*, that contains the index of the CPU to look up, masked
 *		with **BPF_F_INDEX_MASK**. Alternatively, *flags* can be set to
 *		**BPF_F_CURRENT_CPU** to indicate that the value for the
 *		current CPU should be retrieved.
 *
 *		Note that before Linux 4.13, only hardware perf events can be
 *		retrieved.
 *
 *		Also, be aware that the newer helper
 *		**bpf_perf_event_read_value**\ () is recommended over
 *		**bpf_perf_event_read**\ () in general. The latter has some ABI
 *		quirks where error and counter value are used as a return code
 *		(which is wrong to do since ranges may overlap). This issue is
 *		fixed with **bpf_perf_event_read_value**\ (), which at the same
 *		time provides more features over the **bpf_perf_event_read**\
 *		() interface. Please refer to the description of
 *		**bpf_perf_event_read_value**\ () for details.
 *	Return
 *		The value of the perf event counter read from the map, or a
 *		negative error code in case of failure.
 *
 * long bpf_redirect(u32 ifindex, u64 flags)
 *	Description
 *		Redirect the packet to another net device of index *ifindex*.
 *		This helper is somewhat similar to **bpf_clone_redirect**\
 *		(), except that the packet is not cloned, which provides
 *		increased performance.
 *
 *		Except for XDP, both ingress and egress interfaces can be used
 *		for redirection. The **BPF_F_INGRESS** value in *flags* is used
 *		to make the distinction (ingress path is selected if the flag
 *		is present, egress path otherwise). Currently, XDP only
 *		supports redirection to the egress interface, and accepts no
 *		flag at all.
 *
 *		The same effect can also be attained with the more generic
 *		**bpf_redirect_map**\ (), which uses a BPF map to store the
 *		redirect target instead of providing it directly to the helper.
 *	Return
 *		For XDP, the helper returns **XDP_REDIRECT** on success or
 *		**XDP_ABORTED** on error. For other program types, the values
 *		are **TC_ACT_REDIRECT** on success or **TC_ACT_SHOT** on
 *		error.
 *
 * u32 bpf_get_route_realm(struct sk_buff *skb)
 *	Description
 *		Retrieve the realm of the route, that is to say the
 *		**tclassid** field of the destination for the *skb*. The
 *		identifier retrieved is a user-provided tag, similar to the
 *		one used with the net_cls cgroup (see description for
 *		**bpf_get_cgroup_classid**\ () helper), but here this tag is
 *		held by a route (a destination entry), not by a task.
 *
 *		Retrieving this identifier works with the clsact TC egress hook
 *		(see also **tc-bpf(8)**), or alternatively on conventional
 *		classful egress qdiscs, but not on TC ingress path. In case of
 *		clsact TC egress hook, this has the advantage that, internally,
 *		the destination entry has not been dropped yet in the transmit
 *		path. Therefore, the destination entry does not need to be
 *		artificially held via **netif_keep_dst**\ () for a classful
 *		qdisc until the *skb* is freed.
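 *
 *		As an illustration, a clsact TC egress program could key
 *		per-realm traffic counters on this tag. This is a minimal
 *		sketch; the *counters* hash map (realm to byte count) is a
 *		hypothetical example and is not defined here:
 *
 *		::
 *
 *			u32 realm = bpf_get_route_realm(skb);
 *			u64 *bytes;
 *
 *			if (realm) {
 *				bytes = bpf_map_lookup_elem(&counters, &realm);
 *				if (bytes)
 *					__sync_fetch_and_add(bytes, skb->len);
 *			}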
 *
 *		This helper is available only if the kernel was compiled with
 *		**CONFIG_IP_ROUTE_CLASSID** configuration option.
 *	Return
 *		The realm of the route for the packet associated to *skb*, or 0
 *		if none was found.
 *
 * long bpf_perf_event_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size)
 *	Description
 *		Write raw *data* blob into a special BPF perf event held by
 *		*map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf
 *		event must have the following attributes: **PERF_SAMPLE_RAW**
 *		as **sample_type**, **PERF_TYPE_SOFTWARE** as **type**, and
 *		**PERF_COUNT_SW_BPF_OUTPUT** as **config**.
 *
 *		The *flags* are used to indicate the index in *map* for which
 *		the value must be put, masked with **BPF_F_INDEX_MASK**.
 *		Alternatively, *flags* can be set to **BPF_F_CURRENT_CPU**
 *		to indicate that the index of the current CPU core should be
 *		used.
 *
 *		The value to write, of *size*, is passed through eBPF stack and
 *		pointed by *data*.
 *
 *		The context of the program *ctx* also needs to be passed to
 *		the helper.
 *
 *		In user space, a program willing to read the values needs to
 *		call **perf_event_open**\ () on the perf event (either for
 *		one or for all CPUs) and to store the file descriptor into the
 *		*map*. This must be done before the eBPF program can send data
 *		into it. An example is available in file
 *		*samples/bpf/trace_output_user.c* in the Linux kernel source
 *		tree (the eBPF program counterpart is in
 *		*samples/bpf/trace_output_kern.c*).
 *
 *		**bpf_perf_event_output**\ () achieves better performance
 *		than **bpf_trace_printk**\ () for sharing data with user
 *		space, and is much better suited to streaming data from eBPF
 *		programs.
 *
 *		Note that this helper is not restricted to tracing use cases
 *		and can be used with programs attached to TC or XDP as well,
 *		where it allows for passing data to user space listeners. Data
 *		can be:
 *
 *		* Only custom structs,
 *		* Only the packet payload, or
 *		* A combination of both.
 *	Return
 *		0 on success, or a negative error in case of failure.
 *
 * long bpf_skb_load_bytes(const void *skb, u32 offset, void *to, u32 len)
 *	Description
 *		This helper was provided as an easy way to load data from a
 *		packet. It can be used to load *len* bytes from *offset* from
 *		the packet associated to *skb*, into the buffer pointed by
 *		*to*.
 *
 *		Since Linux 4.7, usage of this helper has mostly been replaced
 *		by "direct packet access", enabling packet data to be
 *		manipulated with *skb*\ **->data** and *skb*\ **->data_end**
 *		pointing respectively to the first byte of packet data and to
 *		the byte after the last byte of packet data. However, it
 *		remains useful if one wishes to read large quantities of data
 *		at once from a packet into the eBPF stack.
 *	Return
 *		0 on success, or a negative error in case of failure.
 *
 * long bpf_get_stackid(void *ctx, struct bpf_map *map, u64 flags)
 *	Description
 *		Walk a user or a kernel stack and return its id. To achieve
 *		this, the helper needs *ctx*, which is a pointer to the context
 *		on which the tracing program is executed, and a pointer to a
 *		*map* of type **BPF_MAP_TYPE_STACK_TRACE**.
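 *
 *		As an illustration, a profiling program can count how often
 *		each stack is observed by using the returned id as a map key
 *		(the *flags* bits are detailed just below). This is a minimal
 *		sketch; the *stack_traces* map (of type
 *		**BPF_MAP_TYPE_STACK_TRACE**) and the *counts* hash map are
 *		hypothetical examples, not defined here:
 *
 *		::
 *
 *			long id = bpf_get_stackid(ctx, &stack_traces,
 *						  BPF_F_USER_STACK);
 *			u64 one = 1, *val;
 *
 *			if (id < 0)
 *				return 0;	// could not collect the stack
 *
 *			val = bpf_map_lookup_elem(&counts, &id);
 *			if (val)
 *				__sync_fetch_and_add(val, 1);
 *			else
 *				bpf_map_update_elem(&counts, &id, &one, BPF_NOEXIST);
 *
 *			return 0;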
 *
 *		The last argument, *flags*, holds the number of stack frames to
 *		skip (from 0 to 255), masked with
 *		**BPF_F_SKIP_FIELD_MASK**. The next bits can be used to set
 *		a combination of the following flags:
 *
 *		**BPF_F_USER_STACK**
 *			Collect a user space stack instead of a kernel stack.
 *		**BPF_F_FAST_STACK_CMP**
 *			Compare stacks by hash only.
 *		**BPF_F_REUSE_STACKID**
 *			If two different stacks hash into the same *stackid*,
 *			discard the old one.
 *
 *		The stack id retrieved is a 32-bit integer handle which
 *		can be further combined with other data (including other stack
 *		ids) and used as a key into maps. This can be useful for
 *		generating a variety of graphs (such as flame graphs or off-cpu
 *		graphs).
 *
 *		For walking a stack, this helper is an improvement over
 *		**bpf_probe_read**\ (), which can be used with unrolled loops
 *		but is not efficient and consumes a lot of eBPF instructions.
 *		Instead, **bpf_get_stackid**\ () can collect up to
 *		**PERF_MAX_STACK_DEPTH** kernel and user frames. Note that
 *		this limit can be controlled with the **sysctl** program, and
 *		that it should be manually increased in order to profile long
 *		user stacks (such as stacks for Java programs). To do so, use:
 *
 *		::
 *
 *			# sysctl kernel.perf_event_max_stack=<new value>
 *	Return
 *		The positive or null stack id on success, or a negative error
 *		in case of failure.
 *
 * s64 bpf_csum_diff(__be32 *from, u32 from_size, __be32 *to, u32 to_size, __wsum seed)
 *	Description
 *		Compute a checksum difference, from the raw buffer pointed by
 *		*from*, of length *from_size* (that must be a multiple of 4),
 *		towards the raw buffer pointed by *to*, of size *to_size*
 *		(same remark). An optional *seed* can be added to the value
 *		(this can be cascaded, the seed may come from a previous call
 *		to the helper).
 *
 *		This is flexible enough to be used in several ways:
 *
 *		* With *from_size* == 0, *to_size* > 0 and *seed* set to
 *		  checksum, it can be used when pushing new data.
 *		* With *from_size* > 0, *to_size* == 0 and *seed* set to
 *		  checksum, it can be used when removing data from a packet.
 *		* With *from_size* > 0, *to_size* > 0 and *seed* set to 0, it
 *		  can be used to compute a diff. Note that *from_size* and
 *		  *to_size* do not need to be equal.
 *
 *		This helper can be used in combination with
 *		**bpf_l3_csum_replace**\ () and **bpf_l4_csum_replace**\ (), to
 *		which one can feed in the difference computed with
 *		**bpf_csum_diff**\ ().
 *	Return
 *		The checksum result, or a negative error code in case of
 *		failure.
 *
 * long bpf_skb_get_tunnel_opt(struct sk_buff *skb, void *opt, u32 size)
 *	Description
 *		Retrieve tunnel options metadata for the packet associated to
 *		*skb*, and store the raw tunnel option data to the buffer *opt*
 *		of *size*.
 *
 *		This helper can be used with encapsulation devices that can
 *		operate in "collect metadata" mode (please refer to the related
 *		note in the description of **bpf_skb_get_tunnel_key**\ () for
 *		more details).
 *		A particular example where this can be used is
 *		in combination with the Geneve encapsulation protocol, where it
 *		allows for pushing (with the **bpf_skb_set_tunnel_opt**\ ()
 *		helper) and retrieving arbitrary TLVs (Type-Length-Value
 *		headers) from the eBPF program. This allows for full
 *		customization of these headers.
 *	Return
 *		The size of the option data retrieved.
 *
 * long bpf_skb_set_tunnel_opt(struct sk_buff *skb, void *opt, u32 size)
 *	Description
 *		Set tunnel options metadata for the packet associated to *skb*
 *		to the option data contained in the raw buffer *opt* of *size*.
 *
 *		See also the description of the **bpf_skb_get_tunnel_opt**\ ()
 *		helper for additional information.
 *	Return
 *		0 on success, or a negative error in case of failure.
 *
 * long bpf_skb_change_proto(struct sk_buff *skb, __be16 proto, u64 flags)
 *	Description
 *		Change the protocol of the *skb* to *proto*. Currently
 *		supported are transition from IPv4 to IPv6, and from IPv6 to
 *		IPv4. The helper takes care of the groundwork for the
 *		transition, including resizing the socket buffer. The eBPF
 *		program is expected to fill the new headers, if any, via
 *		**bpf_skb_store_bytes**\ () and to recompute the checksums with
 *		**bpf_l3_csum_replace**\ () and **bpf_l4_csum_replace**\
 *		(). The main case for this helper is to perform NAT64
 *		operations out of an eBPF program.
 *
 *		Internally, the GSO type is marked as dodgy so that headers are
 *		checked and segments are recalculated by the GSO/GRO engine.
 *		The size for GSO target is adapted as well.
 *
 *		All values for *flags* are reserved for future usage, and must
 *		be left at zero.
 *
 *		A call to this helper is susceptible to change the underlying
 *		packet buffer. Therefore, at load time, all checks on pointers
 *		previously done by the verifier are invalidated and must be
 *		performed again, if the helper is used in combination with
 *		direct packet access.
 *	Return
 *		0 on success, or a negative error in case of failure.
 *
 * long bpf_skb_change_type(struct sk_buff *skb, u32 type)
 *	Description
 *		Change the packet type for the packet associated to *skb*. This
 *		comes down to setting *skb*\ **->pkt_type** to *type*, except
 *		the eBPF program does not have write access to *skb*\
 *		**->pkt_type** apart from this helper. Using a helper here
 *		allows for graceful handling of errors.
 *
 *		The major use case is to change incoming *skb*s to
 *		**PACKET_HOST** in a programmatic way instead of having to
 *		recirculate via **redirect**\ (..., **BPF_F_INGRESS**), for
 *		example.
 *
 *		Note that *type* only allows certain values. At this time, they
 *		are:
 *
 *		**PACKET_HOST**
 *			Packet is for us.
 *		**PACKET_BROADCAST**
 *			Send packet to all.
 *		**PACKET_MULTICAST**
 *			Send packet to group.
 *		**PACKET_OTHERHOST**
 *			Send packet to someone else.
 *	Return
 *		0 on success, or a negative error in case of failure.
 *
 * long bpf_skb_under_cgroup(struct sk_buff *skb, struct bpf_map *map, u32 index)
 *	Description
 *		Check whether *skb* is a descendant of the cgroup2 held by
 *		*map* of type **BPF_MAP_TYPE_CGROUP_ARRAY**, at *index*.
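 *
 *		As an illustration, a TC program could restrict its action to
 *		traffic from a single cgroup. This is a minimal sketch,
 *		assuming a *cgroup_map* of type **BPF_MAP_TYPE_CGROUP_ARRAY**
 *		populated from user space with the cgroup of interest at
 *		index 0:
 *
 *		::
 *
 *			long ret = bpf_skb_under_cgroup(skb, &cgroup_map, 0);
 *
 *			if (ret != 1)
 *				return TC_ACT_OK;	// other cgroup, or error
 *
 *			// The packet belongs to the monitored cgroup.
 *			return TC_ACT_SHOT;		// drop packet
 *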
 *	Return
 *		The return value depends on the result of the test, and can be:
 *
 *		* 0, if the *skb* failed the cgroup2 descendant test.
 *		* 1, if the *skb* succeeded the cgroup2 descendant test.
 *		* A negative error code, if an error occurred.
 *
 * u32 bpf_get_hash_recalc(struct sk_buff *skb)
 *	Description
 *		Retrieve the hash of the packet, *skb*\ **->hash**. If it is
 *		not set, in particular if the hash was cleared due to mangling,
 *		recompute this hash. Later accesses to the hash can be done
 *		directly with *skb*\ **->hash**.
 *
 *		Calling **bpf_set_hash_invalid**\ (), changing a packet
 *		protocol with **bpf_skb_change_proto**\ (), or calling
 *		**bpf_skb_store_bytes**\ () with the
 *		**BPF_F_INVALIDATE_HASH** flag are actions susceptible to clear
 *		the hash and to trigger a new computation for the next call to
 *		**bpf_get_hash_recalc**\ ().
 *	Return
 *		The 32-bit hash.
 *
 * u64 bpf_get_current_task(void)
 *	Description
 *		Get the current task.
 *	Return
 *		A pointer to the current task struct.
 *
 * long bpf_probe_write_user(void *dst, const void *src, u32 len)
 *	Description
 *		Attempt in a safe way to write *len* bytes from the buffer
 *		*src* to *dst* in memory. It only works for threads that are in
 *		user context, and *dst* must be a valid user space address.
 *
 *		This helper should not be used to implement any kind of
 *		security mechanism because of TOC-TOU attacks, but rather to
 *		debug, divert, and manipulate execution of semi-cooperative
 *		processes.
 *
 *		Keep in mind that this feature is meant for experiments, and it
 *		has a risk of crashing the system and running programs.
 *		Therefore, when an eBPF program using this helper is attached,
 *		a warning including PID and process name is printed to kernel
 *		logs.
 *	Return
 *		0 on success, or a negative error in case of failure.
 *
 * long bpf_current_task_under_cgroup(struct bpf_map *map, u32 index)
 *	Description
 *		Check whether the probe is being run in the context of a given
 *		subset of the cgroup2 hierarchy. The cgroup2 to test is held by
 *		*map* of type **BPF_MAP_TYPE_CGROUP_ARRAY**, at *index*.
 *	Return
 *		The return value depends on the result of the test, and can be:
 *
 *		* 1, if current task belongs to the cgroup2.
 *		* 0, if current task does not belong to the cgroup2.
 *		* A negative error code, if an error occurred.
 *
 * long bpf_skb_change_tail(struct sk_buff *skb, u32 len, u64 flags)
 *	Description
 *		Resize (trim or grow) the packet associated to *skb* to the
 *		new *len*. The *flags* are reserved for future usage, and must
 *		be left at zero.
 *
 *		The basic idea is that the helper performs the needed work to
 *		change the size of the packet, then the eBPF program rewrites
 *		the rest via helpers like **bpf_skb_store_bytes**\ (),
 *		**bpf_l3_csum_replace**\ (), **bpf_l4_csum_replace**\ ()
 *		and others. This helper is a slow path utility intended for
 *		replies with control messages. And because it is targeted for
 *		slow path, the helper itself can afford to be slow: it
 *		implicitly linearizes, unclones and drops offloads from the
 *		*skb*.
 *
 *		A call to this helper is susceptible to change the underlying
 *		packet buffer.
 *		Therefore, at load time, all checks on pointers
 *		previously done by the verifier are invalidated and must be
 *		performed again, if the helper is used in combination with
 *		direct packet access.
 *	Return
 *		0 on success, or a negative error in case of failure.
 *
 * long bpf_skb_pull_data(struct sk_buff *skb, u32 len)
 *	Description
 *		Pull in non-linear data in case the *skb* is non-linear and not
 *		all of *len* are part of the linear section. Make *len* bytes
 *		from *skb* readable and writable. If a zero value is passed for
 *		*len*, then all bytes in the linear part of *skb* will be made
 *		readable and writable.
 *
 *		This helper is only needed for reading and writing with direct
 *		packet access.
 *
 *		For direct packet access, testing that offsets to access
 *		are within packet boundaries (test on *skb*\ **->data_end**) is
 *		susceptible to fail if offsets are invalid, or if the requested
 *		data is in non-linear parts of the *skb*. On failure the
 *		program can just bail out, or in the case of a non-linear
 *		buffer, use a helper to make the data available. The
 *		**bpf_skb_load_bytes**\ () helper is a first solution to access
 *		the data. Another one consists in using **bpf_skb_pull_data**\
 *		() to pull in the non-linear parts once, then to retest and
 *		eventually access the data.
 *
 *		At the same time, this also makes sure the *skb* is uncloned,
 *		which is a necessary condition for direct write. As this needs
 *		to be an invariant for the write part only, the verifier
 *		detects writes and adds a prologue that calls
 *		**bpf_skb_pull_data**\ () to effectively unclone the *skb* from
 *		the very beginning in case it is indeed cloned.
 *
 *		A call to this helper is susceptible to change the underlying
 *		packet buffer. Therefore, at load time, all checks on pointers
 *		previously done by the verifier are invalidated and must be
 *		performed again, if the helper is used in combination with
 *		direct packet access.
 *	Return
 *		0 on success, or a negative error in case of failure.
 *
 * s64 bpf_csum_update(struct sk_buff *skb, __wsum csum)
 *	Description
 *		Add the checksum *csum* into *skb*\ **->csum** in case the
 *		driver has supplied a checksum for the entire packet into that
 *		field. Return an error otherwise. This helper is intended to be
 *		used in combination with **bpf_csum_diff**\ (), in particular
 *		when the checksum needs to be updated after data has been
 *		written into the packet through direct packet access.
 *	Return
 *		The checksum on success, or a negative error code in case of
 *		failure.
 *
 * void bpf_set_hash_invalid(struct sk_buff *skb)
 *	Description
 *		Invalidate the current *skb*\ **->hash**. It can be used after
 *		mangling on headers through direct packet access, in order to
 *		indicate that the hash is outdated and to trigger a
 *		recalculation the next time the kernel tries to access this
 *		hash or when the **bpf_get_hash_recalc**\ () helper is called.
 *	Return
 *		void.
 *
 * long bpf_get_numa_node_id(void)
 *	Description
 *		Return the id of the current NUMA node.
 *		The primary use case
 *		for this helper is the selection of sockets for the local NUMA
 *		node, when the program is attached to sockets using the
 *		**SO_ATTACH_REUSEPORT_EBPF** option (see also **socket(7)**),
 *		but the helper is also available to other eBPF program types,
 *		similarly to **bpf_get_smp_processor_id**\ ().
 *	Return
 *		The id of the current NUMA node.
 *
 * long bpf_skb_change_head(struct sk_buff *skb, u32 len, u64 flags)
 *	Description
 *		Grow the headroom of the packet associated to *skb* and adjust
 *		the offset of the MAC header accordingly, adding *len* bytes of
 *		space. It automatically extends and reallocates memory as
 *		required.
 *
 *		This helper can be used on a layer 3 *skb* to push a MAC header
 *		for redirection into a layer 2 device.
 *
 *		All values for *flags* are reserved for future usage, and must
 *		be left at zero.
 *
 *		A call to this helper is susceptible to change the underlying
 *		packet buffer. Therefore, at load time, all checks on pointers
 *		previously done by the verifier are invalidated and must be
 *		performed again, if the helper is used in combination with
 *		direct packet access.
 *	Return
 *		0 on success, or a negative error in case of failure.
 *
 * long bpf_xdp_adjust_head(struct xdp_buff *xdp_md, int delta)
 *	Description
 *		Adjust (move) *xdp_md*\ **->data** by *delta* bytes. Note that
 *		it is possible to use a negative value for *delta*. This helper
 *		can be used to prepare the packet for pushing or popping
 *		headers.
 *
 *		A call to this helper is susceptible to change the underlying
 *		packet buffer. Therefore, at load time, all checks on pointers
 *		previously done by the verifier are invalidated and must be
 *		performed again, if the helper is used in combination with
 *		direct packet access.
 *	Return
 *		0 on success, or a negative error in case of failure.
 *
 * long bpf_probe_read_str(void *dst, u32 size, const void *unsafe_ptr)
 *	Description
 *		Copy a NUL terminated string from an unsafe kernel address
 *		*unsafe_ptr* to *dst*. See **bpf_probe_read_kernel_str**\ () for
 *		more details.
 *
 *		Generally, use **bpf_probe_read_user_str**\ () or
 *		**bpf_probe_read_kernel_str**\ () instead.
 *	Return
 *		On success, the strictly positive length of the string,
 *		including the trailing NUL character. On error, a negative
 *		value.
 *
 * u64 bpf_get_socket_cookie(struct sk_buff *skb)
 *	Description
 *		If the **struct sk_buff** pointed by *skb* has a known socket,
 *		retrieve the cookie (generated by the kernel) of this socket.
 *		If no cookie has been set yet, generate a new cookie. Once
 *		generated, the socket cookie remains stable for the life of the
 *		socket. This helper can be useful for monitoring per socket
 *		networking traffic statistics as it provides a global socket
 *		identifier that can be assumed unique.
 *	Return
 *		An 8-byte long unique number on success, or 0 if the socket
 *		field is missing inside *skb*.
 *
 * u64 bpf_get_socket_cookie(struct bpf_sock_addr *ctx)
 *	Description
 *		Equivalent to **bpf_get_socket_cookie**\ () helper that accepts
 *		*skb*, but gets socket from **struct bpf_sock_addr** context.
 *	Return
 *		An 8-byte long unique number.
 *
 * u64 bpf_get_socket_cookie(struct bpf_sock_ops *ctx)
 *	Description
 *		Equivalent to **bpf_get_socket_cookie**\ () helper that accepts
 *		*skb*, but gets socket from **struct bpf_sock_ops** context.
 *	Return
 *		An 8-byte long unique number.
 *
 * u64 bpf_get_socket_cookie(struct sock *sk)
 *	Description
 *		Equivalent to **bpf_get_socket_cookie**\ () helper that accepts
 *		*sk*, but gets socket from a BTF **struct sock**. This helper
 *		also works for sleepable programs.
 *	Return
 *		An 8-byte long unique number or 0 if *sk* is NULL.
 *
 * u32 bpf_get_socket_uid(struct sk_buff *skb)
 *	Description
 *		Get the owner UID of the socket associated to *skb*.
 *	Return
 *		The owner UID of the socket associated to *skb*. If the socket
 *		is **NULL**, or if it is not a full socket (i.e. if it is a
 *		time-wait or a request socket instead), **overflowuid** value
 *		is returned (note that **overflowuid** might also be the actual
 *		UID value for the socket).
 *
 * long bpf_set_hash(struct sk_buff *skb, u32 hash)
 *	Description
 *		Set the full hash for *skb* (set the field *skb*\ **->hash**)
 *		to value *hash*.
 *	Return
 *		0
 *
 * long bpf_setsockopt(void *bpf_socket, int level, int optname, void *optval, int optlen)
 *	Description
 *		Emulate a call to **setsockopt()** on the socket associated to
 *		*bpf_socket*, which must be a full socket. The *level* at
 *		which the option resides and the name *optname* of the option
 *		must be specified, see **setsockopt(2)** for more information.
 *		The option value of length *optlen* is pointed by *optval*.
 *
 *		*bpf_socket* should be one of the following:
 *
 *		* **struct bpf_sock_ops** for **BPF_PROG_TYPE_SOCK_OPS**.
 *		* **struct bpf_sock_addr** for **BPF_CGROUP_INET4_CONNECT**
 *		  and **BPF_CGROUP_INET6_CONNECT**.
 *
 *		This helper actually implements a subset of **setsockopt()**.
 *		It supports the following *level*\ s:
 *
 *		* **SOL_SOCKET**, which supports the following *optname*\ s:
 *		  **SO_RCVBUF**, **SO_SNDBUF**, **SO_MAX_PACING_RATE**,
 *		  **SO_PRIORITY**, **SO_RCVLOWAT**, **SO_MARK**,
 *		  **SO_BINDTODEVICE**, **SO_KEEPALIVE**.
 *		* **IPPROTO_TCP**, which supports the following *optname*\ s:
 *		  **TCP_CONGESTION**, **TCP_BPF_IW**,
 *		  **TCP_BPF_SNDCWND_CLAMP**, **TCP_SAVE_SYN**,
 *		  **TCP_KEEPIDLE**, **TCP_KEEPINTVL**, **TCP_KEEPCNT**,
 *		  **TCP_SYNCNT**, **TCP_USER_TIMEOUT**, **TCP_NOTSENT_LOWAT**.
 *		* **IPPROTO_IP**, which supports *optname* **IP_TOS**.
 *		* **IPPROTO_IPV6**, which supports *optname* **IPV6_TCLASS**.
 *	Return
 *		0 on success, or a negative error in case of failure.
 *
 * long bpf_skb_adjust_room(struct sk_buff *skb, s32 len_diff, u32 mode, u64 flags)
 *	Description
 *		Grow or shrink the room for data in the packet associated to
 *		*skb* by *len_diff*, and according to the selected *mode*.
 *
 *		By default, the helper will reset any offloaded checksum
 *		indicator of the skb to CHECKSUM_NONE. This can be avoided
 *		by the following flag:
 *
 *		* **BPF_F_ADJ_ROOM_NO_CSUM_RESET**: Do not reset offloaded
 *		  checksum data of the skb to CHECKSUM_NONE.
 *
 *		There are two supported modes at this time:
 *
 *		* **BPF_ADJ_ROOM_MAC**: Adjust room at the mac layer
 *		  (room space is added or removed between the layer 2 and
 *		  layer 3 headers).
 *
 *		* **BPF_ADJ_ROOM_NET**: Adjust room at the network layer
 *		  (room space is added or removed between the layer 3 and
 *		  layer 4 headers).
 *
 *		The following flags are supported at this time:
 *
 *		* **BPF_F_ADJ_ROOM_FIXED_GSO**: Do not adjust gso_size.
 *		  Adjusting mss in this way is not allowed for datagrams.
 *
 *		* **BPF_F_ADJ_ROOM_ENCAP_L3_IPV4**,
 *		  **BPF_F_ADJ_ROOM_ENCAP_L3_IPV6**:
 *		  Any new space is reserved to hold a tunnel header.
 *		  Configure skb offsets and other fields accordingly.
 *
 *		* **BPF_F_ADJ_ROOM_ENCAP_L4_GRE**,
 *		  **BPF_F_ADJ_ROOM_ENCAP_L4_UDP**:
 *		  Use with ENCAP_L3 flags to further specify the tunnel type.
 *
 *		* **BPF_F_ADJ_ROOM_ENCAP_L2**\ (*len*):
 *		  Use with ENCAP_L3/L4 flags to further specify the tunnel
 *		  type; *len* is the length of the inner MAC header.
 *
 *		* **BPF_F_ADJ_ROOM_ENCAP_L2_ETH**:
 *		  Use with BPF_F_ADJ_ROOM_ENCAP_L2 flag to further specify the
 *		  L2 type as Ethernet.
 *
 *		A call to this helper is susceptible to change the underlying
 *		packet buffer. Therefore, at load time, all checks on pointers
 *		previously done by the verifier are invalidated and must be
 *		performed again, if the helper is used in combination with
 *		direct packet access.
 *	Return
 *		0 on success, or a negative error in case of failure.
 *
 * long bpf_redirect_map(struct bpf_map *map, u32 key, u64 flags)
 *	Description
 *		Redirect the packet to the endpoint referenced by *map* at
 *		index *key*. Depending on its type, this *map* can contain
 *		references to net devices (for forwarding packets through other
 *		ports), or to CPUs (for redirecting XDP frames to another CPU;
 *		but this is only implemented for native XDP (with driver
 *		support) as of this writing).
 *
 *		The lower two bits of *flags* are used as the return code if
 *		the map lookup fails. This is so that the return value can be
 *		one of the XDP program return codes up to **XDP_TX**, as chosen
 *		by the caller. The higher bits of *flags* can be set to
 *		**BPF_F_BROADCAST** or **BPF_F_EXCLUDE_INGRESS** as defined
 *		below.
 *
 *		With **BPF_F_BROADCAST** the packet will be broadcast to all
 *		the interfaces in the map; with **BPF_F_EXCLUDE_INGRESS** the
 *		ingress interface will be excluded from the broadcast.
 *
 *		See also **bpf_redirect**\ (), which only supports redirecting
 *		to an ifindex, but doesn't require a map to do so.
 *	Return
 *		**XDP_REDIRECT** on success, or the value of the two lower bits
 *		of the *flags* argument on error.
 *
 * long bpf_sk_redirect_map(struct sk_buff *skb, struct bpf_map *map, u32 key, u64 flags)
 *	Description
 *		Redirect the packet to the socket referenced by *map* (of type
 *		**BPF_MAP_TYPE_SOCKMAP**) at index *key*. Both ingress and
 *		egress interfaces can be used for redirection. The
 *		**BPF_F_INGRESS** value in *flags* is used to make the
 *		distinction (ingress path is selected if the flag is present,
 *		egress path otherwise). This is the only flag supported for now.
 *	Return
 *		**SK_PASS** on success, or **SK_DROP** on error.
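 *
 *		As an illustration of **bpf_redirect_map**\ () above, an XDP
 *		program could forward every frame to a device stored in a
 *		**BPF_MAP_TYPE_DEVMAP**. This is a minimal sketch; the
 *		*tx_ports* map is a hypothetical example, populated from user
 *		space with the egress ifindex at index 0:
 *
 *		::
 *
 *			// Redirect to the device at index 0; if the lookup
 *			// fails, return the code stored in the lower two bits
 *			// of the flags argument, here XDP_DROP.
 *			return bpf_redirect_map(&tx_ports, 0, XDP_DROP);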
 *
 * long bpf_sock_map_update(struct bpf_sock_ops *skops, struct bpf_map *map, void *key, u64 flags)
 *	Description
 *		Add an entry to, or update, a *map* referencing sockets. The
 *		*skops* is used as a new value for the entry associated to
 *		*key*. *flags* is one of:
 *
 *		**BPF_NOEXIST**
 *			The entry for *key* must not exist in the map.
 *		**BPF_EXIST**
 *			The entry for *key* must already exist in the map.
 *		**BPF_ANY**
 *			No condition on the existence of the entry for *key*.
 *
 *		If the *map* has eBPF programs (parser and verdict), those will
 *		be inherited by the socket being added. If the socket is
 *		already attached to eBPF programs, this results in an error.
 *	Return
 *		0 on success, or a negative error in case of failure.
 *
 * long bpf_xdp_adjust_meta(struct xdp_buff *xdp_md, int delta)
 *	Description
 *		Adjust the address pointed by *xdp_md*\ **->data_meta** by
 *		*delta* (which can be positive or negative). Note that this
 *		operation modifies the address stored in *xdp_md*\ **->data**,
 *		so the latter must be loaded only after the helper has been
 *		called.
 *
 *		The use of *xdp_md*\ **->data_meta** is optional and programs
 *		are not required to use it. The rationale is that when the
 *		packet is processed with XDP (e.g. as DoS filter), it is
 *		possible to push further meta data along with it before passing
 *		to the stack, and to give the guarantee that an ingress eBPF
 *		program attached as a TC classifier on the same device can pick
 *		this up for further post-processing. Since TC works with socket
 *		buffers, it remains possible to set from XDP the **mark** or
 *		**priority** fields, or other fields of the socket buffer.
 *		Having this scratch space generic and programmable allows for
 *		more flexibility as the user is free to store whatever meta
 *		data they need.
 *
 *		A call to this helper is susceptible to change the underlying
 *		packet buffer. Therefore, at load time, all checks on pointers
 *		previously done by the verifier are invalidated and must be
 *		performed again, if the helper is used in combination with
 *		direct packet access.
 *	Return
 *		0 on success, or a negative error in case of failure.
 *
 * long bpf_perf_event_read_value(struct bpf_map *map, u64 flags, struct bpf_perf_event_value *buf, u32 buf_size)
 *	Description
 *		Read the value of a perf event counter, and store it into *buf*
 *		of size *buf_size*. This helper relies on a *map* of type
 *		**BPF_MAP_TYPE_PERF_EVENT_ARRAY**. The nature of the perf event
 *		counter is selected when *map* is updated with perf event file
 *		descriptors. The *map* is an array whose size is the number of
 *		available CPUs, and each cell contains a value relative to one
 *		CPU. The value to retrieve is indicated by *flags*, that
 *		contains the index of the CPU to look up, masked with
 *		**BPF_F_INDEX_MASK**. Alternatively, *flags* can be set to
 *		**BPF_F_CURRENT_CPU** to indicate that the value for the
 *		current CPU should be retrieved.
 *
 *		This helper behaves in a way close to
 *		**bpf_perf_event_read**\ () helper, save that instead of
 *		just returning the value observed, it fills the *buf*
 *		structure.
 *		This allows for additional data to be retrieved: in
 *		particular, the enabled and running times (in *buf*\
 *		**->enabled** and *buf*\ **->running**, respectively) are
 *		copied. In general, **bpf_perf_event_read_value**\ () is
 *		recommended over **bpf_perf_event_read**\ (), which has some
 *		ABI issues and provides fewer functionalities.
 *
 *		These values are interesting, because hardware PMU (Performance
 *		Monitoring Unit) counters are limited resources. When there are
 *		more PMU based perf events opened than available counters, the
 *		kernel will multiplex these events so each event gets a certain
 *		percentage (but not all) of the PMU time. In case that
 *		multiplexing happens, the number of samples or the counter
 *		value will not reflect what would be measured without
 *		multiplexing. This makes comparison between different runs
 *		difficult. Typically, the counter value should be normalized
 *		before comparing to other experiments. The usual normalization
 *		is done as follows.
 *
 *		::
 *
 *			normalized_counter = counter * t_enabled / t_running
 *
 *		Where t_enabled is the time enabled for the event and
 *		t_running is the time running for the event since the last
 *		normalization. The enabled and running times are accumulated
 *		since the perf event open. To achieve the scaling factor
 *		between two invocations of an eBPF program, users can use the
 *		CPU id as the key (which is typical for the perf array usage
 *		model) to remember the previous value and do the calculation
 *		inside the eBPF program.
 *	Return
 *		0 on success, or a negative error in case of failure.
 *
 * long bpf_perf_prog_read_value(struct bpf_perf_event_data *ctx, struct bpf_perf_event_value *buf, u32 buf_size)
 *	Description
 *		For an eBPF program attached to a perf event, retrieve the
 *		value of the event counter associated to *ctx* and store it in
 *		the structure pointed by *buf* and of size *buf_size*. Enabled
 *		and running times are also stored in the structure (see
 *		description of helper **bpf_perf_event_read_value**\ () for
 *		more details).
 *	Return
 *		0 on success, or a negative error in case of failure.
 *
 * long bpf_getsockopt(void *bpf_socket, int level, int optname, void *optval, int optlen)
 *	Description
 *		Emulate a call to **getsockopt()** on the socket associated to
 *		*bpf_socket*, which must be a full socket. The *level* at
 *		which the option resides and the name *optname* of the option
 *		must be specified, see **getsockopt(2)** for more information.
 *		The retrieved value is stored in the structure pointed by
 *		*optval* and of length *optlen*.
 *
 *		*bpf_socket* should be one of the following:
 *
 *		* **struct bpf_sock_ops** for **BPF_PROG_TYPE_SOCK_OPS**.
 *		* **struct bpf_sock_addr** for **BPF_CGROUP_INET4_CONNECT**
 *		  and **BPF_CGROUP_INET6_CONNECT**.
 *
 *		This helper actually implements a subset of **getsockopt()**.
 *		It supports the following *level*\ s:
 *
 *		* **IPPROTO_TCP**, which supports *optname*
 *		  **TCP_CONGESTION**.
 *		* **IPPROTO_IP**, which supports *optname* **IP_TOS**.
 *		* **IPPROTO_IPV6**, which supports *optname* **IPV6_TCLASS**.
 *	Return
 *		0 on success, or a negative error in case of failure.
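 *
 *		As an illustration, a program of type
 *		**BPF_PROG_TYPE_SOCK_OPS** could check which congestion
 *		control algorithm a connection uses. This is a minimal sketch;
 *		*skops* stands for the program's **struct bpf_sock_ops**
 *		context, and the 16-byte buffer size is an arbitrary choice:
 *
 *		::
 *
 *			char cc[16] = {};
 *
 *			if (bpf_getsockopt(skops, IPPROTO_TCP, TCP_CONGESTION,
 *					   cc, sizeof(cc)) == 0) {
 *				// cc now holds the algorithm name, e.g. "cubic".
 *			}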
 *
 * long bpf_override_return(struct pt_regs *regs, u64 rc)
 *	Description
 *		Used for error injection, this helper uses kprobes to override
 *		the return value of the probed function, and to set it to *rc*.
 *		The first argument is the context *regs* on which the kprobe
 *		works.
 *
 *		This helper works by setting the PC (program counter)
 *		to an override function which is run in place of the original
 *		probed function. This means the probed function is not run at
 *		all. The replacement function just returns with the required
 *		value.
 *
 *		This helper has security implications, and thus is subject to
 *		restrictions. It is only available if the kernel was compiled
 *		with the **CONFIG_BPF_KPROBE_OVERRIDE** configuration
 *		option, and in this case it only works on functions tagged with
 *		**ALLOW_ERROR_INJECTION** in the kernel code.
 *
 *		Also, the helper is only available for the architectures having
 *		the **CONFIG_FUNCTION_ERROR_INJECTION** option. As of this
 *		writing, x86 architecture is the only one to support this
 *		feature.
 *	Return
 *		0
 *
 * long bpf_sock_ops_cb_flags_set(struct bpf_sock_ops *bpf_sock, int argval)
 *	Description
 *		Attempt to set the value of the **bpf_sock_ops_cb_flags** field
 *		for the full TCP socket associated to *bpf_sock* to
 *		*argval*.
 *
 *		The primary use of this field is to determine if there should
 *		be calls to eBPF programs of type
 *		**BPF_PROG_TYPE_SOCK_OPS** at various points in the TCP
 *		code. A program of the same type can change its value, per
 *		connection and as necessary, when the connection is
 *		established. This field is directly accessible for reading, but
 *		this helper must be used for updates in order to return an
 *		error if an eBPF program tries to set a callback that is not
 *		supported in the current kernel.
 *
 *		*argval* is a flag array which can combine these flags:
 *
 *		* **BPF_SOCK_OPS_RTO_CB_FLAG** (retransmission time out)
 *		* **BPF_SOCK_OPS_RETRANS_CB_FLAG** (retransmission)
 *		* **BPF_SOCK_OPS_STATE_CB_FLAG** (TCP state change)
 *		* **BPF_SOCK_OPS_RTT_CB_FLAG** (every RTT)
 *
 *		Therefore, this function can be used to clear a callback flag by
 *		setting the appropriate bit to zero. For example, to disable
 *		the RTO callback:
 *
 *		**bpf_sock_ops_cb_flags_set(bpf_sock,**
 *		**bpf_sock->bpf_sock_ops_cb_flags & ~BPF_SOCK_OPS_RTO_CB_FLAG)**
 *
 *		Here are some examples of where one could call such an eBPF
 *		program:
 *
 *		* When RTO fires.
 *		* When a packet is retransmitted.
 *		* When the connection terminates.
 *		* When a packet is sent.
 *		* When a packet is received.
 *	Return
 *		Code **-EINVAL** if the socket is not a full TCP socket;
 *		otherwise, a positive number containing the bits that could not
 *		be set is returned (which comes down to 0 if all bits were set
 *		as required).
 *
 * long bpf_msg_redirect_map(struct sk_msg_buff *msg, struct bpf_map *map, u32 key, u64 flags)
 *	Description
 *		This helper is used in programs implementing policies at the
 *		socket level. If the message *msg* is allowed to pass (i.e. if
 *		the verdict eBPF program returns **SK_PASS**), redirect it to
 *		the socket referenced by *map* (of type
 *		**BPF_MAP_TYPE_SOCKMAP**) at index *key*.
 *		Both ingress and
 *		egress interfaces can be used for redirection. The
 *		**BPF_F_INGRESS** value in *flags* is used to make the
 *		distinction (ingress path is selected if the flag is present,
 *		egress path otherwise). This is the only flag supported for now.
 *	Return
 *		**SK_PASS** on success, or **SK_DROP** on error.
 *
 * long bpf_msg_apply_bytes(struct sk_msg_buff *msg, u32 bytes)
 *	Description
 *		For socket policies, apply the verdict of the eBPF program to
 *		the next *bytes* (number of bytes) of message *msg*.
 *
 *		For example, this helper can be used in the following cases:
 *
 *		* A single **sendmsg**\ () or **sendfile**\ () system call
 *		  contains multiple logical messages that the eBPF program is
 *		  supposed to read and for which it should apply a verdict.
 *		* An eBPF program only cares to read the first *bytes* of a
 *		  *msg*. If the message has a large payload, then setting up
 *		  and calling the eBPF program repeatedly for all bytes, even
 *		  though the verdict is already known, would create unnecessary
 *		  overhead.
 *
 *		When called from within an eBPF program, the helper sets a
 *		counter internal to the BPF infrastructure, that is used to
 *		apply the last verdict to the next *bytes*. If *bytes* is
 *		smaller than the current data being processed from a
 *		**sendmsg**\ () or **sendfile**\ () system call, the first
 *		*bytes* will be sent and the eBPF program will be re-run with
 *		the start-of-data pointer pointing to byte number *bytes*
 *		**+ 1**. If *bytes* is larger than the current data being
 *		processed, then the eBPF verdict will be applied to multiple
 *		**sendmsg**\ () or **sendfile**\ () calls until *bytes* are
 *		consumed.
 *
 *		Note that if a socket closes with the internal counter holding
 *		a non-zero value, this is not a problem because data is not
 *		being buffered for *bytes* and is sent as it is received.
 *	Return
 *		0
 *
 * long bpf_msg_cork_bytes(struct sk_msg_buff *msg, u32 bytes)
 *	Description
 *		For socket policies, prevent the execution of the verdict eBPF
 *		program for message *msg* until *bytes* (byte number) have been
 *		accumulated.
 *
 *		This can be used when one needs a specific number of bytes
 *		before a verdict can be assigned, even if the data spans
 *		multiple **sendmsg**\ () or **sendfile**\ () calls. The extreme
 *		case would be a user calling **sendmsg**\ () repeatedly with
 *		1-byte long message segments. Obviously, this is bad for
 *		performance, but it is still valid. If the eBPF program needs
 *		*bytes* bytes to validate a header, this helper can be used to
 *		prevent the eBPF program from being called again until *bytes*
 *		have been accumulated.
 *	Return
 *		0
 *
 * long bpf_msg_pull_data(struct sk_msg_buff *msg, u32 start, u32 end, u64 flags)
 *	Description
 *		For socket policies, pull in non-linear data from user space
 *		for *msg* and set pointers *msg*\ **->data** and *msg*\
 *		**->data_end** to *start* and *end* byte offsets into *msg*,
 *		respectively.
 *
 *		If a program of type **BPF_PROG_TYPE_SK_MSG** is run on a
 *		*msg* it can only parse data that the (**data**, **data_end**)
 *		pointers have already consumed. For **sendmsg**\ () hooks this
 *		is likely the first scatterlist element.
 *		But for calls relying
 *		on the **sendpage** handler (e.g. **sendfile**\ ()) this will
 *		be the range (**0**, **0**) because the data is shared with
 *		user space and by default the objective is to avoid allowing
 *		user space to modify data while (or after) the eBPF verdict is
 *		being decided. This helper can be used to pull in data and to
 *		set the start and end pointers to given values. Data will be
 *		copied if necessary (i.e. if data was not linear and if start
 *		and end pointers do not point to the same chunk).
 *
 *		A call to this helper is susceptible to change the underlying
 *		packet buffer. Therefore, at load time, all checks on pointers
 *		previously done by the verifier are invalidated and must be
 *		performed again, if the helper is used in combination with
 *		direct packet access.
 *
 *		All values for *flags* are reserved for future usage, and must
 *		be left at zero.
 *	Return
 *		0 on success, or a negative error in case of failure.
 *
 * long bpf_bind(struct bpf_sock_addr *ctx, struct sockaddr *addr, int addr_len)
 *	Description
 *		Bind the socket associated to *ctx* to the address pointed by
 *		*addr*, of length *addr_len*. This allows for making outgoing
 *		connections from the desired IP address, which can be useful
 *		for example when all processes inside a cgroup should use one
 *		single IP address on a host that has multiple IP addresses
 *		configured.
 *
 *		This helper works for IPv4 and IPv6, TCP and UDP sockets. The
 *		domain (*addr*\ **->sa_family**) must be **AF_INET** (or
 *		**AF_INET6**). It's advised to pass a zero port (**sin_port**
 *		or **sin6_port**) which triggers IP_BIND_ADDRESS_NO_PORT-like
 *		behavior and lets the kernel efficiently pick an unused port,
 *		as long as the 4-tuple is unique. Passing a non-zero port might
 *		lead to degraded performance.
 *	Return
 *		0 on success, or a negative error in case of failure.
 *
 * long bpf_xdp_adjust_tail(struct xdp_buff *xdp_md, int delta)
 *	Description
 *		Adjust (move) *xdp_md*\ **->data_end** by *delta* bytes. It is
 *		possible to both shrink and grow the packet tail: a shrink is
 *		done by passing a negative *delta*.
 *
 *		A call to this helper is susceptible to change the underlying
 *		packet buffer. Therefore, at load time, all checks on pointers
 *		previously done by the verifier are invalidated and must be
 *		performed again, if the helper is used in combination with
 *		direct packet access.
 *	Return
 *		0 on success, or a negative error in case of failure.
 *
 * long bpf_skb_get_xfrm_state(struct sk_buff *skb, u32 index, struct bpf_xfrm_state *xfrm_state, u32 size, u64 flags)
 *	Description
 *		Retrieve the XFRM state (IP transform framework, see also
 *		**ip-xfrm**\ (8)) at *index* in the XFRM "security path" for
 *		*skb*.
 *
 *		The retrieved value is stored in the **struct bpf_xfrm_state**
 *		pointed by *xfrm_state* and of length *size*.
 *
 *		All values for *flags* are reserved for future usage, and must
 *		be left at zero.
 *
 *		This helper is available only if the kernel was compiled with
 *		the **CONFIG_XFRM** configuration option.
 *	Return
 *		0 on success, or a negative error in case of failure.
 *
 * long bpf_get_stack(void *ctx, void *buf, u32 size, u64 flags)
 *	Description
 *		Return a user or a kernel stack in the BPF-program-provided
 *		buffer.
 *		To achieve this, the helper needs *ctx*, which is a pointer
 *		to the context on which the tracing program is executed.
 *		To store the stacktrace, the BPF program provides *buf* with
 *		a nonnegative *size*.
 *
 *		The last argument, *flags*, holds the number of stack frames to
 *		skip (from 0 to 255), masked with
 *		**BPF_F_SKIP_FIELD_MASK**. The next bits can be used to set
 *		the following flags:
 *
 *		**BPF_F_USER_STACK**
 *			Collect a user space stack instead of a kernel stack.
 *		**BPF_F_USER_BUILD_ID**
 *			Collect (build_id, file_offset) instead of ips for user
 *			stack, only valid if **BPF_F_USER_STACK** is also
 *			specified.
 *
 *			*file_offset* is an offset relative to the beginning
 *			of the executable or shared object file backing the vma
 *			which the *ip* falls in. It is *not* an offset relative
 *			to that object's base address. Accordingly, it must be
 *			adjusted by adding (sh_addr - sh_offset), where
 *			sh_{addr,offset} correspond to the executable section
 *			containing *file_offset* in the object, for comparisons
 *			to symbols' st_value to be valid.
 *
 *		**bpf_get_stack**\ () can collect up to
 *		**PERF_MAX_STACK_DEPTH** kernel and user frames, provided the
 *		buffer size is sufficiently large. Note that
 *		this limit can be controlled with the **sysctl** program, and
 *		that it should be manually increased in order to profile long
 *		user stacks (such as stacks for Java programs). To do so, use:
 *
 *		::
 *
 *			# sysctl kernel.perf_event_max_stack=<new value>
 *	Return
 *		The non-negative copied *buf* length equal to or less than
 *		*size* on success, or a negative error in case of failure.
 *
 * long bpf_skb_load_bytes_relative(const void *skb, u32 offset, void *to, u32 len, u32 start_header)
 *	Description
 *		This helper is similar to **bpf_skb_load_bytes**\ () in that
 *		it provides an easy way to load *len* bytes from *offset*
 *		from the packet associated to *skb*, into the buffer pointed
 *		by *to*. The difference to **bpf_skb_load_bytes**\ () is that
 *		a fifth argument *start_header* exists in order to select a
 *		base offset to start from. *start_header* can be one of:
 *
 *		**BPF_HDR_START_MAC**
 *			Base offset to load data from is *skb*'s mac header.
 *		**BPF_HDR_START_NET**
 *			Base offset to load data from is *skb*'s network header.
 *
 *		In general, "direct packet access" is the preferred method to
 *		access packet data; however, this helper is particularly useful
 *		in socket filters where *skb*\ **->data** does not always point
 *		to the start of the mac header and where "direct packet access"
 *		is not available.
 *	Return
 *		0 on success, or a negative error in case of failure.
 *
 * long bpf_fib_lookup(void *ctx, struct bpf_fib_lookup *params, int plen, u32 flags)
 *	Description
 *		Do a FIB lookup in kernel tables using parameters in *params*.
 *		If the lookup is successful and the result shows the packet is
 *		to be forwarded, the neighbor tables are searched for the
 *		nexthop.
 *		If successful (i.e., the FIB lookup shows forwarding and the
 *		nexthop is resolved), the nexthop address is returned in
 *		ipv4_dst or ipv6_dst based on family, smac is set to the mac
 *		address of the egress device, dmac is set to the nexthop mac
 *		address, rt_metric is set to the metric from the route
 *		(IPv4/IPv6 only), and ifindex is set to the device index of
 *		the nexthop from the FIB lookup.
 *
 *		The *plen* argument is the size of the passed in struct.
 *		The *flags* argument can be a combination of one or more of
 *		the following values:
 *
 *		**BPF_FIB_LOOKUP_DIRECT**
 *			Do a direct table lookup vs a full lookup using FIB
 *			rules.
 *		**BPF_FIB_LOOKUP_OUTPUT**
 *			Perform lookup from an egress perspective (default is
 *			ingress).
 *
 *		*ctx* is either **struct xdp_md** for XDP programs or
 *		**struct sk_buff** for tc cls_act programs.
 *	Return
 *		* < 0 if any input argument is invalid
 *		* 0 on success (packet is forwarded, nexthop neighbor exists)
 *		* > 0 one of **BPF_FIB_LKUP_RET_** codes explaining why the
 *		  packet is not forwarded or needs assist from full stack
 *
 *		If the lookup fails with **BPF_FIB_LKUP_RET_FRAG_NEEDED**, then
 *		the MTU was exceeded and the output *params*\ **->mtu_result**
 *		contains the MTU.
 *
 * long bpf_sock_hash_update(struct bpf_sock_ops *skops, struct bpf_map *map, void *key, u64 flags)
 *	Description
 *		Add an entry to, or update a sockhash *map* referencing sockets.
 *		The *skops* is used as a new value for the entry associated to
 *		*key*. *flags* is one of:
 *
 *		**BPF_NOEXIST**
 *			The entry for *key* must not exist in the map.
 *		**BPF_EXIST**
 *			The entry for *key* must already exist in the map.
 *		**BPF_ANY**
 *			No condition on the existence of the entry for *key*.
 *
 *		If the *map* has eBPF programs (parser and verdict), those will
 *		be inherited by the socket being added. If the socket is
 *		already attached to eBPF programs, this results in an error.
 *	Return
 *		0 on success, or a negative error in case of failure.
 *
 * long bpf_msg_redirect_hash(struct sk_msg_buff *msg, struct bpf_map *map, void *key, u64 flags)
 *	Description
 *		This helper is used in programs implementing policies at the
 *		socket level. If the message *msg* is allowed to pass (i.e. if
 *		the verdict eBPF program returns **SK_PASS**), redirect it to
 *		the socket referenced by *map* (of type
 *		**BPF_MAP_TYPE_SOCKHASH**) using hash *key*. Both ingress and
 *		egress interfaces can be used for redirection. The
 *		**BPF_F_INGRESS** value in *flags* is used to make the
 *		distinction (ingress path is selected if the flag is present,
 *		egress path otherwise). This is the only flag supported for now.
 *	Return
 *		**SK_PASS** on success, or **SK_DROP** on error.
 *
 * long bpf_sk_redirect_hash(struct sk_buff *skb, struct bpf_map *map, void *key, u64 flags)
 *	Description
 *		This helper is used in programs implementing policies at the
 *		skb socket level. If the sk_buff *skb* is allowed to pass (i.e.
 *		if the verdict eBPF program returns **SK_PASS**), redirect it
 *		to the socket referenced by *map* (of type
 *		**BPF_MAP_TYPE_SOCKHASH**) using hash *key*. Both ingress and
 *		egress interfaces can be used for redirection.
 *		The
 *		**BPF_F_INGRESS** value in *flags* is used to make the
 *		distinction (ingress path is selected if the flag is present,
 *		egress path otherwise). This is the only flag supported for now.
 *	Return
 *		**SK_PASS** on success, or **SK_DROP** on error.
 *
 * long bpf_lwt_push_encap(struct sk_buff *skb, u32 type, void *hdr, u32 len)
 *	Description
 *		Encapsulate the packet associated to *skb* within a Layer 3
 *		protocol header. This header is provided in the buffer at
 *		address *hdr*, with *len* its size in bytes. *type* indicates
 *		the protocol of the header and can be one of:
 *
 *		**BPF_LWT_ENCAP_SEG6**
 *			IPv6 encapsulation with Segment Routing Header
 *			(**struct ipv6_sr_hdr**). *hdr* only contains the SRH,
 *			the IPv6 header is computed by the kernel.
 *		**BPF_LWT_ENCAP_SEG6_INLINE**
 *			Only works if *skb* contains an IPv6 packet. Insert a
 *			Segment Routing Header (**struct ipv6_sr_hdr**) inside
 *			the IPv6 header.
 *		**BPF_LWT_ENCAP_IP**
 *			IP encapsulation (GRE/GUE/IPIP/etc). The outer header
 *			must be IPv4 or IPv6, followed by zero or more
 *			additional headers, up to **LWT_BPF_MAX_HEADROOM**
 *			total bytes in all prepended headers. Please note that
 *			if **skb_is_gso**\ (*skb*) is true, no more than two
 *			headers can be prepended, and the inner header, if
 *			present, should be either GRE or UDP/GUE.
 *
 *		**BPF_LWT_ENCAP_SEG6**\ \* types can be called by BPF programs
 *		of type **BPF_PROG_TYPE_LWT_IN**; the **BPF_LWT_ENCAP_IP**
 *		type can be called by BPF programs of types
 *		**BPF_PROG_TYPE_LWT_IN** and **BPF_PROG_TYPE_LWT_XMIT**.
 *
 *		A call to this helper is susceptible to change the underlying
 *		packet buffer. Therefore, at load time, all checks on pointers
 *		previously done by the verifier are invalidated and must be
 *		performed again, if the helper is used in combination with
 *		direct packet access.
 *	Return
 *		0 on success, or a negative error in case of failure.
 *
 * long bpf_lwt_seg6_store_bytes(struct sk_buff *skb, u32 offset, const void *from, u32 len)
 *	Description
 *		Store *len* bytes from address *from* into the packet
 *		associated to *skb*, at *offset*. Only the flags, tag and TLVs
 *		inside the outermost IPv6 Segment Routing Header can be
 *		modified through this helper.
 *
 *		A call to this helper is susceptible to change the underlying
 *		packet buffer. Therefore, at load time, all checks on pointers
 *		previously done by the verifier are invalidated and must be
 *		performed again, if the helper is used in combination with
 *		direct packet access.
 *	Return
 *		0 on success, or a negative error in case of failure.
 *
 * long bpf_lwt_seg6_adjust_srh(struct sk_buff *skb, u32 offset, s32 delta)
 *	Description
 *		Adjust the size allocated to TLVs in the outermost IPv6
 *		Segment Routing Header contained in the packet associated to
 *		*skb*, at position *offset* by *delta* bytes. Only offsets
 *		after the segments are accepted. *delta* can be either
 *		positive (growing) or negative (shrinking).
 *
 *		A call to this helper is susceptible to change the underlying
 *		packet buffer. Therefore, at load time, all checks on pointers
 *		previously done by the verifier are invalidated and must be
 *		performed again, if the helper is used in combination with
 *		direct packet access.
 *	Return
 *		0 on success, or a negative error in case of failure.
 *
 * long bpf_lwt_seg6_action(struct sk_buff *skb, u32 action, void *param, u32 param_len)
 *	Description
 *		Apply an IPv6 Segment Routing action of type *action* to the
 *		packet associated to *skb*. Each action takes a parameter
 *		contained at address *param*, and of length *param_len* bytes.
 *		*action* can be one of:
 *
 *		**SEG6_LOCAL_ACTION_END_X**
 *			End.X action: Endpoint with Layer-3 cross-connect.
 *			Type of *param*: **struct in6_addr**.
 *		**SEG6_LOCAL_ACTION_END_T**
 *			End.T action: Endpoint with specific IPv6 table lookup.
 *			Type of *param*: **int**.
 *		**SEG6_LOCAL_ACTION_END_B6**
 *			End.B6 action: Endpoint bound to an SRv6 policy.
 *			Type of *param*: **struct ipv6_sr_hdr**.
 *		**SEG6_LOCAL_ACTION_END_B6_ENCAP**
 *			End.B6.Encap action: Endpoint bound to an SRv6
 *			encapsulation policy.
 *			Type of *param*: **struct ipv6_sr_hdr**.
 *
 *		A call to this helper is susceptible to change the underlying
 *		packet buffer. Therefore, at load time, all checks on pointers
 *		previously done by the verifier are invalidated and must be
 *		performed again, if the helper is used in combination with
 *		direct packet access.
 *	Return
 *		0 on success, or a negative error in case of failure.
 *
 * long bpf_rc_repeat(void *ctx)
 *	Description
 *		This helper is used in programs implementing IR decoding, to
 *		report a successfully decoded repeat key message. This delays
 *		the generation of a key up event for the previously generated
 *		key down event.
 *
 *		Some IR protocols like NEC have a special IR message for
 *		repeating the last button, for when a button is held down.
 *
 *		The *ctx* should point to the lirc sample as passed into
 *		the program.
 *
 *		This helper is only available if the kernel was compiled with
 *		the **CONFIG_BPF_LIRC_MODE2** configuration option set to
 *		"**y**".
 *	Return
 *		0
 *
 * long bpf_rc_keydown(void *ctx, u32 protocol, u64 scancode, u32 toggle)
 *	Description
 *		This helper is used in programs implementing IR decoding, to
 *		report a successfully decoded key press with *scancode* and
 *		*toggle* value in the given *protocol*. The scancode will be
 *		translated to a keycode using the rc keymap, and reported as
 *		an input key down event. After a period a key up event is
 *		generated. This period can be extended by calling either
 *		**bpf_rc_keydown**\ () again with the same values, or calling
 *		**bpf_rc_repeat**\ ().
 *
 *		Some protocols include a toggle bit, in case the button was
 *		released and pressed again between consecutive scancodes.
 *
 *		The *ctx* should point to the lirc sample as passed into
 *		the program.
 *
 *		The *protocol* is the decoded protocol number (see
 *		**enum rc_proto** for some predefined values).
 *
 *		This helper is only available if the kernel was compiled with
 *		the **CONFIG_BPF_LIRC_MODE2** configuration option set to
 *		"**y**".
 *	Return
 *		0
 *
 * u64 bpf_skb_cgroup_id(struct sk_buff *skb)
 *	Description
 *		Return the cgroup v2 id of the socket associated with the *skb*.
 *		This is roughly similar to the **bpf_get_cgroup_classid**\ ()
 *		helper for cgroup v1, providing a tag or an
 *		identifier that
 *		can be matched on or used for map lookups, e.g. to implement
 *		policy. The cgroup v2 id of a given path in the hierarchy is
 *		exposed in user space through the f_handle API in order to get
 *		to the same 64-bit id.
 *
 *		This helper can be used on the TC egress path, but not on
 *		ingress, and is available only if the kernel was compiled with
 *		the **CONFIG_SOCK_CGROUP_DATA** configuration option.
 *	Return
 *		The id is returned or 0 in case the id could not be retrieved.
 *
 * u64 bpf_get_current_cgroup_id(void)
 *	Description
 *		Get the current cgroup id based on the cgroup within which
 *		the current task is running.
 *	Return
 *		A 64-bit integer containing the current cgroup id based
 *		on the cgroup within which the current task is running.
 *
 * void *bpf_get_local_storage(void *map, u64 flags)
 *	Description
 *		Get the pointer to the local storage area.
 *		The type and the size of the local storage is defined
 *		by the *map* argument.
 *		The *flags* meaning is specific to each map type,
 *		and has to be 0 for cgroup local storage.
 *
 *		Depending on the BPF program type, a local storage area
 *		can be shared between multiple instances of the BPF program,
 *		running simultaneously.
 *
 *		Users are responsible for the synchronization themselves, for
 *		example by using the **BPF_ATOMIC** instructions to alter
 *		the shared data.
 *	Return
 *		A pointer to the local storage area.
 *
 * long bpf_sk_select_reuseport(struct sk_reuseport_md *reuse, struct bpf_map *map, void *key, u64 flags)
 *	Description
 *		Select a **SO_REUSEPORT** socket from a
 *		**BPF_MAP_TYPE_REUSEPORT_SOCKARRAY** *map*.
 *		It checks that the selected socket matches the incoming
 *		request in the socket buffer.
 *	Return
 *		0 on success, or a negative error in case of failure.
 *
 * u64 bpf_skb_ancestor_cgroup_id(struct sk_buff *skb, int ancestor_level)
 *	Description
 *		Return the id of the cgroup v2 that is an ancestor of the
 *		cgroup associated with the *skb* at the *ancestor_level*. The
 *		root cgroup is at *ancestor_level* zero and each step down the
 *		hierarchy increments the level. If *ancestor_level* == level of
 *		the cgroup associated with *skb*, then the return value will be
 *		the same as that of **bpf_skb_cgroup_id**\ ().
 *
 *		The helper is useful to implement policies based on cgroups
 *		that are higher in the hierarchy than the immediate cgroup
 *		associated with *skb*.
 *
 *		The format of the returned id and the helper limitations are
 *		the same as in **bpf_skb_cgroup_id**\ ().
 *	Return
 *		The id is returned or 0 in case the id could not be retrieved.
 *
 * struct bpf_sock *bpf_sk_lookup_tcp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags)
 *	Description
 *		Look for a TCP socket matching *tuple*, optionally in a child
 *		network namespace *netns*. The return value must be checked,
 *		and if non-**NULL**, released via **bpf_sk_release**\ ().
 *
 *		The *ctx* should point to the context of the program, such as
 *		the skb or socket (depending on the hook in use). This is used
 *		to determine the base network namespace for the lookup.
 *
 *		*tuple_size* must be one of:
 *
 *		**sizeof**\ (*tuple*\ **->ipv4**)
 *			Look for an IPv4 socket.
 *		**sizeof**\ (*tuple*\ **->ipv6**)
 *			Look for an IPv6 socket.
 *
 *		If the *netns* is a negative signed 32-bit integer, then the
 *		socket lookup table in the netns associated with the *ctx*
 *		will be used. For the TC hooks, this is the netns of the device
 *		in the skb. For socket hooks, this is the netns of the socket.
 *		If *netns* is any other signed 32-bit value greater than or
 *		equal to zero then it specifies the ID of the netns relative to
 *		the netns associated with the *ctx*. *netns* values beyond the
 *		range of 32-bit integers are reserved for future use.
 *
 *		All values for *flags* are reserved for future usage, and must
 *		be left at zero.
 *
 *		This helper is available only if the kernel was compiled with
 *		the **CONFIG_NET** configuration option.
 *	Return
 *		Pointer to **struct bpf_sock**, or **NULL** in case of failure.
 *		For sockets with reuseport option, the **struct bpf_sock**
 *		result is from *reuse*\ **->socks**\ [] using the hash of the
 *		tuple.
 *
 * struct bpf_sock *bpf_sk_lookup_udp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags)
 *	Description
 *		Look for a UDP socket matching *tuple*, optionally in a child
 *		network namespace *netns*. The return value must be checked,
 *		and if non-**NULL**, released via **bpf_sk_release**\ ().
 *
 *		The *ctx* should point to the context of the program, such as
 *		the skb or socket (depending on the hook in use). This is used
 *		to determine the base network namespace for the lookup.
 *
 *		*tuple_size* must be one of:
 *
 *		**sizeof**\ (*tuple*\ **->ipv4**)
 *			Look for an IPv4 socket.
 *		**sizeof**\ (*tuple*\ **->ipv6**)
 *			Look for an IPv6 socket.
 *
 *		If the *netns* is a negative signed 32-bit integer, then the
 *		socket lookup table in the netns associated with the *ctx*
 *		will be used. For the TC hooks, this is the netns of the device
 *		in the skb. For socket hooks, this is the netns of the socket.
 *		If *netns* is any other signed 32-bit value greater than or
 *		equal to zero then it specifies the ID of the netns relative to
 *		the netns associated with the *ctx*. *netns* values beyond the
 *		range of 32-bit integers are reserved for future use.
 *
 *		All values for *flags* are reserved for future usage, and must
 *		be left at zero.
 *
 *		This helper is available only if the kernel was compiled with
 *		the **CONFIG_NET** configuration option.
 *	Return
 *		Pointer to **struct bpf_sock**, or **NULL** in case of failure.
 *		For sockets with reuseport option, the **struct bpf_sock**
 *		result is from *reuse*\ **->socks**\ [] using the hash of the
 *		tuple.
 *
 * long bpf_sk_release(void *sock)
 *	Description
 *		Release the reference held by *sock*. *sock* must be a
 *		non-**NULL** pointer that was returned from
 *		**bpf_sk_lookup_xxx**\ ().
 *	Return
 *		0 on success, or a negative error in case of failure.
 *
 * long bpf_map_push_elem(struct bpf_map *map, const void *value, u64 flags)
 *	Description
 *		Push an element *value* into *map*. *flags* is one of:
 *
 *		**BPF_EXIST**
 *			If the queue/stack is full, the oldest element is
 *			removed to make room for it.
 *	Return
 *		0 on success, or a negative error in case of failure.
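 *
 *		As an illustration of the lookup/release pattern above, a
 *		minimal sketch of a TC classifier that checks for a matching
 *		TCP socket (tuple initialization from the packet headers is
 *		omitted; the section name and the use of
 *		**BPF_F_CURRENT_NETNS** as *netns* are illustrative
 *		assumptions):
 *
 *		::
 *
 *			SEC("tc")
 *			int sock_check(struct __sk_buff *skb)
 *			{
 *				struct bpf_sock_tuple tuple = {};
 *				struct bpf_sock *sk;
 *
 *				/* fill tuple.ipv4 from the packet headers here */
 *				sk = bpf_sk_lookup_tcp(skb, &tuple,
 *						       sizeof(tuple.ipv4),
 *						       BPF_F_CURRENT_NETNS, 0);
 *				if (sk) {
 *					/* inspect the socket here */
 *					bpf_sk_release(sk);
 *				}
 *				return 0;
 *			}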
 *
 * long bpf_map_pop_elem(struct bpf_map *map, void *value)
 *	Description
 *		Pop an element from *map*.
 *	Return
 *		0 on success, or a negative error in case of failure.
 *
 * long bpf_map_peek_elem(struct bpf_map *map, void *value)
 *	Description
 *		Get an element from *map* without removing it.
 *	Return
 *		0 on success, or a negative error in case of failure.
 *
 * long bpf_msg_push_data(struct sk_msg_buff *msg, u32 start, u32 len, u64 flags)
 *	Description
 *		For socket policies, insert *len* bytes into *msg* at offset
 *		*start*.
 *
 *		If a program of type **BPF_PROG_TYPE_SK_MSG** is run on a
 *		*msg* it may want to insert metadata or options into the *msg*.
 *		This can later be read and used by any of the lower layer BPF
 *		hooks.
 *
 *		This helper may fail under memory pressure (if an allocation
 *		fails); in these cases the BPF program will get an appropriate
 *		error and will need to handle it.
 *	Return
 *		0 on success, or a negative error in case of failure.
 *
 * long bpf_msg_pop_data(struct sk_msg_buff *msg, u32 start, u32 len, u64 flags)
 *	Description
 *		Remove *len* bytes from a *msg* starting at byte *start*.
 *		This may result in **ENOMEM** errors under certain situations if
 *		an allocation and copy are required due to a full ring buffer.
 *		However, the helper will try to avoid doing the allocation
 *		if possible. Other errors can occur if input parameters are
 *		invalid, either due to the *start* byte not being a valid part
 *		of the *msg* payload and/or the *len* value being too large.
 *	Return
 *		0 on success, or a negative error in case of failure.
 *
 * long bpf_rc_pointer_rel(void *ctx, s32 rel_x, s32 rel_y)
 *	Description
 *		This helper is used in programs implementing IR decoding, to
 *		report a successfully decoded pointer movement.
 *
 *		The *ctx* should point to the lirc sample as passed into
 *		the program.
 *
 *		This helper is only available if the kernel was compiled with
 *		the **CONFIG_BPF_LIRC_MODE2** configuration option set to
 *		"**y**".
 *	Return
 *		0
 *
 * long bpf_spin_lock(struct bpf_spin_lock *lock)
 *	Description
 *		Acquire a spinlock represented by the pointer *lock*, which is
 *		stored as part of a value of a map. Taking the lock allows one
 *		to safely update the rest of the fields in that value. The
 *		spinlock can (and must) later be released with a call to
 *		**bpf_spin_unlock**\ (\ *lock*\ ).
 *
 *		Spinlocks in BPF programs come with a number of restrictions
 *		and constraints:
 *
 *		* **bpf_spin_lock** objects are only allowed inside maps of
 *		  types **BPF_MAP_TYPE_HASH** and **BPF_MAP_TYPE_ARRAY** (this
 *		  list could be extended in the future).
 *		* BTF description of the map is mandatory.
 *		* The BPF program can take ONE lock at a time, since taking two
 *		  or more could cause deadlocks.
 *		* Only one **struct bpf_spin_lock** is allowed per map element.
 *		* When the lock is taken, calls (either BPF to BPF or helpers)
 *		  are not allowed.
 *		* The **BPF_LD_ABS** and **BPF_LD_IND** instructions are not
 *		  allowed inside a spinlock-ed region.
 *		* The BPF program MUST call **bpf_spin_unlock**\ () to release
 *		  the lock, on all execution paths, before it returns.
 *		* The BPF program can access **struct bpf_spin_lock** only via
 *		  the **bpf_spin_lock**\ () and **bpf_spin_unlock**\ ()
 *		  helpers. Loading or storing data into the **struct
 *		  bpf_spin_lock** *lock*\ **;** field of a map is not allowed.
 *		* To use the **bpf_spin_lock**\ () helper, the BTF description
 *		  of the map value must be a struct and have **struct
 *		  bpf_spin_lock** *anyname*\ **;** field at the top level.
 *		  Nesting the lock inside another struct is not allowed.
 *		* The **struct bpf_spin_lock** *lock* field in a map value must
 *		  be aligned on a multiple of 4 bytes in that value.
 *		* Syscall with command **BPF_MAP_LOOKUP_ELEM** does not copy
 *		  the **bpf_spin_lock** field to user space.
 *		* Syscall with command **BPF_MAP_UPDATE_ELEM**, or update from
 *		  a BPF program, do not update the **bpf_spin_lock** field.
 *		* **bpf_spin_lock** cannot be on the stack or inside a
 *		  networking packet (it can only be inside of a map value).
 *		* **bpf_spin_lock** is available to root only.
 *		* Tracing programs and socket filter programs cannot use
 *		  **bpf_spin_lock**\ () due to insufficient preemption checks
 *		  (but this may change in the future).
 *		* **bpf_spin_lock** is not allowed in inner maps of map-in-map.
 *	Return
 *		0
 *
 * long bpf_spin_unlock(struct bpf_spin_lock *lock)
 *	Description
 *		Release the *lock* previously locked by a call to
 *		**bpf_spin_lock**\ (\ *lock*\ ).
 *	Return
 *		0
 *
 * struct bpf_sock *bpf_sk_fullsock(struct bpf_sock *sk)
 *	Description
 *		This helper gets a **struct bpf_sock** pointer such
 *		that all the fields in this **bpf_sock** can be accessed.
 *	Return
 *		A **struct bpf_sock** pointer on success, or **NULL** in
 *		case of failure.
 *
 * struct bpf_tcp_sock *bpf_tcp_sock(struct bpf_sock *sk)
 *	Description
 *		This helper gets a **struct bpf_tcp_sock** pointer from a
 *		**struct bpf_sock** pointer.
 *	Return
 *		A **struct bpf_tcp_sock** pointer on success, or **NULL** in
 *		case of failure.
 *
 * long bpf_skb_ecn_set_ce(struct sk_buff *skb)
 *	Description
 *		Set the ECN (Explicit Congestion Notification) field of the IP
 *		header to **CE** (Congestion Encountered) if the current value
 *		is **ECT** (ECN Capable Transport). Otherwise, do nothing.
 *		Works with IPv6 and IPv4.
 *	Return
 *		1 if the **CE** flag is set (either by the current helper call
 *		or because it was already present), 0 if it is not set.
 *
 * struct bpf_sock *bpf_get_listener_sock(struct bpf_sock *sk)
 *	Description
 *		Return a **struct bpf_sock** pointer in **TCP_LISTEN** state.
 *		**bpf_sk_release**\ () is unnecessary and not allowed.
 *	Return
 *		A **struct bpf_sock** pointer on success, or **NULL** in
 *		case of failure.
 *
 * struct bpf_sock *bpf_skc_lookup_tcp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags)
 *	Description
 *		Look for a TCP socket matching *tuple*, optionally in a child
 *		network namespace *netns*. The return value must be checked,
 *		and if non-**NULL**, released via **bpf_sk_release**\ ().
 *
 *		This function is identical to **bpf_sk_lookup_tcp**\ (), except
 *		that it also returns timewait or request sockets. Use
 *		**bpf_sk_fullsock**\ () or **bpf_tcp_sock**\ () to access the
 *		full structure.
 *
 *		This helper is available only if the kernel was compiled with
 *		the **CONFIG_NET** configuration option.
 *	Return
 *		Pointer to **struct bpf_sock**, or **NULL** in case of failure.
 *		For sockets with reuseport option, the **struct bpf_sock**
 *		result is from *reuse*\ **->socks**\ [] using the hash of the
 *		tuple.
 *
 * long bpf_tcp_check_syncookie(void *sk, void *iph, u32 iph_len, struct tcphdr *th, u32 th_len)
 *	Description
 *		Check whether *iph* and *th* contain a valid SYN cookie ACK for
 *		the listening socket in *sk*.
 *
 *		*iph* points to the start of the IPv4 or IPv6 header, while
 *		*iph_len* contains **sizeof**\ (**struct iphdr**) or
 *		**sizeof**\ (**struct ipv6hdr**).
 *
 *		*th* points to the start of the TCP header, while *th_len*
 *		contains the length of the TCP header (at least
 *		**sizeof**\ (**struct tcphdr**)).
 *	Return
 *		0 if *iph* and *th* are a valid SYN cookie ACK, or a negative
 *		error otherwise.
 *
 * long bpf_sysctl_get_name(struct bpf_sysctl *ctx, char *buf, size_t buf_len, u64 flags)
 *	Description
 *		Get the name of the sysctl in /proc/sys/ and copy it into the
 *		buffer *buf* of size *buf_len* provided by the program.
 *
 *		The buffer is always NUL terminated, unless it's zero-sized.
 *
 *		If *flags* is zero, the full name (e.g. "net/ipv4/tcp_mem") is
 *		copied. Use the **BPF_F_SYSCTL_BASE_NAME** flag to copy the
 *		base name only (e.g. "tcp_mem").
 *	Return
 *		Number of characters copied (not including the trailing NUL).
 *
 *		**-E2BIG** if the buffer wasn't big enough (*buf* will contain
 *		the truncated name in this case).
 *
 * long bpf_sysctl_get_current_value(struct bpf_sysctl *ctx, char *buf, size_t buf_len)
 *	Description
 *		Get the current value of the sysctl as it is presented in
 *		/proc/sys (incl. newline, etc), and copy it as a string into
 *		the buffer *buf* of size *buf_len* provided by the program.
 *
 *		The whole value is copied, no matter what file position user
 *		space issued e.g. **sys_read** at.
 *
 *		The buffer is always NUL terminated, unless it's zero-sized.
 *	Return
 *		Number of characters copied (not including the trailing NUL).
 *
 *		**-E2BIG** if the buffer wasn't big enough (*buf* will contain
 *		the truncated value in this case).
 *
 *		**-EINVAL** if the current value was unavailable, e.g. because
 *		the sysctl is uninitialized and read returns -EIO for it.
 *
 * long bpf_sysctl_get_new_value(struct bpf_sysctl *ctx, char *buf, size_t buf_len)
 *	Description
 *		Get the new value being written by user space to the sysctl
 *		(before the actual write happens) and copy it as a string into
 *		the buffer *buf* of size *buf_len* provided by the program.
 *
 *		User space may write the new value at a file position > 0.
 *
 *		The buffer is always NUL terminated, unless it's zero-sized.
 *	Return
 *		Number of characters copied (not including the trailing NUL).
 *
 *		**-E2BIG** if the buffer wasn't big enough (*buf* will contain
 *		the truncated value in this case).
 *
 *		**-EINVAL** if the sysctl is being read.
 *
 * long bpf_sysctl_set_new_value(struct bpf_sysctl *ctx, const char *buf, size_t buf_len)
 *	Description
 *		Override the new value being written by user space to the
 *		sysctl with the value provided by the program in the buffer
 *		*buf* of size *buf_len*.
 *
 *		*buf* should contain a string in the same form as provided by
 *		user space on sysctl write.
 *
 *		User space may write the new value at a file position > 0. To
 *		override the whole sysctl value, the file position should be
 *		set to zero.
 *	Return
 *		0 on success.
 *
 *		**-E2BIG** if the *buf_len* is too big.
 *
 *		**-EINVAL** if the sysctl is being read.
 *
 * long bpf_strtol(const char *buf, size_t buf_len, u64 flags, long *res)
 *	Description
 *		Convert the initial part of the string from buffer *buf* of
 *		size *buf_len* to a long integer according to the given base
 *		and save the result in *res*.
 *
 *		The string may begin with an arbitrary amount of white space
 *		(as determined by **isspace**\ (3)) followed by a single
 *		optional '**-**' sign.
 *
 *		The five least significant bits of *flags* encode the base;
 *		the other bits are currently unused.
 *
 *		The base must be either 8, 10, 16 or 0 to detect it
 *		automatically, similar to user space **strtol**\ (3).
 *	Return
 *		Number of characters consumed on success. Must be positive but
 *		no more than *buf_len*.
 *
 *		**-EINVAL** if no valid digits were found or an unsupported
 *		base was provided.
 *
 *		**-ERANGE** if the resulting value was out of range.
 *
 * long bpf_strtoul(const char *buf, size_t buf_len, u64 flags, unsigned long *res)
 *	Description
 *		Convert the initial part of the string from buffer *buf* of
 *		size *buf_len* to an unsigned long integer according to the
 *		given base and save the result in *res*.
 *
 *		The string may begin with an arbitrary amount of white space
 *		(as determined by **isspace**\ (3)).
 *
 *		The five least significant bits of *flags* encode the base;
 *		the other bits are currently unused.
 *
 *		The base must be either 8, 10, 16 or 0 to detect it
 *		automatically, similar to user space **strtoul**\ (3).
 *	Return
 *		Number of characters consumed on success. Must be positive but
 *		no more than *buf_len*.
 *
 *		**-EINVAL** if no valid digits were found or an unsupported
 *		base was provided.
 *
 *		**-ERANGE** if the resulting value was out of range.
 *
 * void *bpf_sk_storage_get(struct bpf_map *map, void *sk, void *value, u64 flags)
 *	Description
 *		Get a bpf-local-storage from a *sk*.
 *
 *		Logically, it could be thought of as getting the value from
 *		a *map* with *sk* as the **key**. From this
 *		perspective, the usage is not much different from
 *		**bpf_map_lookup_elem**\ (*map*, **&**\ *sk*) except that this
 *		helper enforces that the key must be a full socket and that
 *		the map must be a **BPF_MAP_TYPE_SK_STORAGE**.
 *
 *		Underneath, the value is stored locally at *sk* instead of in
 *		the *map*. The *map* is used as the bpf-local-storage
 *		"type". The bpf-local-storage "type" (i.e. the *map*) is
 *		searched against all bpf-local-storages residing at *sk*.
 *
 *		*sk* is a kernel **struct sock** pointer for LSM programs.
 *		*sk* is a **struct bpf_sock** pointer for other program types.
 *
 *		An optional *flags* (**BPF_SK_STORAGE_GET_F_CREATE**) can be
 *		used such that a new bpf-local-storage will be
 *		created if one does not exist. *value* can be used
 *		together with **BPF_SK_STORAGE_GET_F_CREATE** to specify
 *		the initial value of a bpf-local-storage.
 *		If *value* is
 *		**NULL**, the new bpf-local-storage will be zero initialized.
 *	Return
 *		A bpf-local-storage pointer is returned on success.
 *
 *		**NULL** if not found or there was an error in adding
 *		a new bpf-local-storage.
 *
 * long bpf_sk_storage_delete(struct bpf_map *map, void *sk)
 *	Description
 *		Delete a bpf-local-storage from a *sk*.
 *	Return
 *		0 on success.
 *
 *		**-ENOENT** if the bpf-local-storage cannot be found.
 *		**-EINVAL** if *sk* is not a fullsock (e.g. a request_sock).
 *
 * long bpf_send_signal(u32 sig)
 *	Description
 *		Send signal *sig* to the process of the current task.
 *		The signal may be delivered to any of this process's threads.
 *	Return
 *		0 on success or successfully queued.
 *
 *		**-EBUSY** if the work queue under nmi is full.
 *
 *		**-EINVAL** if *sig* is invalid.
 *
 *		**-EPERM** if no permission to send the *sig*.
 *
 *		**-EAGAIN** if the bpf program can try again.
 *
 * s64 bpf_tcp_gen_syncookie(void *sk, void *iph, u32 iph_len, struct tcphdr *th, u32 th_len)
 *	Description
 *		Try to issue a SYN cookie for the packet with corresponding
 *		IP/TCP headers, *iph* and *th*, on the listening socket in *sk*.
 *
 *		*iph* points to the start of the IPv4 or IPv6 header, while
 *		*iph_len* contains **sizeof**\ (**struct iphdr**) or
 *		**sizeof**\ (**struct ipv6hdr**).
 *
 *		*th* points to the start of the TCP header, while *th_len*
 *		contains the length of the TCP header with options (at least
 *		**sizeof**\ (**struct tcphdr**)).
 *	Return
 *		On success, the lower 32 bits hold the generated SYN cookie,
 *		followed by 16 bits which hold the MSS value for that cookie,
 *		and the top 16 bits are unused.
 *
 *		On failure, the returned value is one of the following:
 *
 *		**-EINVAL** SYN cookie cannot be issued due to error
 *
 *		**-ENOENT** SYN cookie should not be issued (no SYN flood)
 *
 *		**-EOPNOTSUPP** kernel configuration does not enable SYN cookies
 *
 *		**-EPROTONOSUPPORT** IP packet version is not 4 or 6
 *
 * long bpf_skb_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size)
 *	Description
 *		Write raw *data* blob into a special BPF perf event held by
 *		*map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf
 *		event must have the following attributes: **PERF_SAMPLE_RAW**
 *		as **sample_type**, **PERF_TYPE_SOFTWARE** as **type**, and
 *		**PERF_COUNT_SW_BPF_OUTPUT** as **config**.
 *
 *		The *flags* are used to indicate the index in *map* for which
 *		the value must be put, masked with **BPF_F_INDEX_MASK**.
 *		Alternatively, *flags* can be set to **BPF_F_CURRENT_CPU**
 *		to indicate that the index of the current CPU core should be
 *		used.
 *
 *		The value to write, of *size*, is passed through the eBPF
 *		stack and pointed by *data*.
 *
 *		*ctx* is a pointer to the in-kernel struct sk_buff.
 *
 *		This helper is similar to **bpf_perf_event_output**\ () but
 *		restricted to raw_tracepoint bpf programs.
 *	Return
 *		0 on success, or a negative error in case of failure.
 *
 * long bpf_probe_read_user(void *dst, u32 size, const void *unsafe_ptr)
 *	Description
 *		Safely attempt to read *size* bytes from user space address
 *		*unsafe_ptr* and store the data in *dst*.
 *	Return
 *		0 on success, or a negative error in case of failure.
 *
 * long bpf_probe_read_kernel(void *dst, u32 size, const void *unsafe_ptr)
 *	Description
 *		Safely attempt to read *size* bytes from kernel space address
 *		*unsafe_ptr* and store the data in *dst*.
 *	Return
 *		0 on success, or a negative error in case of failure.
 *
 * long bpf_probe_read_user_str(void *dst, u32 size, const void *unsafe_ptr)
 *	Description
 *		Copy a NUL terminated string from an unsafe user address
 *		*unsafe_ptr* to *dst*. The *size* should include the
 *		terminating NUL byte. In case the string length is smaller than
 *		*size*, the target is not padded with further NUL bytes. If the
 *		string length is larger than *size*, just *size*-1 bytes are
 *		copied and the last byte is set to NUL.
 *
 *		On success, returns the number of bytes that were written,
 *		including the terminal NUL. This makes this helper useful in
 *		tracing programs for reading strings, and more importantly for
 *		getting their length at runtime. See the following snippet:
 *
 *		::
 *
 *			SEC("kprobe/sys_open")
 *			void bpf_sys_open(struct pt_regs *ctx)
 *			{
 *				char buf[PATHLEN]; // PATHLEN is defined to 256
 *				int res = bpf_probe_read_user_str(buf, sizeof(buf),
 *								  ctx->di);
 *
 *				// Consume buf, for example push it to
 *				// userspace via bpf_perf_event_output(); we
 *				// can use res (the string length) as event
 *				// size, after checking its boundaries.
 *			}
 *
 *		In comparison, using the **bpf_probe_read_user**\ () helper
 *		here instead to read the string would require estimating the
 *		length at compile time, and would often result in copying more
 *		memory than necessary.
 *
 *		Another useful use case is when parsing individual process
 *		arguments or individual environment variables, navigating
 *		*current*\ **->mm->arg_start** and *current*\
 *		**->mm->env_start**: using this helper and the return value,
 *		one can quickly iterate at the right offset of the memory area.
 *	Return
 *		On success, the strictly positive length of the output string,
 *		including the trailing NUL character. On error, a negative
 *		value.
 *
 * long bpf_probe_read_kernel_str(void *dst, u32 size, const void *unsafe_ptr)
 *	Description
 *		Copy a NUL terminated string from an unsafe kernel address
 *		*unsafe_ptr* to *dst*. The same semantics as with
 *		**bpf_probe_read_user_str**\ () apply.
 *	Return
 *		On success, the strictly positive length of the string,
 *		including the trailing NUL character. On error, a negative
 *		value.
 *
 * long bpf_tcp_send_ack(void *tp, u32 rcv_nxt)
 *	Description
 *		Send out a tcp-ack. *tp* is the in-kernel struct **tcp_sock**.
 *		*rcv_nxt* is the ack_seq to be sent out.
 *	Return
 *		0 on success, or a negative error in case of failure.
 *
 * long bpf_send_signal_thread(u32 sig)
 *	Description
 *		Send signal *sig* to the thread corresponding to the current
 *		task.
 *	Return
 *		0 on success or successfully queued.
 *
 *		**-EBUSY** if the work queue under nmi is full.
 *
 *		**-EINVAL** if *sig* is invalid.
 *
 *		**-EPERM** if no permission to send the *sig*.
 *
 *		**-EAGAIN** if the bpf program can try again.
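 *
 *		For illustration, a minimal sketch of chaining
 *		**bpf_probe_read_kernel**\ () and
 *		**bpf_probe_read_kernel_str**\ () in a kprobe program, first
 *		reading a kernel pointer and then the string it points to
 *		(the probed function, the **PT_REGS_PARM2** accessor macro,
 *		the availability of kernel struct definitions, and the buffer
 *		size are illustrative assumptions):
 *
 *		::
 *
 *			SEC("kprobe/do_unlinkat")
 *			int trace_unlink(struct pt_regs *ctx)
 *			{
 *				struct filename *name;
 *				const char *p;
 *				char buf[64];
 *
 *				name = (struct filename *)PT_REGS_PARM2(ctx);
 *				/* read the pointer, then the string behind it */
 *				bpf_probe_read_kernel(&p, sizeof(p), &name->name);
 *				bpf_probe_read_kernel_str(buf, sizeof(buf), p);
 *				return 0;
 *			}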
 *
 * u64 bpf_jiffies64(void)
 *	Description
 *		Obtain the 64-bit jiffies.
 *	Return
 *		The 64-bit jiffies.
 *
 * long bpf_read_branch_records(struct bpf_perf_event_data *ctx, void *buf, u32 size, u64 flags)
 *	Description
 *		For an eBPF program attached to a perf event, retrieve the
 *		branch records (**struct perf_branch_entry**) associated to
 *		*ctx* and store them in the buffer pointed by *buf* up to size
 *		*size* bytes.
 *	Return
 *		On success, number of bytes written to *buf*. On error, a
 *		negative value.
 *
 *		The *flags* can be set to **BPF_F_GET_BRANCH_RECORDS_SIZE** to
 *		instead return the number of bytes required to store all the
 *		branch entries. If this flag is set, *buf* may be NULL.
 *
 *		**-EINVAL** if arguments invalid or **size** not a multiple
 *		of **sizeof**\ (**struct perf_branch_entry**\ ).
 *
 *		**-ENOENT** if architecture does not support branch records.
 *
 * long bpf_get_ns_current_pid_tgid(u64 dev, u64 ino, struct bpf_pidns_info *nsdata, u32 size)
 *	Description
 *		On success, the values for *pid* and *tgid* as seen from the
 *		current *namespace* will be returned in *nsdata*.
 *	Return
 *		0 on success, or one of the following in case of failure:
 *
 *		**-EINVAL** if *dev* and *inum* supplied don't match the dev_t
 *		and inode number with nsfs of the current task, or if the *dev*
 *		conversion to dev_t lost high bits.
 *
 *		**-ENOENT** if pidns does not exist for the current task.
 *
 * long bpf_xdp_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size)
 *	Description
 *		Write raw *data* blob into a special BPF perf event held by
 *		*map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf
 *		event must have the following attributes: **PERF_SAMPLE_RAW**
 *		as **sample_type**, **PERF_TYPE_SOFTWARE** as **type**, and
 *		**PERF_COUNT_SW_BPF_OUTPUT** as **config**.
 *
 *		The *flags* are used to indicate the index in *map* for which
 *		the value must be put, masked with **BPF_F_INDEX_MASK**.
 *		Alternatively, *flags* can be set to **BPF_F_CURRENT_CPU**
 *		to indicate that the index of the current CPU core should be
 *		used.
 *
 *		The value to write, of *size*, is passed through the eBPF
 *		stack and pointed by *data*.
 *
 *		*ctx* is a pointer to the in-kernel struct xdp_buff.
 *
 *		This helper is similar to **bpf_perf_event_output**\ () but
 *		restricted to raw_tracepoint bpf programs.
 *	Return
 *		0 on success, or a negative error in case of failure.
 *
 * u64 bpf_get_netns_cookie(void *ctx)
 *	Description
 *		Retrieve the cookie (generated by the kernel) of the network
 *		namespace the input *ctx* is associated with. The network
 *		namespace cookie remains stable for its lifetime and provides
 *		a global identifier that can be assumed unique. If *ctx* is
 *		NULL, then the helper returns the cookie for the initial
 *		network namespace. The cookie itself is very similar to that
 *		of the **bpf_get_socket_cookie**\ () helper, but for network
 *		namespaces instead of sockets.
 *	Return
 *		An 8-byte long opaque number.
 *
 * u64 bpf_get_current_ancestor_cgroup_id(int ancestor_level)
 *	Description
 *		Return the id of the cgroup v2 that is an ancestor of the
 *		cgroup associated with the current task at the *ancestor_level*.
 *		The root cgroup
 *		is at *ancestor_level* zero and each step down the hierarchy
 *		increments the level. If *ancestor_level* == level of the
 *		cgroup associated with the current task, then the return value
 *		will be the same as that of
 *		**bpf_get_current_cgroup_id**\ ().
 *
 *		The helper is useful to implement policies based on cgroups
 *		that are higher in the hierarchy than the immediate cgroup
 *		associated with the current task.
 *
 *		The format of the returned id and the helper limitations are
 *		the same as in **bpf_get_current_cgroup_id**\ ().
 *	Return
 *		The id is returned or 0 in case the id could not be retrieved.
 *
 * long bpf_sk_assign(struct sk_buff *skb, void *sk, u64 flags)
 *	Description
 *		The helper is overloaded depending on the BPF program type.
 *		This description applies to **BPF_PROG_TYPE_SCHED_CLS** and
 *		**BPF_PROG_TYPE_SCHED_ACT** programs.
 *
 *		Assign the *sk* to the *skb*. When combined with appropriate
 *		routing configuration to receive the packet towards the socket,
 *		this will cause *skb* to be delivered to the specified socket.
 *		Subsequent redirection of *skb* via **bpf_redirect**\ (),
 *		**bpf_clone_redirect**\ () or other methods outside of BPF may
 *		interfere with successful delivery to the socket.
 *
 *		This operation is only valid from the TC ingress path.
 *
 *		The *flags* argument must be zero.
 *	Return
 *		0 on success, or a negative error in case of failure:
 *
 *		**-EINVAL** if specified *flags* are not supported.
 *
 *		**-ENOENT** if the socket is unavailable for assignment.
 *
 *		**-ENETUNREACH** if the socket is unreachable (wrong netns).
 *
 *		**-EOPNOTSUPP** if the operation is not supported, for example
 *		a call from outside of TC ingress.
 *
 *		**-ESOCKTNOSUPPORT** if the socket type is not supported
 *		(reuseport).
 *
 * long bpf_sk_assign(struct bpf_sk_lookup *ctx, struct bpf_sock *sk, u64 flags)
 *	Description
 *		The helper is overloaded depending on the BPF program type.
 *		This description applies to **BPF_PROG_TYPE_SK_LOOKUP**
 *		programs.
 *
 *		Select the *sk* as a result of a socket lookup.
 *
 *		For the operation to succeed, the passed socket must be
 *		compatible with the packet description provided by the *ctx*
 *		object.
 *
 *		The L4 protocol (**IPPROTO_TCP** or **IPPROTO_UDP**) must
 *		be an exact match, while the IP family (**AF_INET** or
 *		**AF_INET6**) must be compatible, that is, IPv6 sockets
 *		that are not v6-only can be selected for IPv4 packets.
 *
 *		Only TCP listeners and UDP unconnected sockets can be
 *		selected. *sk* can also be NULL to reset any previous
 *		selection.
 *
 *		The *flags* argument can be a combination of the following
 *		values:
 *
 *		* **BPF_SK_LOOKUP_F_REPLACE** to override the previous
 *		  socket selection, potentially done by a BPF program
 *		  that ran before us.
 *
 *		* **BPF_SK_LOOKUP_F_NO_REUSEPORT** to skip
 *		  load-balancing within the reuseport group for the socket
 *		  being selected.
 *
 *		On success *ctx->sk* will point to the selected socket.
 *
 *	Return
 *		0 on success, or a negative errno in case of failure.
 *
 *		* **-EAFNOSUPPORT** if socket family (*sk->family*) is
 *		  not compatible with packet family (*ctx->family*).
 *
 *		* **-EEXIST** if socket has been already selected,
 *		  potentially by another program, and the
 *		  **BPF_SK_LOOKUP_F_REPLACE** flag was not specified.
 *
 *		* **-EINVAL** if unsupported flags were specified.
 *
 *		* **-EPROTOTYPE** if socket L4 protocol
 *		  (*sk->protocol*) doesn't match packet protocol
 *		  (*ctx->protocol*).
 *
 *		* **-ESOCKTNOSUPPORT** if socket is not in allowed
 *		  state (TCP listening or UDP unconnected).
 *
 * u64 bpf_ktime_get_boot_ns(void)
 *	Description
 *		Return the time elapsed since system boot, in nanoseconds.
 *		Does include the time the system was suspended.
 *		See: **clock_gettime**\ (**CLOCK_BOOTTIME**)
 *	Return
 *		Current *ktime*.
 *
 * long bpf_seq_printf(struct seq_file *m, const char *fmt, u32 fmt_size, const void *data, u32 data_len)
 *	Description
 *		**bpf_seq_printf**\ () uses seq_file **seq_printf**\ () to
 *		print out the format string.
 *		The *m* represents the seq_file. The *fmt* and *fmt_size* are
 *		for the format string itself. The *data* and *data_len* are
 *		format string arguments. The *data* are a **u64** array and
 *		corresponding format string values are stored in the array.
 *		For strings and pointers where pointees are accessed, only the
 *		pointer values are stored in the *data* array. The *data_len*
 *		is the size of *data* in bytes - it must be a multiple of 8.
 *
 *		Formats **%s** and **%p{i,I}{4,6}** require reading kernel
 *		memory. Reading kernel memory may fail due to either an
 *		invalid address, or a valid address requiring a major memory
 *		fault. If reading kernel memory fails, the string for **%s**
 *		will be an empty string, and the ip address for
 *		**%p{i,I}{4,6}** will be 0. Not returning an error to the bpf
 *		program is consistent with what **bpf_trace_printk**\ ()
 *		does for now.
 *	Return
 *		0 on success, or a negative error in case of failure:
 *
 *		**-EBUSY** if the per-CPU memory copy buffer is busy; the
 *		program can try again by returning 1.
 *
 *		**-EINVAL** if arguments are invalid, or if *fmt* is
 *		invalid/unsupported.
 *
 *		**-E2BIG** if *fmt* contains too many format specifiers.
 *
 *		**-EOVERFLOW** if an overflow happened: The same object will be
 *		tried again.
 *
 * long bpf_seq_write(struct seq_file *m, const void *data, u32 len)
 *	Description
 *		**bpf_seq_write**\ () uses seq_file **seq_write**\ () to write
 *		the data.
 *		The *m* represents the seq_file. The *data* and *len* represent
 *		the data to write in bytes.
 *	Return
 *		0 on success, or a negative error in case of failure:
 *
 *		**-EOVERFLOW** if an overflow happened: The same object will be
 *		tried again.
 *
 * u64 bpf_sk_cgroup_id(void *sk)
 *	Description
 *		Return the cgroup v2 id of the socket *sk*.
 *
 *		*sk* must be a non-**NULL** pointer to a socket, e.g. one
 *		returned from **bpf_sk_lookup_xxx**\ (),
 *		**bpf_sk_fullsock**\ (), etc. The format of the returned id is
 *		the same as in **bpf_skb_cgroup_id**\ ().
 *
 *		This helper is available only if the kernel was compiled with
 *		the **CONFIG_SOCK_CGROUP_DATA** configuration option.
 *	Return
 *		The id is returned or 0 in case the id could not be retrieved.
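 *
 *		Returning to **bpf_seq_printf**\ () above, a minimal sketch of
 *		its use from a task iterator program; note that *data* is
 *		passed as a **u64** array and *fmt_size* includes the trailing
 *		NUL (the section name and the **struct bpf_iter__task**
 *		context layout are illustrative assumptions):
 *
 *		::
 *
 *			SEC("iter/task")
 *			int dump_task(struct bpf_iter__task *ctx)
 *			{
 *				static const char fmt[] = "pid=%llu\n";
 *				struct seq_file *m = ctx->meta->seq;
 *				struct task_struct *task = ctx->task;
 *				u64 pid;
 *
 *				if (!task)
 *					return 0;
 *				pid = task->pid;
 *				bpf_seq_printf(m, fmt, sizeof(fmt),
 *					       &pid, sizeof(pid));
 *				return 0;
 *			}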
4195 * 4196 * u64 bpf_sk_ancestor_cgroup_id(void *sk, int ancestor_level) 4197 * Description 4198 * Return the id of the cgroup v2 that is an ancestor of the cgroup associated 4199 * with *sk* at the *ancestor_level*. The root cgroup is at 4200 * *ancestor_level* zero and each step down the hierarchy 4201 * increments the level. If *ancestor_level* == level of cgroup 4202 * associated with *sk*, then the return value will be the same as that 4203 * of **bpf_sk_cgroup_id**\ (). 4204 * 4205 * The helper is useful to implement policies based on cgroups 4206 * that are higher in the hierarchy than the immediate cgroup associated 4207 * with *sk*. 4208 * 4209 * The format of the returned id and helper limitations are the same as in 4210 * **bpf_sk_cgroup_id**\ (). 4211 * Return 4212 * The id is returned or 0 in case the id could not be retrieved. 4213 * 4214 * long bpf_ringbuf_output(void *ringbuf, void *data, u64 size, u64 flags) 4215 * Description 4216 * Copy *size* bytes from *data* into a ring buffer *ringbuf*. 4217 * If **BPF_RB_NO_WAKEUP** is specified in *flags*, no notification 4218 * of new data availability is sent. 4219 * If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification 4220 * of new data availability is sent unconditionally. 4221 * If **0** is specified in *flags*, an adaptive notification 4222 * of new data availability is sent. 4223 * 4224 * An adaptive notification is a notification sent whenever the user-space 4225 * process has caught up and consumed all available payloads. If the user-space 4226 * process is still processing a previous payload, then no notification is needed 4227 * as it will process the newly added payload automatically. 4228 * Return 4229 * 0 on success, or a negative error in case of failure. 4230 * 4231 * void *bpf_ringbuf_reserve(void *ringbuf, u64 size, u64 flags) 4232 * Description 4233 * Reserve *size* bytes of payload in a ring buffer *ringbuf*. 4234 * *flags* must be 0. 4235 * Return 4236 * A valid pointer with *size* bytes of memory available; **NULL** 4237 * otherwise. 4238 * 4239 * void bpf_ringbuf_submit(void *data, u64 flags) 4240 * Description 4241 * Submit the reserved ring buffer sample pointed to by *data*. 4242 * If **BPF_RB_NO_WAKEUP** is specified in *flags*, no notification 4243 * of new data availability is sent. 4244 * If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification 4245 * of new data availability is sent unconditionally. 4246 * If **0** is specified in *flags*, an adaptive notification 4247 * of new data availability is sent. 4248 * 4249 * See 'bpf_ringbuf_output()' for the definition of adaptive notification. 4250 * Return 4251 * Nothing. Always succeeds. 4252 * 4253 * void bpf_ringbuf_discard(void *data, u64 flags) 4254 * Description 4255 * Discard the reserved ring buffer sample pointed to by *data*. 4256 * If **BPF_RB_NO_WAKEUP** is specified in *flags*, no notification 4257 * of new data availability is sent. 4258 * If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification 4259 * of new data availability is sent unconditionally. 4260 * If **0** is specified in *flags*, an adaptive notification 4261 * of new data availability is sent. 4262 * 4263 * See 'bpf_ringbuf_output()' for the definition of adaptive notification. 4264 * Return 4265 * Nothing. Always succeeds. 4266 * 4267 * u64 bpf_ringbuf_query(void *ringbuf, u64 flags) 4268 * Description 4269 * Query various characteristics of the provided ring buffer. What 4270 * exactly is queried is determined by *flags*: 4271 * 4272 * * **BPF_RB_AVAIL_DATA**: Amount of data not yet consumed.
4273 * * **BPF_RB_RING_SIZE**: The size of the ring buffer. 4274 * * **BPF_RB_CONS_POS**: Consumer position (can wrap around). 4275 * * **BPF_RB_PROD_POS**: Producer(s) position (can wrap around). 4276 * 4277 * The data returned is just a momentary snapshot of the actual values 4278 * and could be inaccurate, so this facility should be used to 4279 * power heuristics and for reporting, not to make 100% correct 4280 * calculations. 4281 * Return 4282 * Requested value, or 0, if *flags* are not recognized. 4283 * 4284 * long bpf_csum_level(struct sk_buff *skb, u64 level) 4285 * Description 4286 * Change the skb's checksum level by one layer up or down, or 4287 * reset it entirely to none in order to have the stack perform 4288 * checksum validation. The level is applicable to the following 4289 * protocols: TCP, UDP, GRE, SCTP, FCOE. For example, a decap of 4290 * | ETH | IP | UDP | GUE | IP | TCP | into | ETH | IP | TCP | 4291 * through the **bpf_skb_adjust_room**\ () helper, passing in the 4292 * **BPF_F_ADJ_ROOM_NO_CSUM_RESET** flag, would require one call 4293 * to **bpf_csum_level**\ () with **BPF_CSUM_LEVEL_DEC** since 4294 * the UDP header is removed. Similarly, an encap of the latter 4295 * into the former could be accompanied by a helper call to 4296 * **bpf_csum_level**\ () with **BPF_CSUM_LEVEL_INC** if the 4297 * skb is still intended to be processed in higher layers of the 4298 * stack instead of just egressing at tc. 4299 * 4300 * There are three supported level settings at this time: 4301 * 4302 * * **BPF_CSUM_LEVEL_INC**: Increases skb->csum_level for skbs 4303 * with CHECKSUM_UNNECESSARY. 4304 * * **BPF_CSUM_LEVEL_DEC**: Decreases skb->csum_level for skbs 4305 * with CHECKSUM_UNNECESSARY. 4306 * * **BPF_CSUM_LEVEL_RESET**: Resets skb->csum_level to 0 and 4307 * sets CHECKSUM_NONE to force checksum validation by the stack. 4308 * * **BPF_CSUM_LEVEL_QUERY**: No-op, returns the current 4309 * skb->csum_level. 4310 * Return 4311 * 0 on success, or a negative error in case of failure. In the 4312 * case of **BPF_CSUM_LEVEL_QUERY**, the current skb->csum_level 4313 * is returned or the error code -EACCES in case the skb is not 4314 * subject to CHECKSUM_UNNECESSARY. 4315 * 4316 * struct tcp6_sock *bpf_skc_to_tcp6_sock(void *sk) 4317 * Description 4318 * Dynamically cast a *sk* pointer to a *tcp6_sock* pointer. 4319 * Return 4320 * *sk* if casting is valid, or **NULL** otherwise. 4321 * 4322 * struct tcp_sock *bpf_skc_to_tcp_sock(void *sk) 4323 * Description 4324 * Dynamically cast a *sk* pointer to a *tcp_sock* pointer. 4325 * Return 4326 * *sk* if casting is valid, or **NULL** otherwise. 4327 * 4328 * struct tcp_timewait_sock *bpf_skc_to_tcp_timewait_sock(void *sk) 4329 * Description 4330 * Dynamically cast a *sk* pointer to a *tcp_timewait_sock* pointer. 4331 * Return 4332 * *sk* if casting is valid, or **NULL** otherwise. 4333 * 4334 * struct tcp_request_sock *bpf_skc_to_tcp_request_sock(void *sk) 4335 * Description 4336 * Dynamically cast a *sk* pointer to a *tcp_request_sock* pointer. 4337 * Return 4338 * *sk* if casting is valid, or **NULL** otherwise. 4339 * 4340 * struct udp6_sock *bpf_skc_to_udp6_sock(void *sk) 4341 * Description 4342 * Dynamically cast a *sk* pointer to a *udp6_sock* pointer. 4343 * Return 4344 * *sk* if casting is valid, or **NULL** otherwise. 4345 * 4346 * long bpf_get_task_stack(struct task_struct *task, void *buf, u32 size, u64 flags) 4347 * Description 4348 * Return a user or a kernel stack in a bpf program-provided buffer.
To achieve this, the helper needs *task*, which is a valid 4350 * pointer to **struct task_struct**. To store the stacktrace, the 4351 * bpf program provides *buf* with a nonnegative *size*. 4352 * 4353 * The last argument, *flags*, holds the number of stack frames to 4354 * skip (from 0 to 255), masked with 4355 * **BPF_F_SKIP_FIELD_MASK**. The next bits can be used to set 4356 * the following flags: 4357 * 4358 * **BPF_F_USER_STACK** 4359 * Collect a user space stack instead of a kernel stack. 4360 * **BPF_F_USER_BUILD_ID** 4361 * Collect buildid+offset instead of ips for user stack, 4362 * only valid if **BPF_F_USER_STACK** is also specified. 4363 * 4364 * **bpf_get_task_stack**\ () can collect up to 4365 * **PERF_MAX_STACK_DEPTH** kernel and user frames, subject 4366 * to a sufficiently large buffer size. Note that 4367 * this limit can be controlled with the **sysctl** program, and 4368 * that it should be manually increased in order to profile long 4369 * user stacks (such as stacks for Java programs). To do so, use: 4370 * 4371 * :: 4372 * 4373 * # sysctl kernel.perf_event_max_stack=<new value> 4374 * Return 4375 * The non-negative copied *buf* length equal to or less than 4376 * *size* on success, or a negative error in case of failure. 4377 * 4378 * long bpf_load_hdr_opt(struct bpf_sock_ops *skops, void *searchby_res, u32 len, u64 flags) 4379 * Description 4380 * Load a header option. Supports reading a particular TCP header 4381 * option for a bpf program (**BPF_PROG_TYPE_SOCK_OPS**). 4382 * 4383 * If *flags* is 0, it will search the option from the 4384 * *skops*\ **->skb_data**. The comment in **struct bpf_sock_ops** 4385 * has details on what skb_data contains under different 4386 * *skops*\ **->op**. 4387 * 4388 * The first byte of the *searchby_res* specifies the 4389 * kind to search for. 4390 * 4391 * If the searching kind is an experimental kind 4392 * (i.e. 253 or 254 according to RFC6994), it also 4393 * needs to specify the "magic", which is either 4394 * 2 bytes or 4 bytes. It then also needs to 4395 * specify the size of the magic by using 4396 * the 2nd byte, which is the "kind-length" of a TCP 4397 * header option; the "kind-length" also 4398 * includes the first 2 bytes, "kind" and "kind-length" 4399 * itself, as a normal TCP header option also does. 4400 * 4401 * For example, to search experimental kind 254 with a 4402 * 2-byte magic 0xeB9F, the *searchby_res* should be 4403 * [ 254, 4, 0xeB, 0x9F, 0, 0, .... 0 ]. 4404 * 4405 * To search for the standard window scale option (3), 4406 * the *searchby_res* should be [ 3, 0, 0, .... 0 ]. 4407 * Note that kind-length must be 0 for a regular option. 4408 * 4409 * Searching for No-Op (0) and End-of-Option-List (1) is 4410 * not supported. 4411 * 4412 * *len* must be at least 2 bytes, which is the minimal size 4413 * of a header option. 4414 * 4415 * Supported flags: 4416 * 4417 * * **BPF_LOAD_HDR_OPT_TCP_SYN** to search from the 4418 * saved_syn packet or the just-received syn packet. 4419 * 4420 * Return 4421 * > 0 when found; the header option is copied to *searchby_res* and 4422 * the return value is the total length copied. On failure, a 4423 * negative error code is returned: 4424 * 4425 * **-EINVAL** if a parameter is invalid. 4426 * 4427 * **-ENOMSG** if the option is not found. 4428 * 4429 * **-ENOENT** if no syn packet is available when 4430 * **BPF_LOAD_HDR_OPT_TCP_SYN** is used. 4431 * 4432 * **-ENOSPC** if there is not enough space. Only *len* 4433 * bytes are copied.
4434 * 4435 * **-EFAULT** on failure to parse the header options in the 4436 * packet. 4437 * 4438 * **-EPERM** if the helper cannot be used under the current 4439 * *skops*\ **->op**. 4440 * 4441 * long bpf_store_hdr_opt(struct bpf_sock_ops *skops, const void *from, u32 len, u64 flags) 4442 * Description 4443 * Store a header option. The data will be copied 4444 * from the buffer *from* with length *len* to the TCP header. 4445 * 4446 * The buffer *from* should have the whole option that 4447 * includes the kind, kind-length, and the actual 4448 * option data. The *len* must be at least kind-length 4449 * long. The kind-length does not have to be 4-byte 4450 * aligned; the kernel will take care of the padding 4451 * and of setting the 4-byte-aligned value in th->doff. 4452 * 4453 * This helper will check for a duplicated option 4454 * by searching for the same option in the outgoing skb. 4455 * 4456 * This helper can only be called during 4457 * **BPF_SOCK_OPS_WRITE_HDR_OPT_CB**. 4458 * 4459 * Return 4460 * 0 on success, or a negative error in case of failure: 4461 * 4462 * **-EINVAL** if a parameter is invalid. 4463 * 4464 * **-ENOSPC** if there is not enough space in the header. 4465 * Nothing has been written. 4466 * 4467 * **-EEXIST** if the option already exists. 4468 * 4469 * **-EFAULT** on failure to parse the existing header options. 4470 * 4471 * **-EPERM** if the helper cannot be used under the current 4472 * *skops*\ **->op**. 4473 * 4474 * long bpf_reserve_hdr_opt(struct bpf_sock_ops *skops, u32 len, u64 flags) 4475 * Description 4476 * Reserve *len* bytes for the bpf header option. The 4477 * space will be used by **bpf_store_hdr_opt**\ () later in 4478 * **BPF_SOCK_OPS_WRITE_HDR_OPT_CB**. 4479 * 4480 * If **bpf_reserve_hdr_opt**\ () is called multiple times, 4481 * the total number of bytes will be reserved. 4482 * 4483 * This helper can only be called during 4484 * **BPF_SOCK_OPS_HDR_OPT_LEN_CB**. 4485 * 4486 * Return 4487 * 0 on success, or a negative error in case of failure: 4488 * 4489 * **-EINVAL** if a parameter is invalid. 4490 * 4491 * **-ENOSPC** if there is not enough space in the header. 4492 * 4493 * **-EPERM** if the helper cannot be used under the current 4494 * *skops*\ **->op**. 4495 * 4496 * void *bpf_inode_storage_get(struct bpf_map *map, void *inode, void *value, u64 flags) 4497 * Description 4498 * Get a bpf_local_storage from an *inode*. 4499 * 4500 * Logically, it could be thought of as getting the value from 4501 * a *map* with *inode* as the **key**. From this 4502 * perspective, the usage is not much different from 4503 * **bpf_map_lookup_elem**\ (*map*, **&**\ *inode*) except that this 4504 * helper enforces that the key must be an inode and the map must 4505 * be a **BPF_MAP_TYPE_INODE_STORAGE**. 4506 * 4507 * Underneath, the value is stored locally at *inode* instead of 4508 * the *map*. The *map* is used as the bpf-local-storage 4509 * "type". The bpf-local-storage "type" (i.e. the *map*) is 4510 * searched against all bpf_local_storage residing at *inode*. 4511 * 4512 * An optional *flags* (**BPF_LOCAL_STORAGE_GET_F_CREATE**) can be 4513 * used such that a new bpf_local_storage will be 4514 * created if one does not exist. *value* can be used 4515 * together with **BPF_LOCAL_STORAGE_GET_F_CREATE** to specify 4516 * the initial value of a bpf_local_storage. If *value* is 4517 * **NULL**, the new bpf_local_storage will be zero initialized. 4518 * Return 4519 * A bpf_local_storage pointer is returned on success.
4520 * 4521 * **NULL** if not found or there was an error in adding 4522 * a new bpf_local_storage. 4523 * 4524 * int bpf_inode_storage_delete(struct bpf_map *map, void *inode) 4525 * Description 4526 * Delete a bpf_local_storage from an *inode*. 4527 * Return 4528 * 0 on success. 4529 * 4530 * **-ENOENT** if the bpf_local_storage cannot be found. 4531 * 4532 * long bpf_d_path(struct path *path, char *buf, u32 sz) 4533 * Description 4534 * Return full path for given **struct path** object, which 4535 * needs to be the kernel BTF *path* object. The path is 4536 * returned in the provided buffer *buf* of size *sz* and 4537 * is zero terminated. 4538 * 4539 * Return 4540 * On success, the strictly positive length of the string, 4541 * including the trailing NUL character. On error, a negative 4542 * value. 4543 * 4544 * long bpf_copy_from_user(void *dst, u32 size, const void *user_ptr) 4545 * Description 4546 * Read *size* bytes from user space address *user_ptr* and store 4547 * the data in *dst*. This is a wrapper of **copy_from_user**\ (). 4548 * Return 4549 * 0 on success, or a negative error in case of failure. 4550 * 4551 * long bpf_snprintf_btf(char *str, u32 str_size, struct btf_ptr *ptr, u32 btf_ptr_size, u64 flags) 4552 * Description 4553 * Use BTF to store a string representation of *ptr*->ptr in *str*, 4554 * using *ptr*->type_id. This value should specify the type 4555 * that *ptr*->ptr points to. LLVM __builtin_btf_type_id(type, 1) 4556 * can be used to look up vmlinux BTF type ids. Traversing the 4557 * data structure using BTF, the type information and values are 4558 * stored in the first *str_size* - 1 bytes of *str*. Safe copy of 4559 * the pointer data is carried out to avoid kernel crashes during 4560 * operation. Smaller types can use string space on the stack; 4561 * larger programs can use map data to store the string 4562 * representation. 4563 * 4564 * The string can be subsequently shared with userspace via 4565 * bpf_perf_event_output() or ring buffer interfaces. 4566 * bpf_trace_printk() is to be avoided as it places too small 4567 * a limit on string size to be useful. 4568 * 4569 * *flags* is a combination of 4570 * 4571 * **BTF_F_COMPACT** 4572 * no formatting around type information 4573 * **BTF_F_NONAME** 4574 * no struct/union member names/types 4575 * **BTF_F_PTR_RAW** 4576 * show raw (unobfuscated) pointer values; 4577 * equivalent to printk specifier %px. 4578 * **BTF_F_ZERO** 4579 * show zero-valued struct/union members; they 4580 * are not displayed by default 4581 * 4582 * Return 4583 * The number of bytes that were written (or would have been 4584 * written if output had to be truncated due to string size), 4585 * or a negative error in cases of failure. 4586 * 4587 * long bpf_seq_printf_btf(struct seq_file *m, struct btf_ptr *ptr, u32 ptr_size, u64 flags) 4588 * Description 4589 * Use BTF to write to seq_write a string representation of 4590 * *ptr*->ptr, using *ptr*->type_id as per bpf_snprintf_btf(). 4591 * *flags* are identical to those used for bpf_snprintf_btf. 4592 * Return 4593 * 0 on success or a negative error in case of failure. 4594 * 4595 * u64 bpf_skb_cgroup_classid(struct sk_buff *skb) 4596 * Description 4597 * See **bpf_get_cgroup_classid**\ () for the main description. 4598 * This helper differs from **bpf_get_cgroup_classid**\ () in that 4599 * the cgroup v1 net_cls class is retrieved only from the *skb*'s 4600 * associated socket instead of the current process. 
4601 * Return 4602 * The id is returned or 0 in case the id could not be retrieved. 4603 * 4604 * long bpf_redirect_neigh(u32 ifindex, struct bpf_redir_neigh *params, int plen, u64 flags) 4605 * Description 4606 * Redirect the packet to another net device of index *ifindex* 4607 * and fill in L2 addresses from neighboring subsystem. This helper 4608 * is somewhat similar to **bpf_redirect**\ (), except that it 4609 * populates L2 addresses as well, meaning, internally, the helper 4610 * relies on the neighbor lookup for the L2 address of the nexthop. 4611 * 4612 * The helper will perform a FIB lookup based on the skb's 4613 * networking header to get the address of the next hop, unless 4614 * this is supplied by the caller in the *params* argument. The 4615 * *plen* argument indicates the len of *params* and should be set 4616 * to 0 if *params* is NULL. 4617 * 4618 * The *flags* argument is reserved and must be 0. The helper is 4619 * currently only supported for tc BPF program types, and enabled 4620 * for IPv4 and IPv6 protocols. 4621 * Return 4622 * The helper returns **TC_ACT_REDIRECT** on success or 4623 * **TC_ACT_SHOT** on error. 4624 * 4625 * void *bpf_per_cpu_ptr(const void *percpu_ptr, u32 cpu) 4626 * Description 4627 * Take a pointer to a percpu ksym, *percpu_ptr*, and return a 4628 * pointer to the percpu kernel variable on *cpu*. A ksym is an 4629 * extern variable decorated with '__ksym'. For ksym, there is a 4630 * global var (either static or global) defined of the same name 4631 * in the kernel. The ksym is percpu if the global var is percpu. 4632 * The returned pointer points to the global percpu var on *cpu*. 4633 * 4634 * bpf_per_cpu_ptr() has the same semantic as per_cpu_ptr() in the 4635 * kernel, except that bpf_per_cpu_ptr() may return NULL. This 4636 * happens if *cpu* is larger than nr_cpu_ids. The caller of 4637 * bpf_per_cpu_ptr() must check the returned value. 4638 * Return 4639 * A pointer pointing to the kernel percpu variable on *cpu*, or 4640 * NULL, if *cpu* is invalid. 4641 * 4642 * void *bpf_this_cpu_ptr(const void *percpu_ptr) 4643 * Description 4644 * Take a pointer to a percpu ksym, *percpu_ptr*, and return a 4645 * pointer to the percpu kernel variable on this cpu. See the 4646 * description of 'ksym' in **bpf_per_cpu_ptr**\ (). 4647 * 4648 * bpf_this_cpu_ptr() has the same semantic as this_cpu_ptr() in 4649 * the kernel. Different from **bpf_per_cpu_ptr**\ (), it would 4650 * never return NULL. 4651 * Return 4652 * A pointer pointing to the kernel percpu variable on this cpu. 4653 * 4654 * long bpf_redirect_peer(u32 ifindex, u64 flags) 4655 * Description 4656 * Redirect the packet to another net device of index *ifindex*. 4657 * This helper is somewhat similar to **bpf_redirect**\ (), except 4658 * that the redirection happens to the *ifindex*' peer device and 4659 * the netns switch takes place from ingress to ingress without 4660 * going through the CPU's backlog queue. 4661 * 4662 * The *flags* argument is reserved and must be 0. The helper is 4663 * currently only supported for tc BPF program types at the ingress 4664 * hook and for veth device types. The peer device must reside in a 4665 * different network namespace. 4666 * Return 4667 * The helper returns **TC_ACT_REDIRECT** on success or 4668 * **TC_ACT_SHOT** on error. 4669 * 4670 * void *bpf_task_storage_get(struct bpf_map *map, struct task_struct *task, void *value, u64 flags) 4671 * Description 4672 * Get a bpf_local_storage from the *task*. 
4673 * 4674 * Logically, it could be thought of as getting the value from 4675 * a *map* with *task* as the **key**. From this 4676 * perspective, the usage is not much different from 4677 * **bpf_map_lookup_elem**\ (*map*, **&**\ *task*) except that this 4678 * helper enforces that the key must be a task_struct and the map must 4679 * be a **BPF_MAP_TYPE_TASK_STORAGE**. 4680 * 4681 * Underneath, the value is stored locally at *task* instead of 4682 * the *map*. The *map* is used as the bpf-local-storage 4683 * "type". The bpf-local-storage "type" (i.e. the *map*) is 4684 * searched against all bpf_local_storage residing at *task*. 4685 * 4686 * An optional *flags* (**BPF_LOCAL_STORAGE_GET_F_CREATE**) can be 4687 * used such that a new bpf_local_storage will be 4688 * created if one does not exist. *value* can be used 4689 * together with **BPF_LOCAL_STORAGE_GET_F_CREATE** to specify 4690 * the initial value of a bpf_local_storage. If *value* is 4691 * **NULL**, the new bpf_local_storage will be zero initialized. 4692 * Return 4693 * A bpf_local_storage pointer is returned on success. 4694 * 4695 * **NULL** if not found or there was an error in adding 4696 * a new bpf_local_storage. 4697 * 4698 * long bpf_task_storage_delete(struct bpf_map *map, struct task_struct *task) 4699 * Description 4700 * Delete a bpf_local_storage from a *task*. 4701 * Return 4702 * 0 on success. 4703 * 4704 * **-ENOENT** if the bpf_local_storage cannot be found. 4705 * 4706 * struct task_struct *bpf_get_current_task_btf(void) 4707 * Description 4708 * Return a BTF pointer to the "current" task. 4709 * This pointer can also be used in helpers that accept an 4710 * *ARG_PTR_TO_BTF_ID* of type *task_struct*. 4711 * Return 4712 * Pointer to the current task. 4713 * 4714 * long bpf_bprm_opts_set(struct linux_binprm *bprm, u64 flags) 4715 * Description 4716 * Set or clear certain options on *bprm*: 4717 * 4718 * **BPF_F_BPRM_SECUREEXEC** Set the secureexec bit 4719 * which sets the **AT_SECURE** auxv for glibc. The bit 4720 * is cleared if the flag is not specified. 4721 * Return 4722 * **-EINVAL** if invalid *flags* are passed, zero otherwise. 4723 * 4724 * u64 bpf_ktime_get_coarse_ns(void) 4725 * Description 4726 * Return a coarse-grained version of the time elapsed since 4727 * system boot, in nanoseconds. Does not include the time the system 4728 * was suspended. 4729 * 4730 * See: **clock_gettime**\ (**CLOCK_MONOTONIC_COARSE**) 4731 * Return 4732 * Current *ktime*. 4733 * 4734 * long bpf_ima_inode_hash(struct inode *inode, void *dst, u32 size) 4735 * Description 4736 * Returns the stored IMA hash of the *inode* (if it's available). 4737 * If the hash is larger than *size*, then only *size* 4738 * bytes will be copied to *dst*. 4739 * Return 4740 * The **hash_algo** is returned on success; 4741 * **-EOPNOTSUPP** if IMA is disabled or **-EINVAL** if 4742 * invalid arguments are passed. 4743 * 4744 * struct socket *bpf_sock_from_file(struct file *file) 4745 * Description 4746 * If the given file represents a socket, returns the associated 4747 * socket. 4748 * Return 4749 * A pointer to a struct socket on success or NULL if the file is 4750 * not a socket. 4751 * 4752 * long bpf_check_mtu(void *ctx, u32 ifindex, u32 *mtu_len, s32 len_diff, u64 flags) 4753 * Description 4754 * Check whether the packet size exceeds the MTU of the net device (based 4755 * on *ifindex*). This helper will likely be used in combination 4756 * with helpers that adjust/change the packet size.
4757 * 4758 * The argument *len_diff* can be used for querying with a planned 4759 * size change. This allows checking the MTU prior to changing the packet 4760 * ctx. Providing a *len_diff* adjustment that is larger than the 4761 * actual packet size (resulting in a negative packet size) will in 4762 * principle not exceed the MTU, which is why it is not considered 4763 * a failure. Other BPF helpers are needed for performing the 4764 * planned size change; therefore the responsibility for catching 4765 * a negative packet size belongs in those helpers. 4766 * 4767 * Specifying *ifindex* zero means the MTU check is performed 4768 * against the current net device. This is practical if the check isn't 4769 * used prior to a redirect. 4770 * 4771 * On input, *mtu_len* must be a valid pointer, else the verifier will 4772 * reject the BPF program. If the value *mtu_len* is initialized to 4773 * zero then the ctx packet size is used. When the value *mtu_len* is 4774 * provided as input, it specifies the L3 length that the MTU check 4775 * is done against. Remember that XDP and TC lengths operate at L2, but 4776 * this value is L3, as it correlates to the MTU and IP-header tot_len 4777 * values, which are L3 (similar behavior to bpf_fib_lookup). 4778 * 4779 * The Linux kernel route table can configure MTUs on a more 4780 * specific per-route level, which is not provided by this helper. 4781 * For route-level MTU checks use the **bpf_fib_lookup**\ () 4782 * helper. 4783 * 4784 * *ctx* is either **struct xdp_md** for XDP programs or 4785 * **struct sk_buff** for tc cls_act programs. 4786 * 4787 * The *flags* argument can be a combination of one or more of the 4788 * following values: 4789 * 4790 * **BPF_MTU_CHK_SEGS** 4791 * This flag only works for *ctx* **struct sk_buff**. 4792 * If the packet context contains extra packet segment buffers 4793 * (often known as a GSO skb), then the MTU check is harder to 4794 * perform at this point, because in the transmit path it is 4795 * possible for the skb packet to get re-segmented 4796 * (depending on net device features). This could still be 4797 * an MTU violation, so this flag enables performing the MTU 4798 * check against segments, with a different violation 4799 * return code to tell it apart. This check cannot use len_diff. 4800 * 4801 * On return, the *mtu_len* pointer contains the MTU value of the net 4802 * device. Remember that the net device's configured MTU is the L3 size, 4803 * which is what is returned here, while XDP and TC lengths operate at L2. 4804 * The helper takes this into account for you, but keep it in mind when using 4805 * the MTU value in your BPF code. 4806 * 4807 * Return 4808 * * 0 on success, with the MTU value populated in the *mtu_len* pointer. 4809 * 4810 * * < 0 if any input argument is invalid (*mtu_len* not updated) 4811 * 4812 * MTU violations return positive values, but also populate the MTU 4813 * value in the *mtu_len* pointer, as this can be needed for 4814 * implementing PMTU handling: 4815 * 4816 * * **BPF_MTU_CHK_RET_FRAG_NEEDED** 4817 * * **BPF_MTU_CHK_RET_SEGS_TOOBIG** 4818 * 4819 * long bpf_for_each_map_elem(struct bpf_map *map, void *callback_fn, void *callback_ctx, u64 flags) 4820 * Description 4821 * For each element in **map**, call the **callback_fn** function with 4822 * **map**, **callback_ctx** and other map-specific parameters. 4823 * The **callback_fn** should be a static function and 4824 * the **callback_ctx** should be a pointer to the stack. 4825 * The **flags** is used to control certain aspects of the helper. 4826 * Currently, the **flags** must be 0.
4827 * 4828 * The following is a list of supported map types and their 4829 * respective expected callback signatures: 4830 * 4831 * BPF_MAP_TYPE_HASH, BPF_MAP_TYPE_PERCPU_HASH, 4832 * BPF_MAP_TYPE_LRU_HASH, BPF_MAP_TYPE_LRU_PERCPU_HASH, 4833 * BPF_MAP_TYPE_ARRAY, BPF_MAP_TYPE_PERCPU_ARRAY 4834 * 4835 * long (\*callback_fn)(struct bpf_map \*map, const void \*key, void \*value, void \*ctx); 4836 * 4837 * For per_cpu maps, the map_value is the value on the cpu where the 4838 * bpf_prog is running. 4839 * 4840 * If **callback_fn** returns 0, the helper will continue to the next 4841 * element. If the return value is 1, the helper will skip the rest of the 4842 * elements and return. Other return values are not used now. 4843 * 4844 * Return 4845 * The number of traversed map elements on success, **-EINVAL** for 4846 * invalid **flags**. 4847 * 4848 * long bpf_snprintf(char *str, u32 str_size, const char *fmt, u64 *data, u32 data_len) 4849 * Description 4850 * Outputs a string into the **str** buffer of size **str_size** 4851 * based on a format string stored in a read-only map pointed to by 4852 * **fmt**. 4853 * 4854 * Each format specifier in **fmt** corresponds to one u64 element 4855 * in the **data** array. For strings and pointers where pointees 4856 * are accessed, only the pointer values are stored in the *data* 4857 * array. The *data_len* is the size of *data* in bytes and must be 4858 * a multiple of 8. 4859 * 4860 * Formats **%s** and **%p{i,I}{4,6}** require reading kernel 4861 * memory. Reading kernel memory may fail due to either an invalid 4862 * address or a valid address that would require a major memory fault. If 4863 * reading kernel memory fails, the string for **%s** will be an 4864 * empty string, and the ip address for **%p{i,I}{4,6}** will be 0. 4865 * Not returning an error to the bpf program is consistent with what 4866 * **bpf_trace_printk**\ () does for now. 4867 * 4868 * Return 4869 * The strictly positive length of the formatted string, including 4870 * the trailing zero character. If the return value is greater than 4871 * **str_size**, **str** contains a truncated string, guaranteed to 4872 * be zero-terminated except when **str_size** is 0. 4873 * 4874 * Or **-EBUSY** if the per-CPU memory copy buffer is busy. 4875 * 4876 * long bpf_sys_bpf(u32 cmd, void *attr, u32 attr_size) 4877 * Description 4878 * Execute the bpf syscall with the given arguments. 4879 * Return 4880 * A syscall result. 4881 * 4882 * long bpf_btf_find_by_name_kind(char *name, int name_sz, u32 kind, int flags) 4883 * Description 4884 * Find a BTF type with the given name and kind in vmlinux BTF or in modules' BTFs. 4885 * Return 4886 * Returns the btf_id and btf_obj_fd in the lower and upper 32 bits, respectively. 4887 * 4888 * long bpf_sys_close(u32 fd) 4889 * Description 4890 * Execute the close syscall for the given FD. 4891 * Return 4892 * A syscall result. 4893 * 4894 * long bpf_timer_init(struct bpf_timer *timer, struct bpf_map *map, u64 flags) 4895 * Description 4896 * Initialize the timer. 4897 * The first 4 bits of *flags* specify the clockid. 4898 * Only CLOCK_MONOTONIC, CLOCK_REALTIME, CLOCK_BOOTTIME are allowed. 4899 * All other bits of *flags* are reserved. 4900 * The verifier will reject the program if *timer* is not from 4901 * the same *map*. 4902 * Return 4903 * 0 on success. 4904 * **-EBUSY** if *timer* is already initialized. 4905 * **-EINVAL** if invalid *flags* are passed. 4906 * **-EPERM** if *timer* is in a map that doesn't have any user references.
4907 * The user space should either hold a file descriptor to a map with timers 4908 * or pin such map in bpffs. When map is unpinned or file descriptor is 4909 * closed all timers in the map will be cancelled and freed. 4910 * 4911 * long bpf_timer_set_callback(struct bpf_timer *timer, void *callback_fn) 4912 * Description 4913 * Configure the timer to call *callback_fn* static function. 4914 * Return 4915 * 0 on success. 4916 * **-EINVAL** if *timer* was not initialized with bpf_timer_init() earlier. 4917 * **-EPERM** if *timer* is in a map that doesn't have any user references. 4918 * The user space should either hold a file descriptor to a map with timers 4919 * or pin such map in bpffs. When map is unpinned or file descriptor is 4920 * closed all timers in the map will be cancelled and freed. 4921 * 4922 * long bpf_timer_start(struct bpf_timer *timer, u64 nsecs, u64 flags) 4923 * Description 4924 * Set timer expiration N nanoseconds from the current time. The 4925 * configured callback will be invoked in soft irq context on some cpu 4926 * and will not repeat unless another bpf_timer_start() is made. 4927 * In such case the next invocation can migrate to a different cpu. 4928 * Since struct bpf_timer is a field inside map element the map 4929 * owns the timer. The bpf_timer_set_callback() will increment refcnt 4930 * of BPF program to make sure that callback_fn code stays valid. 4931 * When user space reference to a map reaches zero all timers 4932 * in a map are cancelled and corresponding program's refcnts are 4933 * decremented. This is done to make sure that Ctrl-C of a user 4934 * process doesn't leave any timers running. If map is pinned in 4935 * bpffs the callback_fn can re-arm itself indefinitely. 4936 * bpf_map_update/delete_elem() helpers and user space sys_bpf commands 4937 * cancel and free the timer in the given map element. 4938 * The map can contain timers that invoke callback_fn-s from different 4939 * programs. The same callback_fn can serve different timers from 4940 * different maps if key/value layout matches across maps. 4941 * Every bpf_timer_set_callback() can have different callback_fn. 4942 * 4943 * Return 4944 * 0 on success. 4945 * **-EINVAL** if *timer* was not initialized with bpf_timer_init() earlier 4946 * or invalid *flags* are passed. 4947 * 4948 * long bpf_timer_cancel(struct bpf_timer *timer) 4949 * Description 4950 * Cancel the timer and wait for callback_fn to finish if it was running. 4951 * Return 4952 * 0 if the timer was not active. 4953 * 1 if the timer was active. 4954 * **-EINVAL** if *timer* was not initialized with bpf_timer_init() earlier. 4955 * **-EDEADLK** if callback_fn tried to call bpf_timer_cancel() on its 4956 * own timer which would have led to a deadlock otherwise. 4957 * 4958 * u64 bpf_get_func_ip(void *ctx) 4959 * Description 4960 * Get address of the traced function (for tracing and kprobe programs). 4961 * Return 4962 * Address of the traced function. 4963 * 0 for kprobes placed within the function (not at the entry). 4964 * 4965 * u64 bpf_get_attach_cookie(void *ctx) 4966 * Description 4967 * Get bpf_cookie value provided (optionally) during the program 4968 * attachment. It might be different for each individual 4969 * attachment, even if BPF program itself is the same. 4970 * Expects BPF program context *ctx* as a first argument. 4971 * 4972 * Supported for the following program types: 4973 * - kprobe/uprobe; 4974 * - tracepoint; 4975 * - perf_event. 
4976 * Return 4977 * Value specified by the user at BPF link creation/attachment time 4978 * or 0, if it was not specified. 4979 * 4980 * long bpf_task_pt_regs(struct task_struct *task) 4981 * Description 4982 * Get the struct pt_regs associated with **task**. 4983 * Return 4984 * A pointer to struct pt_regs. 4985 * 4986 * long bpf_get_branch_snapshot(void *entries, u32 size, u64 flags) 4987 * Description 4988 * Get the branch trace from hardware engines like Intel LBR. The 4989 * hardware engine is stopped shortly after the helper is 4990 * called. Therefore, the user needs to filter branch entries 4991 * based on the actual use case. To capture the branch trace 4992 * before the trigger point of the BPF program, the helper 4993 * should be called at the beginning of the BPF program. 4994 * 4995 * The data is stored as struct perf_branch_entry in the output 4996 * buffer *entries*. *size* is the size of *entries* in bytes. 4997 * *flags* is reserved for now and must be zero. 4998 * 4999 * Return 5000 * On success, the number of bytes written to *entries*. On error, a 5001 * negative value. 5002 * 5003 * **-EINVAL** if *flags* is not zero. 5004 * 5005 * **-ENOENT** if the architecture does not support branch records. 5006 * 5007 * long bpf_trace_vprintk(const char *fmt, u32 fmt_size, const void *data, u32 data_len) 5008 * Description 5009 * Behaves like the **bpf_trace_printk**\ () helper, but takes an array of u64 5010 * to format, and can handle more format args as a result. 5011 * 5012 * Arguments are to be used as in the **bpf_seq_printf**\ () helper. 5013 * Return 5014 * The number of bytes written to the buffer, or a negative error 5015 * in case of failure. 5016 * 5017 * struct unix_sock *bpf_skc_to_unix_sock(void *sk) 5018 * Description 5019 * Dynamically cast a *sk* pointer to a *unix_sock* pointer. 5020 * Return 5021 * *sk* if casting is valid, or **NULL** otherwise. 5022 * 5023 * long bpf_kallsyms_lookup_name(const char *name, int name_sz, int flags, u64 *res) 5024 * Description 5025 * Get the address of a kernel symbol, returned in *res*. *res* is 5026 * set to 0 if the symbol is not found. 5027 * Return 5028 * On success, zero. On error, a negative value. 5029 * 5030 * **-EINVAL** if *flags* is not zero. 5031 * 5032 * **-EINVAL** if the string *name* is not the same size as *name_sz*. 5033 * 5034 * **-ENOENT** if the symbol is not found. 5035 * 5036 * **-EPERM** if the caller does not have permission to obtain the kernel address. 5037 * 5038 * long bpf_find_vma(struct task_struct *task, u64 addr, void *callback_fn, void *callback_ctx, u64 flags) 5039 * Description 5040 * Find the vma of *task* that contains *addr* and call the *callback_fn* 5041 * function with *task*, *vma*, and *callback_ctx*. 5042 * The *callback_fn* should be a static function and 5043 * the *callback_ctx* should be a pointer to the stack. 5044 * The *flags* is used to control certain aspects of the helper. 5045 * Currently, the *flags* must be 0. 5046 * 5047 * The expected callback signature is 5048 * 5049 * long (\*callback_fn)(struct task_struct \*task, struct vm_area_struct \*vma, void \*callback_ctx); 5050 * 5051 * Return 5052 * 0 on success. 5053 * **-ENOENT** if *task->mm* is NULL, or no vma contains *addr*. 5054 * **-EBUSY** if the mmap_lock could not be taken via trylock. 5055 * **-EINVAL** for invalid **flags**. 5056 * 5057 * long bpf_loop(u32 nr_loops, void *callback_fn, void *callback_ctx, u64 flags) 5058 * Description 5059 * For **nr_loops** iterations, call the **callback_fn** function 5060 * with **callback_ctx** as the context parameter.
5061 * The **callback_fn** should be a static function and 5062 * the **callback_ctx** should be a pointer to the stack. 5063 * The **flags** is used to control certain aspects of the helper. 5064 * Currently, the **flags** must be 0, and **nr_loops** is 5065 * limited to 1 << 23 (~8 million) loops. 5066 * 5067 * long (\*callback_fn)(u32 index, void \*ctx); 5068 * 5069 * where **index** is the current index in the loop; the index 5070 * is zero-based. 5071 * 5072 * If **callback_fn** returns 0, the helper will continue to the next 5073 * loop. If the return value is 1, the helper will skip the rest of 5074 * the loops and return. Other return values are not used now, 5075 * and will be rejected by the verifier. 5076 * 5077 * Return 5078 * The number of loops performed, **-EINVAL** for invalid **flags**, 5079 * **-E2BIG** if **nr_loops** exceeds the maximum number of loops. 5080 * 5081 * long bpf_strncmp(const char *s1, u32 s1_sz, const char *s2) 5082 * Description 5083 * Do strncmp() between **s1** and **s2**. **s1** doesn't need 5084 * to be null-terminated and **s1_sz** is the maximum storage 5085 * size of **s1**. **s2** must be a read-only string. 5086 * Return 5087 * An integer less than, equal to, or greater than zero 5088 * if the first **s1_sz** bytes of **s1** are found to be 5089 * less than, to match, or to be greater than **s2**. 5090 * 5091 * long bpf_get_func_arg(void *ctx, u32 n, u64 *value) 5092 * Description 5093 * Get the **n**-th argument register (zero-based) of the traced function (for tracing programs), 5094 * returned in **value**. 5095 * 5096 * Return 5097 * 0 on success. 5098 * **-EINVAL** if n >= the argument register count of the traced function. 5099 * 5100 * long bpf_get_func_ret(void *ctx, u64 *value) 5101 * Description 5102 * Get the return value of the traced function (for tracing programs) 5103 * in **value**. 5104 * 5105 * Return 5106 * 0 on success. 5107 * **-EOPNOTSUPP** for tracing programs other than BPF_TRACE_FEXIT or BPF_MODIFY_RETURN. 5108 * 5109 * long bpf_get_func_arg_cnt(void *ctx) 5110 * Description 5111 * Get the number of registers of the traced function (for tracing programs) in which 5112 * the function arguments are stored. 5113 * 5114 * Return 5115 * The number of argument registers of the traced function. 5116 * 5117 * int bpf_get_retval(void) 5118 * Description 5119 * Get the BPF program's return value that will be returned to the upper layers. 5120 * 5121 * This helper is currently supported by cgroup programs and only by the hooks 5122 * where the BPF program's return value is returned to userspace via errno. 5123 * Return 5124 * The BPF program's return value. 5125 * 5126 * int bpf_set_retval(int retval) 5127 * Description 5128 * Set the BPF program's return value that will be returned to the upper layers. 5129 * 5130 * This helper is currently supported by cgroup programs and only by the hooks 5131 * where the BPF program's return value is returned to userspace via errno. 5132 * 5133 * Note that there is the following corner case where the program exports an error 5134 * via bpf_set_retval but signals success via 'return 1': 5135 * 5136 * bpf_set_retval(-EPERM); 5137 * return 1; 5138 * 5139 * In this case, the BPF program's return value will use the helper's -EPERM. This 5140 * still holds true for cgroup/bind{4,6}, which supports the extra 'return 3' success case. 5141 * 5142 * Return 5143 * 0 on success, or a negative error in case of failure.
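 *
 *	As an illustration, a minimal sketch of **bpf_loop**\ () usage
 *	(the callback and variable names are hypothetical); returning 0
 *	from the callback continues the loop, returning 1 stops it:
 *
 *	::
 *
 *		static long sum_cb(u32 index, void *ctx)
 *		{
 *			*(u64 *)ctx += index;
 *			return 0;
 *		}
 *
 *		u64 sum = 0;
 *
 *		bpf_loop(100, sum_cb, &sum, 0);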
5144 * 5145 * u64 bpf_xdp_get_buff_len(struct xdp_buff *xdp_md) 5146 * Description 5147 * Get the total size of a given xdp buff (linear and paged area). 5148 * Return 5149 * The total size of a given xdp buffer. 5150 * 5151 * long bpf_xdp_load_bytes(struct xdp_buff *xdp_md, u32 offset, void *buf, u32 len) 5152 * Description 5153 * This helper is provided as an easy way to load data from an 5154 * xdp buffer. It can be used to load *len* bytes from *offset* from 5155 * the frame associated with *xdp_md*, into the buffer pointed to by 5156 * *buf*. 5157 * Return 5158 * 0 on success, or a negative error in case of failure. 5159 * 5160 * long bpf_xdp_store_bytes(struct xdp_buff *xdp_md, u32 offset, void *buf, u32 len) 5161 * Description 5162 * Store *len* bytes from buffer *buf* into the frame 5163 * associated with *xdp_md*, at *offset*. 5164 * Return 5165 * 0 on success, or a negative error in case of failure. 5166 * 5167 * long bpf_copy_from_user_task(void *dst, u32 size, const void *user_ptr, struct task_struct *tsk, u64 flags) 5168 * Description 5169 * Read *size* bytes from user space address *user_ptr* in *tsk*'s 5170 * address space, and store the data in *dst*. *flags* is not 5171 * used yet and is provided for future extensibility. This helper 5172 * can only be used by sleepable programs. 5173 * Return 5174 * 0 on success, or a negative error in case of failure. On error, the 5175 * *dst* buffer is zeroed out. 5176 * 5177 * long bpf_skb_set_tstamp(struct sk_buff *skb, u64 tstamp, u32 tstamp_type) 5178 * Description 5179 * Change __sk_buff->tstamp_type to *tstamp_type* 5180 * and set __sk_buff->tstamp to *tstamp* at the same time. 5181 * 5182 * If there is no need to change __sk_buff->tstamp_type, 5183 * the tstamp value can be directly written to __sk_buff->tstamp 5184 * instead. 5185 * 5186 * BPF_SKB_TSTAMP_DELIVERY_MONO is the only tstamp that 5187 * will be kept during bpf_redirect_*(). A non-zero 5188 * *tstamp* must be used with the BPF_SKB_TSTAMP_DELIVERY_MONO 5189 * *tstamp_type*. 5190 * 5191 * A BPF_SKB_TSTAMP_UNSPEC *tstamp_type* can only be used 5192 * with a zero *tstamp*. 5193 * 5194 * Only IPv4 and IPv6 skb->protocol are supported. 5195 * 5196 * This helper is most useful when the program needs to set a 5197 * mono delivery time in __sk_buff->tstamp and then call 5198 * bpf_redirect_*() to the egress of an iface. For example, 5199 * changing the (rcv) timestamp in __sk_buff->tstamp at 5200 * ingress to a mono delivery time and then calling bpf_redirect_*() 5201 * to sch_fq@phy-dev. 5202 * Return 5203 * 0 on success. 5204 * **-EINVAL** for invalid input. 5205 * **-EOPNOTSUPP** for an unsupported protocol. 5206 * 5207 * long bpf_ima_file_hash(struct file *file, void *dst, u32 size) 5208 * Description 5209 * Returns a calculated IMA hash of the *file*. 5210 * If the hash is larger than *size*, then only *size* 5211 * bytes will be copied to *dst*. 5212 * Return 5213 * The **hash_algo** is returned on success; 5214 * **-EOPNOTSUPP** if the hash calculation failed or **-EINVAL** if 5215 * invalid arguments are passed. 5216 * 5217 * void *bpf_kptr_xchg(void *map_value, void *ptr) 5218 * Description 5219 * Exchange the kptr at pointer *map_value* with *ptr*, and return the 5220 * old value. *ptr* can be NULL, otherwise it must be a referenced 5221 * pointer which will be released when this helper is called. 5222 * Return 5223 * The old value of the kptr (which can be NULL).
The returned pointer, 5224 * if not NULL, is a reference which must be released using its 5225 * corresponding release function, or moved into a BPF map before 5226 * program exit. 5227 * 5228 * void *bpf_map_lookup_percpu_elem(struct bpf_map *map, const void *key, u32 cpu) 5229 * Description 5230 * Perform a lookup in the *percpu map* for an entry associated with 5231 * *key* on *cpu*. 5232 * Return 5233 * The map value associated with *key* on *cpu*, or **NULL** if no entry 5234 * was found or *cpu* is invalid. 5235 * 5236 * struct mptcp_sock *bpf_skc_to_mptcp_sock(void *sk) 5237 * Description 5238 * Dynamically cast a *sk* pointer to a *mptcp_sock* pointer. 5239 * Return 5240 * *sk* if casting is valid, or **NULL** otherwise. 5241 * 5242 * long bpf_dynptr_from_mem(void *data, u32 size, u64 flags, struct bpf_dynptr *ptr) 5243 * Description 5244 * Get a dynptr to local memory *data*. 5245 * 5246 * *data* must be a pointer to a map value. 5247 * The maximum *size* supported is DYNPTR_MAX_SIZE. 5248 * *flags* is currently unused. 5249 * Return 5250 * 0 on success, -E2BIG if the size exceeds DYNPTR_MAX_SIZE, 5251 * -EINVAL if flags is not 0. 5252 * 5253 * long bpf_ringbuf_reserve_dynptr(void *ringbuf, u32 size, u64 flags, struct bpf_dynptr *ptr) 5254 * Description 5255 * Reserve *size* bytes of payload in a ring buffer *ringbuf* 5256 * through the dynptr interface. *flags* must be 0. 5257 * 5258 * Please note that a corresponding bpf_ringbuf_submit_dynptr or 5259 * bpf_ringbuf_discard_dynptr must be called on *ptr*, even if the 5260 * reservation fails. This is enforced by the verifier. 5261 * Return 5262 * 0 on success, or a negative error in case of failure. 5263 * 5264 * void bpf_ringbuf_submit_dynptr(struct bpf_dynptr *ptr, u64 flags) 5265 * Description 5266 * Submit the reserved ring buffer sample, pointed to by *ptr*, 5267 * through the dynptr interface. This is a no-op if the dynptr is 5268 * invalid/null. 5269 * 5270 * For more information on *flags*, please see 5271 * 'bpf_ringbuf_submit'. 5272 * Return 5273 * Nothing. Always succeeds. 5274 * 5275 * void bpf_ringbuf_discard_dynptr(struct bpf_dynptr *ptr, u64 flags) 5276 * Description 5277 * Discard the reserved ring buffer sample through the dynptr 5278 * interface. This is a no-op if the dynptr is invalid/null. 5279 * 5280 * For more information on *flags*, please see 5281 * 'bpf_ringbuf_discard'. 5282 * Return 5283 * Nothing. Always succeeds. 5284 * 5285 * long bpf_dynptr_read(void *dst, u32 len, struct bpf_dynptr *src, u32 offset, u64 flags) 5286 * Description 5287 * Read *len* bytes from *src* into *dst*, starting from *offset* 5288 * into *src*. 5289 * *flags* is currently unused. 5290 * Return 5291 * 0 on success, -E2BIG if *offset* + *len* exceeds the length 5292 * of *src*'s data, -EINVAL if *src* is an invalid dynptr or if 5293 * *flags* is not 0. 5294 * 5295 * long bpf_dynptr_write(struct bpf_dynptr *dst, u32 offset, void *src, u32 len, u64 flags) 5296 * Description 5297 * Write *len* bytes from *src* into *dst*, starting from *offset* 5298 * into *dst*. 5299 * *flags* is currently unused. 5300 * Return 5301 * 0 on success, -E2BIG if *offset* + *len* exceeds the length 5302 * of *dst*'s data, -EINVAL if *dst* is an invalid dynptr, if *dst* 5303 * is a read-only dynptr or if *flags* is not 0. 5304 * 5305 * void *bpf_dynptr_data(struct bpf_dynptr *ptr, u32 offset, u32 len) 5306 * Description 5307 * Get a pointer to the underlying dynptr data. 5308 * 5309 * *len* must be a statically known value.
The returned data slice 5310 * is invalidated whenever the dynptr is invalidated. 5311 * Return 5312 * Pointer to the underlying dynptr data, NULL if the dynptr is 5313 * read-only, if the dynptr is invalid, or if the offset and length 5314 * are out of bounds. 5315 * 5316 * s64 bpf_tcp_raw_gen_syncookie_ipv4(struct iphdr *iph, struct tcphdr *th, u32 th_len) 5317 * Description 5318 * Try to issue a SYN cookie for the packet with corresponding 5319 * IPv4/TCP headers, *iph* and *th*, without depending on a 5320 * listening socket. 5321 * 5322 * *iph* points to the IPv4 header. 5323 * 5324 * *th* points to the start of the TCP header, while *th_len* 5325 * contains the length of the TCP header (at least 5326 * **sizeof**\ (**struct tcphdr**)). 5327 * Return 5328 * On success, the lower 32 bits hold the generated SYN cookie, 5329 * followed by 16 bits which hold the MSS value for that cookie, 5330 * and the top 16 bits are unused. 5331 * 5332 * On failure, the returned value is one of the following: 5333 * 5334 * **-EINVAL** if *th_len* is invalid. 5335 * 5336 * s64 bpf_tcp_raw_gen_syncookie_ipv6(struct ipv6hdr *iph, struct tcphdr *th, u32 th_len) 5337 * Description 5338 * Try to issue a SYN cookie for the packet with corresponding 5339 * IPv6/TCP headers, *iph* and *th*, without depending on a 5340 * listening socket. 5341 * 5342 * *iph* points to the IPv6 header. 5343 * 5344 * *th* points to the start of the TCP header, while *th_len* 5345 * contains the length of the TCP header (at least 5346 * **sizeof**\ (**struct tcphdr**)). 5347 * Return 5348 * On success, the lower 32 bits hold the generated SYN cookie, 5349 * followed by 16 bits which hold the MSS value for that cookie, 5350 * and the top 16 bits are unused. 5351 * 5352 * On failure, the returned value is one of the following: 5353 * 5354 * **-EINVAL** if *th_len* is invalid. 5355 * 5356 * **-EPROTONOSUPPORT** if CONFIG_IPV6 is not built in. 5357 * 5358 * long bpf_tcp_raw_check_syncookie_ipv4(struct iphdr *iph, struct tcphdr *th) 5359 * Description 5360 * Check whether *iph* and *th* contain a valid SYN cookie ACK 5361 * without depending on a listening socket. 5362 * 5363 * *iph* points to the IPv4 header. 5364 * 5365 * *th* points to the TCP header. 5366 * Return 5367 * 0 if *iph* and *th* are a valid SYN cookie ACK. 5368 * 5369 * On failure, the returned value is one of the following: 5370 * 5371 * **-EACCES** if the SYN cookie is not valid. 5372 * 5373 * long bpf_tcp_raw_check_syncookie_ipv6(struct ipv6hdr *iph, struct tcphdr *th) 5374 * Description 5375 * Check whether *iph* and *th* contain a valid SYN cookie ACK 5376 * without depending on a listening socket. 5377 * 5378 * *iph* points to the IPv6 header. 5379 * 5380 * *th* points to the TCP header. 5381 * Return 5382 * 0 if *iph* and *th* are a valid SYN cookie ACK. 5383 * 5384 * On failure, the returned value is one of the following: 5385 * 5386 * **-EACCES** if the SYN cookie is not valid. 5387 * 5388 * **-EPROTONOSUPPORT** if CONFIG_IPV6 is not built in. 5389 * 5390 * u64 bpf_ktime_get_tai_ns(void) 5391 * Description 5392 * Return the time of a nonsettable system-wide clock derived from wall-clock 5393 * time but ignoring leap seconds. This clock does not experience 5394 * discontinuities and backwards jumps caused by NTP inserting leap 5395 * seconds as CLOCK_REALTIME does. 5396 * 5397 * See: **clock_gettime**\ (**CLOCK_TAI**) 5398 * Return 5399 * Current *ktime*.
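 *
 *	As an illustration, a minimal sketch tying together the dynptr
 *	ring buffer helpers documented above (the ring buffer map name
 *	"events" and the u64 sample are assumptions); note that the
 *	discard is required even when the reservation fails:
 *
 *	::
 *
 *		struct bpf_dynptr ptr;
 *		u64 sample = 42;
 *
 *		if (bpf_ringbuf_reserve_dynptr(&events, sizeof(sample),
 *					       0, &ptr)) {
 *			bpf_ringbuf_discard_dynptr(&ptr, 0);
 *			return 0;
 *		}
 *		bpf_dynptr_write(&ptr, 0, &sample, sizeof(sample), 0);
 *		bpf_ringbuf_submit_dynptr(&ptr, 0);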
5400 * 5401 * long bpf_user_ringbuf_drain(struct bpf_map *map, void *callback_fn, void *ctx, u64 flags) 5402 * Description 5403 * Drain samples from the specified user ring buffer, and invoke 5404 * the provided callback for each such sample: 5405 * 5406 * long (\*callback_fn)(struct bpf_dynptr \*dynptr, void \*ctx); 5407 * 5408 * If **callback_fn** returns 0, the helper will continue to try 5409 * and drain the next sample, up to a maximum of 5410 * BPF_MAX_USER_RINGBUF_SAMPLES samples. If the return value is 1, 5411 * the helper will skip the rest of the samples and return. Other 5412 * return values are not used now, and will be rejected by the 5413 * verifier. 5414 * Return 5415 * The number of drained samples if no error was encountered while 5416 * draining samples, or 0 if no samples were present in the ring 5417 * buffer. If a user-space producer was epoll-waiting on this map, 5418 * and at least one sample was drained, they will receive an event 5419 * notification notifying them of available space in the ring 5420 * buffer. If the BPF_RB_NO_WAKEUP flag is passed to this 5421 * function, no wakeup notification will be sent. If the 5422 * BPF_RB_FORCE_WAKEUP flag is passed, a wakeup notification will 5423 * be sent even if no sample was drained. 5424 * 5425 * On failure, the returned value is one of the following: 5426 * 5427 * **-EBUSY** if the ring buffer is contended, and another calling 5428 * context was concurrently draining the ring buffer. 5429 * 5430 * **-EINVAL** if user-space is not properly tracking the ring 5431 * buffer due to the producer position not being aligned to 8 5432 * bytes, a sample not being aligned to 8 bytes, or the producer 5433 * position not matching the advertised length of a sample. 5434 * 5435 * **-E2BIG** if user-space has tried to publish a sample which is 5436 * larger than the size of the ring buffer, or which cannot fit 5437 * within a struct bpf_dynptr. 
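 *
 *	As an illustration, a minimal sketch of a
 *	**bpf_user_ringbuf_drain**\ () callback (the map name
 *	"user_buf" and the sample struct are assumptions); returning 0
 *	from the callback continues draining, returning 1 stops:
 *
 *	::
 *
 *		struct msg { u64 id; };
 *
 *		static long handle_sample(struct bpf_dynptr *dynptr, void *ctx)
 *		{
 *			struct msg m;
 *
 *			if (bpf_dynptr_read(&m, sizeof(m), dynptr, 0, 0))
 *				return 1;
 *			return 0;
 *		}
 *
 *		bpf_user_ringbuf_drain(&user_buf, handle_sample, NULL, 0);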
5438 */ 5439 #define __BPF_FUNC_MAPPER(FN) \ 5440 FN(unspec), \ 5441 FN(map_lookup_elem), \ 5442 FN(map_update_elem), \ 5443 FN(map_delete_elem), \ 5444 FN(probe_read), \ 5445 FN(ktime_get_ns), \ 5446 FN(trace_printk), \ 5447 FN(get_prandom_u32), \ 5448 FN(get_smp_processor_id), \ 5449 FN(skb_store_bytes), \ 5450 FN(l3_csum_replace), \ 5451 FN(l4_csum_replace), \ 5452 FN(tail_call), \ 5453 FN(clone_redirect), \ 5454 FN(get_current_pid_tgid), \ 5455 FN(get_current_uid_gid), \ 5456 FN(get_current_comm), \ 5457 FN(get_cgroup_classid), \ 5458 FN(skb_vlan_push), \ 5459 FN(skb_vlan_pop), \ 5460 FN(skb_get_tunnel_key), \ 5461 FN(skb_set_tunnel_key), \ 5462 FN(perf_event_read), \ 5463 FN(redirect), \ 5464 FN(get_route_realm), \ 5465 FN(perf_event_output), \ 5466 FN(skb_load_bytes), \ 5467 FN(get_stackid), \ 5468 FN(csum_diff), \ 5469 FN(skb_get_tunnel_opt), \ 5470 FN(skb_set_tunnel_opt), \ 5471 FN(skb_change_proto), \ 5472 FN(skb_change_type), \ 5473 FN(skb_under_cgroup), \ 5474 FN(get_hash_recalc), \ 5475 FN(get_current_task), \ 5476 FN(probe_write_user), \ 5477 FN(current_task_under_cgroup), \ 5478 FN(skb_change_tail), \ 5479 FN(skb_pull_data), \ 5480 FN(csum_update), \ 5481 FN(set_hash_invalid), \ 5482 FN(get_numa_node_id), \ 5483 FN(skb_change_head), \ 5484 FN(xdp_adjust_head), \ 5485 FN(probe_read_str), \ 5486 FN(get_socket_cookie), \ 5487 FN(get_socket_uid), \ 5488 FN(set_hash), \ 5489 FN(setsockopt), \ 5490 FN(skb_adjust_room), \ 5491 FN(redirect_map), \ 5492 FN(sk_redirect_map), \ 5493 FN(sock_map_update), \ 5494 FN(xdp_adjust_meta), \ 5495 FN(perf_event_read_value), \ 5496 FN(perf_prog_read_value), \ 5497 FN(getsockopt), \ 5498 FN(override_return), \ 5499 FN(sock_ops_cb_flags_set), \ 5500 FN(msg_redirect_map), \ 5501 FN(msg_apply_bytes), \ 5502 FN(msg_cork_bytes), \ 5503 FN(msg_pull_data), \ 5504 FN(bind), \ 5505 FN(xdp_adjust_tail), \ 5506 FN(skb_get_xfrm_state), \ 5507 FN(get_stack), \ 5508 FN(skb_load_bytes_relative), \ 5509 FN(fib_lookup), \ 5510 FN(sock_hash_update), \ 5511 FN(msg_redirect_hash), \ 5512 FN(sk_redirect_hash), \ 5513 FN(lwt_push_encap), \ 5514 FN(lwt_seg6_store_bytes), \ 5515 FN(lwt_seg6_adjust_srh), \ 5516 FN(lwt_seg6_action), \ 5517 FN(rc_repeat), \ 5518 FN(rc_keydown), \ 5519 FN(skb_cgroup_id), \ 5520 FN(get_current_cgroup_id), \ 5521 FN(get_local_storage), \ 5522 FN(sk_select_reuseport), \ 5523 FN(skb_ancestor_cgroup_id), \ 5524 FN(sk_lookup_tcp), \ 5525 FN(sk_lookup_udp), \ 5526 FN(sk_release), \ 5527 FN(map_push_elem), \ 5528 FN(map_pop_elem), \ 5529 FN(map_peek_elem), \ 5530 FN(msg_push_data), \ 5531 FN(msg_pop_data), \ 5532 FN(rc_pointer_rel), \ 5533 FN(spin_lock), \ 5534 FN(spin_unlock), \ 5535 FN(sk_fullsock), \ 5536 FN(tcp_sock), \ 5537 FN(skb_ecn_set_ce), \ 5538 FN(get_listener_sock), \ 5539 FN(skc_lookup_tcp), \ 5540 FN(tcp_check_syncookie), \ 5541 FN(sysctl_get_name), \ 5542 FN(sysctl_get_current_value), \ 5543 FN(sysctl_get_new_value), \ 5544 FN(sysctl_set_new_value), \ 5545 FN(strtol), \ 5546 FN(strtoul), \ 5547 FN(sk_storage_get), \ 5548 FN(sk_storage_delete), \ 5549 FN(send_signal), \ 5550 FN(tcp_gen_syncookie), \ 5551 FN(skb_output), \ 5552 FN(probe_read_user), \ 5553 FN(probe_read_kernel), \ 5554 FN(probe_read_user_str), \ 5555 FN(probe_read_kernel_str), \ 5556 FN(tcp_send_ack), \ 5557 FN(send_signal_thread), \ 5558 FN(jiffies64), \ 5559 FN(read_branch_records), \ 5560 FN(get_ns_current_pid_tgid), \ 5561 FN(xdp_output), \ 5562 FN(get_netns_cookie), \ 5563 FN(get_current_ancestor_cgroup_id), \ 5564 FN(sk_assign), \ 5565 FN(ktime_get_boot_ns), \ 5566 
FN(seq_printf), \ 5567 FN(seq_write), \ 5568 FN(sk_cgroup_id), \ 5569 FN(sk_ancestor_cgroup_id), \ 5570 FN(ringbuf_output), \ 5571 FN(ringbuf_reserve), \ 5572 FN(ringbuf_submit), \ 5573 FN(ringbuf_discard), \ 5574 FN(ringbuf_query), \ 5575 FN(csum_level), \ 5576 FN(skc_to_tcp6_sock), \ 5577 FN(skc_to_tcp_sock), \ 5578 FN(skc_to_tcp_timewait_sock), \ 5579 FN(skc_to_tcp_request_sock), \ 5580 FN(skc_to_udp6_sock), \ 5581 FN(get_task_stack), \ 5582 FN(load_hdr_opt), \ 5583 FN(store_hdr_opt), \ 5584 FN(reserve_hdr_opt), \ 5585 FN(inode_storage_get), \ 5586 FN(inode_storage_delete), \ 5587 FN(d_path), \ 5588 FN(copy_from_user), \ 5589 FN(snprintf_btf), \ 5590 FN(seq_printf_btf), \ 5591 FN(skb_cgroup_classid), \ 5592 FN(redirect_neigh), \ 5593 FN(per_cpu_ptr), \ 5594 FN(this_cpu_ptr), \ 5595 FN(redirect_peer), \ 5596 FN(task_storage_get), \ 5597 FN(task_storage_delete), \ 5598 FN(get_current_task_btf), \ 5599 FN(bprm_opts_set), \ 5600 FN(ktime_get_coarse_ns), \ 5601 FN(ima_inode_hash), \ 5602 FN(sock_from_file), \ 5603 FN(check_mtu), \ 5604 FN(for_each_map_elem), \ 5605 FN(snprintf), \ 5606 FN(sys_bpf), \ 5607 FN(btf_find_by_name_kind), \ 5608 FN(sys_close), \ 5609 FN(timer_init), \ 5610 FN(timer_set_callback), \ 5611 FN(timer_start), \ 5612 FN(timer_cancel), \ 5613 FN(get_func_ip), \ 5614 FN(get_attach_cookie), \ 5615 FN(task_pt_regs), \ 5616 FN(get_branch_snapshot), \ 5617 FN(trace_vprintk), \ 5618 FN(skc_to_unix_sock), \ 5619 FN(kallsyms_lookup_name), \ 5620 FN(find_vma), \ 5621 FN(loop), \ 5622 FN(strncmp), \ 5623 FN(get_func_arg), \ 5624 FN(get_func_ret), \ 5625 FN(get_func_arg_cnt), \ 5626 FN(get_retval), \ 5627 FN(set_retval), \ 5628 FN(xdp_get_buff_len), \ 5629 FN(xdp_load_bytes), \ 5630 FN(xdp_store_bytes), \ 5631 FN(copy_from_user_task), \ 5632 FN(skb_set_tstamp), \ 5633 FN(ima_file_hash), \ 5634 FN(kptr_xchg), \ 5635 FN(map_lookup_percpu_elem), \ 5636 FN(skc_to_mptcp_sock), \ 5637 FN(dynptr_from_mem), \ 5638 FN(ringbuf_reserve_dynptr), \ 5639 FN(ringbuf_submit_dynptr), \ 5640 FN(ringbuf_discard_dynptr), \ 5641 FN(dynptr_read), \ 5642 FN(dynptr_write), \ 5643 FN(dynptr_data), \ 5644 FN(tcp_raw_gen_syncookie_ipv4), \ 5645 FN(tcp_raw_gen_syncookie_ipv6), \ 5646 FN(tcp_raw_check_syncookie_ipv4), \ 5647 FN(tcp_raw_check_syncookie_ipv6), \ 5648 FN(ktime_get_tai_ns), \ 5649 FN(user_ringbuf_drain), \ 5650 /* */ 5651 5652 /* integer value in 'imm' field of BPF_CALL instruction selects which helper 5653 * function eBPF program intends to call 5654 */ 5655 #define __BPF_ENUM_FN(x) BPF_FUNC_ ## x 5656 enum bpf_func_id { 5657 __BPF_FUNC_MAPPER(__BPF_ENUM_FN) 5658 __BPF_FUNC_MAX_ID, 5659 }; 5660 #undef __BPF_ENUM_FN 5661 5662 /* All flags used by eBPF helper functions, placed here. */ 5663 5664 /* BPF_FUNC_skb_store_bytes flags. */ 5665 enum { 5666 BPF_F_RECOMPUTE_CSUM = (1ULL << 0), 5667 BPF_F_INVALIDATE_HASH = (1ULL << 1), 5668 }; 5669 5670 /* BPF_FUNC_l3_csum_replace and BPF_FUNC_l4_csum_replace flags. 5671 * First 4 bits are for passing the header field size. 5672 */ 5673 enum { 5674 BPF_F_HDR_FIELD_MASK = 0xfULL, 5675 }; 5676 5677 /* BPF_FUNC_l4_csum_replace flags. */ 5678 enum { 5679 BPF_F_PSEUDO_HDR = (1ULL << 4), 5680 BPF_F_MARK_MANGLED_0 = (1ULL << 5), 5681 BPF_F_MARK_ENFORCE = (1ULL << 6), 5682 }; 5683 5684 /* BPF_FUNC_clone_redirect and BPF_FUNC_redirect flags. */ 5685 enum { 5686 BPF_F_INGRESS = (1ULL << 0), 5687 }; 5688 5689 /* BPF_FUNC_skb_set_tunnel_key and BPF_FUNC_skb_get_tunnel_key flags. 
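 *
 * For illustration (a sketch, not part of the UAPI; assumes libbpf's
 * SEC() macro, the bpf_skb_get_tunnel_key() helper wrapper and TC_ACT_*
 * from linux/pkt_cls.h): a TC program on a collect-md tunnel device
 * could read the tunnel key for an IPv6 underlay like this:
 *
 *	SEC("tc")
 *	int read_tunnel(struct __sk_buff *skb)
 *	{
 *		struct bpf_tunnel_key key = {};
 *
 *		if (bpf_skb_get_tunnel_key(skb, &key, sizeof(key),
 *					   BPF_F_TUNINFO_IPV6) < 0)
 *			return TC_ACT_SHOT;
 *		return TC_ACT_OK;
 *	}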
*/ 5690 enum { 5691 BPF_F_TUNINFO_IPV6 = (1ULL << 0), 5692 }; 5693 5694 /* flags for both BPF_FUNC_get_stackid and BPF_FUNC_get_stack. */ 5695 enum { 5696 BPF_F_SKIP_FIELD_MASK = 0xffULL, 5697 BPF_F_USER_STACK = (1ULL << 8), 5698 /* flags used by BPF_FUNC_get_stackid only. */ 5699 BPF_F_FAST_STACK_CMP = (1ULL << 9), 5700 BPF_F_REUSE_STACKID = (1ULL << 10), 5701 /* flags used by BPF_FUNC_get_stack only. */ 5702 BPF_F_USER_BUILD_ID = (1ULL << 11), 5703 }; 5704 5705 /* BPF_FUNC_skb_set_tunnel_key flags. */ 5706 enum { 5707 BPF_F_ZERO_CSUM_TX = (1ULL << 1), 5708 BPF_F_DONT_FRAGMENT = (1ULL << 2), 5709 BPF_F_SEQ_NUMBER = (1ULL << 3), 5710 }; 5711 5712 /* BPF_FUNC_skb_get_tunnel_key flags. */ 5713 enum { 5714 BPF_F_TUNINFO_FLAGS = (1ULL << 4), 5715 }; 5716 5717 /* BPF_FUNC_perf_event_output, BPF_FUNC_perf_event_read and 5718 * BPF_FUNC_perf_event_read_value flags. 5719 */ 5720 enum { 5721 BPF_F_INDEX_MASK = 0xffffffffULL, 5722 BPF_F_CURRENT_CPU = BPF_F_INDEX_MASK, 5723 /* BPF_FUNC_perf_event_output for sk_buff input context. */ 5724 BPF_F_CTXLEN_MASK = (0xfffffULL << 32), 5725 }; 5726 5727 /* Current network namespace */ 5728 enum { 5729 BPF_F_CURRENT_NETNS = (-1L), 5730 }; 5731 5732 /* BPF_FUNC_csum_level level values. */ 5733 enum { 5734 BPF_CSUM_LEVEL_QUERY, 5735 BPF_CSUM_LEVEL_INC, 5736 BPF_CSUM_LEVEL_DEC, 5737 BPF_CSUM_LEVEL_RESET, 5738 }; 5739 5740 /* BPF_FUNC_skb_adjust_room flags. */ 5741 enum { 5742 BPF_F_ADJ_ROOM_FIXED_GSO = (1ULL << 0), 5743 BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 = (1ULL << 1), 5744 BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = (1ULL << 2), 5745 BPF_F_ADJ_ROOM_ENCAP_L4_GRE = (1ULL << 3), 5746 BPF_F_ADJ_ROOM_ENCAP_L4_UDP = (1ULL << 4), 5747 BPF_F_ADJ_ROOM_NO_CSUM_RESET = (1ULL << 5), 5748 BPF_F_ADJ_ROOM_ENCAP_L2_ETH = (1ULL << 6), 5749 }; 5750 5751 enum { 5752 BPF_ADJ_ROOM_ENCAP_L2_MASK = 0xff, 5753 BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 56, 5754 }; 5755 5756 #define BPF_F_ADJ_ROOM_ENCAP_L2(len) (((__u64)len & \ 5757 BPF_ADJ_ROOM_ENCAP_L2_MASK) \ 5758 << BPF_ADJ_ROOM_ENCAP_L2_SHIFT) 5759 5760 /* BPF_FUNC_sysctl_get_name flags. */ 5761 enum { 5762 BPF_F_SYSCTL_BASE_NAME = (1ULL << 0), 5763 }; 5764 5765 /* BPF_FUNC_<kernel_obj>_storage_get flags */ 5766 enum { 5767 BPF_LOCAL_STORAGE_GET_F_CREATE = (1ULL << 0), 5768 /* BPF_SK_STORAGE_GET_F_CREATE is only kept for backward compatibility 5769 * and BPF_LOCAL_STORAGE_GET_F_CREATE must be used instead. 5770 */ 5771 BPF_SK_STORAGE_GET_F_CREATE = BPF_LOCAL_STORAGE_GET_F_CREATE, 5772 }; 5773 5774 /* BPF_FUNC_read_branch_records flags. */ 5775 enum { 5776 BPF_F_GET_BRANCH_RECORDS_SIZE = (1ULL << 0), 5777 }; 5778 5779 /* BPF_FUNC_ringbuf_submit, BPF_FUNC_ringbuf_discard, and 5780 * BPF_FUNC_ringbuf_output flags. 5781 */ 5782 enum { 5783 BPF_RB_NO_WAKEUP = (1ULL << 0), 5784 BPF_RB_FORCE_WAKEUP = (1ULL << 1), 5785 }; 5786 5787 /* BPF_FUNC_ringbuf_query flags */ 5788 enum { 5789 BPF_RB_AVAIL_DATA = 0, 5790 BPF_RB_RING_SIZE = 1, 5791 BPF_RB_CONS_POS = 2, 5792 BPF_RB_PROD_POS = 3, 5793 }; 5794 5795 /* BPF ring buffer constants */ 5796 enum { 5797 BPF_RINGBUF_BUSY_BIT = (1U << 31), 5798 BPF_RINGBUF_DISCARD_BIT = (1U << 30), 5799 BPF_RINGBUF_HDR_SZ = 8, 5800 }; 5801 5802 /* BPF_FUNC_sk_assign flags in bpf_sk_lookup context. */ 5803 enum { 5804 BPF_SK_LOOKUP_F_REPLACE = (1ULL << 0), 5805 BPF_SK_LOOKUP_F_NO_REUSEPORT = (1ULL << 1), 5806 }; 5807 5808 /* Mode for BPF_FUNC_skb_adjust_room helper. */ 5809 enum bpf_adj_room_mode { 5810 BPF_ADJ_ROOM_NET, 5811 BPF_ADJ_ROOM_MAC, 5812 }; 5813 5814 /* Mode for BPF_FUNC_skb_load_bytes_relative helper. 
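 *
 * For example (an illustrative sketch; assumes the
 * bpf_skb_load_bytes_relative() helper wrapper and struct iphdr from
 * linux/ip.h), a program can read the IPv4 header independently of the
 * L2 encapsulation:
 *
 *	struct iphdr iph;
 *
 *	if (bpf_skb_load_bytes_relative(skb, 0, &iph, sizeof(iph),
 *					BPF_HDR_START_NET) < 0)
 *		return 0;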
*/ 5815 enum bpf_hdr_start_off { 5816 BPF_HDR_START_MAC, 5817 BPF_HDR_START_NET, 5818 }; 5819 5820 /* Encapsulation type for BPF_FUNC_lwt_push_encap helper. */ 5821 enum bpf_lwt_encap_mode { 5822 BPF_LWT_ENCAP_SEG6, 5823 BPF_LWT_ENCAP_SEG6_INLINE, 5824 BPF_LWT_ENCAP_IP, 5825 }; 5826 5827 /* Flags for bpf_bprm_opts_set helper */ 5828 enum { 5829 BPF_F_BPRM_SECUREEXEC = (1ULL << 0), 5830 }; 5831 5832 /* Flags for bpf_redirect_map helper */ 5833 enum { 5834 BPF_F_BROADCAST = (1ULL << 3), 5835 BPF_F_EXCLUDE_INGRESS = (1ULL << 4), 5836 }; 5837 5838 #define __bpf_md_ptr(type, name) \ 5839 union { \ 5840 type name; \ 5841 __u64 :64; \ 5842 } __attribute__((aligned(8))) 5843 5844 enum { 5845 BPF_SKB_TSTAMP_UNSPEC, 5846 BPF_SKB_TSTAMP_DELIVERY_MONO, /* tstamp has mono delivery time */ 5847 /* For any BPF_SKB_TSTAMP_* that the bpf prog cannot handle, 5848 * the bpf prog should handle it like BPF_SKB_TSTAMP_UNSPEC 5849 * and try to deduce it by ingress, egress or skb->sk->sk_clockid. 5850 */ 5851 }; 5852 5853 /* user accessible mirror of in-kernel sk_buff. 5854 * new fields can only be added to the end of this structure 5855 */ 5856 struct __sk_buff { 5857 __u32 len; 5858 __u32 pkt_type; 5859 __u32 mark; 5860 __u32 queue_mapping; 5861 __u32 protocol; 5862 __u32 vlan_present; 5863 __u32 vlan_tci; 5864 __u32 vlan_proto; 5865 __u32 priority; 5866 __u32 ingress_ifindex; 5867 __u32 ifindex; 5868 __u32 tc_index; 5869 __u32 cb[5]; 5870 __u32 hash; 5871 __u32 tc_classid; 5872 __u32 data; 5873 __u32 data_end; 5874 __u32 napi_id; 5875 5876 /* Accessed by BPF_PROG_TYPE_sk_skb types from here to ... */ 5877 __u32 family; 5878 __u32 remote_ip4; /* Stored in network byte order */ 5879 __u32 local_ip4; /* Stored in network byte order */ 5880 __u32 remote_ip6[4]; /* Stored in network byte order */ 5881 __u32 local_ip6[4]; /* Stored in network byte order */ 5882 __u32 remote_port; /* Stored in network byte order */ 5883 __u32 local_port; /* stored in host byte order */ 5884 /* ... here. */ 5885 5886 __u32 data_meta; 5887 __bpf_md_ptr(struct bpf_flow_keys *, flow_keys); 5888 __u64 tstamp; 5889 __u32 wire_len; 5890 __u32 gso_segs; 5891 __bpf_md_ptr(struct bpf_sock *, sk); 5892 __u32 gso_size; 5893 __u8 tstamp_type; 5894 __u32 :24; /* Padding, future use. */ 5895 __u64 hwtstamp; 5896 }; 5897 5898 struct bpf_tunnel_key { 5899 __u32 tunnel_id; 5900 union { 5901 __u32 remote_ipv4; 5902 __u32 remote_ipv6[4]; 5903 }; 5904 __u8 tunnel_tos; 5905 __u8 tunnel_ttl; 5906 union { 5907 __u16 tunnel_ext; /* compat */ 5908 __be16 tunnel_flags; 5909 }; 5910 __u32 tunnel_label; 5911 union { 5912 __u32 local_ipv4; 5913 __u32 local_ipv6[4]; 5914 }; 5915 }; 5916 5917 /* user accessible mirror of in-kernel xfrm_state. 5918 * new fields can only be added to the end of this structure 5919 */ 5920 struct bpf_xfrm_state { 5921 __u32 reqid; 5922 __u32 spi; /* Stored in network byte order */ 5923 __u16 family; 5924 __u16 ext; /* Padding, future use. */ 5925 union { 5926 __u32 remote_ipv4; /* Stored in network byte order */ 5927 __u32 remote_ipv6[4]; /* Stored in network byte order */ 5928 }; 5929 }; 5930 5931 /* Generic BPF return codes which all BPF program types may support. 5932 * The values are binary compatible with their TC_ACT_* counter-part to 5933 * provide backwards compatibility with existing SCHED_CLS and SCHED_ACT 5934 * programs. 5935 * 5936 * XDP is handled separately, see XDP_*. 
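 *
 * As an illustration (a sketch, not part of the UAPI; assumes libbpf's
 * SEC() macro), a BPF_PROG_TYPE_LWT_IN program could use these codes to
 * drop oversized packets:
 *
 *	SEC("lwt_in")
 *	int lwt_len_filter(struct __sk_buff *skb)
 *	{
 *		return skb->len > 1500 ? BPF_DROP : BPF_OK;
 *	}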
5937 */ 5938 enum bpf_ret_code { 5939 BPF_OK = 0, 5940 /* 1 reserved */ 5941 BPF_DROP = 2, 5942 /* 3-6 reserved */ 5943 BPF_REDIRECT = 7, 5944 /* >127 are reserved for prog type specific return codes. 5945 * 5946 * BPF_LWT_REROUTE: used by BPF_PROG_TYPE_LWT_IN and 5947 * BPF_PROG_TYPE_LWT_XMIT to indicate that skb had been 5948 * changed and should be routed based on its new L3 header. 5949 * (This is an L3 redirect, as opposed to L2 redirect 5950 * represented by BPF_REDIRECT above). 5951 */ 5952 BPF_LWT_REROUTE = 128, 5953 /* BPF_FLOW_DISSECTOR_CONTINUE: used by BPF_PROG_TYPE_FLOW_DISSECTOR 5954 * to indicate that no custom dissection was performed, and 5955 * fallback to standard dissector is requested. 5956 */ 5957 BPF_FLOW_DISSECTOR_CONTINUE = 129, 5958 }; 5959 5960 struct bpf_sock { 5961 __u32 bound_dev_if; 5962 __u32 family; 5963 __u32 type; 5964 __u32 protocol; 5965 __u32 mark; 5966 __u32 priority; 5967 /* IP address also allows 1- and 2-byte access */ 5968 __u32 src_ip4; 5969 __u32 src_ip6[4]; 5970 __u32 src_port; /* host byte order */ 5971 __be16 dst_port; /* network byte order */ 5972 __u16 :16; /* zero padding */ 5973 __u32 dst_ip4; 5974 __u32 dst_ip6[4]; 5975 __u32 state; 5976 __s32 rx_queue_mapping; 5977 }; 5978 5979 struct bpf_tcp_sock { 5980 __u32 snd_cwnd; /* Sending congestion window */ 5981 __u32 srtt_us; /* smoothed round trip time << 3 in usecs */ 5982 __u32 rtt_min; 5983 __u32 snd_ssthresh; /* Slow start size threshold */ 5984 __u32 rcv_nxt; /* What we want to receive next */ 5985 __u32 snd_nxt; /* Next sequence we send */ 5986 __u32 snd_una; /* First byte we want an ack for */ 5987 __u32 mss_cache; /* Cached effective mss, not including SACKS */ 5988 __u32 ecn_flags; /* ECN status bits. */ 5989 __u32 rate_delivered; /* saved rate sample: packets delivered */ 5990 __u32 rate_interval_us; /* saved rate sample: time elapsed */ 5991 __u32 packets_out; /* Packets which are "in flight" */ 5992 __u32 retrans_out; /* Retransmitted packets out */ 5993 __u32 total_retrans; /* Total retransmits for entire connection */ 5994 __u32 segs_in; /* RFC4898 tcpEStatsPerfSegsIn 5995 * total number of segments in. 5996 */ 5997 __u32 data_segs_in; /* RFC4898 tcpEStatsPerfDataSegsIn 5998 * total number of data segments in. 5999 */ 6000 __u32 segs_out; /* RFC4898 tcpEStatsPerfSegsOut 6001 * The total number of segments sent. 6002 */ 6003 __u32 data_segs_out; /* RFC4898 tcpEStatsPerfDataSegsOut 6004 * total number of data segments sent. 6005 */ 6006 __u32 lost_out; /* Lost packets */ 6007 __u32 sacked_out; /* SACK'd packets */ 6008 __u64 bytes_received; /* RFC4898 tcpEStatsAppHCThruOctetsReceived 6009 * sum(delta(rcv_nxt)), or how many bytes 6010 * were received. 6011 */ 6012 __u64 bytes_acked; /* RFC4898 tcpEStatsAppHCThruOctetsAcked 6013 * sum(delta(snd_una)), or how many bytes 6014 * were acked. 6015 */ 6016 __u32 dsack_dups; /* RFC4898 tcpEStatsStackDSACKDups 6017 * total number of DSACK blocks received 6018 */ 6019 __u32 delivered; /* Total data packets delivered incl. 
rexmits */ 6020 __u32 delivered_ce; /* Like the above but only ECE marked packets */ 6021 __u32 icsk_retransmits; /* Number of unrecovered [RTO] timeouts */ 6022 }; 6023 6024 struct bpf_sock_tuple { 6025 union { 6026 struct { 6027 __be32 saddr; 6028 __be32 daddr; 6029 __be16 sport; 6030 __be16 dport; 6031 } ipv4; 6032 struct { 6033 __be32 saddr[4]; 6034 __be32 daddr[4]; 6035 __be16 sport; 6036 __be16 dport; 6037 } ipv6; 6038 }; 6039 }; 6040 6041 struct bpf_xdp_sock { 6042 __u32 queue_id; 6043 }; 6044 6045 #define XDP_PACKET_HEADROOM 256 6046 6047 /* User return codes for XDP prog type. 6048 * A valid XDP program must return one of these defined values. All other 6049 * return codes are reserved for future use. Unknown return codes will 6050 * result in packet drops and a warning via bpf_warn_invalid_xdp_action(). 6051 */ 6052 enum xdp_action { 6053 XDP_ABORTED = 0, 6054 XDP_DROP, 6055 XDP_PASS, 6056 XDP_TX, 6057 XDP_REDIRECT, 6058 }; 6059 6060 /* user accessible metadata for XDP packet hook 6061 * new fields must be added to the end of this structure 6062 */ 6063 struct xdp_md { 6064 __u32 data; 6065 __u32 data_end; 6066 __u32 data_meta; 6067 /* Accesses below go through struct xdp_rxq_info */ 6068 __u32 ingress_ifindex; /* rxq->dev->ifindex */ 6069 __u32 rx_queue_index; /* rxq->queue_index */ 6070 6071 __u32 egress_ifindex; /* txq->dev->ifindex */ 6072 }; 6073 6074 /* DEVMAP map-value layout 6075 * 6076 * The struct data-layout of map-value is a configuration interface. 6077 * New members can only be added to the end of this structure. 6078 */ 6079 struct bpf_devmap_val { 6080 __u32 ifindex; /* device index */ 6081 union { 6082 int fd; /* prog fd on map write */ 6083 __u32 id; /* prog id on map read */ 6084 } bpf_prog; 6085 }; 6086 6087 /* CPUMAP map-value layout 6088 * 6089 * The struct data-layout of map-value is a configuration interface. 6090 * New members can only be added to the end of this structure. 6091 */ 6092 struct bpf_cpumap_val { 6093 __u32 qsize; /* queue size to remote target CPU */ 6094 union { 6095 int fd; /* prog fd on map write */ 6096 __u32 id; /* prog id on map read */ 6097 } bpf_prog; 6098 }; 6099 6100 enum sk_action { 6101 SK_DROP = 0, 6102 SK_PASS, 6103 }; 6104 6105 /* user accessible metadata for SK_MSG packet hook, new fields must 6106 * be added to the end of this structure 6107 */ 6108 struct sk_msg_md { 6109 __bpf_md_ptr(void *, data); 6110 __bpf_md_ptr(void *, data_end); 6111 6112 __u32 family; 6113 __u32 remote_ip4; /* Stored in network byte order */ 6114 __u32 local_ip4; /* Stored in network byte order */ 6115 __u32 remote_ip6[4]; /* Stored in network byte order */ 6116 __u32 local_ip6[4]; /* Stored in network byte order */ 6117 __u32 remote_port; /* Stored in network byte order */ 6118 __u32 local_port; /* stored in host byte order */ 6119 __u32 size; /* Total size of sk_msg */ 6120 6121 __bpf_md_ptr(struct bpf_sock *, sk); /* current socket */ 6122 }; 6123 6124 struct sk_reuseport_md { 6125 /* 6126 * Start of directly accessible data. It begins from 6127 * the tcp/udp header. 6128 */ 6129 __bpf_md_ptr(void *, data); 6130 /* End of directly accessible data */ 6131 __bpf_md_ptr(void *, data_end); 6132 /* 6133 * Total length of packet (starting from the tcp/udp header). 6134 * Note that the directly accessible bytes (data_end - data) 6135 * could be less than this "len". Those bytes could be 6136 * indirectly read by a helper "bpf_skb_load_bytes()". 6137 */ 6138 __u32 len; 6139 /* 6140 * Eth protocol in the mac header (network byte order). e.g. 
6141 * ETH_P_IP(0x0800) and ETH_P_IPV6(0x86DD) 6142 */ 6143 __u32 eth_protocol; 6144 __u32 ip_protocol; /* IP protocol. e.g. IPPROTO_TCP, IPPROTO_UDP */ 6145 __u32 bind_inany; /* Is sock bound to an INANY address? */ 6146 __u32 hash; /* A hash of the packet's 4-tuple */ 6147 /* When reuse->migrating_sk is NULL, it is selecting a sk for the 6148 * new incoming connection request (e.g. selecting a listen sk for 6149 * the received SYN in the TCP case). reuse->sk is one of the sk 6150 * in the reuseport group. The bpf prog can use reuse->sk to learn 6151 * the local listening ip/port without looking into the skb. 6152 * 6153 * When reuse->migrating_sk is not NULL, reuse->sk is closed and 6154 * reuse->migrating_sk is the socket that needs to be migrated 6155 * to another listening socket. migrating_sk could be a fullsock 6156 * sk that is fully established or a reqsk that is in the middle 6157 * of the 3-way handshake. 6158 */ 6159 __bpf_md_ptr(struct bpf_sock *, sk); 6160 __bpf_md_ptr(struct bpf_sock *, migrating_sk); 6161 }; 6162 6163 #define BPF_TAG_SIZE 8 6164 6165 struct bpf_prog_info { 6166 __u32 type; 6167 __u32 id; 6168 __u8 tag[BPF_TAG_SIZE]; 6169 __u32 jited_prog_len; 6170 __u32 xlated_prog_len; 6171 __aligned_u64 jited_prog_insns; 6172 __aligned_u64 xlated_prog_insns; 6173 __u64 load_time; /* ns since boottime */ 6174 __u32 created_by_uid; 6175 __u32 nr_map_ids; 6176 __aligned_u64 map_ids; 6177 char name[BPF_OBJ_NAME_LEN]; 6178 __u32 ifindex; 6179 __u32 gpl_compatible:1; 6180 __u32 :31; /* alignment pad */ 6181 __u64 netns_dev; 6182 __u64 netns_ino; 6183 __u32 nr_jited_ksyms; 6184 __u32 nr_jited_func_lens; 6185 __aligned_u64 jited_ksyms; 6186 __aligned_u64 jited_func_lens; 6187 __u32 btf_id; 6188 __u32 func_info_rec_size; 6189 __aligned_u64 func_info; 6190 __u32 nr_func_info; 6191 __u32 nr_line_info; 6192 __aligned_u64 line_info; 6193 __aligned_u64 jited_line_info; 6194 __u32 nr_jited_line_info; 6195 __u32 line_info_rec_size; 6196 __u32 jited_line_info_rec_size; 6197 __u32 nr_prog_tags; 6198 __aligned_u64 prog_tags; 6199 __u64 run_time_ns; 6200 __u64 run_cnt; 6201 __u64 recursion_misses; 6202 __u32 verified_insns; 6203 __u32 attach_btf_obj_id; 6204 __u32 attach_btf_id; 6205 } __attribute__((aligned(8))); 6206 6207 struct bpf_map_info { 6208 __u32 type; 6209 __u32 id; 6210 __u32 key_size; 6211 __u32 value_size; 6212 __u32 max_entries; 6213 __u32 map_flags; 6214 char name[BPF_OBJ_NAME_LEN]; 6215 __u32 ifindex; 6216 __u32 btf_vmlinux_value_type_id; 6217 __u64 netns_dev; 6218 __u64 netns_ino; 6219 __u32 btf_id; 6220 __u32 btf_key_type_id; 6221 __u32 btf_value_type_id; 6222 __u32 :32; /* alignment pad */ 6223 __u64 map_extra; 6224 } __attribute__((aligned(8))); 6225 6226 struct bpf_btf_info { 6227 __aligned_u64 btf; 6228 __u32 btf_size; 6229 __u32 id; 6230 __aligned_u64 name; 6231 __u32 name_len; 6232 __u32 kernel_btf; 6233 } __attribute__((aligned(8))); 6234 6235 struct bpf_link_info { 6236 __u32 type; 6237 __u32 id; 6238 __u32 prog_id; 6239 union { 6240 struct { 6241 __aligned_u64 tp_name; /* in/out: tp_name buffer ptr */ 6242 __u32 tp_name_len; /* in/out: tp_name buffer len */ 6243 } raw_tracepoint; 6244 struct { 6245 __u32 attach_type; 6246 __u32 target_obj_id; /* prog_id for PROG_EXT, otherwise btf object id */ 6247 __u32 target_btf_id; /* BTF type id inside the object */ 6248 } tracing; 6249 struct { 6250 __u64 cgroup_id; 6251 __u32 attach_type; 6252 } cgroup; 6253 struct { 6254 __aligned_u64 target_name; /* in/out: target_name buffer ptr */ 6255 __u32 target_name_len; /* in/out: 
target_name buffer len */ 6256 6257 /* If the iter-specific field is 32 bits, it can be put 6258 * in the first or second union. Otherwise it should be 6259 * put in the second union. 6260 */ 6261 union { 6262 struct { 6263 __u32 map_id; 6264 } map; 6265 }; 6266 union { 6267 struct { 6268 __u64 cgroup_id; 6269 __u32 order; 6270 } cgroup; 6271 struct { 6272 __u32 tid; 6273 __u32 pid; 6274 } task; 6275 }; 6276 } iter; 6277 struct { 6278 __u32 netns_ino; 6279 __u32 attach_type; 6280 } netns; 6281 struct { 6282 __u32 ifindex; 6283 } xdp; 6284 }; 6285 } __attribute__((aligned(8))); 6286 6287 /* User bpf_sock_addr struct to access socket fields and sockaddr struct passed 6288 * by user and intended to be used by socket (e.g. to bind to, depends on 6289 * attach type). 6290 */ 6291 struct bpf_sock_addr { 6292 __u32 user_family; /* Allows 4-byte read, but no write. */ 6293 __u32 user_ip4; /* Allows 1,2,4-byte read and 4-byte write. 6294 * Stored in network byte order. 6295 */ 6296 __u32 user_ip6[4]; /* Allows 1,2,4,8-byte read and 4,8-byte write. 6297 * Stored in network byte order. 6298 */ 6299 __u32 user_port; /* Allows 1,2,4-byte read and 4-byte write. 6300 * Stored in network byte order 6301 */ 6302 __u32 family; /* Allows 4-byte read, but no write */ 6303 __u32 type; /* Allows 4-byte read, but no write */ 6304 __u32 protocol; /* Allows 4-byte read, but no write */ 6305 __u32 msg_src_ip4; /* Allows 1,2,4-byte read and 4-byte write. 6306 * Stored in network byte order. 6307 */ 6308 __u32 msg_src_ip6[4]; /* Allows 1,2,4,8-byte read and 4,8-byte write. 6309 * Stored in network byte order. 6310 */ 6311 __bpf_md_ptr(struct bpf_sock *, sk); 6312 }; 6313 6314 /* User bpf_sock_ops struct to access socket values and specify request ops 6315 * and their replies. 6316 * Some of these fields are in network (big-endian) byte order and may need 6317 * to be converted before use (bpf_ntohl() defined in samples/bpf/bpf_endian.h). 6318 * New fields can only be added at the end of this structure 6319 */ 6320 struct bpf_sock_ops { 6321 __u32 op; 6322 union { 6323 __u32 args[4]; /* Optionally passed to bpf program */ 6324 __u32 reply; /* Returned by bpf program */ 6325 __u32 replylong[4]; /* Optionally returned by bpf prog */ 6326 }; 6327 __u32 family; 6328 __u32 remote_ip4; /* Stored in network byte order */ 6329 __u32 local_ip4; /* Stored in network byte order */ 6330 __u32 remote_ip6[4]; /* Stored in network byte order */ 6331 __u32 local_ip6[4]; /* Stored in network byte order */ 6332 __u32 remote_port; /* Stored in network byte order */ 6333 __u32 local_port; /* stored in host byte order */ 6334 __u32 is_fullsock; /* Some TCP fields are only valid if 6335 * there is a full socket. If not, the 6336 * fields read as zero. 6337 */ 6338 __u32 snd_cwnd; 6339 __u32 srtt_us; /* Averaged RTT << 3 in usecs */ 6340 __u32 bpf_sock_ops_cb_flags; /* flags defined in uapi/linux/tcp.h */ 6341 __u32 state; 6342 __u32 rtt_min; 6343 __u32 snd_ssthresh; 6344 __u32 rcv_nxt; 6345 __u32 snd_nxt; 6346 __u32 snd_una; 6347 __u32 mss_cache; 6348 __u32 ecn_flags; 6349 __u32 rate_delivered; 6350 __u32 rate_interval_us; 6351 __u32 packets_out; 6352 __u32 retrans_out; 6353 __u32 total_retrans; 6354 __u32 segs_in; 6355 __u32 data_segs_in; 6356 __u32 segs_out; 6357 __u32 data_segs_out; 6358 __u32 lost_out; 6359 __u32 sacked_out; 6360 __u32 sk_txhash; 6361 __u64 bytes_received; 6362 __u64 bytes_acked; 6363 __bpf_md_ptr(struct bpf_sock *, sk); 6364 /* [skb_data, skb_data_end) covers the whole TCP header. 
6365 * 6366 * BPF_SOCK_OPS_PARSE_HDR_OPT_CB: The packet received 6367 * BPF_SOCK_OPS_HDR_OPT_LEN_CB: Not useful because the 6368 * header has not been written. 6369 * BPF_SOCK_OPS_WRITE_HDR_OPT_CB: The header and options have 6370 * been written so far. 6371 * BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB: The SYNACK that concludes 6372 * the 3WHS. 6373 * BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB: The ACK that concludes 6374 * the 3WHS. 6375 * 6376 * bpf_load_hdr_opt() can also be used to read a particular option. 6377 */ 6378 __bpf_md_ptr(void *, skb_data); 6379 __bpf_md_ptr(void *, skb_data_end); 6380 __u32 skb_len; /* The total length of a packet. 6381 * It includes the header, options, 6382 * and payload. 6383 */ 6384 __u32 skb_tcp_flags; /* tcp_flags of the header. It provides 6385 * an easy way to check for tcp_flags 6386 * without parsing skb_data. 6387 * 6388 * In particular, the skb_tcp_flags 6389 * will still be available in 6390 * BPF_SOCK_OPS_HDR_OPT_LEN even though 6391 * the outgoing header has not 6392 * been written yet. 6393 */ 6394 }; 6395 6396 /* Definitions for bpf_sock_ops_cb_flags */ 6397 enum { 6398 BPF_SOCK_OPS_RTO_CB_FLAG = (1<<0), 6399 BPF_SOCK_OPS_RETRANS_CB_FLAG = (1<<1), 6400 BPF_SOCK_OPS_STATE_CB_FLAG = (1<<2), 6401 BPF_SOCK_OPS_RTT_CB_FLAG = (1<<3), 6402 /* Call bpf for all received TCP headers. The bpf prog will be 6403 * called under sock_ops->op == BPF_SOCK_OPS_PARSE_HDR_OPT_CB 6404 * 6405 * Please refer to the comment in BPF_SOCK_OPS_PARSE_HDR_OPT_CB 6406 * for the header option related helpers that will be useful 6407 * to the bpf programs. 6408 * 6409 * It could be used at the client/active side (i.e. connect() side) 6410 * when the server has told it that it is in syncookie mode and 6411 * requires the active side to resend the bpf-written options. 6412 * The active side can keep writing the bpf options until it 6413 * receives a valid packet from the server side confirming that 6414 * the earlier packet (and options) has been received. A later 6415 * example patch uses it this way at the active side when the 6416 * server is in syncookie mode. 6417 * 6418 * The bpf prog will usually turn this off in the common case. 6419 */ 6420 BPF_SOCK_OPS_PARSE_ALL_HDR_OPT_CB_FLAG = (1<<4), 6421 /* Call bpf when kernel has received a header option that 6422 * the kernel cannot handle. The bpf prog will be called under 6423 * sock_ops->op == BPF_SOCK_OPS_PARSE_HDR_OPT_CB. 6424 * 6425 * Please refer to the comment in BPF_SOCK_OPS_PARSE_HDR_OPT_CB 6426 * for the header option related helpers that will be useful 6427 * to the bpf programs. 6428 */ 6429 BPF_SOCK_OPS_PARSE_UNKNOWN_HDR_OPT_CB_FLAG = (1<<5), 6430 /* Call bpf when the kernel is writing header options for the 6431 * outgoing packet. The bpf prog will first be called 6432 * to reserve space in a skb under 6433 * sock_ops->op == BPF_SOCK_OPS_HDR_OPT_LEN_CB. Then 6434 * the bpf prog will be called to write the header option(s) 6435 * under sock_ops->op == BPF_SOCK_OPS_WRITE_HDR_OPT_CB. 6436 * 6437 * Please refer to the comment in BPF_SOCK_OPS_HDR_OPT_LEN_CB 6438 * and BPF_SOCK_OPS_WRITE_HDR_OPT_CB for the header option 6439 * related helpers that will be useful to the bpf programs. 6440 * 6441 * The kernel gets its chance to reserve space and write 6442 * options first before the BPF program does. 6443 */ 6444 BPF_SOCK_OPS_WRITE_HDR_OPT_CB_FLAG = (1<<6), 6445 /* Mask of all currently supported cb flags */ 6446 BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7F, 6447 }; 6448 6449 /* List of known BPF sock_ops operators. 
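 *
 * An illustrative sketch (assumes libbpf's SEC() macro; not part of the
 * UAPI): a sock_ops program typically switches on the op field, e.g. to
 * enable RTT callbacks once a connection is established, via the
 * bpf_sock_ops_cb_flags_set() helper:
 *
 *	SEC("sockops")
 *	int sockops_prog(struct bpf_sock_ops *skops)
 *	{
 *		switch (skops->op) {
 *		case BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB:
 *		case BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB:
 *			bpf_sock_ops_cb_flags_set(skops,
 *						  BPF_SOCK_OPS_RTT_CB_FLAG);
 *			break;
 *		}
 *		return 1;
 *	}
 *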
6450 * New entries can only be added at the end 6451 */ 6452 enum { 6453 BPF_SOCK_OPS_VOID, 6454 BPF_SOCK_OPS_TIMEOUT_INIT, /* Should return SYN-RTO value to use or 6455 * -1 if default value should be used 6456 */ 6457 BPF_SOCK_OPS_RWND_INIT, /* Should return initial advertised 6458 * window (in packets) or -1 if default 6459 * value should be used 6460 */ 6461 BPF_SOCK_OPS_TCP_CONNECT_CB, /* Calls BPF program right before an 6462 * active connection is initialized 6463 */ 6464 BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB, /* Calls BPF program when an 6465 * active connection is 6466 * established 6467 */ 6468 BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB, /* Calls BPF program when a 6469 * passive connection is 6470 * established 6471 */ 6472 BPF_SOCK_OPS_NEEDS_ECN, /* If connection's congestion control 6473 * needs ECN 6474 */ 6475 BPF_SOCK_OPS_BASE_RTT, /* Get base RTT. The correct value is 6476 * based on the path and may be 6477 * dependent on the congestion control 6478 * algorithm. In general it indicates 6479 * a congestion threshold. RTTs above 6480 * this indicate congestion 6481 */ 6482 BPF_SOCK_OPS_RTO_CB, /* Called when an RTO has triggered. 6483 * Arg1: value of icsk_retransmits 6484 * Arg2: value of icsk_rto 6485 * Arg3: whether RTO has expired 6486 */ 6487 BPF_SOCK_OPS_RETRANS_CB, /* Called when skb is retransmitted. 6488 * Arg1: sequence number of 1st byte 6489 * Arg2: # segments 6490 * Arg3: return value of 6491 * tcp_transmit_skb (0 => success) 6492 */ 6493 BPF_SOCK_OPS_STATE_CB, /* Called when TCP changes state. 6494 * Arg1: old_state 6495 * Arg2: new_state 6496 */ 6497 BPF_SOCK_OPS_TCP_LISTEN_CB, /* Called on listen(2), right after 6498 * socket transition to LISTEN state. 6499 */ 6500 BPF_SOCK_OPS_RTT_CB, /* Called on every RTT. 6501 */ 6502 BPF_SOCK_OPS_PARSE_HDR_OPT_CB, /* Parse the header option. 6503 * It will be called to handle 6504 * the packets received at 6505 * an already established 6506 * connection. 6507 * 6508 * sock_ops->skb_data: 6509 * Referring to the received skb. 6510 * It covers the TCP header only. 6511 * 6512 * bpf_load_hdr_opt() can also 6513 * be used to search for a 6514 * particular option. 6515 */ 6516 BPF_SOCK_OPS_HDR_OPT_LEN_CB, /* Reserve space for writing the 6517 * header option later in 6518 * BPF_SOCK_OPS_WRITE_HDR_OPT_CB. 6519 * Arg1: bool want_cookie. (in 6520 * writing SYNACK only) 6521 * 6522 * sock_ops->skb_data: 6523 * Not available because no header has 6524 * been written yet. 6525 * 6526 * sock_ops->skb_tcp_flags: 6527 * The tcp_flags of the 6528 * outgoing skb. (e.g. SYN, ACK, FIN). 6529 * 6530 * bpf_reserve_hdr_opt() should 6531 * be used to reserve space. 6532 */ 6533 BPF_SOCK_OPS_WRITE_HDR_OPT_CB, /* Write the header options 6534 * Arg1: bool want_cookie. (in 6535 * writing SYNACK only) 6536 * 6537 * sock_ops->skb_data: 6538 * Referring to the outgoing skb. 6539 * It covers the TCP header 6540 * that has already been written 6541 * by the kernel and the 6542 * earlier bpf-progs. 6543 * 6544 * sock_ops->skb_tcp_flags: 6545 * The tcp_flags of the outgoing 6546 * skb. (e.g. SYN, ACK, FIN). 6547 * 6548 * bpf_store_hdr_opt() should 6549 * be used to write the 6550 * option. 6551 * 6552 * bpf_load_hdr_opt() can also 6553 * be used to search for a 6554 * particular option that 6555 * has already been written 6556 * by the kernel or the 6557 * earlier bpf-progs. 6558 */ 6559 }; 6560 6561 /* List of TCP states. There is a build check in net/ipv4/tcp.c to detect 6562 * changes between the TCP and BPF versions. Ideally this should never happen. 
6563 * If it does, we need to add code to convert them before calling 6564 * the BPF sock_ops function. 6565 */ 6566 enum { 6567 BPF_TCP_ESTABLISHED = 1, 6568 BPF_TCP_SYN_SENT, 6569 BPF_TCP_SYN_RECV, 6570 BPF_TCP_FIN_WAIT1, 6571 BPF_TCP_FIN_WAIT2, 6572 BPF_TCP_TIME_WAIT, 6573 BPF_TCP_CLOSE, 6574 BPF_TCP_CLOSE_WAIT, 6575 BPF_TCP_LAST_ACK, 6576 BPF_TCP_LISTEN, 6577 BPF_TCP_CLOSING, /* Now a valid state */ 6578 BPF_TCP_NEW_SYN_RECV, 6579 6580 BPF_TCP_MAX_STATES /* Leave at the end! */ 6581 }; 6582 6583 enum { 6584 TCP_BPF_IW = 1001, /* Set TCP initial congestion window */ 6585 TCP_BPF_SNDCWND_CLAMP = 1002, /* Set sndcwnd_clamp */ 6586 TCP_BPF_DELACK_MAX = 1003, /* Max delay ack in usecs */ 6587 TCP_BPF_RTO_MIN = 1004, /* Min RTO in usecs */ 6588 /* Copy the SYN pkt to optval 6589 * 6590 * BPF_PROG_TYPE_SOCK_OPS only. It is similar to the 6591 * bpf_getsockopt(TCP_SAVED_SYN) but it is not limited 6592 * to getting it only from the saved_syn. It can either get the 6593 * syn packet from: 6594 * 6595 * 1. the just-received SYN packet (only available when writing the 6596 * SYNACK). It will be useful when it is not necessary to 6597 * save the SYN packet for later use. It is also the only way 6598 * to get the SYN during syncookie mode because the syn 6599 * packet cannot be saved during syncookie. 6600 * 6601 * OR 6602 * 6603 * 2. the earlier saved syn which was done by 6604 * bpf_setsockopt(TCP_SAVE_SYN). 6605 * 6606 * The bpf_getsockopt(TCP_BPF_SYN*) option will hide where the 6607 * SYN packet is obtained. 6608 * 6609 * If the bpf-prog does not need the IP[46] header, the 6610 * bpf-prog can avoid parsing the IP header by using 6611 * TCP_BPF_SYN. Otherwise, the bpf-prog can get both 6612 * IP[46] and TCP header by using TCP_BPF_SYN_IP. 6613 * 6614 * >0: Total number of bytes copied 6615 * -ENOSPC: Not enough space in optval. Only optlen bytes 6616 * are copied. 6617 * -ENOENT: The SYN skb is not available now and the earlier SYN pkt 6618 * was not saved by setsockopt(TCP_SAVE_SYN). 6619 */ 6620 TCP_BPF_SYN = 1005, /* Copy the TCP header */ 6621 TCP_BPF_SYN_IP = 1006, /* Copy the IP[46] and TCP header */ 6622 TCP_BPF_SYN_MAC = 1007, /* Copy the MAC, IP[46], and TCP header */ 6623 }; 6624 6625 enum { 6626 BPF_LOAD_HDR_OPT_TCP_SYN = (1ULL << 0), 6627 }; 6628 6629 /* args[0] value during BPF_SOCK_OPS_HDR_OPT_LEN_CB and 6630 * BPF_SOCK_OPS_WRITE_HDR_OPT_CB. 6631 */ 6632 enum { 6633 BPF_WRITE_HDR_TCP_CURRENT_MSS = 1, /* Kernel is finding the 6634 * total option space 6635 * required for an established 6636 * sk in order to calculate the 6637 * MSS. No skb is actually 6638 * sent. 6639 */ 6640 BPF_WRITE_HDR_TCP_SYNACK_COOKIE = 2, /* Kernel is in syncookie mode 6641 * when sending a SYN. 
6642 */ 6643 }; 6644 6645 struct bpf_perf_event_value { 6646 __u64 counter; 6647 __u64 enabled; 6648 __u64 running; 6649 }; 6650 6651 enum { 6652 BPF_DEVCG_ACC_MKNOD = (1ULL << 0), 6653 BPF_DEVCG_ACC_READ = (1ULL << 1), 6654 BPF_DEVCG_ACC_WRITE = (1ULL << 2), 6655 }; 6656 6657 enum { 6658 BPF_DEVCG_DEV_BLOCK = (1ULL << 0), 6659 BPF_DEVCG_DEV_CHAR = (1ULL << 1), 6660 }; 6661 6662 struct bpf_cgroup_dev_ctx { 6663 /* access_type encoded as (BPF_DEVCG_ACC_* << 16) | BPF_DEVCG_DEV_* */ 6664 __u32 access_type; 6665 __u32 major; 6666 __u32 minor; 6667 }; 6668 6669 struct bpf_raw_tracepoint_args { 6670 __u64 args[0]; 6671 }; 6672 6673 /* DIRECT: Skip the FIB rules and go to FIB table associated with device 6674 * OUTPUT: Do lookup from egress perspective; default is ingress 6675 */ 6676 enum { 6677 BPF_FIB_LOOKUP_DIRECT = (1U << 0), 6678 BPF_FIB_LOOKUP_OUTPUT = (1U << 1), 6679 }; 6680 6681 enum { 6682 BPF_FIB_LKUP_RET_SUCCESS, /* lookup successful */ 6683 BPF_FIB_LKUP_RET_BLACKHOLE, /* dest is blackholed; can be dropped */ 6684 BPF_FIB_LKUP_RET_UNREACHABLE, /* dest is unreachable; can be dropped */ 6685 BPF_FIB_LKUP_RET_PROHIBIT, /* dest not allowed; can be dropped */ 6686 BPF_FIB_LKUP_RET_NOT_FWDED, /* packet is not forwarded */ 6687 BPF_FIB_LKUP_RET_FWD_DISABLED, /* fwding is not enabled on ingress */ 6688 BPF_FIB_LKUP_RET_UNSUPP_LWT, /* fwd requires encapsulation */ 6689 BPF_FIB_LKUP_RET_NO_NEIGH, /* no neighbor entry for nh */ 6690 BPF_FIB_LKUP_RET_FRAG_NEEDED, /* fragmentation required to fwd */ 6691 }; 6692 6693 struct bpf_fib_lookup { 6694 /* input: network family for lookup (AF_INET, AF_INET6) 6695 * output: network family of egress nexthop 6696 */ 6697 __u8 family; 6698 6699 /* set if lookup is to consider L4 data - e.g., FIB rules */ 6700 __u8 l4_protocol; 6701 __be16 sport; 6702 __be16 dport; 6703 6704 union { /* used for MTU check */ 6705 /* input to lookup */ 6706 __u16 tot_len; /* L3 length from network hdr (iph->tot_len) */ 6707 6708 /* output: MTU value */ 6709 __u16 mtu_result; 6710 }; 6711 /* input: L3 device index for lookup 6712 * output: device index from FIB lookup 6713 */ 6714 __u32 ifindex; 6715 6716 union { 6717 /* inputs to lookup */ 6718 __u8 tos; /* AF_INET */ 6719 __be32 flowinfo; /* AF_INET6, flow_label + priority */ 6720 6721 /* output: metric of fib result (IPv4/IPv6 only) */ 6722 __u32 rt_metric; 6723 }; 6724 6725 union { 6726 __be32 ipv4_src; 6727 __u32 ipv6_src[4]; /* in6_addr; network order */ 6728 }; 6729 6730 /* input to bpf_fib_lookup, ipv{4,6}_dst is destination address in 6731 * network header. 
output: bpf_fib_lookup sets to gateway address 6732 * if FIB lookup returns gateway route 6733 */ 6734 union { 6735 __be32 ipv4_dst; 6736 __u32 ipv6_dst[4]; /* in6_addr; network order */ 6737 }; 6738 6739 /* output */ 6740 __be16 h_vlan_proto; 6741 __be16 h_vlan_TCI; 6742 __u8 smac[6]; /* ETH_ALEN */ 6743 __u8 dmac[6]; /* ETH_ALEN */ 6744 }; 6745 6746 struct bpf_redir_neigh { 6747 /* network family for lookup (AF_INET, AF_INET6) */ 6748 __u32 nh_family; 6749 /* network address of nexthop; skips fib lookup to find gateway */ 6750 union { 6751 __be32 ipv4_nh; 6752 __u32 ipv6_nh[4]; /* in6_addr; network order */ 6753 }; 6754 }; 6755 6756 /* bpf_check_mtu flags */ 6757 enum bpf_check_mtu_flags { 6758 BPF_MTU_CHK_SEGS = (1U << 0), 6759 }; 6760 6761 enum bpf_check_mtu_ret { 6762 BPF_MTU_CHK_RET_SUCCESS, /* check and lookup successful */ 6763 BPF_MTU_CHK_RET_FRAG_NEEDED, /* fragmentation required to fwd */ 6764 BPF_MTU_CHK_RET_SEGS_TOOBIG, /* GSO re-segmentation needed to fwd */ 6765 }; 6766 6767 enum bpf_task_fd_type { 6768 BPF_FD_TYPE_RAW_TRACEPOINT, /* tp name */ 6769 BPF_FD_TYPE_TRACEPOINT, /* tp name */ 6770 BPF_FD_TYPE_KPROBE, /* (symbol + offset) or addr */ 6771 BPF_FD_TYPE_KRETPROBE, /* (symbol + offset) or addr */ 6772 BPF_FD_TYPE_UPROBE, /* filename + offset */ 6773 BPF_FD_TYPE_URETPROBE, /* filename + offset */ 6774 }; 6775 6776 enum { 6777 BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG = (1U << 0), 6778 BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL = (1U << 1), 6779 BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP = (1U << 2), 6780 }; 6781 6782 struct bpf_flow_keys { 6783 __u16 nhoff; 6784 __u16 thoff; 6785 __u16 addr_proto; /* ETH_P_* of valid addrs */ 6786 __u8 is_frag; 6787 __u8 is_first_frag; 6788 __u8 is_encap; 6789 __u8 ip_proto; 6790 __be16 n_proto; 6791 __be16 sport; 6792 __be16 dport; 6793 union { 6794 struct { 6795 __be32 ipv4_src; 6796 __be32 ipv4_dst; 6797 }; 6798 struct { 6799 __u32 ipv6_src[4]; /* in6_addr; network order */ 6800 __u32 ipv6_dst[4]; /* in6_addr; network order */ 6801 }; 6802 }; 6803 __u32 flags; 6804 __be32 flow_label; 6805 }; 6806 6807 struct bpf_func_info { 6808 __u32 insn_off; 6809 __u32 type_id; 6810 }; 6811 6812 #define BPF_LINE_INFO_LINE_NUM(line_col) ((line_col) >> 10) 6813 #define BPF_LINE_INFO_LINE_COL(line_col) ((line_col) & 0x3ff) 6814 6815 struct bpf_line_info { 6816 __u32 insn_off; 6817 __u32 file_name_off; 6818 __u32 line_off; 6819 __u32 line_col; 6820 }; 6821 6822 struct bpf_spin_lock { 6823 __u32 val; 6824 }; 6825 6826 struct bpf_timer { 6827 __u64 :64; 6828 __u64 :64; 6829 } __attribute__((aligned(8))); 6830 6831 struct bpf_dynptr { 6832 __u64 :64; 6833 __u64 :64; 6834 } __attribute__((aligned(8))); 6835 6836 struct bpf_sysctl { 6837 __u32 write; /* Sysctl is being read (= 0) or written (= 1). 6838 * Allows 1,2,4-byte read, but no write. 6839 */ 6840 __u32 file_pos; /* Sysctl file position to read from, write to. 6841 * Allows 1,2,4-byte read and 4-byte write. 6842 */ 6843 }; 6844 6845 struct bpf_sockopt { 6846 __bpf_md_ptr(struct bpf_sock *, sk); 6847 __bpf_md_ptr(void *, optval); 6848 __bpf_md_ptr(void *, optval_end); 6849 6850 __s32 level; 6851 __s32 optname; 6852 __s32 optlen; 6853 __s32 retval; 6854 }; 6855 6856 struct bpf_pidns_info { 6857 __u32 pid; 6858 __u32 tgid; 6859 }; 6860 6861 /* User accessible data for SK_LOOKUP programs. Add new fields at the end. 
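 *
 * An illustrative sketch (assumes libbpf's SEC() macro and a hypothetical
 * BPF_MAP_TYPE_SOCKMAP map named redir_map holding one listening socket;
 * not part of the UAPI): steer every local port in [8000, 9000) to that
 * socket with bpf_sk_assign():
 *
 *	SEC("sk_lookup")
 *	int steer_ports(struct bpf_sk_lookup *ctx)
 *	{
 *		const __u32 zero = 0;
 *		struct bpf_sock *sk;
 *		long err;
 *
 *		if (ctx->local_port < 8000 || ctx->local_port >= 9000)
 *			return SK_PASS;
 *		sk = bpf_map_lookup_elem(&redir_map, &zero);
 *		if (!sk)
 *			return SK_PASS;
 *		err = bpf_sk_assign(ctx, sk, 0);
 *		bpf_sk_release(sk);
 *		return err ? SK_DROP : SK_PASS;
 *	}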
*/ 6862 struct bpf_sk_lookup { 6863 union { 6864 __bpf_md_ptr(struct bpf_sock *, sk); /* Selected socket */ 6865 __u64 cookie; /* Non-zero if socket was selected in PROG_TEST_RUN */ 6866 }; 6867 6868 __u32 family; /* Protocol family (AF_INET, AF_INET6) */ 6869 __u32 protocol; /* IP protocol (IPPROTO_TCP, IPPROTO_UDP) */ 6870 __u32 remote_ip4; /* Network byte order */ 6871 __u32 remote_ip6[4]; /* Network byte order */ 6872 __be16 remote_port; /* Network byte order */ 6873 __u16 :16; /* Zero padding */ 6874 __u32 local_ip4; /* Network byte order */ 6875 __u32 local_ip6[4]; /* Network byte order */ 6876 __u32 local_port; /* Host byte order */ 6877 __u32 ingress_ifindex; /* The arriving interface. Determined by inet_iif. */ 6878 }; 6879 6880 /* 6881 * struct btf_ptr is used for typed pointer representation; the 6882 * type id is used to render the pointer data as the appropriate type 6883 * via the bpf_snprintf_btf() helper described above. A flags field - 6884 * potentially to specify additional details about the BTF pointer 6885 * (rather than its mode of display) - is included for future use. 6886 * Display flags - BTF_F_* - are passed to bpf_snprintf_btf separately. 6887 */ 6888 struct btf_ptr { 6889 void *ptr; 6890 __u32 type_id; 6891 __u32 flags; /* BTF ptr flags; unused at present. */ 6892 }; 6893 6894 /* 6895 * Flags to control bpf_snprintf_btf() behaviour. 6896 * - BTF_F_COMPACT: no formatting around type information 6897 * - BTF_F_NONAME: no struct/union member names/types 6898 * - BTF_F_PTR_RAW: show raw (unobfuscated) pointer values; 6899 * equivalent to %px. 6900 * - BTF_F_ZERO: show zero-valued struct/union members; they 6901 * are not displayed by default 6902 */ 6903 enum { 6904 BTF_F_COMPACT = (1ULL << 0), 6905 BTF_F_NONAME = (1ULL << 1), 6906 BTF_F_PTR_RAW = (1ULL << 2), 6907 BTF_F_ZERO = (1ULL << 3), 6908 }; 6909 6910 /* bpf_core_relo_kind encodes which aspect of captured field/type/enum value 6911 * has to be adjusted by relocations. It is emitted by llvm and passed to 6912 * libbpf and later to the kernel. 6913 */ 6914 enum bpf_core_relo_kind { 6915 BPF_CORE_FIELD_BYTE_OFFSET = 0, /* field byte offset */ 6916 BPF_CORE_FIELD_BYTE_SIZE = 1, /* field size in bytes */ 6917 BPF_CORE_FIELD_EXISTS = 2, /* field existence in target kernel */ 6918 BPF_CORE_FIELD_SIGNED = 3, /* field signedness (0 - unsigned, 1 - signed) */ 6919 BPF_CORE_FIELD_LSHIFT_U64 = 4, /* bitfield-specific left bitshift */ 6920 BPF_CORE_FIELD_RSHIFT_U64 = 5, /* bitfield-specific right bitshift */ 6921 BPF_CORE_TYPE_ID_LOCAL = 6, /* type ID in local BPF object */ 6922 BPF_CORE_TYPE_ID_TARGET = 7, /* type ID in target kernel */ 6923 BPF_CORE_TYPE_EXISTS = 8, /* type existence in target kernel */ 6924 BPF_CORE_TYPE_SIZE = 9, /* type size in bytes */ 6925 BPF_CORE_ENUMVAL_EXISTS = 10, /* enum value existence in target kernel */ 6926 BPF_CORE_ENUMVAL_VALUE = 11, /* enum value integer value */ 6927 BPF_CORE_TYPE_MATCHES = 12, /* type match in target kernel */ 6928 }; 6929 6930 /* 6931 * "struct bpf_core_relo" is used to pass relocation data from LLVM to libbpf 6932 * and from libbpf to the kernel. 6933 * 6934 * CO-RE relocation captures the following data: 6935 * - insn_off - instruction offset (in bytes) within a BPF program that needs 6936 * its insn->imm field to be relocated with actual field info; 6937 * - type_id - BTF type ID of the "root" (containing) entity of a relocatable 6938 * type or field; 6939 * - access_str_off - offset into corresponding .BTF string section. 
String 6940 * interpretation depends on specific relocation kind: 6941 * - for field-based relocations, string encodes an accessed field using 6942 * a sequence of field and array indices, separated by colon (:). It's 6943 * conceptually very close to LLVM's getelementptr ([0]) instruction's 6944 * arguments for identifying offset to a field. 6945 * - for type-based relocations, the string is expected to be just "0"; 6946 * - for enum value-based relocations, string contains an index of enum 6947 * value within its enum type; 6948 * - kind - one of enum bpf_core_relo_kind; 6949 * 6950 * Example: 6951 * struct sample { 6952 * int a; 6953 * struct { 6954 * int b[10]; 6955 * }; 6956 * }; 6957 * 6958 * struct sample *s = ...; 6959 * int *x = &s->a; // encoded as "0:0" (a is field #0) 6960 * int *y = &s->b[5]; // encoded as "0:1:0:5" (anon struct is field #1, 6961 * // b is field #0 inside anon struct, accessing elem #5) 6962 * int *z = &s[10]->b; // encoded as "10:1" (ptr is used as an array) 6963 * 6964 * type_id for all relocs in this example will capture BTF type id of 6965 * `struct sample`. 6966 * 6967 * Such a relocation is emitted when using the __builtin_preserve_access_index() 6968 * Clang built-in, passing an expression that captures the field address, e.g.: 6969 * 6970 * bpf_probe_read(&dst, sizeof(dst), 6971 * __builtin_preserve_access_index(&src->a.b.c)); 6972 * 6973 * In this case Clang will emit a field relocation recording the necessary data 6974 * to be able to find the offset of the embedded `a.b.c` field within the `src` 6975 * struct. 6976 * 6977 * [0] https://llvm.org/docs/LangRef.html#getelementptr-instruction 6978 */ struct bpf_core_relo { 6979 __u32 insn_off; 6980 __u32 type_id; 6981 __u32 access_str_off; 6982 enum bpf_core_relo_kind kind; 6983 }; 6984 6985 #endif /* _UAPI__LINUX_BPF_H__ */ 6986